Merge branch 'dev/etan/df-forkedblobs' into feat_eip-7688
Commit 2abfee3488
@@ -35,21 +35,16 @@ jobs:
             cpu: amd64
           - os: windows
             cpu: amd64
-        branch: [~, upstream/version-1-6, v2.0.6]
+        branch: [~, upstream/version-2-0]
         exclude:
           - target:
               os: macos
-            branch: upstream/version-1-6
+            branch: upstream/version-2-0
           - target:
               os: windows
-            branch: upstream/version-1-6
-          - target:
-              os: windows
-            branch: ~
+            branch: upstream/version-2-0
         include:
-          - branch: upstream/version-1-6
-            branch-short: version-1-6
-          - branch: v2.0.6
+          - branch: upstream/version-2-0
             branch-short: version-2-0
             nimflags-extra: --mm:refc
           - target:

@@ -7,7 +7,7 @@

 name: PR block
 on:
-  pull_request:
+  pull_request_target:
     branches:
       - stable

@@ -54,7 +54,8 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
 + sanity check Deneb blocks [Preset: mainnet] OK
 + sanity check Deneb states [Preset: mainnet] OK
 + sanity check Deneb states, reusing buffers [Preset: mainnet] OK
-+ sanity check blobs [Preset: mainnet] OK
++ sanity check Electra blocks [Preset: mainnet] OK
++ sanity check blobs (Deneb) [Preset: mainnet] OK
 + sanity check genesis roundtrip [Preset: mainnet] OK
 + sanity check phase 0 blocks [Preset: mainnet] OK
 + sanity check phase 0 getState rollback [Preset: mainnet] OK
@@ -62,7 +63,7 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
 + sanity check phase 0 states, reusing buffers [Preset: mainnet] OK
 + sanity check state diff roundtrip [Preset: mainnet] OK
 ```
-OK: 25/25 Fail: 0/25 Skip: 0/25
+OK: 26/26 Fail: 0/26 Skip: 0/26
 ## Beacon state [Preset: mainnet]
 ```diff
 + Smoke test initialize_beacon_state_from_eth1 [Preset: mainnet] OK
@@ -834,9 +835,10 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
 ## Spec helpers
 ```diff
 + build_proof - BeaconState OK
++ hypergeom_cdf OK
 + integer_squareroot OK
 ```
-OK: 2/2 Fail: 0/2 Skip: 0/2
+OK: 3/3 Fail: 0/3 Skip: 0/3
 ## Specific field types
 ```diff
 + root update OK
@@ -1034,4 +1036,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
 OK: 9/9 Fail: 0/9 Skip: 0/9

 ---TOTAL---
-OK: 689/694 Fail: 0/694 Skip: 5/694
+OK: 691/696 Fail: 0/696 Skip: 5/696

@@ -114,7 +114,7 @@ type
     keyValues: KvStoreRef # Random stuff using DbKeyKind - suitable for small values mainly!
     blocks: array[ConsensusFork, KvStoreRef] # BlockRoot -> TrustedSignedBeaconBlock

-    blobs: KvStoreRef # (BlockRoot -> BlobSidecar)
+    blobs: array[BlobFork, KvStoreRef] # (BlockRoot -> BlobSidecar)

     stateRoots: KvStoreRef # (Slot, BlockRoot) -> StateRoot

@@ -559,9 +559,9 @@ proc new*(T: type BeaconChainDB,
       sealedPeriods: "lc_sealed_periods")).expectDb()
   static: doAssert LightClientDataFork.high == LightClientDataFork.Electra

-  var blobs: KvStoreRef
+  var blobs: array[BlobFork, KvStoreRef]
   if cfg.DENEB_FORK_EPOCH != FAR_FUTURE_EPOCH:
-    blobs = kvStore db.openKvStore("deneb_blobs").expectDb()
+    blobs[BlobFork.Deneb] = kvStore db.openKvStore("deneb_blobs").expectDb()

   # Versions prior to 1.4.0 (altair) stored validators in `immutable_validators`
   # which stores validator keys in compressed format - this is
@@ -765,8 +765,9 @@ proc close*(db: BeaconChainDB) =
   if db.db == nil: return

   # Close things roughly in reverse order
-  if not isNil(db.blobs):
-    discard db.blobs.close()
+  for blobFork in BlobFork:
+    if not isNil(db.blobs[blobFork]):
+      discard db.blobs[blobFork].close()
   db.lcData.close()
   db.finalizedBlocks.close()
   discard db.summaries.close()
@@ -812,16 +813,19 @@ proc putBlock*(
   db.blocks[type(value).kind].putSZSSZ(value.root.data, value)
   db.putBeaconBlockSummary(value.root, value.message.toBeaconBlockSummary())

-proc putBlobSidecar*(
-    db: BeaconChainDB,
-    value: BlobSidecar) =
+proc putBlobSidecar*[T: ForkyBlobSidecar](
+    db: BeaconChainDB, value: T) =
   let block_root = hash_tree_root(value.signed_block_header.message)
-  db.blobs.putSZSSZ(blobkey(block_root, value.index), value)
+  db.blobs[T.kind].putSZSSZ(blobkey(block_root, value.index), value)

 proc delBlobSidecar*(
     db: BeaconChainDB,
     root: Eth2Digest, index: BlobIndex): bool =
-  db.blobs.del(blobkey(root, index)).expectDb()
+  var res = false
+  for blobFork in BlobFork:
+    if db.blobs[blobFork].del(blobkey(root, index)).expectDb():
+      res = true
+  res

 proc updateImmutableValidators*(
     db: BeaconChainDB, validators: openArray[Validator]) =
@@ -1070,16 +1074,18 @@ proc getBlockSSZ*(
   withConsensusFork(fork):
     getBlockSSZ(db, key, data, consensusFork.TrustedSignedBeaconBlock)

-proc getBlobSidecarSZ*(db: BeaconChainDB, root: Eth2Digest, index: BlobIndex,
-                       data: var seq[byte]): bool =
+proc getBlobSidecarSZ*[T: ForkyBlobSidecar](
+    db: BeaconChainDB, root: Eth2Digest, index: BlobIndex,
+    data: var seq[byte]): bool =
   let dataPtr = addr data # Short-lived
   func decode(data: openArray[byte]) =
     assign(dataPtr[], data)
-  db.blobs.get(blobkey(root, index), decode).expectDb()
+  db.blobs[T.kind].get(blobkey(root, index), decode).expectDb()

-proc getBlobSidecar*(db: BeaconChainDB, root: Eth2Digest, index: BlobIndex,
-                     value: var BlobSidecar): bool =
-  db.blobs.getSZSSZ(blobkey(root, index), value) == GetResult.found
+proc getBlobSidecar*[T: ForkyBlobSidecar](
+    db: BeaconChainDB, root: Eth2Digest, index: BlobIndex,
+    value: var T): bool =
+  db.blobs[T.kind].getSZSSZ(blobkey(root, index), value) == GetResult.found

 proc getBlockSZ*(
     db: BeaconChainDB, key: Eth2Digest, data: var seq[byte],
@@ -1366,7 +1372,8 @@ proc containsBlock*(

 proc containsBlock*[
     X: altair.TrustedSignedBeaconBlock | bellatrix.TrustedSignedBeaconBlock |
-       capella.TrustedSignedBeaconBlock | deneb.TrustedSignedBeaconBlock](
+       capella.TrustedSignedBeaconBlock | deneb.TrustedSignedBeaconBlock |
+       electra.TrustedSignedBeaconBlock](
     db: BeaconChainDB, key: Eth2Digest, T: type X): bool =
  db.blocks[X.kind].contains(key.data).expectDb()

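The hunks above key the blob column by `BlobFork`, so each sidecar type round-trips through its own table. A minimal sketch of the intended usage against the accessors above; the `db` handle and the sidecar value are illustrative placeholders, not part of the commit:

```nim
# Round-trip a Deneb sidecar through the per-fork blob store (sketch).
var sidecar: deneb.BlobSidecar              # T.kind == BlobFork.Deneb
db.putBlobSidecar(sidecar)                  # stored in db.blobs[BlobFork.Deneb]

let root = hash_tree_root(sidecar.signed_block_header.message)
var loaded: deneb.BlobSidecar
if db.getBlobSidecar(root, sidecar.index, loaded):
  doAssert loaded.index == sidecar.index    # fetched from the same fork table
```
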
@@ -562,9 +562,10 @@ type
       name: "light-client-data-max-periods" .}: Option[uint64]

     longRangeSync* {.
+      hidden
       desc: "Enable long-range syncing (genesis sync)",
-      defaultValue: LongRangeSyncMode.Light,
-      name: "long-range-sync".}: LongRangeSyncMode
+      defaultValue: LongRangeSyncMode.Lenient,
+      name: "debug-long-range-sync".}: LongRangeSyncMode

     inProcessValidators* {.
       desc: "Disable the push model (the beacon node tells a signing process with the private keys of the validators what to sign and when) and load the validators in the beacon node itself"
@@ -1269,8 +1270,11 @@ func completeCmdArg*(T: type WalletName, input: string): seq[string] =
   return @[]

 proc parseCmdArg*(T: type enr.Record, p: string): T {.raises: [ValueError].} =
-  if not fromURI(result, p):
-    raise newException(ValueError, "Invalid ENR")
+  let res = enr.Record.fromURI(p)
+  if res.isErr:
+    raise newException(ValueError, "Invalid ENR:" & $res.error)
+
+  res.value

 func completeCmdArg*(T: type enr.Record, val: string): seq[string] =
   return @[]

@@ -21,15 +21,16 @@ const

 type
   BlobQuarantine* = object
-    blobs*:
-      OrderedTable[(Eth2Digest, BlobIndex, KzgCommitment), ref BlobSidecar]
+    blobs*: OrderedTable[
+      (Eth2Digest, BlobIndex, KzgCommitment), ForkedBlobSidecar]
     onBlobSidecarCallback*: OnBlobSidecarCallback

   BlobFetchRecord* = object
     block_root*: Eth2Digest
     indices*: seq[BlobIndex]

-  OnBlobSidecarCallback = proc(data: BlobSidecar) {.gcsafe, raises: [].}
+  OnBlobSidecarCallback = proc(
+    data: BlobSidecarInfoObject) {.gcsafe, raises: [].}

 func shortLog*(x: seq[BlobIndex]): string =
   "<" & x.mapIt($it).join(", ") & ">"
@@ -37,7 +38,7 @@ func shortLog*(x: seq[BlobIndex]): string =
 func shortLog*(x: seq[BlobFetchRecord]): string =
   "[" & x.mapIt(shortLog(it.block_root) & shortLog(it.indices)).join(", ") & "]"

-func put*(quarantine: var BlobQuarantine, blobSidecar: ref BlobSidecar) =
+func put*(quarantine: var BlobQuarantine, blobSidecar: ForkedBlobSidecar) =
   if quarantine.blobs.lenu64 >= MaxBlobs:
     # FIFO if full. For example, sync manager and request manager can race to
     # put blobs in at the same time, so one gets blob insert -> block resolve
@@ -52,43 +53,61 @@ func put*(quarantine: var BlobQuarantine, blobSidecar: ref BlobSidecar) =
         oldest_blob_key = k
         break
     quarantine.blobs.del oldest_blob_key
-  let block_root = hash_tree_root(blobSidecar.signed_block_header.message)
-  discard quarantine.blobs.hasKeyOrPut(
-    (block_root, blobSidecar.index, blobSidecar.kzg_commitment), blobSidecar)
+  withForkyBlob(blobSidecar):
+    let block_root = hash_tree_root(forkyBlob[].signed_block_header.message)
+    discard quarantine.blobs.hasKeyOrPut(
+      (block_root, forkyBlob[].index, forkyBlob[].kzg_commitment), blobSidecar)
+
+func put*(quarantine: var BlobQuarantine, blobSidecar: ref ForkyBlobSidecar) =
+  quarantine.put(ForkedBlobSidecar.init(blobSidecar))

 func hasBlob*(
     quarantine: BlobQuarantine,
     slot: Slot,
     proposer_index: uint64,
     index: BlobIndex): bool =
-  for blob_sidecar in quarantine.blobs.values:
-    template block_header: untyped = blob_sidecar.signed_block_header.message
-    if block_header.slot == slot and
-        block_header.proposer_index == proposer_index and
-        blob_sidecar.index == index:
-      return true
+  for blobSidecar in quarantine.blobs.values:
+    withForkyBlob(blobSidecar):
+      template block_header: untyped = forkyBlob[].signed_block_header.message
+      if block_header.slot == slot and
+          block_header.proposer_index == proposer_index and
+          forkyBlob[].index == index:
+        return true
   false

 func popBlobs*(
     quarantine: var BlobQuarantine, digest: Eth2Digest,
-    blck: deneb.SignedBeaconBlock | electra.SignedBeaconBlock):
-    seq[ref BlobSidecar] =
-  var r: seq[ref BlobSidecar] = @[]
+    blck:
+      deneb.SignedBeaconBlock |
+      electra.SignedBeaconBlock): auto =
+  const blobFork = blobForkAtConsensusFork(typeof(blck).kind).expect("Blobs OK")
+  type ResultType = blobFork.BlobSidecars
+  var r: ResultType = @[]
   for idx, kzg_commitment in blck.message.body.blob_kzg_commitments:
-    var b: ref BlobSidecar
+    var b: ForkedBlobSidecar
     if quarantine.blobs.pop((digest, BlobIndex idx, kzg_commitment), b):
-      r.add(b)
+      # It was already verified that the blob is linked to `blck`.
+      # Therefore, we can assume that `BlobFork` is correct.
+      doAssert b.kind == blobFork,
+        "Must verify blob inclusion proof before `BlobQuarantine.put`"
+      r.add(b.forky(blobFork))
   r

-func hasBlobs*(quarantine: BlobQuarantine,
-               blck: deneb.SignedBeaconBlock | electra.SignedBeaconBlock): bool =
+func hasBlobs*(
+    quarantine: BlobQuarantine,
+    blck:
+      deneb.SignedBeaconBlock |
+      electra.SignedBeaconBlock): bool =
   for idx, kzg_commitment in blck.message.body.blob_kzg_commitments:
     if (blck.root, BlobIndex idx, kzg_commitment) notin quarantine.blobs:
       return false
   true

-func blobFetchRecord*(quarantine: BlobQuarantine,
-    blck: deneb.SignedBeaconBlock | electra.SignedBeaconBlock): BlobFetchRecord =
+func blobFetchRecord*(
+    quarantine: BlobQuarantine,
+    blck:
+      deneb.SignedBeaconBlock |
+      electra.SignedBeaconBlock): BlobFetchRecord =
   var indices: seq[BlobIndex]
   for i in 0..<len(blck.message.body.blob_kzg_commitments):
     let idx = BlobIndex(i)

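With the quarantine now storing `ForkedBlobSidecar`, producers hand in fork-specific `ref` sidecars and consumers get a fork-matched sequence back from `popBlobs`. A rough usage sketch against the functions above; `quarantine`, `blobSidecar`, and `signedBlock` are assumed placeholders:

```nim
# put() wraps the fork-specific ref into ForkedBlobSidecar internally.
quarantine.put(blobSidecar)           # blobSidecar: ref deneb.BlobSidecar

# Once every commitment in the block is covered, pop them fork-typed.
if quarantine.hasBlobs(signedBlock):  # signedBlock: deneb.SignedBeaconBlock
  # Returns deneb.BlobSidecars here, via blobForkAtConsensusFork(...).
  let blobs = quarantine.popBlobs(signedBlock.root, signedBlock)
```
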
@@ -123,21 +123,21 @@ iterator get_attesting_indices*(shufflingRef: ShufflingRef,
 iterator get_attesting_indices*(
     dag: ChainDAGRef, attestation: phase0.TrustedAttestation,
     on_chain: static bool = true): ValidatorIndex =
-  block: # `return` is not allowed in an inline iterator
+  block gaiBlock: # `return` is not allowed in an inline iterator
     let
       slot =
         check_attestation_slot_target(attestation.data).valueOr:
           warn "Invalid attestation slot in trusted attestation",
             attestation = shortLog(attestation)
           doAssert strictVerification notin dag.updateFlags
-          break
+          break gaiBlock
       blck =
         dag.getBlockRef(attestation.data.beacon_block_root).valueOr:
           # Attestation block unknown - this is fairly common because we
           # discard alternative histories on restart
           debug "Pruned block in trusted attestation",
             attestation = shortLog(attestation)
-          break
+          break gaiBlock
       target =
         blck.atCheckpoint(attestation.data.target).valueOr:
           # This may happen when there's no block at the epoch boundary slot
@@ -148,7 +148,7 @@ iterator get_attesting_indices*(
             blck = shortLog(blck),
             attestation = shortLog(attestation)
           doAssert strictVerification notin dag.updateFlags
-          break
+          break gaiBlock
       shufflingRef =
         dag.getShufflingRef(target.blck, target.slot.epoch, false).valueOr:
           warn "Attestation shuffling not found",
@@ -156,7 +156,7 @@ iterator get_attesting_indices*(
             attestation = shortLog(attestation)

           doAssert strictVerification notin dag.updateFlags
-          break
+          break gaiBlock

       committeesPerSlot = get_committee_count_per_slot(shufflingRef)
       committeeIndex =
@@ -166,7 +166,7 @@ iterator get_attesting_indices*(
             attestation = shortLog(attestation)

           doAssert strictVerification notin dag.updateFlags
-          break
+          break gaiBlock

     for validator in get_attesting_indices(
         shufflingRef, slot, committeeIndex, attestation.aggregation_bits):
@@ -175,21 +175,21 @@ iterator get_attesting_indices*(
 iterator get_attesting_indices*(
     dag: ChainDAGRef, attestation: electra.TrustedAttestation,
     on_chain: static bool): ValidatorIndex =
-  block: # `return` is not allowed in an inline iterator
+  block gaiBlock: # `return` is not allowed in an inline iterator
     let
       slot =
         check_attestation_slot_target(attestation.data).valueOr:
           warn "Invalid attestation slot in trusted attestation",
             attestation = shortLog(attestation)
           doAssert strictVerification notin dag.updateFlags
-          break
+          break gaiBlock
       blck =
         dag.getBlockRef(attestation.data.beacon_block_root).valueOr:
           # Attestation block unknown - this is fairly common because we
           # discard alternative histories on restart
           debug "Pruned block in trusted attestation",
             attestation = shortLog(attestation)
-          break
+          break gaiBlock
       target =
         blck.atCheckpoint(attestation.data.target).valueOr:
           # This may happen when there's no block at the epoch boundary slot
@@ -200,7 +200,7 @@ iterator get_attesting_indices*(
             blck = shortLog(blck),
             attestation = shortLog(attestation)
           doAssert strictVerification notin dag.updateFlags
-          break
+          break gaiBlock
       shufflingRef =
         dag.getShufflingRef(target.blck, target.slot.epoch, false).valueOr:
           warn "Attestation shuffling not found",
@@ -208,7 +208,7 @@ iterator get_attesting_indices*(
             attestation = shortLog(attestation)

           doAssert strictVerification notin dag.updateFlags
-          break
+          break gaiBlock

     for validator in get_attesting_indices(
         shufflingRef, slot, attestation.committee_bits,

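The functional content of these hunks is unchanged; only the `block` gains a label so each `break` inside the `valueOr` bodies names its target explicitly. A standalone illustration of the pattern (not from the commit):

```nim
# `break label` exits the named block from any nesting depth, which stands
# in for `return` inside an inline iterator body.
iterator positivePrefix(xs: seq[int]): int =
  block outer: # `return` is not allowed in an inline iterator
    for x in xs:
      if x < 0:
        break outer # leaves the whole block, not just the for loop
      yield x
```
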
@@ -491,9 +491,11 @@ func asConsensusType*(payload: engine_api.GetPayloadV3Response):
       # Both are defined as `array[N, byte]` under the hood.
       blobsBundle: deneb.BlobsBundle(
         commitments: KzgCommitments.init(
-          payload.blobsBundle.commitments.mapIt(it.bytes)),
+          payload.blobsBundle.commitments.mapIt(
+            kzg_abi.KzgCommitment(bytes: it.bytes))),
         proofs: KzgProofs.init(
-          payload.blobsBundle.proofs.mapIt(it.bytes)),
+          payload.blobsBundle.proofs.mapIt(
+            kzg_abi.KzgProof(bytes: it.bytes))),
         blobs: Blobs.init(
           payload.blobsBundle.blobs.mapIt(it.bytes))))

@@ -502,24 +504,25 @@ func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV4):
   template getTransaction(tt: TypedTransaction): bellatrix.Transaction =
     bellatrix.Transaction.init(tt.distinctBase)

-  template getDepositRequest(dr: DepositRequestV1): DepositRequest =
-    DepositRequest(
+  template getDepositRequest(
+      dr: DepositRequestV1): electra.DepositRequest =
+    electra.DepositRequest(
       pubkey: ValidatorPubKey(blob: dr.pubkey.distinctBase),
       withdrawal_credentials: dr.withdrawalCredentials.asEth2Digest,
       amount: dr.amount.Gwei,
       signature: ValidatorSig(blob: dr.signature.distinctBase),
       index: dr.index.uint64)

-  template getWithdrawalRequest(wr: WithdrawalRequestV1): WithdrawalRequest =
-    WithdrawalRequest(
+  template getWithdrawalRequest(
+      wr: WithdrawalRequestV1): electra.WithdrawalRequest =
+    electra.WithdrawalRequest(
       source_address: ExecutionAddress(data: wr.sourceAddress.distinctBase),
-      validator_pubkey: ValidatorPubKey(
-        blob: wr.validatorPublicKey.distinctBase),
+      validator_pubkey: ValidatorPubKey(blob: wr.validatorPubkey.distinctBase),
       amount: wr.amount.Gwei)

-  template getConsolidationRequest(cr: ConsolidationRequestV1):
-      ConsolidationRequest =
-    ConsolidationRequest(
+  template getConsolidationRequest(
+      cr: ConsolidationRequestV1): electra.ConsolidationRequest =
+    electra.ConsolidationRequest(
       source_address: ExecutionAddress(data: cr.sourceAddress.distinctBase),
       source_pubkey: ValidatorPubKey(blob: cr.sourcePubkey.distinctBase),
       target_pubkey: ValidatorPubKey(blob: cr.targetPubkey.distinctBase))
@@ -569,9 +572,11 @@ func asConsensusType*(payload: engine_api.GetPayloadV4Response):
       # Both are defined as `array[N, byte]` under the hood.
       blobsBundle: deneb.BlobsBundle(
         commitments: KzgCommitments.init(
-          payload.blobsBundle.commitments.mapIt(it.bytes)),
+          payload.blobsBundle.commitments.mapIt(
+            kzg_abi.KzgCommitment(bytes: it.bytes))),
         proofs: KzgProofs.init(
-          payload.blobsBundle.proofs.mapIt(it.bytes)),
+          payload.blobsBundle.proofs.mapIt(
+            kzg_abi.KzgProof(bytes: it.bytes))),
         blobs: Blobs.init(
           payload.blobsBundle.blobs.mapIt(it.bytes))))

@@ -656,7 +661,8 @@ func asEngineExecutionPayload*(executionPayload: electra.ExecutionPayload):
   template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction =
     TypedTransaction(tt.distinctBase)

-  template getDepositRequest(dr: DepositRequest): DepositRequestV1 =
+  template getDepositRequest(
+      dr: electra.DepositRequest): DepositRequestV1 =
     DepositRequestV1(
       pubkey: FixedBytes[RawPubKeySize](dr.pubkey.blob),
       withdrawalCredentials: FixedBytes[32](dr.withdrawal_credentials.data),
@@ -664,14 +670,15 @@ func asEngineExecutionPayload*(executionPayload: electra.ExecutionPayload):
       signature: FixedBytes[RawSigSize](dr.signature.blob),
       index: dr.index.Quantity)

-  template getWithdrawalRequest(wr: WithdrawalRequest): WithdrawalRequestV1 =
+  template getWithdrawalRequest(
+      wr: electra.WithdrawalRequest): WithdrawalRequestV1 =
     WithdrawalRequestV1(
       sourceAddress: Address(wr.source_address.data),
-      validatorPublicKey: FixedBytes[RawPubKeySize](wr.validator_pubkey.blob),
+      validatorPubkey: FixedBytes[RawPubKeySize](wr.validator_pubkey.blob),
       amount: wr.amount.Quantity)

-  template getConsolidationRequest(cr: ConsolidationRequest):
-      ConsolidationRequestV1 =
+  template getConsolidationRequest(
+      cr: electra.ConsolidationRequest): ConsolidationRequestV1 =
     ConsolidationRequestV1(
       sourceAddress: Address(cr.source_address.data),
       sourcePubkey: FixedBytes[RawPubKeySize](cr.source_pubkey.blob),
@@ -1352,6 +1359,14 @@ proc sendNewPayload*(
       if len(pendingRequests) == 0:
         # All requests failed; we will continue our attempts until the
         # deadline expires.
+
+        # To avoid continuous spam of requests while the EL node is offline,
+        # we are going to sleep until the next attempt for
+        # (NEWPAYLOAD_TIMEOUT / 4) time (2.seconds).
+        let timeout =
+          chronos.nanoseconds(NEWPAYLOAD_TIMEOUT.nanoseconds div 4)
+        await sleepAsync(timeout)
+
         break mainLoop

 proc forkchoiceUpdatedForSingleEL(
@@ -1525,6 +1540,14 @@ proc forkchoiceUpdated*(
       if len(pendingRequests) == 0:
         # All requests failed; we will continue our attempts until the
         # deadline expires.
+
+        # To avoid continuous spam of requests while the EL node is offline,
+        # we are going to sleep until the next attempt for
+        # (FORKCHOICEUPDATED_TIMEOUT / 4) time (2.seconds).
+        let timeout =
+          chronos.nanoseconds(FORKCHOICEUPDATED_TIMEOUT.nanoseconds div 4)
+        await sleepAsync(timeout)
+
         break mainLoop

 # TODO can't be defined within exchangeConfigWithSingleEL

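The engine API pins `NEWPAYLOAD_TIMEOUT` and `FORKCHOICEUPDATED_TIMEOUT` at 8 seconds, so the quarter-timeout backoff added above works out to the 2 seconds mentioned in the comments. A self-contained sketch of the arithmetic, with the constant restated as an assumption:

```nim
import chronos

const NEWPAYLOAD_TIMEOUT = 8.seconds  # per the engine API specification

proc backoffSketch() {.async.} =
  # Sleep a quarter of the request timeout between retry rounds -> 2 seconds.
  let timeout = chronos.nanoseconds(NEWPAYLOAD_TIMEOUT.nanoseconds div 4)
  await sleepAsync(timeout)
```
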
@@ -56,7 +56,7 @@ const
 type
   BlockEntry = object
     blck*: ForkedSignedBeaconBlock
-    blobs*: Opt[BlobSidecars]
+    blobs*: Opt[ForkedBlobSidecars]
     maybeFinalized*: bool
       ## The block source claims the block has been finalized already
     resfut*: Future[Result[void, VerifierError]].Raising([CancelledError])
@@ -173,7 +173,12 @@ from ../consensus_object_pools/block_clearance import
 proc storeBackfillBlock(
     self: var BlockProcessor,
     signedBlock: ForkySignedBeaconBlock,
-    blobsOpt: Opt[BlobSidecars]): Result[void, VerifierError] =
+    blobsOpt: Opt[ForkyBlobSidecars]
+): Result[void, VerifierError] =
+  const
+    consensusFork = typeof(signedBlock).kind
+    blobFork = blobForkAtConsensusFork(consensusFork).get(BlobFork.Deneb)
+  static: doAssert typeof(blobsOpt).T is blobFork.BlobSidecars

   # The block is certainly not missing any more
   self.consensusManager.quarantine[].missing.del(signedBlock.root)
@@ -181,12 +186,12 @@ proc storeBackfillBlock(
   # Establish blob viability before calling addBackfillBlock to avoid
   # writing the block in case of blob error.
   var blobsOk = true
-  when typeof(signedBlock).kind >= ConsensusFork.Deneb:
+  when consensusFork >= ConsensusFork.Deneb:
     if blobsOpt.isSome:
       let blobs = blobsOpt.get()
       let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq
       if blobs.len > 0 or kzgCommits.len > 0:
-        let r = validate_blobs(kzgCommits, blobs.mapIt(it.blob),
+        let r = validate_blobs(kzgCommits, blobs.mapIt(KzgBlob(bytes: it.blob)),
                                blobs.mapIt(it.kzg_proof))
         if r.isErr():
           debug "backfill blob validation failed",
@@ -220,7 +225,7 @@ proc storeBackfillBlock(
     return res

   # Only store blobs after successfully establishing block viability.
-  let blobs = blobsOpt.valueOr: BlobSidecars @[]
+  let blobs = blobsOpt.valueOr: blobFork.BlobSidecars() @[]
   for b in blobs:
     self.consensusManager.dag.db.putBlobSidecar(b[])

@@ -381,17 +386,42 @@ proc checkBloblessSignature(
     return err("checkBloblessSignature: Invalid proposer signature")
   ok()

+template withForkyBlckAndBlobs(
+    blck: ForkedSignedBeaconBlock,
+    blobs: Opt[ForkedBlobSidecars],
+    body: untyped): untyped =
+  withBlck(blck):
+    when consensusFork >= ConsensusFork.Deneb:
+      const blobFork = blobForkAtConsensusFork(consensusFork).expect("Blobs OK")
+      let forkyBlobs {.inject, used.} =
+        if blobs.isSome:
+          # Nim 2.0.8: `forks.BlobSidecars(blobFork)` does not work here:
+          # > type mismatch: got 'BlobFork' for 'blobFork`gensym15'
+          # but expected 'BlobSidecars'
+          var fBlobs: deneb.BlobSidecars
+          for blob in blobs.get:
+            doAssert blob.kind == blobFork,
+              "Must verify blob inclusion proof before `enqueueBlock`"
+            fBlobs.add blob.forky(blobFork)
+          Opt.some fBlobs
+        else:
+          Opt.none deneb.BlobSidecars
+    else:
+      doAssert blobs.isNone, "Blobs are not supported before Deneb"
+      let forkyBlobs {.inject, used.} = Opt.none deneb.BlobSidecars
+    body
+
 proc enqueueBlock*(
     self: var BlockProcessor, src: MsgSource, blck: ForkedSignedBeaconBlock,
-    blobs: Opt[BlobSidecars],
+    blobs: Opt[ForkedBlobSidecars],
     resfut: Future[Result[void, VerifierError]].Raising([CancelledError]) = nil,
     maybeFinalized = false,
     validationDur = Duration()) =
-  withBlck(blck):
+  withForkyBlckAndBlobs(blck, blobs):
     if forkyBlck.message.slot <= self.consensusManager.dag.finalizedHead.slot:
       # let backfill blocks skip the queue - these are always "fast" to process
       # because there are no state rewinds to deal with
-      let res = self.storeBackfillBlock(forkyBlck, blobs)
+      let res = self.storeBackfillBlock(forkyBlck, forkyBlobs)
       resfut.complete(res)
       return

@@ -409,14 +439,20 @@ proc enqueueBlock*(
 proc storeBlock(
     self: ref BlockProcessor, src: MsgSource, wallTime: BeaconTime,
     signedBlock: ForkySignedBeaconBlock,
-    blobsOpt: Opt[BlobSidecars],
+    blobsOpt: Opt[ForkyBlobSidecars],
     maybeFinalized = false,
-    queueTick: Moment = Moment.now(), validationDur = Duration()):
-    Future[Result[BlockRef, (VerifierError, ProcessingStatus)]] {.async: (raises: [CancelledError]).} =
+    queueTick: Moment = Moment.now(),
+    validationDur = Duration()
+): Future[Result[BlockRef, (VerifierError, ProcessingStatus)]] {.
+    async: (raises: [CancelledError]).} =
   ## storeBlock is the main entry point for unvalidated blocks - all untrusted
   ## blocks, regardless of origin, pass through here. When storing a block,
   ## we will add it to the dag and pass it to all block consumers that need
   ## to know about it, such as the fork choice and the monitoring
+  const
+    consensusFork = typeof(signedBlock).kind
+    blobFork = blobForkAtConsensusFork(consensusFork).get(BlobFork.Deneb)
+  static: doAssert typeof(blobsOpt).T is blobFork.BlobSidecars

   let
     attestationPool = self.consensusManager.attestationPool
@@ -497,16 +533,18 @@ proc storeBlock(
       let blobs =
         withBlck(parentBlck.get()):
           when consensusFork >= ConsensusFork.Deneb:
-            var blob_sidecars: BlobSidecars
+            const blobFork =
+              blobForkAtConsensusFork(consensusFork).expect("Blobs OK")
+            var blob_sidecars: ForkedBlobSidecars
             for i in 0 ..< forkyBlck.message.body.blob_kzg_commitments.len:
-              let blob = BlobSidecar.new()
+              let blob = blobFork.BlobSidecar.new()
               if not dag.db.getBlobSidecar(parent_root, i.BlobIndex, blob[]):
                 blobsOk = false # Pruned, or inconsistent DB
                 break
-              blob_sidecars.add blob
+              blob_sidecars.add ForkedBlobSidecar.init(blob)
             Opt.some blob_sidecars
           else:
-            Opt.none BlobSidecars
+            Opt.none ForkedBlobSidecars
       if blobsOk:
         debug "Loaded parent block from storage", parent_root
         self[].enqueueBlock(
@@ -555,8 +593,7 @@ proc storeBlock(
     # This should simulate an unsynced EL, which still must perform these
     # checks. This means it must be able to do so without context, beyond
     # whatever data the block itself contains.
-    when typeof(signedBlock).kind >= ConsensusFork.Bellatrix and typeof(signedBlock).kind <= ConsensusFork.Deneb:
-      debugComment "electra can do this in principle"
+    when typeof(signedBlock).kind >= ConsensusFork.Bellatrix:
      template payload(): auto = signedBlock.message.body.execution_payload
      if signedBlock.message.is_execution_block and
          payload.block_hash !=
@@ -578,7 +615,7 @@ proc storeBlock(
       let blobs = blobsOpt.get()
       let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq
       if blobs.len > 0 or kzgCommits.len > 0:
-        let r = validate_blobs(kzgCommits, blobs.mapIt(it.blob),
+        let r = validate_blobs(kzgCommits, blobs.mapIt(KzgBlob(bytes: it.blob)),
                                blobs.mapIt(it.kzg_proof))
         if r.isErr():
           debug "blob validation failed",
@@ -773,11 +810,11 @@ proc storeBlock(
     withBlck(quarantined):
       when typeof(forkyBlck).kind < ConsensusFork.Deneb:
         self[].enqueueBlock(
-          MsgSource.gossip, quarantined, Opt.none(BlobSidecars))
+          MsgSource.gossip, quarantined, Opt.none(ForkedBlobSidecars))
       else:
         if len(forkyBlck.message.body.blob_kzg_commitments) == 0:
           self[].enqueueBlock(
-            MsgSource.gossip, quarantined, Opt.some(BlobSidecars @[]))
+            MsgSource.gossip, quarantined, Opt.some(ForkedBlobSidecars @[]))
         else:
           if (let res = checkBloblessSignature(self[], forkyBlck); res.isErr):
             warn "Failed to verify signature of unorphaned blobless block",
@@ -785,8 +822,9 @@ proc storeBlock(
               error = res.error()
             continue
           if self.blobQuarantine[].hasBlobs(forkyBlck):
-            let blobs = self.blobQuarantine[].popBlobs(
-              forkyBlck.root, forkyBlck)
+            let blobs = self.blobQuarantine[]
+              .popBlobs(forkyBlck.root, forkyBlck)
+              .mapIt(ForkedBlobSidecar.init(it))
             self[].enqueueBlock(MsgSource.gossip, quarantined, Opt.some(blobs))
           else:
             discard self.consensusManager.quarantine[].addBlobless(
@@ -799,8 +837,10 @@ proc storeBlock(

 proc addBlock*(
     self: var BlockProcessor, src: MsgSource, blck: ForkedSignedBeaconBlock,
-    blobs: Opt[BlobSidecars], maybeFinalized = false,
-    validationDur = Duration()): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError], raw: true).} =
+    blobs: Opt[ForkedBlobSidecars], maybeFinalized = false,
+    validationDur = Duration()
+): Future[Result[void, VerifierError]] {.
+    async: (raises: [CancelledError], raw: true).} =
   ## Enqueue a Gossip-validated block for consensus verification
   # Backpressure:
   #   There is no backpressure here - producers must wait for `resfut` to
@@ -830,9 +870,9 @@ proc processBlock(
     error "Processing block before genesis, clock turned back?"
     quit 1

-  let res = withBlck(entry.blck):
+  let res = withForkyBlckAndBlobs(entry.blck, entry.blobs):
     await self.storeBlock(
-      entry.src, wallTime, forkyBlck, entry.blobs, entry.maybeFinalized,
+      entry.src, wallTime, forkyBlck, forkyBlobs, entry.maybeFinalized,
       entry.queueTick, entry.validationDur)

   if res.isErr and res.error[1] == ProcessingStatus.notCompleted:

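Call sites wrap each fork-specific sidecar into `ForkedBlobSidecar` before queuing, and `withForkyBlckAndBlobs` unwraps them next to the matching block fork. A condensed sketch of that hand-off; the surrounding setup (`blobQuarantine`, `blockProcessor`, `forkyBlck`) is assumed:

```nim
# Wrap fork-specific sidecars for the forked queue entry (sketch).
let blobs = blobQuarantine[]
  .popBlobs(forkyBlck.root, forkyBlck)
  .mapIt(ForkedBlobSidecar.init(it))
blockProcessor[].enqueueBlock(
  MsgSource.gossip, ForkedSignedBeaconBlock.init(forkyBlck), Opt.some(blobs))
# When the entry is processed, `withForkyBlckAndBlobs` re-derives
# `forkyBlobs` as Opt[deneb.BlobSidecars] matching forkyBlck's fork.
```
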
@@ -8,7 +8,7 @@
 {.push raises: [].}

 import
-  std/tables,
+  std/[sequtils, tables],
   chronicles, chronos, metrics,
   taskpools,
   ../spec/[helpers, forks],
@@ -244,13 +246,15 @@ proc processSignedBeaconBlock*(
     let blobs =
       when typeof(signedBlock).kind >= ConsensusFork.Deneb:
         if self.blobQuarantine[].hasBlobs(signedBlock):
-          Opt.some(self.blobQuarantine[].popBlobs(signedBlock.root, signedBlock))
+          Opt.some(self.blobQuarantine[]
+            .popBlobs(signedBlock.root, signedBlock)
+            .mapIt(ForkedBlobSidecar.init(it)))
         else:
           discard self.quarantine[].addBlobless(self.dag.finalizedHead.slot,
                                                 signedBlock)
           return v
       else:
-        Opt.none(BlobSidecars)
+        Opt.none(ForkedBlobSidecars)

     self.blockProcessor[].enqueueBlock(
       src, ForkedSignedBeaconBlock.init(signedBlock),
@@ -308,7 +310,9 @@ proc processBlobSidecar*(
     if self.blobQuarantine[].hasBlobs(forkyBlck):
       self.blockProcessor[].enqueueBlock(
         MsgSource.gossip, blobless,
-        Opt.some(self.blobQuarantine[].popBlobs(block_root, forkyBlck)))
+        Opt.some(self.blobQuarantine[]
+          .popBlobs(block_root, forkyBlck)
+          .mapIt(ForkedBlobSidecar.init(it))))
     else:
       discard self.quarantine[].addBlobless(
         self.dag.finalizedHead.slot, forkyBlck)

@@ -11,6 +11,7 @@ import
   # Status
   chronicles, chronos, metrics,
   results,
+  stew/byteutils,
   # Internals
   ../spec/[
     beaconstate, state_transition_block, forks, helpers, network, signatures],
@@ -458,7 +459,7 @@ proc validateBlobSidecar*(
   # blob_sidecar.blob, blob_sidecar.kzg_commitment, blob_sidecar.kzg_proof)`.
   block:
     let ok = verifyProof(
-      blob_sidecar.blob,
+      KzgBlob(bytes: blob_sidecar.blob),
       blob_sidecar.kzg_commitment,
       blob_sidecar.kzg_proof).valueOr:
       return dag.checkedReject("BlobSidecar: blob verify failed")
@@ -467,7 +468,13 @@ proc validateBlobSidecar*(

   # Send notification about new blob sidecar via callback
   if not(isNil(blobQuarantine.onBlobSidecarCallback)):
-    blobQuarantine.onBlobSidecarCallback(blob_sidecar)
+    blobQuarantine.onBlobSidecarCallback BlobSidecarInfoObject(
+      block_root: hash_tree_root(blob_sidecar.signed_block_header.message),
+      index: blob_sidecar.index,
+      slot: blob_sidecar.signed_block_header.message.slot,
+      kzg_commitment: blob_sidecar.kzg_commitment,
+      versioned_hash:
+        blob_sidecar.kzg_commitment.kzg_commitment_to_versioned_hash.to0xHex())

   ok()
@@ -1132,7 +1139,6 @@ proc validateAggregate*(
     Future[Result[
       tuple[attestingIndices: seq[ValidatorIndex], sig: CookedSig],
       ValidationError]] {.async: (raises: [CancelledError]).} =
-  debugComment "is not"
   template aggregate_and_proof: untyped = signedAggregateAndProof.message
   template aggregate: untyped = aggregate_and_proof.aggregate

@@ -63,7 +63,7 @@ typedef struct ETHRandomNumber ETHRandomNumber;
 * @return `NULL` - If an error occurred.
 */
 ETH_RESULT_USE_CHECK
-ETHRandomNumber *ETHRandomNumberCreate(void);
+ETHRandomNumber *_Nullable ETHRandomNumberCreate(void);

 /**
 * Destroys a cryptographically secure random number generator.
@@ -97,7 +97,7 @@ typedef struct ETHConsensusConfig ETHConsensusConfig;
 * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/configs/README.md
 */
 ETH_RESULT_USE_CHECK
-ETHConsensusConfig *ETHConsensusConfigCreateFromYaml(const char *configFileContent);
+ETHConsensusConfig *_Nullable ETHConsensusConfigCreateFromYaml(const char *configFileContent);

 /**
 * Destroys an Ethereum Consensus Layer network configuration.
@@ -156,7 +156,7 @@ typedef struct ETHBeaconState ETHBeaconState;
 * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/configs/README.md
 */
 ETH_RESULT_USE_CHECK
-ETHBeaconState *ETHBeaconStateCreateFromSsz(
+ETHBeaconState *_Nullable ETHBeaconStateCreateFromSsz(
     const ETHConsensusConfig *cfg,
     const char *consensusVersion,
     const void *sszBytes,
@@ -251,7 +251,7 @@ typedef struct ETHBeaconClock ETHBeaconClock;
 * NULL if the state contained an invalid time.
 */
 ETH_RESULT_USE_CHECK
-ETHBeaconClock *ETHBeaconClockCreateFromState(
+ETHBeaconClock *_Nullable ETHBeaconClockCreateFromState(
     const ETHConsensusConfig *cfg, const ETHBeaconState *state);

 /**
@@ -329,7 +329,7 @@ typedef struct ETHLightClientStore ETHLightClientStore;
 * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#weak-subjectivity-period
 */
 ETH_RESULT_USE_CHECK
-ETHLightClientStore *ETHLightClientStoreCreateFromBootstrap(
+ETHLightClientStore *_Nullable ETHLightClientStoreCreateFromBootstrap(
     const ETHConsensusConfig *cfg,
     const ETHRoot *trustedBlockRoot,
     const char *mediaType,
@@ -1040,7 +1040,7 @@ typedef struct ETHExecutionBlockHeader ETHExecutionBlockHeader;
 * @see https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getblockbyhash
 */
 ETH_RESULT_USE_CHECK
-ETHExecutionBlockHeader *ETHExecutionBlockHeaderCreateFromJson(
+ETHExecutionBlockHeader *_Nullable ETHExecutionBlockHeaderCreateFromJson(
     const ETHRoot *executionHash,
     const char *blockHeaderJson);

@@ -1129,7 +1129,7 @@ typedef struct ETHTransactions ETHTransactions;
 * @see https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getblockbyhash
 */
 ETH_RESULT_USE_CHECK
-ETHTransactions *ETHTransactionsCreateFromJson(
+ETHTransactions *_Nullable ETHTransactionsCreateFromJson(
     const ETHRoot *transactionsRoot,
     const char *transactionsJson);

@@ -1539,7 +1539,7 @@ typedef struct ETHReceipts ETHReceipts;
 * @see https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_gettransactionreceipt
 */
 ETH_RESULT_USE_CHECK
-ETHReceipts *ETHReceiptsCreateFromJson(
+ETHReceipts *_Nullable ETHReceiptsCreateFromJson(
     const ETHRoot *receiptsRoot,
     const char *receiptsJson,
     const ETHTransactions *transactions);

@@ -9,7 +9,6 @@

 import
   std/[json, sequtils, times],
-  stew/saturation_arith,
   eth/common/[eth_types_rlp, transaction],
   eth/keys,
   eth/p2p/discoveryv5/random2,
@@ -1242,10 +1241,8 @@ proc ETHExecutionBlockHeaderCreateFromJson(

   # Construct block header
   static: # `GasInt` is signed. We only use it for hashing.
-    doAssert sizeof(int64) == sizeof(data.gasLimit)
-    doAssert sizeof(int64) == sizeof(data.gasUsed)
-  if distinctBase(data.timestamp) > int64.high.uint64:
-    return nil
+    doAssert sizeof(uint64) == sizeof(data.gasLimit)
+    doAssert sizeof(uint64) == sizeof(data.gasUsed)
   if data.nonce.isNone:
     return nil
   let blockHeader = ExecutionBlockHeader(
@@ -1258,8 +1255,8 @@ proc ETHExecutionBlockHeaderCreateFromJson(
     logsBloom: distinctBase(data.logsBloom),
     difficulty: data.difficulty,
     number: distinctBase(data.number),
-    gasLimit: GasInt.saturate distinctBase(data.gasLimit),
-    gasUsed: GasInt.saturate distinctBase(data.gasUsed),
+    gasLimit: distinctBase(data.gasLimit),
+    gasUsed: distinctBase(data.gasUsed),
     timestamp: EthTime(distinctBase(data.timestamp)),
     extraData: distinctBase(data.extraData),
     mixHash: data.mixHash.asEth2Digest,
@@ -1322,7 +1319,7 @@ proc ETHExecutionBlockHeaderCreateFromJson(
   var tr = initHexaryTrie(newMemoryDB())
   for i, wd in wds:
     try:
-      tr.put(rlp.encode(i), wd.bytes)
+      tr.put(rlp.encode(i.uint), wd.bytes)
     except RlpError:
       raiseAssert "Unreachable"
   if tr.rootHash() != data.withdrawalsRoot.get.asEth2Digest:
@@ -1497,25 +1494,15 @@ proc ETHTransactionsCreateFromJson(
   # Construct transaction
   static:
     doAssert sizeof(uint64) == sizeof(ChainId)
-    doAssert sizeof(int64) == sizeof(data.gasPrice)
-    doAssert sizeof(int64) == sizeof(data.maxPriorityFeePerGas.get)
+    doAssert sizeof(uint64) == sizeof(data.gas)
+    doAssert sizeof(uint64) == sizeof(data.gasPrice)
+    doAssert sizeof(uint64) == sizeof(data.maxPriorityFeePerGas.get)
     doAssert sizeof(UInt256) == sizeof(data.maxFeePerBlobGas.get)
   if distinctBase(data.chainId.get(0.Quantity)) > distinctBase(ChainId.high):
     return nil
-  if distinctBase(data.gasPrice) > int64.high.uint64:
-    return nil
-  if distinctBase(data.maxFeePerGas.get(0.Quantity)) > int64.high.uint64:
-    return nil
-  if distinctBase(data.maxPriorityFeePerGas.get(0.Quantity)) >
-      int64.high.uint64:
-    return nil
   if data.maxFeePerBlobGas.get(0.u256) >
       uint64.high.u256:
     return nil
-  if distinctBase(data.gas) > int64.high.uint64:
-    return nil
-  if distinctBase(data.v) > int64.high.uint64:
-    return nil
   if data.yParity.isSome:
     # This is not always included, but if it is, make sure it's correct
     let yParity = data.yParity.get
@@ -1555,7 +1542,7 @@ proc ETHTransactionsCreateFromJson(
         ExecutionHash256(data: distinctBase(it)))
       else:
         @[],
-    V: data.v.uint64,
+    V: distinctBase(data.v),
     R: data.r,
     S: data.s)
   rlpBytes =
@@ -1632,7 +1619,7 @@ proc ETHTransactionsCreateFromJson(
   var tr = initHexaryTrie(newMemoryDB())
   for i, transaction in txs:
     try:
-      tr.put(rlp.encode(i), distinctBase(transaction.bytes))
+      tr.put(rlp.encode(i.uint), distinctBase(transaction.bytes))
     except RlpError:
       raiseAssert "Unreachable"
   if tr.rootHash() != transactionsRoot[]:
@@ -2208,7 +2195,7 @@ proc ETHReceiptsCreateFromJson(
   var tr = initHexaryTrie(newMemoryDB())
   for i, rec in recs:
     try:
-      tr.put(rlp.encode(i), rec.bytes)
+      tr.put(rlp.encode(i.uint), rec.bytes)
     except RlpError:
       raiseAssert "Unreachable"
   if tr.rootHash() != receiptsRoot[]:

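The `i.uint` conversions matter because the vendored RLP encoder serializes unsigned integers, so the trie keys must be built from an explicitly unsigned index. A small sketch of such an index trie, assuming nim-eth's `eth/rlp` and `eth/trie` modules (values illustrative):

```nim
import eth/rlp, eth/trie/[db, hexary]

# Index -> payload trie, keyed by the RLP of an unsigned index (sketch).
var tr = initHexaryTrie(newMemoryDB())
for i, payload in [@[0x01'u8], @[0x02'u8]]:
  tr.put(rlp.encode(i.uint), payload)
echo tr.rootHash()
```
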
@@ -0,0 +1,141 @@
+# beacon_chain
+# Copyright (c) 2024 Status Research & Development GmbH
+# Licensed and distributed under either of
+#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [].}
+
+import stew/base10
+import std/tables
+
+type
+  Eth2Agent* {.pure.} = enum
+    Unknown,
+    Nimbus,
+    Lighthouse,
+    Prysm,
+    Teku,
+    Lodestar,
+    Grandine
+
+func `$`*(a: Eth2Agent): string =
+  case a
+  of Eth2Agent.Unknown:
+    "pending/unknown"
+  of Eth2Agent.Nimbus:
+    "nimbus"
+  of Eth2Agent.Lighthouse:
+    "lighthouse"
+  of Eth2Agent.Prysm:
+    "prysm"
+  of Eth2Agent.Teku:
+    "teku"
+  of Eth2Agent.Lodestar:
+    "lodestar"
+  of Eth2Agent.Grandine:
+    "grandine"
+
+const
+  # Lighthouse errors can be found here
+  # https://github.com/sigp/lighthouse/blob/5fdd3b39bb8150d1ea8622e42e0166ed46af7693/beacon_node/lighthouse_network/src/rpc/methods.rs#L171
+  LighthouseErrors = [
+    (128'u64, "Unable to verify network"),
+    (129'u64, "The node has too many connected peers"),
+    (250'u64, "Peer score is too low"),
+    (251'u64, "The peer is banned"),
+    (252'u64, "The IP address the peer is using is banned"),
+  ].toTable()
+
+  # Prysm errors can be found here
+  # https://github.com/prysmaticlabs/prysm/blob/7a394062e1054d73014e793819cb9cf0d20ff2e3/beacon-chain/p2p/types/rpc_goodbye_codes.go#L12
+  PrysmErrors = [
+    (128'u64, "Unable to verify network"),
+    (129'u64, "The node has too many connected peers"),
+    (250'u64, "Peer score is too low"),
+    (251'u64, "The peer is banned")
+  ].toTable()
+
+  # Lodestar errors can be found here
+  # https://github.com/ChainSafe/lodestar/blob/7280234bea66b49da3900b916a1b54c4666e4173/packages/beacon-node/src/constants/network.ts#L20
+  LodestarErrors = [
+    (128'u64, "Unable to verify network"),
+    (129'u64, "The node has too many connected peers"),
+    (250'u64, "Peer score is too low"),
+    (251'u64, "The peer is banned")
+  ].toTable()
+
+  # Teku errors can be found here
+  # https://github.com/Consensys/teku/blob/a3f7ebc75f24ec942286b0c1ae192e411f84aa7e/ethereum/spec/src/main/java/tech/pegasys/teku/spec/datastructures/networking/libp2p/rpc/GoodbyeMessage.java#L42
+  TekuErrors = [
+    (128'u64, "Unable to verify network"),
+    (129'u64, "The node has too many connected peers"),
+    (130'u64, "Too many requests from the peer")
+  ].toTable()
+
+  # Nimbus errors can be found here
+  # https://github.com/status-im/nimbus-eth2/blob/9b6b42c8f9792e657397bb3669a80b57da470c04/beacon_chain/networking/eth2_network.nim#L176
+  NimbusErrors = [
+    (237'u64, "Peer score is too low")
+  ].toTable()
+
+  # Grandine errors can be found here
+  # https://github.com/grandinetech/eth2_libp2p/blob/63a0c5e662847b86b1d5617478e39bccd39df0a9/src/rpc/methods.rs#L246
+  GrandineErrors = [
+    (128'u64, "Unable to verify network"),
+    (129'u64, "The node has too many connected peers"),
+    (250'u64, "Peer score is too low"),
+    (251'u64, "The peer is banned"),
+    (252'u64, "The IP address the peer is using is banned"),
+  ].toTable()
+
+  # This is a combination of all the errors; we need it when the remote
+  # agent has not been identified yet.
+  UnknownErrors = [
+    (128'u64, "Unable to verify network"),
+    (129'u64, "The node has too many connected peers"),
+    (130'u64, "Too many requests from the peer"),
+    (237'u64, "Peer score is too low"),
+    (250'u64, "Peer score is too low"),
+    (251'u64, "The peer is banned"),
+    (252'u64, "The IP address the peer is using is banned"),
+  ].toTable()
+
+func disconnectReasonName*(agent: Eth2Agent, code: uint64): string =
+  if code < 128'u64:
+    case code
+    of 0'u64:
+      "Unknown error (0)"
+    of 1'u64:
+      "Client shutdown (1)"
+    of 2'u64:
+      "Irrelevant network (2)"
+    of 3'u64:
+      "Fault or error (3)"
+    else:
+      let
+        scode = " (" & Base10.toString(code) & ")"
+        defaultMessage = "Disconnected"
+
+      defaultMessage & scode
+  else:
+    let
+      scode = " (" & Base10.toString(code) & ")"
+      defaultMessage = "Disconnected"
+
+    case agent
+    of Eth2Agent.Unknown:
+      UnknownErrors.getOrDefault(code, defaultMessage) & scode
+    of Eth2Agent.Nimbus:
+      NimbusErrors.getOrDefault(code, defaultMessage) & scode
+    of Eth2Agent.Lighthouse:
+      LighthouseErrors.getOrDefault(code, defaultMessage) & scode
+    of Eth2Agent.Prysm:
+      PrysmErrors.getOrDefault(code, defaultMessage) & scode
+    of Eth2Agent.Teku:
+      TekuErrors.getOrDefault(code, defaultMessage) & scode
+    of Eth2Agent.Lodestar:
+      LodestarErrors.getOrDefault(code, defaultMessage) & scode
+    of Eth2Agent.Grandine:
+      GrandineErrors.getOrDefault(code, defaultMessage) & scode

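A short usage sketch of the new module; the agent and the goodbye codes are illustrative:

```nim
# Map a goodbye code to a client-specific, human-readable reason.
let agent = Eth2Agent.Lighthouse
doAssert disconnectReasonName(agent, 1'u64) == "Client shutdown (1)"
doAssert disconnectReasonName(agent, 250'u64) == "Peer score is too low (250)"
doAssert disconnectReasonName(agent, 131'u64) == "Disconnected (131)" # unmapped
```
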
@@ -25,13 +25,13 @@ type
   Eth2DiscoveryId* = NodeId

 func parseBootstrapAddress*(address: string):
-    Result[enr.Record, cstring] =
+    Result[enr.Record, string] =
   let lowerCaseAddress = toLowerAscii(address)
   if lowerCaseAddress.startsWith("enr:"):
-    var enrRec: enr.Record
-    if enrRec.fromURI(address):
-      return ok enrRec
-    return err "Invalid ENR bootstrap record"
+    let res = enr.Record.fromURI(address)
+    if res.isOk():
+      return ok res.value
+    return err "Invalid bootstrap ENR: " & $res.error
   elif lowerCaseAddress.startsWith("enode:"):
     return err "ENode bootstrap addresses are not supported"
   else:

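The switch to the `Result`-returning `enr.Record.fromURI` carries the decode error along instead of a bare `bool`, which is what lets the message above include `$res.error`. Sketch of the calling convention (the input string is deliberately bogus):

```nim
# Result-based ENR parsing surfaces the decode error to the caller.
let res = enr.Record.fromURI("enr:not-a-valid-record")
if res.isErr:
  echo "Invalid bootstrap ENR: ", res.error
```
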
@ -23,18 +23,20 @@ import
|
|||
libp2p/protocols/pubsub/[
|
||||
pubsub, gossipsub, rpc/message, rpc/messages, peertable, pubsubpeer],
|
||||
libp2p/stream/connection,
|
||||
libp2p/services/wildcardresolverservice,
|
||||
eth/[keys, async_utils],
|
||||
eth/net/nat, eth/p2p/discoveryv5/[enr, node, random2],
|
||||
".."/[version, conf, beacon_clock, conf_light_client],
|
||||
../spec/datatypes/[phase0, altair, bellatrix],
|
||||
../spec/[eth2_ssz_serialization, network, helpers, forks],
|
||||
../validators/keystore_management,
|
||||
"."/[eth2_discovery, eth2_protocol_dsl, libp2p_json_serialization, peer_pool, peer_scores]
|
||||
"."/[eth2_discovery, eth2_protocol_dsl, eth2_agents,
|
||||
libp2p_json_serialization, peer_pool, peer_scores]
|
||||
|
||||
export
|
||||
tables, chronos, ratelimit, version, multiaddress, peerinfo, p2pProtocol,
|
||||
connection, libp2p_json_serialization, eth2_ssz_serialization, results,
|
||||
eth2_discovery, peer_pool, peer_scores
|
||||
eth2_discovery, peer_pool, peer_scores, eth2_agents
|
||||
|
||||
logScope:
|
||||
topics = "networking"
|
||||
|
@ -81,6 +83,7 @@ type
|
|||
rng*: ref HmacDrbgContext
|
||||
peers*: Table[PeerId, Peer]
|
||||
directPeers*: DirectPeers
|
||||
announcedAddresses*: seq[MultiAddress]
|
||||
validTopics: HashSet[string]
|
||||
peerPingerHeartbeatFut: Future[void].Raising([CancelledError])
|
||||
peerTrimmerHeartbeatFut: Future[void].Raising([CancelledError])
|
||||
|
@ -96,6 +99,7 @@ type
|
|||
Peer* = ref object
|
||||
network*: Eth2Node
|
||||
peerId*: PeerId
|
||||
remoteAgent*: Eth2Agent
|
||||
discoveryId*: Eth2DiscoveryId
|
||||
connectionState*: ConnectionState
|
||||
protocolStates*: seq[RootRef]
|
||||
|
@ -336,6 +340,31 @@ func shortProtocolId(protocolId: string): string =
|
|||
protocolId.high
|
||||
protocolId[start..ends]
|
||||
|
||||
proc updateAgent*(peer: Peer) =
|
||||
let
|
||||
agent = toLowerAscii(peer.network.switch.peerStore[AgentBook][peer.peerId])
|
||||
# proto = peer.network.switch.peerStore[ProtoVersionBook][peer.peerId]
|
||||
|
||||
if "nimbus" in agent:
|
||||
peer.remoteAgent = Eth2Agent.Nimbus
|
||||
elif "lighthouse" in agent:
|
||||
peer.remoteAgent = Eth2Agent.Lighthouse
|
||||
elif "teku" in agent:
|
||||
peer.remoteAgent = Eth2Agent.Teku
|
||||
elif "lodestar" in agent:
|
||||
peer.remoteAgent = Eth2Agent.Lodestar
|
||||
elif "prysm" in agent:
|
||||
peer.remoteAgent = Eth2Agent.Prysm
|
||||
elif "grandine" in agent:
|
||||
peer.remoteAgent = Eth2Agent.Grandine
|
||||
else:
|
||||
peer.remoteAgent = Eth2Agent.Unknown
|
||||
|
||||
proc getRemoteAgent*(peer: Peer): Eth2Agent =
|
||||
if peer.remoteAgent == Eth2Agent.Unknown:
|
||||
peer.updateAgent()
|
||||
peer.remoteAgent
|
||||
|
||||
proc openStream(node: Eth2Node,
|
||||
peer: Peer,
|
||||
protocolId: string): Future[NetRes[Connection]]
|
||||
|
@ -1388,7 +1417,7 @@ proc connectWorker(node: Eth2Node, index: int) {.async: (raises: [CancelledError
|
|||
node.connTable.excl(remotePeerAddr.peerId)
|
||||
|
||||
proc toPeerAddr(node: Node): Result[PeerAddr, cstring] =
|
||||
let nodeRecord = ? node.record.toTypedRecord()
|
||||
let nodeRecord = TypedRecord.fromRecord(node.record)
|
||||
let peerAddr = ? nodeRecord.toPeerAddr(tcpProtocol)
|
||||
ok(peerAddr)
|
||||
|
||||
|
@ -1767,7 +1796,7 @@ proc new(T: type Eth2Node,
|
|||
switch: Switch, pubsub: GossipSub,
|
||||
ip: Opt[IpAddress], tcpPort, udpPort: Opt[Port],
|
||||
privKey: keys.PrivateKey, discovery: bool,
|
||||
directPeers: DirectPeers,
|
||||
directPeers: DirectPeers, announcedAddresses: openArray[MultiAddress],
|
||||
rng: ref HmacDrbgContext): T {.raises: [CatchableError].} =
|
||||
when not defined(local_testnet):
|
||||
let
|
||||
|
@ -1811,6 +1840,7 @@ proc new(T: type Eth2Node,
|
|||
connectTimeout: connectTimeout,
|
||||
seenThreshold: seenThreshold,
|
||||
directPeers: directPeers,
|
||||
announcedAddresses: @announcedAddresses,
|
||||
quota: TokenBucket.new(maxGlobalQuota, fullReplenishTime)
|
||||
)
|
||||
|
||||
|
@ -1879,11 +1909,9 @@ proc start*(node: Eth2Node) {.async: (raises: [CancelledError]).} =
|
|||
notice "Discovery disabled; trying bootstrap nodes",
|
||||
nodes = node.discovery.bootstrapRecords.len
|
||||
for enr in node.discovery.bootstrapRecords:
|
||||
let tr = enr.toTypedRecord()
|
||||
if tr.isOk():
|
||||
let pa = tr.get().toPeerAddr(tcpProtocol)
|
||||
if pa.isOk():
|
||||
await node.connQueue.addLast(pa.get())
|
||||
let pa = TypedRecord.fromRecord(enr).toPeerAddr(tcpProtocol)
|
||||
if pa.isOk():
|
||||
await node.connQueue.addLast(pa.get())
|
||||
node.peerPingerHeartbeatFut = node.peerPingerHeartbeat()
|
||||
node.peerTrimmerHeartbeatFut = node.peerTrimmerHeartbeat()
|
||||
|
||||
|
@ -2223,6 +2251,8 @@ func gossipId(
|
|||
proc newBeaconSwitch(config: BeaconNodeConf | LightClientConf,
|
||||
seckey: PrivateKey, address: MultiAddress,
|
||||
rng: ref HmacDrbgContext): Switch {.raises: [CatchableError].} =
|
||||
let service: Service = WildcardAddressResolverService.new()
|
||||
|
||||
var sb =
|
||||
if config.enableYamux:
|
||||
SwitchBuilder.new().withYamux()
|
||||
|
@ -2239,6 +2269,7 @@ proc newBeaconSwitch(config: BeaconNodeConf | LightClientConf,
|
|||
.withMaxConnections(config.maxPeers)
|
||||
.withAgentVersion(config.agentString)
|
||||
.withTcpTransport({ServerFlags.ReuseAddr})
|
||||
.withServices(@[service])
|
||||
.build()
|
||||
|
||||
proc createEth2Node*(rng: ref HmacDrbgContext,
|
||||
|
@ -2272,7 +2303,10 @@ proc createEth2Node*(rng: ref HmacDrbgContext,
|
|||
let (peerId, address) =
|
||||
if s.startsWith("enr:"):
|
||||
let
|
||||
typedEnr = parseBootstrapAddress(s).get().toTypedRecord().get()
|
||||
enr = parseBootstrapAddress(s).valueOr:
|
||||
fatal "Failed to parse bootstrap address", enr=s
|
||||
quit 1
|
||||
typedEnr = TypedRecord.fromRecord(enr)
|
||||
peerAddress = toPeerAddr(typedEnr, tcpProtocol).get()
|
||||
(peerAddress.peerId, peerAddress.addrs[0])
|
||||
elif s.startsWith("/"):
|
||||
|
@ -2359,7 +2393,8 @@ proc createEth2Node*(rng: ref HmacDrbgContext,
|
|||
let node = Eth2Node.new(
|
||||
config, cfg, enrForkId, discoveryForkId, forkDigests, getBeaconTime, switch, pubsub, extIp,
|
||||
extTcpPort, extUdpPort, netKeys.seckey.asEthKey,
|
||||
discovery = config.discv5Enabled, directPeers, rng = rng)
|
||||
discovery = config.discv5Enabled, directPeers, announcedAddresses,
|
||||
rng = rng)
|
||||
|
||||
node.pubsub.subscriptionValidator =
|
||||
proc(topic: string): bool {.gcsafe, raises: [].} =
|
||||
|
@@ -2656,26 +2691,31 @@ proc broadcastBeaconBlock*(
   node.broadcast(topic, blck)
 
 proc broadcastBlobSidecar*(
-    node: Eth2Node, subnet_id: BlobId, blob: deneb.BlobSidecar):
+    node: Eth2Node, subnet_id: BlobId, blob: ForkyBlobSidecar):
     Future[SendResult] {.async: (raises: [CancelledError], raw: true).} =
   let
-    forkPrefix = node.forkDigestAtEpoch(node.getWallEpoch)
-    topic = getBlobSidecarTopic(forkPrefix, subnet_id)
+    contextEpoch = blob.signed_block_header.message.slot.epoch
+    topic = getBlobSidecarTopic(
+      node.forkDigestAtEpoch(contextEpoch), subnet_id)
   node.broadcast(topic, blob)
 
 proc broadcastSyncCommitteeMessage*(
     node: Eth2Node, msg: SyncCommitteeMessage,
     subcommitteeIdx: SyncSubcommitteeIndex):
     Future[SendResult] {.async: (raises: [CancelledError], raw: true).} =
-  let topic = getSyncCommitteeTopic(
-    node.forkDigestAtEpoch(node.getWallEpoch), subcommitteeIdx)
+  let
+    contextEpoch = msg.slot.epoch
+    topic = getSyncCommitteeTopic(
+      node.forkDigestAtEpoch(contextEpoch), subcommitteeIdx)
   node.broadcast(topic, msg)
 
 proc broadcastSignedContributionAndProof*(
     node: Eth2Node, msg: SignedContributionAndProof):
     Future[SendResult] {.async: (raises: [CancelledError], raw: true).} =
-  let topic = getSyncCommitteeContributionAndProofTopic(
-    node.forkDigestAtEpoch(node.getWallEpoch))
+  let
+    contextEpoch = msg.message.contribution.slot.epoch
+    topic = getSyncCommitteeContributionAndProofTopic(
+      node.forkDigestAtEpoch(contextEpoch))
   node.broadcast(topic, msg)
 
 proc broadcastLightClientFinalityUpdate*(
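The three broadcast changes above share one fix: the gossip topic is now derived from the message's own epoch (its context epoch) rather than the node's wall clock, so a sidecar or sync message that straddles a fork boundary is published under the fork digest it was signed for. A standalone Nim sketch of that selection logic; `ForkDigest`, the digest values, and the topic format here are simplified stand-ins, not the nimbus-eth2 API:

```nim
import std/strformat

type
  Epoch = uint64
  ForkDigest = array[4, byte]

proc forkDigestAtEpoch(forkEpoch: Epoch, pre, post: ForkDigest,
                       epoch: Epoch): ForkDigest =
  # Pick the digest of whichever fork is active at `epoch`.
  if epoch >= forkEpoch: post else: pre

proc blobSidecarTopic(digest: ForkDigest, subnetId: uint64): string =
  var hex = ""
  for b in digest: hex.add(fmt"{b:02x}")
  fmt"/eth2/{hex}/blob_sidecar_{subnetId}/ssz_snappy"

when isMainModule:
  let
    preDigest: ForkDigest = [byte 0x01, 0x02, 0x03, 0x04]
    postDigest: ForkDigest = [byte 0x0a, 0x0b, 0x0c, 0x0d]
    contextEpoch: Epoch = 100  # epoch of the blob's block, not the wall epoch
  echo blobSidecarTopic(
    forkDigestAtEpoch(200, preDigest, postDigest, contextEpoch), 3)
```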
@@ -294,7 +294,7 @@ elif const_preset == "mainnet":
       vendorDir & "/mainnet/metadata/genesis.ssz")
 
     sepoliaGenesis* = slurp(
-      vendorDir & "/sepolia/bepolia/genesis.ssz")
+      vendorDir & "/sepolia/metadata/genesis.ssz")
 
   const
     mainnetMetadata = loadCompileTimeNetworkMetadata(
@@ -310,7 +310,7 @@ elif const_preset == "mainnet":
         digest: Eth2Digest.fromHex "0x0ea3f6f9515823b59c863454675fefcd1d8b4f2dbe454db166206a41fda060a0"))
 
     sepoliaMetadata = loadCompileTimeNetworkMetadata(
-      vendorDir & "/sepolia/bepolia",
+      vendorDir & "/sepolia/metadata",
       Opt.some sepolia,
       useBakedInGenesis = Opt.some "sepolia")
 
@@ -343,7 +343,7 @@ proc getMetadataForNetwork*(networkName: string): Eth2NetworkMetadata =
     quit 1
 
   if networkName in ["goerli", "prater"]:
-    warn "Goerli is deprecated and will stop being supported; https://blog.ethereum.org/2023/11/30/goerli-lts-update suggests migrating to Holesky or Sepolia"
+    warn "Goerli is deprecated and unsupported; https://blog.ethereum.org/2023/11/30/goerli-lts-update suggests migrating to Holesky or Sepolia"
 
   let metadata =
     when const_preset == "gnosis":
@@ -36,7 +36,7 @@ cdecl(eth2_mainnet_genesis_size):
         .quad eth2_mainnet_genesis_end - eth2_mainnet_genesis_data
 
 eth2_sepolia_genesis_data:
-        .incbin "sepolia/bepolia/genesis.ssz"
+        .incbin "sepolia/metadata/genesis.ssz"
 eth2_sepolia_genesis_end:
 .global cdecl(eth2_sepolia_genesis_size)
 .p2align 3
@@ -8,7 +8,7 @@
 {.push raises: [].}
 
 import
-  chronicles,
+  chronicles, stew/base10, metrics,
   ../spec/network,
   ".."/[beacon_clock],
   ../networking/eth2_network,
@@ -37,6 +37,9 @@ type
     statusLastTime: chronos.Moment
     statusMsg: StatusMsg
 
+declareCounter nbc_disconnects_count,
+  "Number disconnected peers", labels = ["agent", "reason"]
+
 func shortLog*(s: StatusMsg): auto =
   (
     forkDigest: s.forkDigest,
@@ -47,13 +50,6 @@ func shortLog*(s: StatusMsg): auto =
   )
 chronicles.formatIt(StatusMsg): shortLog(it)
 
-func disconnectReasonName(reason: uint64): string =
-  # haha, nim doesn't support uint64 in `case`!
-  if reason == uint64(ClientShutDown): "Client shutdown"
-  elif reason == uint64(IrrelevantNetwork): "Irrelevant network"
-  elif reason == uint64(FaultOrError): "Fault or error"
-  else: "Disconnected (" & $reason & ")"
-
 func forkDigestAtEpoch(state: PeerSyncNetworkState,
                        epoch: Epoch): ForkDigest =
   state.forkDigests[].atEpoch(epoch, state.cfg)
@@ -131,9 +127,9 @@ p2pProtocol PeerSync(version = 1,
             networkState = PeerSyncNetworkState,
             peerState = PeerSyncPeerState):
 
-  onPeerConnected do (peer: Peer, incoming: bool) {.async: (raises: [CancelledError]).}:
-    debug "Peer connected",
-      peer, peerId = shortLog(peer.peerId), incoming
+  onPeerConnected do (peer: Peer, incoming: bool) {.
+      async: (raises: [CancelledError]).}:
+    debug "Peer connected", peer, peerId = shortLog(peer.peerId), incoming
     # Per the eth2 protocol, whoever dials must send a status message when
     # connected for the first time, but because of how libp2p works, there may
     # be a race between incoming and outgoing connections and disconnects that
@@ -152,6 +148,7 @@ p2pProtocol PeerSync(version = 1,
 
     if theirStatus.isOk:
       discard await peer.handleStatus(peer.networkState, theirStatus.get())
+      peer.updateAgent()
     else:
       debug "Status response not received in time",
             peer, errorKind = theirStatus.error.kind
@@ -179,9 +176,13 @@ p2pProtocol PeerSync(version = 1,
       {.libp2pProtocol("metadata", 2).} =
     peer.network.metadata
 
-  proc goodbye(peer: Peer, reason: uint64)
-    {.async, libp2pProtocol("goodbye", 1).} =
-    debug "Received Goodbye message", reason = disconnectReasonName(reason), peer
+  proc goodbye(peer: Peer, reason: uint64) {.
+      async, libp2pProtocol("goodbye", 1).} =
+    let remoteAgent = peer.getRemoteAgent()
+    nbc_disconnects_count.inc(1, [$remoteAgent, Base10.toString(reason)])
+    debug "Received Goodbye message",
+      reason = disconnectReasonName(remoteAgent, reason),
+      remote_agent = $remoteAgent, peer
 
 proc setStatusMsg(peer: Peer, statusMsg: StatusMsg) =
   debug "Peer status", peer, statusMsg
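The new goodbye handler feeds a labelled counter so disconnect reasons can be broken down per client agent. For reference, this is the nim-metrics pattern the hunk uses (the counter name and label values below are invented for illustration; it assumes the `metrics` package this repo already imports):

```nim
import metrics

declareCounter demo_disconnects_count,
  "Number of disconnected peers", labels = ["agent", "reason"]

when isMainModule:
  # One time series is kept per (agent, reason) label combination.
  demo_disconnects_count.inc(1, ["nimbus", "128"])
  demo_disconnects_count.inc(1, ["lighthouse", "1"])
```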
@@ -8,7 +8,7 @@
 {.push raises: [].}
 
 import
-  std/[os, random, terminal, times],
+  std/[os, random, sequtils, terminal, times],
   chronos, chronicles,
   metrics, metrics/chronos_httpserver,
   stew/[byteutils, io2],
@@ -293,15 +293,8 @@ proc initFullNode(
     node.eventBus.propSlashQueue.emit(data)
   proc onAttesterSlashingAdded(data: phase0.AttesterSlashing) =
     node.eventBus.attSlashQueue.emit(data)
-  proc onBlobSidecarAdded(data: BlobSidecar) =
-    node.eventBus.blobSidecarQueue.emit(
-      BlobSidecarInfoObject(
-        block_root: hash_tree_root(data.signed_block_header.message),
-        index: data.index,
-        slot: data.signed_block_header.message.slot,
-        kzg_commitment: data.kzg_commitment,
-        versioned_hash:
-          data.kzg_commitment.kzg_commitment_to_versioned_hash.to0xHex))
+  proc onBlobSidecarAdded(data: BlobSidecarInfoObject) =
+    node.eventBus.blobSidecarQueue.emit(data)
   proc onBlockAdded(data: ForkedTrustedSignedBeaconBlock) =
     let optimistic =
       if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH:
@@ -410,18 +403,22 @@ proc initFullNode(
       config.dumpEnabled, config.dumpDirInvalid, config.dumpDirIncoming,
       rng, taskpool, consensusManager, node.validatorMonitor,
       blobQuarantine, getBeaconTime)
-    blockVerifier = proc(signedBlock: ForkedSignedBeaconBlock,
-                         blobs: Opt[BlobSidecars], maybeFinalized: bool):
-        Future[Result[void, VerifierError]] {.async: (raises: [CancelledError], raw: true).} =
+    blockVerifier = proc(
+        signedBlock: ForkedSignedBeaconBlock,
+        blobs: Opt[ForkedBlobSidecars],
+        maybeFinalized: bool
+    ): Future[Result[void, VerifierError]] {.
+        async: (raises: [CancelledError], raw: true).} =
       # The design with a callback for block verification is unusual compared
       # to the rest of the application, but fits with the general approach
       # taken in the sync/request managers - this is an architectural compromise
       # that should probably be reimagined more holistically in the future.
       blockProcessor[].addBlock(
         MsgSource.gossip, signedBlock, blobs, maybeFinalized = maybeFinalized)
-    rmanBlockVerifier = proc(signedBlock: ForkedSignedBeaconBlock,
-                             maybeFinalized: bool):
-        Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} =
+    rmanBlockVerifier = proc(
+        signedBlock: ForkedSignedBeaconBlock, maybeFinalized: bool
+    ): Future[Result[void, VerifierError]] {.
+        async: (raises: [CancelledError]).} =
       withBlck(signedBlock):
         when consensusFork >= ConsensusFork.Deneb:
           if not blobQuarantine[].hasBlobs(forkyBlck):
@@ -432,24 +429,27 @@ proc initFullNode(
             else:
               err(VerifierError.MissingParent)
           else:
-            let blobs = blobQuarantine[].popBlobs(forkyBlck.root, forkyBlck)
-            await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
-                                            Opt.some(blobs),
-                                            maybeFinalized = maybeFinalized)
+            let blobs = blobQuarantine[]
+              .popBlobs(forkyBlck.root, forkyBlck)
+              .mapIt(ForkedBlobSidecar.init(newClone(it)))
+            await blockProcessor[].addBlock(
+              MsgSource.gossip, signedBlock, Opt.some(blobs),
+              maybeFinalized = maybeFinalized)
         else:
-          await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
-                                          Opt.none(BlobSidecars),
-                                          maybeFinalized = maybeFinalized)
+          await blockProcessor[].addBlock(
+            MsgSource.gossip, signedBlock, Opt.none(ForkedBlobSidecars),
+            maybeFinalized = maybeFinalized)
     rmanBlockLoader = proc(
         blockRoot: Eth2Digest): Opt[ForkedTrustedSignedBeaconBlock] =
       dag.getForkedBlock(blockRoot)
     rmanBlobLoader = proc(
-        blobId: BlobIdentifier): Opt[ref BlobSidecar] =
-      var blob_sidecar = BlobSidecar.new()
-      if dag.db.getBlobSidecar(blobId.block_root, blobId.index, blob_sidecar[]):
-        Opt.some blob_sidecar
-      else:
-        Opt.none(ref BlobSidecar)
+        blobId: BlobIdentifier): Opt[ForkedBlobSidecar] =
+      withAll(BlobFork):
+        var blob_sidecar = blobFork.BlobSidecar.new()
+        if dag.db.getBlobSidecar(
+            blobId.block_root, blobId.index, blob_sidecar[]):
+          return Opt.some ForkedBlobSidecar.init(blob_sidecar)
+      Opt.none(ForkedBlobSidecar)
 
     processor = Eth2Processor.new(
       config.doppelgangerDetection,
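`popBlobs(...).mapIt(ForkedBlobSidecar.init(newClone(it)))` above does three things: copy each value-typed sidecar to the heap, tag it with its blob fork, and collect the results. A reduced standalone model of that wrapping, with stand-in types rather than the real nimbus-eth2 definitions:

```nim
import std/sequtils

type
  BlobFork = enum Deneb
  DenebBlobSidecar = object
    index: uint64
  ForkedBlobSidecar = object
    case kind: BlobFork
    of Deneb: denebData: ref DenebBlobSidecar

proc init(T: type ForkedBlobSidecar, data: ref DenebBlobSidecar): T =
  ForkedBlobSidecar(kind: Deneb, denebData: data)

proc newClone[T](x: T): ref T =
  # like stew's newClone: heap-allocate a copy of a value
  new(result)
  result[] = x

when isMainModule:
  let quarantined = @[DenebBlobSidecar(index: 0), DenebBlobSidecar(index: 1)]
  let forked = quarantined.mapIt(ForkedBlobSidecar.init(newClone(it)))
  doAssert forked.len == 2 and forked[0].kind == Deneb
```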
@@ -826,6 +826,7 @@ proc init*(T: type BeaconNode,
       RestServerRef.init(config.restAddress, config.restPort,
                          config.restAllowedOrigin,
                          validateBeaconApiQueries,
+                         nimbusAgentStr,
                          config)
     else:
       nil
@@ -1919,15 +1920,24 @@ proc installMessageValidators(node: BeaconNode) =
               MsgSource.gossip, msg)))
 
       when consensusFork >= ConsensusFork.Deneb:
+        const blobFork =
+          blobForkAtConsensusFork(consensusFork).expect("Blobs OK")
+
         # blob_sidecar_{subnet_id}
         # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id
         for it in BlobId:
           closureScope: # Needed for inner `proc`; don't lift it out of loop.
-            let subnet_id = it
+            let
+              contextFork = consensusFork
+              subnet_id = it
            node.network.addValidator(
               getBlobSidecarTopic(digest, subnet_id), proc (
-                  blobSidecar: deneb.BlobSidecar
+                  blobSidecar: blobFork.BlobSidecar
               ): ValidationResult =
+                if contextFork != node.dag.cfg.consensusForkAtEpoch(
+                    blobSidecar.signed_block_header.message.slot.epoch):
+                  return ValidationResult.Reject
+
                 toValidationResult(
                   node.processor[].processBlobSidecar(
                     MsgSource.gossip, blobSidecar, subnet_id)))
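A note on the `closureScope` this loop keeps relying on: Nim closures capture loop variables by reference, so without a per-iteration scope every registered validator would see the final `subnet_id`. A standalone demonstration:

```nim
import std/sugar

proc main() =
  var callbacks: seq[proc (): uint64 {.closure.}]
  for it in 0'u64 ..< 3'u64:
    closureScope: # freezes a copy of `it` per iteration
      let subnet_id = it
      callbacks.add(proc (): uint64 = subnet_id)
  for cb in callbacks:
    echo cb() # prints 0, 1, 2 rather than 2, 2, 2

main()
```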
@@ -357,6 +357,7 @@ proc init*(T: type RestServerRef,
            port: Port,
            allowedOrigin: Option[string],
            validateFn: PatternCallback,
+           ident: string,
            config: AnyConf): T =
   let
     address = initTAddress(ip, port)
@@ -375,6 +376,7 @@ proc init*(T: type RestServerRef,
 
   let res = RestServerRef.new(RestRouter.init(validateFn, allowedOrigin),
                               address, serverFlags = serverFlags,
+                              serverIdent = ident,
                               httpHeadersTimeout = headersTimeout,
                               maxHeadersSize = maxHeadersSize,
                               maxRequestBodySize = maxRequestBodySize,
@@ -428,11 +430,13 @@ proc initKeymanagerServer*(
         RestServerRef.init(config.keymanagerAddress, config.keymanagerPort,
                            config.keymanagerAllowedOrigin,
                            validateKeymanagerApiQueries,
+                           nimbusAgentStr,
                            config)
       else:
         RestServerRef.init(config.keymanagerAddress, config.keymanagerPort,
                            config.keymanagerAllowedOrigin,
                            validateKeymanagerApiQueries,
+                           nimbusAgentStr,
                            config)
     else:
       nil
@@ -17,6 +17,10 @@ import
 const
   PREGENESIS_EPOCHS_COUNT = 1
 
+declareGauge validator_client_node_counts,
+  "Number of connected beacon nodes and their status",
+  labels = ["status"]
+
 proc initGenesis(vc: ValidatorClientRef): Future[RestGenesis] {.async.} =
   info "Initializing genesis", nodes_count = len(vc.beaconNodes)
   var nodes = vc.beaconNodes
@@ -214,19 +218,24 @@ proc runVCSlotLoop(vc: ValidatorClientRef) {.async.} =
 
       vc.processingDelay = Opt.some(nanoseconds(delay.nanoseconds))
 
+      let
+        counts = vc.getNodeCounts()
+        # Good nodes are nodes which can be used for ALL the requests.
+        goodNodes = counts.data[int(RestBeaconNodeStatus.Synced)]
+        # Viable nodes are nodes which can be used only SOME of the requests.
+        viableNodes = counts.data[int(RestBeaconNodeStatus.OptSynced)] +
+                      counts.data[int(RestBeaconNodeStatus.NotSynced)] +
+                      counts.data[int(RestBeaconNodeStatus.Compatible)]
+        # Bad nodes are nodes which can't be used at all.
+        badNodes = counts.data[int(RestBeaconNodeStatus.Offline)] +
+                   counts.data[int(RestBeaconNodeStatus.Online)] +
+                   counts.data[int(RestBeaconNodeStatus.Incompatible)]
+
+      validator_client_node_counts.set(int64(goodNodes), ["good"])
+      validator_client_node_counts.set(int64(viableNodes), ["viable"])
+      validator_client_node_counts.set(int64(badNodes), ["bad"])
+
       if len(vc.beaconNodes) > 1:
-        let
-          counts = vc.getNodeCounts()
-          # Good nodes are nodes which can be used for ALL the requests.
-          goodNodes = counts.data[int(RestBeaconNodeStatus.Synced)]
-          # Viable nodes are nodes which can be used only SOME of the requests.
-          viableNodes = counts.data[int(RestBeaconNodeStatus.OptSynced)] +
-                        counts.data[int(RestBeaconNodeStatus.NotSynced)] +
-                        counts.data[int(RestBeaconNodeStatus.Compatible)]
-          # Bad nodes are nodes which can't be used at all.
-          badNodes = counts.data[int(RestBeaconNodeStatus.Offline)] +
-                     counts.data[int(RestBeaconNodeStatus.Online)] +
-                     counts.data[int(RestBeaconNodeStatus.Incompatible)]
         info "Slot start",
           slot = shortLog(wallSlot),
           epoch = shortLog(wallSlot.epoch()),
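The hoisted block now publishes node counts through a labelled gauge even when only a single beacon node is configured; only the `info "Slot start"` log stays behind the `len(vc.beaconNodes) > 1` check. A minimal nim-metrics sketch of the gauge usage (the counts are invented):

```nim
import metrics

declareGauge demo_node_counts,
  "Number of connected beacon nodes and their status", labels = ["status"]

when isMainModule:
  # One gauge time series per status label, updated every slot.
  demo_node_counts.set(2'i64, ["good"])
  demo_node_counts.set(1'i64, ["viable"])
  demo_node_counts.set(0'i64, ["bad"])
```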
@@ -425,7 +425,11 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
             Http400, InvalidRequestBodyError, $error)
         let
           ids = request.ids.valueOr: @[]
-          filter = request.status.valueOr: AllValidatorFilterKinds
+          filter =
+            if request.status.isNone() or len(request.status.get) == 0:
+              AllValidatorFilterKinds
+            else:
+              request.status.get
         (ids, filter)
       sid = state_id.valueOr:
         return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
@@ -1102,6 +1106,89 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
 
     RestApiResponse.jsonMsgResponse(BlockValidationSuccess)
 
+  # https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlindedBlockV2
+  router.api(MethodPost, "/eth/v2/beacon/blinded_blocks") do (
+    broadcast_validation: Option[BroadcastValidationType],
+    contentBody: Option[ContentBody]) -> RestApiResponse:
+    if contentBody.isNone():
+      return RestApiResponse.jsonError(Http400, EmptyRequestBodyError)
+
+    let
+      currentEpochFork =
+        node.dag.cfg.consensusForkAtEpoch(node.currentSlot().epoch())
+      version = request.headers.getString("eth-consensus-version")
+      validation =
+        if broadcast_validation.isNone():
+          BroadcastValidationType.Gossip
+        else:
+          let res = broadcast_validation.get().valueOr:
+            return RestApiResponse.jsonError(Http400,
+                                             InvalidBroadcastValidationType)
+          # TODO (cheatfate): support 'consensus' and
+          # 'consensus_and_equivocation' broadcast_validation types.
+          if res != BroadcastValidationType.Gossip:
+            return RestApiResponse.jsonError(Http500,
+              "Only `gossip` broadcast_validation option supported")
+          res
+      body = contentBody.get()
+
+    if (body.contentType == OctetStreamMediaType) and
+       (currentEpochFork.toString != version):
+      return RestApiResponse.jsonError(Http400, BlockIncorrectFork)
+
+    withConsensusFork(currentEpochFork):
+      # TODO (cheatfate): handle broadcast_validation flag
+      when consensusFork >= ConsensusFork.Deneb:
+        let
+          restBlock = decodeBodyJsonOrSsz(
+              consensusFork.SignedBlindedBeaconBlock, body).valueOr:
+            return RestApiResponse.jsonError(error)
+          payloadBuilderClient = node.getPayloadBuilderClient(
+              restBlock.message.proposer_index).valueOr:
+            return RestApiResponse.jsonError(
+              Http400, "Unable to initialize payload builder client: " & $error)
+          res = await node.unblindAndRouteBlockMEV(
+            payloadBuilderClient, restBlock)
+
+        if res.isErr():
+          return RestApiResponse.jsonError(
+            Http500, InternalServerError, $res.error)
+        if res.get().isNone():
+          return RestApiResponse.jsonError(Http202, BlockValidationError)
+
+        return RestApiResponse.jsonMsgResponse(BlockValidationSuccess)
+      elif consensusFork >= ConsensusFork.Bellatrix:
+        return RestApiResponse.jsonError(
+          Http400, $consensusFork & " builder API unsupported")
+      else:
+        # Pre-Bellatrix, this endpoint will accept a `SignedBeaconBlock`.
+        #
+        # This is mostly the same as /eth/v1/beacon/blocks for phase 0 and
+        # altair.
+        var
+          restBlock = decodeBody(
+              RestPublishedSignedBeaconBlock, body, version).valueOr:
+            return RestApiResponse.jsonError(error)
+          forked = ForkedSignedBeaconBlock(restBlock)
+
+        if forked.kind != node.dag.cfg.consensusForkAtEpoch(
+            getForkedBlockField(forked, slot).epoch):
+          return RestApiResponse.jsonError(Http400, InvalidBlockObjectError)
+
+        let res = withBlck(forked):
+          forkyBlck.root = hash_tree_root(forkyBlck.message)
+          await node.router.routeSignedBeaconBlock(
+            forkyBlck, Opt.none(seq[BlobSidecar]),
+            checkValidator = true)
+
+        if res.isErr():
+          return RestApiResponse.jsonError(
+            Http503, BeaconNodeInSyncError, $res.error)
+        elif res.get().isNone():
+          return RestApiResponse.jsonError(Http202, BlockValidationError)
+
+        RestApiResponse.jsonMsgResponse(BlockValidationSuccess)
+
   # https://ethereum.github.io/beacon-APIs/#/Beacon/getBlock
   router.api2(MethodGet, "/eth/v1/beacon/blocks/{block_id}") do (
     block_id: BlockIdent) -> RestApiResponse:
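The new endpoint leans on `withConsensusFork` to turn the runtime fork value into a compile-time `consensusFork`, letting each branch use fork-specific types such as `consensusFork.SignedBlindedBeaconBlock`. A reduced standalone model of that dispatch technique (this is the general pattern, not the repo's exact template):

```nim
type ConsensusFork = enum Phase0, Altair, Bellatrix, Capella, Deneb, Electra

template withConsensusFork(x: ConsensusFork, body: untyped): untyped =
  case x
  of Phase0:
    const consensusFork {.inject.} = Phase0
    body
  of Altair:
    const consensusFork {.inject.} = Altair
    body
  of Bellatrix:
    const consensusFork {.inject.} = Bellatrix
    body
  of Capella:
    const consensusFork {.inject.} = Capella
    body
  of Deneb:
    const consensusFork {.inject.} = Deneb
    body
  of Electra:
    const consensusFork {.inject.} = Electra
    body

when isMainModule:
  let runtimeFork = Deneb
  withConsensusFork(runtimeFork):
    # `consensusFork` is a compile-time constant inside each branch.
    when consensusFork >= Deneb:
      echo "blinded-block (builder) path"
    else:
      echo "pre-Deneb path"
```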
@@ -1431,29 +1518,32 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
             return RestApiResponse.jsonError(Http406, ContentNotAcceptableError)
           res.get()
 
-      # https://github.com/ethereum/beacon-APIs/blob/v2.4.2/types/deneb/blob_sidecar.yaml#L2-L28
-      let data = newClone(default(List[BlobSidecar, Limit MAX_BLOBS_PER_BLOCK]))
-
-      if indices.isErr:
-        return RestApiResponse.jsonError(Http400,
-                                         InvalidSidecarIndexValueError)
-
-      let indexFilter = indices.get.toHashSet
-
-      for blobIndex in 0'u64 ..< MAX_BLOBS_PER_BLOCK:
-        if indexFilter.len > 0 and blobIndex notin indexFilter:
-          continue
-
-        var blobSidecar = new BlobSidecar
-
-        if node.dag.db.getBlobSidecar(bid.root, blobIndex, blobSidecar[]):
-          discard data[].add blobSidecar[]
-
-      if contentType == sszMediaType:
-        RestApiResponse.sszResponse(
-          data[], headers = [("eth-consensus-version",
-            node.dag.cfg.consensusForkAtEpoch(bid.slot.epoch).toString())])
-      elif contentType == jsonMediaType:
-        RestApiResponse.jsonResponse(data)
-      else:
-        RestApiResponse.jsonError(Http500, InvalidAcceptError)
+        consensusFork = node.dag.cfg.consensusForkAtEpoch(bid.slot.epoch)
+
+      if indices.isErr:
+        return RestApiResponse.jsonError(Http400,
+                                         InvalidSidecarIndexValueError)
+
+      withBlobFork(blobForkAtConsensusFork(consensusFork).get(BlobFork.Deneb)):
+        # https://github.com/ethereum/beacon-APIs/blob/v2.4.2/types/deneb/blob_sidecar.yaml#L2-L28
+        let data = newClone(
+          default(List[blobFork.BlobSidecar, Limit MAX_BLOBS_PER_BLOCK]))
+
+        let indexFilter = indices.get.toHashSet
+
+        for blobIndex in 0'u64 ..< MAX_BLOBS_PER_BLOCK:
+          if indexFilter.len > 0 and blobIndex notin indexFilter:
+            continue
+
+          var blobSidecar = new blobFork.BlobSidecar
+
+          if node.dag.db.getBlobSidecar(bid.root, blobIndex, blobSidecar[]):
+            discard data[].add blobSidecar[]
+
+        if contentType == sszMediaType:
+          RestApiResponse.sszResponse(data[], headers = [
+            ("eth-consensus-version", consensusFork.toString())])
+        elif contentType == jsonMediaType:
+          RestApiResponse.jsonResponse(data)
+        else:
+          RestApiResponse.jsonError(Http500, InvalidAcceptError)
@@ -106,65 +106,51 @@ proc getLastSeenAddress(node: BeaconNode, id: PeerId): string =
     $addrs[len(addrs) - 1]
   else:
     ""
-proc getDiscoveryAddresses(node: BeaconNode): Option[seq[string]] =
-  let restr = node.network.enrRecord().toTypedRecord()
-  if restr.isErr():
-    return none[seq[string]]()
-  let respa = restr.get().toPeerAddr(udpProtocol)
-  if respa.isErr():
-    return none[seq[string]]()
-  let pa = respa.get()
-  let mpa = MultiAddress.init(multiCodec("p2p"), pa.peerId)
-  if mpa.isErr():
-    return none[seq[string]]()
-  var addresses = newSeqOfCap[string](len(pa.addrs))
-  for item in pa.addrs:
-    let resa = concat(item, mpa.get())
-    if resa.isOk():
-      addresses.add($(resa.get()))
-  return some(addresses)
-
-proc getP2PAddresses(node: BeaconNode): Option[seq[string]] =
-  let pinfo = node.network.switch.peerInfo
-  let mpa = MultiAddress.init(multiCodec("p2p"), pinfo.peerId)
-  if mpa.isErr():
-    return none[seq[string]]()
-  var addresses = newSeqOfCap[string](len(pinfo.addrs))
-  for item in pinfo.addrs:
-    let resa = concat(item, mpa.get())
-    if resa.isOk():
-      addresses.add($(resa.get()))
-  return some(addresses)
+proc getDiscoveryAddresses(node: BeaconNode): seq[string] =
+  let
+    typedRec = TypedRecord.fromRecord(node.network.enrRecord())
+    peerAddr = typedRec.toPeerAddr(udpProtocol).valueOr:
+      return default(seq[string])
+    maddress = MultiAddress.init(multiCodec("p2p"), peerAddr.peerId).valueOr:
+      return default(seq[string])
+
+  var addresses: seq[string]
+  for item in peerAddr.addrs:
+    let res = concat(item, maddress)
+    if res.isOk():
+      addresses.add($(res.get()))
+  addresses
+
+proc getP2PAddresses(node: BeaconNode): seq[string] =
+  let
+    pinfo = node.network.switch.peerInfo
+    maddress = MultiAddress.init(multiCodec("p2p"), pinfo.peerId).valueOr:
+      return default(seq[string])
+
+  var addresses: seq[string]
+  for item in node.network.announcedAddresses:
+    let res = concat(item, maddress)
+    if res.isOk():
+      addresses.add($(res.get()))
+  for item in pinfo.addrs:
+    let res = concat(item, maddress)
+    if res.isOk():
+      addresses.add($(res.get()))
+  addresses
 
 proc installNodeApiHandlers*(router: var RestRouter, node: BeaconNode) =
   let
     cachedVersion =
-      RestApiResponse.prepareJsonResponse((version: "Nimbus/" & fullVersionStr))
+      RestApiResponse.prepareJsonResponse((version: nimbusAgentStr))
 
   # https://ethereum.github.io/beacon-APIs/#/Node/getNetworkIdentity
   router.api2(MethodGet, "/eth/v1/node/identity") do () -> RestApiResponse:
-    let discoveryAddresses =
-      block:
-        let res = node.getDiscoveryAddresses()
-        if res.isSome():
-          res.get()
-        else:
-          newSeq[string](0)
-
-    let p2pAddresses =
-      block:
-        let res = node.getP2PAddresses()
-        if res.isSome():
-          res.get()
-        else:
-          newSeq[string]()
-
     RestApiResponse.jsonResponse(
       (
         peer_id: $node.network.peerId(),
        enr: node.network.enrRecord().toURI(),
-        p2p_addresses: p2pAddresses,
-        discovery_addresses: discoveryAddresses,
+        p2p_addresses: node.getP2PAddresses(),
+        discovery_addresses: node.getDiscoveryAddresses(),
         metadata: (
           seq_number: node.network.metadata.seq_number,
          syncnets: to0xHex(node.network.metadata.syncnets.bytes),
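The rewrite above replaces `Option[seq[string]]` plus call-site unwrapping with plain `seq[string]` and `valueOr` early returns. A standalone sketch of that refactor pattern using nim-results (`parsePort` is a made-up stand-in):

```nim
import results

proc parsePort(s: string): Result[uint16, string] =
  if s == "9000": ok(9000'u16) else: err("invalid port")

proc portOrDefault(s: string): uint16 =
  # `valueOr` unwraps the ok value, or runs the block (which returns early)
  # on error, so no Option wrapper and no nested isErr checks are needed.
  let p = parsePort(s).valueOr:
    return 0
  p

when isMainModule:
  doAssert portOrDefault("9000") == 9000
  doAssert portOrDefault("bogus") == 0
```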
@@ -297,4 +283,4 @@ proc installNodeApiHandlers*(router: var RestRouter, node: BeaconNode) =
         Http206
       else:
         Http200
-    RestApiResponse.response("", status, contentType = "")
+    RestApiResponse.response(status)
@@ -1102,7 +1102,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
       numUpdatedFeeRecipients = numUpdated,
       numRefreshedFeeRecipients = numRefreshed
 
-    RestApiResponse.response("", Http200, "text/plain")
+    RestApiResponse.response(Http200)
 
   # https://ethereum.github.io/beacon-APIs/#/Validator/registerValidator
   # https://github.com/ethereum/beacon-APIs/blob/v2.3.0/apis/validator/register_validator.yaml
@@ -1129,7 +1129,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
       node.externalBuilderRegistrations[signedValidatorRegistration.message.pubkey] =
         signedValidatorRegistration
 
-    RestApiResponse.response("", Http200, "text/plain")
+    RestApiResponse.response(Http200)
 
   # https://ethereum.github.io/beacon-APIs/#/Validator/getLiveness
   router.api2(MethodPost, "/eth/v1/validator/liveness/{epoch}") do (
@@ -515,10 +515,17 @@ template get_total_balance(
   max(EFFECTIVE_BALANCE_INCREMENT.Gwei, res)
 
 # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#is_eligible_for_activation_queue
-func is_eligible_for_activation_queue*(validator: Validator): bool =
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-is_eligible_for_activation_queue
+func is_eligible_for_activation_queue*(
+    fork: static ConsensusFork, validator: Validator): bool =
   ## Check if ``validator`` is eligible to be placed into the activation queue.
-  validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and
-    validator.effective_balance == MAX_EFFECTIVE_BALANCE.Gwei
+  when fork <= ConsensusFork.Deneb:
+    validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and
+      validator.effective_balance == MAX_EFFECTIVE_BALANCE.Gwei
+  else:
+    # [Modified in Electra:EIP7251]
+    validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and
+      validator.effective_balance >= MIN_ACTIVATION_BALANCE.Gwei
 
 # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#is_eligible_for_activation
 func is_eligible_for_activation*(
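The `static ConsensusFork` parameter makes the eligibility rule a compile-time choice: pre-Electra callers get the exact-balance check, Electra callers the EIP-7251 minimum-balance check, with no runtime branch. A reduced standalone model (the balance constants are illustrative, not the mainnet values):

```nim
type ConsensusFork = enum Phase0, Altair, Bellatrix, Capella, Deneb, Electra

const
  MAX_EFFECTIVE_BALANCE = 32'u64
  MIN_ACTIVATION_BALANCE = 32'u64

func isEligibleForActivationQueue(
    fork: static ConsensusFork, effectiveBalance: uint64): bool =
  when fork <= ConsensusFork.Deneb:
    effectiveBalance == MAX_EFFECTIVE_BALANCE   # exact match pre-Electra
  else:
    effectiveBalance >= MIN_ACTIVATION_BALANCE  # EIP-7251 allows larger balances

when isMainModule:
  doAssert isEligibleForActivationQueue(ConsensusFork.Deneb, 32)
  doAssert not isEligibleForActivationQueue(ConsensusFork.Deneb, 40)
  doAssert isEligibleForActivationQueue(ConsensusFork.Electra, 40)
```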
@@ -604,7 +604,7 @@ func shortLog*(v: LightClientUpdate): auto =
   (
     attested: shortLog(v.attested_header),
     has_next_sync_committee:
-      v.next_sync_committee != default(typeof(v.next_sync_committee)),
+      v.next_sync_committee != static(default(typeof(v.next_sync_committee))),
     finalized: shortLog(v.finalized_header),
     num_active_participants: v.sync_aggregate.num_active_participants,
     signature_slot: v.signature_slot
@@ -634,8 +634,8 @@ func is_valid_light_client_header*(
 
   if epoch < cfg.CAPELLA_FORK_EPOCH:
     return
-      header.execution == default(ExecutionPayloadHeader) and
-      header.execution_branch == default(ExecutionBranch)
+      header.execution == static(default(ExecutionPayloadHeader)) and
+      header.execution_branch == static(default(ExecutionBranch))
 
   is_valid_merkle_branch(
     get_lc_execution_root(header, cfg),
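The recurring `default(...)` to `static(default(...))` edits in these light-client hunks hoist the zero-value construction to compile time, so comparing against an empty `ExecutionPayloadHeader` no longer builds a large temporary on every call. A standalone illustration:

```nim
type BigHeader = object
  data: array[256, byte]

func isEmpty(h: BigHeader): bool =
  # `static(default(BigHeader))` is evaluated once, at compile time;
  # plain `default(BigHeader)` would zero-initialise 256 bytes per call.
  h == static(default(BigHeader))

when isMainModule:
  var h: BigHeader
  doAssert h.isEmpty
  h.data[0] = 1
  doAssert not h.isEmpty
```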
@@ -705,7 +705,7 @@ func shortLog*(v: LightClientUpdate): auto =
   (
     attested: shortLog(v.attested_header),
     has_next_sync_committee:
-      v.next_sync_committee != default(typeof(v.next_sync_committee)),
+      v.next_sync_committee != static(default(typeof(v.next_sync_committee))),
     finalized: shortLog(v.finalized_header),
     num_active_participants: v.sync_aggregate.num_active_participants,
     signature_slot: v.signature_slot
@@ -526,7 +526,7 @@ func initHashedBeaconState*(s: BeaconState): HashedBeaconState =
   HashedBeaconState(data: s)
 
 func shortLog*(v: KzgCommitment | KzgProof): auto =
-  to0xHex(v)
+  to0xHex(v.bytes)
 
 func shortLog*(v: Blob): auto =
   to0xHex(v.toOpenArray(0, 31))
@@ -668,8 +668,8 @@ func is_valid_light_client_header*(
 
   if epoch < cfg.CAPELLA_FORK_EPOCH:
     return
-      header.execution == default(ExecutionPayloadHeader) and
-      header.execution_branch == default(ExecutionBranch)
+      header.execution == static(default(ExecutionPayloadHeader)) and
+      header.execution_branch == static(default(ExecutionBranch))
 
   is_valid_merkle_branch(
     get_lc_execution_root(header, cfg),
@@ -758,7 +758,7 @@ func shortLog*(v: LightClientUpdate): auto =
   (
     attested: shortLog(v.attested_header),
     has_next_sync_committee:
-      v.next_sync_committee != default(typeof(v.next_sync_committee)),
+      v.next_sync_committee != static(default(typeof(v.next_sync_committee))),
     finalized: shortLog(v.finalized_header),
     num_active_participants: v.sync_aggregate.num_active_participants,
     signature_slot: v.signature_slot
@@ -119,7 +119,7 @@ type
       ## [New in Electra:EIP6110]
     withdrawal_requests*:
       List[WithdrawalRequest, MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD]
-      ## [New in Electra:EIP6110]
+      ## [New in Electra:EIP7002:EIP7251]
     consolidation_requests*:
       List[ConsolidationRequest, Limit MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD]
       ## [New in Electra:EIP7251]
@@ -725,8 +725,8 @@ func is_valid_light_client_header*(
 
   if epoch < cfg.CAPELLA_FORK_EPOCH:
     return
-      header.execution == default(ExecutionPayloadHeader) and
-      header.execution_branch == default(ExecutionBranch)
+      header.execution == static(default(ExecutionPayloadHeader)) and
+      header.execution_branch == static(default(ExecutionBranch))
 
   is_valid_merkle_branch(
     get_lc_execution_root(header, cfg),
@@ -839,7 +839,7 @@ func shortLog*(v: LightClientUpdate): auto =
   (
     attested: shortLog(v.attested_header),
     has_next_sync_committee:
-      v.next_sync_committee != default(typeof(v.next_sync_committee)),
+      v.next_sync_committee != static(default(typeof(v.next_sync_committee))),
     finalized: shortLog(v.finalized_header),
     num_active_participants: v.sync_aggregate.num_active_participants,
     signature_slot: v.signature_slot
@@ -47,7 +47,6 @@ RestJson.useDefaultSerializationFor(
   AttestationData,
   BLSToExecutionChange,
   BeaconBlockHeader,
-  BlobSidecar,
   BlobSidecarInfoObject,
   BlobsBundle,
   Checkpoint,
@@ -228,6 +227,7 @@ RestJson.useDefaultSerializationFor(
   deneb.BeaconBlock,
   deneb.BeaconBlockBody,
   deneb.BeaconState,
+  deneb.BlobSidecar,
   deneb.BlockContents,
   deneb.ExecutionPayload,
   deneb.ExecutionPayloadHeader,
@@ -1362,7 +1362,7 @@ proc readValue*(reader: var JsonReader[RestJson],
                 value: var (KzgCommitment|KzgProof)) {.
     raises: [IOError, SerializationError].} =
   try:
-    hexToByteArray(reader.readValue(string), distinctBase(value))
+    hexToByteArray(reader.readValue(string), distinctBase(value.bytes))
   except ValueError:
     raiseUnexpectedValue(reader,
                          "KzgCommitment value should be a valid hex string")
@@ -1370,7 +1370,7 @@ proc readValue*(reader: var JsonReader[RestJson],
 proc writeValue*(
     writer: var JsonWriter[RestJson], value: KzgCommitment | KzgProof
 ) {.raises: [IOError].} =
-  writeValue(writer, hexOriginal(distinctBase(value)))
+  writeValue(writer, hexOriginal(distinctBase(value.bytes)))
 
 ## GraffitiBytes
 proc writeValue*(
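`distinctBase(value)` became `distinctBase(value.bytes)` because the KZG types are now wrapper objects exposing their byte array through a `.bytes` field. A standalone sketch of hex round-tripping such a wrapper; it assumes stew/byteutils behaves as in this repo, and `KzgCommitmentMock` is a stand-in type:

```nim
import stew/byteutils

type KzgCommitmentMock = object
  bytes: array[48, byte]

proc toHex0x(v: KzgCommitmentMock): string =
  "0x" & toHex(v.bytes)

proc fromHex0x(s: string): KzgCommitmentMock =
  # hexToByteArray checks the length and accepts an optional 0x prefix
  hexToByteArray(s, result.bytes)

when isMainModule:
  var c: KzgCommitmentMock
  c.bytes[0] = 0xc0
  doAssert fromHex0x(c.toHex0x()).bytes == c.bytes
```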
@@ -3536,7 +3536,9 @@ proc decodeBody*(
     of ConsensusFork.Phase0:
       let blck =
         try:
-          SSZ.decode(body.data, phase0.SignedBeaconBlock)
+          var res = SSZ.decode(body.data, phase0.SignedBeaconBlock)
+          res.root = hash_tree_root(res.message)
+          res
         except SerializationError as exc:
           return err(RestErrorMessage.init(Http400, UnableDecodeError,
                                            [version, exc.formatMsg("<data>")]))
@@ -3548,7 +3550,9 @@ proc decodeBody*(
     of ConsensusFork.Altair:
       let blck =
         try:
-          SSZ.decode(body.data, altair.SignedBeaconBlock)
+          var res = SSZ.decode(body.data, altair.SignedBeaconBlock)
+          res.root = hash_tree_root(res.message)
+          res
         except SerializationError as exc:
           return err(RestErrorMessage.init(Http400, UnableDecodeError,
                                            [version, exc.formatMsg("<data>")]))
@@ -3560,7 +3564,9 @@ proc decodeBody*(
     of ConsensusFork.Bellatrix:
       let blck =
         try:
-          SSZ.decode(body.data, bellatrix.SignedBeaconBlock)
+          var res = SSZ.decode(body.data, bellatrix.SignedBeaconBlock)
+          res.root = hash_tree_root(res.message)
+          res
         except SerializationError as exc:
           return err(RestErrorMessage.init(Http400, UnableDecodeError,
                                            [version, exc.formatMsg("<data>")]))
@@ -3572,7 +3578,9 @@ proc decodeBody*(
     of ConsensusFork.Capella:
       let blck =
         try:
-          SSZ.decode(body.data, capella.SignedBeaconBlock)
+          var res = SSZ.decode(body.data, capella.SignedBeaconBlock)
+          res.root = hash_tree_root(res.message)
+          res
         except SerializationError as exc:
           return err(RestErrorMessage.init(Http400, UnableDecodeError,
                                            [version, exc.formatMsg("<data>")]))
@@ -3584,7 +3592,9 @@ proc decodeBody*(
     of ConsensusFork.Deneb:
       let blckContents =
         try:
-          SSZ.decode(body.data, DenebSignedBlockContents)
+          var res = SSZ.decode(body.data, DenebSignedBlockContents)
+          res.signed_block.root = hash_tree_root(res.signed_block.message)
+          res
         except SerializationError as exc:
           return err(RestErrorMessage.init(Http400, UnableDecodeError,
                                            [version, exc.formatMsg("<data>")]))
@@ -3596,7 +3606,9 @@ proc decodeBody*(
     of ConsensusFork.Electra:
       let blckContents =
         try:
-          SSZ.decode(body.data, ElectraSignedBlockContents)
+          var res = SSZ.decode(body.data, ElectraSignedBlockContents)
+          res.signed_block.root = hash_tree_root(res.signed_block.message)
+          res
         except SerializationError as exc:
           return err(RestErrorMessage.init(Http400, UnableDecodeError,
                                            [version, exc.formatMsg("<data>")]))
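Every fork branch of `decodeBody` now caches `hash_tree_root(message)` into `root` immediately after SSZ decoding, so later routing stages can use the block root without recomputing it. A reduced standalone model of the caching pattern (the hash below is a stand-in for SSZ hashing):

```nim
import std/hashes

type
  Message = object
    slot: uint64
  SignedBlock = object
    message: Message
    root: Hash   # cached digest of `message`

proc hashTreeRootMock(m: Message): Hash =
  # stand-in for SSZ hash_tree_root
  hash(m.slot)

proc decode(slot: uint64): SignedBlock =
  var res = SignedBlock(message: Message(slot: slot))
  res.root = hashTreeRootMock(res.message)  # cache once, at decode time
  res

when isMainModule:
  let blck = decode(42)
  doAssert blck.root == hashTreeRootMock(blck.message)
```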
@@ -274,6 +274,22 @@ type
     ForkyMsgTrustedSignedBeaconBlock |
     ForkyTrustedSignedBeaconBlock
 
+  BlobFork* {.pure.} = enum
+    Deneb
+
+  ForkyBlobSidecar* =
+    deneb.BlobSidecar
+
+  ForkyBlobSidecars* =
+    deneb.BlobSidecars
+
+  ForkedBlobSidecar* = object
+    case kind*: BlobFork
+    of BlobFork.Deneb:
+      denebData*: ref deneb.BlobSidecar
+
+  ForkedBlobSidecars* = seq[ForkedBlobSidecar]
+
   EpochInfoFork* {.pure.} = enum
     Phase0
     Altair
@@ -815,6 +831,75 @@ static:
   for fork in ConsensusFork:
     doAssert ConsensusFork.init(fork.toString()).expect("init defined") == fork
 
+template kind*(x: typedesc[deneb.BlobSidecar]): BlobFork =
+  BlobFork.Deneb
+
+template kzg_commitment_inclusion_proof_gindex*(
+    kind: static BlobFork, index: BlobIndex): GeneralizedIndex =
+  when kind == BlobFork.Deneb:
+    deneb.kzg_commitment_inclusion_proof_gindex(index)
+  else:
+    {.error: "kzg_commitment_inclusion_proof_gindex does not support " & $kind.}
+
+template BlobSidecar*(kind: static BlobFork): auto =
+  when kind == BlobFork.Deneb:
+    typedesc[deneb.BlobSidecar]
+  else:
+    {.error: "BlobSidecar does not support " & $kind.}
+
+template BlobSidecars*(kind: static BlobFork): auto =
+  when kind == BlobFork.Deneb:
+    typedesc[deneb.BlobSidecars]
+  else:
+    {.error: "BlobSidecars does not support " & $kind.}
+
+template withAll*(x: typedesc[BlobFork], body: untyped): untyped =
+  static: doAssert BlobFork.high == BlobFork.Deneb
+  block:
+    const blobFork {.inject, used.} = BlobFork.Deneb
+    body
+
+template withBlobFork*(x: BlobFork, body: untyped): untyped =
+  case x
+  of BlobFork.Deneb:
+    const blobFork {.inject, used.} = BlobFork.Deneb
+    body
+
+template withForkyBlob*(x: ForkedBlobSidecar, body: untyped): untyped =
+  case x.kind
+  of BlobFork.Deneb:
+    const blobFork {.inject, used.} = BlobFork.Deneb
+    template forkyBlob: untyped {.inject, used.} = x.denebData
+    body
+
+func init*(
+    x: typedesc[ForkedBlobSidecar],
+    forkyData: ref ForkyBlobSidecar): ForkedBlobSidecar =
+  const kind = typeof(forkyData[]).kind
+  when kind == BlobFork.Deneb:
+    ForkedBlobSidecar(kind: kind, denebData: forkyData)
+  else:
+    {.error: "ForkedBlobSidecar.init does not support " & $kind.}
+
+template forky*(x: ForkedBlobSidecar, kind: static BlobFork): untyped =
+  when kind == BlobFork.Deneb:
+    x.denebData
+  else:
+    {.error: "ForkedBlobSidecar.forky does not support " & $kind.}
+
+func shortLog*[T: ForkedBlobSidecar](x: T): auto =
+  type ResultType = object
+    case kind: BlobFork
+    of BlobFork.Deneb:
+      denebData: typeof(x.denebData.shortLog())
+
+  let xKind = x.kind # https://github.com/nim-lang/Nim/issues/23762
+  case xKind
+  of BlobFork.Deneb:
+    ResultType(kind: xKind, denebData: x.denebData.shortLog())
+
+chronicles.formatIt ForkedBlobSidecar: it.shortLog
+
 template init*(T: type ForkedEpochInfo, info: phase0.EpochInfo): T =
   T(kind: EpochInfoFork.Phase0, phase0Data: info)
 template init*(T: type ForkedEpochInfo, info: altair.EpochInfo): T =
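A usage sketch for the dispatch templates above: `withForkyBlob` lifts the runtime `kind` tag into an injected `blobFork` constant plus a `forkyBlob` alias, so a single body serves every blob fork. Standalone model with stand-in types:

```nim
type
  BlobFork = enum Deneb
  DenebSidecar = object
    index: uint64
  ForkedBlobSidecar = object
    case kind: BlobFork
    of Deneb: denebData: ref DenebSidecar

template withForkyBlob(x: ForkedBlobSidecar, body: untyped): untyped =
  case x.kind
  of Deneb:
    const blobFork {.inject, used.} = Deneb
    template forkyBlob: untyped {.inject, used.} = x.denebData
    body

when isMainModule:
  var forked = ForkedBlobSidecar(
    kind: Deneb, denebData: (ref DenebSidecar)(index: 7))
  withForkyBlob(forked):
    doAssert blobFork == Deneb
    doAssert forkyBlob.index == 7  # auto-deref through the ref
```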
@@ -1323,6 +1408,13 @@ func forkVersion*(cfg: RuntimeConfig, consensusFork: ConsensusFork): Version =
   of ConsensusFork.Deneb: cfg.DENEB_FORK_VERSION
   of ConsensusFork.Electra: cfg.ELECTRA_FORK_VERSION
 
+func blobForkAtConsensusFork*(consensusFork: ConsensusFork): Opt[BlobFork] =
+  static: doAssert BlobFork.high == BlobFork.Deneb
+  if consensusFork >= ConsensusFork.Deneb:
+    Opt.some BlobFork.Deneb
+  else:
+    Opt.none BlobFork
+
 func lcDataForkAtConsensusFork*(
     consensusFork: ConsensusFork): LightClientDataFork =
   static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
@@ -1405,6 +1497,35 @@ func readSszForkedSignedBeaconBlock*(
   withBlck(result):
     readSszBytes(data, forkyBlck)
 
+func readSszForkedBlobSidecar*(
+    cfg: RuntimeConfig, data: openArray[byte]
+): ForkedBlobSidecar {.raises: [SerializationError].} =
+  ## Helper to read `BlobSidecar` from bytes when it's not certain what
+  ## `BlobFork` it is
+  type ForkedBlobSidecarHeader = object
+    index: BlobIndex
+    blob: Blob
+    kzg_commitment: KzgCommitment
+    kzg_proof: KzgProof
+    signed_block_header*: SignedBeaconBlockHeader
+
+  const numHeaderBytes = fixedPortionSize(ForkedBlobSidecarHeader)
+  if data.len() < numHeaderBytes:
+    raise (ref MalformedSszError)(msg: "Incomplete BlobSidecar header")
+  let
+    header = SSZ.decode(
+      data.toOpenArray(0, numHeaderBytes - 1), ForkedBlobSidecarHeader)
+    consensusFork = cfg.consensusForkAtEpoch(
+      header.signed_block_header.message.slot.epoch)
+    blobFork = blobForkAtConsensusFork(consensusFork).valueOr:
+      raise (ref MalformedSszError)(msg: "BlobSidecar slot is pre-Deneb")
+
+  # TODO https://github.com/nim-lang/Nim/issues/19357
+  result = ForkedBlobSidecar(kind: blobFork)
+  withForkyBlob(result):
+    forkyBlob = new blobFork.BlobSidecar()
+    readSszBytes(data, forkyBlob[])
+
 # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#compute_fork_data_root
 func compute_fork_data_root*(current_version: Version,
                              genesis_validators_root: Eth2Digest): Eth2Digest =
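`readSszForkedBlobSidecar` peeks at the fixed-size prefix of the SSZ payload to learn the slot, maps the slot to a fork, and only then decodes with the fork's concrete type. A standalone model of that peek-then-dispatch idea over a toy 8-byte little-endian "header" (assumes stew/endians2):

```nim
import stew/endians2

type
  BlobFork = enum Deneb
  DecodedSidecar = object
    fork: BlobFork
    slot: uint64

proc forkAtSlot(slot: uint64): BlobFork =
  Deneb # single blob fork so far; extend when another fork ships

proc readSidecar(data: openArray[byte]): DecodedSidecar =
  doAssert data.len >= 8, "incomplete header"
  # Peek only the fixed-size prefix; a real decoder would then parse the
  # full payload with the fork-specific type.
  let slot = uint64.fromBytesLE(data.toOpenArray(0, 7))
  DecodedSidecar(fork: forkAtSlot(slot), slot: slot)

when isMainModule:
  let encoded = @(123'u64.toBytesLE)
  let decoded = readSidecar(encoded)
  doAssert decoded.fork == Deneb and decoded.slot == 123
```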
@@ -184,7 +184,8 @@ func lcDataForkAtEpoch*(
     LightClientDataFork.None
 
 template kind*(
-    x: typedesc[ # `SomeLightClientObject` doesn't work here (Nim 1.6)
+    # `SomeLightClientObject`: https://github.com/nim-lang/Nim/issues/18095
+    x: typedesc[
       altair.LightClientHeader |
       altair.LightClientBootstrap |
       altair.LightClientUpdate |
@@ -194,7 +195,8 @@ template kind*(
   LightClientDataFork.Altair
 
 template kind*(
-    x: typedesc[ # `SomeLightClientObject` doesn't work here (Nim 1.6)
+    # `SomeLightClientObject`: https://github.com/nim-lang/Nim/issues/18095
+    x: typedesc[
       capella.LightClientHeader |
       capella.LightClientBootstrap |
       capella.LightClientUpdate |
@@ -204,7 +206,8 @@ template kind*(
   LightClientDataFork.Capella
 
 template kind*(
-    x: typedesc[ # `SomeLightClientObject` doesn't work here (Nim 1.6)
+    # `SomeLightClientObject`: https://github.com/nim-lang/Nim/issues/18095
+    x: typedesc[
      deneb.LightClientHeader |
      deneb.LightClientBootstrap |
      deneb.LightClientUpdate |
@@ -214,7 +217,8 @@ template kind*(
   LightClientDataFork.Deneb
 
 template kind*(
-    x: typedesc[ # `SomeLightClientObject` doesn't work here (Nim 1.6)
+    # `SomeLightClientObject`: https://github.com/nim-lang/Nim/issues/18095
+    x: typedesc[
      electra.LightClientHeader |
      electra.LightClientBootstrap |
      electra.LightClientUpdate |
@@ -1015,7 +1019,8 @@ func migratingToDataFork*[
 
 # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/full-node.md#block_to_light_client_header
 func toAltairLightClientHeader(
-    blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6)
+    # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
+    blck:
       phase0.SignedBeaconBlock | phase0.TrustedSignedBeaconBlock |
       altair.SignedBeaconBlock | altair.TrustedSignedBeaconBlock |
       bellatrix.SignedBeaconBlock | bellatrix.TrustedSignedBeaconBlock
@@ -1025,7 +1030,8 @@ func toAltairLightClientHeader(
 
 # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/light-client/full-node.md#modified-block_to_light_client_header
 func toCapellaLightClientHeader(
-    blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6)
+    # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
+    blck:
       phase0.SignedBeaconBlock | phase0.TrustedSignedBeaconBlock |
       altair.SignedBeaconBlock | altair.TrustedSignedBeaconBlock |
       bellatrix.SignedBeaconBlock | bellatrix.TrustedSignedBeaconBlock
@@ -1040,7 +1046,8 @@ func toCapellaLightClientHeader(
     beacon: blck.message.toBeaconBlockHeader())
 
 func toCapellaLightClientHeader(
-    blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6)
+    # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
+    blck:
       capella.SignedBeaconBlock | capella.TrustedSignedBeaconBlock
 ): capella.LightClientHeader =
   template payload: untyped = blck.message.body.execution_payload
@@ -1067,7 +1074,8 @@ func toCapellaLightClientHeader(
 
 # https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.0/specs/deneb/light-client/full-node.md#modified-block_to_light_client_header
 func toDenebLightClientHeader(
-    blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6)
+    # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
+    blck:
       phase0.SignedBeaconBlock | phase0.TrustedSignedBeaconBlock |
       altair.SignedBeaconBlock | altair.TrustedSignedBeaconBlock |
       bellatrix.SignedBeaconBlock | bellatrix.TrustedSignedBeaconBlock
@@ -1082,7 +1090,8 @@ func toDenebLightClientHeader(
     beacon: blck.message.toBeaconBlockHeader())
 
 func toDenebLightClientHeader(
-    blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6)
+    # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
+    blck:
       capella.SignedBeaconBlock | capella.TrustedSignedBeaconBlock
 ): deneb.LightClientHeader =
   template payload: untyped = blck.message.body.execution_payload
@@ -1108,7 +1117,8 @@ func toDenebLightClientHeader(
       blck.message.body.build_proof(EXECUTION_PAYLOAD_GINDEX).get)
 
 func toDenebLightClientHeader(
-    blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6)
+    # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
+    blck:
       deneb.SignedBeaconBlock | deneb.TrustedSignedBeaconBlock
 ): deneb.LightClientHeader =
   template payload: untyped = blck.message.body.execution_payload
@@ -1137,7 +1147,8 @@ func toDenebLightClientHeader(
 
 # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/full-node.md#modified-block_to_light_client_header
 func toElectraLightClientHeader(
-    blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6)
+    # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
+    blck:
       phase0.SignedBeaconBlock | phase0.TrustedSignedBeaconBlock |
       altair.SignedBeaconBlock | altair.TrustedSignedBeaconBlock |
       bellatrix.SignedBeaconBlock | bellatrix.TrustedSignedBeaconBlock
@@ -1152,7 +1163,8 @@ func toElectraLightClientHeader(
     beacon: blck.message.toBeaconBlockHeader())
 
 func toElectraLightClientHeader(
-    blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6)
+    # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
+    blck:
       capella.SignedBeaconBlock | capella.TrustedSignedBeaconBlock
 ): electra.LightClientHeader =
   template payload: untyped = blck.message.body.execution_payload
@@ -1179,7 +1191,8 @@ func toElectraLightClientHeader(
       EXECUTION_PAYLOAD_GINDEX_ELECTRA))
 
 func toElectraLightClientHeader(
-    blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6)
+    # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
+    blck:
      deneb.SignedBeaconBlock | deneb.TrustedSignedBeaconBlock
 ): electra.LightClientHeader =
   template payload: untyped = blck.message.body.execution_payload
@@ -1208,7 +1221,8 @@ func toElectraLightClientHeader(
       EXECUTION_PAYLOAD_GINDEX_ELECTRA))
 
 func toElectraLightClientHeader(
-    blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6)
+    # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
+    blck:
      electra.SignedBeaconBlock | electra.TrustedSignedBeaconBlock
 ): electra.LightClientHeader =
   template payload: untyped = blck.message.body.execution_payload
@@ -1239,8 +1253,115 @@ func toElectraLightClientHeader(
     execution_branch:
       blck.message.body.build_proof(EXECUTION_PAYLOAD_GINDEX_ELECTRA).get)
 
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/full-node.md#modified-block_to_light_client_header
+func toElectraLightClientHeader(
+    # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
+    blck:
+      phase0.SignedBeaconBlock | phase0.TrustedSignedBeaconBlock |
+      altair.SignedBeaconBlock | altair.TrustedSignedBeaconBlock |
+      bellatrix.SignedBeaconBlock | bellatrix.TrustedSignedBeaconBlock
+): electra.LightClientHeader =
+  # Note that during fork transitions, `finalized_header` may still
+  # point to earlier forks. While Bellatrix blocks also contain an
+  # `ExecutionPayload` (minus `withdrawals_root`), it was not included
+  # in the corresponding light client data. To ensure compatibility
+  # with legacy data going through `upgrade_lc_header_to_capella`,
+  # leave out execution data.
+  electra.LightClientHeader(
+    beacon: blck.message.toBeaconBlockHeader())
+
+func toElectraLightClientHeader(
+    # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
+    blck:
+      capella.SignedBeaconBlock | capella.TrustedSignedBeaconBlock
+): electra.LightClientHeader =
+  template payload: untyped = blck.message.body.execution_payload
+  electra.LightClientHeader(
+    beacon: blck.message.toBeaconBlockHeader(),
+    execution: electra.ExecutionPayloadHeader(
+      parent_hash: payload.parent_hash,
+      fee_recipient: payload.fee_recipient,
+      state_root: payload.state_root,
+      receipts_root: payload.receipts_root,
+      logs_bloom: payload.logs_bloom,
+      prev_randao: payload.prev_randao,
+      block_number: payload.block_number,
+      gas_limit: payload.gas_limit,
+      gas_used: payload.gas_used,
+      timestamp: payload.timestamp,
+      extra_data: payload.extra_data,
+      base_fee_per_gas: payload.base_fee_per_gas,
+      block_hash: payload.block_hash,
+      transactions_root: hash_tree_root(payload.transactions),
+      withdrawals_root: hash_tree_root(payload.withdrawals)),
+    execution_branch: blck.message.body.build_proof(
+      capella.EXECUTION_PAYLOAD_GINDEX).get)
+
+func toElectraLightClientHeader(
+    # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
+    blck:
+      deneb.SignedBeaconBlock | deneb.TrustedSignedBeaconBlock
+): electra.LightClientHeader =
+  template payload: untyped = blck.message.body.execution_payload
+  electra.LightClientHeader(
+    beacon: blck.message.toBeaconBlockHeader(),
+    execution: electra.ExecutionPayloadHeader(
+      parent_hash: payload.parent_hash,
+      fee_recipient: payload.fee_recipient,
+      state_root: payload.state_root,
+      receipts_root: payload.receipts_root,
+      logs_bloom: payload.logs_bloom,
+      prev_randao: payload.prev_randao,
+      block_number: payload.block_number,
+      gas_limit: payload.gas_limit,
+      gas_used: payload.gas_used,
+      timestamp: payload.timestamp,
+      extra_data: payload.extra_data,
+      base_fee_per_gas: payload.base_fee_per_gas,
+      block_hash: payload.block_hash,
+      transactions_root: hash_tree_root(payload.transactions),
+      withdrawals_root: hash_tree_root(payload.withdrawals),
+      blob_gas_used: payload.blob_gas_used,
+      excess_blob_gas: payload.excess_blob_gas),
+    execution_branch: blck.message.body.build_proof(
+      capella.EXECUTION_PAYLOAD_GINDEX).get)
+
+func toElectraLightClientHeader(
+    # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
+    blck:
+      electra.SignedBeaconBlock | electra.TrustedSignedBeaconBlock
+): electra.LightClientHeader =
+  template payload: untyped = blck.message.body.execution_payload
+  electra.LightClientHeader(
+    beacon: blck.message.toBeaconBlockHeader(),
+    execution: electra.ExecutionPayloadHeader(
+      parent_hash: payload.parent_hash,
+      fee_recipient: payload.fee_recipient,
+      state_root: payload.state_root,
+      receipts_root: payload.receipts_root,
+      logs_bloom: payload.logs_bloom,
+      prev_randao: payload.prev_randao,
+      block_number: payload.block_number,
+      gas_limit: payload.gas_limit,
+      gas_used: payload.gas_used,
+      timestamp: payload.timestamp,
+      extra_data: payload.extra_data,
+      base_fee_per_gas: payload.base_fee_per_gas,
+      block_hash: payload.block_hash,
+      transactions_root: hash_tree_root(payload.transactions),
+      withdrawals_root: hash_tree_root(payload.withdrawals),
+      blob_gas_used: payload.blob_gas_used,
+      excess_blob_gas: payload.excess_blob_gas,
+      deposit_requests_root: hash_tree_root(payload.deposit_requests),
+      withdrawal_requests_root: hash_tree_root(payload.withdrawal_requests),
+      consolidation_requests_root:
+        hash_tree_root(payload.consolidation_requests)),
+    execution_branch: blck.message.body.build_proof(
+      capella.EXECUTION_PAYLOAD_GINDEX).get)
+
 func toLightClientHeader*(
-    blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6)
+    # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
+    blck:
      phase0.SignedBeaconBlock | phase0.TrustedSignedBeaconBlock |
      altair.SignedBeaconBlock | altair.TrustedSignedBeaconBlock |
      bellatrix.SignedBeaconBlock | bellatrix.TrustedSignedBeaconBlock |
@@ -1280,7 +1401,7 @@ func shortLog*[
     of LightClientDataFork.Electra:
       electraData: typeof(x.electraData.shortLog())
 
-  let xKind = x.kind # Nim 1.6.12: Using `kind: x.kind` inside case is broken
+  let xKind = x.kind # https://github.com/nim-lang/Nim/issues/23762
   case xKind
   of LightClientDataFork.Electra:
     ResultType(kind: xKind, electraData: x.electraData.shortLog())
@@ -11,7 +11,7 @@
 
 import
   # Status libraries
-  stew/[byteutils, endians2, objects, saturation_arith],
+  stew/[bitops2, byteutils, endians2, objects],
   chronicles,
   eth/common/[eth_types, eth_types_rlp],
   eth/rlp, eth/trie/[db, hexary],
@@ -39,6 +39,9 @@ type
   ExecutionTransaction* = eth_types.Transaction
   ExecutionReceipt* = eth_types.Receipt
   ExecutionWithdrawal* = eth_types.Withdrawal
+  ExecutionDepositRequest* = eth_types.DepositRequest
+  ExecutionWithdrawalRequest* = eth_types.WithdrawalRequest
+  ExecutionConsolidationRequest* = eth_types.ConsolidationRequest
   ExecutionBlockHeader* = eth_types.BlockHeader
 
   FinalityCheckpoints* = object
@@ -220,12 +223,13 @@ func has_flag*(flags: ParticipationFlags, flag_index: TimelyFlag): bool =
 
 # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/deneb/p2p-interface.md#check_blob_sidecar_inclusion_proof
 func verify_blob_sidecar_inclusion_proof*(
-    blob_sidecar: BlobSidecar): Result[void, string] =
-  let gindex = kzg_commitment_inclusion_proof_gindex(blob_sidecar.index)
+    blob_sidecar: ForkyBlobSidecar): Result[void, string] =
+  let gindex = withBlobFork(typeof(blob_sidecar).kind):
+    blobFork.kzg_commitment_inclusion_proof_gindex(blob_sidecar.index)
   if not is_valid_merkle_branch(
       hash_tree_root(blob_sidecar.kzg_commitment),
       blob_sidecar.kzg_commitment_inclusion_proof,
       KZG_COMMITMENT_INCLUSION_PROOF_DEPTH,
       log2trunc(gindex),
       get_subtree_index(gindex),
       blob_sidecar.signed_block_header.message.body_root):
     return err("BlobSidecar: inclusion proof not valid")
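`verify_blob_sidecar_inclusion_proof` is a Merkle-branch walk from the KZG commitment leaf up to the block body root; only the generalized index is fork-dependent now. A standalone model of such a branch check, with a toy combine function standing in for the SHA-256 hashing the real code uses:

```nim
import std/hashes

type Digest = Hash

proc combine(a, b: Digest): Digest =
  !$(a !& b) # toy parent = hash of children; real code uses SHA-256

proc isValidMerkleBranch(leaf: Digest, branch: openArray[Digest],
                         index: uint64, root: Digest): bool =
  var value = leaf
  for depth, sibling in branch:
    if ((index shr depth) and 1) == 1:
      value = combine(sibling, value) # we are the right child
    else:
      value = combine(value, sibling) # we are the left child
  value == root

when isMainModule:
  let
    leaves = [hash(1), hash(2), hash(3), hash(4)]
    n01 = combine(leaves[0], leaves[1])
    n23 = combine(leaves[2], leaves[3])
    root = combine(n01, n23)
  # Prove leaf 2 (index 2): siblings are leaf 3, then node n01.
  doAssert isValidMerkleBranch(leaves[2], [leaves[3], n01], 2, root)
```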
@@ -234,23 +238,28 @@ func verify_blob_sidecar_inclusion_proof*(
func create_blob_sidecars*(
    forkyBlck: deneb.SignedBeaconBlock | electra.SignedBeaconBlock,
    kzg_proofs: KzgProofs,
    blobs: Blobs): seq[BlobSidecar] =
    blobs: Blobs): auto =
  const
    consensusFork = typeof(forkyBlck).kind
    blobFork = blobForkAtConsensusFork(consensusFork).expect("Blobs OK")
  type ResultType = seq[blobFork.BlobSidecar]

  template kzg_commitments: untyped =
    forkyBlck.message.body.blob_kzg_commitments
  doAssert kzg_proofs.len == blobs.len
  doAssert kzg_proofs.len == kzg_commitments.len

  var res = newSeqOfCap[BlobSidecar](blobs.len)
  var res: ResultType = newSeqOfCap[blobFork.BlobSidecar](blobs.len)
  let signedBlockHeader = forkyBlck.toSignedBeaconBlockHeader()
  for i in 0 ..< blobs.lenu64:
    var sidecar = BlobSidecar(
    var sidecar = blobFork.BlobSidecar(
      index: i,
      blob: blobs[i],
      kzg_commitment: kzg_commitments[i],
      kzg_proof: kzg_proofs[i],
      signed_block_header: signedBlockHeader)
    forkyBlck.message.body.build_proof(
      kzg_commitment_inclusion_proof_gindex(i),
      blobFork.kzg_commitment_inclusion_proof_gindex(i),
      sidecar.kzg_commitment_inclusion_proof).expect("Valid gindex")
    res.add(sidecar)
  res
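For context, a minimal usage sketch of the reworked helper (not part of the diff; `makeAndCheckSidecars` and its inputs are hypothetical): the caller now gets fork-specific sidecars back, which can be fed straight into the matching `verify_blob_sidecar_inclusion_proof`.

```nim
# Sketch only: assumes a Deneb block produced together with matching
# `kzgProofs` and `blobs`.
proc makeAndCheckSidecars(
    forkyBlck: deneb.SignedBeaconBlock,
    kzgProofs: KzgProofs, blobs: Blobs): bool =
  let sidecars = create_blob_sidecars(forkyBlck, kzgProofs, blobs)
  for sidecar in sidecars:
    # Each sidecar carries its own Merkle inclusion proof into the block body
    if sidecar.verify_blob_sidecar_inclusion_proof().isErr:
      return false
  true
```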
@@ -445,9 +454,10 @@ proc computeTransactionsTrieRoot*(
  var tr = initHexaryTrie(newMemoryDB())
  for i, transaction in payload.transactions:
    try:
      tr.put(rlp.encode(i), distinctBase(transaction)) # Already RLP encoded
      # Transactions are already RLP encoded
      tr.put(rlp.encode(i.uint), distinctBase(transaction))
    except RlpError as exc:
      doAssert false, "HexaryTrie.put failed: " & $exc.msg
      raiseAssert "HexaryTrie.put failed: " & $exc.msg
  tr.rootHash()

func toExecutionWithdrawal*(
@@ -468,9 +478,77 @@ proc computeWithdrawalsTrieRoot*(
  var tr = initHexaryTrie(newMemoryDB())
  for i, withdrawal in payload.withdrawals:
    try:
      tr.put(rlp.encode(i), rlp.encode(toExecutionWithdrawal(withdrawal)))
      tr.put(rlp.encode(i.uint), rlp.encode(toExecutionWithdrawal(withdrawal)))
    except RlpError as exc:
      doAssert false, "HexaryTrie.put failed: " & $exc.msg
      raiseAssert "HexaryTrie.put failed: " & $exc.msg
  tr.rootHash()

func toExecutionDepositRequest*(
    request: electra.DepositRequest): ExecutionDepositRequest =
  ExecutionDepositRequest(
    pubkey: request.pubkey.blob,
    withdrawalCredentials: request.withdrawal_credentials.data,
    amount: distinctBase(request.amount),
    signature: request.signature.blob,
    index: request.index)

func toExecutionWithdrawalRequest*(
    request: electra.WithdrawalRequest): ExecutionWithdrawalRequest =
  ExecutionWithdrawalRequest(
    sourceAddress: request.source_address.data,
    validatorPubkey: request.validator_pubkey.blob,
    amount: distinctBase(request.amount))

func toExecutionConsolidationRequest*(
    request: electra.ConsolidationRequest): ExecutionConsolidationRequest =
  ExecutionConsolidationRequest(
    sourceAddress: request.source_address.data,
    sourcePubkey: request.source_pubkey.blob,
    targetPubkey: request.target_pubkey.blob)

# https://eips.ethereum.org/EIPS/eip-7685
proc computeRequestsTrieRoot*(
    payload: electra.ExecutionPayload): ExecutionHash256 =
  if payload.deposit_requests.len == 0 and
      payload.withdrawal_requests.len == 0 and
      payload.consolidation_requests.len == 0:
    return EMPTY_ROOT_HASH

  var
    tr = initHexaryTrie(newMemoryDB())
    i = 0'u64

  static:
    doAssert DEPOSIT_REQUEST_TYPE < WITHDRAWAL_REQUEST_TYPE
    doAssert WITHDRAWAL_REQUEST_TYPE < CONSOLIDATION_REQUEST_TYPE

  # EIP-6110
  for request in payload.deposit_requests:
    try:
      tr.put(rlp.encode(i.uint), rlp.encode(
        toExecutionDepositRequest(request)))
    except RlpError as exc:
      raiseAssert "HexaryTrie.put failed: " & $exc.msg
    inc i

  # EIP-7002
  for request in payload.withdrawal_requests:
    try:
      tr.put(rlp.encode(i.uint), rlp.encode(
        toExecutionWithdrawalRequest(request)))
    except RlpError as exc:
      raiseAssert "HexaryTrie.put failed: " & $exc.msg
    inc i

  # EIP-7251
  for request in payload.consolidation_requests:
    try:
      tr.put(rlp.encode(i.uint), rlp.encode(
        toExecutionConsolidationRequest(request)))
    except RlpError as exc:
      raiseAssert "HexaryTrie.put failed: " & $exc.msg
    inc i

  tr.rootHash()

proc blockToBlockHeader*(blck: ForkyBeaconBlock): ExecutionBlockHeader =
@@ -502,6 +580,11 @@ proc blockToBlockHeader*(blck: ForkyBeaconBlock): ExecutionBlockHeader =
        Opt.some ExecutionHash256(data: blck.parent_root.data)
      else:
        Opt.none(ExecutionHash256)
    requestsRoot =
      when typeof(payload).kind >= ConsensusFork.Electra:
        Opt.some payload.computeRequestsTrieRoot()
      else:
        Opt.none(ExecutionHash256)

  ExecutionBlockHeader(
    parentHash : payload.parent_hash,
@@ -513,8 +596,8 @@ proc blockToBlockHeader*(blck: ForkyBeaconBlock): ExecutionBlockHeader =
    logsBloom : payload.logs_bloom.data,
    difficulty : default(DifficultyInt),
    number : payload.block_number,
    gasLimit : GasInt.saturate(payload.gas_limit),
    gasUsed : GasInt.saturate(payload.gas_used),
    gasLimit : payload.gas_limit,
    gasUsed : payload.gas_used,
    timestamp : EthTime(payload.timestamp),
    extraData : payload.extra_data.asSeq,
    mixHash : payload.prev_randao, # EIP-4399 `mixHash` -> `prevRandao`
@@ -523,7 +606,31 @@ proc blockToBlockHeader*(blck: ForkyBeaconBlock): ExecutionBlockHeader =
    withdrawalsRoot : withdrawalsRoot,
    blobGasUsed : blobGasUsed, # EIP-4844
    excessBlobGas : excessBlobGas, # EIP-4844
    parentBeaconBlockRoot : parentBeaconBlockRoot) # EIP-4788
    parentBeaconBlockRoot : parentBeaconBlockRoot, # EIP-4788
    requestsRoot : requestsRoot) # EIP-7685

proc compute_execution_block_hash*(blck: ForkyBeaconBlock): Eth2Digest =
  rlpHash blockToBlockHeader(blck)

from std/math import exp, ln
from std/sequtils import foldl

func ln_binomial(n, k: int): float64 =
  if k > n:
    low(float64)
  else:
    template ln_factorial(n: int): float64 =
      (2 .. n).foldl(a + ln(b.float64), 0.0)
    ln_factorial(n) - ln_factorial(k) - ln_factorial(n - k)

func hypergeom_cdf*(k: int, population: int, successes: int, draws: int):
    float64 =
  if k < draws + successes - population:
    0.0
  elif k >= min(successes, draws):
    1.0
  else:
    let ln_denom = ln_binomial(population, draws)
    (0 .. k).foldl(a + exp(
      ln_binomial(successes, b) +
      ln_binomial(population - successes, draws - b) - ln_denom), 0.0)
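For reference, the quantity `hypergeom_cdf` computes (not part of the diff): with population size N, K successes and n draws, each term of the sum is evaluated in log space via `ln_binomial` so the factorials never overflow `float64`.

```latex
P(X \le k)
  = \sum_{i=0}^{k} \frac{\binom{K}{i}\binom{N-K}{n-i}}{\binom{N}{n}}
  = \sum_{i=0}^{k} \exp\!\left(\ln\binom{K}{i} + \ln\binom{N-K}{n-i} - \ln\binom{N}{n}\right),
\qquad
\ln\binom{n}{k} = \ln n! - \ln k! - \ln (n-k)!
```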
@@ -1085,7 +1085,7 @@ func kzg_commitment_to_versioned_hash*(
  var res: VersionedHash
  res[0] = VERSIONED_HASH_VERSION_KZG
  res[1 .. 31] = eth2digest(kzg_commitment).data.toOpenArray(1, 31)
  res[1 .. 31] = eth2digest(kzg_commitment.bytes).data.toOpenArray(1, 31)
  res

proc validate_blobs*(
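The layout produced above, spelled out for reference (EIP-4844; the change only makes the hashed bytes explicit via `.bytes`):

```latex
\mathrm{versioned\_hash}[0] = \mathtt{VERSIONED\_HASH\_VERSION\_KZG}, \qquad
\mathrm{versioned\_hash}[1..31] = \mathrm{SHA256}(\mathrm{commitment})[1..31]
```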
@@ -707,13 +707,11 @@ template get_flag_and_inactivity_delta(
    state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
           deneb.BeaconState | electra.BeaconState,
    base_reward_per_increment: Gwei, finality_delay: uint64,
    previous_epoch: Epoch,
    active_increments: uint64,
    previous_epoch: Epoch, active_increments: uint64,
    penalty_denominator: uint64,
    epoch_participation: ptr EpochParticipationFlags,
    participating_increments: array[3, uint64],
    info: var altair.EpochInfo,
    vidx: ValidatorIndex
    participating_increments: array[3, uint64], info: var altair.EpochInfo,
    vidx: ValidatorIndex, inactivity_score: uint64
  ): (ValidatorIndex, Gwei, Gwei, Gwei, Gwei, Gwei, Gwei) =
  let
    base_reward = get_base_reward_increment(state, vidx, base_reward_per_increment)
@@ -751,7 +749,7 @@ template get_flag_and_inactivity_delta(
      0.Gwei
    else:
      let penalty_numerator =
        state.validators[vidx].effective_balance * state.inactivity_scores[vidx]
        state.validators[vidx].effective_balance * inactivity_score
      penalty_numerator div penalty_denominator

  (vidx, reward(TIMELY_SOURCE_FLAG_INDEX),
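The penalty being rearranged here, written out; the change only swaps where the score is read from (a parameter instead of `state.inactivity_scores`), not the formula itself:

```latex
\text{penalty} \;=\; \left\lfloor
  \frac{\text{effective\_balance} \times \text{inactivity\_score}}
       {\text{INACTIVITY\_SCORE\_BIAS} \times \text{INACTIVITY\_PENALTY\_QUOTIENT}}
\right\rfloor
```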
@@ -804,7 +802,46 @@ iterator get_flag_and_inactivity_deltas*(
    yield get_flag_and_inactivity_delta(
      state, base_reward_per_increment, finality_delay, previous_epoch,
      active_increments, penalty_denominator, epoch_participation,
      participating_increments, info, vidx)
      participating_increments, info, vidx, state.inactivity_scores[vidx])

func get_flag_and_inactivity_delta_for_validator(
    cfg: RuntimeConfig,
    state: deneb.BeaconState | electra.BeaconState,
    base_reward_per_increment: Gwei, info: var altair.EpochInfo,
    finality_delay: uint64, vidx: ValidatorIndex, inactivity_score: Gwei):
    Opt[(ValidatorIndex, Gwei, Gwei, Gwei, Gwei, Gwei, Gwei)] =
  ## Return the deltas for a given ``flag_index`` by scanning through the
  ## participation flags.
  const INACTIVITY_PENALTY_QUOTIENT =
    when state is altair.BeaconState:
      INACTIVITY_PENALTY_QUOTIENT_ALTAIR
    else:
      INACTIVITY_PENALTY_QUOTIENT_BELLATRIX

  static: doAssert ord(high(TimelyFlag)) == 2

  let
    previous_epoch = get_previous_epoch(state)
    active_increments = get_active_increments(info)
    penalty_denominator =
      cfg.INACTIVITY_SCORE_BIAS * INACTIVITY_PENALTY_QUOTIENT
    epoch_participation =
      if previous_epoch == get_current_epoch(state):
        unsafeAddr state.current_epoch_participation
      else:
        unsafeAddr state.previous_epoch_participation
    participating_increments = [
      get_unslashed_participating_increment(info, TIMELY_SOURCE_FLAG_INDEX),
      get_unslashed_participating_increment(info, TIMELY_TARGET_FLAG_INDEX),
      get_unslashed_participating_increment(info, TIMELY_HEAD_FLAG_INDEX)]

  if not is_eligible_validator(info.validators[vidx]):
    return Opt.none((ValidatorIndex, Gwei, Gwei, Gwei, Gwei, Gwei, Gwei))

  Opt.some get_flag_and_inactivity_delta(
    state, base_reward_per_increment, finality_delay, previous_epoch,
    active_increments, penalty_denominator, epoch_participation,
    participating_increments, info, vidx, inactivity_score.uint64)

# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#rewards-and-penalties-1
func process_rewards_and_penalties*(
@@ -895,7 +932,8 @@ func process_registry_updates*(
  var maybe_exit_queue_info: Opt[ExitQueueInfo]

  for vidx in state.validators.vindices:
    if is_eligible_for_activation_queue(state.validators.item(vidx)):
    if is_eligible_for_activation_queue(
        typeof(state).kind, state.validators.item(vidx)):
      state.validators.mitem(vidx).activation_eligibility_epoch =
        get_current_epoch(state) + 1
@@ -940,7 +978,7 @@ func process_registry_updates*(
  # Process activation eligibility and ejections
  for index in 0 ..< state.validators.len:
    let validator = state.validators.item(index)
    if is_eligible_for_activation_queue(validator):
    if is_eligible_for_activation_queue(typeof(state).kind, validator):
      # Usually not too many at once, so do this individually
      state.validators.mitem(index).activation_eligibility_epoch =
        get_current_epoch(state) + 1
@@ -1000,6 +1038,22 @@ func get_slashing_penalty*(validator: Validator,
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#slashings
func get_slashing(
    state: ForkyBeaconState, total_balance: Gwei, vidx: ValidatorIndex): Gwei =
  # For efficiency reasons, it doesn't make sense to have process_slashings use
  # this per-validator index version, but keep them parallel otherwise.
  let
    epoch = get_current_epoch(state)
    adjusted_total_slashing_balance = get_adjusted_total_slashing_balance(
      state, total_balance)

  let validator = unsafeAddr state.validators.item(vidx)
  if slashing_penalty_applies(validator[], epoch):
    get_slashing_penalty(
      validator[], adjusted_total_slashing_balance, total_balance)
  else:
    0.Gwei

func process_slashings*(state: var ForkyBeaconState, total_balance: Gwei) =
  let
    epoch = get_current_epoch(state)
@@ -1164,7 +1218,7 @@ template compute_inactivity_update(
    # TODO activeness already checked; remove redundant checks between
    # is_active_validator and is_unslashed_participating_index
    if is_unslashed_participating_index(
        state, TIMELY_TARGET_FLAG_INDEX, previous_epoch, index.ValidatorIndex):
        state, TIMELY_TARGET_FLAG_INDEX, previous_epoch, index):
      inactivity_score -= min(1'u64, inactivity_score)
    else:
      inactivity_score += cfg.INACTIVITY_SCORE_BIAS
@@ -1195,6 +1249,7 @@ func process_inactivity_updates*(

    let
      pre_inactivity_score = state.inactivity_scores.asSeq()[index]
      index = index.ValidatorIndex # intentional shadowing
      inactivity_score =
        compute_inactivity_update(cfg, state, info, pre_inactivity_score)

@@ -1507,3 +1562,108 @@ proc process_epoch*(
    process_sync_committee_updates(state)

  ok()

proc get_validator_balance_after_epoch*(
    cfg: RuntimeConfig,
    state: deneb.BeaconState | electra.BeaconState,
    flags: UpdateFlags, cache: var StateCache, info: var altair.EpochInfo,
    index: ValidatorIndex): Gwei =
  # Run a subset of process_epoch() which affects an individual validator,
  # without modifying state itself
  info.init(state) # TODO avoid quadratic aspects here

  # Can't use process_justification_and_finalization(), but use its helper
  # function. Used to calculate inactivity_score.
  let jf_info =
    # process_justification_and_finalization() skips first two epochs
    if get_current_epoch(state) <= GENESIS_EPOCH + 1:
      JustificationAndFinalizationInfo(
        previous_justified_checkpoint: state.previous_justified_checkpoint,
        current_justified_checkpoint: state.current_justified_checkpoint,
        finalized_checkpoint: state.finalized_checkpoint,
        justification_bits: state.justification_bits)
    else:
      weigh_justification_and_finalization(
        state, info.balances.current_epoch,
        info.balances.previous_epoch[TIMELY_TARGET_FLAG_INDEX],
        info.balances.current_epoch_TIMELY_TARGET, flags)

  # Used as part of process_rewards_and_penalties
  let inactivity_score =
    # process_inactivity_updates skips GENESIS_EPOCH and ineligible validators
    if get_current_epoch(state) == GENESIS_EPOCH or
        not is_eligible_validator(info.validators[index]):
      0.Gwei
    else:
      let
        finality_delay =
          get_previous_epoch(state) - jf_info.finalized_checkpoint.epoch
        not_in_inactivity_leak = not is_in_inactivity_leak(finality_delay)
        pre_inactivity_score = state.inactivity_scores.asSeq()[index]

      # This is a template which uses not_in_inactivity_leak and index
      compute_inactivity_update(cfg, state, info, pre_inactivity_score).Gwei

  # process_rewards_and_penalties for a single validator
  let reward_and_penalties_balance = block:
    # process_rewards_and_penalties doesn't run at GENESIS_EPOCH
    if get_current_epoch(state) == GENESIS_EPOCH:
      state.balances.item(index)
    else:
      let
        total_active_balance = info.balances.current_epoch
        base_reward_per_increment = get_base_reward_per_increment(
          total_active_balance)
        finality_delay = get_finality_delay(state)

      var balance = state.balances.item(index)
      let maybeDelta = get_flag_and_inactivity_delta_for_validator(
        cfg, state, base_reward_per_increment, info, finality_delay, index,
        inactivity_score)
      if maybeDelta.isOk:
        # Can't use isErrOr in generics
        let (validator_index, reward0, reward1, reward2, penalty0, penalty1, penalty2) =
          maybeDelta.get
        info.validators[validator_index].delta.rewards += reward0 + reward1 + reward2
        info.validators[validator_index].delta.penalties += penalty0 + penalty1 + penalty2
        increase_balance(balance, info.validators[index].delta.rewards)
        decrease_balance(balance, info.validators[index].delta.penalties)
      balance

  # The two directly balance-changing operations, from Altair through Deneb,
  # are these. The rest is necessary to look past a single epoch transition,
  # but that's not the use case here.
  var post_epoch_balance = reward_and_penalties_balance
  decrease_balance(
    post_epoch_balance,
    get_slashing(state, info.balances.current_epoch, index))

  # Electra adds process_pending_balance_deposit to the list of potential
  # balance-changing epoch operations. This should probably be cached, so
  # the 16+ invocations of this function each time, e.g., withdrawals are
  # calculated don't repeat it, if it's empirically too expensive. Limits
  # exist on how large this structure can get though.
  when type(state).kind >= ConsensusFork.Electra:
    let available_for_processing = state.deposit_balance_to_consume +
      get_activation_exit_churn_limit(cfg, state, cache)
    var processed_amount = 0.Gwei

    for deposit in state.pending_balance_deposits:
      let
        validator = state.validators.item(deposit.index)
        deposit_validator_index = ValidatorIndex.init(deposit.index).valueOr:
          break

      # Validator is exiting, postpone the deposit until after withdrawable epoch
      if validator.exit_epoch < FAR_FUTURE_EPOCH:
        if not(get_current_epoch(state) <= validator.withdrawable_epoch) and
            deposit_validator_index == index:
          increase_balance(post_epoch_balance, deposit.amount)
      # Validator is not exiting, attempt to process deposit
      else:
        if not(processed_amount + deposit.amount > available_for_processing):
          if deposit_validator_index == index:
            increase_balance(post_epoch_balance, deposit.amount)
          processed_amount += deposit.amount

  post_epoch_balance
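A minimal calling sketch for the new helper (not part of the diff; `cfg`, `state` and `vidx` are assumed to be in scope), showing how it previews one validator's balance without running a full `process_epoch`:

```nim
# Sketch: peek at a validator's balance after the pending epoch transition,
# leaving `state` untouched.
var
  cache: StateCache
  info: altair.EpochInfo
let balanceAfter = get_validator_balance_after_epoch(
  cfg, state, {}, cache, info, vidx)  # {} = empty UpdateFlags
```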
@@ -47,7 +47,7 @@ type
  ): Opt[ForkedTrustedSignedBeaconBlock] {.gcsafe, raises: [].}

  BlobLoaderFn* = proc(
    blobId: BlobIdentifier): Opt[ref BlobSidecar] {.gcsafe, raises: [].}
    blobId: BlobIdentifier): Opt[ForkedBlobSidecar] {.gcsafe, raises: [].}

  InhibitFn* = proc: bool {.gcsafe, raises: [].}
@@ -102,21 +102,23 @@ proc checkResponse(roots: openArray[Eth2Digest],
      checks.del(res)
  true

proc checkResponse(idList: seq[BlobIdentifier],
                   blobs: openArray[ref BlobSidecar]): bool =
proc checkResponse(
    idList: seq[BlobIdentifier],
    blobs: openArray[ForkedBlobSidecar]): bool =
  if len(blobs) > len(idList):
    return false
  for blob in blobs:
    let block_root = hash_tree_root(blob.signed_block_header.message)
    var found = false
    for id in idList:
      if id.block_root == block_root and id.index == blob.index:
        found = true
        break
    if not found:
      return false
    blob[].verify_blob_sidecar_inclusion_proof().isOkOr:
      return false
    withForkyBlob(blob):
      let block_root = hash_tree_root(forkyBlob[].signed_block_header.message)
      var found = false
      for id in idList:
        if id.block_root == block_root and id.index == forkyBlob[].index:
          found = true
          break
      if not found:
        return false
      forkyBlob[].verify_blob_sidecar_inclusion_proof().isOkOr:
        return false
  true

proc requestBlocksByRoot(rman: RequestManager, items: seq[Eth2Digest]) {.async: (raises: [CancelledError]).} =
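The access pattern introduced here recurs throughout the rest of this diff, so a compact illustration may help (a sketch, not code from the diff): `withForkyBlob` injects `blobFork` and `forkyBlob` into scope, the latter being a `ref` to the fork-specific sidecar.

```nim
# Sketch: read a field from a fork-agnostic sidecar.
proc sidecarSlot(sidecar: ForkedBlobSidecar): Slot =
  withForkyBlob(sidecar):
    # `forkyBlob` is a ref to the deneb/electra BlobSidecar variant
    forkyBlob[].signed_block_header.message.slot
```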
@@ -214,7 +216,8 @@ proc fetchBlobsFromNetwork(self: RequestManager,
        self.blobQuarantine[].put(b)
      var curRoot: Eth2Digest
      for b in ublobs:
        let block_root = hash_tree_root(b.signed_block_header.message)
        let block_root = withForkyBlob(b):
          hash_tree_root(forkyBlob[].signed_block_header.message)
        if block_root != curRoot:
          curRoot = block_root
          if (let o = self.quarantine[].popBlobless(curRoot); o.isSome):
@@ -88,7 +88,8 @@ type

  BeaconBlocksRes =
    NetRes[List[ref ForkedSignedBeaconBlock, Limit MAX_REQUEST_BLOCKS]]
  BlobSidecarsRes = NetRes[List[ref BlobSidecar, Limit(MAX_REQUEST_BLOB_SIDECARS)]]
  BlobSidecarsRes =
    NetRes[List[ForkedBlobSidecar, Limit(MAX_REQUEST_BLOB_SIDECARS)]]

proc now*(sm: typedesc[SyncMoment], slots: uint64): SyncMoment {.inline.} =
  SyncMoment(stamp: now(chronos.Moment), slots: slots)
@@ -225,12 +226,12 @@ proc remainingSlots(man: SyncManager): uint64 =
  else:
    0'u64

func groupBlobs*[T](req: SyncRequest[T],
                    blocks: seq[ref ForkedSignedBeaconBlock],
                    blobs: seq[ref BlobSidecar]):
    Result[seq[BlobSidecars], string] =
func groupBlobs*[T](
    req: SyncRequest[T],
    blocks: seq[ref ForkedSignedBeaconBlock],
    blobs: seq[ForkedBlobSidecar]): Result[seq[ForkedBlobSidecars], string] =
  var
    grouped = newSeq[BlobSidecars](len(blocks))
    grouped = newSeq[ForkedBlobSidecars](len(blocks))
    blob_cursor = 0
  for block_idx, blck in blocks:
    withBlck(blck[]):
@@ -241,17 +242,23 @@ func groupBlobs*[T](req: SyncRequest[T],
      # Clients MUST include all blob sidecars of each block from which they include blob sidecars.
      # The following blob sidecars, where they exist, MUST be sent in consecutive (slot, index) order.
      # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#blobsidecarsbyrange-v1
      const expectedBlobFork =
        blobForkAtConsensusFork(consensusFork).expect("Blobs OK")
      let header = forkyBlck.toSignedBeaconBlockHeader()
      for blob_idx, kzg_commitment in kzgs:
        if blob_cursor >= blobs.len:
          return err("BlobSidecar: response too short")
        let blob_sidecar = blobs[blob_cursor]
        if blob_sidecar.index != BlobIndex blob_idx:
          return err("BlobSidecar: unexpected index")
        if blob_sidecar.kzg_commitment != kzg_commitment:
          return err("BlobSidecar: unexpected kzg_commitment")
        if blob_sidecar.signed_block_header != header:
          return err("BlobSidecar: unexpected signed_block_header")
        withForkyBlob(blob_sidecar):
          when blobFork != expectedBlobFork:
            return err("BlobSidecar: unexpected data fork")
          else:
            if forkyBlob[].index != BlobIndex blob_idx:
              return err("BlobSidecar: unexpected index")
            if forkyBlob[].kzg_commitment != kzg_commitment:
              return err("BlobSidecar: unexpected kzg_commitment")
            if forkyBlob[].signed_block_header != header:
              return err("BlobSidecar: unexpected signed_block_header")
        grouped[block_idx].add(blob_sidecar)
        inc blob_cursor

@@ -259,14 +266,15 @@ func groupBlobs*[T](req: SyncRequest[T],
    # we reached end of blocks without consuming all blobs so either
    # the peer sent us too few blocks in the paired request, or the
    # peer is sending us spurious blobs.
    Result[seq[BlobSidecars], string].err "invalid block or blob sequence"
    Result[seq[ForkedBlobSidecars], string].err "invalid block or blob sequence"
  else:
    Result[seq[BlobSidecars], string].ok grouped
    Result[seq[ForkedBlobSidecars], string].ok grouped

func checkBlobs(blobs: seq[BlobSidecars]): Result[void, string] =
func checkBlobs(blobs: seq[ForkedBlobSidecars]): Result[void, string] =
  for blob_sidecars in blobs:
    for blob_sidecar in blob_sidecars:
      ? blob_sidecar[].verify_blob_sidecar_inclusion_proof()
      withForkyBlob(blob_sidecar):
        ? forkyBlob[].verify_blob_sidecar_inclusion_proof()
  ok()

proc syncStep[A, B](man: SyncManager[A, B], index: int, peer: A)
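Putting the two helpers together, a hedged sketch of the caller's side (variable names hypothetical; assumes an enclosing proc in the same module that returns a `Result[..., string]`): `groupBlobs` pairs each block with its sidecars in order, then `checkBlobs` verifies every inclusion proof.

```nim
# Sketch: `req`, `blocks` and `blobs` come from the paired by-range requests.
let grouped = groupBlobs(req, blocks, blobs).valueOr:
  return err(error)  # inconsistent block/blob sequence from the peer
? checkBlobs(grouped)
# every block index now maps to its ForkedBlobSidecars, proofs verified
```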
@@ -456,7 +464,8 @@ proc syncStep[A, B](man: SyncManager[A, B], index: int, peer: A)
          blobs_map = blobSmap, request = req

      if len(blobData) > 0:
        let slots = mapIt(blobData, it[].signed_block_header.message.slot)
        let slots = mapIt(blobData, it.withForkyBlob(
          forkyBlob[].signed_block_header.message.slot))
        let uniqueSlots = foldl(slots, combine(a, b), @[slots[0]])
        if not(checkResponse(req, uniqueSlots)):
          peer.updateScore(PeerScoreBadResponse)
@@ -483,7 +492,7 @@ proc syncStep[A, B](man: SyncManager[A, B], index: int, peer: A)
            return
          Opt.some(groupedBlobs.get())
        else:
          Opt.none(seq[BlobSidecars])
          Opt.none(seq[ForkedBlobSidecars])

      if len(blockData) == 0 and man.direction == SyncQueueKind.Backward and
          req.contains(man.getSafeSlot()):
@@ -44,70 +44,44 @@ proc readChunkPayload*(
  var contextBytes: ForkDigest
  try:
    await conn.readExactly(addr contextBytes, sizeof contextBytes)
  except CancelledError as exc:
    raise exc
  except CatchableError:
    return neterr UnexpectedEOF
  let contextFork =
    peer.network.forkDigests[].consensusForkForDigest(contextBytes).valueOr:
      return neterr InvalidContextBytes

  static: doAssert ConsensusFork.high == ConsensusFork.Electra
  if contextBytes == peer.network.forkDigests.phase0:
    let res = await readChunkPayload(conn, peer, phase0.SignedBeaconBlock)
  withConsensusFork(contextFork):
    let res = await readChunkPayload(
      conn, peer, consensusFork.SignedBeaconBlock)
    if res.isOk:
      return ok newClone(ForkedSignedBeaconBlock.init(res.get))
    else:
      return err(res.error)
  elif contextBytes == peer.network.forkDigests.altair:
    let res = await readChunkPayload(conn, peer, altair.SignedBeaconBlock)
    if res.isOk:
      return ok newClone(ForkedSignedBeaconBlock.init(res.get))
    else:
      return err(res.error)
  elif contextBytes == peer.network.forkDigests.bellatrix:
    let res = await readChunkPayload(conn, peer, bellatrix.SignedBeaconBlock)
    if res.isOk:
      return ok newClone(ForkedSignedBeaconBlock.init(res.get))
    else:
      return err(res.error)
  elif contextBytes == peer.network.forkDigests.capella:
    let res = await readChunkPayload(conn, peer, capella.SignedBeaconBlock)
    if res.isOk:
      return ok newClone(ForkedSignedBeaconBlock.init(res.get))
    else:
      return err(res.error)
  elif contextBytes == peer.network.forkDigests.deneb:
    let res = await readChunkPayload(conn, peer, deneb.SignedBeaconBlock)
    if res.isOk:
      return ok newClone(ForkedSignedBeaconBlock.init(res.get))
    else:
      return err(res.error)
  elif contextBytes == peer.network.forkDigests.electra:
    let res = await readChunkPayload(conn, peer, electra.SignedBeaconBlock)
    if res.isOk:
      return ok newClone(ForkedSignedBeaconBlock.init(res.get))
    else:
      return err(res.error)
  else:
    return neterr InvalidContextBytes

proc readChunkPayload*(
    conn: Connection, peer: Peer, MsgType: type (ref BlobSidecar)):
    conn: Connection, peer: Peer, MsgType: type (ForkedBlobSidecar)):
    Future[NetRes[MsgType]] {.async: (raises: [CancelledError]).} =
  var contextBytes: ForkDigest
  try:
    await conn.readExactly(addr contextBytes, sizeof contextBytes)
  except CancelledError as exc:
    raise exc
  except CatchableError:
    return neterr UnexpectedEOF
  let
    contextFork =
      peer.network.forkDigests[].consensusForkForDigest(contextBytes).valueOr:
        return neterr InvalidContextBytes
    blobFork = blobForkAtConsensusFork(contextFork).valueOr:
      return neterr InvalidContextBytes

  if contextBytes == peer.network.forkDigests.deneb:
    let res = await readChunkPayload(conn, peer, BlobSidecar)
  withBlobFork(blobFork):
    let res = await readChunkPayload(conn, peer, blobFork.BlobSidecar)
    if res.isOk:
      return ok newClone(res.get)
      if contextFork != peer.network.cfg.consensusForkAtEpoch(
          res.get.signed_block_header.message.slot.epoch):
        return neterr InvalidContextBytes
      return ok ForkedBlobSidecar.init(newClone(res.get))
    else:
      return err(res.error)
  else:
    return neterr InvalidContextBytes

{.pop.} # TODO fix p2p macro for raises
@@ -249,7 +223,7 @@ p2pProtocol BeaconSync(version = 1,
      peer: Peer,
      blobIds: BlobIdentifierList,
      response: MultipleChunksResponse[
        ref BlobSidecar, Limit(MAX_REQUEST_BLOB_SIDECARS)])
        ForkedBlobSidecar, Limit(MAX_REQUEST_BLOB_SIDECARS)])
      {.async, libp2pProtocol("blob_sidecars_by_root", 1).} =
    # TODO Semantically, this request should return a non-ref, but doing so
    #      runs into extreme inefficiency due to the compiler introducing
@@ -260,7 +234,7 @@ p2pProtocol BeaconSync(version = 1,
    #      implementation (it's used to derive the signature of the client
    #      function, not in the code below!)
    # TODO although you can't tell from this function definition, a magic
    #      client call that returns `seq[ref BlobSidecar]` will
    #      client call that returns `seq[ForkedBlobSidecar]` will
    #      be generated by the libp2p macro - we guarantee that seq items
    #      are `not-nil` in the implementation
    trace "got blobs range request", peer, len = blobIds.len
@@ -276,10 +250,17 @@ p2pProtocol BeaconSync(version = 1,
      bytes: seq[byte]

    for i in 0..<count:
      let blockRef = dag.getBlockRef(blobIds[i].block_root).valueOr:
        continue
      let index = blobIds[i].index
      if dag.db.getBlobSidecarSZ(blockRef.bid.root, index, bytes):
      let
        blockRef = dag.getBlockRef(blobIds[i].block_root).valueOr:
          continue
        consensusFork = dag.cfg.consensusForkAtEpoch(blockRef.bid.slot.epoch)
        blobFork = blobForkAtConsensusFork(consensusFork).valueOr:
          continue # Pre-Deneb
        index = blobIds[i].index
        ok = withBlobFork(blobFork):
          getBlobSidecarSZ[blobFork.BlobSidecar](
            dag.db, blockRef.bid.root, index, bytes)
      if ok:
        let uncompressedLen = uncompressedLenFramed(bytes).valueOr:
          warn "Cannot read blob size, database corrupt?",
            bytes = bytes.len(), blck = shortLog(blockRef), blobindex = index
@@ -302,14 +283,14 @@ p2pProtocol BeaconSync(version = 1,
      startSlot: Slot,
      reqCount: uint64,
      response: MultipleChunksResponse[
        ref BlobSidecar, Limit(MAX_REQUEST_BLOB_SIDECARS)])
        ForkedBlobSidecar, Limit(MAX_REQUEST_BLOB_SIDECARS)])
      {.async, libp2pProtocol("blob_sidecars_by_range", 1).} =
    # TODO This code is more complicated than it needs to be, since the type
    #      of the multiple chunks response is not actually used in this server
    #      implementation (it's used to derive the signature of the client
    #      function, not in the code below!)
    # TODO although you can't tell from this function definition, a magic
    #      client call that returns `seq[ref BlobSidecar]` will
    #      client call that returns `seq[ForkedBlobSidecar]` will
    #      be generated by the libp2p macro - we guarantee that seq items
    #      are `not-nil` in the implementation

@@ -340,8 +321,15 @@ p2pProtocol BeaconSync(version = 1,
      bytes: seq[byte]

    for i in startIndex..endIndex:
      let
        consensusFork = dag.cfg.consensusForkAtEpoch(blockIds[i].slot.epoch)
        blobFork = blobForkAtConsensusFork(consensusFork).valueOr:
          continue # Pre-Deneb
      for j in 0..<MAX_BLOBS_PER_BLOCK:
        if dag.db.getBlobSidecarSZ(blockIds[i].root, BlobIndex(j), bytes):
        let ok = withBlobFork(blobFork):
          getBlobSidecarSZ[blobFork.BlobSidecar](
            dag.db, blockIds[i].root, BlobIndex(j), bytes)
        if ok:
          # In general, there is not much intermediate time between post-merge
          # blocks all being optimistic and none of them being optimistic. The
          # EL catches up, tells the CL the head is verified, and that's it.
@@ -26,9 +26,11 @@ type
  GetSlotCallback* = proc(): Slot {.gcsafe, raises: [].}
  GetBoolCallback* = proc(): bool {.gcsafe, raises: [].}
  ProcessingCallback* = proc() {.gcsafe, raises: [].}
  BlockVerifier* = proc(signedBlock: ForkedSignedBeaconBlock,
      blobs: Opt[BlobSidecars], maybeFinalized: bool):
      Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).}
  BlockVerifier* = proc(
      signedBlock: ForkedSignedBeaconBlock,
      blobs: Opt[ForkedBlobSidecars],
      maybeFinalized: bool
  ): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).}

  SyncQueueKind* {.pure.} = enum
    Forward, Backward
@@ -43,7 +45,7 @@ type
  SyncResult*[T] = object
    request*: SyncRequest[T]
    data*: seq[ref ForkedSignedBeaconBlock]
    blobs*: Opt[seq[BlobSidecars]]
    blobs*: Opt[seq[ForkedBlobSidecars]]

  GapItem*[T] = object
    start*: Slot
@@ -90,8 +92,8 @@ chronicles.expandIt SyncRequest:
  peer = shortLog(it.item)
  direction = toLowerAscii($it.kind)

proc getShortMap*[T](req: SyncRequest[T],
                     data: openArray[ref ForkedSignedBeaconBlock]): string =
proc getShortMap*[T](
    req: SyncRequest[T], data: openArray[ref ForkedSignedBeaconBlock]): string =
  ## Returns all slot numbers in ``data`` as placement map.
  var res = newStringOfCap(req.count)
  var slider = req.slot
@@ -111,8 +113,8 @@ proc getShortMap*[T](req: SyncRequest[T],
    slider = slider + 1
  res

proc getShortMap*[T](req: SyncRequest[T],
                     data: openArray[ref BlobSidecar]): string =
proc getShortMap*[T](
    req: SyncRequest[T], data: openArray[ForkedBlobSidecar]): string =
  ## Returns all slot numbers in ``data`` as placement map.
  var res = newStringOfCap(req.count * MAX_BLOBS_PER_BLOCK)
  var cur : uint64 = 0
@@ -120,9 +122,11 @@ proc getShortMap*[T](req: SyncRequest[T],
    if cur >= lenu64(data):
      res.add('|')
      continue
    if slot == data[cur].signed_block_header.message.slot:
    let blobSlot = withForkyBlob(data[cur]):
      forkyBlob[].signed_block_header.message.slot
    if slot == blobSlot:
      for k in cur..<cur+MAX_BLOBS_PER_BLOCK:
        if k >= lenu64(data) or slot != data[k].signed_block_header.message.slot:
        if k >= lenu64(data) or slot != blobSlot:
          res.add('|')
          break
        else:
@@ -541,14 +545,16 @@ proc getRewindPoint*[T](sq: SyncQueue[T], failSlot: Slot,

# This belongs inside the blocks iterator below, but can't be there due to
# https://github.com/nim-lang/Nim/issues/21242
func getOpt(blobs: Opt[seq[BlobSidecars]], i: int): Opt[BlobSidecars] =
func getOpt(
    blobs: Opt[seq[ForkedBlobSidecars]], i: int): Opt[ForkedBlobSidecars] =
  if blobs.isSome:
    Opt.some(blobs.get()[i])
  else:
    Opt.none(BlobSidecars)
    Opt.none(ForkedBlobSidecars)

iterator blocks[T](sq: SyncQueue[T],
    sr: SyncResult[T]): (ref ForkedSignedBeaconBlock, Opt[BlobSidecars]) =
iterator blocks[T](
    sq: SyncQueue[T],
    sr: SyncResult[T]): (ref ForkedSignedBeaconBlock, Opt[ForkedBlobSidecars]) =
  case sq.kind
  of SyncQueueKind.Forward:
    for i in countup(0, len(sr.data) - 1):
@@ -607,11 +613,13 @@ func numAlreadyKnownSlots[T](sq: SyncQueue[T], sr: SyncRequest[T]): uint64 =
    # Entire request is still relevant.
    0

proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T],
              data: seq[ref ForkedSignedBeaconBlock],
              blobs: Opt[seq[BlobSidecars]],
              maybeFinalized: bool = false,
              processingCb: ProcessingCallback = nil) {.async: (raises: [CancelledError]).} =
proc push*[T](
    sq: SyncQueue[T], sr: SyncRequest[T],
    data: seq[ref ForkedSignedBeaconBlock],
    blobs: Opt[seq[ForkedBlobSidecars]],
    maybeFinalized: bool = false,
    processingCb: ProcessingCallback = nil
) {.async: (raises: [CancelledError]).} =
  logScope:
    sync_ident = sq.ident
    topics = "syncman"
@@ -1733,8 +1733,10 @@ proc registerValidatorsPerBuilder(
      validatorRegistrations.add @[validatorRegistration]

    # First, check for VC-added keys; cheaper because provided pre-signed
    # See issue #5599: currently VC have no way to provide BN with per-validator builders per the specs, so we have to
    # resort to use the BN fallback default (--payload-builder-url value, obtained by calling getPayloadBuilderAddress)
    # See issue #5599: currently VC have no way to provide BN with per-validator
    # builders per the specs, so we have to resort to use the BN fallback
    # default (--payload-builder-url value, obtained by calling
    # getPayloadBuilderAddress)
    var nonExitedVcPubkeys: HashSet[ValidatorPubKey]
    if node.externalBuilderRegistrations.len > 0 and
        payloadBuilderAddress == node.config.getPayloadBuilderAddress.value:
@@ -1481,6 +1481,7 @@ proc removeFeeRecipientFile*(host: KeymanagerHost,
  if fileExists(path):
    io2.removeFile(path).isOkOr:
      return err($uint(error) & " " & ioErrorMsg(error))
  host.validatorPool[].invalidateValidatorRegistration(pubkey)
  ok()

proc removeGasLimitFile*(host: KeymanagerHost,
@@ -1499,15 +1500,22 @@ proc removeGraffitiFile*(host: KeymanagerHost,
    return err($uint(error) & " " & ioErrorMsg(error))
  ok()

proc setFeeRecipient*(host: KeymanagerHost, pubkey: ValidatorPubKey, feeRecipient: Eth1Address): Result[void, string] =
proc setFeeRecipient*(
    host: KeymanagerHost, pubkey: ValidatorPubKey, feeRecipient: Eth1Address):
    Result[void, string] =
  let validatorKeystoreDir = host.validatorKeystoreDir(pubkey)

  ? secureCreatePath(validatorKeystoreDir).mapErr(proc(e: auto): string =
    "Could not create wallet directory [" & validatorKeystoreDir & "]: " & $e)

  io2.writeFile(validatorKeystoreDir / FeeRecipientFilename, $feeRecipient)
  let res = io2.writeFile(
      validatorKeystoreDir / FeeRecipientFilename, $feeRecipient)
    .mapErr(proc(e: auto): string = "Failed to write fee recipient file: " & $e)

  if res.isOk:
    host.validatorPool[].invalidateValidatorRegistration(pubkey)

  res

proc setGasLimit*(host: KeymanagerHost,
                  pubkey: ValidatorPubKey,
                  gasLimit: uint64): Result[void, string] =
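Usage from the keymanager side, sketched (hypothetical `host`, `pubkey` and `recipient` values, not from the diff): a successful write now also drops the cached builder registration, so it gets re-signed with the new fee recipient.

```nim
# Sketch: registration-cache invalidation happens inside setFeeRecipient
# on success; the caller only handles the I/O error path.
let res = host.setFeeRecipient(pubkey, recipient)
if res.isErr:
  warn "Failed to set fee recipient", err = res.error
```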
@@ -84,11 +84,16 @@ template getCurrentBeaconTime(router: MessageRouter): BeaconTime =
type RouteBlockResult = Result[Opt[BlockRef], string]
proc routeSignedBeaconBlock*(
    router: ref MessageRouter, blck: ForkySignedBeaconBlock,
    blobsOpt: Opt[seq[BlobSidecar]], checkValidator: bool):
    blobsOpt: Opt[seq[ForkyBlobSidecar]], checkValidator: bool):
    Future[RouteBlockResult] {.async: (raises: [CancelledError]).} =
  ## Validate and broadcast beacon block, then add it to the block database
  ## Returns the new Head when block is added successfully to dag, none when
  ## block passes validation but is not added, and error otherwise
  const
    consensusFork = typeof(blck).kind
    blobFork = blobForkAtConsensusFork(consensusFork).get(BlobFork.Deneb)
  static: doAssert typeof(blobsOpt).T is seq[blobFork.BlobSidecar]

  let wallTime = router[].getCurrentBeaconTime()

  block:
@@ -117,8 +122,10 @@ proc routeSignedBeaconBlock*(
    let blobs = blobsOpt.get()
    let kzgCommits = blck.message.body.blob_kzg_commitments.asSeq
    if blobs.len > 0 or kzgCommits.len > 0:
      let res = validate_blobs(kzgCommits, blobs.mapIt(it.blob),
                               blobs.mapIt(it.kzg_proof))
      let res = validate_blobs(
        kzgCommits,
        blobs.mapIt(KzgBlob(bytes: it.blob)),
        blobs.mapIt(it.kzg_proof))
      if res.isErr():
        warn "blobs failed validation",
          blockRoot = shortLog(blck.root),
@@ -150,7 +157,7 @@ proc routeSignedBeaconBlock*(
      blockRoot = shortLog(blck.root), blck = shortLog(blck.message),
      signature = shortLog(blck.signature), error = res.error()

  var blobRefs = Opt.none(BlobSidecars)
  var blobRefs = Opt.none(ForkedBlobSidecars)
  if blobsOpt.isSome():
    let blobs = blobsOpt.get()
    var workers = newSeq[Future[SendResult]](blobs.len)
@@ -166,7 +173,7 @@ proc routeSignedBeaconBlock*(
          blob = shortLog(blobs[i]), error = res.error[]
      else:
        notice "Blob sent", blob = shortLog(blobs[i])
    blobRefs = Opt.some(blobs.mapIt(newClone(it)))
    blobRefs = Opt.some(blobs.mapIt(ForkedBlobSidecar.init(newClone(it))))

  let added = await router[].blockProcessor[].addBlock(
    MsgSource.api, ForkedSignedBeaconBlock.init(blck), blobRefs)
@@ -129,7 +129,7 @@ proc unblindAndRouteBlockMEV*(
        bundle.data.blobs_bundle.commitments:
      return err("unblinded blobs bundle has unexpected commitments")
    let ok = verifyProofs(
        asSeq blobs_bundle.blobs,
        blobs_bundle.blobs.mapIt(KzgBlob(bytes: it)),
        asSeq blobs_bundle.commitments,
        asSeq blobs_bundle.proofs).valueOr:
      return err("unblinded blobs bundle fails verification")
@@ -288,6 +288,15 @@ proc updateValidator*(pool: var ValidatorPool,

  validator.activationEpoch = activationEpoch

func invalidateValidatorRegistration*(
    pool: var ValidatorPool, pubkey: ValidatorPubKey) =
  # When the per-validator fee recipient changes via keymanager, the builder
  # API validator registration needs to be recomputed. This will happen the
  # next time registrations are sent, but ensure here that it will happen
  # rather than relying on a now-outdated, cached validator registration.
  pool.getValidator(pubkey).isErrOr:
    value.externalBuilderRegistration.reset()

proc close*(pool: var ValidatorPool) =
  ## Unlock and close all validator keystore's files managed by ``pool``.
  for validator in pool.validators.values():
@@ -51,6 +51,8 @@ const

  fullVersionStr* = "v" & versionAsStr & "-" & gitRevision & "-" & versionBlob

  nimbusAgentStr* = "Nimbus/" & fullVersionStr

func getNimGitHash*(): string =
  const gitPrefix = "git hash: "
  let tmp = splitLines(nimFullBanner)
@@ -6,7 +6,7 @@
 *   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 * at your option. This file may not be copied, modified, or distributed except according to those terms.
 */
library 'status-jenkins-lib@v1.8.14'
library 'status-jenkins-lib@v1.9.2'

pipeline {
  /* This way we run the same Jenkinsfile on different platforms. */
@@ -1,85 +0,0 @@
#!/usr/bin/env groovy
/* beacon_chain
 * Copyright (c) 2019-2024 Status Research & Development GmbH
 * Licensed and distributed under either of
 *   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 *   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 * at your option. This file may not be copied, modified, or distributed except according to those terms.
 */
library 'status-jenkins-lib@nix/flake-build'

pipeline {
  /* This way we run the same Jenkinsfile on different platforms. */
  agent { label params.AGENT_LABEL }

  parameters {
    string(
      name: 'AGENT_LABEL',
      description: 'Label for targeted CI slave host: linux/macos',
      defaultValue: params.AGENT_LABEL ?: getAgentLabel(),
    )
    choice(
      name: 'VERBOSITY',
      description: 'Value for the V make flag to increase log verbosity',
      choices: [0, 1, 2]
    )
  }

  options {
    timestamps()
    ansiColor('xterm')
    /* This also includes wait time in the queue. */
    timeout(time: 1, unit: 'HOURS')
    /* Limit builds retained. */
    buildDiscarder(logRotator(
      numToKeepStr: '5',
      daysToKeepStr: '30',
    ))
    /* Abort old builds for non-main branches. */
    disableConcurrentBuilds(
      abortPrevious: !isMainBranch()
    )
  }

  stages {
    stage('Beacon Node') {
      steps { script {
        nix.flake('beacon_node')
      } }
    }

    stage('Version check') {
      steps { script {
        sh 'result/bin/nimbus_beacon_node --version'
      } }
    }
  }

  post {
    always {
      cleanWs(
        disableDeferredWipeout: true,
        deleteDirs: true
      )
    }
  }
}

def isMainBranch() {
  return ['stable', 'testing', 'unstable'].contains(env.BRANCH_NAME)
}

/* This allows us to use one Jenkinsfile and run
 * jobs on different platforms based on job name. */
def getAgentLabel() {
  if (params.AGENT_LABEL) { return params.AGENT_LABEL }
  /* We extract the name of the job from currentThread because
   * before an agent is picked env is not available. */
  def tokens = Thread.currentThread().getName().split('/')
  def labels = []
  /* Check if the job path contains any of the valid labels. */
  ['linux', 'macos', 'x86_64', 'aarch64', 'arm64'].each {
    if (tokens.contains(it)) { labels.add(it) }
  }
  return labels.join(' && ')
}
@@ -0,0 +1 @@
nix.Jenkinsfile
@@ -0,0 +1,85 @@
#!/usr/bin/env groovy
/* beacon_chain
 * Copyright (c) 2019-2024 Status Research & Development GmbH
 * Licensed and distributed under either of
 *   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 *   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 * at your option. This file may not be copied, modified, or distributed except according to those terms.
 */
library 'status-jenkins-lib@v1.9.2'

pipeline {
  /* This way we run the same Jenkinsfile on different platforms. */
  agent { label params.AGENT_LABEL }

  parameters {
    string(
      name: 'AGENT_LABEL',
      description: 'Label for targeted CI slave host: linux/macos',
      defaultValue: params.AGENT_LABEL ?: getAgentLabel(),
    )
    choice(
      name: 'VERBOSITY',
      description: 'Value for the V make flag to increase log verbosity',
      choices: [0, 1, 2]
    )
  }

  options {
    timestamps()
    ansiColor('xterm')
    /* This also includes wait time in the queue. */
    timeout(time: 1, unit: 'HOURS')
    /* Limit builds retained. */
    buildDiscarder(logRotator(
      numToKeepStr: '5',
      daysToKeepStr: '30',
    ))
    /* Abort old builds for non-main branches. */
    disableConcurrentBuilds(
      abortPrevious: !isMainBranch()
    )
  }

  stages {
    stage('Beacon Node') {
      steps { script {
        nix.flake('beacon_node')
      } }
    }

    stage('Version check') {
      steps { script {
        sh 'result/bin/nimbus_beacon_node --version'
      } }
    }
  }

  post {
    always {
      cleanWs(
        disableDeferredWipeout: true,
        deleteDirs: true
      )
    }
  }
}

def isMainBranch() {
  return ['stable', 'testing', 'unstable'].contains(env.BRANCH_NAME)
}

/* This allows us to use one Jenkinsfile and run
 * jobs on different platforms based on job name. */
def getAgentLabel() {
  if (params.AGENT_LABEL) { return params.AGENT_LABEL }
  /* We extract the name of the job from currentThread because
   * before an agent is picked env is not available. */
  def tokens = Thread.currentThread().getName().split('/')
  def labels = []
  /* Check if the job path contains any of the valid labels. */
  ['linux', 'macos', 'x86_64', 'aarch64', 'arm64'].each {
    if (tokens.contains(it)) { labels.add(it) }
  }
  return labels.join(' && ')
}
config.nims
@@ -120,6 +120,11 @@ elif defined(macosx) and defined(arm64):
  # Apple's Clang can't handle "-march=native" on M1: https://github.com/status-im/nimbus-eth2/issues/2758
  switch("passC", "-mcpu=apple-m1")
  switch("passL", "-mcpu=apple-m1")
elif defined(riscv64):
  # riscv64 needs specification of ISA with extensions. 'gc' is widely supported
  # and seems to be the minimum extensions needed to build.
  switch("passC", "-march=rv64gc")
  switch("passL", "-march=rv64gc")
else:
  switch("passC", "-march=native")
  switch("passL", "-march=native")
@@ -187,9 +192,6 @@ switch("warning", "CaseTransition:off")
# do its (N)RVO pass: https://github.com/nim-lang/RFCs/issues/230
switch("warning", "ObservableStores:off")

# Too many false positives for "Warning: method has lock level <unknown>, but another method has 0 [LockLevel]"
switch("warning", "LockLevel:off")

# Too many right now to read compiler output. Warnings are legitimate, but
# should be fixed out-of-band of `unstable` branch.
switch("warning", "BareExcept:off")
@@ -218,7 +220,8 @@ put("server.always", "-fno-lto")
put("assembly.always", "-fno-lto")

# Secp256k1
put("secp256k1.always", "-fno-lto")
# -fomit-frame-pointer for https://github.com/status-im/nimbus-eth2/issues/6324
put("secp256k1.always", "-fno-lto -fomit-frame-pointer")

# BearSSL - only RNGs
put("aesctr_drbg.always", "-fno-lto")
@@ -66,7 +66,7 @@ watchdog==2.1.9
    # via mkdocs
wheel==0.38.1
    # via pip-tools
zipp==3.8.1
zipp==3.19.1
    # via importlib-metadata

# The following packages are considered to be unsafe in a requirements file:
@@ -112,7 +112,6 @@ The following options are available:
--light-client-data-import-mode   Which classes of light client data to import. Must be one of: none, only-new,
                                  full (slow startup), on-demand (may miss validator duties) [=only-new].
--light-client-data-max-periods   Maximum number of sync committee periods to retain light client data.
--long-range-sync                 Enable long-range syncing (genesis sync) [=LongRangeSyncMode.Light].
--in-process-validators           Disable the push model (the beacon node tells a signing process with the private
                                  keys of the validators what to sign and when) and load the validators in the
                                  beacon node itself [=true].
@@ -383,6 +383,7 @@ proc cmdDumpState(conf: DbConf) =
    bellatrixState = (ref bellatrix.HashedBeaconState)()
    capellaState = (ref capella.HashedBeaconState)()
    denebState = (ref deneb.HashedBeaconState)()
    electraState = (ref electra.HashedBeaconState)()

  for stateRoot in conf.stateRoot:
    if shouldShutDown: quit QuitSuccess
@@ -401,6 +402,7 @@ proc cmdDumpState(conf: DbConf) =
    doit(bellatrixState[])
    doit(capellaState[])
    doit(denebState[])
    doit(electraState[])

    echo "Couldn't load ", stateRoot

@@ -487,19 +489,22 @@ proc cmdPutBlob(conf: DbConf, cfg: RuntimeConfig) =
    let
      blob =
        try:
          SSZ.decode(readAllBytes(file).tryGet(), BlobSidecar)
          readSszForkedBlobSidecar(
            cfg, readAllBytes(file).tryGet())
        except ResultError[IoErrorCode] as e:
          echo "Couldn't load ", file, ": ", e.msg
          continue
        except SerializationError as e:
          echo "Malformed ", file, ": ", e.msg
          continue
      res = blob.verify_blob_sidecar_inclusion_proof()
      res = withForkyBlob(blob):
        forkyBlob[].verify_blob_sidecar_inclusion_proof()
    if res.isErr:
      echo "Invalid ", file, ": ", res.error
      continue

    db.putBlobSidecar(blob)
    withForkyBlob(blob):
      db.putBlobSidecar(forkyBlob[])

proc cmdRewindState(conf: DbConf, cfg: RuntimeConfig) =
  echo "Opening database..."
@@ -1212,4 +1217,4 @@ when isMainModule:
  of DbCmd.validatorPerf:
    cmdValidatorPerf(conf, cfg)
  of DbCmd.validatorDb:
    cmdValidatorDb(conf, cfg)
    cmdValidatorDb(conf, cfg)
@@ -4748,8 +4748,7 @@
      "body": {"content-type": "application/json", "data": "[{\"message\":{\"fee_recipient\":\"0xb943c2c22b1b186a34f47c4dbe2fe367de9ec180\",\"gas_limit\":\"40000000\",\"timestamp\":\"1661879190\",\"pubkey\":\"0xa37b7bb9c412b8cc318fabf7b1fec33eb9634680687f07b977393180ce99889dbcfda81900f3afb9f2281930cf49f5d8\"},\"signature\":\"0xa493085fab365d13bea2376434abc3dbfba00a576276c853acabd7b9cb2f2b4b0a90738dd9baeaef75d0f42fa94119a70a09b0ed38fbebb6dde92c9ca062447018821f36c19d6fe34eb8c357d62e5d33e5c1d35035472ef7dd22a7425cdba0c5\"}]"}
    },
    "response": {
      "status": {"operator": "equals", "value": "200"},
      "headers": [{"key": "Content-Type", "value": "text/plain", "operator": "equals"}]
      "status": {"operator": "equals", "value": "200"}
    }
  },
  {
@@ -0,0 +1,12 @@
{ pkgs ? import <nixpkgs> { } }:

let
  tools = pkgs.callPackage ./tools.nix {};
  sourceFile = ../vendor/nimbus-build-system/vendor/Nim/koch.nim;
in pkgs.fetchFromGitHub {
  owner = "nim-lang";
  repo = "checksums";
  rev = tools.findKeyValue "^ +ChecksumsStableCommit = \"([a-f0-9]+)\"$" sourceFile;
  # WARNING: Requires manual updates when Nim compiler version changes.
  hash = "sha256-RB2IXs2xcfYHhV9d7l1mtHW51mtsrqrYRapSoTikvHw=";
}
@@ -5,8 +5,8 @@ let
  sourceFile = ../vendor/nimbus-build-system/vendor/Nim/config/build_config.txt;
in pkgs.fetchFromGitHub {
  owner = "nim-lang";
  repo = "csources_v1";
  repo = "csources_v2";
  rev = tools.findKeyValue "^nim_csourcesHash=([a-f0-9]+)$" sourceFile;
  # WARNING: Requires manual updates when Nim compiler version changes.
  hash = "sha256-gwBFuR7lzO4zttR/6rgdjXMRxVhwKeLqDwpmOwMyU7A=";
  hash = "sha256-UCLtoxOcGYjBdvHx7A47x6FjLMi6VZqpSs65MN7fpBs=";
}
@@ -19,9 +19,7 @@
let
  inherit (pkgs) stdenv lib writeScriptBin callPackage;

  nimble = callPackage ./nimble.nix {};
  csources = callPackage ./csources.nix {};
  revision = lib.substring 0 8 (src.rev or "dirty");
  revision = lib.substring 0 8 (src.rev or "unknown");
in stdenv.mkDerivation rec {
  pname = "nimbus-eth2";
  version = "${callPackage ./version.nix {}}-${revision}";
@@ -60,9 +58,10 @@ in stdenv.mkDerivation rec {
  preBuild = ''
    pushd vendor/nimbus-build-system/vendor/Nim
    mkdir dist
    cp -r ${nimble} dist/nimble
    cp -r ${csources} csources_v1
    chmod 777 -R dist/nimble csources_v1
    cp -r ${callPackage ./nimble.nix {}} dist/nimble
    cp -r ${callPackage ./checksums.nix {}} dist/checksums
    cp -r ${callPackage ./csources.nix {}} csources_v2
    chmod 777 -R dist/nimble csources_v2
    sed -i 's/isGitRepo(destDir)/false/' tools/deps.nim
    popd
  '';
@@ -7,7 +7,7 @@ let
in {
  findKeyValue = regex: sourceFile:
    let
      linesFrom = sourceFile: splitString "\n" (fileContents sourceFile);
      linesFrom = file: splitString "\n" (fileContents file);
      matching = regex: lines: map (line: match regex line) lines;
      extractMatch = matches: last (flatten (remove null matches));
    in
@ -1,6 +1,6 @@
#!/usr/bin/env bash

# Copyright (c) 2020-2023 Status Research & Development GmbH. Licensed under
# Copyright (c) 2020-2024 Status Research & Development GmbH. Licensed under
# either of:
# - Apache License, version 2.0
# - MIT license
@ -1,6 +1,6 @@
#!/usr/bin/env bash

# Copyright (c) 2020-2021 Status Research & Development GmbH. Licensed under
# Copyright (c) 2020-2024 Status Research & Development GmbH. Licensed under
# either of:
# - Apache License, version 2.0
# - MIT license
@ -1,6 +1,6 @@
#!/usr/bin/env bash

# Copyright (c) 2022 Status Research & Development GmbH. Licensed under
# Copyright (c) 2024 Status Research & Development GmbH. Licensed under
# either of:
# - Apache License, version 2.0
# - MIT license
@ -21,7 +21,7 @@ source "${SCRIPTS_DIR}/bash_utils.sh"

download_geth_stable() {
  if [[ ! -e "${STABLE_GETH_BINARY}" ]]; then
    GETH_VERSION="1.14.5-0dd173a7" # https://geth.ethereum.org/downloads
    GETH_VERSION="1.14.7-aa55f5ea" # https://geth.ethereum.org/downloads
    GETH_URL="https://gethstore.blob.core.windows.net/builds/"

    case "${OS}-${ARCH}" in
@ -173,4 +173,4 @@ suite baseDescription & "Voluntary Exit " & preset():
  for path in walkTests(OpVoluntaryExitDir):
    runTest[SignedVoluntaryExit, typeof applyVoluntaryExit](
      OpVoluntaryExitDir, suiteName, "Voluntary Exit", "voluntary_exit",
      applyVoluntaryExit, path)
      applyVoluntaryExit, path)
@ -108,10 +108,10 @@ suite "EF - Altair - SSZ consensus objects " & preset():
      let hash = loadExpectedHashTreeRoot(path)

      case sszType:
      of "AggregateAndProof": checkSSZ(AggregateAndProof, path, hash)
      of "Attestation": checkSSZ(Attestation, path, hash)
      of "AggregateAndProof": checkSSZ(phase0.AggregateAndProof, path, hash)
      of "Attestation": checkSSZ(phase0.Attestation, path, hash)
      of "AttestationData": checkSSZ(AttestationData, path, hash)
      of "AttesterSlashing": checkSSZ(AttesterSlashing, path, hash)
      of "AttesterSlashing": checkSSZ(phase0.AttesterSlashing, path, hash)
      of "BeaconBlock": checkSSZ(altair.BeaconBlock, path, hash)
      of "BeaconBlockBody": checkSSZ(altair.BeaconBlockBody, path, hash)
      of "BeaconBlockHeader": checkSSZ(BeaconBlockHeader, path, hash)

@ -126,7 +126,8 @@ suite "EF - Altair - SSZ consensus objects " & preset():
      of "Fork": checkSSZ(Fork, path, hash)
      of "ForkData": checkSSZ(ForkData, path, hash)
      of "HistoricalBatch": checkSSZ(HistoricalBatch, path, hash)
      of "IndexedAttestation": checkSSZ(IndexedAttestation, path, hash)
      of "IndexedAttestation":
        checkSSZ(phase0.IndexedAttestation, path, hash)
      of "LightClientBootstrap":
        checkSSZ(altair.LightClientBootstrap, path, hash)
      of "LightClientHeader":

@ -140,7 +141,7 @@ suite "EF - Altair - SSZ consensus objects " & preset():
      of "PendingAttestation": checkSSZ(PendingAttestation, path, hash)
      of "ProposerSlashing": checkSSZ(ProposerSlashing, path, hash)
      of "SignedAggregateAndProof":
        checkSSZ(SignedAggregateAndProof, path, hash)
        checkSSZ(phase0.SignedAggregateAndProof, path, hash)
      of "SignedBeaconBlock": checkSSZ(altair.SignedBeaconBlock, path, hash)
      of "SignedBeaconBlockHeader":
        checkSSZ(SignedBeaconBlockHeader, path, hash)
@ -75,7 +75,7 @@ proc runTest[T, U](

suite baseDescription & "Attestation " & preset():
  proc applyAttestation(
      preState: var bellatrix.BeaconState, attestation: Attestation):
      preState: var bellatrix.BeaconState, attestation: phase0.Attestation):
      Result[void, cstring] =
    var cache: StateCache
    let

@ -90,7 +90,7 @@ suite baseDescription & "Attestation " & preset():
    ok()

  for path in walkTests(OpAttestationsDir):
    runTest[Attestation, typeof applyAttestation](
    runTest[phase0.Attestation, typeof applyAttestation](
      OpAttestationsDir, suiteName, "Attestation", "attestation",
      applyAttestation, path)

@ -198,4 +198,4 @@ suite baseDescription & "Voluntary Exit " & preset():
  for path in walkTests(OpVoluntaryExitDir):
    runTest[SignedVoluntaryExit, typeof applyVoluntaryExit](
      OpVoluntaryExitDir, suiteName, "Voluntary Exit", "voluntary_exit",
      applyVoluntaryExit, path)
      applyVoluntaryExit, path)
@ -108,8 +108,8 @@ suite "EF - Bellatrix - SSZ consensus objects " & preset():
      let hash = loadExpectedHashTreeRoot(path)

      case sszType:
      of "AggregateAndProof": checkSSZ(AggregateAndProof, path, hash)
      of "Attestation": checkSSZ(Attestation, path, hash)
      of "AggregateAndProof": checkSSZ(phase0.AggregateAndProof, path, hash)
      of "Attestation": checkSSZ(phase0.Attestation, path, hash)
      of "AttestationData": checkSSZ(AttestationData, path, hash)
      of "AttesterSlashing": checkSSZ(phase0.AttesterSlashing, path, hash)
      of "BeaconBlock": checkSSZ(bellatrix.BeaconBlock, path, hash)

@ -123,9 +123,10 @@ suite "EF - Bellatrix - SSZ consensus objects " & preset():
      of "DepositMessage": checkSSZ(DepositMessage, path, hash)
      of "Eth1Block": checkSSZ(Eth1Block, path, hash)
      of "Eth1Data": checkSSZ(Eth1Data, path, hash)
      of "ExecutionPayload": checkSSZ(ExecutionPayload, path, hash)
      of "ExecutionPayload":
        checkSSZ(bellatrix.ExecutionPayload, path, hash)
      of "ExecutionPayloadHeader":
        checkSSZ(ExecutionPayloadHeader, path, hash)
        checkSSZ(bellatrix.ExecutionPayloadHeader, path, hash)
      of "Fork": checkSSZ(Fork, path, hash)
      of "ForkData": checkSSZ(ForkData, path, hash)
      of "HistoricalBatch": checkSSZ(HistoricalBatch, path, hash)

@ -145,7 +146,7 @@ suite "EF - Bellatrix - SSZ consensus objects " & preset():
      of "PowBlock": checkSSZ(PowBlock, path, hash)
      of "ProposerSlashing": checkSSZ(ProposerSlashing, path, hash)
      of "SignedAggregateAndProof":
        checkSSZ(SignedAggregateAndProof, path, hash)
        checkSSZ(phase0.SignedAggregateAndProof, path, hash)
      of "SignedBeaconBlock":
        checkSSZ(bellatrix.SignedBeaconBlock, path, hash)
      of "SignedBeaconBlockHeader":

@ -164,4 +165,4 @@ suite "EF - Bellatrix - SSZ consensus objects " & preset():
      of "Validator": checkSSZ(Validator, path, hash)
      of "VoluntaryExit": checkSSZ(VoluntaryExit, path, hash)
      else:
        raise newException(ValueError, "Unsupported test: " & sszType)
        raise newException(ValueError, "Unsupported test: " & sszType)
@ -79,7 +79,7 @@ proc runTest[T, U](

suite baseDescription & "Attestation " & preset():
  proc applyAttestation(
      preState: var capella.BeaconState, attestation: Attestation):
      preState: var capella.BeaconState, attestation: phase0.Attestation):
      Result[void, cstring] =
    var cache: StateCache
    let

@ -94,14 +94,14 @@ suite baseDescription & "Attestation " & preset():
    ok()

  for path in walkTests(OpAttestationsDir):
    runTest[Attestation, typeof applyAttestation](
    runTest[phase0.Attestation, typeof applyAttestation](
      OpAttestationsDir, suiteName, "Attestation", "attestation",
      applyAttestation, path)

suite baseDescription & "Attester Slashing " & preset():
  proc applyAttesterSlashing(
      preState: var capella.BeaconState, attesterSlashing: AttesterSlashing):
      Result[void, cstring] =
      preState: var capella.BeaconState,
      attesterSlashing: phase0.AttesterSlashing): Result[void, cstring] =
    var cache: StateCache
    doAssert (? process_attester_slashing(
      defaultRuntimeConfig, preState, attesterSlashing, {strictVerification},

@ -109,7 +109,7 @@ suite baseDescription & "Attester Slashing " & preset():
    ok()

  for path in walkTests(OpAttSlashingDir):
    runTest[AttesterSlashing, typeof applyAttesterSlashing](
    runTest[phase0.AttesterSlashing, typeof applyAttesterSlashing](
      OpAttSlashingDir, suiteName, "Attester Slashing", "attester_slashing",
      applyAttesterSlashing, path)

@ -226,4 +226,4 @@ suite baseDescription & "Withdrawals " & preset():
  for path in walkTests(OpWithdrawalsDir):
    runTest[capella.ExecutionPayload, typeof applyWithdrawals](
      OpWithdrawalsDir, suiteName, "Withdrawals", "execution_payload",
      applyWithdrawals, path)
      applyWithdrawals, path)
@ -110,10 +110,10 @@ suite "EF - Capella - SSZ consensus objects " & preset():
      let hash = loadExpectedHashTreeRoot(path)

      case sszType:
      of "AggregateAndProof": checkSSZ(AggregateAndProof, path, hash)
      of "Attestation": checkSSZ(Attestation, path, hash)
      of "AggregateAndProof": checkSSZ(phase0.AggregateAndProof, path, hash)
      of "Attestation": checkSSZ(phase0.Attestation, path, hash)
      of "AttestationData": checkSSZ(AttestationData, path, hash)
      of "AttesterSlashing": checkSSZ(AttesterSlashing, path, hash)
      of "AttesterSlashing": checkSSZ(phase0.AttesterSlashing, path, hash)
      of "BeaconBlock": checkSSZ(capella.BeaconBlock, path, hash)
      of "BeaconBlockBody": checkSSZ(capella.BeaconBlockBody, path, hash)
      of "BeaconBlockHeader": checkSSZ(BeaconBlockHeader, path, hash)

@ -126,14 +126,16 @@ suite "EF - Capella - SSZ consensus objects " & preset():
      of "DepositMessage": checkSSZ(DepositMessage, path, hash)
      of "Eth1Block": checkSSZ(Eth1Block, path, hash)
      of "Eth1Data": checkSSZ(Eth1Data, path, hash)
      of "ExecutionPayload": checkSSZ(ExecutionPayload, path, hash)
      of "ExecutionPayload":
        checkSSZ(capella.ExecutionPayload, path, hash)
      of "ExecutionPayloadHeader":
        checkSSZ(ExecutionPayloadHeader, path, hash)
        checkSSZ(capella.ExecutionPayloadHeader, path, hash)
      of "Fork": checkSSZ(Fork, path, hash)
      of "ForkData": checkSSZ(ForkData, path, hash)
      of "HistoricalBatch": checkSSZ(HistoricalBatch, path, hash)
      of "HistoricalSummary": checkSSZ(HistoricalSummary, path, hash)
      of "IndexedAttestation": checkSSZ(IndexedAttestation, path, hash)
      of "IndexedAttestation":
        checkSSZ(phase0.IndexedAttestation, path, hash)
      of "LightClientBootstrap":
        checkSSZ(capella.LightClientBootstrap, path, hash)
      of "LightClientHeader":

@ -148,7 +150,7 @@ suite "EF - Capella - SSZ consensus objects " & preset():
      of "PowBlock": checkSSZ(PowBlock, path, hash)
      of "ProposerSlashing": checkSSZ(ProposerSlashing, path, hash)
      of "SignedAggregateAndProof":
        checkSSZ(SignedAggregateAndProof, path, hash)
        checkSSZ(phase0.SignedAggregateAndProof, path, hash)
      of "SignedBeaconBlock":
        checkSSZ(capella.SignedBeaconBlock, path, hash)
      of "SignedBeaconBlockHeader":
@ -79,7 +79,7 @@ proc runTest[T, U](

suite baseDescription & "Attestation " & preset():
  proc applyAttestation(
      preState: var deneb.BeaconState, attestation: Attestation):
      preState: var deneb.BeaconState, attestation: phase0.Attestation):
      Result[void, cstring] =
    var cache: StateCache
    let

@ -94,14 +94,14 @@ suite baseDescription & "Attestation " & preset():
    ok()

  for path in walkTests(OpAttestationsDir):
    runTest[Attestation, typeof applyAttestation](
    runTest[phase0.Attestation, typeof applyAttestation](
      OpAttestationsDir, suiteName, "Attestation", "attestation",
      applyAttestation, path)

suite baseDescription & "Attester Slashing " & preset():
  proc applyAttesterSlashing(
      preState: var deneb.BeaconState, attesterSlashing: AttesterSlashing):
      Result[void, cstring] =
      preState: var deneb.BeaconState,
      attesterSlashing: phase0.AttesterSlashing): Result[void, cstring] =
    var cache: StateCache
    doAssert (? process_attester_slashing(
      defaultRuntimeConfig, preState, attesterSlashing, {strictVerification},

@ -109,7 +109,7 @@ suite baseDescription & "Attester Slashing " & preset():
    ok()

  for path in walkTests(OpAttSlashingDir):
    runTest[AttesterSlashing, typeof applyAttesterSlashing](
    runTest[phase0.AttesterSlashing, typeof applyAttesterSlashing](
      OpAttSlashingDir, suiteName, "Attester Slashing", "attester_slashing",
      applyAttesterSlashing, path)

@ -228,4 +228,4 @@ suite baseDescription & "Withdrawals " & preset():
  for path in walkTests(OpWithdrawalsDir):
    runTest[deneb.ExecutionPayload, typeof applyWithdrawals](
      OpWithdrawalsDir, suiteName, "Withdrawals", "execution_payload",
      applyWithdrawals, path)
      applyWithdrawals, path)
@ -113,10 +113,10 @@ suite "EF - Deneb - SSZ consensus objects " & preset():
      let hash = loadExpectedHashTreeRoot(path)

      case sszType:
      of "AggregateAndProof": checkSSZ(AggregateAndProof, path, hash)
      of "Attestation": checkSSZ(Attestation, path, hash)
      of "AggregateAndProof": checkSSZ(phase0.AggregateAndProof, path, hash)
      of "Attestation": checkSSZ(phase0.Attestation, path, hash)
      of "AttestationData": checkSSZ(AttestationData, path, hash)
      of "AttesterSlashing": checkSSZ(AttesterSlashing, path, hash)
      of "AttesterSlashing": checkSSZ(phase0.AttesterSlashing, path, hash)
      of "BeaconBlock": checkSSZ(deneb.BeaconBlock, path, hash)
      of "BeaconBlockBody": checkSSZ(deneb.BeaconBlockBody, path, hash)
      of "BeaconBlockHeader": checkSSZ(BeaconBlockHeader, path, hash)

@ -131,18 +131,22 @@ suite "EF - Deneb - SSZ consensus objects " & preset():
      of "DepositMessage": checkSSZ(DepositMessage, path, hash)
      of "Eth1Block": checkSSZ(Eth1Block, path, hash)
      of "Eth1Data": checkSSZ(Eth1Data, path, hash)
      of "ExecutionPayload": checkSSZ(ExecutionPayload, path, hash)
      of "ExecutionPayload":
        checkSSZ(deneb.ExecutionPayload, path, hash)
      of "ExecutionPayloadHeader":
        checkSSZ(ExecutionPayloadHeader, path, hash)
        checkSSZ(deneb.ExecutionPayloadHeader, path, hash)
      of "Fork": checkSSZ(Fork, path, hash)
      of "ForkData": checkSSZ(ForkData, path, hash)
      of "HistoricalBatch": checkSSZ(HistoricalBatch, path, hash)
      of "HistoricalSummary": checkSSZ(HistoricalSummary, path, hash)
      of "IndexedAttestation": checkSSZ(IndexedAttestation, path, hash)
      of "IndexedAttestation":
        checkSSZ(phase0.IndexedAttestation, path, hash)
      of "LightClientBootstrap":
        checkSSZ(deneb.LightClientBootstrap, path, hash)
      of "LightClientHeader": checkSSZ(deneb.LightClientHeader, path, hash)
      of "LightClientUpdate": checkSSZ(deneb.LightClientUpdate, path, hash)
      of "LightClientHeader":
        checkSSZ(deneb.LightClientHeader, path, hash)
      of "LightClientUpdate":
        checkSSZ(deneb.LightClientUpdate, path, hash)
      of "LightClientFinalityUpdate":
        checkSSZ(deneb.LightClientFinalityUpdate, path, hash)
      of "LightClientOptimisticUpdate":

@ -151,7 +155,7 @@ suite "EF - Deneb - SSZ consensus objects " & preset():
      of "PowBlock": checkSSZ(PowBlock, path, hash)
      of "ProposerSlashing": checkSSZ(ProposerSlashing, path, hash)
      of "SignedAggregateAndProof":
        checkSSZ(SignedAggregateAndProof, path, hash)
        checkSSZ(phase0.SignedAggregateAndProof, path, hash)
      of "SignedBeaconBlock":
        checkSSZ(deneb.SignedBeaconBlock, path, hash)
      of "SignedBeaconBlockHeader":
@ -13,7 +13,7 @@ import
  chronicles,
  # Beacon chain internals
  ../../../beacon_chain/spec/[presets, state_transition_epoch],
  ../../../beacon_chain/spec/datatypes/[altair, deneb],
  ../../../beacon_chain/spec/datatypes/altair,
  # Test utilities
  ../../testutil,
  ../fixtures_utils, ../os_ops,

@ -22,6 +22,8 @@ import

from std/sequtils import mapIt, toSeq
from std/strutils import rsplit
from ../../../beacon_chain/spec/datatypes/deneb import BeaconState
from ../../teststateutil import checkPerValidatorBalanceCalc

const
  RootDir = SszTestsDir/const_preset/"deneb"/"epoch_processing"

@ -73,6 +75,7 @@ template runSuite(
# ---------------------------------------------------------------
runSuite(JustificationFinalizationDir, "Justification & Finalization"):
  let info = altair.EpochInfo.init(state)
  check checkPerValidatorBalanceCalc(state)
  process_justification_and_finalization(state, info.balances)
  Result[void, cstring].ok()

@ -80,6 +83,7 @@ runSuite(JustificationFinalizationDir, "Justification & Finalization"):
# ---------------------------------------------------------------
runSuite(InactivityDir, "Inactivity"):
  let info = altair.EpochInfo.init(state)
  check checkPerValidatorBalanceCalc(state)
  process_inactivity_updates(cfg, state, info)
  Result[void, cstring].ok()
@ -276,4 +276,4 @@ suite baseDescription & "Withdrawals " & preset():
  for path in walkTests(OpWithdrawalsDir):
    runTest[electra.ExecutionPayload, typeof applyWithdrawals](
      OpWithdrawalsDir, suiteName, "Withdrawals", "execution_payload",
      applyWithdrawals, path)
      applyWithdrawals, path)
@ -136,9 +136,10 @@ suite "EF - Electra - SSZ consensus objects " & preset():
      of "DepositRequest": checkSSZ(DepositRequest, path, hash)
      of "Eth1Block": checkSSZ(Eth1Block, path, hash)
      of "Eth1Data": checkSSZ(Eth1Data, path, hash)
      of "ExecutionPayload": checkSSZ(ExecutionPayload, path, hash)
      of "ExecutionPayload":
        checkSSZ(electra.ExecutionPayload, path, hash)
      of "ExecutionPayloadHeader":
        checkSSZ(ExecutionPayloadHeader, path, hash)
        checkSSZ(electra.ExecutionPayloadHeader, path, hash)
      of "Fork": checkSSZ(Fork, path, hash)
      of "ForkData": checkSSZ(ForkData, path, hash)
      of "HistoricalBatch": checkSSZ(HistoricalBatch, path, hash)

@ -146,8 +147,10 @@ suite "EF - Electra - SSZ consensus objects " & preset():
      of "IndexedAttestation": checkSSZ(electra.IndexedAttestation, path, hash)
      of "LightClientBootstrap":
        checkSSZ(electra.LightClientBootstrap, path, hash)
      of "LightClientHeader": checkSSZ(electra.LightClientHeader, path, hash)
      of "LightClientUpdate": checkSSZ(electra.LightClientUpdate, path, hash)
      of "LightClientHeader":
        checkSSZ(electra.LightClientHeader, path, hash)
      of "LightClientUpdate":
        checkSSZ(electra.LightClientUpdate, path, hash)
      of "LightClientFinalityUpdate":
        checkSSZ(electra.LightClientFinalityUpdate, path, hash)
      of "LightClientOptimisticUpdate":

@ -184,4 +187,4 @@ suite "EF - Electra - SSZ consensus objects " & preset():
      of "VoluntaryExit": checkSSZ(VoluntaryExit, path, hash)
      of "WithdrawalRequest": checkSSZ(WithdrawalRequest, path, hash)
      else:
        raise newException(ValueError, "Unsupported test: " & sszType)
        raise newException(ValueError, "Unsupported test: " & sszType)
@ -13,7 +13,7 @@ import
  chronicles,
  # Beacon chain internals
  ../../../beacon_chain/spec/[presets, state_transition_epoch],
  ../../../beacon_chain/spec/datatypes/[altair, electra],
  ../../../beacon_chain/spec/datatypes/altair,
  # Test utilities
  ../../testutil,
  ../fixtures_utils, ../os_ops,

@ -22,6 +22,8 @@ import

from std/sequtils import mapIt, toSeq
from std/strutils import rsplit
from ../../../beacon_chain/spec/datatypes/electra import BeaconState
from ../../teststateutil import checkPerValidatorBalanceCalc

const
  RootDir = SszTestsDir/const_preset/"electra"/"epoch_processing"

@ -76,6 +78,7 @@ template runSuite(
# ---------------------------------------------------------------
runSuite(JustificationFinalizationDir, "Justification & Finalization"):
  let info = altair.EpochInfo.init(state)
  check checkPerValidatorBalanceCalc(state)
  process_justification_and_finalization(state, info.balances)
  Result[void, cstring].ok()

@ -83,6 +86,7 @@ runSuite(JustificationFinalizationDir, "Justification & Finalization"):
# ---------------------------------------------------------------
runSuite(InactivityDir, "Inactivity"):
  let info = altair.EpochInfo.init(state)
  check checkPerValidatorBalanceCalc(state)
  process_inactivity_updates(cfg, state, info)
  Result[void, cstring].ok()
@ -150,4 +150,4 @@ suite baseDescription & "Voluntary Exit " & preset():
  for path in walkTests(OpVoluntaryExitDir):
    runTest[SignedVoluntaryExit, typeof applyVoluntaryExit](
      OpVoluntaryExitDir, suiteName, "Voluntary Exit", "voluntary_exit",
      applyVoluntaryExit, path)
      applyVoluntaryExit, path)
@ -108,10 +108,10 @@ suite "EF - Phase 0 - SSZ consensus objects " & preset():
      let hash = loadExpectedHashTreeRoot(path)

      case sszType:
      of "AggregateAndProof": checkSSZ(AggregateAndProof, path, hash)
      of "Attestation": checkSSZ(Attestation, path, hash)
      of "AggregateAndProof": checkSSZ(phase0.AggregateAndProof, path, hash)
      of "Attestation": checkSSZ(phase0.Attestation, path, hash)
      of "AttestationData": checkSSZ(AttestationData, path, hash)
      of "AttesterSlashing": checkSSZ(AttesterSlashing, path, hash)
      of "AttesterSlashing": checkSSZ(phase0.AttesterSlashing, path, hash)
      of "BeaconBlock": checkSSZ(phase0.BeaconBlock, path, hash)
      of "BeaconBlockBody": checkSSZ(phase0.BeaconBlockBody, path, hash)
      of "BeaconBlockHeader": checkSSZ(BeaconBlockHeader, path, hash)

@ -125,11 +125,12 @@ suite "EF - Phase 0 - SSZ consensus objects " & preset():
      of "Fork": checkSSZ(Fork, path, hash)
      of "ForkData": checkSSZ(ForkData, path, hash)
      of "HistoricalBatch": checkSSZ(HistoricalBatch, path, hash)
      of "IndexedAttestation": checkSSZ(IndexedAttestation, path, hash)
      of "IndexedAttestation":
        checkSSZ(phase0.IndexedAttestation, path, hash)
      of "PendingAttestation": checkSSZ(PendingAttestation, path, hash)
      of "ProposerSlashing": checkSSZ(ProposerSlashing, path, hash)
      of "SignedAggregateAndProof":
        checkSSZ(SignedAggregateAndProof, path, hash)
        checkSSZ(phase0.SignedAggregateAndProof, path, hash)
      of "SignedBeaconBlock": checkSSZ(phase0.SignedBeaconBlock, path, hash)
      of "SignedBeaconBlockHeader":
        checkSSZ(SignedBeaconBlockHeader, path, hash)
@ -19,7 +19,7 @@ import
  ../../beacon_chain/consensus_object_pools/[
    blockchain_dag, block_clearance, block_quarantine, spec_cache],
  # Third-party
  yaml,
  yaml/tojson,
  # Test
  ../testutil, ../testdbutil,
  ./fixtures_utils, ./os_ops

@ -102,7 +102,7 @@ proc loadOps(
    IOError, KeyError, UnconsumedInput, ValueError,
    YamlConstructionError, YamlParserError].} =
  let stepsYAML = os_ops.readFile(path/"steps.yaml")
  let steps = yaml.loadToJson(stepsYAML)
  let steps = loadToJson(stepsYAML)

  result = @[]
  for step in steps[0]:

@ -136,7 +136,8 @@ proc loadOps(
          blobs: distinctBase(parseTest(
            path/(step["blobs"].getStr()) & ".ssz_snappy",
            SSZ, List[KzgBlob, Limit MAX_BLOBS_PER_BLOCK])),
          proofs: step["proofs"].mapIt(KzgProof.fromHex(it.getStr())))
          proofs: step["proofs"].mapIt(
            KzgProof(bytes: fromHex(array[48, byte], it.getStr()))))
      else:
        Opt.none(BlobData)
    else:

@ -407,4 +408,4 @@ from ../../beacon_chain/conf import loadKzgTrustedSetup
discard loadKzgTrustedSetup() # Required for Deneb tests

fcSuite("ForkChoice", "fork_choice")
fcSuite("Sync", "sync")
fcSuite("Sync", "sync")
@ -10,7 +10,7 @@

import
  std/json,
  yaml,
  yaml/tojson,
  kzg4844/kzg_ex,
  stew/byteutils,
  ../testutil,

@ -39,7 +39,7 @@ proc runBlobToKzgCommitmentTest(suiteName, suitePath, path: string) =
  let relativePathComponent = path.relativeTestPathComponent(suitePath)
  test "KZG - Blob to KZG commitment - " & relativePathComponent:
    let
      data = yaml.loadToJson(os_ops.readFile(path/"data.yaml"))[0]
      data = loadToJson(os_ops.readFile(path/"data.yaml"))[0]
      output = data["output"]
      blob = fromHex[131072](data["input"]["blob"].getStr)

@ -50,18 +50,18 @@ proc runBlobToKzgCommitmentTest(suiteName, suitePath, path: string) =
    if blob.isNone:
      check output.kind == JNull
    else:
      let commitment = blobToKzgCommitment(blob.get)
      let commitment = blobToKzgCommitment(KzgBlob(bytes: blob.get))
      check:
        if commitment.isErr:
          output.kind == JNull
        else:
          commitment.get == fromHex[48](output.getStr).get
          commitment.get().bytes == fromHex[48](output.getStr).get

proc runVerifyKzgProofTest(suiteName, suitePath, path: string) =
  let relativePathComponent = path.relativeTestPathComponent(suitePath)
  test "KZG - Verify KZG proof - " & relativePathComponent:
    let
      data = yaml.loadToJson(os_ops.readFile(path/"data.yaml"))[0]
      data = loadToJson(os_ops.readFile(path/"data.yaml"))[0]
      output = data["output"]
      commitment = fromHex[48](data["input"]["commitment"].getStr)
      z = fromHex[32](data["input"]["z"].getStr)

@ -75,7 +75,10 @@ proc runVerifyKzgProofTest(suiteName, suitePath, path: string) =
    if commitment.isNone or z.isNone or y.isNone or proof.isNone:
      check output.kind == JNull
    else:
      let v = verifyProof(commitment.get, z.get, y.get, proof.get)
      let v = verifyProof(
        KzgCommitment(bytes: commitment.get),
        KzgBytes32(bytes: z.get), KzgBytes32(bytes: y.get),
        KzgBytes48(bytes: proof.get))
      check:
        if v.isErr:
          output.kind == JNull

@ -86,7 +89,7 @@ proc runVerifyBlobKzgProofTest(suiteName, suitePath, path: string) =
  let relativePathComponent = path.relativeTestPathComponent(suitePath)
  test "KZG - Verify blob KZG proof - " & relativePathComponent:
    let
      data = yaml.loadToJson(os_ops.readFile(path/"data.yaml"))[0]
      data = loadToJson(os_ops.readFile(path/"data.yaml"))[0]
      output = data["output"]
      blob = fromHex[131072](data["input"]["blob"].getStr)
      commitment = fromHex[48](data["input"]["commitment"].getStr)

@ -100,7 +103,10 @@ proc runVerifyBlobKzgProofTest(suiteName, suitePath, path: string) =
    if blob.isNone or commitment.isNone or proof.isNone:
      check output.kind == JNull
    else:
      let v = verifyBlobKzgProof(blob.get, commitment.get, proof.get)
      let v = verifyBlobKzgProof(
        KzgBlob(bytes: blob.get),
        KzgBytes48(bytes: commitment.get),
        KzgBytes48(bytes: proof.get))
      check:
        if v.isErr:
          output.kind == JNull

@ -111,7 +117,7 @@ proc runVerifyBlobKzgProofBatchTest(suiteName, suitePath, path: string) =
  let relativePathComponent = path.relativeTestPathComponent(suitePath)
  test "KZG - Verify blob KZG proof batch - " & relativePathComponent:
    let
      data = yaml.loadToJson(os_ops.readFile(path/"data.yaml"))[0]
      data = loadToJson(os_ops.readFile(path/"data.yaml"))[0]
      output = data["output"]
      blobs = data["input"]["blobs"].mapIt(fromHex[131072](it.getStr))
      commitments = data["input"]["commitments"].mapIt(fromHex[48](it.getStr))

@ -127,7 +133,9 @@ proc runVerifyBlobKzgProofBatchTest(suiteName, suitePath, path: string) =
      check output.kind == JNull
    else:
      let v = verifyBlobKzgProofBatch(
        blobs.mapIt(it.get), commitments.mapIt(it.get), proofs.mapIt(it.get))
        blobs.mapIt(KzgBlob(bytes: it.get)),
        commitments.mapIt(KzgCommitment(bytes: it.get)),
        proofs.mapIt(KzgProof(bytes: it.get)))
      check:
        if v.isErr:
          output.kind == JNull

@ -138,7 +146,7 @@ proc runComputeKzgProofTest(suiteName, suitePath, path: string) =
  let relativePathComponent = path.relativeTestPathComponent(suitePath)
  test "KZG - Compute KZG proof - " & relativePathComponent:
    let
      data = yaml.loadToJson(os_ops.readFile(path/"data.yaml"))[0]
      data = loadToJson(os_ops.readFile(path/"data.yaml"))[0]
      output = data["output"]
      blob = fromHex[131072](data["input"]["blob"].getStr)
      z = fromHex[32](data["input"]["z"].getStr)

@ -150,7 +158,8 @@ proc runComputeKzgProofTest(suiteName, suitePath, path: string) =
    if blob.isNone or z.isNone:
      check output.kind == JNull
    else:
      let p = computeKzgProof(blob.get, z.get)
      let p = computeKzgProof(
        KzgBlob(bytes: blob.get), KzgBytes32(bytes: z.get))
      if p.isErr:
        check output.kind == JNull
      else:

@ -158,14 +167,14 @@ proc runComputeKzgProofTest(suiteName, suitePath, path: string) =
          proof = fromHex[48](output[0].getStr)
          y = fromHex[32](output[1].getStr)
        check:
          p.get.proof == proof.get
          p.get.y == y.get
          p.get.proof.bytes == proof.get
          p.get.y.bytes == y.get

proc runComputeBlobKzgProofTest(suiteName, suitePath, path: string) =
  let relativePathComponent = path.relativeTestPathComponent(suitePath)
  test "KZG - Compute blob KZG proof - " & relativePathComponent:
    let
      data = yaml.loadToJson(os_ops.readFile(path/"data.yaml"))[0]
      data = loadToJson(os_ops.readFile(path/"data.yaml"))[0]
      output = data["output"]
      blob = fromHex[131072](data["input"]["blob"].getStr)
      commitment = fromHex[48](data["input"]["commitment"].getStr)

@ -177,11 +186,12 @@ proc runComputeBlobKzgProofTest(suiteName, suitePath, path: string) =
    if blob.isNone or commitment.isNone:
      check output.kind == JNull
    else:
      let p = computeBlobKzgProof(blob.get, commitment.get)
      let p = computeBlobKzgProof(
        KzgBlob(bytes: blob.get), KzgBytes48(bytes: commitment.get))
      if p.isErr:
        check output.kind == JNull
      else:
        check p.get == fromHex[48](output.getStr).get
        check p.get.bytes == fromHex[48](output.getStr).get

from std/algorithm import sorted

@ -227,4 +237,4 @@ suite suiteName:
  for kind, path in walkDir(testsDir, relative = true, checkDir = true):
    runComputeBlobKzgProofTest(suiteName, testsDir, testsDir / path)

doAssert Kzg.freeTrustedSetup().isOk
doAssert Kzg.freeTrustedSetup().isOk
@ -16,7 +16,7 @@ import
  chronicles,
  taskpools,
  # Third-party
  yaml,
  yaml/tojson,
  # Beacon chain internals
  ../../beacon_chain/beacon_chain_db,
  ../../beacon_chain/consensus_object_pools/[block_clearance, block_quarantine],

@ -88,7 +88,7 @@ proc loadSteps(
      loadForked(t, s, path, fork_digests)

  let stepsYAML = os_ops.readFile(path/"steps.yaml")
  let steps = yaml.loadToJson(stepsYAML)
  let steps = loadToJson(stepsYAML)

  result = @[]
  for step in steps[0]:
@ -14,7 +14,7 @@ import
  # Status libraries
  stew/byteutils,
  # Third-party
  yaml,
  yaml, yaml/tojson,
  # Beacon chain internals
  ../../beacon_chain/spec/[forks, light_client_sync],
  # Test utilities

@ -59,7 +59,7 @@ proc loadSteps(
  ): seq[TestStep] {.raises: [
      KeyError, ValueError, YamlConstructionError, YamlParserError].} =
  let stepsYAML = os_ops.readFile(path/"steps.yaml")
  let steps = yaml.loadToJson(stepsYAML)
  let steps = loadToJson(stepsYAML)

  result = @[]
  for step in steps[0]:
@ -11,7 +11,7 @@
import
  chronicles,
  ../../beacon_chain/spec/forks,
  ../../beacon_chain/spec/state_transition,
  ../../beacon_chain/spec/[state_transition, state_transition_epoch],
  ./os_ops,
  ../testutil

@ -21,6 +21,7 @@ from ../../beacon_chain/spec/presets import
  const_preset, defaultRuntimeConfig
from ./fixtures_utils import
  SSZ, SszTestsDir, hash_tree_root, parseTest, readSszBytes, toSszType
from ../teststateutil import checkPerValidatorBalanceCalc

proc runTest(
    consensusFork: static ConsensusFork,

@ -52,6 +53,9 @@ proc runTest(
        discard state_transition(
          defaultRuntimeConfig, fhPreState[], blck, cache, info, flags = {},
          noRollback).expect("should apply block")
        withState(fhPreState[]):
          when consensusFork >= ConsensusFork.Deneb:
            check checkPerValidatorBalanceCalc(forkyState.data)
      else:
        let res = state_transition(
          defaultRuntimeConfig, fhPreState[], blck, cache, info, flags = {},
@ -100,6 +100,13 @@ func withDigest(blck: deneb.TrustedBeaconBlock):
    root: hash_tree_root(blck)
  )

func withDigest(blck: electra.TrustedBeaconBlock):
    electra.TrustedSignedBeaconBlock =
  electra.TrustedSignedBeaconBlock(
    message: blck,
    root: hash_tree_root(blck)
  )

proc getTestStates(consensusFork: ConsensusFork): auto =
  let
    db = makeTestDB(SLOTS_PER_EPOCH)

@ -113,7 +120,7 @@ proc getTestStates(consensusFork: ConsensusFork): auto =

  testStates

debugComment "add some electra states, and test electra state/block loading/etc"
debugComment "add some electra states, and test electra state loading/etc"

# Each set of states gets used twice, so scope them to module
let

@ -153,6 +160,7 @@ suite "Beacon chain DB" & preset():
      not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock)
      not db.containsBlock(root, capella.TrustedSignedBeaconBlock)
      not db.containsBlock(root, deneb.TrustedSignedBeaconBlock)
      not db.containsBlock(root, electra.TrustedSignedBeaconBlock)
      db.getBlock(root, phase0.TrustedSignedBeaconBlock).get() == signedBlock
      db.getBlockSSZ(root, tmp, phase0.TrustedSignedBeaconBlock)
      db.getBlockSZ(root, tmp2, phase0.TrustedSignedBeaconBlock)

@ -168,6 +176,7 @@ suite "Beacon chain DB" & preset():
      not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock)
      not db.containsBlock(root, capella.TrustedSignedBeaconBlock)
      not db.containsBlock(root, deneb.TrustedSignedBeaconBlock)
      not db.containsBlock(root, electra.TrustedSignedBeaconBlock)
      db.getBlock(root, phase0.TrustedSignedBeaconBlock).isErr()
      not db.getBlockSSZ(root, tmp, phase0.TrustedSignedBeaconBlock)
      not db.getBlockSZ(root, tmp2, phase0.TrustedSignedBeaconBlock)

@ -200,6 +209,7 @@ suite "Beacon chain DB" & preset():
      not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock)
      not db.containsBlock(root, capella.TrustedSignedBeaconBlock)
      not db.containsBlock(root, deneb.TrustedSignedBeaconBlock)
      not db.containsBlock(root, electra.TrustedSignedBeaconBlock)
      db.getBlock(root, altair.TrustedSignedBeaconBlock).get() == signedBlock
      db.getBlockSSZ(root, tmp, altair.TrustedSignedBeaconBlock)
      db.getBlockSZ(root, tmp2, altair.TrustedSignedBeaconBlock)

@ -215,6 +225,7 @@ suite "Beacon chain DB" & preset():
      not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock)
      not db.containsBlock(root, capella.TrustedSignedBeaconBlock)
      not db.containsBlock(root, deneb.TrustedSignedBeaconBlock)
      not db.containsBlock(root, electra.TrustedSignedBeaconBlock)
      db.getBlock(root, altair.TrustedSignedBeaconBlock).isErr()
      not db.getBlockSSZ(root, tmp, altair.TrustedSignedBeaconBlock)
      not db.getBlockSZ(root, tmp2, altair.TrustedSignedBeaconBlock)

@ -247,6 +258,7 @@ suite "Beacon chain DB" & preset():
      db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock)
      not db.containsBlock(root, capella.TrustedSignedBeaconBlock)
      not db.containsBlock(root, deneb.TrustedSignedBeaconBlock)
      not db.containsBlock(root, electra.TrustedSignedBeaconBlock)
      db.getBlock(root, bellatrix.TrustedSignedBeaconBlock).get() == signedBlock
      db.getBlockSSZ(root, tmp, bellatrix.TrustedSignedBeaconBlock)
      db.getBlockSZ(root, tmp2, bellatrix.TrustedSignedBeaconBlock)

@ -262,6 +274,7 @@ suite "Beacon chain DB" & preset():
      not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock)
      not db.containsBlock(root, capella.TrustedSignedBeaconBlock)
      not db.containsBlock(root, deneb.TrustedSignedBeaconBlock)
      not db.containsBlock(root, electra.TrustedSignedBeaconBlock)
      db.getBlock(root, bellatrix.TrustedSignedBeaconBlock).isErr()
      not db.getBlockSSZ(root, tmp, bellatrix.TrustedSignedBeaconBlock)
      not db.getBlockSZ(root, tmp2, bellatrix.TrustedSignedBeaconBlock)

@ -293,6 +306,7 @@ suite "Beacon chain DB" & preset():
      not db.containsBlock(root, altair.TrustedSignedBeaconBlock)
      not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock)
      not db.containsBlock(root, deneb.TrustedSignedBeaconBlock)
      not db.containsBlock(root, electra.TrustedSignedBeaconBlock)
      db.containsBlock(root, capella.TrustedSignedBeaconBlock)
      db.getBlock(root, capella.TrustedSignedBeaconBlock).get() == signedBlock
      db.getBlockSSZ(root, tmp, capella.TrustedSignedBeaconBlock)

@ -309,6 +323,7 @@ suite "Beacon chain DB" & preset():
      not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock)
      not db.containsBlock(root, capella.TrustedSignedBeaconBlock)
      not db.containsBlock(root, deneb.TrustedSignedBeaconBlock)
      not db.containsBlock(root, electra.TrustedSignedBeaconBlock)
      db.getBlock(root, capella.TrustedSignedBeaconBlock).isErr()
      not db.getBlockSSZ(root, tmp, capella.TrustedSignedBeaconBlock)
      not db.getBlockSZ(root, tmp2, capella.TrustedSignedBeaconBlock)

@ -341,6 +356,7 @@ suite "Beacon chain DB" & preset():
      not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock)
      not db.containsBlock(root, capella.TrustedSignedBeaconBlock)
      db.containsBlock(root, deneb.TrustedSignedBeaconBlock)
      not db.containsBlock(root, electra.TrustedSignedBeaconBlock)
      db.getBlock(root, deneb.TrustedSignedBeaconBlock).get() == signedBlock
      db.getBlockSSZ(root, tmp, deneb.TrustedSignedBeaconBlock)
      db.getBlockSZ(root, tmp2, deneb.TrustedSignedBeaconBlock)

@ -356,6 +372,7 @@ suite "Beacon chain DB" & preset():
      not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock)
      not db.containsBlock(root, capella.TrustedSignedBeaconBlock)
      not db.containsBlock(root, deneb.TrustedSignedBeaconBlock)
      not db.containsBlock(root, electra.TrustedSignedBeaconBlock)
      db.getBlock(root, deneb.TrustedSignedBeaconBlock).isErr()
      not db.getBlockSSZ(root, tmp, deneb.TrustedSignedBeaconBlock)
      not db.getBlockSZ(root, tmp2, deneb.TrustedSignedBeaconBlock)

@ -371,6 +388,55 @@ suite "Beacon chain DB" & preset():

    db.close()

  test "sanity check Electra blocks" & preset():
    let db = BeaconChainDB.new("", inMemory = true)

    let
      signedBlock = withDigest((electra.TrustedBeaconBlock)())
      root = hash_tree_root(signedBlock.message)

    db.putBlock(signedBlock)

    var tmp, tmp2: seq[byte]
    check:
      db.containsBlock(root)
      not db.containsBlock(root, phase0.TrustedSignedBeaconBlock)
      not db.containsBlock(root, altair.TrustedSignedBeaconBlock)
      not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock)
      not db.containsBlock(root, capella.TrustedSignedBeaconBlock)
      not db.containsBlock(root, deneb.TrustedSignedBeaconBlock)
      db.containsBlock(root, electra.TrustedSignedBeaconBlock)
      db.getBlock(root, electra.TrustedSignedBeaconBlock).get() == signedBlock
      db.getBlockSSZ(root, tmp, electra.TrustedSignedBeaconBlock)
      db.getBlockSZ(root, tmp2, electra.TrustedSignedBeaconBlock)
      tmp == SSZ.encode(signedBlock)
      tmp2 == encodeFramed(tmp)
      uncompressedLenFramed(tmp2).isSome

    check:
      db.delBlock(ConsensusFork.Electra, root)
      not db.containsBlock(root)
      not db.containsBlock(root, phase0.TrustedSignedBeaconBlock)
      not db.containsBlock(root, altair.TrustedSignedBeaconBlock)
      not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock)
      not db.containsBlock(root, capella.TrustedSignedBeaconBlock)
      not db.containsBlock(root, deneb.TrustedSignedBeaconBlock)
      not db.containsBlock(root, electra.TrustedSignedBeaconBlock)
      db.getBlock(root, electra.TrustedSignedBeaconBlock).isErr()
      not db.getBlockSSZ(root, tmp, electra.TrustedSignedBeaconBlock)
      not db.getBlockSZ(root, tmp2, electra.TrustedSignedBeaconBlock)

    db.putStateRoot(root, signedBlock.message.slot, root)
    var root2 = root
    root2.data[0] = root.data[0] + 1
    db.putStateRoot(root, signedBlock.message.slot + 1, root2)

    check:
      db.getStateRoot(root, signedBlock.message.slot).get() == root
      db.getStateRoot(root, signedBlock.message.slot + 1).get() == root2

    db.close()

  test "sanity check phase 0 states" & preset():
    let db = makeTestDB(SLOTS_PER_EPOCH)

@ -763,105 +829,109 @@ suite "Beacon chain DB" & preset():
    check:
      hash_tree_root(state2[]) == root

  test "sanity check blobs" & preset():
    const
      blockHeader0 = SignedBeaconBlockHeader(
        message: BeaconBlockHeader(slot: Slot(0)))
      blockHeader1 = SignedBeaconBlockHeader(
        message: BeaconBlockHeader(slot: Slot(1)))
  withAll(BlobFork):
    test "sanity check blobs (" & $blobFork & ")" & preset():
      const
        blockHeader0 = SignedBeaconBlockHeader(
          message: BeaconBlockHeader(slot: Slot(0)))
        blockHeader1 = SignedBeaconBlockHeader(
          message: BeaconBlockHeader(slot: Slot(1)))

    let
      blockRoot0 = hash_tree_root(blockHeader0.message)
      blockRoot1 = hash_tree_root(blockHeader1.message)
      let
        blockRoot0 = hash_tree_root(blockHeader0.message)
        blockRoot1 = hash_tree_root(blockHeader1.message)

      # Ensure minimal-difference pairs on both block root and blob index to
      # verify that blobkey uses both
      blobSidecar0 = BlobSidecar(signed_block_header: blockHeader0, index: 3)
      blobSidecar1 = BlobSidecar(signed_block_header: blockHeader0, index: 2)
      blobSidecar2 = BlobSidecar(signed_block_header: blockHeader1, index: 2)
        # Ensure minimal-difference pairs on both block root and blob index to
        # verify that blobkey uses both
        blobSidecar0 = blobFork.BlobSidecar(
          signed_block_header: blockHeader0, index: 3)
        blobSidecar1 = blobFork.BlobSidecar(
          signed_block_header: blockHeader0, index: 2)
        blobSidecar2 = blobFork.BlobSidecar(
          signed_block_header: blockHeader1, index: 2)

      db = makeTestDB(SLOTS_PER_EPOCH)
        db = makeTestDB(SLOTS_PER_EPOCH)

    var
      buf: seq[byte]
      blobSidecar: BlobSidecar
      var
        buf: seq[byte]
        blobSidecar: blobFork.BlobSidecar

    check:
      not db.getBlobSidecar(blockRoot0, 3, blobSidecar)
      not db.getBlobSidecar(blockRoot0, 2, blobSidecar)
      not db.getBlobSidecar(blockRoot1, 2, blobSidecar)
      not db.getBlobSidecarSZ(blockRoot0, 3, buf)
      not db.getBlobSidecarSZ(blockRoot0, 2, buf)
      not db.getBlobSidecarSZ(blockRoot1, 2, buf)
      check:
        not db.getBlobSidecar(blockRoot0, 3, blobSidecar)
        not db.getBlobSidecar(blockRoot0, 2, blobSidecar)
        not db.getBlobSidecar(blockRoot1, 2, blobSidecar)
        not getBlobSidecarSZ[blobFork.BlobSidecar](db, blockRoot0, 3, buf)
        not getBlobSidecarSZ[blobFork.BlobSidecar](db, blockRoot0, 2, buf)
        not getBlobSidecarSZ[blobFork.BlobSidecar](db, blockRoot1, 2, buf)

    db.putBlobSidecar(blobSidecar0)
      db.putBlobSidecar(blobSidecar0)

    check:
      db.getBlobSidecar(blockRoot0, 3, blobSidecar)
      blobSidecar == blobSidecar0
      not db.getBlobSidecar(blockRoot0, 2, blobSidecar)
      not db.getBlobSidecar(blockRoot1, 2, blobSidecar)
      db.getBlobSidecarSZ(blockRoot0, 3, buf)
      not db.getBlobSidecarSZ(blockRoot0, 2, buf)
      not db.getBlobSidecarSZ(blockRoot1, 2, buf)
      check:
        db.getBlobSidecar(blockRoot0, 3, blobSidecar)
        blobSidecar == blobSidecar0
        not db.getBlobSidecar(blockRoot0, 2, blobSidecar)
        not db.getBlobSidecar(blockRoot1, 2, blobSidecar)
        getBlobSidecarSZ[blobFork.BlobSidecar](db, blockRoot0, 3, buf)
        not getBlobSidecarSZ[blobFork.BlobSidecar](db, blockRoot0, 2, buf)
        not getBlobSidecarSZ[blobFork.BlobSidecar](db, blockRoot1, 2, buf)

    db.putBlobSidecar(blobSidecar1)
      db.putBlobSidecar(blobSidecar1)

    check:
      db.getBlobSidecar(blockRoot0, 3, blobSidecar)
      blobSidecar == blobSidecar0
      db.getBlobSidecar(blockRoot0, 2, blobSidecar)
      blobSidecar == blobSidecar1
      not db.getBlobSidecar(blockRoot1, 2, blobSidecar)
      db.getBlobSidecarSZ(blockRoot0, 3, buf)
      db.getBlobSidecarSZ(blockRoot0, 2, buf)
      not db.getBlobSidecarSZ(blockRoot1, 2, buf)
      check:
        db.getBlobSidecar(blockRoot0, 3, blobSidecar)
        blobSidecar == blobSidecar0
        db.getBlobSidecar(blockRoot0, 2, blobSidecar)
        blobSidecar == blobSidecar1
        not db.getBlobSidecar(blockRoot1, 2, blobSidecar)
        getBlobSidecarSZ[blobFork.BlobSidecar](db, blockRoot0, 3, buf)
        getBlobSidecarSZ[blobFork.BlobSidecar](db, blockRoot0, 2, buf)
        not getBlobSidecarSZ[blobFork.BlobSidecar](db, blockRoot1, 2, buf)

    check db.delBlobSidecar(blockRoot0, 3)
      check db.delBlobSidecar(blockRoot0, 3)

    check:
      not db.getBlobSidecar(blockRoot0, 3, blobSidecar)
      db.getBlobSidecar(blockRoot0, 2, blobSidecar)
      blobSidecar == blobSidecar1
      not db.getBlobSidecar(blockRoot1, 2, blobSidecar)
      not db.getBlobSidecarSZ(blockRoot0, 3, buf)
      db.getBlobSidecarSZ(blockRoot0, 2, buf)
      not db.getBlobSidecarSZ(blockRoot1, 2, buf)
      check:
        not db.getBlobSidecar(blockRoot0, 3, blobSidecar)
        db.getBlobSidecar(blockRoot0, 2, blobSidecar)
        blobSidecar == blobSidecar1
        not db.getBlobSidecar(blockRoot1, 2, blobSidecar)
        not getBlobSidecarSZ[blobFork.BlobSidecar](db, blockRoot0, 3, buf)
        getBlobSidecarSZ[blobFork.BlobSidecar](db, blockRoot0, 2, buf)
        not getBlobSidecarSZ[blobFork.BlobSidecar](db, blockRoot1, 2, buf)

    db.putBlobSidecar(blobSidecar2)
      db.putBlobSidecar(blobSidecar2)

    check:
      not db.getBlobSidecar(blockRoot0, 3, blobSidecar)
      db.getBlobSidecar(blockRoot0, 2, blobSidecar)
      blobSidecar == blobSidecar1
      db.getBlobSidecar(blockRoot1, 2, blobSidecar)
      blobSidecar == blobSidecar2
      not db.getBlobSidecarSZ(blockRoot0, 3, buf)
      db.getBlobSidecarSZ(blockRoot0, 2, buf)
      db.getBlobSidecarSZ(blockRoot1, 2, buf)
      check:
        not db.getBlobSidecar(blockRoot0, 3, blobSidecar)
        db.getBlobSidecar(blockRoot0, 2, blobSidecar)
        blobSidecar == blobSidecar1
        db.getBlobSidecar(blockRoot1, 2, blobSidecar)
        blobSidecar == blobSidecar2
        not getBlobSidecarSZ[blobFork.BlobSidecar](db, blockRoot0, 3, buf)
        getBlobSidecarSZ[blobFork.BlobSidecar](db, blockRoot0, 2, buf)
        getBlobSidecarSZ[blobFork.BlobSidecar](db, blockRoot1, 2, buf)

    check db.delBlobSidecar(blockRoot0, 2)
      check db.delBlobSidecar(blockRoot0, 2)

    check:
      not db.getBlobSidecar(blockRoot0, 3, blobSidecar)
      not db.getBlobSidecar(blockRoot0, 2, blobSidecar)
      db.getBlobSidecar(blockRoot1, 2, blobSidecar)
      blobSidecar == blobSidecar2
      not db.getBlobSidecarSZ(blockRoot0, 3, buf)
      not db.getBlobSidecarSZ(blockRoot0, 2, buf)
      db.getBlobSidecarSZ(blockRoot1, 2, buf)
      check:
        not db.getBlobSidecar(blockRoot0, 3, blobSidecar)
        not db.getBlobSidecar(blockRoot0, 2, blobSidecar)
        db.getBlobSidecar(blockRoot1, 2, blobSidecar)
        blobSidecar == blobSidecar2
        not getBlobSidecarSZ[blobFork.BlobSidecar](db, blockRoot0, 3, buf)
        not getBlobSidecarSZ[blobFork.BlobSidecar](db, blockRoot0, 2, buf)
        getBlobSidecarSZ[blobFork.BlobSidecar](db, blockRoot1, 2, buf)

    check db.delBlobSidecar(blockRoot1, 2)
      check db.delBlobSidecar(blockRoot1, 2)

    check:
      not db.getBlobSidecar(blockRoot0, 3, blobSidecar)
      not db.getBlobSidecar(blockRoot0, 2, blobSidecar)
      not db.getBlobSidecar(blockRoot1, 2, blobSidecar)
      not db.getBlobSidecarSZ(blockRoot0, 3, buf)
      not db.getBlobSidecarSZ(blockRoot0, 2, buf)
      not db.getBlobSidecarSZ(blockRoot1, 2, buf)
      check:
        not db.getBlobSidecar(blockRoot0, 3, blobSidecar)
        not db.getBlobSidecar(blockRoot0, 2, blobSidecar)
        not db.getBlobSidecar(blockRoot1, 2, blobSidecar)
        not getBlobSidecarSZ[blobFork.BlobSidecar](db, blockRoot0, 3, buf)
        not getBlobSidecarSZ[blobFork.BlobSidecar](db, blockRoot0, 2, buf)
        not getBlobSidecarSZ[blobFork.BlobSidecar](db, blockRoot1, 2, buf)

    db.close()
      db.close()

suite "FinalizedBlocks" & preset():
  test "Basic ops" & preset():

@ -890,4 +960,4 @@ suite "FinalizedBlocks" & preset():
      check: k in [Slot 0, Slot 5]
      items += 1

    check: items == 2
    check: items == 2
@ -64,7 +64,7 @@ suite "Block processor" & preset():
    let
      missing = await processor[].addBlock(
        MsgSource.gossip, ForkedSignedBeaconBlock.init(b2),
        Opt.none(BlobSidecars))
        Opt.none(ForkedBlobSidecars))

    check: missing.error == VerifierError.MissingParent

@ -76,7 +76,7 @@ suite "Block processor" & preset():
    let
      status = await processor[].addBlock(
        MsgSource.gossip, ForkedSignedBeaconBlock.init(b1),
        Opt.none(BlobSidecars))
        Opt.none(ForkedBlobSidecars))
      b1Get = dag.getBlockRef(b1.root)

    check:
@ -12,7 +12,7 @@
import
  std/[json, os, random, sequtils, strutils, times],
  chronos,
  stew/base10, chronicles, unittest2,
  yaml,
  yaml/tojson,
  ../beacon_chain/beacon_chain_db,
  ../beacon_chain/spec/deposit_snapshots,
  ./consensus_spec/os_ops

@ -208,7 +208,7 @@ suite "EIP-4881":
    path: string
  ): seq[DepositTestCase] {.raises: [
    IOError, KeyError, ValueError, YamlConstructionError, YamlParserError].} =
  yaml.loadToJson(os_ops.readFile(path))[0].mapIt:
  loadToJson(os_ops.readFile(path))[0].mapIt:
    DepositTestCase(
      deposit_data: DepositData(
        pubkey: ValidatorPubKey.fromHex(
@ -67,3 +67,190 @@ suite "Spec helpers":
          process(fieldVar, i shl childDepth)
        i += 1
    process(state, state.numLeaves)

  test "hypergeom_cdf":
    # Generated with SciPy's hypergeom.cdf() function
    const tests = [
      ( 0, 2, 1, 1, 0.5),
      ( 8, 200, 162, 9, 0.85631007588636132),
      ( 2, 20, 11, 5, 0.39551083591331271),
      ( 2, 5, 4, 3, 0.59999999999999987),
      ( 16, 100, 71, 28, 0.050496322336354399),
      ( 1, 5, 2, 2, 0.90000000000000002),
      ( 0, 5, 4, 1, 0.20000000000000004),
      ( 27, 200, 110, 54, 0.24032479119039216),
      ( 0, 10, 2, 5, 0.22222222222222224),
      ( 3, 50, 27, 5, 0.77138514980460271),
      ( 2, 50, 24, 8, 0.15067269856977925),
      ( 4, 20, 16, 7, 0.10113519091847264),
      ( 13, 500, 408, 15, 0.79686197891279686),
      ( 0, 5, 3, 1, 0.40000000000000008),
      ( 0, 20, 14, 2, 0.078947368421052627),
      ( 49, 100, 62, 79, 0.6077614986362827),
      ( 2, 10, 3, 6, 0.83333333333333337),
      ( 0, 50, 31, 2, 0.13959183673469389),
      ( 2, 5, 4, 3, 0.59999999999999987),
      ( 4, 50, 21, 8, 0.81380887468704521),
      ( 0, 10, 7, 2, 0.066666666666666652),
      ( 0, 10, 1, 4, 0.59999999999999987),
      ( 0, 20, 4, 2, 0.63157894736842102),
      ( 0, 3, 2, 1, 0.33333333333333331),
      ( 39, 500, 427, 51, 0.05047757656076568),
      ( 2, 100, 6, 21, 0.89490672557682871),
      ( 5, 20, 11, 9, 0.68904501071683733),
      ( 0, 2, 1, 1, 0.5),
      ( 0, 3, 1, 1, 0.66666666666666674),
      ( 14, 50, 27, 30, 0.16250719969887772),
      ( 0, 5, 4, 1, 0.20000000000000004),
      ( 0, 5, 4, 1, 0.20000000000000004),
      ( 2, 10, 8, 4, 0.13333333333333333),
      ( 1, 5, 3, 2, 0.69999999999999996),
      ( 25, 100, 77, 31, 0.79699287800204943),
      ( 0, 3, 2, 1, 0.33333333333333331),
      ( 7, 20, 15, 8, 0.94891640866873062),
      ( 3, 50, 26, 7, 0.45339412360688952),
      ( 1, 10, 8, 2, 0.37777777777777771),
      ( 40, 200, 61, 134, 0.4491054454532335),
      ( 1, 5, 2, 4, 0.40000000000000008),
      ( 0, 10, 6, 1, 0.39999999999999991),
      ( 1, 50, 10, 13, 0.19134773839560071),
      ( 0, 2, 1, 1, 0.5),
      ( 1, 20, 5, 2, 0.94736842105263153),
      ( 7, 50, 12, 30, 0.57532691212157849),
      ( 0, 3, 1, 1, 0.66666666666666674),
      ( 6, 10, 7, 9, 0.69999999999999996),
      ( 0, 20, 2, 1, 0.90000000000000002),
      ( 2, 10, 5, 3, 0.91666666666666663),
      ( 0, 10, 8, 1, 0.19999999999999998),
      (258, 500, 372, 347, 0.53219975096883698),
      ( 1, 3, 2, 2, 0.66666666666666674),
      ( 45, 200, 129, 68, 0.69415691010446789),
      ( 1, 10, 8, 2, 0.37777777777777771),
      ( 0, 10, 2, 1, 0.80000000000000004),
      ( 1, 10, 4, 5, 0.26190476190476192),
      ( 3, 50, 36, 4, 0.74422492401215801),
      ( 0, 20, 6, 1, 0.69999999999999996),
      ( 0, 5, 2, 3, 0.10000000000000002),
      ( 1, 200, 47, 9, 0.33197417194852796),
      ( 20, 50, 32, 30, 0.78323921453982637),
      ( 16, 50, 21, 34, 0.9149336897131396),
      ( 17, 50, 38, 22, 0.69599001425795692),
      ( 0, 5, 2, 3, 0.10000000000000002),
      ( 1, 5, 3, 2, 0.69999999999999996),
      ( 0, 10, 9, 1, 0.10000000000000001),
      ( 0, 5, 2, 3, 0.10000000000000002),
      ( 2, 10, 5, 6, 0.26190476190476192),
      ( 0, 5, 2, 1, 0.59999999999999987),
      ( 7, 20, 16, 9, 0.62538699690402466),
      ( 1, 100, 27, 2, 0.92909090909090908),
      ( 27, 100, 58, 50, 0.271780848715515),
      ( 47, 100, 96, 51, 0.063730084348641039),
      ( 1, 20, 6, 2, 0.92105263157894735),
      ( 1, 10, 6, 2, 0.66666666666666674),
      ( 0, 2, 1, 1, 0.5),
      ( 0, 20, 11, 1, 0.45000000000000001),
      ( 0, 3, 1, 1, 0.66666666666666674),
      ( 0, 2, 1, 1, 0.5),
      ( 0, 10, 1, 7, 0.29999999999999999),
      ( 0, 2, 1, 1, 0.5),
      ( 0, 100, 36, 1, 0.64000000000000001),
      ( 1, 100, 68, 2, 0.53979797979797983),
      ( 13, 200, 79, 29, 0.80029860188814683),
      ( 0, 10, 5, 1, 0.49999999999999994),
      ( 0, 3, 2, 1, 0.33333333333333331),
      ( 13, 100, 64, 21, 0.5065368728909565),
      ( 1, 10, 6, 4, 0.11904761904761905),
      ( 0, 2, 1, 1, 0.5),
      ( 0, 5, 1, 2, 0.59999999999999987),
      ( 0, 2, 1, 1, 0.5),
      ( 1, 5, 4, 2, 0.40000000000000008),
      ( 14, 50, 41, 17, 0.65850372332742224),
      ( 0, 2, 1, 1, 0.5),
      ( 0, 3, 1, 1, 0.66666666666666674),
      ( 1, 100, 2, 62, 0.61797979797979785),
      ( 0, 2, 1, 1, 0.5),
      ( 0, 2, 1, 1, 0.5),
      ( 12, 500, 312, 16, 0.91020698917397613),
      ( 0, 20, 2, 6, 0.47894736842105257),
      ( 0, 3, 2, 1, 0.33333333333333331),
      ( 1, 10, 3, 4, 0.66666666666666674),
      ( 0, 3, 1, 1, 0.66666666666666674),
      ( 0, 3, 2, 1, 0.33333333333333331),
      ( 6, 50, 20, 14, 0.72026241648862666),
      ( 3, 20, 14, 6, 0.22523219814241485),
      ( 0, 2, 1, 1, 0.5),
      ( 4, 100, 72, 7, 0.30429108474790234),
      ( 0, 5, 1, 2, 0.59999999999999987),
      ( 0, 10, 4, 1, 0.59999999999999998),
      ( 1, 3, 2, 2, 0.66666666666666674),
      ( 0, 3, 1, 1, 0.66666666666666674),
      ( 22, 50, 46, 24, 0.66413373860182379),
      ( 1, 5, 2, 4, 0.40000000000000008),
      ( 62, 100, 80, 79, 0.3457586020522983),
      ( 0, 3, 2, 1, 0.33333333333333331),
      ( 0, 10, 2, 7, 0.066666666666666666),
      ( 0, 2, 1, 1, 0.5),
      ( 0, 5, 2, 1, 0.59999999999999987),
      ( 42, 200, 145, 57, 0.65622325663713577),
      ( 1, 20, 12, 3, 0.34385964912280703),
      ( 0, 2, 1, 1, 0.5),
      ( 2, 10, 4, 7, 0.33333333333333331),
      ( 1, 5, 3, 2, 0.69999999999999996),
      ( 0, 10, 6, 2, 0.1333333333333333),
      ( 2, 10, 6, 5, 0.26190476190476192),
      ( 0, 5, 2, 1, 0.59999999999999987),
      ( 1, 3, 2, 2, 0.66666666666666674),
      ( 0, 50, 25, 2, 0.24489795918367349),
      ( 0, 50, 39, 1, 0.22),
      ( 2, 5, 3, 3, 0.90000000000000002),
      ( 9, 50, 46, 10, 0.60316977854971765),
      ( 0, 5, 2, 1, 0.59999999999999987),
      ( 72, 500, 324, 112, 0.49074275180525029),
      ( 0, 50, 9, 7, 0.22507959200836167),
      ( 0, 5, 2, 2, 0.30000000000000004),
      ( 17, 100, 35, 60, 0.067474411926413541),
      ( 15, 100, 83, 17, 0.83718038506483827),
      ( 0, 10, 7, 1, 0.29999999999999999),
      ( 28, 200, 87, 77, 0.071226044946921765),
      (154, 500, 361, 212, 0.61327756805578304),
      ( 1, 10, 2, 3, 0.93333333333333335),
      ( 0, 10, 4, 4, 0.071428571428571425),
      ( 0, 5, 1, 1, 0.79999999999999993),
      ( 2, 5, 3, 4, 0.59999999999999987),
      ( 0, 10, 4, 1, 0.59999999999999998),
      ( 0, 3, 2, 1, 0.33333333333333331),
      ( 0, 10, 3, 1, 0.69999999999999996),
      ( 0, 50, 10, 1, 0.80000000000000004),
      ( 0, 2, 1, 1, 0.5),
      ( 0, 10, 1, 3, 0.69999999999999996),
      ( 2, 20, 12, 4, 0.53457172342621262),
      ( 0, 5, 4, 1, 0.20000000000000004),
      ( 4, 20, 9, 7, 0.89821981424148611),
      ( 2, 200, 188, 3, 0.17021775544388609),
      (132, 500, 298, 215, 0.78880271135040059),
      ( 2, 5, 4, 3, 0.59999999999999987),
      ( 0, 2, 1, 1, 0.5),
      ( 2, 10, 6, 5, 0.26190476190476192),
      ( 0, 3, 1, 1, 0.66666666666666674),
      (156, 200, 128, 174, 1),
      ( 1, 20, 6, 4, 0.65737874097007221),
      ( 0, 5, 0, 0, 1),
      (488, 500, 198, 500, 1),
      (143, 500, 8, 371, 1),
      ( 2, 10, 6, 5, 0.26190476190476192),
      ( 1, 5, 2, 4, 0.40000000000000008),
      ( 0, 3, 2, 0, 1),
      ( 12, 50, 7, 17, 1),
      (129, 200, 43, 133, 1),
      ( 0, 5, 3, 0, 1),
      ( 0, 2, 1, 1, 0.5),
      ( 5, 20, 20, 17, 0),
      ( 4, 10, 4, 8, 1),
      ( 46, 500, 478, 58, 5.1715118817799218e-07),
      ( 0, 3, 2, 3, 0),
      ( 0, 3, 1, 1, 0.66666666666666674),
      ( 76, 500, 0, 120, 1),
      ( 1, 100, 41, 12, 0.011989696504564528),
    ]
    for (k, population, successes, draws, val) in tests:
      check: abs(hypergeom_cdf(k, population, successes, draws) - val) < 1e-11
|
||||
|
|
|
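
The tuples above are (k, population, successes, draws, expected CDF) test vectors for `hypergeom_cdf`. As a cross-check, a minimal Nim sketch of the same CDF, using log-gamma so the binomial coefficients stay in floating point (hypothetical helper names, not the spec-helpers implementation under test):

```nim
import std/math

# ln C(n, k) via log-gamma, avoiding overflow of large binomials.
func lnBinom(n, k: int): float =
  lgamma(float(n + 1)) - lgamma(float(k + 1)) - lgamma(float(n - k + 1))

# P(X <= k) for X ~ Hypergeometric(population, successes, draws).
func hypergeomCdfSketch(k, population, successes, draws: int): float =
  if k >= min(successes, draws):
    return 1.0  # entire support covered
  var total = 0.0
  for i in max(0, successes + draws - population) .. k:
    total += exp(
      lnBinom(successes, i) +
      lnBinom(population - successes, draws - i) -
      lnBinom(population, draws))
  total

when isMainModule:
  # Agrees with the first vector above within floating-point error.
  doAssert abs(hypergeomCdfSketch(2, 100, 6, 21) - 0.89490672557682871) < 1e-9
```

The 1e-11 tolerance is tight enough to catch off-by-one errors in the summation bounds while still allowing for rounding in the individual floating-point terms.
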
@ -213,7 +213,7 @@ from stew/byteutils import hexToByteArray
func fromHex(T: typedesc[KzgCommitment], s: string): T {.
    raises: [ValueError].} =
  var res: T
  hexToByteArray(s, res)
  hexToByteArray(s, res.bytes)
  res

suite "REST JSON encoding and decoding":
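
The one-line change above tracks `KzgCommitment` becoming a wrapper object around a fixed-size byte array rather than a bare array, so the hex decoder must write into its `bytes` field. A self-contained sketch of the assumed shape (48 bytes matches a compressed BLS12-381 G1 point; treat the exact definition as an assumption):

```nim
from stew/byteutils import hexToByteArray

type
  KzgCommitment = object
    bytes: array[48, byte]  # assumed layout: compressed G1 point

func fromHex(T: typedesc[KzgCommitment], s: string): T {.
    raises: [ValueError].} =
  var res: T
  hexToByteArray(s, res.bytes)  # decode into the wrapped array, not the object
  res
```
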
@ -353,4 +353,4 @@ suite "REST JSON encoding and decoding":
    check:
      validator.pubkey == ValidatorPubKey.fromHex(
        "0x933ad9491b62059dd065b560d256d8957a8c402cc6e8d8ee7290ae11e8f7329267a8811c397529dac52ae1342ba58c95")[]
      validator.exit_epoch == FAR_FUTURE_EPOCH
      validator.exit_epoch == FAR_FUTURE_EPOCH
@ -49,9 +49,12 @@ func collector(queue: AsyncQueue[BlockEntry]): BlockVerifier =
  # in the async queue, similar to how BlockProcessor does it - as far as
  # testing goes, this is risky because it might introduce differences between
  # the BlockProcessor and this test
  proc verify(signedBlock: ForkedSignedBeaconBlock, blobs: Opt[BlobSidecars],
              maybeFinalized: bool):
      Future[Result[void, VerifierError]] {.async: (raises: [CancelledError], raw: true).} =
  proc verify(
      signedBlock: ForkedSignedBeaconBlock,
      blobs: Opt[ForkedBlobSidecars],
      maybeFinalized: bool
  ): Future[Result[void, VerifierError]] {.
      async: (raises: [CancelledError], raw: true).} =
    let fut = Future[Result[void, VerifierError]].Raising([CancelledError]).init()
    try: queue.addLastNoWait(BlockEntry(blck: signedBlock, resfut: fut))
    except CatchableError as exc: raiseAssert exc.msg
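
The widened `verify` signature follows blob sidecars becoming fork-wrapped, mirroring `ForkedSignedBeaconBlock`. The real definitions live in the main sources; a minimal sketch consistent with how this diff uses `ForkedBlobSidecar.init` and `withForkyBlob`/`forkyBlob` (names taken from the diff, layout assumed):

```nim
import ./spec/datatypes/deneb  # assumed module providing deneb.BlobSidecar

type
  BlobFork* {.pure.} = enum
    Deneb

  ForkedBlobSidecar* = object
    case kind*: BlobFork
    of BlobFork.Deneb:
      denebData*: ref deneb.BlobSidecar

  ForkedBlobSidecars* = seq[ForkedBlobSidecar]

func init*(T: type ForkedBlobSidecar, blob: ref deneb.BlobSidecar): T =
  T(kind: BlobFork.Deneb, denebData: blob)

template withForkyBlob*(x: ForkedBlobSidecar, body: untyped): untyped =
  case x.kind
  of BlobFork.Deneb:
    template forkyBlob: untyped {.inject.} = x.denebData
    body
```

With a single fork in the enum the case object looks redundant, but it gives every call site a stable dispatch point once a second blob fork lands.
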
@ -73,8 +76,8 @@ suite "SyncManager test suite":

  func createBlobs(
      blocks: var seq[ref ForkedSignedBeaconBlock], slots: seq[Slot]
  ): seq[ref BlobSidecar] =
    var res = newSeq[ref BlobSidecar](len(slots))
  ): seq[ForkedBlobSidecar] =
    var res = newSeq[ForkedBlobSidecar](len(slots))
    for blck in blocks:
      withBlck(blck[]):
        when consensusFork >= ConsensusFork.Deneb:
@ -94,7 +97,7 @@ suite "SyncManager test suite":
          var sidecarIdx = 0
          for i, slot in slots:
            if slot == forkyBlck.message.slot:
              res[i] = newClone sidecars[sidecarIdx]
              res[i] = ForkedBlobSidecar.init(newClone sidecars[sidecarIdx])
              inc sidecarIdx
    res
@ -354,7 +357,7 @@ suite "SyncManager test suite":
        if request.isEmpty():
          break
        await queue.push(request, getSlice(chain, start, request),
                         Opt.none(seq[BlobSidecars]))
                         Opt.none(seq[ForkedBlobSidecars]))
      await validatorFut.cancelAndWait()

    waitFor runSmokeTest()
@ -429,7 +432,7 @@
      var r13 = queue.pop(finishSlot, p3)

      var f13 = queue.push(r13, chain.getSlice(startSlot, r13),
                           Opt.none(seq[BlobSidecars]))
                           Opt.none(seq[ForkedBlobSidecars]))
      await sleepAsync(100.milliseconds)
      check:
        f13.finished == false
@ -438,7 +441,7 @@
          of SyncQueueKind.Backward: counter == int(finishSlot)

      var f11 = queue.push(r11, chain.getSlice(startSlot, r11),
                           Opt.none(seq[BlobSidecars]))
                           Opt.none(seq[ForkedBlobSidecars]))
      await sleepAsync(100.milliseconds)
      check:
        case kkind
@ -448,7 +451,7 @@
        f13.finished == false

      var f12 = queue.push(r12, chain.getSlice(startSlot, r12),
                           Opt.none(seq[BlobSidecars]))
                           Opt.none(seq[ForkedBlobSidecars]))
      await allFutures(f11, f12, f13)
      check:
        f12.finished == true and f12.failed == false
@ -551,7 +554,7 @@
            check response[0][].slot >= getFowardSafeSlotCb()
          else:
            check response[^1][].slot <= getBackwardSafeSlotCb()
        await queue.push(request, response, Opt.none(seq[BlobSidecars]))
        await queue.push(request, response, Opt.none(seq[ForkedBlobSidecars]))
      await validatorFut.cancelAndWait()

    waitFor runTest()
@ -634,7 +637,7 @@

      # Handle request 1. Should be re-enqueued as it simulates `Invalid`.
      let response1 = getSlice(chain, start, request1)
      await queue.push(request1, response1, Opt.none(seq[BlobSidecars]))
      await queue.push(request1, response1, Opt.none(seq[ForkedBlobSidecars]))
      check debtLen(queue) == request2.count + request1.count

      # Request 1 should be discarded as it is no longer relevant.
@ -646,7 +649,7 @@

      # Handle request 3. Should be re-enqueued as it simulates `Invalid`.
      let response3 = getSlice(chain, start, request3)
      await queue.push(request3, response3, Opt.none(seq[BlobSidecars]))
      await queue.push(request3, response3, Opt.none(seq[ForkedBlobSidecars]))
      check debtLen(queue) == request3.count

      # Request 2 should be re-issued.
@ -660,7 +663,7 @@

      # Handle request 4. Should be re-enqueued as it simulates `Invalid`.
      let response4 = getSlice(chain, start, request4)
      await queue.push(request4, response4, Opt.none(seq[BlobSidecars]))
      await queue.push(request4, response4, Opt.none(seq[ForkedBlobSidecars]))
      check debtLen(queue) == request4.count

      # Advance `safeSlot` out of band.
@ -777,14 +780,14 @@
      var r14 = queue.pop(finishSlot, p4)

      var f14 = queue.push(r14, chain.getSlice(startSlot, r14),
                           Opt.none(seq[BlobSidecars]))
                           Opt.none(seq[ForkedBlobSidecars]))
      await sleepAsync(100.milliseconds)
      check:
        f14.finished == false
        counter == int(startSlot)

      var f12 = queue.push(r12, chain.getSlice(startSlot, r12),
                           Opt.none(seq[BlobSidecars]))
                           Opt.none(seq[ForkedBlobSidecars]))
      await sleepAsync(100.milliseconds)
      check:
        counter == int(startSlot)
@ -792,7 +795,7 @@
        f14.finished == false

      var f11 = queue.push(r11, chain.getSlice(startSlot, r11),
                           Opt.none(seq[BlobSidecars]))
                           Opt.none(seq[ForkedBlobSidecars]))
      await allFutures(f11, f12)
      check:
        counter == int(startSlot + chunkSize + chunkSize)
@ -804,7 +807,7 @@
      withBlck(missingSlice[0][]):
        forkyBlck.message.proposer_index = 0xDEADBEAF'u64
      var f13 = queue.push(r13, missingSlice,
                           Opt.none(seq[BlobSidecars]))
                           Opt.none(seq[ForkedBlobSidecars]))
      await allFutures(f13, f14)
      check:
        f11.finished == true and f11.failed == false
@ -826,17 +829,17 @@
      check r18.isEmpty() == true

      var f17 = queue.push(r17, chain.getSlice(startSlot, r17),
                           Opt.none(seq[BlobSidecars]))
                           Opt.none(seq[ForkedBlobSidecars]))
      await sleepAsync(100.milliseconds)
      check f17.finished == false

      var f16 = queue.push(r16, chain.getSlice(startSlot, r16),
                           Opt.none(seq[BlobSidecars]))
                           Opt.none(seq[ForkedBlobSidecars]))
      await sleepAsync(100.milliseconds)
      check f16.finished == false

      var f15 = queue.push(r15, chain.getSlice(startSlot, r15),
                           Opt.none(seq[BlobSidecars]))
                           Opt.none(seq[ForkedBlobSidecars]))
      await allFutures(f15, f16, f17)
      check:
        f15.finished == true and f15.failed == false
@ -883,7 +886,7 @@

      # Push a single request that will fail with all blocks being unviable
      var f11 = queue.push(r11, chain.getSlice(startSlot, r11),
                           Opt.none(seq[BlobSidecars]))
                           Opt.none(seq[ForkedBlobSidecars]))
      discard await f11.withTimeout(1.seconds)

      check:
@ -949,14 +952,14 @@
      var r14 = queue.pop(finishSlot, p4)

      var f14 = queue.push(r14, chain.getSlice(startSlot, r14),
                           Opt.none(seq[BlobSidecars]))
                           Opt.none(seq[ForkedBlobSidecars]))
      await sleepAsync(100.milliseconds)
      check:
        f14.finished == false
        counter == int(finishSlot)

      var f12 = queue.push(r12, chain.getSlice(startSlot, r12),
                           Opt.none(seq[BlobSidecars]))
                           Opt.none(seq[ForkedBlobSidecars]))
      await sleepAsync(100.milliseconds)
      check:
        counter == int(finishSlot)
@ -964,7 +967,7 @@
        f14.finished == false

      var f11 = queue.push(r11, chain.getSlice(startSlot, r11),
                           Opt.none(seq[BlobSidecars]))
                           Opt.none(seq[ForkedBlobSidecars]))
      await allFutures(f11, f12)
      check:
        counter == int(finishSlot - chunkSize - chunkSize)
@ -975,7 +978,7 @@
      var missingSlice = chain.getSlice(startSlot, r13)
      withBlck(missingSlice[0][]):
        forkyBlck.message.proposer_index = 0xDEADBEAF'u64
      var f13 = queue.push(r13, missingSlice, Opt.none(seq[BlobSidecars]))
      var f13 = queue.push(r13, missingSlice, Opt.none(seq[ForkedBlobSidecars]))
      await allFutures(f13, f14)
      check:
        f11.finished == true and f11.failed == false
@ -993,12 +996,12 @@
      check r17.isEmpty() == true

      var f16 = queue.push(r16, chain.getSlice(startSlot, r16),
                           Opt.none(seq[BlobSidecars]))
                           Opt.none(seq[ForkedBlobSidecars]))
      await sleepAsync(100.milliseconds)
      check f16.finished == false

      var f15 = queue.push(r15, chain.getSlice(startSlot, r15),
                           Opt.none(seq[BlobSidecars]))
                           Opt.none(seq[ForkedBlobSidecars]))
      await allFutures(f15, f16)
      check:
        f15.finished == true and f15.failed == false
@ -1101,16 +1104,20 @@
        len(grouped[0]) == 0
        # slot 11
        len(grouped[1]) == 2
        grouped[1][0].signed_block_header.message.slot == Slot(11)
        grouped[1][1].signed_block_header.message.slot == Slot(11)
        withForkyBlob(grouped[1][0]):
          forkyBlob[].signed_block_header.message.slot == Slot(11)
        withForkyBlob(grouped[1][1]):
          forkyBlob[].signed_block_header.message.slot == Slot(11)
        # slot 12
        len(grouped[2]) == 1
        grouped[2][0].signed_block_header.message.slot == Slot(12)
        withForkyBlob(grouped[2][0]):
          forkyBlob[].signed_block_header.message.slot == Slot(12)
        # slot 13
        len(grouped[3]) == 0
        # slot 14
        len(grouped[4]) == 1
        grouped[4][0].signed_block_header.message.slot == Slot(14)
        withForkyBlob(grouped[4][0]):
          forkyBlob[].signed_block_header.message.slot == Slot(14)
        # slot 15
        len(grouped[5]) == 0
@ -1127,16 +1134,15 @@
        len(grouped2) == 7
        len(grouped2[6]) == 0 # slot 17

      let blob18 = new (ref BlobSidecar)
      blob18[].signed_block_header.message.slot = Slot(18)
      let blob18 = ForkedBlobSidecar.init(new (ref deneb.BlobSidecar))
      withForkyBlob(blob18):
        forkyBlob[].signed_block_header.message.slot = Slot(18)
      blobs.add(blob18)
      let groupedRes3 = groupBlobs(req, blocks, blobs)

      check:
        groupedRes3.isErr()

    test "[SyncQueue#Forward] getRewindPoint() test":
      let aq = newAsyncQueue[BlockEntry]()
      block:
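
`groupBlobs` buckets the returned sidecars under their blocks by slot and fails when a sidecar, like `blob18` at `Slot(18)` above, matches no block. A standalone sketch of that contract (simplified hypothetical types; the real helper operates on `ForkedBlobSidecar` and the sync request):

```nim
import std/sequtils
import results  # nim-results, used throughout this codebase

type Sidecar = object  # hypothetical stand-in for a blob sidecar
  slot: uint64

func groupBySlot(blockSlots: seq[uint64],
                 sidecars: seq[Sidecar]): Result[seq[seq[Sidecar]], string] =
  ## One bucket per block; a sidecar without a matching block is an error,
  ## mirroring the `groupedRes3.isErr()` expectation above.
  var grouped = newSeq[seq[Sidecar]](blockSlots.len)
  for sc in sidecars:
    let idx = blockSlots.find(sc.slot)
    if idx < 0:
      return err("sidecar without matching block")
    grouped[idx].add sc
  ok(grouped)
```
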
@ -118,8 +118,6 @@ proc build_empty_merge_execution_payload(state: bellatrix.BeaconState):
  bellatrix.ExecutionPayloadForSigning(executionPayload: payload,
                                       blockValue: Wei.zero)

from stew/saturating_arith import saturate

proc build_empty_execution_payload(
    state: bellatrix.BeaconState,
    feeRecipient: Eth1Address): bellatrix.ExecutionPayloadForSigning =
@ -129,8 +127,8 @@ proc build_empty_execution_payload(
    latest = state.latest_execution_payload_header
    timestamp = compute_timestamp_at_slot(state, state.slot)
    randao_mix = get_randao_mix(state, get_current_epoch(state))
    base_fee = calcEip1599BaseFee(GasInt.saturate latest.gas_limit,
                                  GasInt.saturate latest.gas_used,
    base_fee = calcEip1599BaseFee(latest.gas_limit,
                                  latest.gas_used,
                                  latest.base_fee_per_gas)

  var payload = bellatrix.ExecutionPayloadForSigning(
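
Dropping the `saturate` calls is safe here because the empty test payloads keep `gas_limit` and `gas_used` far below the `GasInt` range. For reference, the EIP-1559 update rule that `calcEip1599BaseFee` implements (the misspelled name is the upstream helper's), as a plain-uint64 sketch; the real helper works on wider integer types:

```nim
const
  BASE_FEE_MAX_CHANGE_DENOMINATOR = 8'u64
  ELASTICITY_MULTIPLIER = 2'u64

# Sketch of the EIP-1559 base fee update; overflow handling is left to the
# wider integer types used upstream.
func nextBaseFee(gasLimit, gasUsed, baseFee: uint64): uint64 =
  let gasTarget = gasLimit div ELASTICITY_MULTIPLIER
  if gasUsed == gasTarget:
    baseFee
  elif gasUsed > gasTarget:
    # Fuller than target: raise by at least 1 wei.
    baseFee + max(1'u64,
      baseFee * (gasUsed - gasTarget) div gasTarget div
        BASE_FEE_MAX_CHANGE_DENOMINATOR)
  else:
    # Emptier than target: lower proportionally.
    baseFee - baseFee * (gasTarget - gasUsed) div gasTarget div
      BASE_FEE_MAX_CHANGE_DENOMINATOR
```
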
@ -172,8 +170,6 @@ proc addTestBlock*(
    cfg, state, getStateField(state, slot) + 1, cache, info, flags).expect(
      "can advance 1")

  debugComment "add consolidations support to addTestBlock"

  let
    proposer_index = get_beacon_proposer_index(
      state, cache, getStateField(state, slot)).expect("valid proposer index")
@ -14,6 +14,9 @@ import
    forks, state_transition, state_transition_block]

from ".."/beacon_chain/bloomfilter import constructBloomFilter
from ".."/beacon_chain/spec/state_transition_epoch import
  get_validator_balance_after_epoch, process_epoch


func round_multiple_down(x: Gwei, n: Gwei): Gwei =
  ## Round the input to the previous multiple of "n"
@ -97,4 +100,19 @@ proc getTestStates*(
    doAssert getStateField(tmpState[], slot) == slot

    if tmpState[].kind == consensusFork:
      result.add assignClone(tmpState[])
      result.add assignClone(tmpState[])

proc checkPerValidatorBalanceCalc*(
    state: deneb.BeaconState | electra.BeaconState): bool =
  var
    info: altair.EpochInfo
    cache: StateCache
  let tmpState = newClone(state) # slow, but tolerable for tests
  discard process_epoch(defaultRuntimeConfig, tmpState[], {}, cache, info)
  for i in 0 ..< tmpState.balances.len:
    if tmpState.balances.item(i) != get_validator_balance_after_epoch(
        defaultRuntimeConfig, state, default(UpdateFlags), cache, info,
        i.ValidatorIndex):
      return false

  true
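
A hypothetical call site for the new helper, checking the incremental balance computation against a full `process_epoch` run on generated Deneb states (`genesisState` is a placeholder, and the accessor path assumes the forked-state layout used elsewhere in these tests):

```nim
# Hypothetical usage sketch, not part of the diff:
for testState in getTestStates(genesisState, ConsensusFork.Deneb):
  check checkPerValidatorBalanceCalc(testState[].denebData.data)
```
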
@ -1 +1 @@
Subproject commit ab3ff9fad45fa7e20d749d0a03a7567225f5dd4a
Subproject commit 7721c955b522f4893265bb36a6de4f8edef8b54b

@ -1 +1 @@
Subproject commit 9ed6c63314899d17e2c3f669adbe2bc915610982
Subproject commit 0e085cb606e78a495ce8014f9350931bc360e663

@ -1 +1 @@
Subproject commit a806cbfab5fe8de49c76139f8705fff79daf99ee
Subproject commit 646fa2152b11980c24bf34b3e214b479c9d25f21

@ -1 +1 @@
Subproject commit f29698d2e9a59453d99db7315a5af58add3c8715
Subproject commit 1d0d886cdcb17b25108c7b904f84819629c0e4fb

@ -1 +1 @@
Subproject commit 0adf3b7db70736061bf12fa23c2fc51f395b289e
Subproject commit cb640db2cd66d7f4a1810a7df51b55f6f59cf3c5

@ -1 +1 @@
Subproject commit 26212c881b464ed64cac20442fb45144d3ecd3b3
Subproject commit ebfe63b9b6523a1823e4505f0972d81047a77cf5

@ -1 +1 @@
Subproject commit 11b9d952a80ec87e2443405a6a5382f9daac51f8
Subproject commit dbc4a95df60238157dcf286f6125188cb72f37c1

@ -1 +1 @@
Subproject commit be57dbc902d36f37540897e98c69aa80f868cb45
Subproject commit 98496aa24d9364d1652e531f5f346de9b7cb3e15

@ -1 +1 @@
Subproject commit ad8721e0f3c6925597b5a93b6c53e040f26b5fb3
Subproject commit 8e1cdb18230f7e7172b4b4aa503b0d66fe530942

@ -1 +1 @@
Subproject commit 4d0b0662ed960ab2c5a1ddbd08f77048bac13ae7
Subproject commit 89f7be1783b2f828a95dea1496fdac3510532997
Some files were not shown because too many files have changed in this diff