Merge branch 'stable' into dev/etan/zz-dbg

Etan Kissling 2024-02-04 08:38:33 +01:00
commit 0cd6ea172a
No known key found for this signature in database
GPG Key ID: B21DA824C5A3D03D
51 changed files with 1077 additions and 232 deletions

View File

@ -74,6 +74,13 @@ OK: 7/7 Fail: 0/7 Skip: 0/7
+ basics OK
```
OK: 2/2 Fail: 0/2 Skip: 0/2
## Blinded block conversions
```diff
+ Bellatrix toSignedBlindedBlock OK
+ Capella toSignedBlindedBlock OK
+ Deneb toSignedBlindedBlock OK
```
OK: 3/3 Fail: 0/3 Skip: 0/3
## Block pool altair processing [Preset: mainnet]
```diff
+ Invalid signatures [Preset: mainnet] OK
@ -979,4 +986,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
OK: 9/9 Fail: 0/9 Skip: 0/9
---TOTAL---
OK: 664/669 Fail: 0/669 Skip: 5/669
OK: 667/672 Fail: 0/672 Skip: 5/672

View File

@ -1,3 +1,77 @@
2024-02-02 v24.2.0
==================
Nimbus `v24.2.0` is a `low-urgency` upgrade bringing important stability improvements for Deneb-enabled networks. It's highly recommended for users who are testing their setups on the Holešky testnet, which will be transitioned to Deneb on the 7th of February.
### Improvements
* Nimbus now supports the `/eth/v1/beacon/blinded_blocks/{block_id}` Beacon API endpoint (a usage sketch follows this list):
https://github.com/status-im/nimbus-eth2/pull/5829
https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.2#/Beacon/getBlindedBlock
* Nimbus now reports more comprehensive information in case of errors while interacting with an external builder:
https://github.com/status-im/nimbus-eth2/pull/5819
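A minimal stdlib-only sketch of querying the new endpoint, assuming a locally running node with its REST API on the default port 5052 (the host, port and `head` identifier are illustrative, not part of this release note). The response carries the fork name in the `eth-consensus-version` header, and requesting `application/octet-stream` instead of JSON yields the SSZ encoding:
```nim
# Sketch only: fetch the blinded form of the current head block as JSON.
import std/httpclient

let client = newHttpClient()
# Ask for JSON; `Accept: application/octet-stream` would return SSZ instead.
client.headers = newHttpHeaders({"Accept": "application/json"})
let resp = client.get("http://127.0.0.1:5052/eth/v1/beacon/blinded_blocks/head")
echo resp.status    # expect "200 OK" on a synced node
echo resp.body      # JSON envelope: version, execution_optimistic, finalized, data
client.close()
```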
### Fixes
* Nimbus was frequently getting out of sync on Deneb-transitioned networks due to a regression introduced in the `v24.1.2` release:
https://github.com/status-im/nimbus-eth2/pull/5834
* The `blob_sidecar` event reported by the Beacon API had an incorrect format for the `versioned_hash` value (a derivation sketch follows this list):
https://github.com/status-im/nimbus-eth2/pull/5844
* Under rare circumstances, the Nimbus validator client could permanently lose its connection to the configured beacon node when the two processes' system clocks disagreed:
https://github.com/status-im/nimbus-eth2/pull/5827
* Nimbus now uses a smaller Builder API validator registration batch size in order to prevent rarely triggered registration timeouts:
https://github.com/status-im/nimbus-eth2/pull/5837
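For background, the `versioned_hash` carried by the event is the EIP-4844 versioned hash of the blob's KZG commitment, now rendered as a 0x-prefixed hex string. A minimal sketch of that derivation, assuming the `nimcrypto` and `stew` packages this codebase already depends on; the helper name here is hypothetical, and the client's own `kzg_commitment_to_versioned_hash` (visible in the diffs further down) does the equivalent with its internal types:
```nim
# Sketch only: derive the EIP-4844 versioned hash for a KZG commitment and
# render it as the 0x-prefixed hex string now emitted by the event.
import nimcrypto/sha2
from stew/byteutils import to0xHex

proc kzgCommitmentToVersionedHash(commitment: openArray[byte]): string =
  var h = sha256.digest(commitment).data   # 32-byte SHA-256 of the commitment
  h[0] = 0x01'u8                           # VERSIONED_HASH_VERSION_KZG
  h.to0xHex
```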
2024-01-25 v24.1.2
==================
Nimbus `v24.1.2` is a `low-urgency` point release bringing full support for the upcoming Cancun-Deneb hard-fork on the networks Sepolia, Chiado (Gnosis Chain testnet) and Holešky.
### Improvements
* Nimbus `v24.1.2` includes full support for the upcoming Deneb hard-fork in the networks Sepolia (30 Jan), Chiado (31 Jan) and Holešky (7 Feb):
https://github.com/status-im/nimbus-eth2/pull/5795
https://github.com/status-im/nimbus-eth2/pull/5725
https://github.com/status-im/nimbus-eth2/pull/5772
https://github.com/status-im/nimbus-eth2/pull/5796
* Nimbus no longer skips attestations during brief losses of connectivity to the execution client; instead, it attests to the last known valid block:
https://github.com/status-im/nimbus-eth2/pull/5313
* The `/eth/v1/events` Beacon API endpoint now reports `blob_sidecar` events (a subscription sketch follows this list):
https://github.com/status-im/nimbus-eth2/pull/5728
https://github.com/ethereum/beacon-APIs/pull/350/
* The Nimbus status bar and the "Slot start" log message now indicate the time of the next hard-fork in networks where it's already scheduled:
https://github.com/status-im/nimbus-eth2/pull/5761
https://github.com/status-im/nimbus-eth2/pull/5751
https://github.com/status-im/nimbus-eth2/pull/5731
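As context, `/eth/v1/events` is a standard server-sent-events stream. A deliberately simplified raw-socket sketch, assuming a node with its REST API on the default port 5052 and ignoring HTTP chunked-transfer framing, which a real client must handle:
```nim
# Simplified sketch: subscribe to blob_sidecar events over SSE.
import std/[net, strutils]

let sock = newSocket()
sock.connect("127.0.0.1", Port(5052))
sock.send("GET /eth/v1/events?topics=blob_sidecar HTTP/1.1\r\n" &
          "Host: 127.0.0.1:5052\r\nAccept: text/event-stream\r\n\r\n")
while true:
  let line = sock.recvLine()
  if line.startsWith("data:"):
    # One JSON object per event: block_root, index, slot,
    # kzg_commitment and versioned_hash.
    echo line
```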
### Fixes
* The HTTP headers `eth-consensus-block-value` and `eth-execution-payload-value` returned by the `/eth/v3/validator/blocks/{slot}` endpoint were not using decimal encoding:
https://github.com/status-im/nimbus-eth2/pull/5741
* Block headers within light client updates in Deneb-transitioned networks had incorrect zero values for the `blob_gas_used` field:
https://github.com/status-im/nimbus-eth2/pull/5763
* Incomplete responses to `blobSidecarsByRange` requests were inappropriately preventing the client from achieving the maximum possible syncing speed:
https://github.com/status-im/nimbus-eth2/pull/5766
* The Nimbus validator client was not implementing the strategy of using the validator's withdrawal address as the fee recipient address when one was not explicitly specified. This resulted in a failure to register validators obtained from a `--web3-signer-url` with the configured `--payload-builder-url` when the `--suggested-fee-recipient` option was not provided:
https://github.com/status-im/nimbus-eth2/pull/5781
https://github.com/status-im/nimbus-eth2/issues/5730
* The `/eth/v1/beacon/states/{state_id}/validators` Beacon API endpoint was not spec-compliant when the optional `status` field was absent from the request:
https://github.com/status-im/nimbus-eth2/pull/5762
https://github.com/status-im/nimbus-eth2/issues/5758
2024-01-08 v24.1.1
==================

View File

@ -408,7 +408,7 @@ proc processAttestation*(
ok()
else:
debug "Dropping attestation", validationError = v.error
debug "Dropping attestation", reason = v.error
beacon_attestations_dropped.inc(1, [$v.error[0]])
err(v.error())
@ -464,7 +464,7 @@ proc processSignedAggregateAndProof*(
ok()
else:
debug "Dropping aggregate", error = v.error
debug "Dropping aggregate", reason = v.error
beacon_aggregates_dropped.inc(1, [$v.error[0]])
err(v.error())
@ -488,7 +488,7 @@ proc processBlsToExecutionChange*(
self.validatorChangePool[].addMessage(
blsToExecutionChange, src == MsgSource.api)
else:
debug "Dropping BLS to execution change", validationError = v.error
debug "Dropping BLS to execution change", reason = v.error
beacon_attester_slashings_dropped.inc(1, [$v.error[0]])
return v
@ -512,7 +512,7 @@ proc processAttesterSlashing*(
beacon_attester_slashings_received.inc()
else:
debug "Dropping attester slashing", validationError = v.error
debug "Dropping attester slashing", reason = v.error
beacon_attester_slashings_dropped.inc(1, [$v.error[0]])
v
@ -535,7 +535,7 @@ proc processProposerSlashing*(
beacon_proposer_slashings_received.inc()
else:
debug "Dropping proposer slashing", validationError = v.error
debug "Dropping proposer slashing", reason = v.error
beacon_proposer_slashings_dropped.inc(1, [$v.error[0]])
v
@ -559,7 +559,7 @@ proc processSignedVoluntaryExit*(
beacon_voluntary_exits_received.inc()
else:
debug "Dropping voluntary exit", error = v.error
debug "Dropping voluntary exit", reason = v.error
beacon_voluntary_exits_dropped.inc(1, [$v.error[0]])
v
@ -605,7 +605,7 @@ proc processSyncCommitteeMessage*(
ok()
else:
debug "Dropping sync committee message", error = v.error
debug "Dropping sync committee message", reason = v.error
beacon_sync_committee_messages_dropped.inc(1, [$v.error[0]])
err(v.error())
@ -650,7 +650,7 @@ proc processSignedContributionAndProof*(
ok()
else:
debug "Dropping contribution", error = v.error
debug "Dropping contribution", reason = v.error
beacon_sync_committee_contributions_dropped.inc(1, [$v.error[0]])
err(v.error())

View File

@ -258,7 +258,8 @@ proc initFullNode(
index: data.index,
slot: data.signed_block_header.message.slot,
kzg_commitment: data.kzg_commitment,
versioned_hash: data.kzg_commitment.kzg_commitment_to_versioned_hash))
versioned_hash:
data.kzg_commitment.kzg_commitment_to_versioned_hash.to0xHex))
proc onBlockAdded(data: ForkedTrustedSignedBeaconBlock) =
let optimistic =
if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH:
@ -351,7 +352,7 @@ proc initFullNode(
blobQuarantine, getBeaconTime)
blockVerifier = proc(signedBlock: ForkedSignedBeaconBlock,
blobs: Opt[BlobSidecars], maybeFinalized: bool):
Future[Result[void, VerifierError]] {.async: (raises: [CancelledError], raw: true).} =
Future[Result[void, VerifierError]] =
# The design with a callback for block verification is unusual compared
# to the rest of the application, but fits with the general approach
# taken in the sync/request managers - this is an architectural compromise
@ -360,23 +361,27 @@ proc initFullNode(
MsgSource.gossip, signedBlock, blobs, maybeFinalized = maybeFinalized)
rmanBlockVerifier = proc(signedBlock: ForkedSignedBeaconBlock,
maybeFinalized: bool):
Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} =
Future[Result[void, VerifierError]] =
withBlck(signedBlock):
when consensusFork >= ConsensusFork.Deneb:
when typeof(forkyBlck).kind >= ConsensusFork.Deneb:
if not blobQuarantine[].hasBlobs(forkyBlck):
# We don't have all the blobs for this block, so we have
# to put it in blobless quarantine.
if not quarantine[].addBlobless(dag.finalizedHead.slot, forkyBlck):
err(VerifierError.UnviableFork)
Future.completed(
Result[void, VerifierError].err(VerifierError.UnviableFork),
"rmanBlockVerifier")
else:
err(VerifierError.MissingParent)
Future.completed(
Result[void, VerifierError].err(VerifierError.MissingParent),
"rmanBlockVerifier")
else:
let blobs = blobQuarantine[].popBlobs(forkyBlck.root, forkyBlck)
await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
Opt.some(blobs),
maybeFinalized = maybeFinalized)
else:
await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
Opt.none(BlobSidecars),
maybeFinalized = maybeFinalized)

View File

@ -14,7 +14,7 @@ import
../beacon_node,
../consensus_object_pools/[blockchain_dag, spec_cache, validator_change_pool],
../spec/[deposit_snapshots, eth2_merkleization, forks, network, validator],
../spec/datatypes/[phase0, altair, deneb],
../spec/mev/bellatrix_mev,
../validators/message_router_mev
export rest_utils
@ -1012,6 +1012,48 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
RestApiResponse.jsonMsgResponse(BlockValidationSuccess)
# https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.2#/Beacon/getBlindedBlock
# https://github.com/ethereum/beacon-APIs/blob/v2.4.2/apis/beacon/blocks/blinded_block.yaml
router.api2(MethodGet, "/eth/v1/beacon/blinded_blocks/{block_id}") do (
block_id: BlockIdent) -> RestApiResponse:
let
blockIdent = block_id.valueOr:
return RestApiResponse.jsonError(Http400, InvalidBlockIdValueError,
$error)
bid = node.getBlockId(blockIdent).valueOr:
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
contentType =
block:
let res = preferredContentType(jsonMediaType,
sszMediaType)
if res.isErr():
return RestApiResponse.jsonError(Http406, ContentNotAcceptableError)
res.get()
bdata = node.dag.getForkedBlock(bid).valueOr:
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
template respondSszOrJson(
signedMaybeBlindedBlck: auto, consensusFork: ConsensusFork): untyped =
if contentType == sszMediaType:
RestApiResponse.sszResponse(
signedMaybeBlindedBlck,
[("eth-consensus-version", consensusFork.toString())])
elif contentType == jsonMediaType:
RestApiResponse.jsonResponseBlock(
signedMaybeBlindedBlck,
consensusFork,
node.getBlockOptimistic(bdata),
node.dag.isFinalized(bid)
)
else:
RestApiResponse.jsonError(Http500, InvalidAcceptError)
withBlck(bdata.asSigned()):
when consensusFork <= ConsensusFork.Altair:
respondSszOrJson(forkyBlck, consensusFork)
else:
respondSszOrJson(toSignedBlindedBeaconBlock(forkyBlck), consensusFork)
# https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlindedBlock
# https://github.com/ethereum/beacon-APIs/blob/v2.4.0/apis/beacon/blocks/blinded_blocks.yaml
router.api(MethodPost, "/eth/v1/beacon/blinded_blocks") do (

View File

@ -40,6 +40,10 @@ type
# TODO this apparently is supposed to be SSZ-equivalent to Bytes32, but
# current spec doesn't ever SSZ-serialize it or hash_tree_root it
# TODO make `distinct` then add a REST serialization for it specifically, via
# basically to0xHex, then fix BlobSidecarInfoObject to use VersionedHash, not
# string, and rely on REST serialization, rather than serialize VersionedHash
# field manually
VersionedHash* = array[32, byte]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/deneb/beacon-chain.md#custom-types
@ -68,7 +72,7 @@ type
index*: BlobIndex
slot*: Slot
kzg_commitment*: KzgCommitment
versioned_hash*: VersionedHash
versioned_hash*: string # TODO should be string; VersionedHash not distinct
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#blobidentifier
BlobIdentifier* = object

View File

@ -204,6 +204,7 @@ RestJson.useDefaultSerializationFor(
bellatrix.ExecutionPayload,
bellatrix.ExecutionPayloadHeader,
bellatrix.SignedBeaconBlock,
bellatrix_mev.BlindedBeaconBlockBody,
bellatrix_mev.BlindedBeaconBlock,
bellatrix_mev.SignedBlindedBeaconBlock,
capella.BeaconBlock,
@ -561,6 +562,33 @@ proc jsonResponse*(t: typedesc[RestApiResponse], data: auto): RestApiResponse =
default
RestApiResponse.response(res, Http200, "application/json")
proc jsonResponseBlock*(t: typedesc[RestApiResponse],
data: ForkySignedBlindedBeaconBlock,
consensusFork: ConsensusFork,
execOpt: Opt[bool],
finalized: bool): RestApiResponse =
let
headers = [("eth-consensus-version", consensusFork.toString())]
res =
block:
var default: seq[byte]
try:
var stream = memoryOutput()
var writer = JsonWriter[RestJson].init(stream)
writer.beginRecord()
writer.writeField("version", consensusFork.toString())
if execOpt.isSome():
writer.writeField("execution_optimistic", execOpt.get())
writer.writeField("finalized", finalized)
writer.writeField("data", data)
writer.endRecord()
stream.getOutput(seq[byte])
except SerializationError:
default
except IOError:
default
RestApiResponse.response(res, Http200, "application/json", headers = headers)
proc jsonResponseBlock*(t: typedesc[RestApiResponse],
data: ForkedSignedBeaconBlock,
execOpt: Opt[bool],

View File

@ -202,7 +202,9 @@ type
ForkySignedBlindedBeaconBlock* =
phase0.SignedBeaconBlock |
altair.SignedBeaconBlock |
capella_mev.SignedBlindedBeaconBlock
bellatrix_mev.SignedBlindedBeaconBlock |
capella_mev.SignedBlindedBeaconBlock |
deneb_mev.SignedBlindedBeaconBlock
ForkedSignedBlindedBeaconBlock* = object
case kind*: ConsensusFork
@ -318,7 +320,8 @@ template kind*(
bellatrix.TrustedBeaconBlockBody |
bellatrix.SigVerifiedSignedBeaconBlock |
bellatrix.MsgTrustedSignedBeaconBlock |
bellatrix.TrustedSignedBeaconBlock]): ConsensusFork =
bellatrix.TrustedSignedBeaconBlock] |
bellatrix_mev.SignedBlindedBeaconBlock): ConsensusFork =
ConsensusFork.Bellatrix
template kind*(

View File

@ -7,11 +7,36 @@
{.push raises: [].}
from ../datatypes/base import Eth1Data
import ".."/datatypes/altair
from ".."/datatypes/bellatrix import ExecutionPayloadHeader
from ".."/eth2_merkleization import hash_tree_root
type
# https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#blindedbeaconblockbody
BlindedBeaconBlockBody* = object
randao_reveal*: ValidatorSig
eth1_data*: Eth1Data
graffiti*: GraffitiBytes
proposer_slashings*: List[ProposerSlashing, Limit MAX_PROPOSER_SLASHINGS]
attester_slashings*: List[AttesterSlashing, Limit MAX_ATTESTER_SLASHINGS]
attestations*: List[Attestation, Limit MAX_ATTESTATIONS]
deposits*: List[Deposit, Limit MAX_DEPOSITS]
voluntary_exits*: List[SignedVoluntaryExit, Limit MAX_VOLUNTARY_EXITS]
sync_aggregate*: SyncAggregate
execution_payload_header*: bellatrix.ExecutionPayloadHeader
# https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#blindedbeaconblock
BlindedBeaconBlock* = object
slot*: Slot
proposer_index*: uint64
parent_root*: Eth2Digest
state_root*: Eth2Digest
body*: BlindedBeaconBlockBody
# https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#signedblindedbeaconblock
SignedBlindedBeaconBlock* = object
message*: BlindedBeaconBlock
signature*: ValidatorSig
func shortLog*(v: BlindedBeaconBlock): auto =
(
@ -40,3 +65,40 @@ func shortLog*(v: SignedBlindedBeaconBlock): auto =
blck: shortLog(default(BlindedBeaconBlock)),
signature: ""
)
func toSignedBlindedBeaconBlock*(blck: bellatrix.SignedBeaconBlock):
SignedBlindedBeaconBlock =
SignedBlindedBeaconBlock(
message: BlindedBeaconBlock(
slot: blck.message.slot,
proposer_index: blck.message.proposer_index,
parent_root: blck.message.parent_root,
state_root: blck.message.state_root,
body: BlindedBeaconBlockBody(
randao_reveal: blck.message.body.randao_reveal,
eth1_data: blck.message.body.eth1_data,
graffiti: blck.message.body.graffiti,
proposer_slashings: blck.message.body.proposer_slashings,
attester_slashings: blck.message.body.attester_slashings,
attestations: blck.message.body.attestations,
deposits: blck.message.body.deposits,
voluntary_exits: blck.message.body.voluntary_exits,
sync_aggregate: blck.message.body.sync_aggregate,
execution_payload_header: ExecutionPayloadHeader(
parent_hash: blck.message.body.execution_payload.parent_hash,
fee_recipient: blck.message.body.execution_payload.fee_recipient,
state_root: blck.message.body.execution_payload.state_root,
receipts_root: blck.message.body.execution_payload.receipts_root,
logs_bloom: blck.message.body.execution_payload.logs_bloom,
prev_randao: blck.message.body.execution_payload.prev_randao,
block_number: blck.message.body.execution_payload.block_number,
gas_limit: blck.message.body.execution_payload.gas_limit,
gas_used: blck.message.body.execution_payload.gas_used,
timestamp: blck.message.body.execution_payload.timestamp,
extra_data: blck.message.body.execution_payload.extra_data,
base_fee_per_gas:
blck.message.body.execution_payload.base_fee_per_gas,
block_hash: blck.message.body.execution_payload.block_hash,
transactions_root:
hash_tree_root(blck.message.body.execution_payload.transactions)))),
signature: blck.signature)
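A short usage sketch, not part of the commit, assuming this module's imports are in scope and using a default-initialized block purely for illustration: the conversion copies every body field and swaps the full execution payload for its header, so the blinded block commits to the omitted transactions through their `hash_tree_root`:
```nim
# Hypothetical illustration of the conversion above.
let
  signedBlock = default(bellatrix.SignedBeaconBlock)  # placeholder block
  blinded = toSignedBlindedBeaconBlock(signedBlock)
doAssert blinded.message.body.execution_payload_header.transactions_root ==
  hash_tree_root(signedBlock.message.body.execution_payload.transactions)
```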

View File

@ -11,6 +11,7 @@ import ".."/datatypes/[altair, capella]
from stew/byteutils import to0xHex
from ../datatypes/bellatrix import ExecutionAddress
from ../eth2_merkleization import hash_tree_root
type
# https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#validatorregistrationv1
@ -116,3 +117,43 @@ func shortLog*(v: SignedBlindedBeaconBlock): auto =
blck: shortLog(v.message),
signature: shortLog(v.signature)
)
func toSignedBlindedBeaconBlock*(blck: capella.SignedBeaconBlock):
SignedBlindedBeaconBlock =
SignedBlindedBeaconBlock(
message: BlindedBeaconBlock(
slot: blck.message.slot,
proposer_index: blck.message.proposer_index,
parent_root: blck.message.parent_root,
state_root: blck.message.state_root,
body: BlindedBeaconBlockBody(
randao_reveal: blck.message.body.randao_reveal,
eth1_data: blck.message.body.eth1_data,
graffiti: blck.message.body.graffiti,
proposer_slashings: blck.message.body.proposer_slashings,
attester_slashings: blck.message.body.attester_slashings,
attestations: blck.message.body.attestations,
deposits: blck.message.body.deposits,
voluntary_exits: blck.message.body.voluntary_exits,
sync_aggregate: blck.message.body.sync_aggregate,
execution_payload_header: ExecutionPayloadHeader(
parent_hash: blck.message.body.execution_payload.parent_hash,
fee_recipient: blck.message.body.execution_payload.fee_recipient,
state_root: blck.message.body.execution_payload.state_root,
receipts_root: blck.message.body.execution_payload.receipts_root,
logs_bloom: blck.message.body.execution_payload.logs_bloom,
prev_randao: blck.message.body.execution_payload.prev_randao,
block_number: blck.message.body.execution_payload.block_number,
gas_limit: blck.message.body.execution_payload.gas_limit,
gas_used: blck.message.body.execution_payload.gas_used,
timestamp: blck.message.body.execution_payload.timestamp,
extra_data: blck.message.body.execution_payload.extra_data,
base_fee_per_gas:
blck.message.body.execution_payload.base_fee_per_gas,
block_hash: blck.message.body.execution_payload.block_hash,
transactions_root:
hash_tree_root(blck.message.body.execution_payload.transactions),
withdrawals_root:
hash_tree_root(blck.message.body.execution_payload.withdrawals)),
bls_to_execution_changes: blck.message.body.bls_to_execution_changes)),
signature: blck.signature)

View File

@ -11,6 +11,7 @@ import ".."/datatypes/[altair, deneb]
from stew/byteutils import to0xHex
from ".."/datatypes/capella import SignedBLSToExecutionChange
from ".."/eth2_merkleization import hash_tree_root
type
# https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/deneb/builder.md#builderbid
@ -103,3 +104,44 @@ func shortLog*(v: SignedBlindedBeaconBlock): auto =
blck: shortLog(v.message),
signature: shortLog(v.signature)
)
func toSignedBlindedBeaconBlock*(blck: deneb.SignedBeaconBlock):
SignedBlindedBeaconBlock =
SignedBlindedBeaconBlock(
message: BlindedBeaconBlock(
slot: blck.message.slot,
proposer_index: blck.message.proposer_index,
parent_root: blck.message.parent_root,
state_root: blck.message.state_root,
body: BlindedBeaconBlockBody(
randao_reveal: blck.message.body.randao_reveal,
eth1_data: blck.message.body.eth1_data,
graffiti: blck.message.body.graffiti,
proposer_slashings: blck.message.body.proposer_slashings,
attester_slashings: blck.message.body.attester_slashings,
attestations: blck.message.body.attestations,
deposits: blck.message.body.deposits,
voluntary_exits: blck.message.body.voluntary_exits,
sync_aggregate: blck.message.body.sync_aggregate,
execution_payload_header: ExecutionPayloadHeader(
parent_hash: blck.message.body.execution_payload.parent_hash,
fee_recipient: blck.message.body.execution_payload.fee_recipient,
state_root: blck.message.body.execution_payload.state_root,
receipts_root: blck.message.body.execution_payload.receipts_root,
logs_bloom: blck.message.body.execution_payload.logs_bloom,
prev_randao: blck.message.body.execution_payload.prev_randao,
block_number: blck.message.body.execution_payload.block_number,
gas_limit: blck.message.body.execution_payload.gas_limit,
gas_used: blck.message.body.execution_payload.gas_used,
timestamp: blck.message.body.execution_payload.timestamp,
extra_data: blck.message.body.execution_payload.extra_data,
base_fee_per_gas:
blck.message.body.execution_payload.base_fee_per_gas,
block_hash: blck.message.body.execution_payload.block_hash,
transactions_root:
hash_tree_root(blck.message.body.execution_payload.transactions),
withdrawals_root:
hash_tree_root(blck.message.body.execution_payload.withdrawals)),
bls_to_execution_changes: blck.message.body.bls_to_execution_changes,
blob_kzg_commitments: blck.message.body.blob_kzg_commitments)),
signature: blck.signature)

View File

@ -22,13 +22,13 @@ proc registerValidator*(body: seq[SignedValidatorRegistrationV1]
proc getHeaderCapella*(slot: Slot,
parent_hash: Eth2Digest,
pubkey: ValidatorPubKey
): RestResponse[GetHeaderResponseCapella] {.
): RestPlainResponse {.
rest, endpoint: "/eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}",
meth: MethodGet, connection: {Dedicated, Close}.}
## https://github.com/ethereum/builder-specs/blob/v0.4.0/apis/builder/header.yaml
proc submitBlindedBlock*(body: capella_mev.SignedBlindedBeaconBlock
): RestResponse[SubmitBlindedBlockResponseCapella] {.
): RestPlainResponse {.
rest, endpoint: "/eth/v1/builder/blinded_blocks",
meth: MethodPost, connection: {Dedicated, Close}.}
## https://github.com/ethereum/builder-specs/blob/v0.4.0/apis/builder/blinded_blocks.yaml

View File

@ -15,13 +15,13 @@ export chronos, client, rest_types, eth2_rest_serialization
proc getHeaderDeneb*(slot: Slot,
parent_hash: Eth2Digest,
pubkey: ValidatorPubKey
): RestResponse[GetHeaderResponseDeneb] {.
): RestPlainResponse {.
rest, endpoint: "/eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}",
meth: MethodGet, connection: {Dedicated, Close}.}
## https://github.com/ethereum/builder-specs/blob/v0.4.0/apis/builder/header.yaml
proc submitBlindedBlock*(body: deneb_mev.SignedBlindedBeaconBlock
): RestResponse[SubmitBlindedBlockResponseDeneb] {.
): RestPlainResponse {.
rest, endpoint: "/eth/v1/builder/blinded_blocks",
meth: MethodPost, connection: {Dedicated, Close}.}
## https://github.com/ethereum/builder-specs/blob/v0.4.0/apis/builder/blinded_blocks.yaml

View File

@ -39,7 +39,7 @@ const
type
BlockVerifierFn* =
proc(signedBlock: ForkedSignedBeaconBlock, maybeFinalized: bool):
Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).}
Future[Result[void, VerifierError]] {.gcsafe, raises: [].}
InhibitFn* = proc: bool {.gcsafe, raises:[].}
RequestManager* = object
@ -49,8 +49,8 @@ type
quarantine: ref Quarantine
blobQuarantine: ref BlobQuarantine
blockVerifier: BlockVerifierFn
blockLoopFuture: Future[void].Raising([CancelledError])
blobLoopFuture: Future[void].Raising([CancelledError])
blockLoopFuture: Future[void]
blobLoopFuture: Future[void]
func shortLog*(x: seq[Eth2Digest]): string =
"[" & x.mapIt(shortLog(it)).join(", ") & "]"
@ -104,7 +104,7 @@ proc checkResponse(idList: seq[BlobIdentifier],
return false
true
proc requestBlocksByRoot(rman: RequestManager, items: seq[Eth2Digest]) {.async: (raises: [CancelledError]).} =
proc requestBlocksByRoot(rman: RequestManager, items: seq[Eth2Digest]) {.async.} =
var peer: Peer
try:
peer = await rman.network.peerPool.acquire()
@ -171,13 +171,19 @@ proc requestBlocksByRoot(rman: RequestManager, items: seq[Eth2Digest]) {.async:
peer = peer, blocks = shortLog(items), err = blocks.error()
peer.updateScore(PeerScoreNoValues)
except CancelledError as exc:
raise exc
except CatchableError as exc:
peer.updateScore(PeerScoreNoValues)
debug "Error while fetching blocks by root", exc = exc.msg,
items = shortLog(items), peer = peer, peer_score = peer.getScore()
raise exc
finally:
if not(isNil(peer)):
rman.network.peerPool.release(peer)
proc fetchBlobsFromNetwork(self: RequestManager,
idList: seq[BlobIdentifier])
{.async: (raises: [CancelledError]).} =
idList: seq[BlobIdentifier]) {.async.} =
var peer: Peer
try:
@ -185,7 +191,7 @@ proc fetchBlobsFromNetwork(self: RequestManager,
debug "Requesting blobs by root", peer = peer, blobs = shortLog(idList),
peer_score = peer.getScore()
let blobs = await blobSidecarsByRoot(peer, BlobIdentifierList idList)
let blobs = (await blobSidecarsByRoot(peer, BlobIdentifierList idList))
if blobs.isOk:
let ublobs = blobs.get()
@ -213,11 +219,18 @@ proc fetchBlobsFromNetwork(self: RequestManager,
peer = peer, blobs = shortLog(idList), err = blobs.error()
peer.updateScore(PeerScoreNoValues)
except CancelledError as exc:
raise exc
except CatchableError as exc:
peer.updateScore(PeerScoreNoValues)
debug "Error while fetching blobs by root", exc = exc.msg,
idList = shortLog(idList), peer = peer, peer_score = peer.getScore()
raise exc
finally:
if not(isNil(peer)):
self.network.peerPool.release(peer)
proc requestManagerBlockLoop(rman: RequestManager) {.async: (raises: [CancelledError]).} =
proc requestManagerBlockLoop(rman: RequestManager) {.async.} =
while true:
# TODO This polling could be replaced with an AsyncEvent that is fired
# from the quarantine when there's work to do
@ -232,19 +245,33 @@ proc requestManagerBlockLoop(rman: RequestManager) {.async: (raises: [CancelledE
continue
debug "Requesting detected missing blocks", blocks = shortLog(blocks)
let start = SyncMoment.now(0)
try:
let start = SyncMoment.now(0)
var workers: array[PARALLEL_REQUESTS, Future[void].Raising([CancelledError])]
var workers: array[PARALLEL_REQUESTS, Future[void]]
for i in 0 ..< PARALLEL_REQUESTS:
workers[i] = rman.requestBlocksByRoot(blocks)
for i in 0 ..< PARALLEL_REQUESTS:
workers[i] = rman.requestBlocksByRoot(blocks)
await allFutures(workers)
await allFutures(workers)
let finish = SyncMoment.now(uint64(len(blocks)))
let finish = SyncMoment.now(uint64(len(blocks)))
var succeed = 0
for worker in workers:
if worker.completed():
inc(succeed)
debug "Request manager block tick", blocks = shortLog(blocks),
succeed = succeed,
failed = (len(workers) - succeed),
sync_speed = speed(start, finish)
except CancelledError:
break
except CatchableError as exc:
warn "Unexpected error in request manager block loop", exc = exc.msg
debug "Request manager block tick", blocks = shortLog(blocks),
sync_speed = speed(start, finish)
proc getMissingBlobs(rman: RequestManager): seq[BlobIdentifier] =
let
@ -281,28 +308,42 @@ proc getMissingBlobs(rman: RequestManager): seq[BlobIdentifier] =
rman.quarantine[].removeBlobless(blobless)
fetches
proc requestManagerBlobLoop(rman: RequestManager) {.async: (raises: [CancelledError]).} =
proc requestManagerBlobLoop(rman: RequestManager) {.async.} =
while true:
# TODO This polling could be replaced with an AsyncEvent that is fired
# from the quarantine when there's work to do
# TODO This polling could be replaced with an AsyncEvent that is fired
# from the quarantine when there's work to do
await sleepAsync(POLL_INTERVAL)
if rman.inhibit():
continue
let fetches = rman.getMissingBlobs()
if fetches.len > 0:
debug "Requesting detected missing blobs", blobs = shortLog(fetches)
let start = SyncMoment.now(0)
var workers: array[PARALLEL_REQUESTS, Future[void].Raising([CancelledError])]
for i in 0 ..< PARALLEL_REQUESTS:
workers[i] = rman.fetchBlobsFromNetwork(fetches)
let fetches = rman.getMissingBlobs()
if fetches.len > 0:
debug "Requesting detected missing blobs", blobs = shortLog(fetches)
try:
let start = SyncMoment.now(0)
var workers: array[PARALLEL_REQUESTS, Future[void]]
for i in 0 ..< PARALLEL_REQUESTS:
workers[i] = rman.fetchBlobsFromNetwork(fetches)
await allFutures(workers)
let finish = SyncMoment.now(uint64(len(fetches)))
await allFutures(workers)
let finish = SyncMoment.now(uint64(len(fetches)))
debug "Request manager blob tick",
blobs_count = len(fetches),
sync_speed = speed(start, finish)
var succeed = 0
for worker in workers:
if worker.finished() and not(worker.failed()):
inc(succeed)
debug "Request manager blob tick",
blobs_count = len(fetches),
succeed = succeed,
failed = (len(workers) - succeed),
sync_speed = speed(start, finish)
except CancelledError:
break
except CatchableError as exc:
warn "Unexpected error in request manager blob loop", exc = exc.msg
proc start*(rman: var RequestManager) =
## Start Request Manager's loops.

View File

@ -43,7 +43,7 @@ type
NoMonitor
SyncWorker*[A, B] = object
future: Future[void].Raising([CancelledError])
future: Future[void]
status: SyncWorkerStatus
SyncManager*[A, B] = ref object
@ -158,9 +158,8 @@ proc newSyncManager*[A, B](pool: PeerPool[A, B],
res.initQueue()
res
proc getBlocks[A, B](man: SyncManager[A, B], peer: A,
req: SyncRequest): Future[BeaconBlocksRes] {.
async: (raises: [CancelledError], raw: true).} =
proc getBlocks*[A, B](man: SyncManager[A, B], peer: A,
req: SyncRequest): Future[BeaconBlocksRes] {.async.} =
mixin getScore, `==`
logScope:
@ -172,8 +171,21 @@ proc getBlocks[A, B](man: SyncManager[A, B], peer: A,
doAssert(not(req.isEmpty()), "Request must not be empty!")
debug "Requesting blocks from peer", request = req
try:
let res = await beaconBlocksByRange_v2(peer, req.slot, req.count, 1'u64)
beaconBlocksByRange_v2(peer, req.slot, req.count, 1'u64)
if res.isErr():
debug "Error, while reading getBlocks response", request = req,
error = $res.error()
return
return res
except CancelledError:
debug "Interrupt, while waiting getBlocks response", request = req
return
except CatchableError as exc:
debug "Error, while waiting getBlocks response", request = req,
errName = exc.name, errMsg = exc.msg
return
proc shouldGetBlobs[A, B](man: SyncManager[A, B], e: Epoch): bool =
let wallEpoch = man.getLocalWallSlot().epoch
@ -182,8 +194,8 @@ proc shouldGetBlobs[A, B](man: SyncManager[A, B], e: Epoch): bool =
e >= wallEpoch - man.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS)
proc getBlobSidecars[A, B](man: SyncManager[A, B], peer: A,
req: SyncRequest): Future[BlobSidecarsRes]
{.async: (raises: [CancelledError], raw: true).} =
req: SyncRequest
): Future[BlobSidecarsRes] {.async.} =
mixin getScore, `==`
logScope:
@ -195,7 +207,21 @@ proc getBlobSidecars[A, B](man: SyncManager[A, B], peer: A,
doAssert(not(req.isEmpty()), "Request must not be empty!")
debug "Requesting blobs sidecars from peer", request = req
blobSidecarsByRange(peer, req.slot, req.count)
try:
let res = await blobSidecarsByRange(peer, req.slot, req.count)
if res.isErr():
debug "Error, while reading blobSidecarsByRange response", request = req,
error = $res.error()
return
return res
except CancelledError:
debug "Interrupt, while waiting blobSidecarsByRange response", request = req
return
except CatchableError as exc:
debug "Error, while waiting blobSidecarsByRange response", request = req,
errName = exc.name, errMsg = exc.msg
return
proc remainingSlots(man: SyncManager): uint64 =
let
@ -256,8 +282,7 @@ func checkBlobs(blobs: seq[BlobSidecars]): Result[void, string] =
? blob_sidecar[].verify_blob_sidecar_inclusion_proof()
ok()
proc syncStep[A, B](man: SyncManager[A, B], index: int, peer: A)
{.async: (raises: [CancelledError]).} =
proc syncStep[A, B](man: SyncManager[A, B], index: int, peer: A) {.async.} =
logScope:
peer_score = peer.getScore()
peer_speed = peer.netKbps()
@ -297,11 +322,17 @@ proc syncStep[A, B](man: SyncManager[A, B], index: int, peer: A)
trace "Updating peer's status information", wall_clock_slot = wallSlot,
remote_head_slot = peerSlot, local_head_slot = headSlot
if not await peer.updateStatus():
peer.updateScore(PeerScoreNoStatus)
debug "Failed to get remote peer's status, exiting",
peer_head_slot = peerSlot
try:
let res = await peer.updateStatus()
if not(res):
peer.updateScore(PeerScoreNoStatus)
debug "Failed to get remote peer's status, exiting",
peer_head_slot = peerSlot
return
except CatchableError as exc:
debug "Unexpected exception while updating peer's status",
peer_head_slot = peerSlot, errName = exc.name, errMsg = exc.msg
return
let newPeerSlot = peer.getHeadSlot()
@ -388,97 +419,110 @@ proc syncStep[A, B](man: SyncManager[A, B], index: int, peer: A)
man.workers[index].status = SyncWorkerStatus.Downloading
let blocks = (await man.getBlocks(peer, req)).valueOr:
peer.updateScore(PeerScoreNoValues)
man.queue.push(req)
debug "Failed to receive blocks on request", request = req
return
try:
let blocks = await man.getBlocks(peer, req)
if blocks.isErr():
peer.updateScore(PeerScoreNoValues)
man.queue.push(req)
debug "Failed to receive blocks on request", request = req
return
let blockData = blocks.get().asSeq()
let blockSmap = getShortMap(req, blockData)
debug "Received blocks on request", blocks_count = len(blockData),
blocks_map = blockSmap, request = req
let blockSmap = getShortMap(req, blocks.asSeq())
debug "Received blocks on request", blocks_count = len(blocks),
blocks_map = blockSmap, request = req
let slots = mapIt(blockData, it[].slot)
if not(checkResponse(req, slots)):
peer.updateScore(PeerScoreBadResponse)
man.queue.push(req)
warn "Received blocks sequence is not in requested range",
blocks_count = len(blockData), blocks_map = blockSmap,
request = req
return
let slots = mapIt(blocks, it[].slot)
if not(checkResponse(req, slots)):
peer.updateScore(PeerScoreBadResponse)
man.queue.push(req)
warn "Received blocks sequence is not in requested range",
blocks_count = len(blocks), blocks_map = blockSmap,
request = req
return
func combine(acc: seq[Slot], cur: Slot): seq[Slot] =
var copy = acc
if copy[copy.len-1] != cur:
copy.add(cur)
copy
func combine(acc: seq[Slot], cur: Slot): seq[Slot] =
var copy = acc
if copy[copy.len-1] != cur:
copy.add(cur)
copy
let blobData =
if man.shouldGetBlobs(req.slot.epoch):
let blobs = await man.getBlobSidecars(peer, req)
if blobs.isErr():
peer.updateScore(PeerScoreNoValues)
man.queue.push(req)
debug "Failed to receive blobs on request", request = req
return
let blobData = blobs.get().asSeq()
let blobSmap = getShortMap(req, blobData)
debug "Received blobs on request", blobs_count = len(blobData),
blobs_map = blobSmap, request = req
let blobData =
if man.shouldGetBlobs(req.slot.epoch):
let blobs = (await man.getBlobSidecars(peer, req)).valueOr:
peer.updateScore(PeerScoreNoValues)
man.queue.push(req)
debug "Failed to receive blobs on request", request = req
return
let blobSmap = getShortMap(req, blobs.asSeq())
debug "Received blobs on request", blobs_count = len(blobs),
blobs_map = blobSmap, request = req
if len(blobs) > 0:
let slots = mapIt(blobs, it[].signed_block_header.message.slot)
let uniqueSlots = foldl(slots, combine(a, b), @[slots[0]])
if not(checkResponse(req, uniqueSlots)):
if len(blobData) > 0:
let slots = mapIt(blobData, it[].signed_block_header.message.slot)
let uniqueSlots = foldl(slots, combine(a, b), @[slots[0]])
if not(checkResponse(req, uniqueSlots)):
peer.updateScore(PeerScoreBadResponse)
man.queue.push(req)
warn "Received blobs sequence is not in requested range",
blobs_count = len(blobData), blobs_map = getShortMap(req, blobData),
request = req
return
let groupedBlobs = groupBlobs(req, blockData, blobData)
if groupedBlobs.isErr():
peer.updateScore(PeerScoreNoValues)
man.queue.push(req)
info "Received blobs sequence is inconsistent",
blobs_map = getShortMap(req, blobData), request = req, msg=groupedBlobs.error()
return
if (let checkRes = groupedBlobs.get.checkBlobs(); checkRes.isErr):
peer.updateScore(PeerScoreBadResponse)
man.queue.push(req)
warn "Received blobs sequence is not in requested range",
blobs_count = len(blobs), blobs_map = blobSmap,
request = req
warn "Received blobs sequence is invalid",
blobs_count = len(blobData),
blobs_map = getShortMap(req, blobData),
request = req,
msg = checkRes.error
return
let groupedBlobs = groupBlobs(req, blocks.asSeq(), blobs.asSeq())
if groupedBlobs.isErr():
peer.updateScore(PeerScoreNoValues)
man.queue.push(req)
info "Received blobs sequence is inconsistent",
blobs_map = blobSmap, request = req, msg=groupedBlobs.error()
return
if (let checkRes = groupedBlobs.get.checkBlobs(); checkRes.isErr):
peer.updateScore(PeerScoreBadResponse)
man.queue.push(req)
warn "Received blobs sequence is invalid",
blobs_map = blobSmap, request = req, msg=groupedBlobs.error()
return
Opt.some(groupedBlobs.get())
else:
Opt.none(seq[BlobSidecars])
Opt.some(groupedBlobs.get())
else:
Opt.none(seq[BlobSidecars])
if len(blocks) == 0 and man.direction == SyncQueueKind.Backward and
req.contains(man.getSafeSlot()):
# The sync protocol does not distinguish between:
# - All requested slots are empty
# - Peer does not have data available about requested range
#
# However, we include the `backfill` slot in backward sync requests.
# If we receive an empty response to a request covering that slot,
# we know that the response is incomplete and can descore.
peer.updateScore(PeerScoreNoValues)
if len(blockData) == 0 and man.direction == SyncQueueKind.Backward and
req.contains(man.getSafeSlot()):
# The sync protocol does not distinguish between:
# - All requested slots are empty
# - Peer does not have data available about requested range
#
# However, we include the `backfill` slot in backward sync requests.
# If we receive an empty response to a request covering that slot,
# we know that the response is incomplete and can descore.
peer.updateScore(PeerScoreNoValues)
man.queue.push(req)
debug "Response does not include known-to-exist block", request = req
return
# Scoring will happen in `syncUpdate`.
man.workers[index].status = SyncWorkerStatus.Queueing
let
peerFinalized = peer.getFinalizedEpoch().start_slot()
lastSlot = req.slot + req.count
# The peer claims the block is finalized - our own block processing will
# verify this point down the line
# TODO descore peers that lie
maybeFinalized = lastSlot < peerFinalized
await man.queue.push(req, blockData, blobData, maybeFinalized, proc() =
man.workers[index].status = SyncWorkerStatus.Processing)
except CatchableError as exc:
man.queue.push(req)
debug "Response does not include known-to-exist block", request = req
debug "Unexpected exception while receiving blocks", request = req,
errName = exc.name, errMsg = exc.msg
return
# Scoring will happen in `syncUpdate`.
man.workers[index].status = SyncWorkerStatus.Queueing
let
peerFinalized = peer.getFinalizedEpoch().start_slot()
lastSlot = req.slot + req.count
# The peer claims the block is finalized - our own block processing will
# verify this point down the line
# TODO descore peers that lie
maybeFinalized = lastSlot < peerFinalized
await man.queue.push(req, blocks.asSeq(), blobData, maybeFinalized, proc() =
man.workers[index].status = SyncWorkerStatus.Processing)
proc syncWorker[A, B](man: SyncManager[A, B], index: int) {.async: (raises: [CancelledError]).} =
proc syncWorker[A, B](man: SyncManager[A, B], index: int) {.async.} =
mixin getKey, getScore, getHeadSlot
logScope:
@ -489,21 +533,30 @@ proc syncWorker[A, B](man: SyncManager[A, B], index: int) {.async: (raises: [Can
debug "Starting syncing worker"
var peer: A = nil
try:
while true:
man.workers[index].status = SyncWorkerStatus.Sleeping
# This event is going to be set until we are not in sync with network
await man.notInSyncEvent.wait()
man.workers[index].status = SyncWorkerStatus.WaitingPeer
peer = await man.pool.acquire()
await man.syncStep(index, peer)
man.pool.release(peer)
peer = nil
finally:
if not(isNil(peer)):
man.pool.release(peer)
while true:
var peer: A = nil
let doBreak =
try:
man.workers[index].status = SyncWorkerStatus.Sleeping
# This event is going to be set until we are not in sync with network
await man.notInSyncEvent.wait()
man.workers[index].status = SyncWorkerStatus.WaitingPeer
peer = await man.pool.acquire()
await man.syncStep(index, peer)
man.pool.release(peer)
false
except CancelledError:
if not(isNil(peer)):
man.pool.release(peer)
true
except CatchableError as exc:
debug "Unexpected exception in sync worker",
peer = peer, peer_score = peer.getScore(),
peer_speed = peer.netKbps(),
errName = exc.name, errMsg = exc.msg
true
if doBreak:
break
debug "Sync worker stopped"
@ -540,10 +593,34 @@ proc getWorkersStats[A, B](man: SyncManager[A, B]): tuple[map: string,
map[i] = ch
(map, sleeping, waiting, pending)
proc startWorkers[A, B](man: SyncManager[A, B]) =
proc guardTask[A, B](man: SyncManager[A, B]) {.async.} =
logScope:
index = index
sync_ident = man.ident
direction = man.direction
topics = "syncman"
var pending: array[SyncWorkersCount, Future[void]]
# Starting all the synchronization workers.
for i in 0 ..< len(man.workers):
man.workers[i].future = syncWorker[A, B](man, i)
let future = syncWorker[A, B](man, i)
man.workers[i].future = future
pending[i] = future
# Wait for synchronization worker's failure and replace it with new one.
while true:
let failFuture = await one(pending)
let index = pending.find(failFuture)
if failFuture.failed():
warn "Synchronization worker stopped working unexpectedly with an error",
errName = failFuture.error.name, errMsg = failFuture.error.msg
else:
warn "Synchronization worker stopped working unexpectedly without error"
let future = syncWorker[A, B](man, index)
man.workers[index].future = future
pending[index] = future
proc toTimeLeftString*(d: Duration): string =
if d == InfiniteDuration:
@ -571,9 +648,11 @@ proc toTimeLeftString*(d: Duration): string =
res = res & "00m"
res
proc syncClose[A, B](man: SyncManager[A, B],
proc syncClose[A, B](man: SyncManager[A, B], guardTaskFut: Future[void],
speedTaskFut: Future[void]) {.async.} =
var pending: seq[FutureBase]
if not(guardTaskFut.finished()):
pending.add(guardTaskFut.cancelAndWait())
if not(speedTaskFut.finished()):
pending.add(speedTaskFut.cancelAndWait())
for worker in man.workers:
@ -590,11 +669,11 @@ proc syncLoop[A, B](man: SyncManager[A, B]) {.async.} =
mixin getKey, getScore
var pauseTime = 0
man.startWorkers()
var guardTaskFut = man.guardTask()
debug "Synchronization loop started"
proc averageSpeedTask() {.async: (raises: [CancelledError]).} =
proc averageSpeedTask() {.async.} =
while true:
# Reset sync speeds between each loss-of-sync event
man.avgSyncSpeed = 0
@ -624,7 +703,7 @@ proc syncLoop[A, B](man: SyncManager[A, B]) {.async.} =
stamp = newStamp
let averageSpeedTaskFut = averageSpeedTask()
var averageSpeedTaskFut = averageSpeedTask()
while true:
let wallSlot = man.getLocalWallSlot()
@ -709,7 +788,7 @@ proc syncLoop[A, B](man: SyncManager[A, B]) {.async.} =
of SyncQueueKind.Forward:
if man.inProgress:
if SyncManagerFlag.NoMonitor in man.flags:
await man.syncClose(averageSpeedTaskFut)
await man.syncClose(guardTaskFut, averageSpeedTaskFut)
man.inProgress = false
debug "Forward synchronization process finished, exiting",
wall_head_slot = wallSlot, local_head_slot = headSlot,
@ -730,8 +809,10 @@ proc syncLoop[A, B](man: SyncManager[A, B]) {.async.} =
of SyncQueueKind.Backward:
# Backward syncing is going to be executed only once, so we exit loop
# and stop all pending tasks which belongs to this instance (sync
# workers, speed calculation task).
await man.syncClose(averageSpeedTaskFut)
# workers, guard task and speed calculation task).
# We first need to cancel and wait for guard task, because otherwise
# it will be able to restore cancelled workers.
await man.syncClose(guardTaskFut, averageSpeedTaskFut)
man.inProgress = false
debug "Backward synchronization process finished, exiting",
wall_head_slot = wallSlot, local_head_slot = headSlot,

View File

@ -27,7 +27,7 @@ type
ProcessingCallback* = proc() {.gcsafe, raises: [].}
BlockVerifier* = proc(signedBlock: ForkedSignedBeaconBlock,
blobs: Opt[BlobSidecars], maybeFinalized: bool):
Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).}
Future[Result[void, VerifierError]] {.gcsafe, raises: [].}
SyncQueueKind* {.pure.} = enum
Forward, Backward
@ -50,7 +50,7 @@ type
item*: T
SyncWaiter* = ref object
future: Future[void].Raising([CancelledError])
future: Future[void]
reset: bool
RewindPoint = object
@ -311,9 +311,9 @@ proc wakeupWaiters[T](sq: SyncQueue[T], reset = false) =
if not(item.future.finished()):
item.future.complete()
proc waitForChanges[T](sq: SyncQueue[T]): Future[bool] {.async: (raises: [CancelledError]).} =
proc waitForChanges[T](sq: SyncQueue[T]): Future[bool] {.async.} =
## Create new waiter and wait for completion from `wakeupWaiters()`.
let waitfut = Future[void].Raising([CancelledError]).init("SyncQueue.waitForChanges")
var waitfut = newFuture[void]("SyncQueue.waitForChanges")
let waititem = SyncWaiter(future: waitfut)
sq.waiters.add(waititem)
try:
@ -322,7 +322,7 @@ proc waitForChanges[T](sq: SyncQueue[T]): Future[bool] {.async: (raises: [Cancel
finally:
sq.waiters.delete(sq.waiters.find(waititem))
proc wakeupAndWaitWaiters[T](sq: SyncQueue[T]) {.async: (raises: [CancelledError]).} =
proc wakeupAndWaitWaiters[T](sq: SyncQueue[T]) {.async.} =
## This procedure will perform wakeupWaiters(true) and blocks until last
## waiter will be awakened.
var waitChanges = sq.waitForChanges()
@ -333,7 +333,7 @@ proc clearAndWakeup*[T](sq: SyncQueue[T]) =
sq.pending.clear()
sq.wakeupWaiters(true)
proc resetWait*[T](sq: SyncQueue[T], toSlot: Option[Slot]) {.async: (raises: [CancelledError]).} =
proc resetWait*[T](sq: SyncQueue[T], toSlot: Option[Slot]) {.async.} =
## Perform reset of all the blocked waiters in SyncQueue.
##
## We adding one more waiter to the waiters sequence and
@ -610,7 +610,7 @@ proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T],
data: seq[ref ForkedSignedBeaconBlock],
blobs: Opt[seq[BlobSidecars]],
maybeFinalized: bool = false,
processingCb: ProcessingCallback = nil) {.async: (raises: [CancelledError]).} =
processingCb: ProcessingCallback = nil) {.async.} =
logScope:
sync_ident = sq.ident
topics = "syncman"

View File

@ -374,6 +374,14 @@ proc checkOffsetStatus(node: BeaconNodeServerRef, offset: TimeOffset) =
"Beacon node has acceptable time offset")
node.updateStatus(RestBeaconNodeStatus.Offline, failure)
proc disableNimbusExtensions(node: BeaconNodeServerRef) =
node.features.incl(RestBeaconNodeFeature.NoNimbusExtensions)
if node.status == RestBeaconNodeStatus.BrokenClock:
let failure = ApiNodeFailure.init(ApiFailure.NoError,
"disableNimbusExtensions()", node, 200,
"Nimbus extensions no longer available")
node.updateStatus(RestBeaconNodeStatus.Offline, failure)
proc runTimeMonitor(service: FallbackServiceRef,
node: BeaconNodeServerRef) {.async.} =
const NimbusExtensionsLog = "Beacon node does not support Nimbus extensions"
@ -398,10 +406,8 @@ proc runTimeMonitor(service: FallbackServiceRef,
let tres =
try:
let
delay = vc.processingDelay.valueOr: ZeroDuration
res = await node.client.getTimeOffset(delay)
Opt.some(res)
let delay = vc.processingDelay.valueOr: ZeroDuration
await node.client.getTimeOffset(delay)
except RestResponseError as exc:
case exc.status
of 400:
@ -412,12 +418,12 @@ proc runTimeMonitor(service: FallbackServiceRef,
notice NimbusExtensionsLog, status = $exc.status,
reason = $exc.msg, error_message = $exc.message
# Exiting loop
node.features.incl(RestBeaconNodeFeature.NoNimbusExtensions)
node.disableNimbusExtensions()
return
except RestError as exc:
debug "Unable to obtain beacon node's time offset", reason = $exc.msg
notice NimbusExtensionsLog
node.features.incl(RestBeaconNodeFeature.NoNimbusExtensions)
node.disableNimbusExtensions()
return
except CancelledError as exc:
raise exc
@ -425,13 +431,10 @@ proc runTimeMonitor(service: FallbackServiceRef,
warn "An unexpected error occurred while asking for time offset",
reason = $exc.msg, error = $exc.name
notice NimbusExtensionsLog
node.features.incl(RestBeaconNodeFeature.NoNimbusExtensions)
node.disableNimbusExtensions()
return
if tres.isSome():
checkOffsetStatus(node, TimeOffset.init(tres.get()))
else:
debug "Beacon node's time offset was not updated"
checkOffsetStatus(node, TimeOffset.init(tres))
await service.waitForNextSlot()

View File

@ -568,34 +568,55 @@ proc getBlindedExecutionPayload[
# Not ideal to use `when` where instead of splitting into separate functions,
# but Nim doesn't overload on generic EPH type parameter.
when EPH is capella.ExecutionPayloadHeader:
let blindedHeader = awaitWithTimeout(
payloadBuilderClient.getHeaderCapella(slot, executionBlockRoot, pubkey),
BUILDER_PROPOSAL_DELAY_TOLERANCE):
return err "Timeout obtaining Capella blinded header from builder"
let
response = awaitWithTimeout(
payloadBuilderClient.getHeaderCapella(
slot, executionBlockRoot, pubkey),
BUILDER_PROPOSAL_DELAY_TOLERANCE):
return err "Timeout obtaining Capella blinded header from builder"
res = decodeBytes(
GetHeaderResponseCapella, response.data, response.contentType)
blindedHeader = res.valueOr:
return err(
"Unable to decode Capella blinded header: " & $res.error &
" with HTTP status " & $response.status & ", Content-Type " &
$response.contentType & " and content " & $response.data)
elif EPH is deneb_mev.BlindedExecutionPayloadAndBlobsBundle:
let blindedHeader = awaitWithTimeout(
payloadBuilderClient.getHeaderDeneb(slot, executionBlockRoot, pubkey),
BUILDER_PROPOSAL_DELAY_TOLERANCE):
return err "Timeout obtaining Deneb blinded header and blob bundle from builder"
let
response = awaitWithTimeout(
payloadBuilderClient.getHeaderDeneb(
slot, executionBlockRoot, pubkey),
BUILDER_PROPOSAL_DELAY_TOLERANCE):
return err "Timeout obtaining Deneb blinded header from builder"
res = decodeBytes(
GetHeaderResponseDeneb, response.data, response.contentType)
blindedHeader = res.valueOr:
return err(
"Unable to decode Deneb blinded header: " & $res.error &
" with HTTP status " & $response.status & ", Content-Type " &
$response.contentType & " and content " & $response.data)
else:
static: doAssert false
const httpOk = 200
if blindedHeader.status != httpOk:
if response.status != httpOk:
return err "getBlindedExecutionPayload: non-200 HTTP response"
else:
if not verify_builder_signature(
node.dag.cfg.genesisFork, blindedHeader.data.data.message,
blindedHeader.data.data.message.pubkey,
blindedHeader.data.data.signature):
node.dag.cfg.genesisFork, blindedHeader.data.message,
blindedHeader.data.message.pubkey, blindedHeader.data.signature):
return err "getBlindedExecutionPayload: signature verification failed"
when EPH is capella.ExecutionPayloadHeader:
return ok((
blindedBlckPart: blindedHeader.data.data.message.header,
blockValue: blindedHeader.data.data.message.value))
blindedBlckPart: blindedHeader.data.message.header,
blockValue: blindedHeader.data.message.value))
elif EPH is deneb_mev.BlindedExecutionPayloadAndBlobsBundle:
template builderBid: untyped = blindedHeader.data.data.message
template builderBid: untyped = blindedHeader.data.message
return ok((
blindedBlckPart: EPH(
execution_payload_header: builderBid.header,
@ -1717,7 +1738,7 @@ proc registerValidatorsPerBuilder(
# Some relay networks disallow large request bodies, so split requests
template addValidatorRegistration(
validatorRegistration: SignedValidatorRegistrationV1) =
const registrationValidatorChunkSize = 1000
const registrationValidatorChunkSize = 500
if validatorRegistrations[^1].len < registrationValidatorChunkSize:
validatorRegistrations[^1].add validatorRegistration

View File

@ -55,7 +55,7 @@ proc unblindAndRouteBlockMEV*(
# By time submitBlindedBlock is called, must already have done slashing
# protection check
let bundle =
let response =
try:
awaitWithTimeout(
payloadBuilderRestClient.submitBlindedBlock(blindedBlock),
@ -63,13 +63,11 @@ proc unblindAndRouteBlockMEV*(
return err("Submitting blinded block timed out")
# From here on, including error paths, disallow local EL production by
# returning Opt.some, regardless of whether on head or newBlock.
except RestDecodingError as exc:
return err("REST decoding error submitting blinded block: " & exc.msg)
except CatchableError as exc:
return err("exception in submitBlindedBlock: " & exc.msg)
const httpOk = 200
if bundle.status != httpOk:
if response.status != httpOk:
# https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/validator.md#proposer-slashing
# This means if a validator publishes a signature for a
# `BlindedBeaconBlock` (via a dissemination of a
@ -77,12 +75,31 @@ proc unblindAndRouteBlockMEV*(
# local build process as a fallback, even in the event of some failure
# with the external builder network.
return err("submitBlindedBlock failed with HTTP error code " &
$bundle.status & ": " & $shortLog(blindedBlock))
$response.status & ": " & $shortLog(blindedBlock))
when consensusFork >= ConsensusFork.Deneb:
template execution_payload: untyped = bundle.data.data.execution_payload
let
res = decodeBytes(
SubmitBlindedBlockResponseDeneb, response.data, response.contentType)
bundle = res.valueOr:
return err("Could not decode Deneb blinded block: " & $res.error &
" with HTTP status " & $response.status & ", Content-Type " &
$response.contentType & " and content " & $response.data)
template execution_payload: untyped = bundle.data.execution_payload
else:
template execution_payload: untyped = bundle.data.data
let
res = decodeBytes(
SubmitBlindedBlockResponseCapella, response.data, response.contentType)
bundle = res.valueOr:
return err("Could not decode Capella blinded block: " & $res.error &
" with HTTP status " & $response.status & ", Content-Type " &
$response.contentType & " and content " & $response.data)
template execution_payload: untyped = bundle.data
if hash_tree_root(blindedBlock.message.body.execution_payload_header) !=
hash_tree_root(execution_payload):
return err("unblinded payload doesn't match blinded payload header: " &
@ -105,9 +122,9 @@ proc unblindAndRouteBlockMEV*(
let blobsOpt =
when consensusFork >= ConsensusFork.Deneb:
template blobs_bundle: untyped = bundle.data.data.blobs_bundle
template blobs_bundle: untyped = bundle.data.blobs_bundle
if blindedBlock.message.body.blob_kzg_commitments !=
bundle.data.data.blobs_bundle.commitments:
bundle.data.blobs_bundle.commitments:
return err("unblinded blobs bundle has unexpected commitments")
let ok = verifyProofs(
asSeq blobs_bundle.blobs,

View File

@ -17,8 +17,8 @@ const
"Copyright (c) 2019-" & compileYear & " Status Research & Development GmbH"
versionMajor* = 24
versionMinor* = 1
versionBuild* = 2
versionMinor* = 2
versionBuild* = 0
versionBlob* = "stateofus" # Single word - ends up in the default graffiti

View File

@ -137,7 +137,7 @@ Each entry in the slot index is a fixed-length 8-byte two's complement signed in
Only one entry per slot is supported, meaning that only one canonical history can be indexed this way.
A `SlotIndex` record may appear in a stand-alone file which by convention ends with `.e2i` - in this case, the offset is counted as if the index was appened to its corresponding data file - offsets are thus negative and counted from the end of the data file. In particular, if the index is simply appended to the data file, it does not change in contents.
A `SlotIndex` record may appear in a stand-alone file which by convention ends with `.e2i` - in this case, the offset is counted as if the index was appended to its corresponding data file - offsets are thus negative and counted from the end of the data file. In particular, if the index is simply appended to the data file, it does not change in contents.
### Reading
@ -219,7 +219,7 @@ The structure of the era file gives it the following properties:
* the indices at the end are fixed-length: they can be used to discover the beginning of an era if the end of it is known
* the start slot field of the state slot index identifies which era the group pertains to
* the state in the era file is the end state after having applied all the blocks in the era and, if applicable, the block at the first slot - the `block_roots` entries in the state can be used to discover the digest of the blocks - either to verify the integrity of the era file or to quickly load block roots without computing them.
* each group in the era file is full, indendent era file - groups can freely be split and combined
* each group in the era file is a full, independent era file - groups can freely be split and combined
## Reading era files

View File

@@ -1,5 +1,8 @@
# Introduction
!!! warning
This auditors' handbook is frozen and obsolete; the [Nim language manual](https://nim-lang.org/docs/manual.html) alongside [other Nim documentation](https://nim-lang.org/documentation.html), [Status Nim style guide](https://status-im.github.io/nim-style-guide/), [Chronos guides](https://github.com/status-im/nim-chronos/blob/master/docs/src/SUMMARY.md), and [Nim by Example](https://nim-by-example.github.io/getting_started/) supersede it.
The Nimbus Nim-Beacon-Chain (NBC) project is an implementation of the [Ethereum 2 Beacon Chain specification](https://github.com/ethereum/consensus-specs) in the [Nim programming language](https://nim-lang.org/).
The Auditors' Handbook aims to provide a comprehensive introduction to:

View File

@@ -1,5 +1,8 @@
# Operators and bit manipulation
!!! warning
This auditors' handbook is frozen and obsolete; the [Nim language manual](https://nim-lang.org/docs/manual.html) alongside [other Nim documentation](https://nim-lang.org/documentation.html), [Status Nim style guide](https://status-im.github.io/nim-style-guide/), [Chronos guides](https://github.com/status-im/nim-chronos/blob/master/docs/src/SUMMARY.md), and [Nim by Example](https://nim-by-example.github.io/getting_started/) supersede it.
## Operators
A set of symbols and keywords can be used as infix operators
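For instance (a minimal illustration; the expressions are made up):
```nim
doAssert 7 div 2 == 3    # `div` and `mod` are keywords used as infix operators
doAssert 7 mod 2 == 1
doAssert (5 and 3) == 1  # `and`/`or`/`xor` act bitwise on integers, logically on bools
doAssert (2 xor 3) == 1
```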

View File

@@ -1,3 +1,6 @@
# Pointer manipulation
!!! warning
This auditors' handbook is frozen and obsolete; the [Nim language manual](https://nim-lang.org/docs/manual.html) alongside [other Nim documentation](https://nim-lang.org/documentation.html), [Status Nim style guide](https://status-im.github.io/nim-style-guide/), [Chronos guides](https://github.com/status-im/nim-chronos/blob/master/docs/src/SUMMARY.md), and [Nim by Example](https://nim-by-example.github.io/getting_started/) supersede it.
https://github.com/status-im/nim-stew/blob/master/stew/ptrops.nim

View File

@@ -1,5 +1,8 @@
# Closures and closure iterators
!!! warning
This auditors' handbook is frozen and obsolete; the [Nim language manual](https://nim-lang.org/docs/manual.html) alongside [other Nim documentation](https://nim-lang.org/documentation.html), [Status Nim style guide](https://status-im.github.io/nim-style-guide/), [Chronos guides](https://github.com/status-im/nim-chronos/blob/master/docs/src/SUMMARY.md), and [Nim by Example](https://nim-by-example.github.io/getting_started/) supersede it.
TODO
## At a low-level

View File

@@ -1,5 +1,8 @@
# Nim Routines
!!! warning
This auditors' handbook is frozen and obsolete; the [Nim language manual](https://nim-lang.org/docs/manual.html) alongside [other Nim documentation](https://nim-lang.org/documentation.html), [Status Nim style guide](https://status-im.github.io/nim-style-guide/), [Chronos guides](https://github.com/status-im/nim-chronos/blob/master/docs/src/SUMMARY.md), and [Nim by Example](https://nim-by-example.github.io/getting_started/) supersede it.
Nim offers several kinds of "routines" that:
- do computation
- produce side-effects

View File

@@ -1,5 +1,8 @@
# Casting and low-level memory representation
!!! warning
This auditors' handbook is frozen and obsolete; the [Nim language manual](https://nim-lang.org/docs/manual.html) alongside [other Nim documentation](https://nim-lang.org/documentation.html), [Status Nim style guide](https://status-im.github.io/nim-style-guide/), [Chronos guides](https://github.com/status-im/nim-chronos/blob/master/docs/src/SUMMARY.md), and [Nim by Example](https://nim-by-example.github.io/getting_started/) supersede it.
## Conversions
Casting to a signed integer will lead to a range check.
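A minimal illustration, assuming default build settings where range checks are enabled (the variable names are made up):
```nim
var wide = 100
wide += 200                # 300 does not fit into an int8
try:
  discard int8(wide)       # conversion to the smaller signed type is range-checked
except RangeDefect:
  echo "range check failed: 300 does not fit in int8"
```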

View File

@@ -1,5 +1,8 @@
# Nim memory management
!!! warning
This auditors' handbook is frozen and obsolete; the [Nim language manual](https://nim-lang.org/docs/manual.html) alongside [other Nim documentation](https://nim-lang.org/documentation.html), [Status Nim style guide](https://status-im.github.io/nim-style-guide/), [Chronos guides](https://github.com/status-im/nim-chronos/blob/master/docs/src/SUMMARY.md), and [Nim by Example](https://nim-by-example.github.io/getting_started/) supersede it.
Nim memory management is handled on a per-type basis.
Plain objects, char, and numerical types are allocated on the stack.
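A minimal illustration (type names are made up) of how allocation depends on the type:
```nim
type
  Point = object        # plain object: a value type, lives on the stack or inline
    x, y: int
  PointRef = ref Point  # ref object: allocated on the garbage-collected heap

var p = Point(x: 1, y: 2)     # value semantics; copied on assignment
var q = PointRef(x: 3, y: 4)  # reference semantics; managed by the GC
q.x = 5
doAssert p.x == 1 and q.x == 5
```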

View File

@@ -1,5 +1,8 @@
# Generics and Static types
!!! warning
This auditors' handbook is frozen and obsolete; the [Nim language manual](https://nim-lang.org/docs/manual.html) alongside [other Nim documentation](https://nim-lang.org/documentation.html), [Status Nim style guide](https://status-im.github.io/nim-style-guide/), [Chronos guides](https://github.com/status-im/nim-chronos/blob/master/docs/src/SUMMARY.md), and [Nim by Example](https://nim-by-example.github.io/getting_started/) supersede it.
Nim types can be parametrized by types (generics) or compile-time values (static)
For example

View File

@@ -1,5 +1,8 @@
# Arrays, openarrays, varargs
!!! warning
This auditors' handbook is frozen and obsolete; the [Nim language manual](https://nim-lang.org/docs/manual.html) alongside [other Nim documentation](https://nim-lang.org/documentation.html), [Status Nim style guide](https://status-im.github.io/nim-style-guide/), [Chronos guides](https://github.com/status-im/nim-chronos/blob/master/docs/src/SUMMARY.md), and [Nim by Example](https://nim-by-example.github.io/getting_started/) supersede it.
## Arrays
TODO

View File

@@ -1,5 +1,8 @@
# Nim data types
!!! warning
This auditors' handbook is frozen and obsolete; the [Nim language manual](https://nim-lang.org/docs/manual.html) alongside [other Nim documentation](https://nim-lang.org/documentation.html), [Status Nim style guide](https://status-im.github.io/nim-style-guide/), [Chronos guides](https://github.com/status-im/nim-chronos/blob/master/docs/src/SUMMARY.md), and [Nim by Example](https://nim-by-example.github.io/getting_started/) supersede it.
## Builtin types
### Numerical types

View File

@@ -1,5 +1,8 @@
# Correctness, distinct, mutability, effects, exceptions
!!! warning
This auditors' handbook is frozen and obsolete; the [Nim language manual](https://nim-lang.org/docs/manual.html) alongside [other Nim documentation](https://nim-lang.org/documentation.html), [Status Nim style guide](https://status-im.github.io/nim-style-guide/), [Chronos guides](https://github.com/status-im/nim-chronos/blob/master/docs/src/SUMMARY.md), and [Nim by Example](https://nim-by-example.github.io/getting_started/) supersede it.
The Nim compiler provides several constraints that can be used to enforce
proper usage of variables, types and error handling at compile-time.
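For instance, a minimal sketch combining a `distinct` type with an empty `raises` effect list (names are made up):
```nim
type Wei = distinct uint64  # cannot be mixed up with a plain uint64

proc pay(amount: Wei) = discard
pay(Wei(10))                # explicit conversion required
# pay(10'u64)               # rejected at compile time: uint64 is not Wei

func addChecked(a, b: int): int {.raises: [].} =
  # `func` forbids side effects; the empty raises list forbids (catchable) exceptions
  a + b

doAssert addChecked(2, 3) == 5
```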

View File

@@ -1,5 +1,8 @@
# Debugging Nim
!!! warning
This auditors' handbook is frozen and obsolete; the [Nim language manual](https://nim-lang.org/docs/manual.html) alongside [other Nim documentation](https://nim-lang.org/documentation.html), [Status Nim style guide](https://status-im.github.io/nim-style-guide/), [Chronos guides](https://github.com/status-im/nim-chronos/blob/master/docs/src/SUMMARY.md), and [Nim by Example](https://nim-by-example.github.io/getting_started/) supersede it.
Reference article: [https://nim-lang.org/blog/2017/10/02/documenting-profiling-and-debugging-nim-code.html](https://nim-lang.org/blog/2017/10/02/documenting-profiling-and-debugging-nim-code.html)
## GDB / LLDB

View File

@@ -1,5 +1,8 @@
# Foreign language interop
!!! warning
This auditors' handbook is frozen and obsolete; the [Nim language manual](https://nim-lang.org/docs/manual.html) alongside [other Nim documentation](https://nim-lang.org/documentation.html), [Status Nim style guide](https://status-im.github.io/nim-style-guide/), [Chronos guides](https://github.com/status-im/nim-chronos/blob/master/docs/src/SUMMARY.md), and [Nim by Example](https://nim-by-example.github.io/getting_started/) supersede it.
## Wrapping C
### Using shared library

View File

@@ -1,5 +1,8 @@
# Nim threat model
!!! warning
This auditors' handbook is frozen and obsolete; the [Nim language manual](https://nim-lang.org/docs/manual.html) alongside [other Nim documentation](https://nim-lang.org/documentation.html), [Status Nim style guide](https://status-im.github.io/nim-style-guide/), [Chronos guides](https://github.com/status-im/nim-chronos/blob/master/docs/src/SUMMARY.md), and [Nim by Example](https://nim-by-example.github.io/getting_started/) supersede it.
Nim and its standard library are not the focus of the audits.
In particular the codebase intentionally limits reliance on the standard library

View File

@@ -1,5 +1,8 @@
# The Nim Programming Language
!!! warning
This auditors' handbook is frozen and obsolete; the [Nim language manual](https://nim-lang.org/docs/manual.html) alongside [other Nim documentation](https://nim-lang.org/documentation.html), [Status Nim style guide](https://status-im.github.io/nim-style-guide/), [Chronos guides](https://github.com/status-im/nim-chronos/blob/master/docs/src/SUMMARY.md), and [Nim by Example](https://nim-by-example.github.io/getting_started/) supersede it.
The Nim programming language is a compiled language, with strong static typing.
The rest of the Handbook assumes that Nim by Example has been read.

View File

@@ -1,5 +1,8 @@
# Nimbus build system & dependencies
!!! warning
This auditors' handbook is frozen and obsolete; the [Nim language manual](https://nim-lang.org/docs/manual.html) alongside [other Nim documentation](https://nim-lang.org/documentation.html), [Status Nim style guide](https://status-im.github.io/nim-style-guide/), [Chronos guides](https://github.com/status-im/nim-chronos/blob/master/docs/src/SUMMARY.md), and [Nim by Example](https://nim-by-example.github.io/getting_started/) supersede it.
## Build system
### NBC repo

View File

@@ -1,5 +1,8 @@
# NBC Threat model
!!! warning
This auditors' handbook is frozen and obsolete; the [Nim language manual](https://nim-lang.org/docs/manual.html) alongside [other Nim documentation](https://nim-lang.org/documentation.html), [Status Nim style guide](https://status-im.github.io/nim-style-guide/), [Chronos guides](https://github.com/status-im/nim-chronos/blob/master/docs/src/SUMMARY.md), and [Nim by Example](https://nim-by-example.github.io/getting_started/) supersede it.
NBC primarily targets hardware ranging from resource-restricted devices, such as Raspberry Pis and smartphones, to desktop computers.
We focus on:

View File

@@ -1,5 +1,8 @@
# Nimbus Beacon Chain
!!! warning
This auditors' handbook is frozen and obsolete; the [Nim language manual](https://nim-lang.org/docs/manual.html) alongside [other Nim documentation](https://nim-lang.org/documentation.html), [Status Nim style guide](https://status-im.github.io/nim-style-guide/), [Chronos guides](https://github.com/status-im/nim-chronos/blob/master/docs/src/SUMMARY.md), and [Nim by Example](https://nim-by-example.github.io/getting_started/) supersede it.
[https://github.com/status-im/nimbus-eth2](https://github.com/status-im/nimbus-eth2)
Nimbus Beacon Chain (NBC) is an implementation of an Ethereum 2 client.

View File

@@ -1,5 +1,8 @@
# Summary
!!! warning
This auditors' handbook is frozen and obsolete; the [Nim language manual](https://nim-lang.org/docs/manual.html) alongside [other Nim documentation](https://nim-lang.org/documentation.html), [Status Nim style guide](https://status-im.github.io/nim-style-guide/), [Chronos guides](https://github.com/status-im/nim-chronos/blob/master/docs/src/SUMMARY.md), and [Nim by Example](https://nim-by-example.github.io/getting_started/) supersede it.
- [Introduction](01_introduction.md)
- [The Nim Programming Language](02_the_Nim_programming_language.md)
- [Nim routines, procedures, functions, templates, macros](02.1_nim_routines_proc_func_templates_macros.md)

View File

@@ -51,4 +51,4 @@ Additionally, the URL of the service exposing the [builder API](https://ethereum
- [Mainnet Relay Overview](https://beaconcha.in/relays)
- [Goerli Relay Overview](https://goerli.beaconcha.in/relays)
- [Holesky Relay Overview](https://holesky.beaconcha.in/relays)

View File

@@ -28,6 +28,7 @@ For each validator, it selects from the first available, in the following order:
For example, `nimbus_beacon_node --suggested-fee-recipient=0x70E47C843E0F6ab0991A3189c28F2957eb6d3842` suggests to the execution client that `0x70E47C843E0F6ab0991A3189c28F2957eb6d3842` might be the coinbase.
If this Nimbus node has two validators, one of which has its own suggested fee recipient via the keymanager API and the other does not, the former would use its own per-validator suggested fee recipient, while the latter would fall back to `0x70E47C843E0F6ab0991A3189c28F2957eb6d3842`.
Fee recipients are recorded publicly on-chain as part of proposed blocks, so suggested fee recipients should be chosen with this public visibility in mind.
## Command line

View File

@@ -3369,6 +3369,190 @@
},
"response": {"status": {"operator": "equals", "value": "400"}}
},
{
"topics": ["beacon", "beacon_blindedblocks_blockid"],
"request": {
"url": "/eth/v1/beacon/blinded_blocks/head",
"headers": {"Accept": "application/json"}
},
"response": {
"status": {"operator": "equals", "value": "200"},
"headers": [
{"key": "Content-Type", "value": "application/json", "operator": "equals"},
{"key": "Eth-Consensus-Version", "value": ["phase0", "altair", "bellatrix"], "operator": "oneof"}
],
"body": [{"operator": "jstructcmpns", "value": {"version": "", "data": {"message": {"slot": "", "proposer_index": "", "parent_root": "", "state_root": "", "body": {"randao_reveal": "", "eth1_data": {"deposit_root": "", "deposit_count": "", "block_hash": ""}, "graffiti": "", "proposer_slashings": [{"signed_header_1": {"message": {"slot": "", "proposer_index": "", "parent_root": "", "state_root": "", "body_root": ""},"signature": ""}, "signed_header_2": {"message": {"slot": "", "proposer_index": "", "parent_root": "", "state_root": "", "body_root": ""},"signature": ""}}], "attester_slashings": [{"attestation_1": {"attesting_indices": [""], "signature": "", "data": {"slot": "", "index": "", "beacon_block_root": "", "source": {"epoch": "", "root": ""}, "target": {"epoch": "", "root": ""}}}, "attestation_2": {"attesting_indices": [""], "signature": "", "data": {"slot": "", "index": "", "beacon_block_root": "", "source": {"epoch": "", "root": ""}, "target": {"epoch": "", "root": ""}}}}], "attestations": [{"aggregation_bits": "", "signature": "", "data": {"slot": "", "index": "", "beacon_block_root": "", "source": {"epoch": "", "root": ""}, "target": {"epoch": "", "root": ""}}}], "deposits": [{"proof": [""], "data": {"pubkey": "", "withdrawal_credentials": "", "amount": "", "signature": ""}}], "voluntary_exits": [{"message": {"epoch": "", "validator_index": ""}, "signature": ""}]}}, "signature": ""}}}]
}
},
{
"topics": ["beacon", "beacon_blindedblocks_blockid"],
"request": {
"url": "/eth/v1/beacon/blinded_blocks/genesis",
"headers": {"Accept": "application/json"}
},
"response": {
"status": {"operator": "equals", "value": "200"},
"headers": [
{"key": "Content-Type", "value": "application/json", "operator": "equals"},
{"key": "Eth-Consensus-Version", "value": ["phase0", "altair", "bellatrix"], "operator": "oneof"}
],
"body": [{"operator": "jstructcmpns", "value": {"version": "", "data": {"message": {"slot": "", "proposer_index": "", "parent_root": "", "state_root": "", "body": {"randao_reveal": "", "eth1_data": {"deposit_root": "", "deposit_count": "", "block_hash": ""}, "graffiti": "", "proposer_slashings": [{"signed_header_1": {"message": {"slot": "", "proposer_index": "", "parent_root": "", "state_root": "", "body_root": ""},"signature": ""}, "signed_header_2": {"message": {"slot": "", "proposer_index": "", "parent_root": "", "state_root": "", "body_root": ""},"signature": ""}}], "attester_slashings": [{"attestation_1": {"attesting_indices": [""], "signature": "", "data": {"slot": "", "index": "", "beacon_block_root": "", "source": {"epoch": "", "root": ""}, "target": {"epoch": "", "root": ""}}}, "attestation_2": {"attesting_indices": [""], "signature": "", "data": {"slot": "", "index": "", "beacon_block_root": "", "source": {"epoch": "", "root": ""}, "target": {"epoch": "", "root": ""}}}}], "attestations": [{"aggregation_bits": "", "signature": "", "data": {"slot": "", "index": "", "beacon_block_root": "", "source": {"epoch": "", "root": ""}, "target": {"epoch": "", "root": ""}}}], "deposits": [{"proof": [""], "data": {"pubkey": "", "withdrawal_credentials": "", "amount": "", "signature": ""}}], "voluntary_exits": [{"message": {"epoch": "", "validator_index": ""}, "signature": ""}]}}, "signature": ""}}}]
}
},
{
"topics": ["beacon", "beacon_blindedblocks_blockid"],
"request": {
"url": "/eth/v1/beacon/blinded_blocks/finalized",
"headers": {"Accept": "application/json"}
},
"response": {
"status": {"operator": "equals", "value": "200"},
"headers": [
{"key": "Content-Type", "value": "application/json", "operator": "equals"},
{"key": "Eth-Consensus-Version", "value": ["phase0", "altair", "bellatrix"], "operator": "oneof"}
],
"body": [{"operator": "jstructcmpns", "value": {"version": "", "data": {"message": {"slot": "", "proposer_index": "", "parent_root": "", "state_root": "", "body": {"randao_reveal": "", "eth1_data": {"deposit_root": "", "deposit_count": "", "block_hash": ""}, "graffiti": "", "proposer_slashings": [{"signed_header_1": {"message": {"slot": "", "proposer_index": "", "parent_root": "", "state_root": "", "body_root": ""},"signature": ""}, "signed_header_2": {"message": {"slot": "", "proposer_index": "", "parent_root": "", "state_root": "", "body_root": ""},"signature": ""}}], "attester_slashings": [{"attestation_1": {"attesting_indices": [""], "signature": "", "data": {"slot": "", "index": "", "beacon_block_root": "", "source": {"epoch": "", "root": ""}, "target": {"epoch": "", "root": ""}}}, "attestation_2": {"attesting_indices": [""], "signature": "", "data": {"slot": "", "index": "", "beacon_block_root": "", "source": {"epoch": "", "root": ""}, "target": {"epoch": "", "root": ""}}}}], "attestations": [{"aggregation_bits": "", "signature": "", "data": {"slot": "", "index": "", "beacon_block_root": "", "source": {"epoch": "", "root": ""}, "target": {"epoch": "", "root": ""}}}], "deposits": [{"proof": [""], "data": {"pubkey": "", "withdrawal_credentials": "", "amount": "", "signature": ""}}], "voluntary_exits": [{"message": {"epoch": "", "validator_index": ""}, "signature": ""}]}}, "signature": ""}}}]
}
},
{
"topics": ["beacon", "beacon_blindedblocks_blockid"],
"request": {
"url": "/eth/v1/beacon/blinded_blocks/0",
"headers": {"Accept": "application/json"}
},
"response": {
"status": {"operator": "equals", "value": "200"},
"headers": [
{"key": "Content-Type", "value": "application/json", "operator": "equals"},
{"key": "Eth-Consensus-Version", "value": ["phase0", "altair", "bellatrix"], "operator": "oneof"}
],
"body": [{"operator": "jstructcmpns", "value": {"version": "", "data": {"message": {"slot": "", "proposer_index": "", "parent_root": "", "state_root": "", "body": {"randao_reveal": "", "eth1_data": {"deposit_root": "", "deposit_count": "", "block_hash": ""}, "graffiti": "", "proposer_slashings": [{"signed_header_1": {"message": {"slot": "", "proposer_index": "", "parent_root": "", "state_root": "", "body_root": ""},"signature": ""}, "signed_header_2": {"message": {"slot": "", "proposer_index": "", "parent_root": "", "state_root": "", "body_root": ""},"signature": ""}}], "attester_slashings": [{"attestation_1": {"attesting_indices": [""], "signature": "", "data": {"slot": "", "index": "", "beacon_block_root": "", "source": {"epoch": "", "root": ""}, "target": {"epoch": "", "root": ""}}}, "attestation_2": {"attesting_indices": [""], "signature": "", "data": {"slot": "", "index": "", "beacon_block_root": "", "source": {"epoch": "", "root": ""}, "target": {"epoch": "", "root": ""}}}}], "attestations": [{"aggregation_bits": "", "signature": "", "data": {"slot": "", "index": "", "beacon_block_root": "", "source": {"epoch": "", "root": ""}, "target": {"epoch": "", "root": ""}}}], "deposits": [{"proof": [""], "data": {"pubkey": "", "withdrawal_credentials": "", "amount": "", "signature": ""}}], "voluntary_exits": [{"message": {"epoch": "", "validator_index": ""}, "signature": ""}]}}, "signature": ""}}}]
}
},
{
"topics": ["beacon", "beacon_blindedblocks_blockid"],
"request": {
"url": "/eth/v1/beacon/blinded_blocks/18446744073709551615",
"headers": {"Accept": "application/json"}
},
"response": {
"status": {"operator": "equals", "value": "404"},
"headers": [
{"key": "Content-Type", "value": "application/json", "operator": "equals"},
{"key": "Eth-Consensus-Version", "operator": "notexists"}
],
"body": [{"operator": "jstructcmpns", "value": {"code": 404, "message": ""}}]
}
},
{
"topics": ["beacon", "beacon_blindedblocks_blockid"],
"request": {
"url": "/eth/v1/beacon/blinded_blocks/18446744073709551616",
"headers": {"Accept": "application/json"}
},
"response": {
"status": {"operator": "equals", "value": "400"},
"headers": [
{"key": "Content-Type", "value": "application/json", "operator": "equals"},
{"key": "Eth-Consensus-Version", "operator": "notexists"}
],
"body": [{"operator": "jstructcmpns", "value": {"code": 400, "message": ""}}]
}
},
{
"topics": ["beacon", "beacon_blindedblocks_blockid"],
"request": {
"url": "/eth/v1/beacon/blinded_blocks/0x0000000000000000000000000000000000000000000000000000000000000000",
"headers": {"Accept": "application/json"}
},
"response": {
"status": {"operator": "equals", "value": "404"},
"headers": [
{"key": "Content-Type", "value": "application/json", "operator": "equals"},
{"key": "Eth-Consensus-Version", "operator": "notexists"}
],
"body": [{"operator": "jstructcmpns", "value": {"code": 404, "message": ""}}]
}
},
{
"topics": ["beacon", "beacon_blindedblocks_blockid"],
"request": {
"url": "/eth/v1/beacon/blinded_blocks/18446744073709551616",
"headers": {"Accept": "application/json"}
},
"response": {
"status": {"operator": "equals", "value": "400"},
"headers": [
{"key": "Content-Type", "value": "application/json", "operator": "equals"},
{"key": "Eth-Consensus-Version", "operator": "notexists"}
],
"body": [{"operator": "jstructcmpns", "value": {"code": 400, "message": ""}}]
}
},
{
"topics": ["beacon", "beacon_blindedblocks_blockid"],
"request": {
"url": "/eth/v1/beacon/blinded_blocks/heat",
"headers": {"Accept": "application/json"}
},
"response": {"status": {"operator": "equals", "value": "400"}}
},
{
"topics": ["beacon", "beacon_blindedblocks_blockid"],
"request": {
"url": "/eth/v1/beacon/blinded_blocks/geneziz",
"headers": {"Accept": "application/json"}
},
"response": {"status": {"operator": "equals", "value": "400"}}
},
{
"topics": ["beacon", "beacon_blindedblocks_blockid"],
"request": {
"url": "/eth/v1/beacon/blinded_blocks/finalised",
"headers": {"Accept": "application/json"}
},
"response": {"status": {"operator": "equals", "value": "400"}}
},
{
"topics": ["beacon", "beacon_blindedblocks_blockid"],
"request": {
"url": "/eth/v1/beacon/blinded_blocks/foobar",
"headers": {"Accept": "application/json"}
},
"response": {"status": {"operator": "equals", "value": "400"}}
},
{
"topics": ["beacon", "beacon_blindedblocks_blockid"],
"request": {
"url": "/eth/v1/beacon/blinded_blocks/0x",
"headers": {"Accept": "application/json"}
},
"response": {"status": {"operator": "equals", "value": "400"}}
},
{
"topics": ["beacon", "beacon_blindedblocks_blockid"],
"request": {
"url": "/eth/v1/beacon/blinded_blocks/0x0",
"headers": {"Accept": "application/json"}
},
"response": {"status": {"operator": "equals", "value": "400"}}
},
{
"topics": ["beacon", "beacon_blindedblocks_blockid"],
"request": {
"url": "/eth/v1/beacon/blinded_blocks/0x00",
"headers": {"Accept": "application/json"}
},
"response": {"status": {"operator": "equals", "value": "400"}}
},
{
"topics": ["beacon", "beacon_blindedblocks_blockid"],
"request": {
"url": "/eth/v1/beacon/blinded_blocks/0x000000000000000000000000000000000000000000000000000000000000000000",
"headers": {"Accept": "application/json"}
},
"response": {"status": {"operator": "equals", "value": "400"}}
},
{
"topics": ["beacon", "beacon_block_blinded_blocks"],
"request": {

View File

@@ -42,7 +42,7 @@ proc setupEngineAPI*(server: RpcServer) =
raise (ref InvalidRequest)(
code: engineApiUnknownPayload,
msg: "Unkown payload"
msg: "Unknown payload"
)
# https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/paris.md#engine_exchangetransitionconfigurationv1

View File

@@ -1,6 +1,6 @@
#!/usr/bin/bash
# Copyright (c) 2023 Status Research & Development GmbH.
# Copyright (c) 2023-2024 Status Research & Development GmbH.
# Licensed under either of:
# - Apache License, version 2.0
# - MIT license
@@ -34,7 +34,7 @@ KEYSTORES_DIR="${DATA_DIR}/validators_shares/$((SIGNER_NODE_IDX + 1))"
# Here you need to launch your signer server process.
# You must make sure that it will listen on the `$((BASE_REMOTE_SIGNER_PORT + SIGNER_NODE_IDX))` port.
# The new process must be launched in the background.
# Preferrably, you will also create a log file in the `${DATA_DIR}/logs` directory.
# Preferably, you will also create a log file in the `${DATA_DIR}/logs` directory.
# Here is an example way to achieve the above with the web3signer binary:
#

View File

@@ -44,6 +44,7 @@ import # Unit test
./test_statediff,
./test_sync_committee_pool,
./test_sync_manager,
./test_toblindedblock,
./test_validator_change_pool,
./test_validator_pool,
./test_zero_signature,

View File

@@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@@ -21,7 +21,7 @@ suite "Beacon time":
s0.start_beacon_time() == BeaconTime()
s0.sync_committee_period() == SyncCommitteePeriod(0)
# Roundtrip far times we treat these as "Infinitiy"
# Roundtrip far times we treat these as "Infinity"
FAR_FUTURE_SLOT.epoch.start_slot() == FAR_FUTURE_SLOT
FAR_FUTURE_SLOT.sync_committee_period.start_slot() == FAR_FUTURE_SLOT
FAR_FUTURE_EPOCH.start_slot().epoch() == FAR_FUTURE_EPOCH

View File

@@ -332,7 +332,7 @@ proc startValidatorClient(basePort: int) {.async, thread.} =
const
password = "7465737470617373776f7264f09f9491"
# This is taken from the offical test vectors in test_keystores.nim
# This is taken from the official test vectors in test_keystores.nim
secretBytes = hexToSeqByte "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"
salt = hexToSeqByte "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"
iv = hexToSeqByte "264daa3f303d7259501c93d997d84fe6"

View File

@@ -50,8 +50,8 @@ proc collector(queue: AsyncQueue[BlockEntry]): BlockVerifier =
# the BlockProcessor and this test
proc verify(signedBlock: ForkedSignedBeaconBlock, blobs: Opt[BlobSidecars],
maybeFinalized: bool):
Future[Result[void, VerifierError]] {.async: (raises: [CancelledError], raw: true).} =
let fut = Future[Result[void, VerifierError]].Raising([CancelledError]).init()
Future[Result[void, VerifierError]] =
let fut = newFuture[Result[void, VerifierError]]()
try: queue.addLastNoWait(BlockEntry(blck: signedBlock, resfut: fut))
except CatchableError as exc: raiseAssert exc.msg
return fut

View File

@@ -0,0 +1,131 @@
# beacon_chain
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.used.}
import
# Beacon chain internals
../beacon_chain/spec/helpers,
../beacon_chain/spec/datatypes/[bellatrix, capella],
../beacon_chain/spec/mev/[bellatrix_mev, capella_mev, deneb_mev],
# Test utilities
unittest2
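# `do_check` verifies that converting the signed block to its blinded form
# preserves both the message's hash_tree_root and the signature; each *_steps
# template below mutates one field at a time and re-runs the check.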
template do_check() =
check:
hash_tree_root(b.message) == hash_tree_root(
b.toSignedBlindedBeaconBlock.message)
b.signature == b.toSignedBlindedBeaconBlock.signature
const
nondefaultEth1Data = Eth1Data(
deposit_root: Eth2Digest.fromHex(
"0x55aaf2ee893f67db190d617070bd10d1583b00194fbcfda03d89baa24626f5bb"),
deposit_count: 1,
block_hash: Eth2Digest.fromHex(
"0xe617d58db390a10741ab7d3de0ba9460b5df5e0772e9721fe33c0422a63b2677"))
let nondefaultValidatorSig = ValidatorSig.fromHex(
"0xac08ca70066c6ea0525aa54dd867f82b86945818cb9305aae30f3bee13275dcf13d6d0680a47e889482ff2bb9a9f3cdb0588746f9e30c04645eda6d01bbd0ce6326ceb695294cb338ebace5b130c5b8f2e4f8efa63d63d5bb255c21a39da9c12")[]
template bellatrix_steps() =
b.message.slot = 1.Slot
do_check
b.message.proposer_index = 1
do_check
b.message.state_root = Eth2Digest.fromHex(
"0xb277ed302ade6685d0f0765fd0659c4b448656ab697409f2935cd9ab7189e48e")
do_check
b.message.parent_root = Eth2Digest.fromHex(
"0x2f6eaa73ec39aeb864884a2371f3e4a8abc29d277074459e46c987418f5df430")
do_check
b.message.body.randao_reveal = nondefaultValidatorSig
do_check
b.message.body.eth1_data = nondefaultEth1Data
do_check
distinctBase(b.message.body.graffiti)[0] = 1
do_check
check: b.message.body.proposer_slashings.add(default(ProposerSlashing))
do_check
check: b.message.body.attester_slashings.add(default(AttesterSlashing))
do_check
check: b.message.body.attestations.add(
Attestation(aggregation_bits: CommitteeValidatorsBits.init(1)))
do_check
check: b.message.body.deposits.add(default(Deposit))
do_check
check: b.message.body.voluntary_exits.add(default(SignedVoluntaryExit))
do_check
b.message.body.sync_aggregate.sync_committee_signature =
nondefaultValidatorSig
do_check
b.message.body.execution_payload.parent_hash = Eth2Digest.fromHex(
"0x941bdf6ccf731a7ede6bac0c9533ecee5e3dc5081ea59d57c3fd8c624eeca85d")
do_check
b.message.body.execution_payload.fee_recipient =
ExecutionAddress.fromHex("0x1234567812345678123456781234567812345678")
do_check
b.message.body.execution_payload.state_root = Eth2Digest.fromHex(
"0x9e7d9bca96a9d0af9013ad6abb8708988beef02d58c16ba1a90075960b99c2ff")
do_check
b.message.body.execution_payload.receipts_root = Eth2Digest.fromHex(
"0x0e66a5007cf7bb16f4398adbbd01b34067a80faaef41a0a6be324c5fdb93a6df")
do_check
b.message.body.execution_payload.logs_bloom.data[0] = 2
do_check
b.message.body.execution_payload.prev_randao = Eth2Digest.fromHex(
"0x8aa830156370e6a5ec7679d7e5ee712dd87f24fef76a1954a03c1df8c68bc0fd")
do_check
b.message.body.execution_payload.block_number = 3
do_check
b.message.body.execution_payload.gas_limit = 4
do_check
b.message.body.execution_payload.gas_used = 5
do_check
b.message.body.execution_payload.timestamp = 6
do_check
check: b.message.body.execution_payload.extra_data.add 0'u8
do_check
b.message.body.execution_payload.base_fee_per_gas = 7.u256
do_check
b.message.body.execution_payload.block_hash = Eth2Digest.fromHex(
"0x4b1aed517ac48bfbf6ab19846923d5256897fbc934c20ca5b8c486bfe71c6ef1")
do_check
check: b.message.body.execution_payload.transactions.add default(Transaction)
do_check
template capella_steps() =
check: b.message.body.bls_to_execution_changes.add(
default(SignedBLSToExecutionChange))
do_check
check: b.message.body.execution_payload.withdrawals.add(default(
Withdrawal))
do_check
template deneb_steps() =
check: b.message.body.blob_kzg_commitments.add(default(KzgCommitment))
do_check
suite "Blinded block conversions":
test "Bellatrix toSignedBlindedBlock":
var b = default(bellatrix.SignedBeaconBlock)
do_check
bellatrix_steps
test "Capella toSignedBlindedBlock":
var b = default(capella.SignedBeaconBlock)
do_check
bellatrix_steps
capella_steps
test "Deneb toSignedBlindedBlock":
var b = default(deneb.SignedBeaconBlock)
do_check
bellatrix_steps
capella_steps
deneb_steps

@@ -1 +1 @@
Subproject commit 7568f1b7c3142d8e87c1f3dd42924238926affbe
Subproject commit 7217854b60b9a6c68180e0aac3de24c2ccf35350