2021-10-06 17:05:06 +00:00
|
|
|
# beacon_chain
|
2024-01-06 14:26:56 +00:00
|
|
|
# Copyright (c) 2018-2024 Status Research & Development GmbH
|
2021-03-17 18:46:45 +00:00
|
|
|
# Licensed and distributed under either of
|
|
|
|
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
|
|
|
|
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
|
|
|
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
2021-10-06 17:05:06 +00:00
|
|
|
|
2024-02-24 05:08:22 +00:00
|
|
|
{.push raises: [].}
|
|
|
|
|
2021-03-17 18:46:45 +00:00
|
|
|
import
|
2022-06-09 08:50:36 +00:00
|
|
|
std/[typetraits, sequtils, sets],
|
2021-03-17 18:46:45 +00:00
|
|
|
stew/[results, base10],
|
|
|
|
chronicles,
|
2021-11-02 18:23:31 +00:00
|
|
|
./rest_utils,
|
2022-12-07 10:24:51 +00:00
|
|
|
./state_ttl_cache,
|
|
|
|
../beacon_node,
|
2023-12-23 05:55:47 +00:00
|
|
|
../consensus_object_pools/[blockchain_dag, spec_cache, validator_change_pool],
|
2022-12-07 10:24:51 +00:00
|
|
|
../spec/[deposit_snapshots, eth2_merkleization, forks, network, validator],
|
2024-03-01 05:30:09 +00:00
|
|
|
../spec/mev/[bellatrix_mev, capella_mev],
|
2022-12-07 10:24:51 +00:00
|
|
|
../validators/message_router_mev
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2021-10-27 12:01:11 +00:00
|
|
|
export rest_utils
|
|
|
|
|
2021-03-17 18:46:45 +00:00
|
|
|
logScope: topics = "rest_beaconapi"
|
|
|
|
|
2022-08-19 10:30:07 +00:00
|
|
|
proc validateBeaconApiQueries*(key: string, value: string): int =
  ## This is rough validation procedure which should be simple and fast,
  ## because it will be used for query routing.
  ## Returns 0 when the value is acceptable for the given path placeholder,
  ## non-zero otherwise.
  case key
  of "{epoch}", "{slot}", "{peer_id}", "{state_id}", "{block_id}",
     "{validator_id}", "{block_root}":
    # These placeholders are accepted as-is; detailed decoding happens later.
    0
  of "{pubkey}":
    # A hex-encoded BLS public key: "0x" prefix + 96 hex digits = 98 chars.
    int(value.len != 98)
  else:
    1
|
|
|
|
|
2024-01-19 23:34:11 +00:00
|
|
|
const
  # The complete set of validator status filters understood by the Beacon API
  # (`status` query parameter). `validateFilter` falls back to this set when a
  # request supplies no status filter at all, i.e. "no filter" means
  # "all statuses".
  AllValidatorFilterKinds = {
    ValidatorFilterKind.PendingInitialized,
    ValidatorFilterKind.PendingQueued,
    ValidatorFilterKind.ActiveOngoing,
    ValidatorFilterKind.ActiveExiting,
    ValidatorFilterKind.ActiveSlashed,
    ValidatorFilterKind.ExitedUnslashed,
    ValidatorFilterKind.ExitedSlashed,
    ValidatorFilterKind.WithdrawalPossible,
    ValidatorFilterKind.WithdrawalDone
  }
|
|
|
|
|
2021-03-17 18:46:45 +00:00
|
|
|
proc validateFilter(filters: seq[ValidatorFilter]): Result[ValidatorFilter,
                                                           cstring] =
  ## Merges the requested status filters into a single set, rejecting any
  ## status that appears more than once. An empty selection is interpreted
  ## as "all statuses".
  var merged: ValidatorFilter
  for entry in filters:
    if (merged * entry) != {}:
      # Overlap with what we already collected - duplicate status requested.
      return err("Validator status must be unique")
    merged = merged + entry
  if merged == {}:
    # No filter supplied at all - match every validator status.
    return ok(AllValidatorFilterKinds)
  ok(merged)
|
|
|
|
|
|
|
|
proc getStatus(validator: Validator,
               current_epoch: Epoch): Result[ValidatorFilterKind, cstring] =
  ## Classifies `validator` into a Beacon API status kind as of
  ## `current_epoch`, following the lifecycle
  ## pending -> active -> exited -> withdrawal.
  if current_epoch < validator.activation_epoch:
    # Not yet activated.
    if validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH:
      ok(ValidatorFilterKind.PendingInitialized)
    else:
      # validator.activation_eligibility_epoch < FAR_FUTURE_EPOCH
      ok(ValidatorFilterKind.PendingQueued)
  elif current_epoch < validator.exit_epoch:
    # Active: activation_epoch <= current_epoch is implied by the branch
    # above, so only the exit bound needs checking here.
    if validator.exit_epoch == FAR_FUTURE_EPOCH:
      ok(ValidatorFilterKind.ActiveOngoing)
    elif not validator.slashed:
      # validator.exit_epoch < FAR_FUTURE_EPOCH
      ok(ValidatorFilterKind.ActiveExiting)
    else:
      # validator.exit_epoch < FAR_FUTURE_EPOCH and validator.slashed
      ok(ValidatorFilterKind.ActiveSlashed)
  elif current_epoch < validator.withdrawable_epoch:
    # Exited: exit_epoch <= current_epoch is implied by the branch above.
    if validator.slashed:
      ok(ValidatorFilterKind.ExitedSlashed)
    else:
      ok(ValidatorFilterKind.ExitedUnslashed)
  elif validator.withdrawable_epoch <= current_epoch:
    # Withdrawal period.
    if validator.effective_balance != 0.Gwei:
      ok(ValidatorFilterKind.WithdrawalPossible)
    else:
      # validator.effective_balance == 0.Gwei
      ok(ValidatorFilterKind.WithdrawalDone)
  else:
    err("Invalid validator status")
|
|
|
|
|
|
|
|
proc toString*(kind: ValidatorFilterKind): string =
  ## Returns the Beacon API wire-format name for a validator status kind
  ## (the strings used by the `status` query parameter and responses).
  case kind
  of ValidatorFilterKind.PendingInitialized: "pending_initialized"
  of ValidatorFilterKind.PendingQueued: "pending_queued"
  of ValidatorFilterKind.ActiveOngoing: "active_ongoing"
  of ValidatorFilterKind.ActiveExiting: "active_exiting"
  of ValidatorFilterKind.ActiveSlashed: "active_slashed"
  of ValidatorFilterKind.ExitedUnslashed: "exited_unslashed"
  of ValidatorFilterKind.ExitedSlashed: "exited_slashed"
  of ValidatorFilterKind.WithdrawalPossible: "withdrawal_possible"
  of ValidatorFilterKind.WithdrawalDone: "withdrawal_done"
|
|
|
|
|
2024-03-28 03:32:33 +00:00
|
|
|
func checkRestBlockBlobsValid(
    forkyBlck: deneb.SignedBeaconBlock | electra.SignedBeaconBlock,
    kzg_proofs: KzgProofs,
    blobs: Blobs): Result[void, string] =
  ## Checks that a block publish request carries matching counts of KZG
  ## proofs, blobs and in-block KZG commitments; returns a descriptive
  ## error otherwise.
  let
    proofCount = kzg_proofs.len
    blobCount = blobs.len
    commitmentCount = forkyBlck.message.body.blob_kzg_commitments.len

  if proofCount != blobCount:
    return err("Invalid block publish: " & $proofCount & " KZG proofs and " &
      $blobCount & " blobs")

  if proofCount != commitmentCount:
    return err("Invalid block publish: " & $proofCount &
      " KZG proofs and " & $commitmentCount & " KZG commitments")

  ok()
|
|
|
|
|
2021-03-17 18:46:45 +00:00
|
|
|
proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
2022-12-07 10:24:51 +00:00
|
|
|
# https://github.com/ethereum/EIPs/blob/master/EIPS/eip-4881.md
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodGet, "/eth/v1/beacon/deposit_snapshot") do (
|
|
|
|
) -> RestApiResponse:
|
2024-03-07 17:42:52 +00:00
|
|
|
let snapshot = node.db.getDepositContractSnapshot().valueOr:
|
2024-01-20 16:06:28 +00:00
|
|
|
# This can happen in a very short window after the client is started,
|
|
|
|
# but the snapshot record still haven't been upgraded in the database.
|
|
|
|
# Returning 404 should be easy to handle for the clients - they just need
|
|
|
|
# to retry.
|
|
|
|
return RestApiResponse.jsonError(Http404,
|
|
|
|
NoFinalizedSnapshotAvailableError)
|
|
|
|
|
2024-03-08 13:22:03 +00:00
|
|
|
RestApiResponse.jsonResponse(snapshot.getTreeSnapshot())
|
2023-01-11 12:29:21 +00:00
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/getGenesis
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodGet, "/eth/v1/beacon/genesis") do () -> RestApiResponse:
|
|
|
|
RestApiResponse.jsonResponse(
|
2021-03-23 22:50:18 +00:00
|
|
|
(
|
2022-03-16 07:20:40 +00:00
|
|
|
genesis_time: getStateField(node.dag.headState, genesis_time),
|
2021-03-17 18:46:45 +00:00
|
|
|
genesis_validators_root:
|
2022-03-16 07:20:40 +00:00
|
|
|
getStateField(node.dag.headState, genesis_validators_root),
|
2021-07-13 14:27:10 +00:00
|
|
|
genesis_fork_version: node.dag.cfg.GENESIS_FORK_VERSION
|
2021-03-17 18:46:45 +00:00
|
|
|
)
|
|
|
|
)
|
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/getStateRoot
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodGet, "/eth/v1/beacon/states/{state_id}/root") do (
|
2021-03-17 18:46:45 +00:00
|
|
|
state_id: StateIdent) -> RestApiResponse:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
let
|
|
|
|
sid = state_id.valueOr:
|
|
|
|
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
|
|
|
|
$error)
|
Prune `BlockRef` on finalization (#3513)
Up til now, the block dag has been using `BlockRef`, a structure adapted
for a full DAG, to represent all of chain history. This is a correct and
simple design, but does not exploit the linearity of the chain once
parts of it finalize.
By pruning the in-memory `BlockRef` structure at finalization, we save,
at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory
landing us at a steady state of ~750mb normal memory usage for a
validating node.
Above all though, we prevent memory usage from growing proportionally
with the length of the chain, something that would not be sustainable
over time - instead, the steady state memory usage is roughly
determined by the validator set size which grows much more slowly. With
these changes, the core should remain sustainable memory-wise post-merge
all the way to withdrawals (when the validator set is expected to grow).
In-memory indices are still used for the "hot" unfinalized portion of
the chain - this ensure that consensus performance remains unchanged.
What changes is that for historical access, we use a db-based linear
slot index which is cache-and-disk-friendly, keeping the cost for
accessing historical data at a similar level as before, achieving the
savings at no percievable cost to functionality or performance.
A nice collateral benefit is the almost-instant startup since we no
longer load any large indicies at dag init.
The cost of this functionality instead can be found in the complexity of
having to deal with two ways of traversing the chain - by `BlockRef` and
by slot.
* use `BlockId` instead of `BlockRef` where finalized / historical data
may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`
instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this need revisiting :)
2022-03-17 17:42:56 +00:00
|
|
|
bslot = node.getBlockSlotId(sid).valueOr:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
if sid.kind == StateQueryKind.Root:
|
|
|
|
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
|
|
|
|
# in current version of database.
|
|
|
|
return RestApiResponse.jsonError(Http500, NoImplementationError)
|
|
|
|
return RestApiResponse.jsonError(Http404, StateNotFoundError,
|
2022-09-23 15:51:04 +00:00
|
|
|
$error)
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
|
Prune `BlockRef` on finalization (#3513)
Up til now, the block dag has been using `BlockRef`, a structure adapted
for a full DAG, to represent all of chain history. This is a correct and
simple design, but does not exploit the linearity of the chain once
parts of it finalize.
By pruning the in-memory `BlockRef` structure at finalization, we save,
at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory
landing us at a steady state of ~750mb normal memory usage for a
validating node.
Above all though, we prevent memory usage from growing proportionally
with the length of the chain, something that would not be sustainable
over time - instead, the steady state memory usage is roughly
determined by the validator set size which grows much more slowly. With
these changes, the core should remain sustainable memory-wise post-merge
all the way to withdrawals (when the validator set is expected to grow).
In-memory indices are still used for the "hot" unfinalized portion of
the chain - this ensure that consensus performance remains unchanged.
What changes is that for historical access, we use a db-based linear
slot index which is cache-and-disk-friendly, keeping the cost for
accessing historical data at a similar level as before, achieving the
savings at no percievable cost to functionality or performance.
A nice collateral benefit is the almost-instant startup since we no
longer load any large indicies at dag init.
The cost of this functionality instead can be found in the complexity of
having to deal with two ways of traversing the chain - by `BlockRef` and
by slot.
* use `BlockId` instead of `BlockRef` where finalized / historical data
may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`
instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this need revisiting :)
2022-03-17 17:42:56 +00:00
|
|
|
node.withStateForBlockSlotId(bslot):
|
2023-09-27 14:45:33 +00:00
|
|
|
return RestApiResponse.jsonResponseFinalized(
|
2022-06-20 05:53:39 +00:00
|
|
|
(root: stateRoot),
|
2023-09-27 14:45:33 +00:00
|
|
|
node.getStateOptimistic(state),
|
|
|
|
node.dag.isFinalized(bslot.bid)
|
2022-06-20 05:53:39 +00:00
|
|
|
)
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
|
2024-01-20 16:06:28 +00:00
|
|
|
RestApiResponse.jsonError(Http404, StateNotFoundError)
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/getStateFork
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodGet, "/eth/v1/beacon/states/{state_id}/fork") do (
|
2021-03-17 18:46:45 +00:00
|
|
|
state_id: StateIdent) -> RestApiResponse:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
let
|
|
|
|
sid = state_id.valueOr:
|
|
|
|
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
|
|
|
|
$error)
|
Prune `BlockRef` on finalization (#3513)
Up til now, the block dag has been using `BlockRef`, a structure adapted
for a full DAG, to represent all of chain history. This is a correct and
simple design, but does not exploit the linearity of the chain once
parts of it finalize.
By pruning the in-memory `BlockRef` structure at finalization, we save,
at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory
landing us at a steady state of ~750mb normal memory usage for a
validating node.
Above all though, we prevent memory usage from growing proportionally
with the length of the chain, something that would not be sustainable
over time - instead, the steady state memory usage is roughly
determined by the validator set size which grows much more slowly. With
these changes, the core should remain sustainable memory-wise post-merge
all the way to withdrawals (when the validator set is expected to grow).
In-memory indices are still used for the "hot" unfinalized portion of
the chain - this ensure that consensus performance remains unchanged.
What changes is that for historical access, we use a db-based linear
slot index which is cache-and-disk-friendly, keeping the cost for
accessing historical data at a similar level as before, achieving the
savings at no percievable cost to functionality or performance.
A nice collateral benefit is the almost-instant startup since we no
longer load any large indicies at dag init.
The cost of this functionality instead can be found in the complexity of
having to deal with two ways of traversing the chain - by `BlockRef` and
by slot.
* use `BlockId` instead of `BlockRef` where finalized / historical data
may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`
instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this need revisiting :)
2022-03-17 17:42:56 +00:00
|
|
|
bslot = node.getBlockSlotId(sid).valueOr:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
if sid.kind == StateQueryKind.Root:
|
|
|
|
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
|
|
|
|
# in current version of database.
|
|
|
|
return RestApiResponse.jsonError(Http500, NoImplementationError)
|
|
|
|
return RestApiResponse.jsonError(Http404, StateNotFoundError,
|
|
|
|
$error)
|
|
|
|
|
Prune `BlockRef` on finalization (#3513)
Up til now, the block dag has been using `BlockRef`, a structure adapted
for a full DAG, to represent all of chain history. This is a correct and
simple design, but does not exploit the linearity of the chain once
parts of it finalize.
By pruning the in-memory `BlockRef` structure at finalization, we save,
at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory
landing us at a steady state of ~750mb normal memory usage for a
validating node.
Above all though, we prevent memory usage from growing proportionally
with the length of the chain, something that would not be sustainable
over time - instead, the steady state memory usage is roughly
determined by the validator set size which grows much more slowly. With
these changes, the core should remain sustainable memory-wise post-merge
all the way to withdrawals (when the validator set is expected to grow).
In-memory indices are still used for the "hot" unfinalized portion of
the chain - this ensure that consensus performance remains unchanged.
What changes is that for historical access, we use a db-based linear
slot index which is cache-and-disk-friendly, keeping the cost for
accessing historical data at a similar level as before, achieving the
savings at no percievable cost to functionality or performance.
A nice collateral benefit is the almost-instant startup since we no
longer load any large indicies at dag init.
The cost of this functionality instead can be found in the complexity of
having to deal with two ways of traversing the chain - by `BlockRef` and
by slot.
* use `BlockId` instead of `BlockRef` where finalized / historical data
may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`
instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this need revisiting :)
2022-03-17 17:42:56 +00:00
|
|
|
node.withStateForBlockSlotId(bslot):
|
2023-09-27 14:45:33 +00:00
|
|
|
return RestApiResponse.jsonResponseFinalized(
|
2021-03-23 22:50:18 +00:00
|
|
|
(
|
2022-06-20 05:53:39 +00:00
|
|
|
previous_version:
|
|
|
|
getStateField(state, fork).previous_version,
|
|
|
|
current_version:
|
|
|
|
getStateField(state, fork).current_version,
|
|
|
|
epoch:
|
|
|
|
getStateField(state, fork).epoch
|
|
|
|
),
|
2023-09-27 14:45:33 +00:00
|
|
|
node.getStateOptimistic(state),
|
|
|
|
node.dag.isFinalized(bslot.bid)
|
2021-03-17 18:46:45 +00:00
|
|
|
)
|
2024-01-20 16:06:28 +00:00
|
|
|
|
|
|
|
RestApiResponse.jsonError(Http404, StateNotFoundError)
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/getStateFinalityCheckpoints
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodGet,
|
|
|
|
"/eth/v1/beacon/states/{state_id}/finality_checkpoints") do (
|
2021-03-17 18:46:45 +00:00
|
|
|
state_id: StateIdent) -> RestApiResponse:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
let
|
|
|
|
sid = state_id.valueOr:
|
|
|
|
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
|
|
|
|
$error)
|
Prune `BlockRef` on finalization (#3513)
Up til now, the block dag has been using `BlockRef`, a structure adapted
for a full DAG, to represent all of chain history. This is a correct and
simple design, but does not exploit the linearity of the chain once
parts of it finalize.
By pruning the in-memory `BlockRef` structure at finalization, we save,
at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory
landing us at a steady state of ~750mb normal memory usage for a
validating node.
Above all though, we prevent memory usage from growing proportionally
with the length of the chain, something that would not be sustainable
over time - instead, the steady state memory usage is roughly
determined by the validator set size which grows much more slowly. With
these changes, the core should remain sustainable memory-wise post-merge
all the way to withdrawals (when the validator set is expected to grow).
In-memory indices are still used for the "hot" unfinalized portion of
the chain - this ensure that consensus performance remains unchanged.
What changes is that for historical access, we use a db-based linear
slot index which is cache-and-disk-friendly, keeping the cost for
accessing historical data at a similar level as before, achieving the
savings at no percievable cost to functionality or performance.
A nice collateral benefit is the almost-instant startup since we no
longer load any large indicies at dag init.
The cost of this functionality instead can be found in the complexity of
having to deal with two ways of traversing the chain - by `BlockRef` and
by slot.
* use `BlockId` instead of `BlockRef` where finalized / historical data
may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`
instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this need revisiting :)
2022-03-17 17:42:56 +00:00
|
|
|
bslot = node.getBlockSlotId(sid).valueOr:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
if sid.kind == StateQueryKind.Root:
|
|
|
|
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
|
|
|
|
# in current version of database.
|
|
|
|
return RestApiResponse.jsonError(Http500, NoImplementationError)
|
|
|
|
return RestApiResponse.jsonError(Http404, StateNotFoundError,
|
2022-06-20 05:53:39 +00:00
|
|
|
$error)
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
|
Prune `BlockRef` on finalization (#3513)
Up til now, the block dag has been using `BlockRef`, a structure adapted
for a full DAG, to represent all of chain history. This is a correct and
simple design, but does not exploit the linearity of the chain once
parts of it finalize.
By pruning the in-memory `BlockRef` structure at finalization, we save,
at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory
landing us at a steady state of ~750mb normal memory usage for a
validating node.
Above all though, we prevent memory usage from growing proportionally
with the length of the chain, something that would not be sustainable
over time - instead, the steady state memory usage is roughly
determined by the validator set size which grows much more slowly. With
these changes, the core should remain sustainable memory-wise post-merge
all the way to withdrawals (when the validator set is expected to grow).
In-memory indices are still used for the "hot" unfinalized portion of
the chain - this ensure that consensus performance remains unchanged.
What changes is that for historical access, we use a db-based linear
slot index which is cache-and-disk-friendly, keeping the cost for
accessing historical data at a similar level as before, achieving the
savings at no percievable cost to functionality or performance.
A nice collateral benefit is the almost-instant startup since we no
longer load any large indicies at dag init.
The cost of this functionality instead can be found in the complexity of
having to deal with two ways of traversing the chain - by `BlockRef` and
by slot.
* use `BlockId` instead of `BlockRef` where finalized / historical data
may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`
instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this need revisiting :)
2022-03-17 17:42:56 +00:00
|
|
|
node.withStateForBlockSlotId(bslot):
|
2023-09-27 14:45:33 +00:00
|
|
|
return RestApiResponse.jsonResponseFinalized(
|
2021-03-23 22:50:18 +00:00
|
|
|
(
|
2022-06-20 05:53:39 +00:00
|
|
|
previous_justified:
|
|
|
|
getStateField(state, previous_justified_checkpoint),
|
|
|
|
current_justified:
|
|
|
|
getStateField(state, current_justified_checkpoint),
|
|
|
|
finalized:
|
|
|
|
getStateField(state, finalized_checkpoint)
|
|
|
|
),
|
2023-09-27 14:45:33 +00:00
|
|
|
node.getStateOptimistic(state),
|
|
|
|
node.dag.isFinalized(bslot.bid)
|
2021-03-17 18:46:45 +00:00
|
|
|
)
|
2024-01-20 16:06:28 +00:00
|
|
|
|
|
|
|
RestApiResponse.jsonError(Http404, StateNotFoundError)
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2023-11-29 12:05:03 +00:00
|
|
|
proc getIndices(
|
|
|
|
node: BeaconNode,
|
|
|
|
validatorIds: openArray[ValidatorIdent],
|
|
|
|
state: ForkedHashedBeaconState
|
|
|
|
): Result[seq[ValidatorIndex], RestErrorMessage] =
|
|
|
|
var
|
|
|
|
keyset: HashSet[ValidatorPubKey]
|
|
|
|
indexset: HashSet[ValidatorIndex]
|
|
|
|
|
|
|
|
let validatorsCount = lenu64(getStateField(state, validators))
|
|
|
|
|
|
|
|
for item in validatorIds:
|
|
|
|
case item.kind
|
|
|
|
of ValidatorQueryKind.Key:
|
|
|
|
# Test for uniqueness of value.
|
|
|
|
if keyset.containsOrIncl(item.key):
|
|
|
|
return err(RestErrorMessage.init(
|
|
|
|
Http400, NonUniqueValidatorIdError, $item.key))
|
|
|
|
of ValidatorQueryKind.Index:
|
|
|
|
let vindex = item.index.toValidatorIndex().valueOr:
|
|
|
|
case error
|
|
|
|
of ValidatorIndexError.TooHighValue:
|
|
|
|
return err(RestErrorMessage.init(
|
|
|
|
Http400, TooHighValidatorIndexValueError))
|
|
|
|
of ValidatorIndexError.UnsupportedValue:
|
|
|
|
return err(RestErrorMessage.init(
|
|
|
|
Http500, UnsupportedValidatorIndexValueError))
|
|
|
|
if uint64(vindex) < validatorsCount:
|
|
|
|
# We're only adding validator indices which are present in
|
|
|
|
# validators list at this moment.
|
|
|
|
if indexset.containsOrIncl(vindex):
|
|
|
|
return err(RestErrorMessage.init(
|
|
|
|
Http400, NonUniqueValidatorIdError,
|
|
|
|
Base10.toString(uint64(vindex))))
|
|
|
|
|
|
|
|
if len(keyset) > 0:
|
|
|
|
let optIndices = keysToIndices(node.restKeysCache, state, keyset.toSeq())
|
|
|
|
# Remove all the duplicates.
|
|
|
|
for item in optIndices:
|
|
|
|
# We ignore missing keys.
|
|
|
|
if item.isSome():
|
|
|
|
indexset.incl(item.get())
|
|
|
|
ok(indexset.toSeq())
|
|
|
|
|
|
|
|
proc getValidators(
|
|
|
|
node: BeaconNode,
|
|
|
|
bslot: BlockSlotId,
|
|
|
|
validatorsMask: ValidatorFilter,
|
|
|
|
validatorIds: openArray[ValidatorIdent]
|
|
|
|
): RestApiResponse =
|
|
|
|
node.withStateForBlockSlotId(bslot):
|
|
|
|
let
|
|
|
|
stateEpoch = getStateField(state, slot).epoch()
|
|
|
|
indices = node.getIndices(validatorIds, state).valueOr:
|
|
|
|
return RestApiResponse.jsonError(error)
|
|
|
|
response =
|
|
|
|
block:
|
|
|
|
var res: seq[RestValidator]
|
|
|
|
if len(indices) == 0:
|
|
|
|
# Case when `len(indices) == 0 and len(validatorIds) != 0` means
|
|
|
|
# that we can't find validator identifiers in state, so we should
|
|
|
|
# return empty response.
|
|
|
|
if len(validatorIds) == 0:
|
|
|
|
# There are no indices, so we're going to filter all the
|
|
|
|
# validators.
|
|
|
|
for index, validator in getStateField(state, validators):
|
|
|
|
let
|
|
|
|
balance = getStateField(state, balances).item(index)
|
|
|
|
status = validator.getStatus(stateEpoch).valueOr:
|
|
|
|
return RestApiResponse.jsonError(
|
|
|
|
Http400, ValidatorStatusNotFoundError, $error)
|
|
|
|
if status in validatorsMask:
|
|
|
|
res.add(RestValidator.init(ValidatorIndex(index), balance,
|
|
|
|
toString(status), validator))
|
|
|
|
else:
|
|
|
|
for index in indices:
|
|
|
|
let
|
|
|
|
validator = getStateField(state, validators).item(index)
|
|
|
|
balance = getStateField(state, balances).item(index)
|
|
|
|
status = validator.getStatus(stateEpoch).valueOr:
|
|
|
|
return RestApiResponse.jsonError(
|
|
|
|
Http400, ValidatorStatusNotFoundError, $error)
|
|
|
|
if status in validatorsMask:
|
|
|
|
res.add(RestValidator.init(index, balance, toString(status),
|
|
|
|
validator))
|
|
|
|
res
|
|
|
|
return RestApiResponse.jsonResponseFinalized(
|
|
|
|
response,
|
|
|
|
node.getStateOptimistic(state),
|
|
|
|
node.dag.isFinalized(bslot.bid)
|
|
|
|
)
|
|
|
|
RestApiResponse.jsonError(Http404, StateNotFoundError)
|
|
|
|
|
|
|
|
proc getBalances(
|
|
|
|
node: BeaconNode,
|
|
|
|
bslot: BlockSlotId,
|
|
|
|
validatorIds: openArray[ValidatorIdent]
|
|
|
|
): RestApiResponse =
|
|
|
|
node.withStateForBlockSlotId(bslot):
|
|
|
|
let
|
|
|
|
indices = node.getIndices(validatorIds, state).valueOr:
|
|
|
|
return RestApiResponse.jsonError(error)
|
|
|
|
response =
|
|
|
|
block:
|
|
|
|
var res: seq[RestValidatorBalance]
|
|
|
|
if len(indices) == 0:
|
|
|
|
# Case when `len(indices) == 0 and len(validatorIds) != 0` means
|
|
|
|
# that we can't find validator identifiers in state, so we should
|
|
|
|
# return empty response.
|
|
|
|
if len(validatorIds) == 0:
|
|
|
|
# There are no indices, so we're going to return balances of all
|
|
|
|
# known validators.
|
|
|
|
for index, balance in getStateField(state, balances):
|
|
|
|
res.add(RestValidatorBalance.init(ValidatorIndex(index),
|
|
|
|
balance))
|
|
|
|
else:
|
|
|
|
for index in indices:
|
|
|
|
let balance = getStateField(state, balances).item(index)
|
|
|
|
res.add(RestValidatorBalance.init(index, balance))
|
|
|
|
res
|
|
|
|
|
|
|
|
return RestApiResponse.jsonResponseFinalized(
|
|
|
|
response,
|
|
|
|
node.getStateOptimistic(state),
|
|
|
|
node.dag.isFinalized(bslot.bid)
|
|
|
|
)
|
|
|
|
RestApiResponse.jsonError(Http404, StateNotFoundError)
|
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/getStateValidators
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodGet, "/eth/v1/beacon/states/{state_id}/validators") do (
|
2021-03-17 18:46:45 +00:00
|
|
|
state_id: StateIdent, id: seq[ValidatorIdent],
|
|
|
|
status: seq[ValidatorFilter]) -> RestApiResponse:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
let
|
|
|
|
sid = state_id.valueOr:
|
|
|
|
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
|
|
|
|
$error)
|
Prune `BlockRef` on finalization (#3513)
Up til now, the block dag has been using `BlockRef`, a structure adapted
for a full DAG, to represent all of chain history. This is a correct and
simple design, but does not exploit the linearity of the chain once
parts of it finalize.
By pruning the in-memory `BlockRef` structure at finalization, we save,
at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory
landing us at a steady state of ~750mb normal memory usage for a
validating node.
Above all though, we prevent memory usage from growing proportionally
with the length of the chain, something that would not be sustainable
over time - instead, the steady state memory usage is roughly
determined by the validator set size which grows much more slowly. With
these changes, the core should remain sustainable memory-wise post-merge
all the way to withdrawals (when the validator set is expected to grow).
In-memory indices are still used for the "hot" unfinalized portion of
the chain - this ensure that consensus performance remains unchanged.
What changes is that for historical access, we use a db-based linear
slot index which is cache-and-disk-friendly, keeping the cost for
accessing historical data at a similar level as before, achieving the
savings at no percievable cost to functionality or performance.
A nice collateral benefit is the almost-instant startup since we no
longer load any large indicies at dag init.
The cost of this functionality instead can be found in the complexity of
having to deal with two ways of traversing the chain - by `BlockRef` and
by slot.
* use `BlockId` instead of `BlockRef` where finalized / historical data
may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`
instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this need revisiting :)
2022-03-17 17:42:56 +00:00
|
|
|
bslot = node.getBlockSlotId(sid).valueOr:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
if sid.kind == StateQueryKind.Root:
|
|
|
|
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
|
|
|
|
# in current version of database.
|
|
|
|
return RestApiResponse.jsonError(Http500, NoImplementationError)
|
2023-11-29 12:05:03 +00:00
|
|
|
return RestApiResponse.jsonError(
|
|
|
|
Http404, StateNotFoundError, $error)
|
|
|
|
validatorIds =
|
2021-10-14 10:38:38 +00:00
|
|
|
block:
|
2023-11-29 12:05:03 +00:00
|
|
|
if id.isErr():
|
|
|
|
return RestApiResponse.jsonError(
|
|
|
|
Http400, InvalidValidatorIdValueError)
|
|
|
|
let ires = id.get()
|
|
|
|
if len(ires) > ServerMaximumValidatorIds:
|
|
|
|
return RestApiResponse.jsonError(
|
|
|
|
Http414, MaximumNumberOfValidatorIdsError)
|
|
|
|
ires
|
|
|
|
validatorsMask =
|
2021-10-14 10:38:38 +00:00
|
|
|
block:
|
2023-11-29 12:05:03 +00:00
|
|
|
if status.isErr():
|
|
|
|
return RestApiResponse.jsonError(Http400,
|
|
|
|
InvalidValidatorStatusValueError)
|
|
|
|
validateFilter(status.get()).valueOr:
|
|
|
|
return RestApiResponse.jsonError(
|
|
|
|
Http400, InvalidValidatorStatusValueError, $error)
|
|
|
|
getValidators(node, bslot, validatorsMask, validatorIds)
|
|
|
|
|
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/postStateValidators
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodPost, "/eth/v1/beacon/states/{state_id}/validators") do (
|
2023-11-29 12:05:03 +00:00
|
|
|
state_id: StateIdent, contentBody: Option[ContentBody]) -> RestApiResponse:
|
|
|
|
let
|
|
|
|
(validatorIds, validatorsMask) =
|
|
|
|
block:
|
|
|
|
if contentBody.isNone():
|
|
|
|
return RestApiResponse.jsonError(Http400, EmptyRequestBodyError)
|
|
|
|
let request =
|
|
|
|
decodeBody(RestValidatorRequest, contentBody.get()).valueOr:
|
|
|
|
return RestApiResponse.jsonError(
|
|
|
|
Http400, InvalidRequestBodyError, $error)
|
|
|
|
let
|
|
|
|
ids = request.ids.valueOr: @[]
|
2024-01-19 23:34:11 +00:00
|
|
|
filter = request.status.valueOr: AllValidatorFilterKinds
|
2023-11-29 12:05:03 +00:00
|
|
|
(ids, filter)
|
|
|
|
sid = state_id.valueOr:
|
|
|
|
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
|
|
|
|
$error)
|
|
|
|
bslot = node.getBlockSlotId(sid).valueOr:
|
|
|
|
if sid.kind == StateQueryKind.Root:
|
|
|
|
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
|
|
|
|
# in current version of database.
|
|
|
|
return RestApiResponse.jsonError(Http500, NoImplementationError)
|
|
|
|
return RestApiResponse.jsonError(Http404, StateNotFoundError, $error)
|
|
|
|
getValidators(node, bslot, validatorsMask, validatorIds)
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/getStateValidator
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodGet,
|
|
|
|
"/eth/v1/beacon/states/{state_id}/validators/{validator_id}") do (
|
2021-03-17 18:46:45 +00:00
|
|
|
state_id: StateIdent, validator_id: ValidatorIdent) -> RestApiResponse:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
let
|
|
|
|
sid = state_id.valueOr:
|
|
|
|
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
|
|
|
|
$error)
|
|
|
|
vid = validator_id.valueOr:
|
|
|
|
return RestApiResponse.jsonError(Http400, InvalidValidatorIdValueError,
|
|
|
|
$error)
|
Prune `BlockRef` on finalization (#3513)
Up til now, the block dag has been using `BlockRef`, a structure adapted
for a full DAG, to represent all of chain history. This is a correct and
simple design, but does not exploit the linearity of the chain once
parts of it finalize.
By pruning the in-memory `BlockRef` structure at finalization, we save,
at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory
landing us at a steady state of ~750mb normal memory usage for a
validating node.
Above all though, we prevent memory usage from growing proportionally
with the length of the chain, something that would not be sustainable
over time - instead, the steady state memory usage is roughly
determined by the validator set size which grows much more slowly. With
these changes, the core should remain sustainable memory-wise post-merge
all the way to withdrawals (when the validator set is expected to grow).
In-memory indices are still used for the "hot" unfinalized portion of
the chain - this ensure that consensus performance remains unchanged.
What changes is that for historical access, we use a db-based linear
slot index which is cache-and-disk-friendly, keeping the cost for
accessing historical data at a similar level as before, achieving the
savings at no percievable cost to functionality or performance.
A nice collateral benefit is the almost-instant startup since we no
longer load any large indicies at dag init.
The cost of this functionality instead can be found in the complexity of
having to deal with two ways of traversing the chain - by `BlockRef` and
by slot.
* use `BlockId` instead of `BlockRef` where finalized / historical data
may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`
instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this need revisiting :)
2022-03-17 17:42:56 +00:00
|
|
|
bslot = node.getBlockSlotId(sid).valueOr:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
if sid.kind == StateQueryKind.Root:
|
|
|
|
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
|
|
|
|
# in current version of database.
|
|
|
|
return RestApiResponse.jsonError(Http500, NoImplementationError)
|
|
|
|
return RestApiResponse.jsonError(Http404, StateNotFoundError,
|
|
|
|
$error)
|
|
|
|
|
Prune `BlockRef` on finalization (#3513)
Up til now, the block dag has been using `BlockRef`, a structure adapted
for a full DAG, to represent all of chain history. This is a correct and
simple design, but does not exploit the linearity of the chain once
parts of it finalize.
By pruning the in-memory `BlockRef` structure at finalization, we save,
at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory
landing us at a steady state of ~750mb normal memory usage for a
validating node.
Above all though, we prevent memory usage from growing proportionally
with the length of the chain, something that would not be sustainable
over time - instead, the steady state memory usage is roughly
determined by the validator set size which grows much more slowly. With
these changes, the core should remain sustainable memory-wise post-merge
all the way to withdrawals (when the validator set is expected to grow).
In-memory indices are still used for the "hot" unfinalized portion of
the chain - this ensure that consensus performance remains unchanged.
What changes is that for historical access, we use a db-based linear
slot index which is cache-and-disk-friendly, keeping the cost for
accessing historical data at a similar level as before, achieving the
savings at no percievable cost to functionality or performance.
A nice collateral benefit is the almost-instant startup since we no
longer load any large indicies at dag init.
The cost of this functionality instead can be found in the complexity of
having to deal with two ways of traversing the chain - by `BlockRef` and
by slot.
* use `BlockId` instead of `BlockRef` where finalized / historical data
may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`
instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this need revisiting :)
2022-03-17 17:42:56 +00:00
|
|
|
node.withStateForBlockSlotId(bslot):
|
2021-10-14 10:38:38 +00:00
|
|
|
let
|
2022-03-16 07:20:40 +00:00
|
|
|
current_epoch = getStateField(state, slot).epoch()
|
|
|
|
validatorsCount = lenu64(getStateField(state, validators))
|
2021-10-14 10:38:38 +00:00
|
|
|
|
|
|
|
let vindex =
|
|
|
|
block:
|
|
|
|
case vid.kind
|
|
|
|
of ValidatorQueryKind.Key:
|
Prune `BlockRef` on finalization (#3513)
Up til now, the block dag has been using `BlockRef`, a structure adapted
for a full DAG, to represent all of chain history. This is a correct and
simple design, but does not exploit the linearity of the chain once
parts of it finalize.
By pruning the in-memory `BlockRef` structure at finalization, we save,
at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory
landing us at a steady state of ~750mb normal memory usage for a
validating node.
Above all though, we prevent memory usage from growing proportionally
with the length of the chain, something that would not be sustainable
over time - instead, the steady state memory usage is roughly
determined by the validator set size which grows much more slowly. With
these changes, the core should remain sustainable memory-wise post-merge
all the way to withdrawals (when the validator set is expected to grow).
In-memory indices are still used for the "hot" unfinalized portion of
the chain - this ensure that consensus performance remains unchanged.
What changes is that for historical access, we use a db-based linear
slot index which is cache-and-disk-friendly, keeping the cost for
accessing historical data at a similar level as before, achieving the
savings at no percievable cost to functionality or performance.
A nice collateral benefit is the almost-instant startup since we no
longer load any large indicies at dag init.
The cost of this functionality instead can be found in the complexity of
having to deal with two ways of traversing the chain - by `BlockRef` and
by slot.
* use `BlockId` instead of `BlockRef` where finalized / historical data
may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`
instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this need revisiting :)
2022-03-17 17:42:56 +00:00
|
|
|
let optIndices = keysToIndices(node.restKeysCache, state, [vid.key])
|
2021-10-14 10:38:38 +00:00
|
|
|
if optIndices[0].isNone():
|
2021-10-18 08:54:20 +00:00
|
|
|
return RestApiResponse.jsonError(Http404, ValidatorNotFoundError)
|
2021-10-14 10:38:38 +00:00
|
|
|
optIndices[0].get()
|
|
|
|
of ValidatorQueryKind.Index:
|
2021-04-03 00:21:44 +00:00
|
|
|
let vres = vid.index.toValidatorIndex()
|
|
|
|
if vres.isErr():
|
2024-01-20 16:06:28 +00:00
|
|
|
case vres.error
|
2021-04-03 00:21:44 +00:00
|
|
|
of ValidatorIndexError.TooHighValue:
|
|
|
|
return RestApiResponse.jsonError(Http400,
|
2021-04-08 10:49:28 +00:00
|
|
|
TooHighValidatorIndexValueError)
|
2021-04-03 00:21:44 +00:00
|
|
|
of ValidatorIndexError.UnsupportedValue:
|
|
|
|
return RestApiResponse.jsonError(Http500,
|
2021-04-08 10:49:28 +00:00
|
|
|
UnsupportedValidatorIndexValueError)
|
2021-10-14 10:38:38 +00:00
|
|
|
let index = vres.get()
|
|
|
|
if uint64(index) >= validatorsCount:
|
2021-10-18 08:54:20 +00:00
|
|
|
return RestApiResponse.jsonError(Http404, ValidatorNotFoundError)
|
2021-10-14 10:38:38 +00:00
|
|
|
index
|
|
|
|
|
|
|
|
let
|
2022-05-30 13:30:42 +00:00
|
|
|
validator = getStateField(state, validators).item(vindex)
|
|
|
|
balance = getStateField(state, balances).item(vindex)
|
2021-10-14 10:38:38 +00:00
|
|
|
status =
|
|
|
|
block:
|
|
|
|
let sres = validator.getStatus(current_epoch)
|
|
|
|
if sres.isErr():
|
|
|
|
return RestApiResponse.jsonError(Http400,
|
|
|
|
ValidatorStatusNotFoundError,
|
|
|
|
$sres.get())
|
|
|
|
toString(sres.get())
|
2023-09-27 14:45:33 +00:00
|
|
|
return RestApiResponse.jsonResponseFinalized(
|
2022-06-20 05:53:39 +00:00
|
|
|
RestValidator.init(vindex, balance, status, validator),
|
2023-09-27 14:45:33 +00:00
|
|
|
node.getStateOptimistic(state),
|
|
|
|
node.dag.isFinalized(bslot.bid)
|
2021-10-14 10:38:38 +00:00
|
|
|
)
|
2024-01-20 16:06:28 +00:00
|
|
|
|
|
|
|
RestApiResponse.jsonError(Http404, StateNotFoundError)
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/getStateValidatorBalances
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodGet,
|
|
|
|
"/eth/v1/beacon/states/{state_id}/validator_balances") do (
|
2021-03-17 18:46:45 +00:00
|
|
|
state_id: StateIdent, id: seq[ValidatorIdent]) -> RestApiResponse:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
let
|
|
|
|
sid = state_id.valueOr:
|
|
|
|
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
|
|
|
|
$error)
|
Prune `BlockRef` on finalization (#3513)
Up til now, the block dag has been using `BlockRef`, a structure adapted
for a full DAG, to represent all of chain history. This is a correct and
simple design, but does not exploit the linearity of the chain once
parts of it finalize.
By pruning the in-memory `BlockRef` structure at finalization, we save,
at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory
landing us at a steady state of ~750mb normal memory usage for a
validating node.
Above all though, we prevent memory usage from growing proportionally
with the length of the chain, something that would not be sustainable
over time - instead, the steady state memory usage is roughly
determined by the validator set size which grows much more slowly. With
these changes, the core should remain sustainable memory-wise post-merge
all the way to withdrawals (when the validator set is expected to grow).
In-memory indices are still used for the "hot" unfinalized portion of
the chain - this ensure that consensus performance remains unchanged.
What changes is that for historical access, we use a db-based linear
slot index which is cache-and-disk-friendly, keeping the cost for
accessing historical data at a similar level as before, achieving the
savings at no percievable cost to functionality or performance.
A nice collateral benefit is the almost-instant startup since we no
longer load any large indicies at dag init.
The cost of this functionality instead can be found in the complexity of
having to deal with two ways of traversing the chain - by `BlockRef` and
by slot.
* use `BlockId` instead of `BlockRef` where finalized / historical data
may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`
instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this need revisiting :)
2022-03-17 17:42:56 +00:00
|
|
|
bslot = node.getBlockSlotId(sid).valueOr:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
if sid.kind == StateQueryKind.Root:
|
|
|
|
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
|
|
|
|
# in current version of database.
|
|
|
|
return RestApiResponse.jsonError(Http500, NoImplementationError)
|
2023-11-29 12:05:03 +00:00
|
|
|
return RestApiResponse.jsonError(Http404, StateNotFoundError, $error)
|
|
|
|
validatorIds =
|
2021-10-14 10:38:38 +00:00
|
|
|
block:
|
2023-11-29 12:05:03 +00:00
|
|
|
if id.isErr():
|
|
|
|
return RestApiResponse.jsonError(
|
|
|
|
Http400, InvalidValidatorIdValueError)
|
|
|
|
let ires = id.get()
|
|
|
|
if len(ires) > ServerMaximumValidatorIds:
|
|
|
|
return RestApiResponse.jsonError(
|
|
|
|
Http400, MaximumNumberOfValidatorIdsError)
|
|
|
|
ires
|
|
|
|
getBalances(node, bslot, validatorIds)
|
|
|
|
|
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/postStateValidatorBalances
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodPost,
|
|
|
|
"/eth/v1/beacon/states/{state_id}/validator_balances") do (
|
2023-11-29 12:05:03 +00:00
|
|
|
state_id: StateIdent, contentBody: Option[ContentBody]) -> RestApiResponse:
|
|
|
|
let
|
|
|
|
validatorIds =
|
2021-10-14 10:38:38 +00:00
|
|
|
block:
|
2023-11-29 12:05:03 +00:00
|
|
|
if contentBody.isNone():
|
|
|
|
return RestApiResponse.jsonError(Http400, EmptyRequestBodyError)
|
|
|
|
let body = contentBody.get()
|
|
|
|
decodeBody(seq[ValidatorIdent], body).valueOr:
|
|
|
|
return RestApiResponse.jsonError(
|
|
|
|
Http400, InvalidValidatorIdValueError, $error)
|
|
|
|
sid = state_id.valueOr:
|
|
|
|
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
|
|
|
|
$error)
|
|
|
|
bslot = node.getBlockSlotId(sid).valueOr:
|
|
|
|
if sid.kind == StateQueryKind.Root:
|
|
|
|
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
|
|
|
|
# in current version of database.
|
|
|
|
return RestApiResponse.jsonError(Http500, NoImplementationError)
|
|
|
|
return RestApiResponse.jsonError(Http404, StateNotFoundError, $error)
|
|
|
|
getBalances(node, bslot, validatorIds)
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/getEpochCommittees
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodGet,
|
|
|
|
"/eth/v1/beacon/states/{state_id}/committees") do (
|
2021-03-17 18:46:45 +00:00
|
|
|
state_id: StateIdent, epoch: Option[Epoch], index: Option[CommitteeIndex],
|
|
|
|
slot: Option[Slot]) -> RestApiResponse:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
let
|
|
|
|
sid = state_id.valueOr:
|
|
|
|
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
|
|
|
|
$error)
|
Prune `BlockRef` on finalization (#3513)
Up til now, the block dag has been using `BlockRef`, a structure adapted
for a full DAG, to represent all of chain history. This is a correct and
simple design, but does not exploit the linearity of the chain once
parts of it finalize.
By pruning the in-memory `BlockRef` structure at finalization, we save,
at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory
landing us at a steady state of ~750mb normal memory usage for a
validating node.
Above all though, we prevent memory usage from growing proportionally
with the length of the chain, something that would not be sustainable
over time - instead, the steady state memory usage is roughly
determined by the validator set size which grows much more slowly. With
these changes, the core should remain sustainable memory-wise post-merge
all the way to withdrawals (when the validator set is expected to grow).
In-memory indices are still used for the "hot" unfinalized portion of
the chain - this ensure that consensus performance remains unchanged.
What changes is that for historical access, we use a db-based linear
slot index which is cache-and-disk-friendly, keeping the cost for
accessing historical data at a similar level as before, achieving the
savings at no percievable cost to functionality or performance.
A nice collateral benefit is the almost-instant startup since we no
longer load any large indicies at dag init.
The cost of this functionality instead can be found in the complexity of
having to deal with two ways of traversing the chain - by `BlockRef` and
by slot.
* use `BlockId` instead of `BlockRef` where finalized / historical data
may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`
instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this need revisiting :)
2022-03-17 17:42:56 +00:00
|
|
|
bslot = node.getBlockSlotId(sid).valueOr:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
if sid.kind == StateQueryKind.Root:
|
|
|
|
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
|
|
|
|
# in current version of database.
|
|
|
|
return RestApiResponse.jsonError(Http500, NoImplementationError)
|
|
|
|
return RestApiResponse.jsonError(Http404, StateNotFoundError,
|
|
|
|
$error)
|
|
|
|
|
2021-03-17 18:46:45 +00:00
|
|
|
let vepoch =
|
|
|
|
if epoch.isSome():
|
|
|
|
let repoch = epoch.get()
|
|
|
|
if repoch.isErr():
|
2021-04-08 10:49:28 +00:00
|
|
|
return RestApiResponse.jsonError(Http400, InvalidEpochValueError,
|
2024-01-20 16:06:28 +00:00
|
|
|
$repoch.error)
|
2021-04-04 09:48:44 +00:00
|
|
|
let res = repoch.get()
|
2022-01-08 20:06:34 +00:00
|
|
|
|
|
|
|
if res > bslot.slot.epoch + MIN_SEED_LOOKAHEAD:
|
|
|
|
return RestApiResponse.jsonError(
|
|
|
|
Http400, InvalidEpochValueError,
|
|
|
|
"Requested epoch more than 1 epoch past state epoch")
|
|
|
|
|
|
|
|
if res + EPOCHS_PER_HISTORICAL_VECTOR <
|
|
|
|
bslot.slot.epoch + MIN_SEED_LOOKAHEAD:
|
|
|
|
return RestApiResponse.jsonError(
|
|
|
|
Http400, InvalidEpochValueError,
|
|
|
|
"Requested epoch earlier than what committees can be computed for")
|
|
|
|
|
2021-04-04 09:48:44 +00:00
|
|
|
some(res)
|
2021-03-17 18:46:45 +00:00
|
|
|
else:
|
|
|
|
none[Epoch]()
|
|
|
|
let vindex =
|
|
|
|
if index.isSome():
|
|
|
|
let rindex = index.get()
|
|
|
|
if rindex.isErr():
|
2021-04-08 10:49:28 +00:00
|
|
|
return RestApiResponse.jsonError(Http400,
|
|
|
|
InvalidCommitteeIndexValueError,
|
2024-01-20 16:06:28 +00:00
|
|
|
$rindex.error)
|
2021-03-17 18:46:45 +00:00
|
|
|
some(rindex.get())
|
|
|
|
else:
|
|
|
|
none[CommitteeIndex]()
|
|
|
|
let vslot =
|
|
|
|
if slot.isSome():
|
|
|
|
let rslot = slot.get()
|
|
|
|
if rslot.isErr():
|
2021-04-08 10:49:28 +00:00
|
|
|
return RestApiResponse.jsonError(Http400, InvalidSlotValueError,
|
2024-01-20 16:06:28 +00:00
|
|
|
$rslot.error)
|
2022-01-08 20:06:34 +00:00
|
|
|
let res = rslot.get()
|
|
|
|
if vepoch.isSome():
|
|
|
|
if res.epoch != vepoch.get():
|
2024-01-20 16:06:28 +00:00
|
|
|
return RestApiResponse.jsonError(
|
|
|
|
Http400, InvalidSlotValueError,
|
|
|
|
"Slot does not match requested epoch")
|
2022-01-08 20:06:34 +00:00
|
|
|
else:
|
|
|
|
if res.epoch > bslot.slot.epoch + 1:
|
|
|
|
return RestApiResponse.jsonError(
|
|
|
|
Http400, InvalidEpochValueError,
|
|
|
|
"Requested slot more than 1 epoch past state epoch")
|
|
|
|
|
|
|
|
if res.epoch + EPOCHS_PER_HISTORICAL_VECTOR <
|
|
|
|
bslot.slot.epoch + MIN_SEED_LOOKAHEAD:
|
|
|
|
return RestApiResponse.jsonError(
|
|
|
|
Http400, InvalidEpochValueError,
|
|
|
|
"Requested slot earlier than what committees can be computed for")
|
|
|
|
|
|
|
|
some(res)
|
2021-03-17 18:46:45 +00:00
|
|
|
else:
|
|
|
|
none[Slot]()
|
2024-01-20 16:06:28 +00:00
|
|
|
|
Prune `BlockRef` on finalization (#3513)
Up til now, the block dag has been using `BlockRef`, a structure adapted
for a full DAG, to represent all of chain history. This is a correct and
simple design, but does not exploit the linearity of the chain once
parts of it finalize.
By pruning the in-memory `BlockRef` structure at finalization, we save,
at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory
landing us at a steady state of ~750mb normal memory usage for a
validating node.
Above all though, we prevent memory usage from growing proportionally
with the length of the chain, something that would not be sustainable
over time - instead, the steady state memory usage is roughly
determined by the validator set size which grows much more slowly. With
these changes, the core should remain sustainable memory-wise post-merge
all the way to withdrawals (when the validator set is expected to grow).
In-memory indices are still used for the "hot" unfinalized portion of
the chain - this ensure that consensus performance remains unchanged.
What changes is that for historical access, we use a db-based linear
slot index which is cache-and-disk-friendly, keeping the cost for
accessing historical data at a similar level as before, achieving the
savings at no percievable cost to functionality or performance.
A nice collateral benefit is the almost-instant startup since we no
longer load any large indicies at dag init.
The cost of this functionality instead can be found in the complexity of
having to deal with two ways of traversing the chain - by `BlockRef` and
by slot.
* use `BlockId` instead of `BlockRef` where finalized / historical data
may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`
instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this need revisiting :)
2022-03-17 17:42:56 +00:00
|
|
|
node.withStateForBlockSlotId(bslot):
|
2021-03-17 18:46:45 +00:00
|
|
|
proc getCommittee(slot: Slot,
|
2021-09-23 22:13:25 +00:00
|
|
|
index: CommitteeIndex): RestBeaconStatesCommittees =
|
2022-03-16 07:20:40 +00:00
|
|
|
let validators = get_beacon_committee(state, slot, index, cache)
|
2021-05-20 17:56:12 +00:00
|
|
|
RestBeaconStatesCommittees(index: index, slot: slot,
|
|
|
|
validators: validators)
|
2021-03-17 18:46:45 +00:00
|
|
|
|
|
|
|
proc forSlot(slot: Slot, cindex: Option[CommitteeIndex],
|
2021-05-20 17:56:12 +00:00
|
|
|
res: var seq[RestBeaconStatesCommittees]) =
|
2022-01-12 20:42:03 +00:00
|
|
|
let committees_per_slot = get_committee_count_per_slot(
|
2022-03-16 07:20:40 +00:00
|
|
|
state, slot.epoch, cache)
|
2021-03-17 18:46:45 +00:00
|
|
|
|
|
|
|
if cindex.isNone:
|
2022-01-12 20:42:03 +00:00
|
|
|
for committee_index in get_committee_indices(committees_per_slot):
|
2022-01-08 23:28:49 +00:00
|
|
|
res.add(getCommittee(slot, committee_index))
|
2021-03-17 18:46:45 +00:00
|
|
|
else:
|
2022-01-08 23:28:49 +00:00
|
|
|
let
|
|
|
|
idx = cindex.get()
|
|
|
|
if idx < committees_per_slot:
|
2021-06-29 15:09:29 +00:00
|
|
|
res.add(getCommittee(slot, idx))
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2021-05-20 17:56:12 +00:00
|
|
|
var res: seq[RestBeaconStatesCommittees]
|
2021-03-17 18:46:45 +00:00
|
|
|
let qepoch =
|
|
|
|
if vepoch.isNone:
|
2022-03-16 07:20:40 +00:00
|
|
|
epoch(getStateField(state, slot))
|
2021-03-17 18:46:45 +00:00
|
|
|
else:
|
|
|
|
vepoch.get()
|
|
|
|
|
|
|
|
if vslot.isNone():
|
2022-01-11 10:01:54 +00:00
|
|
|
for slot in qepoch.slots():
|
2022-01-08 23:28:49 +00:00
|
|
|
forSlot(slot, vindex, res)
|
2021-03-17 18:46:45 +00:00
|
|
|
else:
|
|
|
|
forSlot(vslot.get(), vindex, res)
|
|
|
|
|
2023-09-27 14:45:33 +00:00
|
|
|
return RestApiResponse.jsonResponseFinalized(
|
2022-06-20 05:53:39 +00:00
|
|
|
res,
|
2023-09-27 14:45:33 +00:00
|
|
|
node.getStateOptimistic(state),
|
|
|
|
node.dag.isFinalized(bslot.bid)
|
2022-06-20 05:53:39 +00:00
|
|
|
)
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2024-01-20 16:06:28 +00:00
|
|
|
RestApiResponse.jsonError(Http404, StateNotFoundError)
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2021-09-23 22:13:25 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/getEpochSyncCommittees
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodGet,
|
|
|
|
"/eth/v1/beacon/states/{state_id}/sync_committees") do (
|
2021-09-23 22:13:25 +00:00
|
|
|
state_id: StateIdent, epoch: Option[Epoch]) -> RestApiResponse:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
let
|
|
|
|
sid = state_id.valueOr:
|
|
|
|
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
|
|
|
|
$error)
|
Prune `BlockRef` on finalization (#3513)
Up til now, the block dag has been using `BlockRef`, a structure adapted
for a full DAG, to represent all of chain history. This is a correct and
simple design, but does not exploit the linearity of the chain once
parts of it finalize.
By pruning the in-memory `BlockRef` structure at finalization, we save,
at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory
landing us at a steady state of ~750mb normal memory usage for a
validating node.
Above all though, we prevent memory usage from growing proportionally
with the length of the chain, something that would not be sustainable
over time - instead, the steady state memory usage is roughly
determined by the validator set size which grows much more slowly. With
these changes, the core should remain sustainable memory-wise post-merge
all the way to withdrawals (when the validator set is expected to grow).
In-memory indices are still used for the "hot" unfinalized portion of
the chain - this ensure that consensus performance remains unchanged.
What changes is that for historical access, we use a db-based linear
slot index which is cache-and-disk-friendly, keeping the cost for
accessing historical data at a similar level as before, achieving the
savings at no percievable cost to functionality or performance.
A nice collateral benefit is the almost-instant startup since we no
longer load any large indicies at dag init.
The cost of this functionality instead can be found in the complexity of
having to deal with two ways of traversing the chain - by `BlockRef` and
by slot.
* use `BlockId` instead of `BlockRef` where finalized / historical data
may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`
instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this need revisiting :)
2022-03-17 17:42:56 +00:00
|
|
|
bslot = node.getBlockSlotId(sid).valueOr:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
if sid.kind == StateQueryKind.Root:
|
|
|
|
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
|
|
|
|
# in current version of database.
|
|
|
|
return RestApiResponse.jsonError(Http500, NoImplementationError)
|
|
|
|
return RestApiResponse.jsonError(Http404, StateNotFoundError,
|
|
|
|
$error)
|
2021-09-23 22:13:25 +00:00
|
|
|
|
|
|
|
let qepoch =
|
|
|
|
if epoch.isSome():
|
|
|
|
let repoch = epoch.get()
|
|
|
|
if repoch.isErr():
|
|
|
|
return RestApiResponse.jsonError(Http400, InvalidEpochValueError,
|
2024-01-20 16:06:28 +00:00
|
|
|
$repoch.error)
|
2021-09-23 22:13:25 +00:00
|
|
|
let res = repoch.get()
|
|
|
|
if res > MaxEpoch:
|
|
|
|
return RestApiResponse.jsonError(Http400, EpochOverflowValueError)
|
|
|
|
if res < node.dag.cfg.ALTAIR_FORK_EPOCH:
|
|
|
|
return RestApiResponse.jsonError(Http400,
|
|
|
|
EpochFromTheIncorrectForkError)
|
|
|
|
res
|
|
|
|
else:
|
|
|
|
# If ``epoch`` not present then the sync committees for the epoch of
|
|
|
|
# the state will be obtained.
|
|
|
|
bslot.slot.epoch()
|
|
|
|
|
Prune `BlockRef` on finalization (#3513)
Up til now, the block dag has been using `BlockRef`, a structure adapted
for a full DAG, to represent all of chain history. This is a correct and
simple design, but does not exploit the linearity of the chain once
parts of it finalize.
By pruning the in-memory `BlockRef` structure at finalization, we save,
at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory
landing us at a steady state of ~750mb normal memory usage for a
validating node.
Above all though, we prevent memory usage from growing proportionally
with the length of the chain, something that would not be sustainable
over time - instead, the steady state memory usage is roughly
determined by the validator set size which grows much more slowly. With
these changes, the core should remain sustainable memory-wise post-merge
all the way to withdrawals (when the validator set is expected to grow).
In-memory indices are still used for the "hot" unfinalized portion of
the chain - this ensure that consensus performance remains unchanged.
What changes is that for historical access, we use a db-based linear
slot index which is cache-and-disk-friendly, keeping the cost for
accessing historical data at a similar level as before, achieving the
savings at no percievable cost to functionality or performance.
A nice collateral benefit is the almost-instant startup since we no
longer load any large indicies at dag init.
The cost of this functionality instead can be found in the complexity of
having to deal with two ways of traversing the chain - by `BlockRef` and
by slot.
* use `BlockId` instead of `BlockRef` where finalized / historical data
may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`
instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this need revisiting :)
2022-03-17 17:42:56 +00:00
|
|
|
node.withStateForBlockSlotId(bslot):
|
2021-09-23 22:13:25 +00:00
|
|
|
let keys =
|
|
|
|
block:
|
2022-03-16 07:20:40 +00:00
|
|
|
let res = syncCommitteeParticipants(state, qepoch)
|
2021-09-23 22:13:25 +00:00
|
|
|
if res.isErr():
|
|
|
|
return RestApiResponse.jsonError(Http400,
|
2024-01-20 16:06:28 +00:00
|
|
|
$res.error)
|
2021-09-23 22:13:25 +00:00
|
|
|
let kres = res.get()
|
|
|
|
if len(kres) == 0:
|
|
|
|
return RestApiResponse.jsonError(Http500, InternalServerError,
|
|
|
|
"List of sync committee participants is empty")
|
|
|
|
kres
|
|
|
|
|
|
|
|
let indices =
|
|
|
|
block:
|
|
|
|
var res: seq[ValidatorIndex]
|
2022-03-16 07:20:40 +00:00
|
|
|
let optIndices = keysToIndices(node.restKeysCache, state, keys)
|
2021-10-18 08:54:20 +00:00
|
|
|
# Remove all the duplicates.
|
2021-10-14 10:38:38 +00:00
|
|
|
for item in optIndices:
|
|
|
|
if item.isNone():
|
2021-10-18 08:54:20 +00:00
|
|
|
# This should not be happened, because keys are from state.
|
2021-10-14 10:38:38 +00:00
|
|
|
return RestApiResponse.jsonError(Http500, InternalServerError,
|
|
|
|
"Could not get validator indices")
|
|
|
|
res.add(item.get())
|
2021-09-23 22:13:25 +00:00
|
|
|
res
|
|
|
|
|
|
|
|
let aggregates =
|
|
|
|
block:
|
|
|
|
var
|
|
|
|
res: seq[seq[ValidatorIndex]]
|
|
|
|
offset = 0
|
|
|
|
while true:
|
|
|
|
let length = min(SYNC_SUBCOMMITTEE_SIZE, len(indices) - offset)
|
|
|
|
if length == 0:
|
|
|
|
break
|
|
|
|
res.add(@(indices.toOpenArray(offset, offset + length - 1)))
|
|
|
|
offset.inc(length)
|
|
|
|
res
|
|
|
|
|
2023-09-27 14:45:33 +00:00
|
|
|
return RestApiResponse.jsonResponseFinalized(
|
2022-06-20 05:53:39 +00:00
|
|
|
RestEpochSyncCommittee(validators: indices,
|
|
|
|
validator_aggregates: aggregates),
|
2023-09-27 14:45:33 +00:00
|
|
|
node.getStateOptimistic(state),
|
|
|
|
node.dag.isFinalized(bslot.bid)
|
2021-09-28 18:08:23 +00:00
|
|
|
)
|
2021-09-23 22:13:25 +00:00
|
|
|
|
2024-01-20 16:06:28 +00:00
|
|
|
RestApiResponse.jsonError(Http404, StateNotFoundError)
|
2023-04-11 15:27:48 +00:00
|
|
|
|
|
|
|
# https://ethereum.github.io/beacon-APIs/?urls.primaryName=dev#/Beacon/getStateRandao
|
|
|
|
# https://github.com/ethereum/beacon-APIs/blob/b3c4defa238aaa74bf22aa602aa1b24b68a4c78e/apis/beacon/states/randao.yaml
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodGet,
|
|
|
|
"/eth/v1/beacon/states/{state_id}/randao") do (
|
2023-04-11 15:27:48 +00:00
|
|
|
state_id: StateIdent, epoch: Option[Epoch]) -> RestApiResponse:
|
|
|
|
let
|
|
|
|
sid = state_id.valueOr:
|
|
|
|
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
|
|
|
|
$error)
|
|
|
|
bslot = node.getBlockSlotId(sid).valueOr:
|
|
|
|
if sid.kind == StateQueryKind.Root:
|
|
|
|
# TODO (cheatfate): Its impossible to retrieve state by `state_root`
|
|
|
|
# in current version of database.
|
|
|
|
return RestApiResponse.jsonError(Http500, NoImplementationError)
|
|
|
|
return RestApiResponse.jsonError(Http404, StateNotFoundError,
|
|
|
|
$error)
|
|
|
|
|
|
|
|
let qepoch =
|
|
|
|
if epoch.isSome():
|
|
|
|
let repoch = epoch.get()
|
|
|
|
if repoch.isErr():
|
|
|
|
return RestApiResponse.jsonError(Http400, InvalidEpochValueError,
|
2024-01-20 16:06:28 +00:00
|
|
|
$repoch.error)
|
2023-04-11 15:27:48 +00:00
|
|
|
let res = repoch.get()
|
|
|
|
if res > MaxEpoch:
|
|
|
|
return RestApiResponse.jsonError(Http400, EpochOverflowValueError)
|
|
|
|
if res < node.dag.cfg.ALTAIR_FORK_EPOCH:
|
|
|
|
return RestApiResponse.jsonError(Http400,
|
|
|
|
EpochFromTheIncorrectForkError)
|
|
|
|
if res > bslot.slot.epoch() + 1:
|
|
|
|
return RestApiResponse.jsonError(Http400,
|
|
|
|
EpochFromFutureError)
|
|
|
|
res
|
|
|
|
else:
|
|
|
|
# If ``epoch`` not present then the RANDAO mix for the epoch of
|
|
|
|
# the state will be obtained.
|
|
|
|
bslot.slot.epoch()
|
|
|
|
|
2023-07-31 18:12:15 +00:00
|
|
|
# Try to obtain RANDAO in an accelerated way
|
|
|
|
let bsi = node.dag.atSlot(bslot.bid, (qepoch + 1).start_slot - 1)
|
|
|
|
if bsi.isSome:
|
|
|
|
let mix = node.dag.computeRandaoMix(bsi.get.bid)
|
|
|
|
if mix.isSome:
|
|
|
|
return RestApiResponse.jsonResponseWOpt(
|
|
|
|
RestEpochRandao(randao: mix.get),
|
|
|
|
node.getBidOptimistic(bsi.get.bid)
|
|
|
|
)
|
|
|
|
|
|
|
|
# Fall back to full state computation
|
2023-04-11 15:27:48 +00:00
|
|
|
node.withStateForBlockSlotId(bslot):
|
|
|
|
withState(state):
|
2023-09-27 14:45:33 +00:00
|
|
|
return RestApiResponse.jsonResponseFinalized(
|
2023-04-11 15:27:48 +00:00
|
|
|
RestEpochRandao(randao: get_randao_mix(forkyState.data, qepoch)),
|
2023-09-27 14:45:33 +00:00
|
|
|
node.getStateOptimistic(state),
|
|
|
|
node.dag.isFinalized(bslot.bid)
|
2023-04-11 15:27:48 +00:00
|
|
|
)
|
|
|
|
|
2024-01-20 16:06:28 +00:00
|
|
|
RestApiResponse.jsonError(Http404, StateNotFoundError)
|
2021-09-23 22:13:25 +00:00
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/getBlockHeaders
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodGet, "/eth/v1/beacon/headers") do (
|
2021-03-17 18:46:45 +00:00
|
|
|
slot: Option[Slot], parent_root: Option[Eth2Digest]) -> RestApiResponse:
|
2021-08-09 06:08:18 +00:00
|
|
|
# TODO (cheatfate): This call is incomplete, because structure
|
2021-03-29 10:59:39 +00:00
|
|
|
# of database do not allow to query blocks by `parent_root`.
|
2021-04-03 01:19:16 +00:00
|
|
|
let qslot =
|
|
|
|
if slot.isSome():
|
|
|
|
let rslot = slot.get()
|
|
|
|
if rslot.isErr():
|
2021-04-08 10:49:28 +00:00
|
|
|
return RestApiResponse.jsonError(Http400, InvalidSlotValueError,
|
2024-01-20 16:06:28 +00:00
|
|
|
$rslot.error)
|
2021-04-03 01:19:16 +00:00
|
|
|
rslot.get()
|
|
|
|
else:
|
2021-06-01 11:13:40 +00:00
|
|
|
node.dag.head.slot
|
2021-04-03 01:19:16 +00:00
|
|
|
|
|
|
|
if parent_root.isSome():
|
|
|
|
let rroot = parent_root.get()
|
|
|
|
if rroot.isErr():
|
2021-04-08 10:49:28 +00:00
|
|
|
return RestApiResponse.jsonError(Http400, InvalidParentRootValueError,
|
2024-01-20 16:06:28 +00:00
|
|
|
$rroot.error)
|
2021-04-08 10:49:28 +00:00
|
|
|
return RestApiResponse.jsonError(Http500, NoImplementationError)
|
2021-04-03 01:19:16 +00:00
|
|
|
|
2022-03-11 12:08:17 +00:00
|
|
|
let bdata = node.getForkedBlock(BlockIdent.init(qslot)).valueOr:
|
|
|
|
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
|
2021-04-03 01:19:16 +00:00
|
|
|
|
2024-01-20 16:06:28 +00:00
|
|
|
withBlck(bdata):
|
|
|
|
let bid = BlockId(root: forkyBlck.root, slot: forkyBlck.message.slot)
|
|
|
|
RestApiResponse.jsonResponseFinalized(
|
|
|
|
[
|
|
|
|
(
|
|
|
|
root: forkyBlck.root,
|
|
|
|
canonical: node.dag.isCanonical(bid),
|
|
|
|
header: (
|
|
|
|
message: forkyBlck.toBeaconBlockHeader,
|
|
|
|
signature: forkyBlck.signature
|
2021-08-09 06:08:18 +00:00
|
|
|
)
|
2024-01-20 16:06:28 +00:00
|
|
|
)
|
|
|
|
],
|
|
|
|
node.getBlockOptimistic(bdata),
|
|
|
|
node.dag.isFinalized(bid)
|
|
|
|
)
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/getBlockHeader
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodGet, "/eth/v1/beacon/headers/{block_id}") do (
|
2021-03-17 18:46:45 +00:00
|
|
|
block_id: BlockIdent) -> RestApiResponse:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
let
|
|
|
|
bid = block_id.valueOr:
|
|
|
|
return RestApiResponse.jsonError(Http400, InvalidBlockIdValueError,
|
|
|
|
$error)
|
|
|
|
|
|
|
|
bdata = node.getForkedBlock(bid).valueOr:
|
|
|
|
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
|
2021-03-23 22:50:18 +00:00
|
|
|
|
2024-01-20 16:06:28 +00:00
|
|
|
withBlck(bdata):
|
|
|
|
let bid = BlockId(root: forkyBlck.root, slot: forkyBlck.message.slot)
|
|
|
|
RestApiResponse.jsonResponseFinalized(
|
|
|
|
(
|
|
|
|
root: forkyBlck.root,
|
|
|
|
canonical: node.dag.isCanonical(bid),
|
|
|
|
header: (
|
|
|
|
message: forkyBlck.toBeaconBlockHeader,
|
|
|
|
signature: forkyBlck.signature
|
|
|
|
)
|
|
|
|
),
|
|
|
|
node.getBlockOptimistic(bdata),
|
|
|
|
node.dag.isFinalized(bid)
|
|
|
|
)
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlock
|
2022-01-06 07:38:40 +00:00
|
|
|
router.api(MethodPost, "/eth/v1/beacon/blocks") do (
|
2021-03-17 18:46:45 +00:00
|
|
|
contentBody: Option[ContentBody]) -> RestApiResponse:
|
2022-07-06 16:11:44 +00:00
|
|
|
let res =
|
2021-03-23 22:50:18 +00:00
|
|
|
block:
|
|
|
|
if contentBody.isNone():
|
2021-04-08 10:49:28 +00:00
|
|
|
return RestApiResponse.jsonError(Http400, EmptyRequestBodyError)
|
2022-09-29 21:00:53 +00:00
|
|
|
let
|
|
|
|
body = contentBody.get()
|
|
|
|
version = request.headers.getString("eth-consensus-version")
|
|
|
|
var
|
2023-06-19 08:56:52 +00:00
|
|
|
restBlock = decodeBody(RestPublishedSignedBlockContents, body,
|
2022-09-29 21:00:53 +00:00
|
|
|
version).valueOr:
|
2023-09-27 14:45:33 +00:00
|
|
|
return RestApiResponse.jsonError(error)
|
2023-06-19 08:56:52 +00:00
|
|
|
forked = ForkedSignedBeaconBlock.init(restBlock)
|
2022-09-29 21:00:53 +00:00
|
|
|
|
2023-06-19 08:56:52 +00:00
|
|
|
if restBlock.kind != node.dag.cfg.consensusForkAtEpoch(
|
2022-09-29 21:00:53 +00:00
|
|
|
getForkedBlockField(forked, slot).epoch):
|
2023-02-03 15:12:11 +00:00
|
|
|
doAssert strictVerification notin node.dag.updateFlags
|
2022-02-13 15:21:55 +00:00
|
|
|
return RestApiResponse.jsonError(Http400, InvalidBlockObjectError)
|
|
|
|
|
2023-06-28 17:55:31 +00:00
|
|
|
case restBlock.kind
|
|
|
|
of ConsensusFork.Phase0:
|
|
|
|
var blck = restBlock.phase0Data
|
2022-02-13 15:21:55 +00:00
|
|
|
blck.root = hash_tree_root(blck.message)
|
2023-11-06 06:48:43 +00:00
|
|
|
await node.router.routeSignedBeaconBlock(
|
|
|
|
blck, Opt.none(seq[BlobSidecar]))
|
2023-06-28 17:55:31 +00:00
|
|
|
of ConsensusFork.Altair:
|
|
|
|
var blck = restBlock.altairData
|
|
|
|
blck.root = hash_tree_root(blck.message)
|
2023-11-06 06:48:43 +00:00
|
|
|
await node.router.routeSignedBeaconBlock(
|
|
|
|
blck, Opt.none(seq[BlobSidecar]))
|
2023-06-28 17:55:31 +00:00
|
|
|
of ConsensusFork.Bellatrix:
|
|
|
|
var blck = restBlock.bellatrixData
|
|
|
|
blck.root = hash_tree_root(blck.message)
|
2023-11-06 06:48:43 +00:00
|
|
|
await node.router.routeSignedBeaconBlock(
|
|
|
|
blck, Opt.none(seq[BlobSidecar]))
|
2023-06-28 17:55:31 +00:00
|
|
|
of ConsensusFork.Capella:
|
|
|
|
var blck = restBlock.capellaData
|
|
|
|
blck.root = hash_tree_root(blck.message)
|
2023-11-06 06:48:43 +00:00
|
|
|
await node.router.routeSignedBeaconBlock(
|
|
|
|
blck, Opt.none(seq[BlobSidecar]))
|
2023-06-28 17:55:31 +00:00
|
|
|
of ConsensusFork.Deneb:
|
|
|
|
var blck = restBlock.denebData.signed_block
|
|
|
|
blck.root = hash_tree_root(blck.message)
|
2024-03-28 03:32:33 +00:00
|
|
|
|
|
|
|
let validity = checkRestBlockBlobsValid(
|
|
|
|
blck, restBlock.denebData.kzg_proofs, restBlock.denebData.blobs)
|
|
|
|
if validity.isErr:
|
|
|
|
return RestApiResponse.jsonError(Http400, validity.error)
|
|
|
|
|
2023-06-28 17:55:31 +00:00
|
|
|
await node.router.routeSignedBeaconBlock(
|
2023-11-06 06:48:43 +00:00
|
|
|
blck, Opt.some(blck.create_blob_sidecars(
|
|
|
|
restBlock.denebData.kzg_proofs, restBlock.denebData.blobs)))
|
2021-05-27 11:53:53 +00:00
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
if res.isErr():
|
2022-07-06 16:11:44 +00:00
|
|
|
return RestApiResponse.jsonError(
|
2024-01-20 16:06:28 +00:00
|
|
|
Http503, BeaconNodeInSyncError, $res.error)
|
2022-07-06 16:11:44 +00:00
|
|
|
if res.get().isNone():
|
2021-04-08 10:49:28 +00:00
|
|
|
return RestApiResponse.jsonError(Http202, BlockValidationError)
|
2021-12-03 13:58:12 +00:00
|
|
|
|
2024-01-20 16:06:28 +00:00
|
|
|
RestApiResponse.jsonMsgResponse(BlockValidationSuccess)
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2023-08-12 03:08:54 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlockV2
|
|
|
|
router.api(MethodPost, "/eth/v2/beacon/blocks") do (
|
2023-11-01 07:31:18 +00:00
|
|
|
broadcast_validation: Option[BroadcastValidationType],
|
2023-08-12 03:08:54 +00:00
|
|
|
contentBody: Option[ContentBody]) -> RestApiResponse:
|
|
|
|
let res =
|
|
|
|
block:
|
|
|
|
let
|
|
|
|
version = request.headers.getString("eth-consensus-version")
|
2023-11-01 07:31:18 +00:00
|
|
|
validation =
|
|
|
|
block:
|
|
|
|
let res =
|
|
|
|
if broadcast_validation.isNone():
|
|
|
|
BroadcastValidationType.Gossip
|
|
|
|
else:
|
|
|
|
broadcast_validation.get().valueOr:
|
|
|
|
return RestApiResponse.jsonError(Http400,
|
|
|
|
InvalidBroadcastValidationType)
|
|
|
|
# TODO (henridf): support 'consensus' and
|
|
|
|
# 'consensus_and_equivocation' broadcast_validation types.
|
|
|
|
if res != BroadcastValidationType.Gossip:
|
|
|
|
return RestApiResponse.jsonError(Http500,
|
|
|
|
"Only `gossip` broadcast_validation option supported")
|
|
|
|
res
|
|
|
|
body =
|
|
|
|
block:
|
|
|
|
if contentBody.isNone():
|
|
|
|
return RestApiResponse.jsonError(Http400, EmptyRequestBodyError)
|
|
|
|
contentBody.get()
|
2023-08-12 03:08:54 +00:00
|
|
|
var
|
|
|
|
restBlock = decodeBodyJsonOrSsz(RestPublishedSignedBlockContents,
|
|
|
|
body, version).valueOr:
|
2023-09-27 14:45:33 +00:00
|
|
|
return RestApiResponse.jsonError(error)
|
2023-08-12 03:08:54 +00:00
|
|
|
forked = ForkedSignedBeaconBlock.init(restBlock)
|
|
|
|
|
|
|
|
# TODO (henridf): handle broadcast_validation flag
|
|
|
|
if restBlock.kind != node.dag.cfg.consensusForkAtEpoch(
|
|
|
|
getForkedBlockField(forked, slot).epoch):
|
|
|
|
doAssert strictVerification notin node.dag.updateFlags
|
|
|
|
return RestApiResponse.jsonError(Http400, InvalidBlockObjectError)
|
|
|
|
|
|
|
|
case restBlock.kind
|
|
|
|
of ConsensusFork.Phase0:
|
|
|
|
var blck = restBlock.phase0Data
|
|
|
|
blck.root = hash_tree_root(blck.message)
|
2023-11-06 06:48:43 +00:00
|
|
|
await node.router.routeSignedBeaconBlock(
|
|
|
|
blck, Opt.none(seq[BlobSidecar]))
|
2023-08-12 03:08:54 +00:00
|
|
|
of ConsensusFork.Altair:
|
|
|
|
var blck = restBlock.altairData
|
|
|
|
blck.root = hash_tree_root(blck.message)
|
2023-11-06 06:48:43 +00:00
|
|
|
await node.router.routeSignedBeaconBlock(
|
|
|
|
blck, Opt.none(seq[BlobSidecar]))
|
2023-08-12 03:08:54 +00:00
|
|
|
of ConsensusFork.Bellatrix:
|
|
|
|
var blck = restBlock.bellatrixData
|
|
|
|
blck.root = hash_tree_root(blck.message)
|
2023-11-06 06:48:43 +00:00
|
|
|
await node.router.routeSignedBeaconBlock(
|
|
|
|
blck, Opt.none(seq[BlobSidecar]))
|
2023-08-12 03:08:54 +00:00
|
|
|
of ConsensusFork.Capella:
|
|
|
|
var blck = restBlock.capellaData
|
|
|
|
blck.root = hash_tree_root(blck.message)
|
2023-11-06 06:48:43 +00:00
|
|
|
await node.router.routeSignedBeaconBlock(
|
|
|
|
blck, Opt.none(seq[BlobSidecar]))
|
2023-08-12 03:08:54 +00:00
|
|
|
of ConsensusFork.Deneb:
|
|
|
|
var blck = restBlock.denebData.signed_block
|
|
|
|
blck.root = hash_tree_root(blck.message)
|
2024-03-28 03:32:33 +00:00
|
|
|
|
|
|
|
let validity = checkRestBlockBlobsValid(
|
|
|
|
blck, restBlock.denebData.kzg_proofs, restBlock.denebData.blobs)
|
|
|
|
if validity.isErr:
|
|
|
|
return RestApiResponse.jsonError(Http400, validity.error)
|
|
|
|
|
2023-08-12 03:08:54 +00:00
|
|
|
await node.router.routeSignedBeaconBlock(
|
2023-11-06 06:48:43 +00:00
|
|
|
blck, Opt.some(blck.create_blob_sidecars(
|
|
|
|
restBlock.denebData.kzg_proofs, restBlock.denebData.blobs)))
|
2023-08-12 03:08:54 +00:00
|
|
|
|
|
|
|
if res.isErr():
|
|
|
|
return RestApiResponse.jsonError(
|
2024-01-20 16:06:28 +00:00
|
|
|
Http503, BeaconNodeInSyncError, $res.error)
|
2023-08-12 03:08:54 +00:00
|
|
|
if res.get().isNone():
|
|
|
|
return RestApiResponse.jsonError(Http202, BlockValidationError)
|
|
|
|
|
2024-01-20 16:06:28 +00:00
|
|
|
RestApiResponse.jsonMsgResponse(BlockValidationSuccess)
|
2023-06-19 08:56:52 +00:00
|
|
|
|
2024-01-31 03:18:55 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.2#/Beacon/getBlindedBlock
|
|
|
|
# https://github.com/ethereum/beacon-APIs/blob/v2.4.2/apis/beacon/blocks/blinded_block.yaml
|
|
|
|
router.api2(MethodGet, "/eth/v1/beacon/blinded_blocks/{block_id}") do (
|
|
|
|
block_id: BlockIdent) -> RestApiResponse:
|
|
|
|
let
|
|
|
|
blockIdent = block_id.valueOr:
|
|
|
|
return RestApiResponse.jsonError(Http400, InvalidBlockIdValueError,
|
|
|
|
$error)
|
|
|
|
bid = node.getBlockId(blockIdent).valueOr:
|
|
|
|
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
|
|
|
|
contentType =
|
|
|
|
block:
|
|
|
|
let res = preferredContentType(jsonMediaType,
|
|
|
|
sszMediaType)
|
|
|
|
if res.isErr():
|
|
|
|
return RestApiResponse.jsonError(Http406, ContentNotAcceptableError)
|
|
|
|
res.get()
|
|
|
|
bdata = node.dag.getForkedBlock(bid).valueOr:
|
|
|
|
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
|
|
|
|
|
|
|
|
template respondSszOrJson(
|
|
|
|
signedMaybeBlindedBlck: auto, consensusFork: ConsensusFork): untyped =
|
|
|
|
if contentType == sszMediaType:
|
|
|
|
RestApiResponse.sszResponse(
|
|
|
|
signedMaybeBlindedBlck,
|
|
|
|
[("eth-consensus-version", consensusFork.toString())])
|
|
|
|
elif contentType == jsonMediaType:
|
|
|
|
RestApiResponse.jsonResponseBlock(
|
|
|
|
signedMaybeBlindedBlck,
|
|
|
|
consensusFork,
|
|
|
|
node.getBlockOptimistic(bdata),
|
|
|
|
node.dag.isFinalized(bid)
|
|
|
|
)
|
|
|
|
else:
|
|
|
|
RestApiResponse.jsonError(Http500, InvalidAcceptError)
|
|
|
|
|
|
|
|
withBlck(bdata.asSigned()):
|
2024-02-25 19:42:44 +00:00
|
|
|
when consensusFork <= ConsensusFork.Altair:
|
2024-01-31 03:18:55 +00:00
|
|
|
respondSszOrJson(forkyBlck, consensusFork)
|
|
|
|
else:
|
|
|
|
respondSszOrJson(toSignedBlindedBeaconBlock(forkyBlck), consensusFork)
|
|
|
|
|
2022-11-08 18:08:43 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlindedBlock
|
2023-05-10 10:20:55 +00:00
|
|
|
# https://github.com/ethereum/beacon-APIs/blob/v2.4.0/apis/beacon/blocks/blinded_blocks.yaml
|
2022-11-08 18:08:43 +00:00
|
|
|
router.api(MethodPost, "/eth/v1/beacon/blinded_blocks") do (
|
|
|
|
contentBody: Option[ContentBody]) -> RestApiResponse:
|
|
|
|
## Instructs the beacon node to use the components of the
|
|
|
|
## `SignedBlindedBeaconBlock` to construct and publish a
|
|
|
|
## `SignedBeaconBlock` by swapping out the transactions_root for the
|
|
|
|
## corresponding full list of transactions. The beacon node should
|
|
|
|
## broadcast a newly constructed `SignedBeaconBlock` to the beacon network,
|
|
|
|
## to be included in the beacon chain. The beacon node is not required to
|
|
|
|
## validate the signed `BeaconBlock`, and a successful response (20X) only
|
|
|
|
## indicates that the broadcast has been successful.
|
|
|
|
if contentBody.isNone():
|
|
|
|
return RestApiResponse.jsonError(Http400, EmptyRequestBodyError)
|
|
|
|
|
|
|
|
let
|
|
|
|
currentEpochFork =
|
2023-02-16 09:32:12 +00:00
|
|
|
node.dag.cfg.consensusForkAtEpoch(node.currentSlot().epoch())
|
2022-11-08 18:08:43 +00:00
|
|
|
version = request.headers.getString("eth-consensus-version")
|
|
|
|
body = contentBody.get()
|
|
|
|
|
2023-09-27 14:45:33 +00:00
|
|
|
if (body.contentType == OctetStreamMediaType) and
|
|
|
|
(currentEpochFork.toString != version):
|
2022-11-08 18:08:43 +00:00
|
|
|
return RestApiResponse.jsonError(Http400, BlockIncorrectFork)
|
|
|
|
|
2023-11-16 00:20:13 +00:00
|
|
|
withConsensusFork(currentEpochFork):
|
2024-02-28 01:10:19 +00:00
|
|
|
when consensusFork >= ConsensusFork.Deneb:
|
2023-11-16 00:20:13 +00:00
|
|
|
let
|
|
|
|
restBlock = decodeBodyJsonOrSsz(
|
|
|
|
consensusFork.SignedBlindedBeaconBlock, body).valueOr:
|
|
|
|
return RestApiResponse.jsonError(error)
|
|
|
|
payloadBuilderClient = node.getPayloadBuilderClient(
|
|
|
|
restBlock.message.proposer_index).valueOr:
|
|
|
|
return RestApiResponse.jsonError(
|
|
|
|
Http400, "Unable to initialize payload builder client: " & $error)
|
|
|
|
res = await node.unblindAndRouteBlockMEV(
|
|
|
|
payloadBuilderClient, restBlock)
|
2023-09-27 14:45:33 +00:00
|
|
|
|
2023-11-16 00:20:13 +00:00
|
|
|
if res.isErr():
|
2023-09-27 14:45:33 +00:00
|
|
|
return RestApiResponse.jsonError(
|
2024-01-20 16:06:28 +00:00
|
|
|
Http500, InternalServerError, $res.error)
|
2023-11-16 00:20:13 +00:00
|
|
|
if res.get().isNone():
|
|
|
|
return RestApiResponse.jsonError(Http202, BlockValidationError)
|
2023-09-27 14:45:33 +00:00
|
|
|
|
2023-11-16 00:20:13 +00:00
|
|
|
return RestApiResponse.jsonMsgResponse(BlockValidationSuccess)
|
|
|
|
elif consensusFork >= ConsensusFork.Bellatrix:
|
2023-09-27 14:45:33 +00:00
|
|
|
return RestApiResponse.jsonError(
|
2023-11-16 00:20:13 +00:00
|
|
|
Http400, $consensusFork & " builder API unsupported")
|
|
|
|
else:
|
|
|
|
# Pre-Bellatrix, this endpoint will accept a `SignedBeaconBlock`.
|
|
|
|
#
|
|
|
|
# This is mostly the same as /eth/v1/beacon/blocks for phase 0 and
|
|
|
|
# altair.
|
|
|
|
var
|
|
|
|
restBlock = decodeBody(
|
|
|
|
RestPublishedSignedBeaconBlock, body, version).valueOr:
|
|
|
|
return RestApiResponse.jsonError(error)
|
|
|
|
forked = ForkedSignedBeaconBlock(restBlock)
|
2023-09-27 14:45:33 +00:00
|
|
|
|
2023-11-16 00:20:13 +00:00
|
|
|
if forked.kind != node.dag.cfg.consensusForkAtEpoch(
|
|
|
|
getForkedBlockField(forked, slot).epoch):
|
|
|
|
return RestApiResponse.jsonError(Http400, InvalidBlockObjectError)
|
2023-09-27 14:45:33 +00:00
|
|
|
|
2023-11-16 00:20:13 +00:00
|
|
|
let res = withBlck(forked):
|
|
|
|
forkyBlck.root = hash_tree_root(forkyBlck.message)
|
|
|
|
await node.router.routeSignedBeaconBlock(
|
|
|
|
forkyBlck, Opt.none(seq[BlobSidecar]))
|
2023-02-14 10:49:48 +00:00
|
|
|
|
2023-11-16 00:20:13 +00:00
|
|
|
if res.isErr():
|
|
|
|
return RestApiResponse.jsonError(
|
2024-01-20 16:06:28 +00:00
|
|
|
Http503, BeaconNodeInSyncError, $res.error)
|
2023-11-16 00:20:13 +00:00
|
|
|
elif res.get().isNone():
|
|
|
|
return RestApiResponse.jsonError(Http202, BlockValidationError)
|
2022-11-08 18:08:43 +00:00
|
|
|
|
2024-01-20 16:06:28 +00:00
|
|
|
RestApiResponse.jsonMsgResponse(BlockValidationSuccess)
|
2022-11-08 18:08:43 +00:00
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/getBlock
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodGet, "/eth/v1/beacon/blocks/{block_id}") do (
|
2021-03-17 18:46:45 +00:00
|
|
|
block_id: BlockIdent) -> RestApiResponse:
|
2024-01-20 16:06:28 +00:00
|
|
|
RestApiResponse.jsonError(
|
2022-11-02 10:56:55 +00:00
|
|
|
Http410, DeprecatedRemovalBeaconBlocksDebugStateV1)
|
2021-08-09 06:08:18 +00:00
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/getBlockV2
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodGet, "/eth/v2/beacon/blocks/{block_id}") do (
|
2021-08-09 06:08:18 +00:00
|
|
|
block_id: BlockIdent) -> RestApiResponse:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
let
|
2022-03-11 12:08:17 +00:00
|
|
|
blockIdent = block_id.valueOr:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
return RestApiResponse.jsonError(Http400, InvalidBlockIdValueError,
|
|
|
|
$error)
|
2022-03-11 12:08:17 +00:00
|
|
|
bid = node.getBlockId(blockIdent).valueOr:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
|
2022-03-11 12:08:17 +00:00
|
|
|
|
2021-09-16 13:32:32 +00:00
|
|
|
let contentType =
|
|
|
|
block:
|
2022-01-21 16:52:34 +00:00
|
|
|
let res = preferredContentType(jsonMediaType,
|
|
|
|
sszMediaType)
|
2021-09-16 13:32:32 +00:00
|
|
|
if res.isErr():
|
|
|
|
return RestApiResponse.jsonError(Http406, ContentNotAcceptableError)
|
|
|
|
res.get()
|
2022-03-11 12:08:17 +00:00
|
|
|
|
2024-01-20 16:06:28 +00:00
|
|
|
if contentType == sszMediaType:
|
|
|
|
var data: seq[byte]
|
|
|
|
if not node.dag.getBlockSSZ(bid, data):
|
|
|
|
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
|
|
|
|
|
|
|
|
let
|
|
|
|
fork = node.dag.cfg.consensusForkAtEpoch(bid.slot.epoch)
|
|
|
|
headers = [("eth-consensus-version", fork.toString())]
|
|
|
|
|
|
|
|
RestApiResponse.sszResponsePlain(data, headers)
|
|
|
|
elif contentType == jsonMediaType:
|
|
|
|
let bdata = node.dag.getForkedBlock(bid).valueOr:
|
|
|
|
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
|
|
|
|
|
|
|
|
RestApiResponse.jsonResponseBlock(
|
|
|
|
bdata.asSigned(),
|
|
|
|
node.getBlockOptimistic(bdata),
|
|
|
|
node.dag.isFinalized(bid)
|
|
|
|
)
|
|
|
|
else:
|
|
|
|
RestApiResponse.jsonError(Http500, InvalidAcceptError)
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/getBlockRoot
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodGet, "/eth/v1/beacon/blocks/{block_id}/root") do (
|
2021-03-17 18:46:45 +00:00
|
|
|
block_id: BlockIdent) -> RestApiResponse:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
let
|
2022-03-11 12:08:17 +00:00
|
|
|
blockIdent = block_id.valueOr:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
return RestApiResponse.jsonError(Http400, InvalidBlockIdValueError,
|
|
|
|
$error)
|
|
|
|
|
2022-03-11 12:08:17 +00:00
|
|
|
bid = node.getBlockId(blockIdent).valueOr:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
|
|
|
|
|
2022-06-20 05:53:39 +00:00
|
|
|
bdata = node.dag.getForkedBlock(bid).valueOr:
|
|
|
|
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
|
|
|
|
|
2024-01-20 16:06:28 +00:00
|
|
|
RestApiResponse.jsonResponseFinalized(
|
2022-06-20 05:53:39 +00:00
|
|
|
(root: bid.root),
|
2023-09-27 14:45:33 +00:00
|
|
|
node.getBlockOptimistic(bdata),
|
|
|
|
node.dag.isFinalized(bid)
|
2022-06-20 05:53:39 +00:00
|
|
|
)
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/getBlockAttestations
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodGet,
|
2022-01-06 07:38:40 +00:00
|
|
|
"/eth/v1/beacon/blocks/{block_id}/attestations") do (
|
2021-03-17 18:46:45 +00:00
|
|
|
block_id: BlockIdent) -> RestApiResponse:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
let
|
2023-09-27 14:45:33 +00:00
|
|
|
blockIdent = block_id.valueOr:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
return RestApiResponse.jsonError(Http400, InvalidBlockIdValueError,
|
|
|
|
$error)
|
2023-09-27 14:45:33 +00:00
|
|
|
bdata = node.getForkedBlock(blockIdent).valueOr:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
|
|
|
|
|
2024-01-20 16:06:28 +00:00
|
|
|
withBlck(bdata):
|
|
|
|
let bid = BlockId(root: forkyBlck.root, slot: forkyBlck.message.slot)
|
|
|
|
RestApiResponse.jsonResponseFinalized(
|
|
|
|
forkyBlck.message.body.attestations.asSeq(),
|
|
|
|
node.getBlockOptimistic(bdata),
|
|
|
|
node.dag.isFinalized(bid)
|
|
|
|
)
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/getPoolAttestations
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodGet, "/eth/v1/beacon/pool/attestations") do (
|
2021-03-17 18:46:45 +00:00
|
|
|
slot: Option[Slot],
|
|
|
|
committee_index: Option[CommitteeIndex]) -> RestApiResponse:
|
|
|
|
let vindex =
|
|
|
|
if committee_index.isSome():
|
|
|
|
let rindex = committee_index.get()
|
|
|
|
if rindex.isErr():
|
|
|
|
return RestApiResponse.jsonError(Http400,
|
2021-04-08 10:49:28 +00:00
|
|
|
InvalidCommitteeIndexValueError,
|
2024-01-20 16:06:28 +00:00
|
|
|
$rindex.error)
|
2022-07-06 16:11:44 +00:00
|
|
|
Opt.some(rindex.get())
|
2021-03-17 18:46:45 +00:00
|
|
|
else:
|
2022-07-06 16:11:44 +00:00
|
|
|
Opt.none(CommitteeIndex)
|
2021-03-17 18:46:45 +00:00
|
|
|
let vslot =
|
|
|
|
if slot.isSome():
|
|
|
|
let rslot = slot.get()
|
|
|
|
if rslot.isErr():
|
2021-04-08 10:49:28 +00:00
|
|
|
return RestApiResponse.jsonError(Http400, InvalidSlotValueError,
|
2024-01-20 16:06:28 +00:00
|
|
|
$rslot.error)
|
2022-07-06 16:11:44 +00:00
|
|
|
Opt.some(rslot.get())
|
2021-03-17 18:46:45 +00:00
|
|
|
else:
|
2022-07-06 16:11:44 +00:00
|
|
|
Opt.none(Slot)
|
2021-03-17 18:46:45 +00:00
|
|
|
var res: seq[Attestation]
|
|
|
|
for item in node.attestationPool[].attestations(vslot, vindex):
|
|
|
|
res.add(item)
|
2024-01-20 16:06:28 +00:00
|
|
|
RestApiResponse.jsonResponse(res)
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/submitPoolAttestations
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodPost, "/eth/v1/beacon/pool/attestations") do (
|
2021-03-17 18:46:45 +00:00
|
|
|
contentBody: Option[ContentBody]) -> RestApiResponse:
|
2021-03-23 22:50:18 +00:00
|
|
|
let attestations =
|
|
|
|
block:
|
|
|
|
if contentBody.isNone():
|
2021-04-08 10:49:28 +00:00
|
|
|
return RestApiResponse.jsonError(Http400, EmptyRequestBodyError)
|
2021-03-23 22:50:18 +00:00
|
|
|
let dres = decodeBody(seq[Attestation], contentBody.get())
|
|
|
|
if dres.isErr():
|
2021-04-08 10:49:28 +00:00
|
|
|
return RestApiResponse.jsonError(Http400,
|
|
|
|
InvalidAttestationObjectError,
|
2024-01-20 16:06:28 +00:00
|
|
|
$dres.error)
|
2021-03-23 22:50:18 +00:00
|
|
|
dres.get()
|
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
# Since our validation logic supports batch processing, we will submit all
|
|
|
|
# attestations for validation.
|
|
|
|
let pending =
|
|
|
|
block:
|
|
|
|
var res: seq[Future[SendResult]]
|
|
|
|
for attestation in attestations:
|
2022-07-06 16:11:44 +00:00
|
|
|
res.add(node.router.routeAttestation(attestation))
|
2021-08-23 10:41:48 +00:00
|
|
|
res
|
|
|
|
let failures =
|
|
|
|
block:
|
2022-09-29 20:55:18 +00:00
|
|
|
var res: seq[RestIndexedErrorMessageItem]
|
2021-08-23 10:41:48 +00:00
|
|
|
await allFutures(pending)
|
2022-05-10 10:03:40 +00:00
|
|
|
for index, future in pending:
|
2023-06-01 08:04:30 +00:00
|
|
|
if future.completed():
|
2024-01-20 16:06:28 +00:00
|
|
|
let fres = future.value()
|
2021-08-23 10:41:48 +00:00
|
|
|
if fres.isErr():
|
2022-09-29 20:55:18 +00:00
|
|
|
let failure = RestIndexedErrorMessageItem(index: index,
|
2024-01-20 16:06:28 +00:00
|
|
|
message: $fres.error)
|
2021-08-23 10:41:48 +00:00
|
|
|
res.add(failure)
|
|
|
|
elif future.failed() or future.cancelled():
|
|
|
|
# This is unexpected failure, so we log the error message.
|
2024-01-20 16:06:28 +00:00
|
|
|
let exc = future.error()
|
2022-09-29 20:55:18 +00:00
|
|
|
let failure = RestIndexedErrorMessageItem(index: index,
|
|
|
|
message: $exc.msg)
|
2021-08-23 10:41:48 +00:00
|
|
|
res.add(failure)
|
|
|
|
res
|
2021-03-23 22:50:18 +00:00
|
|
|
|
|
|
|
if len(failures) > 0:
|
2024-01-20 16:06:28 +00:00
|
|
|
RestApiResponse.jsonErrorList(Http400, AttestationValidationError,
|
|
|
|
failures)
|
2021-03-23 22:50:18 +00:00
|
|
|
else:
|
2024-01-20 16:06:28 +00:00
|
|
|
RestApiResponse.jsonMsgResponse(AttestationValidationSuccess)
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/getPoolAttesterSlashings
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodGet, "/eth/v1/beacon/pool/attester_slashings") do (
|
2021-03-17 18:46:45 +00:00
|
|
|
) -> RestApiResponse:
|
2024-01-20 16:06:28 +00:00
|
|
|
RestApiResponse.jsonResponse(
|
2023-01-19 22:00:40 +00:00
|
|
|
toSeq(node.validatorChangePool.attester_slashings))
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/submitPoolAttesterSlashings
|
2022-01-06 07:38:40 +00:00
|
|
|
router.api(MethodPost, "/eth/v1/beacon/pool/attester_slashings") do (
|
2021-03-17 18:46:45 +00:00
|
|
|
contentBody: Option[ContentBody]) -> RestApiResponse:
|
2021-03-23 22:50:18 +00:00
|
|
|
let slashing =
|
|
|
|
block:
|
|
|
|
if contentBody.isNone():
|
2021-04-08 10:49:28 +00:00
|
|
|
return RestApiResponse.jsonError(Http400, EmptyRequestBodyError)
|
2021-03-23 22:50:18 +00:00
|
|
|
let dres = decodeBody(AttesterSlashing, contentBody.get())
|
|
|
|
if dres.isErr():
|
2021-04-08 10:49:28 +00:00
|
|
|
return RestApiResponse.jsonError(Http400,
|
|
|
|
InvalidAttesterSlashingObjectError,
|
2024-01-20 16:06:28 +00:00
|
|
|
$dres.error)
|
2021-08-23 10:41:48 +00:00
|
|
|
dres.get()
|
2022-07-06 16:11:44 +00:00
|
|
|
let res = await node.router.routeAttesterSlashing(slashing)
|
2021-08-23 10:41:48 +00:00
|
|
|
if res.isErr():
|
|
|
|
return RestApiResponse.jsonError(Http400,
|
|
|
|
AttesterSlashingValidationError,
|
2024-01-20 16:06:28 +00:00
|
|
|
$res.error)
|
|
|
|
RestApiResponse.jsonMsgResponse(AttesterSlashingValidationSuccess)
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/getPoolProposerSlashings
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodGet, "/eth/v1/beacon/pool/proposer_slashings") do (
|
2021-03-17 18:46:45 +00:00
|
|
|
) -> RestApiResponse:
|
2024-01-20 16:06:28 +00:00
|
|
|
RestApiResponse.jsonResponse(
|
2023-01-19 22:00:40 +00:00
|
|
|
toSeq(node.validatorChangePool.proposer_slashings))
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/submitPoolProposerSlashings
|
2022-01-06 07:38:40 +00:00
|
|
|
router.api(MethodPost, "/eth/v1/beacon/pool/proposer_slashings") do (
|
2021-03-17 18:46:45 +00:00
|
|
|
contentBody: Option[ContentBody]) -> RestApiResponse:
|
2021-03-23 22:50:18 +00:00
|
|
|
let slashing =
|
|
|
|
block:
|
|
|
|
if contentBody.isNone():
|
2021-04-08 10:49:28 +00:00
|
|
|
return RestApiResponse.jsonError(Http400, EmptyRequestBodyError)
|
2021-03-23 22:50:18 +00:00
|
|
|
let dres = decodeBody(ProposerSlashing, contentBody.get())
|
|
|
|
if dres.isErr():
|
2021-04-08 10:49:28 +00:00
|
|
|
return RestApiResponse.jsonError(Http400,
|
|
|
|
InvalidProposerSlashingObjectError,
|
2024-01-20 16:06:28 +00:00
|
|
|
$dres.error)
|
2021-08-23 10:41:48 +00:00
|
|
|
dres.get()
|
2022-07-06 16:11:44 +00:00
|
|
|
let res = await node.router.routeProposerSlashing(slashing)
|
2021-08-23 10:41:48 +00:00
|
|
|
if res.isErr():
|
|
|
|
return RestApiResponse.jsonError(Http400,
|
|
|
|
ProposerSlashingValidationError,
|
2024-01-20 16:06:28 +00:00
|
|
|
$res.error)
|
|
|
|
RestApiResponse.jsonMsgResponse(ProposerSlashingValidationSuccess)
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2023-01-19 22:00:40 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/?urls.primaryName=dev#/Beacon/getPoolBLSToExecutionChanges
|
|
|
|
# https://github.com/ethereum/beacon-APIs/blob/86850001845df9163da5ae9605dbf15cd318d5d0/apis/beacon/pool/bls_to_execution_changes.yaml
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodGet, "/eth/v1/beacon/pool/bls_to_execution_changes") do (
|
2023-01-19 22:00:40 +00:00
|
|
|
) -> RestApiResponse:
|
2024-01-20 16:06:28 +00:00
|
|
|
RestApiResponse.jsonResponse(
|
2023-02-03 15:28:28 +00:00
|
|
|
toSeq(node.validatorChangePool.bls_to_execution_changes_gossip) &
|
|
|
|
toSeq(node.validatorChangePool.bls_to_execution_changes_api))
|
2023-01-19 22:00:40 +00:00
|
|
|
|
|
|
|
# https://ethereum.github.io/beacon-APIs/?urls.primaryName=dev#/Beacon/submitPoolBLSToExecutionChange
|
|
|
|
# https://github.com/ethereum/beacon-APIs/blob/86850001845df9163da5ae9605dbf15cd318d5d0/apis/beacon/pool/bls_to_execution_changes.yaml
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodPost, "/eth/v1/beacon/pool/bls_to_execution_changes") do (
|
2023-01-19 22:00:40 +00:00
|
|
|
contentBody: Option[ContentBody]) -> RestApiResponse:
|
2023-01-24 21:25:51 +00:00
|
|
|
if node.currentSlot().epoch() < node.dag.cfg.CAPELLA_FORK_EPOCH:
|
|
|
|
return RestApiResponse.jsonError(Http400,
|
|
|
|
InvalidBlsToExecutionChangeObjectError,
|
|
|
|
"Attempt to add to BLS to execution change pool pre-Capella")
|
2024-01-20 16:06:28 +00:00
|
|
|
let
|
|
|
|
bls_to_execution_changes =
|
|
|
|
block:
|
|
|
|
if contentBody.isNone():
|
|
|
|
return RestApiResponse.jsonError(Http400, EmptyRequestBodyError)
|
|
|
|
let dres =
|
|
|
|
decodeBody(seq[SignedBLSToExecutionChange], contentBody.get())
|
|
|
|
if dres.isErr():
|
|
|
|
return RestApiResponse.jsonError(
|
|
|
|
Http400, InvalidBlsToExecutionChangeObjectError, $dres.error)
|
|
|
|
dres.get()
|
|
|
|
pending = mapIt(bls_to_execution_changes,
|
|
|
|
node.router.routeBlsToExecutionChange(it))
|
|
|
|
|
|
|
|
await allFutures(pending)
|
|
|
|
|
|
|
|
for future in pending:
|
|
|
|
if future.failed() or future.cancelled():
|
2023-01-19 22:00:40 +00:00
|
|
|
return RestApiResponse.jsonError(Http400,
|
|
|
|
BlsToExecutionChangeValidationError,
|
2024-01-20 16:06:28 +00:00
|
|
|
$future.error().msg)
|
|
|
|
let res = future.value()
|
|
|
|
if res.isErr():
|
2023-01-19 22:00:40 +00:00
|
|
|
return RestApiResponse.jsonError(Http400,
|
|
|
|
BlsToExecutionChangeValidationError,
|
2024-01-20 16:06:28 +00:00
|
|
|
$res.error)
|
|
|
|
RestApiResponse.jsonMsgResponse(BlsToExecutionChangeValidationSuccess)
|
2023-01-19 22:00:40 +00:00
|
|
|
|
2021-09-23 22:13:25 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/submitPoolSyncCommitteeSignatures
|
2022-01-06 07:38:40 +00:00
|
|
|
router.api(MethodPost, "/eth/v1/beacon/pool/sync_committees") do (
|
2021-09-23 22:13:25 +00:00
|
|
|
contentBody: Option[ContentBody]) -> RestApiResponse:
|
|
|
|
let messages =
|
|
|
|
block:
|
|
|
|
if contentBody.isNone():
|
|
|
|
return RestApiResponse.jsonError(Http400, EmptyRequestBodyError)
|
|
|
|
let dres = decodeBody(seq[SyncCommitteeMessage], contentBody.get())
|
|
|
|
if dres.isErr():
|
|
|
|
return RestApiResponse.jsonError(Http400,
|
|
|
|
InvalidSyncCommitteeSignatureMessageError)
|
|
|
|
dres.get()
|
|
|
|
|
2022-07-06 16:11:44 +00:00
|
|
|
let results = await node.router.routeSyncCommitteeMessages(messages)
|
2021-09-23 22:13:25 +00:00
|
|
|
|
|
|
|
let failures =
|
|
|
|
block:
|
2022-09-29 20:55:18 +00:00
|
|
|
var res: seq[RestIndexedErrorMessageItem]
|
2022-05-10 10:03:40 +00:00
|
|
|
for index, item in results:
|
2021-09-23 22:13:25 +00:00
|
|
|
if item.isErr():
|
2022-09-29 20:55:18 +00:00
|
|
|
res.add(RestIndexedErrorMessageItem(index: index,
|
2024-01-20 16:06:28 +00:00
|
|
|
message: $item.error))
|
2021-09-23 22:13:25 +00:00
|
|
|
res
|
|
|
|
if len(failures) > 0:
|
2024-01-20 16:06:28 +00:00
|
|
|
RestApiResponse.jsonErrorList(
|
|
|
|
Http400, SyncCommitteeMessageValidationError, failures)
|
2021-09-23 22:13:25 +00:00
|
|
|
else:
|
2024-01-20 16:06:28 +00:00
|
|
|
RestApiResponse.jsonMsgResponse(
|
2021-09-23 22:13:25 +00:00
|
|
|
SyncCommitteeMessageValidationSuccess)
|
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/getPoolVoluntaryExits
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodGet, "/eth/v1/beacon/pool/voluntary_exits") do (
|
2021-03-17 18:46:45 +00:00
|
|
|
) -> RestApiResponse:
|
2023-01-19 22:00:40 +00:00
|
|
|
return RestApiResponse.jsonResponse(
|
|
|
|
toSeq(node.validatorChangePool.voluntary_exits))
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2021-08-23 10:41:48 +00:00
|
|
|
# https://ethereum.github.io/beacon-APIs/#/Beacon/submitPoolVoluntaryExit
|
2022-01-06 07:38:40 +00:00
|
|
|
router.api(MethodPost, "/eth/v1/beacon/pool/voluntary_exits") do (
|
2021-03-17 18:46:45 +00:00
|
|
|
contentBody: Option[ContentBody]) -> RestApiResponse:
|
2021-03-23 22:50:18 +00:00
|
|
|
let exit =
|
|
|
|
block:
|
|
|
|
if contentBody.isNone():
|
2021-04-08 10:49:28 +00:00
|
|
|
return RestApiResponse.jsonError(Http400, EmptyRequestBodyError)
|
2021-03-23 22:50:18 +00:00
|
|
|
let dres = decodeBody(SignedVoluntaryExit, contentBody.get())
|
|
|
|
if dres.isErr():
|
2021-04-08 10:49:28 +00:00
|
|
|
return RestApiResponse.jsonError(Http400,
|
|
|
|
InvalidVoluntaryExitObjectError,
|
2024-01-20 16:06:28 +00:00
|
|
|
$dres.error)
|
2021-08-23 10:41:48 +00:00
|
|
|
dres.get()
|
2022-07-06 16:11:44 +00:00
|
|
|
let res = await node.router.routeSignedVoluntaryExit(exit)
|
2021-08-23 10:41:48 +00:00
|
|
|
if res.isErr():
|
2024-01-20 16:06:28 +00:00
|
|
|
return RestApiResponse.jsonError(
|
|
|
|
Http400, VoluntaryExitValidationError, $res.error)
|
2021-07-13 11:15:07 +00:00
|
|
|
return RestApiResponse.jsonMsgResponse(VoluntaryExitValidationSuccess)
|
2023-10-29 00:06:13 +00:00
|
|
|
|
|
|
|
# https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.2#/Beacon/getBlobSidecars
|
|
|
|
# https://github.com/ethereum/beacon-APIs/blob/v2.4.2/apis/beacon/blob_sidecars/blob_sidecars.yaml
|
2024-01-20 16:06:28 +00:00
|
|
|
router.api2(MethodGet, "/eth/v1/beacon/blob_sidecars/{block_id}") do (
|
2023-10-29 00:06:13 +00:00
|
|
|
block_id: BlockIdent, indices: seq[uint64]) -> RestApiResponse:
|
|
|
|
let
|
2023-12-14 17:02:36 +00:00
|
|
|
blockIdent = block_id.valueOr:
|
2023-10-29 00:06:13 +00:00
|
|
|
return RestApiResponse.jsonError(Http400, InvalidBlockIdValueError,
|
|
|
|
$error)
|
2023-12-14 17:02:36 +00:00
|
|
|
bid = node.getBlockId(blockIdent).valueOr:
|
2023-10-29 00:06:13 +00:00
|
|
|
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
|
|
|
|
|
|
|
|
contentType = block:
|
|
|
|
let res = preferredContentType(jsonMediaType,
|
|
|
|
sszMediaType)
|
|
|
|
if res.isErr():
|
|
|
|
return RestApiResponse.jsonError(Http406, ContentNotAcceptableError)
|
|
|
|
res.get()
|
|
|
|
|
|
|
|
# https://github.com/ethereum/beacon-APIs/blob/v2.4.2/types/deneb/blob_sidecar.yaml#L2-L28
|
|
|
|
let data = newClone(default(List[BlobSidecar, Limit MAX_BLOBS_PER_BLOCK]))
|
|
|
|
|
|
|
|
if indices.isErr:
|
|
|
|
return RestApiResponse.jsonError(Http400,
|
|
|
|
InvalidSidecarIndexValueError)
|
|
|
|
|
|
|
|
let indexFilter = indices.get.toHashSet
|
|
|
|
|
|
|
|
for blobIndex in 0'u64 ..< MAX_BLOBS_PER_BLOCK:
|
|
|
|
if indexFilter.len > 0 and blobIndex notin indexFilter:
|
|
|
|
continue
|
|
|
|
|
|
|
|
var blobSidecar = new BlobSidecar
|
|
|
|
|
2023-12-14 17:02:36 +00:00
|
|
|
if node.dag.db.getBlobSidecar(bid.root, blobIndex, blobSidecar[]):
|
2023-10-29 00:06:13 +00:00
|
|
|
discard data[].add blobSidecar[]
|
|
|
|
|
2024-01-20 16:06:28 +00:00
|
|
|
if contentType == sszMediaType:
|
|
|
|
RestApiResponse.sszResponse(
|
|
|
|
data[], headers = [("eth-consensus-version",
|
|
|
|
node.dag.cfg.consensusForkAtEpoch(bid.slot.epoch).toString())])
|
|
|
|
elif contentType == jsonMediaType:
|
|
|
|
RestApiResponse.jsonResponse(data)
|
|
|
|
else:
|
|
|
|
RestApiResponse.jsonError(Http500, InvalidAcceptError)
|