2021-03-26 06:52:01 +00:00
|
|
|
# beacon_chain
|
|
|
|
# Copyright (c) 2018-2021 Status Research & Development GmbH
|
2020-05-22 17:04:52 +00:00
|
|
|
# Licensed and distributed under either of
|
|
|
|
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
|
|
|
|
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
|
|
|
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
|
|
|
|
2021-03-26 06:52:01 +00:00
|
|
|
# Strict exception tracking for this module: procs may only raise Defect
# unless they carry an explicit `{.raises.}` annotation.
{.push raises: [Defect].}
|
|
|
|
|
2020-05-22 17:04:52 +00:00
|
|
|
import
|
2021-08-27 09:00:06 +00:00
|
|
|
std/[parseutils, sequtils, strutils, sets],
|
2022-01-08 20:06:34 +00:00
|
|
|
stew/[byteutils, results],
|
2021-03-26 14:11:06 +00:00
|
|
|
json_rpc/servers/httpserver,
|
2020-06-05 09:57:40 +00:00
|
|
|
chronicles,
|
2021-10-19 14:09:26 +00:00
|
|
|
../beacon_node,
|
2021-03-05 13:12:00 +00:00
|
|
|
../networking/eth2_network,
|
2021-03-02 10:27:45 +00:00
|
|
|
../validators/validator_duties,
|
2021-06-11 17:51:46 +00:00
|
|
|
../consensus_object_pools/blockchain_dag,
|
2021-11-05 07:34:34 +00:00
|
|
|
../spec/[eth2_merkleization, forks, network, validator],
|
2021-08-12 13:08:20 +00:00
|
|
|
../spec/datatypes/[phase0],
|
2021-08-03 15:17:11 +00:00
|
|
|
./rpc_utils
|
2020-05-22 17:04:52 +00:00
|
|
|
|
2020-10-27 09:00:57 +00:00
|
|
|
# Tag every log line emitted from this module with the "beaconapi" topic.
logScope: topics = "beaconapi"
|
2020-05-22 17:04:52 +00:00
|
|
|
|
|
|
|
type
  RpcServer = RpcHttpServer
    ## JSON-RPC over HTTP transport used by this (legacy) beacon API.

  ValidatorQuery = object
    ## Parsed form of a validators request's id filter.
    # Security note / threat model:
    # - The validator pubkey are stored in their raw bytes representation
    #   in the `keyset`.
    # - While the input is from unknown source (as far as the beacon node is
    #   concerned), users are asked to not expose their RPC endpoints to
    #   untrusted network or use a reverse proxy in front.
    # - At usage time, keys in the keyset are compared to keys registered
    #   in the Ethereum BeaconState which are valid
    keyset: HashSet[ValidatorPubKey]
      ## Validator public keys requested via "0x"-prefixed hex identifiers.
    ids: seq[uint64]
      ## Validator indices requested via numeric identifiers.

  StatusQuery = object
    ## Parsed form of a validators request's status filter.
    statset: HashSet[string]
      ## Status names to match against; umbrella names ("pending", "active",
      ## "exited", "withdrawal") are expanded into their sub-statuses at
      ## query-construction time.
|
|
|
|
|
2020-10-27 09:00:57 +00:00
|
|
|
template unimplemented() =
  ## Placeholder body for RPC endpoints that are not implemented yet.
  raise newException(CatchableError, "Unimplemented")
|
2020-07-08 10:11:22 +00:00
|
|
|
|
2022-03-13 07:12:45 +00:00
|
|
|
proc createIdQuery(ids: openArray[string]): Result[ValidatorQuery, cstring] =
  ## Parse a list of validator identifiers into a `ValidatorQuery`.
  ##
  ## Identifiers starting with "0x" are treated as hex-encoded public keys
  ## and collected into `keyset`; everything else must be a decimal
  ## validator index and is collected into `ids`. The list may contain at
  ## most 30 items and no duplicates.
  const MaxIds = 30

  if len(ids) > MaxIds:
    return err("The number of ids exceeds the limit")

  # Reject duplicate identifiers up front.
  if len(toHashSet(ids)) != len(ids):
    return err("ids array must have unique item")

  var query = ValidatorQuery(
    keyset: initHashSet[ValidatorPubKey](),
    ids: newSeq[uint64]()
  )

  for entry in ids:
    if entry.startsWith("0x"):
      # Hex public key; `?` propagates the parse error to the caller.
      query.keyset.incl(? ValidatorPubKey.fromHex(entry))
    else:
      var parsed: uint64
      try:
        # The whole string must be consumed, otherwise it is not a plain
        # decimal index.
        if parseBiggestUInt(entry, parsed) != len(entry):
          return err("Incorrect index value")
      except ValueError:
        return err("Cannot parse index value")
      query.ids.add(parsed)

  ok(query)
|
|
|
|
|
2020-12-01 08:15:37 +00:00
|
|
|
proc createStatusQuery(status: openArray[string]): Result[StatusQuery, string] =
  ## Build a `StatusQuery` from the `status` filter of a validators request.
  ##
  ## Umbrella statuses ("pending", "active", "exited", "withdrawal") are
  ## expanded into their concrete sub-statuses; concrete statuses are kept
  ## as-is. Fails when the list is longer than the number of known statuses,
  ## contains duplicates, or names an unknown status.
  const AllowedStatuses = [
    "pending", "pending_initialized", "pending_queued",
    "active", "active_ongoing", "active_exiting", "active_slashed",
    "exited", "exited_unslashed", "exited_slashed",
    "withdrawal", "withdrawal_possible", "withdrawal_done"
  ]

  if len(status) > len(AllowedStatuses):
    return err("The number of statuses exceeds the limit")

  var res = StatusQuery(statset: initHashSet[string]())

  # All requested statuses must be unique.
  if len(status) != len(toHashSet(status)):
    return err("Status array must have unique items")

  for item in status:
    if item notin AllowedStatuses:
      return err("Invalid status requested")
    case item
    of "pending":
      res.statset.incl("pending_initialized")
      res.statset.incl("pending_queued")
    of "active":
      res.statset.incl("active_ongoing")
      res.statset.incl("active_exiting")
      res.statset.incl("active_slashed")
    of "exited":
      res.statset.incl("exited_unslashed")
      res.statset.incl("exited_slashed")
    of "withdrawal":
      res.statset.incl("withdrawal_possible")
      res.statset.incl("withdrawal_done")
    else:
      res.statset.incl(item)

  # Fix: the success value was never returned — without this, the proc
  # left the Result in its default (unset) state for every valid query.
  ok(res)
|
|
|
|
|
|
|
|
proc getStatus(validator: Validator,
               current_epoch: Epoch): Result[string, string] =
  ## Map a validator's lifecycle epochs onto its Eth2 API status name at
  ## `current_epoch`: pending → active → exited → withdrawal, each with
  ## its concrete sub-status. Returns an error only for epoch combinations
  ## that match none of the lifecycle phases.
  if current_epoch < validator.activation_epoch:
    # Not yet activated.
    if validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH:
      ok("pending_initialized")
    else:
      # activation_eligibility_epoch < FAR_FUTURE_EPOCH
      ok("pending_queued")
  elif current_epoch < validator.exit_epoch:
    # Activated (previous guard failed) and not yet exited.
    if validator.exit_epoch == FAR_FUTURE_EPOCH:
      ok("active_ongoing")
    elif validator.slashed:
      # exit_epoch < FAR_FUTURE_EPOCH and slashed
      ok("active_slashed")
    else:
      # exit_epoch < FAR_FUTURE_EPOCH
      ok("active_exiting")
  elif current_epoch < validator.withdrawable_epoch:
    # Exited (previous guards failed) but funds not yet withdrawable.
    if validator.slashed:
      ok("exited_slashed")
    else:
      ok("exited_unslashed")
  elif validator.withdrawable_epoch <= current_epoch:
    # Withdrawal phase.
    if validator.effective_balance == 0:
      ok("withdrawal_done")
    else:
      ok("withdrawal_possible")
  else:
    err("Invalid validator status")
|
2020-07-08 10:11:22 +00:00
|
|
|
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
proc getForkedBlockFromBlockId(
    node: BeaconNode, blockId: string): ForkedTrustedSignedBeaconBlock {.
    raises: [Defect, CatchableError].} =
  ## Resolve `blockId` — "head", "genesis", "finalized", a "0x"-prefixed
  ## block root, or a slot identifier — and fetch the corresponding block
  ## from the dag. Raises CatchableError when no matching block is found.
  template fetch(lookup: untyped): untyped =
    # Unwrap the Opt result, converting absence into the API error.
    lookup.valueOr:
      raise newException(CatchableError, "Block not found")

  case blockId:
  of "head":
    fetch node.dag.getForkedBlock(node.dag.head.bid)
  of "genesis":
    fetch node.dag.getForkedBlock(node.dag.genesis)
  of "finalized":
    fetch node.dag.getForkedBlock(node.dag.finalizedHead.blck.bid)
  else:
    if blockId.startsWith("0x"):
      # Hex-encoded block root.
      let root = parseRoot(blockId)
      fetch node.dag.getForkedBlock(root)
    else:
      # Slot-based identifier.
      let bid = node.getBlockIdFromString(blockId)
      fetch node.dag.getForkedBlock(bid)
|
|
|
|
|
2021-03-26 06:52:01 +00:00
|
|
|
proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
2021-08-27 09:00:06 +00:00
|
|
|
raises: [Defect, CatchableError].} =
|
2021-08-03 15:17:11 +00:00
|
|
|
rpcServer.rpc("get_v1_beacon_genesis") do () -> RpcBeaconGenesis:
|
2020-10-27 09:00:57 +00:00
|
|
|
return (
|
2022-03-16 07:20:40 +00:00
|
|
|
genesis_time: getStateField(node.dag.headState, genesis_time),
|
2020-10-27 09:00:57 +00:00
|
|
|
genesis_validators_root:
|
2022-03-16 07:20:40 +00:00
|
|
|
getStateField(node.dag.headState, genesis_validators_root),
|
2021-07-13 14:27:10 +00:00
|
|
|
genesis_fork_version: node.dag.cfg.GENESIS_FORK_VERSION
|
2020-10-27 09:00:57 +00:00
|
|
|
)
|
2020-06-19 09:21:17 +00:00
|
|
|
|
|
|
|
rpcServer.rpc("get_v1_beacon_states_root") do (stateId: string) -> Eth2Digest:
|
2020-07-08 10:11:22 +00:00
|
|
|
withStateForStateId(stateId):
|
2021-05-28 12:51:15 +00:00
|
|
|
return stateRoot
|
2020-06-19 09:21:17 +00:00
|
|
|
|
2020-05-27 17:06:28 +00:00
|
|
|
rpcServer.rpc("get_v1_beacon_states_fork") do (stateId: string) -> Fork:
|
2020-07-08 10:11:22 +00:00
|
|
|
withStateForStateId(stateId):
|
2022-03-16 07:20:40 +00:00
|
|
|
return getStateField(state, fork)
|
2020-07-16 13:16:51 +00:00
|
|
|
|
2020-07-08 10:11:22 +00:00
|
|
|
rpcServer.rpc("get_v1_beacon_states_finality_checkpoints") do (
|
2021-08-03 15:17:11 +00:00
|
|
|
stateId: string) -> RpcBeaconStatesFinalityCheckpoints:
|
2020-07-08 10:11:22 +00:00
|
|
|
withStateForStateId(stateId):
|
2021-04-13 13:05:44 +00:00
|
|
|
return (previous_justified:
|
2022-03-16 07:20:40 +00:00
|
|
|
getStateField(state, previous_justified_checkpoint),
|
2021-04-13 13:05:44 +00:00
|
|
|
current_justified:
|
2022-03-16 07:20:40 +00:00
|
|
|
getStateField(state, current_justified_checkpoint),
|
|
|
|
finalized: getStateField(state, finalized_checkpoint))
|
2020-07-08 10:11:22 +00:00
|
|
|
|
|
|
|
rpcServer.rpc("get_v1_beacon_states_stateId_validators") do (
|
2020-12-01 08:15:37 +00:00
|
|
|
stateId: string, validatorIds: Option[seq[string]],
|
2021-08-03 15:17:11 +00:00
|
|
|
status: Option[seq[string]]) -> seq[RpcBeaconStatesValidators]:
|
2020-12-01 08:15:37 +00:00
|
|
|
var vquery: ValidatorQuery
|
|
|
|
var squery: StatusQuery
|
2022-03-16 07:20:40 +00:00
|
|
|
let current_epoch = getStateField(node.dag.headState, slot).epoch
|
2020-12-01 08:15:37 +00:00
|
|
|
|
|
|
|
template statusCheck(status, statusQuery, vstatus, current_epoch): bool =
|
|
|
|
if status.isNone():
|
|
|
|
true
|
|
|
|
else:
|
|
|
|
if vstatus in squery.statset:
|
|
|
|
true
|
|
|
|
else:
|
|
|
|
false
|
|
|
|
|
2021-08-03 15:17:11 +00:00
|
|
|
var res: seq[RpcBeaconStatesValidators]
|
2020-12-01 08:15:37 +00:00
|
|
|
|
2020-07-08 10:11:22 +00:00
|
|
|
withStateForStateId(stateId):
|
2020-12-01 08:15:37 +00:00
|
|
|
if status.isSome:
|
|
|
|
let sqres = createStatusQuery(status.get())
|
|
|
|
if sqres.isErr:
|
|
|
|
raise newException(CatchableError, sqres.error)
|
|
|
|
squery = sqres.get()
|
|
|
|
|
|
|
|
if validatorIds.isSome:
|
|
|
|
let vqres = createIdQuery(validatorIds.get())
|
|
|
|
if vqres.isErr:
|
2022-03-13 07:12:45 +00:00
|
|
|
raise newException(CatchableError, $vqres.error)
|
2020-12-01 08:15:37 +00:00
|
|
|
vquery = vqres.get()
|
|
|
|
|
|
|
|
if validatorIds.isNone():
|
2022-05-10 10:03:40 +00:00
|
|
|
for index, validator in getStateField(state, validators):
|
2020-12-01 08:15:37 +00:00
|
|
|
let sres = validator.getStatus(current_epoch)
|
|
|
|
if sres.isOk:
|
|
|
|
let vstatus = sres.get()
|
|
|
|
let includeFlag = statusCheck(status, squery, vstatus,
|
|
|
|
current_epoch)
|
|
|
|
if includeFlag:
|
|
|
|
res.add((validator: validator,
|
|
|
|
index: uint64(index),
|
|
|
|
status: vstatus,
|
2022-03-16 07:20:40 +00:00
|
|
|
balance: getStateField(state, balances).asSeq()[index]))
|
2020-12-01 08:15:37 +00:00
|
|
|
else:
|
|
|
|
for index in vquery.ids:
|
2022-03-16 07:20:40 +00:00
|
|
|
if index < lenu64(getStateField(state, validators)):
|
|
|
|
let validator = getStateField(state, validators).asSeq()[index]
|
2020-12-01 08:15:37 +00:00
|
|
|
let sres = validator.getStatus(current_epoch)
|
|
|
|
if sres.isOk:
|
|
|
|
let vstatus = sres.get()
|
|
|
|
let includeFlag = statusCheck(status, squery, vstatus,
|
|
|
|
current_epoch)
|
|
|
|
if includeFlag:
|
|
|
|
vquery.keyset.excl(validator.pubkey)
|
|
|
|
res.add((validator: validator,
|
|
|
|
index: uint64(index),
|
|
|
|
status: vstatus,
|
2022-03-16 07:20:40 +00:00
|
|
|
balance: getStateField(state, balances).asSeq()[index]))
|
2020-12-01 08:15:37 +00:00
|
|
|
|
2022-05-10 10:03:40 +00:00
|
|
|
for index, validator in getStateField(state, validators):
|
2020-12-01 08:15:37 +00:00
|
|
|
if validator.pubkey in vquery.keyset:
|
|
|
|
let sres = validator.getStatus(current_epoch)
|
|
|
|
if sres.isOk:
|
|
|
|
let vstatus = sres.get()
|
|
|
|
let includeFlag = statusCheck(status, squery, vstatus,
|
|
|
|
current_epoch)
|
|
|
|
if includeFlag:
|
|
|
|
res.add((validator: validator,
|
|
|
|
index: uint64(index),
|
|
|
|
status: vstatus,
|
2022-03-16 07:20:40 +00:00
|
|
|
balance: getStateField(state, balances).asSeq()[index]))
|
2020-12-01 08:15:37 +00:00
|
|
|
return res
|
2020-07-08 10:11:22 +00:00
|
|
|
|
|
|
|
rpcServer.rpc("get_v1_beacon_states_stateId_validators_validatorId") do (
|
2021-08-03 15:17:11 +00:00
|
|
|
stateId: string, validatorId: string) -> RpcBeaconStatesValidators:
|
2022-03-16 07:20:40 +00:00
|
|
|
let current_epoch = getStateField(node.dag.headState, slot).epoch
|
2020-12-01 08:15:37 +00:00
|
|
|
let vqres = createIdQuery([validatorId])
|
|
|
|
if vqres.isErr:
|
2022-03-13 07:12:45 +00:00
|
|
|
raise newException(CatchableError, $vqres.error)
|
2020-12-01 08:15:37 +00:00
|
|
|
let vquery = vqres.get()
|
|
|
|
|
2020-07-08 10:11:22 +00:00
|
|
|
withStateForStateId(stateId):
|
2020-12-01 08:15:37 +00:00
|
|
|
if len(vquery.ids) > 0:
|
|
|
|
let index = vquery.ids[0]
|
2022-03-16 07:20:40 +00:00
|
|
|
if index < lenu64(getStateField(state, validators)):
|
|
|
|
let validator = getStateField(state, validators).asSeq()[index]
|
2020-12-01 08:15:37 +00:00
|
|
|
let sres = validator.getStatus(current_epoch)
|
|
|
|
if sres.isOk:
|
|
|
|
return (validator: validator, index: uint64(index),
|
2021-04-13 13:05:44 +00:00
|
|
|
status: sres.get(),
|
2022-03-16 07:20:40 +00:00
|
|
|
balance: getStateField(state, balances).asSeq()[index])
|
2020-12-01 08:15:37 +00:00
|
|
|
else:
|
|
|
|
raise newException(CatchableError, "Incorrect validator's state")
|
|
|
|
else:
|
2022-05-10 10:03:40 +00:00
|
|
|
for index, validator in getStateField(state, validators):
|
2020-12-01 08:15:37 +00:00
|
|
|
if validator.pubkey in vquery.keyset:
|
|
|
|
let sres = validator.getStatus(current_epoch)
|
|
|
|
if sres.isOk:
|
|
|
|
return (validator: validator, index: uint64(index),
|
2021-04-13 13:05:44 +00:00
|
|
|
status: sres.get(),
|
2022-03-16 07:20:40 +00:00
|
|
|
balance: getStateField(state, balances).asSeq()[index])
|
2020-12-01 08:15:37 +00:00
|
|
|
else:
|
|
|
|
raise newException(CatchableError, "Incorrect validator's state")
|
2020-07-08 10:11:22 +00:00
|
|
|
|
2020-10-27 09:00:57 +00:00
|
|
|
rpcServer.rpc("get_v1_beacon_states_stateId_validator_balances") do (
|
2021-08-03 15:17:11 +00:00
|
|
|
stateId: string, validatorsId: Option[seq[string]]) -> seq[RpcBalance]:
|
2020-12-01 05:18:50 +00:00
|
|
|
|
2021-08-03 15:17:11 +00:00
|
|
|
var res: seq[RpcBalance]
|
2020-12-01 05:18:50 +00:00
|
|
|
withStateForStateId(stateId):
|
|
|
|
if validatorsId.isNone():
|
2022-05-10 10:03:40 +00:00
|
|
|
for index, value in getStateField(state, balances):
|
2020-12-01 05:18:50 +00:00
|
|
|
let balance = (index: uint64(index), balance: value)
|
|
|
|
res.add(balance)
|
|
|
|
else:
|
2020-12-01 08:15:37 +00:00
|
|
|
let vqres = createIdQuery(validatorsId.get())
|
|
|
|
if vqres.isErr:
|
2022-03-13 07:12:45 +00:00
|
|
|
raise newException(CatchableError, $vqres.error)
|
2020-12-01 05:18:50 +00:00
|
|
|
|
2020-12-01 08:15:37 +00:00
|
|
|
var vquery = vqres.get()
|
|
|
|
for index in vquery.ids:
|
2022-03-16 07:20:40 +00:00
|
|
|
if index < lenu64(getStateField(state, validators)):
|
|
|
|
let validator = getStateField(state, validators).asSeq()[index]
|
2020-12-01 08:15:37 +00:00
|
|
|
vquery.keyset.excl(validator.pubkey)
|
2020-12-01 05:18:50 +00:00
|
|
|
let balance = (index: uint64(index),
|
2022-03-16 07:20:40 +00:00
|
|
|
balance: getStateField(state, balances).asSeq()[index])
|
2020-12-01 05:18:50 +00:00
|
|
|
res.add(balance)
|
|
|
|
|
2022-05-10 10:03:40 +00:00
|
|
|
for index, validator in getStateField(state, validators):
|
2020-12-01 08:15:37 +00:00
|
|
|
if validator.pubkey in vquery.keyset:
|
2020-12-01 05:18:50 +00:00
|
|
|
let balance = (index: uint64(index),
|
2022-03-16 07:20:40 +00:00
|
|
|
balance: getStateField(state, balances).asSeq()[index])
|
2020-12-01 05:18:50 +00:00
|
|
|
res.add(balance)
|
|
|
|
return res
|
2020-10-27 09:00:57 +00:00
|
|
|
|
2020-07-08 10:11:22 +00:00
|
|
|
rpcServer.rpc("get_v1_beacon_states_stateId_committees_epoch") do (
|
2020-12-01 13:55:34 +00:00
|
|
|
stateId: string, epoch: Option[uint64], index: Option[uint64],
|
2021-08-03 15:17:11 +00:00
|
|
|
slot: Option[uint64]) -> seq[RpcBeaconStatesCommittees]:
|
2020-07-08 10:11:22 +00:00
|
|
|
withStateForStateId(stateId):
|
2020-12-01 13:55:34 +00:00
|
|
|
proc getCommittee(slot: Slot,
|
2021-08-03 15:17:11 +00:00
|
|
|
index: CommitteeIndex): RpcBeaconStatesCommittees =
|
2021-05-21 09:23:28 +00:00
|
|
|
let vals = get_beacon_committee(
|
2022-03-16 07:20:40 +00:00
|
|
|
state, slot, index, cache).mapIt(it.uint64)
|
2020-07-08 10:11:22 +00:00
|
|
|
return (index: index.uint64, slot: slot.uint64, validators: vals)
|
2020-07-16 13:16:51 +00:00
|
|
|
|
2021-08-03 15:17:11 +00:00
|
|
|
proc forSlot(slot: Slot, res: var seq[RpcBeaconStatesCommittees]) =
|
2020-10-12 10:52:59 +00:00
|
|
|
let committees_per_slot =
|
2022-03-16 07:20:40 +00:00
|
|
|
get_committee_count_per_slot(state, slot.epoch, cache)
|
2020-12-01 13:55:34 +00:00
|
|
|
|
|
|
|
if index.isNone:
|
2022-01-08 23:28:49 +00:00
|
|
|
for committee_index in get_committee_indices(committees_per_slot):
|
|
|
|
res.add(getCommittee(slot, committee_index))
|
2020-06-19 09:21:17 +00:00
|
|
|
else:
|
2020-12-01 13:55:34 +00:00
|
|
|
if index.get() < committees_per_slot:
|
2022-01-08 23:28:49 +00:00
|
|
|
let cindex = CommitteeIndex.init(index.get()).expect(
|
|
|
|
"valid because verified against committees_per_slot")
|
|
|
|
res.add(getCommittee(slot, cindex))
|
2020-12-01 13:55:34 +00:00
|
|
|
|
2021-08-03 15:17:11 +00:00
|
|
|
var res: seq[RpcBeaconStatesCommittees]
|
2020-12-01 13:55:34 +00:00
|
|
|
|
|
|
|
let qepoch =
|
|
|
|
if epoch.isNone:
|
2022-03-16 07:20:40 +00:00
|
|
|
epoch(getStateField(state, slot))
|
2020-12-01 13:55:34 +00:00
|
|
|
else:
|
|
|
|
Epoch(epoch.get())
|
2020-07-08 10:11:22 +00:00
|
|
|
|
2020-12-01 13:55:34 +00:00
|
|
|
if slot.isNone:
|
2022-01-11 10:01:54 +00:00
|
|
|
for slot in qepoch.slots():
|
2022-01-08 23:28:49 +00:00
|
|
|
forSlot(slot, res)
|
2020-07-08 10:11:22 +00:00
|
|
|
else:
|
2020-12-01 13:55:34 +00:00
|
|
|
forSlot(Slot(slot.get()), res)
|
|
|
|
|
|
|
|
return res
|
2020-07-08 10:11:22 +00:00
|
|
|
|
|
|
|
rpcServer.rpc("get_v1_beacon_headers") do (
|
2020-12-01 13:55:34 +00:00
|
|
|
slot: Option[uint64], parent_root: Option[string]) ->
|
2021-08-03 15:17:11 +00:00
|
|
|
seq[RpcBeaconHeaders]:
|
2020-10-27 09:00:57 +00:00
|
|
|
unimplemented()
|
2020-07-08 10:11:22 +00:00
|
|
|
|
|
|
|
rpcServer.rpc("get_v1_beacon_headers_blockId") do (
|
2020-11-30 20:16:12 +00:00
|
|
|
blockId: string) ->
|
|
|
|
tuple[canonical: bool, header: SignedBeaconBlockHeader]:
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
let bd = node.getForkedBlockFromBlockId(blockId)
|
|
|
|
return withBlck(bd):
|
2021-10-13 10:20:18 +00:00
|
|
|
static: doAssert blck.signature is TrustedSig and
|
|
|
|
sizeof(ValidatorSig) == sizeof(blck.signature)
|
|
|
|
(
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
canonical: node.dag.isCanonical(
|
|
|
|
BlockId(root: blck.root, slot: blck.message.slot)),
|
2021-10-13 10:20:18 +00:00
|
|
|
header: SignedBeaconBlockHeader(
|
2022-02-18 20:35:52 +00:00
|
|
|
message: blck.toBeaconBlockHeader
|
2021-10-13 10:20:18 +00:00
|
|
|
)
|
|
|
|
)
|
2020-07-08 10:11:22 +00:00
|
|
|
|
2021-07-15 19:01:07 +00:00
|
|
|
rpcServer.rpc("post_v1_beacon_blocks") do (blck: phase0.SignedBeaconBlock) -> int:
  ## Accept a signed phase0 block, broadcast it to the network and try to
  ## incorporate it into the local database.
  ## Returns 200 when the block was valid and stored, 202 when it was
  ## broadcast despite failing validation; raises on send failure.
  let sendResult = await sendBeaconBlock(node, ForkedSignedBeaconBlock.init(blck))
  if sendResult.isErr():
    raise (ref CatchableError)(msg: $sendResult.error())

  if sendResult.get():
    # Validated successfully, broadcast, and integrated into the beacon
    # node's database.
    return 200

  # The broadcast went out, but validation failed - nothing was stored
  # in the beacon node's database.
  return 202
|
|
|
|
|
2020-07-08 10:11:22 +00:00
|
|
|
rpcServer.rpc("get_v1_beacon_blocks_blockId") do (
    blockId: string) -> phase0.TrustedSignedBeaconBlock:
  ## Fetch a full signed block by block id. This legacy endpoint can only
  ## express phase0 blocks, so blocks from later forks are rejected.
  let forked = node.getForkedBlockFromBlockId(blockId)
  if forked.kind != BeaconBlockFork.Phase0:
    raiseNoAltairSupport()
  return forked.phase0Data
|
2020-07-08 10:11:22 +00:00
|
|
|
|
|
|
|
rpcServer.rpc("get_v1_beacon_blocks_blockId_root") do (
    blockId: string) -> Eth2Digest:
  ## Return the root of the block identified by `blockId`, for any fork.
  let forked = node.getForkedBlockFromBlockId(blockId)
  return withBlck(forked):
    blck.root
|
2020-07-08 10:11:22 +00:00
|
|
|
|
|
|
|
rpcServer.rpc("get_v1_beacon_blocks_blockId_attestations") do (
    blockId: string) -> seq[TrustedAttestation]:
  ## Return the attestations contained in the body of the block
  ## identified by `blockId`, for any fork.
  let forked = node.getForkedBlockFromBlockId(blockId)
  return withBlck(forked):
    blck.message.body.attestations.asSeq
|
2020-06-19 09:21:17 +00:00
|
|
|
|
2020-11-30 14:18:06 +00:00
|
|
|
rpcServer.rpc("get_v1_beacon_pool_attestations") do (
    slot: Option[uint64], committee_index: Option[uint64]) ->
    seq[RpcAttestation]:
  ## List the attestations currently held by the attestation pool,
  ## optionally narrowed down by slot and/or committee index.
  let
    wantedSlot =
      if slot.isNone(): none[Slot]()
      else: some(Slot(slot.get()))
    wantedIndex =
      if committee_index.isNone(): none[CommitteeIndex]()
      else: some(CommitteeIndex(committee_index.get()))

  var attestations: seq[RpcAttestation]
  for attestation in node.attestationPool[].attestations(
      wantedSlot, wantedIndex):
    # Convert each pool entry to the RPC tuple representation, with the
    # aggregation bits rendered as a 0x-prefixed hex string.
    attestations.add((
      aggregation_bits: to0xHex(attestation.aggregation_bits.bytes),
      data: attestation.data,
      signature: attestation.signature
    ))

  return attestations
|
2020-10-27 09:00:57 +00:00
|
|
|
|
2020-06-19 09:21:17 +00:00
|
|
|
rpcServer.rpc("post_v1_beacon_pool_attestations") do (
    attestation: Attestation) -> bool:
  ## Submit a single attestation for validation and broadcast; raises
  ## when the attestation is rejected.
  let sendResult = await node.sendAttestation(attestation)
  if sendResult.isErr():
    raise (ref CatchableError)(msg: $sendResult.error())
  return true
|
2020-06-05 09:57:40 +00:00
|
|
|
|
2020-11-30 03:14:40 +00:00
|
|
|
rpcServer.rpc("get_v1_beacon_pool_attester_slashings") do (
    ) -> seq[AttesterSlashing]:
  ## List the attester slashings waiting in the exit pool; empty when the
  ## pool has not been initialized.
  if node.exitPool.isNil:
    return
  var slashings =
    newSeqOfCap[AttesterSlashing](node.exitPool.attester_slashings.len)
  for slashing in node.exitPool.attester_slashings:
    slashings.add(slashing)
  return slashings
|
2020-07-08 10:11:22 +00:00
|
|
|
|
2020-11-30 04:32:46 +00:00
|
|
|
rpcServer.rpc("post_v1_beacon_pool_attester_slashings") do (
    slashing: AttesterSlashing) -> bool:
  ## Submit an attester slashing for validation and broadcast; raises
  ## when the slashing is rejected.
  let sendResult = node.sendAttesterSlashing(slashing)
  if sendResult.isErr():
    raise (ref CatchableError)(msg: $sendResult.error())
  return true
|
2020-10-27 09:00:57 +00:00
|
|
|
|
2020-11-30 03:14:40 +00:00
|
|
|
rpcServer.rpc("get_v1_beacon_pool_proposer_slashings") do (
    ) -> seq[ProposerSlashing]:
  ## List the proposer slashings waiting in the exit pool; empty when the
  ## pool has not been initialized.
  if node.exitPool.isNil:
    return
  var slashings =
    newSeqOfCap[ProposerSlashing](node.exitPool.proposer_slashings.len)
  for slashing in node.exitPool.proposer_slashings:
    slashings.add(slashing)
  return slashings
|
2020-10-27 09:00:57 +00:00
|
|
|
|
2020-11-30 04:32:46 +00:00
|
|
|
rpcServer.rpc("post_v1_beacon_pool_proposer_slashings") do (
    slashing: ProposerSlashing) -> bool:
  ## Submit a proposer slashing for validation and broadcast; raises
  ## when the slashing is rejected.
  let sendResult = node.sendProposerSlashing(slashing)
  if sendResult.isErr():
    raise (ref CatchableError)(msg: $sendResult.error())
  return true
|
2020-10-27 09:00:57 +00:00
|
|
|
|
2020-11-30 03:14:40 +00:00
|
|
|
rpcServer.rpc("get_v1_beacon_pool_voluntary_exits") do (
    ) -> seq[SignedVoluntaryExit]:
  ## List the voluntary exits waiting in the exit pool; empty when the
  ## pool has not been initialized.
  if node.exitPool.isNil:
    return
  var exits =
    newSeqOfCap[SignedVoluntaryExit](node.exitPool.voluntary_exits.len)
  for exit in node.exitPool.voluntary_exits:
    exits.add(exit)
  return exits
|
2020-06-05 09:57:40 +00:00
|
|
|
|
2020-11-27 19:48:33 +00:00
|
|
|
rpcServer.rpc("post_v1_beacon_pool_voluntary_exits") do (
    exit: SignedVoluntaryExit) -> bool:
  ## Submit a signed voluntary exit for validation and broadcast; raises
  ## when the exit is rejected.
  let sendResult = node.sendVoluntaryExit(exit)
  if sendResult.isErr():
    raise (ref CatchableError)(msg: $sendResult.error())
  return true
|