import
  std/[options, macros],
  stew/byteutils, presto,
  ../spec/[forks],
  ../spec/eth2_apis/[rest_types, eth2_rest_serialization],
  ../beacon_node,
  ../consensus_object_pools/blockchain_dag,
  "."/[rest_constants, state_ttl_cache]

export
  options, eth2_rest_serialization, blockchain_dag, presto, rest_types,
  rest_constants

type
  ValidatorIndexError* {.pure.} = enum
    UnsupportedValue, TooHighValue

func match(data: openArray[char], charset: set[char]): int =
  for ch in data:
    if ch notin charset:
      return 1
  0

proc validate(key: string, value: string): int =
  ## This is a rough validation procedure which should be simple and fast,
  ## because it will be used for query routing.
  case key
  of "{epoch}":
    0
  of "{slot}":
    0
  of "{peer_id}":
    0
  of "{state_id}":
    0
  of "{block_id}":
    0
  of "{validator_id}":
    0
  else:
    1
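
# Routing sketch (illustrative, not part of the module): presto matches a
# request path such as `/eth/v1/beacon/states/{state_id}/root` against the
# registered patterns and consults `validate("{state_id}", value)`; a return
# value of 0 accepts the path segment, anything else rejects the route.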

func getCurrentSlot*(node: BeaconNode, slot: Slot):
    Result[Slot, cstring] =
  if slot <= (node.dag.head.slot + (SLOTS_PER_EPOCH * 2)):
    ok(slot)
  else:
    err("Requesting slot too far ahead of the current head")

func getCurrentBlock*(node: BeaconNode, slot: Slot):
    Result[BlockRef, cstring] =
  let bs = node.dag.getBlockAtSlot(? node.getCurrentSlot(slot))
  if bs.isProposed():
    ok(bs.blck)
  else:
    err("Block not found")

proc getCurrentHead*(node: BeaconNode, slot: Slot): Result[BlockRef, cstring] =
  let res = node.dag.head
  # if not(node.isSynced(res)):
  #   return err("Cannot fulfill request until node is synced")
  if res.slot + uint64(2 * SLOTS_PER_EPOCH) < slot:
    return err("Requesting way ahead of the current head")
  ok(res)

proc getCurrentHead*(node: BeaconNode,
                     epoch: Epoch): Result[BlockRef, cstring] =
  if epoch > MaxEpoch:
    return err("Requesting epoch for which slot would overflow")
  node.getCurrentHead(epoch.start_slot())

proc getBlockSlot*(node: BeaconNode,
                   stateIdent: StateIdent): Result[BlockSlot, cstring] =
  case stateIdent.kind
  of StateQueryKind.Slot:
    let bs = node.dag.getBlockAtSlot(? node.getCurrentSlot(stateIdent.slot))
    if not isNil(bs.blck):
      ok(bs)
    else:
      err("State for given slot not found, history not available?")
  of StateQueryKind.Root:
    if stateIdent.root == getStateRoot(node.dag.headState.data):
      ok(node.dag.headState.blck.atSlot())
    else:
      # We don't have a state root -> BlockSlot mapping
      err("State for given root not found")
  of StateQueryKind.Named:
    case stateIdent.value
    of StateIdentType.Head:
      ok(node.dag.head.atSlot())
    of StateIdentType.Genesis:
      ok(node.dag.genesis.atSlot())
    of StateIdentType.Finalized:
      ok(node.dag.finalizedHead)
    of StateIdentType.Justified:
      ok(node.dag.head.atEpochStart(getStateField(
        node.dag.headState.data, current_justified_checkpoint).epoch))

proc getBlockId*(node: BeaconNode, id: BlockIdent): Result[BlockId, cstring] =
  case id.kind
  of BlockQueryKind.Named:
    case id.value
    of BlockIdentType.Head:
      ok(node.dag.head.bid)
    of BlockIdentType.Genesis:
      ok(node.dag.genesis.bid)
    of BlockIdentType.Finalized:
      ok(node.dag.finalizedHead.blck.bid)
  of BlockQueryKind.Root:
    node.dag.getBlockId(id.root).orErr(cstring("Block not found"))
  of BlockQueryKind.Slot:
    let bsid = node.dag.getBlockIdAtSlot(id.slot)
    if bsid.isProposed():
      ok bsid.bid
    else:
      err("Block not found")

proc getForkedBlock*(node: BeaconNode, id: BlockIdent):
    Result[ForkedTrustedSignedBeaconBlock, cstring] =
  case id.kind
  of BlockQueryKind.Named:
    case id.value
    of BlockIdentType.Head:
      ok(node.dag.getForkedBlock(node.dag.head))
    of BlockIdentType.Genesis:
      ok(node.dag.getForkedBlock(node.dag.genesis))
    of BlockIdentType.Finalized:
      ok(node.dag.getForkedBlock(node.dag.finalizedHead.blck))
  of BlockQueryKind.Root:
    node.dag.getForkedBlock(id.root).orErr(cstring("Block not found"))
  of BlockQueryKind.Slot:
    let bsid = node.dag.getBlockIdAtSlot(id.slot)
    if bsid.isProposed():
      node.dag.getForkedBlock(bsid.bid).orErr(cstring("Block not found"))
    else:
      err("Block not found")

proc disallowInterruptionsAux(body: NimNode) =
  for n in body:
    const because =
      "because the `state` variable may be mutated (and thus invalidated) " &
      "before the function resumes execution."

    if n.kind == nnkYieldStmt:
      macros.error "You cannot use yield in this block " & because, n

    if (n.kind in {nnkCall, nnkCommand} and
        n[0].kind in {nnkIdent, nnkSym} and
        $n[0] == "await"):
      macros.error "You cannot use await in this block " & because, n

    disallowInterruptionsAux(n)

macro disallowInterruptions(body: untyped) =
  disallowInterruptionsAux(body)
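
# Sketch of what the guard rejects (hypothetical snippet, assuming a chronos
# `sleepAsync` is in scope): any suspension point inside the guarded block
# becomes a compile-time error:
#
#   disallowInterruptions:
#     await sleepAsync(1.seconds)  # error: "You cannot use await in this block ..."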

template withStateForBlockSlot*(nodeParam: BeaconNode,
                                blockSlotParam: BlockSlot,
                                body: untyped): untyped =
  block:
    let
      node = nodeParam
      blockSlot = blockSlotParam

    template isState(state: StateData): bool =
      state.blck.atSlot(getStateField(state.data, slot)) == blockSlot

    var cache {.inject, used.}: StateCache

    # If we have a cache hit, there is a concern that the REST request
    # handler may continue executing asynchronously while another request
    # hits the same advanced state. We don't want the two requests to work
    # over the same state object because mutations to it will be visible
    # in both, so we must outlaw yielding within the `body` block.
    # Please note that the problem is not limited to situations where we
    # have a cache hit: working with the `headState` leads to the same
    # problem, as it may change while the request is executing.
    #
    # TODO
    # The solution below is only partial, because in theory yields or
    # awaits can still be hidden in the body through the use of helper
    # templates.
    disallowInterruptions(body)

    # TODO view-types
    # Avoid the code bloat produced by the double `body` reference through
    # a lent var
    if isState(node.dag.headState):
      withStateVars(node.dag.headState):
        body
    else:
      let cachedState = if node.stateTtlCache != nil:
        node.stateTtlCache.getClosestState(blockSlot)
      else:
        nil

      let stateToAdvance = if cachedState != nil:
        cachedState
      else:
        assignClone(node.dag.headState)

      if node.dag.updateStateData(stateToAdvance[], blockSlot, false, cache):
        if cachedState == nil and node.stateTtlCache != nil:
          # This was not a cached state, we can cache it now
          node.stateTtlCache.add(stateToAdvance)

        withStateVars(stateToAdvance[]):
          body
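
# Usage sketch (hypothetical, assuming `withStateVars` injects a `stateData`
# variable, as its use above suggests): a handler resolves a `BlockSlot` and
# then reads state fields synchronously, without yielding:
#
#   node.withStateForBlockSlot(bslot):
#     let stateSlot = getStateField(stateData.data, slot)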

template strData*(body: ContentBody): string =
  bind fromBytes
  string.fromBytes(body.data)

proc toValidatorIndex*(value: RestValidatorIndex): Result[ValidatorIndex,
                                                          ValidatorIndexError] =
  when sizeof(ValidatorIndex) == 4:
    if uint64(value) < VALIDATOR_REGISTRY_LIMIT:
      # On 32-bit platforms (x86) Nim allows only `int32` indexes, so
      # indexes in the range `2^31 <= x < 2^32` are not supported.
      if uint64(value) <= uint64(high(int32)):
        ok(ValidatorIndex(value))
      else:
        err(ValidatorIndexError.UnsupportedValue)
    else:
      err(ValidatorIndexError.TooHighValue)
  elif sizeof(ValidatorIndex) == 8:
    if uint64(value) < VALIDATOR_REGISTRY_LIMIT:
      ok(ValidatorIndex(value))
    else:
      err(ValidatorIndexError.TooHighValue)
  else:
    doAssert(false, "ValidatorIndex type size is incorrect")
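
# Boundary behavior, illustrated (for a 4-byte ValidatorIndex):
#   value <= high(int32)                              -> ok(ValidatorIndex(value))
#   high(int32) < value < VALIDATOR_REGISTRY_LIMIT    -> err(UnsupportedValue)
#   value >= VALIDATOR_REGISTRY_LIMIT                 -> err(TooHighValue)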

func syncCommitteeParticipants*(forkedState: ForkedHashedBeaconState,
                                epoch: Epoch
                               ): Result[seq[ValidatorPubKey], cstring] =
  withState(forkedState):
    when stateFork >= BeaconStateFork.Altair:
      let
        epochPeriod = sync_committee_period(epoch)
        curPeriod = sync_committee_period(state.data.slot)
      if epochPeriod == curPeriod:
        ok(@(state.data.current_sync_committee.pubkeys.data))
      elif epochPeriod == curPeriod + 1:
        ok(@(state.data.next_sync_committee.pubkeys.data))
      else:
        err("Epoch is outside the sync committee period of the state")
    else:
      err("State's fork does not support sync committees")

func keysToIndices*(cacheTable: var Table[ValidatorPubKey, ValidatorIndex],
                    forkedState: ForkedHashedBeaconState,
                    keys: openArray[ValidatorPubKey]
                   ): seq[Option[ValidatorIndex]] =
  var indices = newSeq[Option[ValidatorIndex]](len(keys))
  var keyset =
    block:
      var res: Table[ValidatorPubKey, int]
      for inputIndex, pubkey in keys.pairs():
        # Try to search in cache first.
        cacheTable.withValue(pubkey, vindex):
          indices[inputIndex] = some(vindex[])
        do:
          res[pubkey] = inputIndex
      res
  if len(keyset) > 0:
    for validatorIndex, validator in getStateField(forkedState,
                                                   validators).pairs():
      keyset.withValue(validator.pubkey, listIndex):
        # Store pair (pubkey, index) into cache table.
        cacheTable[validator.pubkey] = ValidatorIndex(validatorIndex)
        # Fill result sequence.
        indices[listIndex[]] = some(ValidatorIndex(validatorIndex))
  indices
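
# Note on the lookup strategy above: keys found in `cacheTable` resolve in
# O(1); the remaining misses are collected into `keyset` and resolved with a
# single linear scan over the state's validator registry, rather than one
# scan per key.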

proc getRouter*(): RestRouter =
  RestRouter.init(validate)

const
  jsonMediaType* = MediaType.init("application/json")
  sszMediaType* = MediaType.init("application/octet-stream")
  textEventStreamMediaType* = MediaType.init("text/event-stream")