mirror of
https://github.com/status-im/nimbus-eth2.git
synced 2025-01-09 13:56:23 +00:00
05ffe7b2bf
Up until now, the block dag has been using `BlockRef`, a structure adapted for a full DAG, to represent all of chain history. This is a correct and simple design, but it does not exploit the linearity of the chain once parts of it finalize. By pruning the in-memory `BlockRef` structure at finalization, we save, at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory, landing us at a steady state of ~750mb normal memory usage for a validating node. Above all though, we prevent memory usage from growing proportionally with the length of the chain, something that would not be sustainable over time - instead, the steady-state memory usage is roughly determined by the validator set size, which grows much more slowly. With these changes, the core should remain sustainable memory-wise post-merge all the way to withdrawals (when the validator set is expected to grow). In-memory indices are still used for the "hot" unfinalized portion of the chain - this ensures that consensus performance remains unchanged. What changes is that for historical access, we use a db-based linear slot index which is cache- and disk-friendly, keeping the cost of accessing historical data at a similar level as before and achieving the savings at no perceivable cost to functionality or performance. A nice collateral benefit is the almost-instant startup, since we no longer load any large indices at dag init. The cost of this functionality can instead be found in the complexity of having to deal with two ways of traversing the chain - by `BlockRef` and by slot (a rough illustration of the new access pattern follows the change list below).

* use `BlockId` instead of `BlockRef` where finalized / historical data may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef` instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this needs revisiting :)
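To make the new access pattern concrete, here is a minimal sketch (not part of the commit; it assumes a `dag: ChainDAGRef` and a `slot: Slot` are in scope) of how finalized history is now addressed by slot rather than by `BlockRef` traversal:

    # Sketch only: look up finalized/historical blocks through the slot index.
    let bsid = dag.getBlockIdAtSlot(slot)        # cache/disk-friendly lookup
    if bsid.isSome and bsid.get().isProposed():  # a block was proposed at this slot
      let bid = bsid.get().bid                   # lightweight BlockId (root + slot)
      # Traversal of finalized history goes through the dag, e.g. `dag.parent(bid)`,
      # instead of following in-memory `BlockRef.parent` pointers.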
import std/[options, macros],
  stew/byteutils, presto,
  ../spec/[forks],
  ../spec/eth2_apis/[rest_types, eth2_rest_serialization],
  ../beacon_node,
  ../consensus_object_pools/blockchain_dag,
  "."/[rest_constants, state_ttl_cache]

export
  options, eth2_rest_serialization, blockchain_dag, presto, rest_types,
  rest_constants

type
  ValidatorIndexError* {.pure.} = enum
    UnsupportedValue, TooHighValue

func match(data: openArray[char], charset: set[char]): int =
  ## Returns 0 if every character in `data` belongs to `charset`, 1 otherwise.
  for ch in data:
    if ch notin charset:
      return 1
  0

proc validate(key: string, value: string): int =
  ## This is a rough validation procedure which should be simple and fast,
  ## because it will be used for query routing.
  case key
  of "{epoch}":
    0
  of "{slot}":
    0
  of "{peer_id}":
    0
  of "{state_id}":
    0
  of "{block_id}":
    0
  of "{validator_id}":
    0
  else:
    1

func getCurrentSlot*(node: BeaconNode, slot: Slot):
    Result[Slot, cstring] =
  if slot <= (node.dag.head.slot + (SLOTS_PER_EPOCH * 2)):
    ok(slot)
  else:
    err("Requesting slot too far ahead of the current head")

proc getCurrentHead*(node: BeaconNode, slot: Slot): Result[BlockRef, cstring] =
  let res = node.dag.head
  # if not(node.isSynced(res)):
  #   return err("Cannot fulfill request until node is synced")
  if res.slot + uint64(2 * SLOTS_PER_EPOCH) < slot:
    return err("Requesting way ahead of the current head")
  ok(res)

proc getCurrentHead*(node: BeaconNode,
                     epoch: Epoch): Result[BlockRef, cstring] =
  if epoch > MaxEpoch:
    return err("Requesting epoch for which slot would overflow")
  node.getCurrentHead(epoch.start_slot())

proc getBlockSlotId*(node: BeaconNode,
                     stateIdent: StateIdent): Result[BlockSlotId, cstring] =
  case stateIdent.kind
  of StateQueryKind.Slot:
    let bsi = node.dag.getBlockIdAtSlot(? node.getCurrentSlot(stateIdent.slot)).valueOr:
      return err("State for given slot not found, history not available?")

    ok(bsi)

  of StateQueryKind.Root:
    if stateIdent.root == getStateRoot(node.dag.headState):
      ok(node.dag.head.bid.atSlot())
    else:
      # We don't have a state root -> BlockSlot mapping
      err("State for given root not found")
  of StateQueryKind.Named:
    case stateIdent.value
    of StateIdentType.Head:
      ok(node.dag.head.bid.atSlot())
    of StateIdentType.Genesis:
      ok(node.dag.genesis.atSlot())
    of StateIdentType.Finalized:
      ok(node.dag.finalizedHead.toBlockSlotId().expect("not nil"))
    of StateIdentType.Justified:
      ok(node.dag.head.atEpochStart(getStateField(
        node.dag.headState, current_justified_checkpoint).epoch).toBlockSlotId().expect("not nil"))

proc getBlockId*(node: BeaconNode, id: BlockIdent): Opt[BlockId] =
  case id.kind
  of BlockQueryKind.Named:
    case id.value
    of BlockIdentType.Head:
      ok(node.dag.head.bid)
    of BlockIdentType.Genesis:
      ok(node.dag.genesis)
    of BlockIdentType.Finalized:
      ok(node.dag.finalizedHead.blck.bid)
  of BlockQueryKind.Root:
    node.dag.getBlockId(id.root)
  of BlockQueryKind.Slot:
    let bsid = node.dag.getBlockIdAtSlot(id.slot)
    if bsid.isSome and bsid.get().isProposed():
      ok bsid.get().bid
    else:
      err()

proc getForkedBlock*(node: BeaconNode, id: BlockIdent):
    Opt[ForkedTrustedSignedBeaconBlock] =
  let bid = ? node.getBlockId(id)

  node.dag.getForkedBlock(bid)

proc disallowInterruptionsAux(body: NimNode) =
  for n in body:
    const because =
      "because the `state` variable may be mutated (and thus invalidated) " &
      "before the function resumes execution."

    if n.kind == nnkYieldStmt:
      macros.error "You cannot use yield in this block " & because, n

    if (n.kind in {nnkCall, nnkCommand} and
        n[0].kind in {nnkIdent, nnkSym} and
        $n[0] == "await"):
      macros.error "You cannot use await in this block " & because, n

    disallowInterruptionsAux(n)

macro disallowInterruptions(body: untyped) =
  disallowInterruptionsAux(body)

template withStateForBlockSlotId*(nodeParam: BeaconNode,
                                  blockSlotIdParam: BlockSlotId,
                                  body: untyped): untyped =

  block:
    let
      node = nodeParam
      blockSlotId = blockSlotIdParam

    template isState(state: ForkedHashedBeaconState): bool =
      state.matches_block_slot(blockSlotId.bid.root, blockSlotId.slot)

    var cache {.inject, used.}: StateCache

    # If we have a cache hit, there is a concern that the REST request
    # handler may continue executing asynchronously while we hit the same
    # advanced state in another request. We don't want the two requests
    # to work over the same state object because mutations to it will be
    # visible in both, so we must outlaw yielding within the `body` block
    # (see the illustrative usage sketch after this template).
    # Please note that the problem is not limited to the situations where
    # we have a cache hit. Working with the `headState` will result in the
    # same problem as it may change while the request is executing.
    #
    # TODO
    # The solution below is only partial, because in theory yields or awaits
    # can still be hidden in the body through the use of helper templates
    disallowInterruptions(body)

    # TODO view-types
    # Avoid the code bloat produced by the double `body` reference through a lent var
    if isState(node.dag.headState):
      template state: untyped {.inject, used.} = node.dag.headState
      template stateRoot: untyped {.inject, used.} =
        getStateRoot(node.dag.headState)
      body
    else:
      let cachedState = if node.stateTtlCache != nil:
        node.stateTtlCache.getClosestState(node.dag, blockSlotId)
      else:
        nil

      let stateToAdvance = if cachedState != nil:
        cachedState
      else:
        assignClone(node.dag.headState)

      if node.dag.updateState(stateToAdvance[], blockSlotId, false, cache):
        if cachedState == nil and node.stateTtlCache != nil:
          # This was not a cached state, we can cache it now
          node.stateTtlCache.add(stateToAdvance)

        template state: untyped {.inject, used.} = stateToAdvance[]
        template stateRoot: untyped {.inject, used.} = getStateRoot(stateToAdvance[])

        body

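# Illustrative usage sketch (not part of the original module): how a REST
# handler might use `withStateForBlockSlotId`, assuming a `node: BeaconNode`
# and a `bsi: BlockSlotId` resolved via `getBlockSlotId`:
#
#   withStateForBlockSlotId(node, bsi):
#     # `state`, `stateRoot` and `cache` are injected here
#     echo stateRoot
#     # an `await` or `yield` in this body is rejected at compile time
#     # by `disallowInterruptions`
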
template strData*(body: ContentBody): string =
  bind fromBytes
  string.fromBytes(body.data)

proc toValidatorIndex*(value: RestValidatorIndex): Result[ValidatorIndex,
                                                          ValidatorIndexError] =
  when sizeof(ValidatorIndex) == 4:
    if uint64(value) < VALIDATOR_REGISTRY_LIMIT:
      # On x86 platform Nim allows only `int32` indexes, so all the indexes in
      # range `2^31 <= x < 2^32` are not supported.
      if uint64(value) <= uint64(high(int32)):
        ok(ValidatorIndex(value))
      else:
        err(ValidatorIndexError.UnsupportedValue)
    else:
      err(ValidatorIndexError.TooHighValue)
  elif sizeof(ValidatorIndex) == 8:
    if uint64(value) < VALIDATOR_REGISTRY_LIMIT:
      ok(ValidatorIndex(value))
    else:
      err(ValidatorIndexError.TooHighValue)
  else:
    doAssert(false, "ValidatorIndex type size is incorrect")

func syncCommitteeParticipants*(forkedState: ForkedHashedBeaconState,
                                epoch: Epoch
                               ): Result[seq[ValidatorPubKey], cstring] =
  withState(forkedState):
    when stateFork >= BeaconStateFork.Altair:
      let
        epochPeriod = sync_committee_period(epoch)
        curPeriod = sync_committee_period(state.data.slot)
      if epochPeriod == curPeriod:
        ok(@(state.data.current_sync_committee.pubkeys.data))
      elif epochPeriod == curPeriod + 1:
        ok(@(state.data.next_sync_committee.pubkeys.data))
      else:
        err("Epoch is outside the sync committee period of the state")
    else:
      err("State's fork does not support sync committees")

func keysToIndices*(cacheTable: var Table[ValidatorPubKey, ValidatorIndex],
                    forkedState: ForkedHashedBeaconState,
                    keys: openArray[ValidatorPubKey]
                   ): seq[Option[ValidatorIndex]] =
  var indices = newSeq[Option[ValidatorIndex]](len(keys))
  let totalValidatorsInState = getStateField(forkedState, validators).lenu64
  var keyset =
    block:
      var res: Table[ValidatorPubKey, int]
      for inputIndex, pubkey in keys.pairs():
        # Try to search in cache first.
        cacheTable.withValue(pubkey, vindex):
          if uint64(vindex[]) < totalValidatorsInState:
            indices[inputIndex] = some(vindex[])
        do:
          res[pubkey] = inputIndex
      res
  if len(keyset) > 0:
    for validatorIndex, validator in getStateField(forkedState,
                                                   validators).pairs():
      keyset.withValue(validator.pubkey, listIndex):
        # Store pair (pubkey, index) into cache table.
        cacheTable[validator.pubkey] = ValidatorIndex(validatorIndex)
        # Fill result sequence.
        indices[listIndex[]] = some(ValidatorIndex(validatorIndex))
  indices

proc getRouter*(allowedOrigin: Option[string]): RestRouter =
  RestRouter.init(validate, allowedOrigin = allowedOrigin)

const
  jsonMediaType* = MediaType.init("application/json")
  sszMediaType* = MediaType.init("application/octet-stream")
  textEventStreamMediaType* = MediaType.init("text/event-stream")