nimbus-eth2/beacon_chain/rpc/rest_utils.nim

import std/[options, macros],
       stew/byteutils, presto,
       ../spec/[forks],
       ../spec/eth2_apis/[rest_types, eth2_rest_serialization],
       ../beacon_node,
       ../consensus_object_pools/blockchain_dag,
       "."/[rest_constants, state_ttl_cache]

export
  options, eth2_rest_serialization, blockchain_dag, presto, rest_types,
  rest_constants
type
  ValidatorIndexError* {.pure.} = enum
    UnsupportedValue, TooHighValue

func match(data: openarray[char], charset: set[char]): int =
  for ch in data:
    if ch notin charset:
      return 1
  0
proc validate(key: string, value: string): int =
  ## This is a rough validation procedure which should be simple and fast,
  ## because it will be used for query routing.
  case key
  of "{epoch}":
    0
  of "{slot}":
    0
  of "{peer_id}":
    0
  of "{state_id}":
    0
  of "{block_id}":
    0
  of "{validator_id}":
    0
  else:
    1
func getCurrentSlot*(node: BeaconNode, slot: Slot):
    Result[Slot, cstring] =
  if slot <= (node.dag.head.slot + (SLOTS_PER_EPOCH * 2)):
    ok(slot)
  else:
    err("Requesting slot too far ahead of the current head")

func getCurrentBlock*(node: BeaconNode, slot: Slot):
    Result[BlockRef, cstring] =
  let bs = node.dag.getBlockBySlot(? node.getCurrentSlot(slot))
  if bs.isProposed():
    ok(bs.blck)
  else:
    err("Block not found")
proc getCurrentHead*(node: BeaconNode, slot: Slot): Result[BlockRef, cstring] =
  let res = node.dag.head
  # if not(node.isSynced(res)):
  #   return err("Cannot fulfill request until node is synced")
  if res.slot + uint64(2 * SLOTS_PER_EPOCH) < slot:
    return err("Requesting way ahead of the current head")
  ok(res)

proc getCurrentHead*(node: BeaconNode,
                     epoch: Epoch): Result[BlockRef, cstring] =
  if epoch > MaxEpoch:
    return err("Requesting epoch for which slot would overflow")
  node.getCurrentHead(compute_start_slot_at_epoch(epoch))
proc getBlockSlot*(node: BeaconNode,
                   stateIdent: StateIdent): Result[BlockSlot, cstring] =
  case stateIdent.kind
  of StateQueryKind.Slot:
    let bs = node.dag.getBlockBySlot(? node.getCurrentSlot(stateIdent.slot))
    if not isNil(bs.blck):
      ok(bs)
    else:
      err("State for given slot not found, history not available?")
  of StateQueryKind.Root:
    if stateIdent.root == getStateRoot(node.dag.headState.data):
      ok(node.dag.headState.blck.atSlot())
    else:
      # We don't have a state root -> BlockSlot mapping
      err("State for given root not found")
  of StateQueryKind.Named:
    case stateIdent.value
    of StateIdentType.Head:
      ok(node.dag.head.atSlot())
    of StateIdentType.Genesis:
      ok(node.dag.genesis.atSlot())
    of StateIdentType.Finalized:
      ok(node.dag.finalizedHead)
    of StateIdentType.Justified:
      ok(node.dag.head.atEpochStart(getStateField(
        node.dag.headState.data, current_justified_checkpoint).epoch))
proc getBlockRef*(node: BeaconNode,
                  id: BlockIdent): Result[BlockRef, cstring] =
  case id.kind
  of BlockQueryKind.Named:
    case id.value
    of BlockIdentType.Head:
      ok(node.dag.head)
    of BlockIdentType.Genesis:
      ok(node.dag.genesis)
    of BlockIdentType.Finalized:
      ok(node.dag.finalizedHead.blck)
  of BlockQueryKind.Root:
    let res = node.dag.getRef(id.root)
    if isNil(res):
      err("Block not found")
    else:
      ok(res)
  of BlockQueryKind.Slot:
    node.getCurrentBlock(id.slot)

proc getBlockDataFromBlockIdent*(node: BeaconNode,
                                 id: BlockIdent): Result[BlockData, cstring] =
  ok(node.dag.get(? node.getBlockRef(id)))
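
# Illustrative sketch (not part of this module): a REST handler would
# typically resolve a parsed `BlockIdent` into block data and map a failure
# onto an HTTP error. `blockIdent` and `BlockNotFoundError` are assumed to
# come from the handler's context and `rest_constants` respectively.
#
#   let bdata = node.getBlockDataFromBlockIdent(blockIdent)
#   if bdata.isErr():
#     return RestApiResponse.jsonError(Http404, BlockNotFoundError)
#   let blck = bdata.get()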
proc disallowInterruptionsAux(body: NimNode) =
  for n in body:
    const because =
      "because the `state` variable may be mutated (and thus invalidated) " &
      "before the function resumes execution."

    if n.kind == nnkYieldStmt:
      macros.error "You cannot use yield in this block " & because, n

    if (n.kind in {nnkCall, nnkCommand} and
        n[0].kind in {nnkIdent, nnkSym} and
        $n[0] == "await"):
      macros.error "You cannot use await in this block " & because, n

    disallowInterruptionsAux(n)

macro disallowInterruptions(body: untyped) =
  disallowInterruptionsAux(body)
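
# As a sketch of what the macro guards against, a (hypothetical) body like
# the one below would be rejected at compile time, because the `await`
# would suspend the handler while the borrowed state may be mutated:
#
#   node.withStateForBlockSlot(bslot):
#     await sleepAsync(10.milliseconds)   # error: "You cannot use await ..."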
template withStateForBlockSlot*(nodeParam: BeaconNode,
                                blockSlotParam: BlockSlot,
                                body: untyped): untyped =
  block:
    let
      node = nodeParam
      blockSlot = blockSlotParam

    template isState(state: StateData): bool =
      state.blck.atSlot(getStateField(state.data, slot)) == blockSlot

    var cache {.inject, used.}: StateCache

    # If we have a cache hit, there is a concern that the REST request
    # handler may continue executing asynchronously while we hit the same
    # advanced state in another request. We don't want the two requests
    # to work over the same state object because mutations to it will be
    # visible in both, so we must outlaw yielding within the `body` block.
    # Please note that the problem is not limited to situations where
    # we have a cache hit. Working with the `headState` will result in the
    # same problem, as it may change while the request is executing.
    #
    # TODO
    # The solution below is only partial, because in theory yields or awaits
    # can still be hidden in the body through the use of helper templates.
    disallowInterruptions(body)

    # TODO view-types
    # Avoid the code bloat produced by the double `body` reference through a lent var
    if isState(node.dag.headState):
      withStateVars(node.dag.headState):
        body
    else:
      let cachedState = if node.stateTtlCache != nil:
        node.stateTtlCache.getClosestState(blockSlot)
      else:
        nil

      let stateToAdvance = if cachedState != nil:
        cachedState
      else:
        assignClone(node.dag.headState)

      if node.dag.updateStateData(stateToAdvance[], blockSlot, false, cache):
        if cachedState == nil and node.stateTtlCache != nil:
          # This was not a cached state, we can cache it now
          node.stateTtlCache.add(stateToAdvance)

        withStateVars(stateToAdvance[]):
          body
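
# Usage sketch (illustrative): a handler resolves a `BlockSlot` first and
# then performs only synchronous, read-only work inside the template body.
# `stateIdent` and `StateNotFoundError` are assumed from the handler's
# context and `rest_constants`.
#
#   let bres = node.getBlockSlot(stateIdent)
#   if bres.isErr():
#     return RestApiResponse.jsonError(Http404, StateNotFoundError)
#   node.withStateForBlockSlot(bres.get()):
#     # ... read fields of the advanced state here; no `await`/`yield` ...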
template strData*(body: ContentBody): string =
  bind fromBytes
  string.fromBytes(body.data)
proc toValidatorIndex*(value: RestValidatorIndex): Result[ValidatorIndex,
                                                          ValidatorIndexError] =
  when sizeof(ValidatorIndex) == 4:
    if uint64(value) < VALIDATOR_REGISTRY_LIMIT:
      # On 32-bit platforms Nim allows only `int32` array indexes, so all the
      # indexes in range `2^31 <= x < 2^32` are not supported.
      if uint64(value) <= uint64(high(int32)):
        ok(ValidatorIndex(value))
      else:
        err(ValidatorIndexError.UnsupportedValue)
    else:
      err(ValidatorIndexError.TooHighValue)
  elif sizeof(ValidatorIndex) == 8:
    if uint64(value) < VALIDATOR_REGISTRY_LIMIT:
      ok(ValidatorIndex(value))
    else:
      err(ValidatorIndexError.TooHighValue)
  else:
    doAssert(false, "ValidatorIndex type size is incorrect")
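
# Illustrative sketch: converting an index supplied over REST and mapping the
# failure cases onto HTTP errors. `restIndex` is hypothetical; the error
# constants are assumed to be provided by `rest_constants`.
#
#   let vres = toValidatorIndex(restIndex)
#   if vres.isErr():
#     case vres.error()
#     of ValidatorIndexError.TooHighValue:
#       return RestApiResponse.jsonError(Http400, TooHighValidatorIndexValueError)
#     of ValidatorIndexError.UnsupportedValue:
#       return RestApiResponse.jsonError(Http500, UnsupportedValidatorIndexValueError)
#   let vindex = vres.get()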
func syncCommitteeParticipants*(forkedState: ForkedHashedBeaconState,
                                epoch: Epoch
                               ): Result[seq[ValidatorPubKey], cstring] =
  withState(forkedState):
    when stateFork >= BeaconStateFork.Altair:
      let
        epochPeriod = sync_committee_period(epoch)
        curPeriod = sync_committee_period(state.data.slot)
      if epochPeriod == curPeriod:
        ok(@(state.data.current_sync_committee.pubkeys.data))
      elif epochPeriod == curPeriod + 1:
        ok(@(state.data.next_sync_committee.pubkeys.data))
      else:
        err("Epoch is outside the sync committee period of the state")
    else:
      err("State's fork does not support sync committees")
func keysToIndices*(cacheTable: var Table[ValidatorPubKey, ValidatorIndex],
                    forkedState: ForkedHashedBeaconState,
                    keys: openArray[ValidatorPubKey]
                   ): seq[Option[ValidatorIndex]] =
  var indices = newSeq[Option[ValidatorIndex]](len(keys))
  var keyset =
    block:
      var res: Table[ValidatorPubKey, int]
      for inputIndex, pubkey in keys.pairs():
        # Try to search in cache first.
        cacheTable.withValue(pubkey, vindex):
          indices[inputIndex] = some(vindex[])
        do:
          res[pubkey] = inputIndex
      res
  if len(keyset) > 0:
    for validatorIndex, validator in getStateField(forkedState,
                                                   validators).pairs():
      keyset.withValue(validator.pubkey, listIndex):
        # Store pair (pubkey, index) into cache table.
        cacheTable[validator.pubkey] = ValidatorIndex(validatorIndex)
        # Fill result sequence.
        indices[listIndex[]] = some(ValidatorIndex(validatorIndex))
  indices
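
# Illustrative sketch: resolving a batch of public keys to validator indices,
# reusing a node-level cache so repeated REST queries avoid a full scan of the
# validator registry. The cache field name (`node.restKeysCache`) and
# `pubkeys` are assumptions taken from the caller's context.
#
#   let indices =
#     keysToIndices(node.restKeysCache, node.dag.headState.data, pubkeys)
#   for i, idx in indices:
#     if idx.isNone():
#       debug "Unknown validator key", pubkey = pubkeys[i]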
proc getRouter*(): RestRouter =
  RestRouter.init(validate)
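
# Illustrative sketch: the router returned here is what the individual REST
# API modules register their handlers on; the `validate` callback above is
# invoked by presto for every placeholder in a route pattern. The endpoint
# and handler below are hypothetical, shown only for orientation.
#
#   var router = getRouter()
#   router.api(MethodGet, "/eth/v1/beacon/genesis") do () -> RestApiResponse:
#     return RestApiResponse.jsonResponse(node.dag.genesis)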