2021-03-26 06:52:01 +00:00
|
|
|
# beacon_chain
|
|
|
|
# Copyright (c) 2018-2021 Status Research & Development GmbH
|
2020-05-22 17:04:52 +00:00
|
|
|
# Licensed and distributed under either of
|
|
|
|
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
|
|
|
|
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
|
|
|
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
|
|
|
|
2021-03-26 06:52:01 +00:00
|
|
|
# Strict exception tracking: within this module only Defect may escape a
# routine that lacks an explicit {.raises.} annotation.
{.push raises: [Defect].}
|
|
|
|
|
2020-05-22 17:04:52 +00:00
|
|
|
import
|
2020-12-01 05:18:50 +00:00
|
|
|
std/[parseutils, sequtils, strutils, deques, sets],
|
|
|
|
stew/results,
|
2021-03-26 14:11:06 +00:00
|
|
|
json_rpc/servers/httpserver,
|
2020-06-05 09:57:40 +00:00
|
|
|
chronicles,
|
2020-11-30 15:20:46 +00:00
|
|
|
nimcrypto/utils as ncrutils,
|
2021-03-05 13:12:00 +00:00
|
|
|
../beacon_node_common,
|
|
|
|
../networking/eth2_network,
|
2021-03-02 10:27:45 +00:00
|
|
|
../validators/validator_duties,
|
2021-03-06 07:32:55 +00:00
|
|
|
../gossip_processing/gossip_validation,
|
2021-06-11 17:51:46 +00:00
|
|
|
../consensus_object_pools/blockchain_dag,
|
2021-08-18 18:57:58 +00:00
|
|
|
../spec/[eth2_merkleization, forks, network],
|
2021-08-12 13:08:20 +00:00
|
|
|
../spec/datatypes/[phase0],
|
2021-08-03 15:17:11 +00:00
|
|
|
./rpc_utils
|
2020-05-22 17:04:52 +00:00
|
|
|
|
2020-10-27 09:00:57 +00:00
|
|
|
# Tag all chronicles log output from this module for topic-based filtering.
logScope: topics = "beaconapi"
|
2020-05-22 17:04:52 +00:00
|
|
|
|
|
|
|
type
  # Concrete JSON-RPC transport used by this module.
  RpcServer = RpcHttpServer

  # Parsed form of a `validatorIds` request parameter:
  # "0x…" hex public keys are collected into `keyset`,
  # plain numeric validator indices into `ids`.
  ValidatorQuery = object
    keyset: HashSet[ValidatorPubKey]
    ids: seq[uint64]

  # Parsed form of a `status` request parameter: the expanded set of
  # fine-grained status names to filter validators by.
  StatusQuery = object
    statset: HashSet[string]
2020-10-27 09:00:57 +00:00
|
|
|
template unimplemented() =
  ## Placeholder for endpoints that are not implemented yet: always raises
  ## a CatchableError so the RPC layer reports a regular error to the client.
  raise newException(CatchableError, "Unimplemented")
|
2020-07-08 10:11:22 +00:00
|
|
|
|
2021-03-26 06:52:01 +00:00
|
|
|
proc parsePubkey(str: string): ValidatorPubKey {.raises: [Defect, ValueError].} =
  ## Decode a "0x"-prefixed hexadecimal string into a ValidatorPubKey.
  ## Raises ValueError when the length is wrong or the hex fails to decode.
  # Two hex characters per raw byte, plus the leading "0x".
  const expectedLen = RawPubKeySize * 2 + 2
  if str.len != expectedLen:
    raise newException(ValueError,
      "A hex public key should be exactly " & $expectedLen & " characters. " &
      $str.len & " provided")
  let decoded = fromHex(ValidatorPubKey, str)
  if decoded.isErr:
    raise newException(ValueError, "Not a valid public key")
  decoded[]
|
|
|
|
|
2020-12-01 08:15:37 +00:00
|
|
|
proc createIdQuery(ids: openArray[string]): Result[ValidatorQuery, string] =
  ## Build a ValidatorQuery out of a mixed list of validator identifiers:
  ## "0x…" hex public keys go into `keyset`, decimal indices into `ids`.
  ## At most 30 identifiers are accepted and duplicates are rejected.
  if len(ids) > 30:
    return err("The number of ids exceeds the limit")

  # Reject duplicate identifiers.
  if len(ids) != len(toHashSet(ids)):
    return err("ids array must have unique item")

  var query = ValidatorQuery(
    keyset: initHashSet[ValidatorPubKey](),
    ids: newSeq[uint64]()
  )

  for identifier in ids:
    if not identifier.startsWith("0x"):
      # Plain decimal validator index; the whole string must parse.
      var index: uint64
      try:
        if parseBiggestUInt(identifier, index) != len(identifier):
          return err("Incorrect index value")
      except ValueError:
        return err("Cannot parse index value: " & identifier)
      query.ids.add(index)
    else:
      # Hex-encoded public key: "0x" plus two characters per raw byte.
      if len(identifier) != RawPubKeySize * 2 + 2:
        return err("Incorrect hexadecimal key")
      let key = ValidatorPubKey.fromHex(identifier)
      if key.isErr:
        return err("Incorrect public key")
      query.keyset.incl(key.get())
  ok(query)
|
|
|
|
|
2020-12-01 08:15:37 +00:00
|
|
|
proc createStatusQuery(status: openArray[string]): Result[StatusQuery, string] =
  ## Build a StatusQuery from the requested validator status filters.
  ##
  ## Coarse statuses ("pending", "active", "exited", "withdrawal") are
  ## expanded into all of their fine-grained sub-statuses, so membership
  ## tests against `statset` only ever need the exact names produced by
  ## `getStatus`. Unknown or duplicate names are rejected.
  const AllowedStatuses = [
    "pending", "pending_initialized", "pending_queued",
    "active", "active_ongoing", "active_exiting", "active_slashed",
    "exited", "exited_unslashed", "exited_slashed",
    "withdrawal", "withdrawal_possible", "withdrawal_done"
  ]

  if len(status) > len(AllowedStatuses):
    return err("The number of statuses exceeds the limit")

  var res = StatusQuery(statset: initHashSet[string]())

  # All requested statuses must be unique.
  if len(status) != len(toHashSet(status)):
    return err("Status array must have unique items")

  for item in status:
    if item notin AllowedStatuses:
      return err("Invalid status requested")
    case item
    of "pending":
      res.statset.incl("pending_initialized")
      res.statset.incl("pending_queued")
    of "active":
      res.statset.incl("active_ongoing")
      res.statset.incl("active_exiting")
      res.statset.incl("active_slashed")
    of "exited":
      res.statset.incl("exited_unslashed")
      res.statset.incl("exited_slashed")
    of "withdrawal":
      res.statset.incl("withdrawal_possible")
      res.statset.incl("withdrawal_done")
    else:
      res.statset.incl(item)

  # Bug fix: the assembled query must be returned explicitly. Previously the
  # proc fell off the end, yielding a default-initialized Result (never `ok`),
  # so every status filter request failed.
  ok(res)
|
|
|
|
proc getStatus(validator: Validator,
               current_epoch: Epoch): Result[string, string] =
  ## Map a validator's lifecycle epochs onto the Eth2 API status name
  ## (pending_*, active_*, exited_*, withdrawal_*).
  if validator.activation_epoch > current_epoch:
    # Not yet activated.
    if validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH:
      return ok("pending_initialized")
    # activation_eligibility_epoch < FAR_FUTURE_EPOCH
    return ok("pending_queued")

  # From here on: validator.activation_epoch <= current_epoch.
  if current_epoch < validator.exit_epoch:
    # Active.
    if validator.exit_epoch == FAR_FUTURE_EPOCH:
      return ok("active_ongoing")
    if validator.slashed:
      return ok("active_slashed")
    # exit scheduled, not slashed
    return ok("active_exiting")

  # From here on: validator.exit_epoch <= current_epoch.
  if current_epoch < validator.withdrawable_epoch:
    # Exited, funds not yet withdrawable.
    if validator.slashed:
      return ok("exited_slashed")
    return ok("exited_unslashed")

  if validator.withdrawable_epoch <= current_epoch:
    # Withdrawable.
    if validator.effective_balance != 0:
      return ok("withdrawal_possible")
    # effective_balance == 0
    return ok("withdrawal_done")

  err("Invalid validator status")
|
2020-07-08 10:11:22 +00:00
|
|
|
|
2021-03-26 06:52:01 +00:00
|
|
|
proc getBlockDataFromBlockId(node: BeaconNode, blockId: string): BlockData {.raises: [Defect, CatchableError].} =
  ## Resolve a block identifier — "head" | "genesis" | "finalized" |
  ## "0x<root>" | a slot designator — to the corresponding BlockData.
  ## Raises CatchableError when no matching block exists.
  case blockId:
  of "head":
    node.dag.get(node.dag.head)
  of "genesis":
    node.dag.getGenesisBlockData()
  of "finalized":
    node.dag.get(node.dag.finalizedHead.blck)
  else:
    if blockId.startsWith("0x"):
      # Explicit block root lookup.
      let root = parseRoot(blockId)
      let found = node.dag.get(root)
      if found.isNone:
        raise newException(CatchableError, "Block not found")
      found.get()
    else:
      # Otherwise interpret the identifier as a slot.
      let resolved = node.getBlockSlotFromString(blockId)
      if resolved.blck.isNil:
        raise newException(CatchableError, "Block not found")
      node.dag.get(resolved.blck)
|
2020-07-08 10:11:22 +00:00
|
|
|
|
2021-03-26 06:52:01 +00:00
|
|
|
proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
    raises: [Exception].} = # TODO fix json-rpc
  ## Registers every /v1/beacon JSON-RPC endpoint on `rpcServer`. Each
  ## handler closes over `node` to reach the block DAG, the attestation and
  ## exit pools, and the network layer. State-reading handlers go through
  ## `withStateForStateId`, which resolves `stateId` and brings `stateData`,
  ## `stateRoot` and `cache` into scope (template from rpc_utils).

  rpcServer.rpc("get_v1_beacon_genesis") do () -> RpcBeaconGenesis:
    # Genesis constants, read from the current head state.
    return (
      genesis_time: getStateField(node.dag.headState.data, genesis_time),
      genesis_validators_root:
        getStateField(node.dag.headState.data, genesis_validators_root),
      genesis_fork_version: node.dag.cfg.GENESIS_FORK_VERSION
    )

  rpcServer.rpc("get_v1_beacon_states_root") do (stateId: string) -> Eth2Digest:
    # Root of the state identified by `stateId`.
    withStateForStateId(stateId):
      return stateRoot

  rpcServer.rpc("get_v1_beacon_states_fork") do (stateId: string) -> Fork:
    withStateForStateId(stateId):
      return getStateField(stateData.data, fork)

  rpcServer.rpc("get_v1_beacon_states_finality_checkpoints") do (
      stateId: string) -> RpcBeaconStatesFinalityCheckpoints:
    # Previous-justified / current-justified / finalized checkpoints.
    withStateForStateId(stateId):
      return (previous_justified:
                getStateField(stateData.data, previous_justified_checkpoint),
              current_justified:
                getStateField(stateData.data, current_justified_checkpoint),
              finalized: getStateField(stateData.data, finalized_checkpoint))

  rpcServer.rpc("get_v1_beacon_states_stateId_validators") do (
      stateId: string, validatorIds: Option[seq[string]],
      status: Option[seq[string]]) -> seq[RpcBeaconStatesValidators]:
    # Lists validators of a state, optionally filtered by id and/or status.
    var vquery: ValidatorQuery
    var squery: StatusQuery
    let current_epoch = getStateField(node.dag.headState.data, slot).epoch

    # True when no status filter was given or `vstatus` is in the filter set.
    # NOTE(review): the `statusQuery` and `current_epoch` parameters are
    # unused — the body reads `squery` from the enclosing scope instead.
    template statusCheck(status, statusQuery, vstatus, current_epoch): bool =
      if status.isNone():
        true
      else:
        if vstatus in squery.statset:
          true
        else:
          false

    var res: seq[RpcBeaconStatesValidators]

    withStateForStateId(stateId):
      if status.isSome:
        let sqres = createStatusQuery(status.get())
        if sqres.isErr:
          raise newException(CatchableError, sqres.error)
        squery = sqres.get()

      if validatorIds.isSome:
        let vqres = createIdQuery(validatorIds.get())
        if vqres.isErr:
          raise newException(CatchableError, vqres.error)
        vquery = vqres.get()

      if validatorIds.isNone():
        # No id filter: walk every validator in the state.
        for index, validator in getStateField(stateData.data, validators).pairs():
          let sres = validator.getStatus(current_epoch)
          if sres.isOk:
            let vstatus = sres.get()
            let includeFlag = statusCheck(status, squery, vstatus,
                                          current_epoch)
            if includeFlag:
              res.add((validator: validator,
                       index: uint64(index),
                       status: vstatus,
                       balance: getStateField(stateData.data, balances)[index]))
      else:
        # First resolve the numeric-index identifiers ...
        for index in vquery.ids:
          if index < lenu64(getStateField(stateData.data, validators)):
            let validator = getStateField(stateData.data, validators)[index]
            let sres = validator.getStatus(current_epoch)
            if sres.isOk:
              let vstatus = sres.get()
              let includeFlag = statusCheck(status, squery, vstatus,
                                            current_epoch)
              if includeFlag:
                # Drop the pubkey from the keyset so the pubkey pass below
                # doesn't report the same validator a second time.
                vquery.keyset.excl(validator.pubkey)
                res.add((validator: validator,
                         index: uint64(index),
                         status: vstatus,
                         balance: getStateField(stateData.data, balances)[index]))

        # ... then the validators requested by public key.
        for index, validator in getStateField(stateData.data, validators).pairs():
          if validator.pubkey in vquery.keyset:
            let sres = validator.getStatus(current_epoch)
            if sres.isOk:
              let vstatus = sres.get()
              let includeFlag = statusCheck(status, squery, vstatus,
                                            current_epoch)
              if includeFlag:
                res.add((validator: validator,
                         index: uint64(index),
                         status: vstatus,
                         balance: getStateField(stateData.data, balances)[index]))
    return res

  rpcServer.rpc("get_v1_beacon_states_stateId_validators_validatorId") do (
      stateId: string, validatorId: string) -> RpcBeaconStatesValidators:
    # Single-validator lookup by index or public key.
    # NOTE(review): when the id resolves to nothing (index out of range, or
    # pubkey not present) the handler falls through without a `return`,
    # yielding a default-initialized tuple — verify this is intended.
    let current_epoch = getStateField(node.dag.headState.data, slot).epoch
    let vqres = createIdQuery([validatorId])
    if vqres.isErr:
      raise newException(CatchableError, vqres.error)
    let vquery = vqres.get()

    withStateForStateId(stateId):
      if len(vquery.ids) > 0:
        # Identifier parsed as a numeric index.
        let index = vquery.ids[0]
        if index < lenu64(getStateField(stateData.data, validators)):
          let validator = getStateField(stateData.data, validators)[index]
          let sres = validator.getStatus(current_epoch)
          if sres.isOk:
            return (validator: validator, index: uint64(index),
                    status: sres.get(),
                    balance: getStateField(stateData.data, balances)[index])
          else:
            raise newException(CatchableError, "Incorrect validator's state")
      else:
        # Identifier parsed as a public key: linear scan for a match.
        for index, validator in getStateField(stateData.data, validators).pairs():
          if validator.pubkey in vquery.keyset:
            let sres = validator.getStatus(current_epoch)
            if sres.isOk:
              return (validator: validator, index: uint64(index),
                      status: sres.get(),
                      balance: getStateField(stateData.data, balances)[index])
            else:
              raise newException(CatchableError, "Incorrect validator's state")

  rpcServer.rpc("get_v1_beacon_states_stateId_validator_balances") do (
      stateId: string, validatorsId: Option[seq[string]]) -> seq[RpcBalance]:
    # Balances for all validators, or only those matching `validatorsId`.
    var res: seq[RpcBalance]
    withStateForStateId(stateId):
      if validatorsId.isNone():
        for index, value in getStateField(stateData.data, balances).pairs():
          let balance = (index: uint64(index), balance: value)
          res.add(balance)
      else:
        let vqres = createIdQuery(validatorsId.get())
        if vqres.isErr:
          raise newException(CatchableError, vqres.error)

        var vquery = vqres.get()
        # Numeric indices first; excl() avoids duplicates in the pubkey pass.
        for index in vquery.ids:
          if index < lenu64(getStateField(stateData.data, validators)):
            let validator = getStateField(stateData.data, validators)[index]
            vquery.keyset.excl(validator.pubkey)
            let balance = (index: uint64(index),
                           balance: getStateField(stateData.data, balances)[index])
            res.add(balance)

        # Then the remaining pubkey-identified validators.
        for index, validator in getStateField(stateData.data, validators).pairs():
          if validator.pubkey in vquery.keyset:
            let balance = (index: uint64(index),
                           balance: getStateField(stateData.data, balances)[index])
            res.add(balance)
    return res

  rpcServer.rpc("get_v1_beacon_states_stateId_committees_epoch") do (
      stateId: string, epoch: Option[uint64], index: Option[uint64],
      slot: Option[uint64]) -> seq[RpcBeaconStatesCommittees]:
    # Beacon committees for an epoch, optionally narrowed to one slot
    # and/or one committee index.
    withStateForStateId(stateId):
      # Committee members for a single (slot, committee index) pair.
      proc getCommittee(slot: Slot,
                        index: CommitteeIndex): RpcBeaconStatesCommittees =
        let vals = get_beacon_committee(
          stateData.data, slot, index, cache).mapIt(it.uint64)
        return (index: index.uint64, slot: slot.uint64, validators: vals)

      # Appends the committees of `slot` (all, or just the filtered index).
      proc forSlot(slot: Slot, res: var seq[RpcBeaconStatesCommittees]) =
        let committees_per_slot =
          get_committee_count_per_slot(stateData.data, slot.epoch, cache)

        if index.isNone:
          for committee_index in 0'u64..<committees_per_slot:
            res.add(getCommittee(slot, committee_index.CommitteeIndex))
        else:
          if index.get() < committees_per_slot:
            res.add(getCommittee(slot, CommitteeIndex(index.get())))

      var res: seq[RpcBeaconStatesCommittees]

      # Default to the epoch of the resolved state when none was given.
      let qepoch =
        if epoch.isNone:
          compute_epoch_at_slot(getStateField(stateData.data, slot))
        else:
          Epoch(epoch.get())

      if slot.isNone:
        for i in 0 ..< SLOTS_PER_EPOCH:
          forSlot(compute_start_slot_at_epoch(qepoch) + i, res)
      else:
        forSlot(Slot(slot.get()), res)

      return res

  rpcServer.rpc("get_v1_beacon_headers") do (
      slot: Option[uint64], parent_root: Option[string]) ->
      seq[RpcBeaconHeaders]:
    # Not implemented yet — always raises.
    unimplemented()

  rpcServer.rpc("get_v1_beacon_headers_blockId") do (
      blockId: string) ->
      tuple[canonical: bool, header: SignedBeaconBlockHeader]:
    # Returns the header of the resolved block plus whether it lies on the
    # canonical (head) chain.
    let bd = node.getBlockDataFromBlockId(blockId)
    # TODO check for Altair blocks and fail, because /v1/
    let tsbb = bd.data.phase0Block
    # The cast below is only sound while TrustedSig and ValidatorSig share
    # the same size; enforce that at compile time.
    static: doAssert tsbb.signature is TrustedSig and
            sizeof(ValidatorSig) == sizeof(tsbb.signature)
    result.header.signature = cast[ValidatorSig](tsbb.signature)

    result.header.message.slot = tsbb.message.slot
    result.header.message.proposer_index = tsbb.message.proposer_index
    result.header.message.parent_root = tsbb.message.parent_root
    result.header.message.state_root = tsbb.message.state_root
    result.header.message.body_root = tsbb.message.body.hash_tree_root()

    result.canonical = bd.refs.isAncestorOf(node.dag.head)

  rpcServer.rpc("post_v1_beacon_blocks") do (blck: phase0.SignedBeaconBlock) -> int:
    # Accepts a signed block: 200 when proposed/integrated, 202 when it was
    # only broadcast without being integrated.
    # NOTE(review): this condition looks inverted — it raises the "currently
    # syncing" error when the sync manager is NOT in progress; verify intent.
    if not(node.syncManager.inProgress):
      raise newException(CatchableError,
        "Beacon node is currently syncing, try again later.")
    let head = node.dag.head
    if head.slot >= blck.message.slot:
      # TODO altair-transition, but not immediate testnet-priority to detect
      # Altair and fail, since /v1/ doesn't support Altair
      let blocksTopic = getBeaconBlocksTopic(node.dag.forkDigests.phase0)
      node.network.broadcast(blocksTopic, blck)
      # The block failed validation, but was successfully broadcast anyway.
      # It was not integrated into the beacon node's database.
      return 202
    else:
      let res = await proposeSignedBlock(node, head, AttachedValidator(), blck)
      if res == head:
        # TODO altair-transition, but not immediate testnet-priority
        let blocksTopic = getBeaconBlocksTopic(node.dag.forkDigests.phase0)
        node.network.broadcast(blocksTopic, blck)
        # The block failed validation, but was successfully broadcast anyway.
        # It was not integrated into the beacon node's database.
        return 202
      else:
        # The block was validated successfully and has been broadcast.
        # It has also been integrated into the beacon node's database.
        return 200

  rpcServer.rpc("get_v1_beacon_blocks_blockId") do (
      blockId: string) -> phase0.TrustedSignedBeaconBlock:
    # TODO detect Altair and fail: /v1/ APIs don't support Altair
    return node.getBlockDataFromBlockId(blockId).data.phase0Block

  rpcServer.rpc("get_v1_beacon_blocks_blockId_root") do (
      blockId: string) -> Eth2Digest:
    # TODO detect Altair and fail: /v1/ APIs don't support Altair
    # NOTE(review): this returns the block's `state_root`, while the v1 spec's
    # block-root endpoint returns the block root (hash_tree_root of the
    # block) — verify against the API specification.
    return node.getBlockDataFromBlockId(blockId).data.phase0Block.message.state_root

  rpcServer.rpc("get_v1_beacon_blocks_blockId_attestations") do (
      blockId: string) -> seq[TrustedAttestation]:
    # TODO detect Altair and fail: /v1/ APIs don't support Altair
    return node.getBlockDataFromBlockId(blockId).data.phase0Block.message.body.attestations.asSeq

  rpcServer.rpc("get_v1_beacon_pool_attestations") do (
      slot: Option[uint64], committee_index: Option[uint64]) ->
      seq[RpcAttestation]:
    # Attestations currently in the pool, optionally filtered by slot and
    # committee index. Aggregation bits are rendered as "0x"-prefixed hex.

    var res: seq[RpcAttestation]

    let qslot =
      if slot.isSome():
        some(Slot(slot.get()))
      else:
        none[Slot]()

    let qindex =
      if committee_index.isSome():
        some(CommitteeIndex(committee_index.get()))
      else:
        none[CommitteeIndex]()

    for item in node.attestationPool[].attestations(qslot, qindex):
      let atuple = (
        aggregation_bits: "0x" & ncrutils.toHex(item.aggregation_bits.bytes),
        data: item.data,
        signature: item.signature
      )
      res.add(atuple)

    return res

  rpcServer.rpc("post_v1_beacon_pool_attestations") do (
      attestation: Attestation) -> bool:
    # Delegates validation and broadcast to sendAttestation.
    return await node.sendAttestation(attestation)

  rpcServer.rpc("get_v1_beacon_pool_attester_slashings") do (
      ) -> seq[AttesterSlashing]:
    # Snapshot of pooled attester slashings; empty before the pool exists.
    var res: seq[AttesterSlashing]
    if isNil(node.exitPool):
      return res
    let length = len(node.exitPool.attester_slashings)
    res = newSeqOfCap[AttesterSlashing](length)
    for item in node.exitPool.attester_slashings.items():
      res.add(item)
    return res

  rpcServer.rpc("post_v1_beacon_pool_attester_slashings") do (
      slashing: AttesterSlashing) -> bool:
    # Validates the slashing, then gossips it; raises on invalid input.
    if isNil(node.exitPool):
      raise newException(CatchableError, "Exit pool is not yet available!")
    let validity = node.exitPool[].validateAttesterSlashing(slashing)
    if validity.isOk:
      node.network.sendAttesterSlashing(slashing)
    else:
      raise newException(CatchableError, $(validity.error[1]))
    return true

  rpcServer.rpc("get_v1_beacon_pool_proposer_slashings") do (
      ) -> seq[ProposerSlashing]:
    # Snapshot of pooled proposer slashings; empty before the pool exists.
    var res: seq[ProposerSlashing]
    if isNil(node.exitPool):
      return res
    let length = len(node.exitPool.proposer_slashings)
    res = newSeqOfCap[ProposerSlashing](length)
    for item in node.exitPool.proposer_slashings.items():
      res.add(item)
    return res

  rpcServer.rpc("post_v1_beacon_pool_proposer_slashings") do (
      slashing: ProposerSlashing) -> bool:
    # Validates the slashing, then gossips it; raises on invalid input.
    if isNil(node.exitPool):
      raise newException(CatchableError, "Exit pool is not yet available!")
    let validity = node.exitPool[].validateProposerSlashing(slashing)
    if validity.isOk:
      node.network.sendProposerSlashing(slashing)
    else:
      raise newException(CatchableError, $(validity.error[1]))
    return true

  rpcServer.rpc("get_v1_beacon_pool_voluntary_exits") do (
      ) -> seq[SignedVoluntaryExit]:
    # Snapshot of pooled voluntary exits; empty before the pool exists.
    var res: seq[SignedVoluntaryExit]
    if isNil(node.exitPool):
      return res
    let length = len(node.exitPool.voluntary_exits)
    res = newSeqOfCap[SignedVoluntaryExit](length)
    for item in node.exitPool.voluntary_exits.items():
      res.add(item)
    return res

  rpcServer.rpc("post_v1_beacon_pool_voluntary_exits") do (
      exit: SignedVoluntaryExit) -> bool:
    # Validates the exit, then gossips it; raises on invalid input.
    if isNil(node.exitPool):
      raise newException(CatchableError, "Exit pool is not yet available!")
    let validity = node.exitPool[].validateVoluntaryExit(exit)
    if validity.isOk:
      node.network.sendVoluntaryExit(exit)
    else:
      raise newException(CatchableError, $(validity.error[1]))
    return true
|