nimbus-eth2/beacon_chain/rpc/rpc_validator_api.nim

# beacon_chain
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}

import
  # Standard library
  std/tables,

  # Nimble packages
  stew/objects,
  json_rpc/servers/httpserver,
  chronicles,

  # Local modules
  ../spec/[forks, helpers, network, signatures],
  ../spec/datatypes/phase0,
  ../spec/eth2_apis/rpc_types,
  ../consensus_object_pools/[blockchain_dag, spec_cache, attestation_pool],
  ../beacon_node_common, ../beacon_node_types,
  ../validators/validator_duties,
  ../networking/eth2_network,
  ./rpc_utils

logScope: topics = "valapi"

type
  RpcServer* = RpcHttpServer
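
# Registers the JSON-RPC handlers for the validator endpoints: block
# production and publication, attestation data, aggregates, duty queries and
# beacon committee subnet subscriptions.
#
# A hedged usage sketch, assuming the node runs with the JSON-RPC server
# enabled on its default --rpc-port of 9190 (adjust if configured otherwise):
#
#   curl -s -H 'Content-Type: application/json' \
#     -d '{"jsonrpc":"2.0","id":1,"method":"get_v1_validator_duties_proposer","params":[100]}' \
#     http://localhost:9190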
proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
    raises: [Defect, CatchableError].} =
  rpcServer.rpc("get_v1_validator_block") do (
      slot: Slot, graffiti: GraffitiBytes, randao_reveal: ValidatorSig) -> phase0.BeaconBlock:
    debug "get_v1_validator_block", slot = slot
    let head = node.doChecksAndGetCurrentHead(slot)
    let proposer = node.dag.getProposer(head, slot)
    if proposer.isNone():
      raise newException(CatchableError,
                         "could not retrieve block for slot: " & $slot)
    let message = await makeBeaconBlockForHeadAndSlot(
      node, randao_reveal, proposer.get(), graffiti, head, slot)
    if message.isErr():
      raise newException(CatchableError,
                         "could not retrieve block for slot: " & $slot)
    let blck = message.get()
    case blck.kind
    of BeaconBlockFork.Phase0:
      return blck.phase0Block
    of BeaconBlockFork.Altair:
      raise newException(CatchableError,
                         "Altair blocks are not supported by this endpoint")
rpcServer.rpc("post_v1_validator_block") do (body: phase0.SignedBeaconBlock) -> bool:
debug "post_v1_validator_block",
slot = body.message.slot,
prop_idx = body.message.proposer_index
let head = node.doChecksAndGetCurrentHead(body.message.slot)
if head.slot >= body.message.slot:
raise newException(CatchableError,
"Proposal is for a past slot: " & $body.message.slot)
if head == await proposeSignedBlock(
node, head, AttachedValidator(), ForkedSignedBeaconBlock.init(body)):
raise newException(CatchableError, "Could not propose block")
return true
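
  # Produce unsigned attestation data for the given slot and committee,
  # based on the node's current view of the head.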
rpcServer.rpc("get_v1_validator_attestation_data") do (
slot: Slot, committee_index: CommitteeIndex) -> AttestationData:
debug "get_v1_validator_attestation_data", slot = slot
let
head = node.doChecksAndGetCurrentHead(slot)
epochRef = node.dag.getEpochRef(head, slot.epoch)
return makeAttestationData(epochRef, head.atSlot(slot), committee_index)
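
  # Return a previously produced aggregate matching the given attestation
  # data root, if the attestation pool has one.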
rpcServer.rpc("get_v1_validator_aggregate_attestation") do (
slot: Slot, attestation_data_root: Eth2Digest)-> Attestation:
debug "get_v1_validator_aggregate_attestation"
let res = node.attestationPool[].getAggregatedAttestation(slot, attestation_data_root)
if res.isSome:
return res.get
raise newException(CatchableError, "Could not retrieve an aggregated attestation")
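
  # Broadcast a signed aggregate-and-proof to the gossip network on behalf
  # of the validator client.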
rpcServer.rpc("post_v1_validator_aggregate_and_proofs") do (
payload: SignedAggregateAndProof) -> bool:
debug "post_v1_validator_aggregate_and_proofs"
node.network.broadcastAggregateAndProof(payload)
notice "Aggregated attestation sent",
attestation = shortLog(payload.message.aggregate),
signature = shortLog(payload.signature)
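
  # Scan every committee of every slot in the requested epoch and report
  # attester duties for each of the given public keys found there.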
rpcServer.rpc("get_v1_validator_duties_attester") do (
epoch: Epoch, public_keys: seq[ValidatorPubKey]) -> seq[RpcAttesterDuties]:
debug "get_v1_validator_duties_attester", epoch = epoch
let
head = node.doChecksAndGetCurrentHead(epoch)
epochRef = node.dag.getEpochRef(head, epoch)
committees_per_slot = get_committee_count_per_slot(epochRef)
for i in 0 ..< SLOTS_PER_EPOCH:
let slot = compute_start_slot_at_epoch(epoch) + i
for committee_index in 0'u64..<committees_per_slot:
let committee = get_beacon_committee(
epochRef, slot, committee_index.CommitteeIndex)
for index_in_committee, validatorIdx in committee:
let curr_val_pubkey = epochRef.validatorKey(validatorIdx)
if curr_val_pubkey.isSome():
if public_keys.findIt(it == curr_val_pubkey.get().toPubKey()) != -1:
result.add((public_key: curr_val_pubkey.get().toPubKey(),
validator_index: validatorIdx,
committee_index: committee_index.CommitteeIndex,
committee_length: committee.lenu64,
validator_committee_index: index_in_committee.uint64,
slot: slot))
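
  # Report the proposer schedule for the requested epoch, one entry per slot
  # with a known proposer.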
rpcServer.rpc("get_v1_validator_duties_proposer") do (
epoch: Epoch) -> seq[RpcValidatorDuties]:
debug "get_v1_validator_duties_proposer", epoch = epoch
let
head = node.doChecksAndGetCurrentHead(epoch)
epochRef = node.dag.getEpochRef(head, epoch)
for i, bp in epochRef.beacon_proposers:
if bp.isSome():
result.add((public_key: epochRef.validatorKey(bp.get).get().toPubKey,
validator_index: bp.get(),
slot: compute_start_slot_at_epoch(epoch) + i))
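
  # Validate a committee subnet subscription request (committee index, slot
  # window, slot signature) and ensure the node is subscribed to the matching
  # attestation subnet. The aggregator flag is accepted for API compatibility
  # but the subscription logic below does not depend on it.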
rpcServer.rpc("post_v1_validator_beacon_committee_subscriptions") do (
committee_index: CommitteeIndex, slot: Slot, aggregator: bool,
validator_pubkey: ValidatorPubKey, slot_signature: ValidatorSig) -> bool:
debug "post_v1_validator_beacon_committee_subscriptions",
committee_index, slot
    if committee_index.uint64 >= MAX_COMMITTEES_PER_SLOT.uint64:
      raise newException(CatchableError,
        "Invalid committee index")

    if node.syncManager.inProgress:
      raise newException(CatchableError,
        "Beacon node is currently syncing and not serving requests on this endpoint")

    let wallSlot = node.beaconClock.now.slotOrZero
    if wallSlot > slot + 1:
      raise newException(CatchableError,
        "Past slot requested")

    let epoch = slot.epoch
    if epoch >= wallSlot.epoch and epoch - wallSlot.epoch > 1:
      raise newException(CatchableError,
        "Slot requested not in current or next wall-slot epoch")

    if not verify_slot_signature(
        getStateField(node.dag.headState.data, fork),
        getStateField(node.dag.headState.data, genesis_validators_root),
        slot, validator_pubkey, slot_signature):
      raise newException(CatchableError,
        "Invalid slot signature")
    let
      head = node.doChecksAndGetCurrentHead(epoch)
      epochRef = node.dag.getEpochRef(head, epoch)
      subnet_id = compute_subnet_for_attestation(
        get_committee_count_per_slot(epochRef), slot, committee_index)

    # The subnet is either already subscribed or not. If not, subscribe; if
    # it is, extend the subscription. All the API tells us, combined with how
    # far ahead the attestation schedule can be checked, is that the subnet
    # might be used until the end of the next epoch, so arrange for the
    # subscription to last at least that long.
    if not node.attestationSubnets.aggregateSubnets[subnet_id.uint64]:
      # Since the API gives no hint as to when the subnet is first needed,
      # subscribe immediately.
      node.attestationSubnets.subscribeSlot[subnet_id.uint64] =
        min(node.attestationSubnets.subscribeSlot[subnet_id.uint64], wallSlot)

    node.attestationSubnets.unsubscribeSlot[subnet_id.uint64] =
      max(
        compute_start_slot_at_epoch(epoch + 2),
        node.attestationSubnets.unsubscribeSlot[subnet_id.uint64])