harden REST API `atSlot` against non-finalized blocks (#3538)
* harden validator API against pre-finalized slot requests
* check `syncHorizon` when responding to validator API requests too far from `head`
* limit state-id based requests to one epoch ahead of `head`
* put historic data bounds on block/attestation/etc validator production API, preventing them from being used with already-finalized slots
* add validator block smoke tests
* make rest test create a new genesis with the tests running roughly in the first epoch, to allow testing a few more boundary conditions
This commit is contained in:
parent 4207b127f9
commit bc80ac3be1
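For orientation, the bullets above amount to a small set of slot-bound checks that the validator endpoints perform before doing any work. Below is a condensed, self-contained sketch of that logic, not code from this commit: checkProductionSlot is a hypothetical helper, slotsPerEpoch is a stand-in constant, the MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance is omitted, and in the real handlers the "more than an epoch in the past" bound applies to attestation data production only.

type Slot = uint64

const slotsPerEpoch = 32'u64

func checkProductionSlot(slot, finalizedSlot, wallSlot: Slot): string =
  ## Returns an error message, or "" when the slot may be used.
  if slot <= finalizedSlot:
    "Slot already finalized"                         # historic lower bound
  elif slot > wallSlot:
    "Slot cannot be in the future"                   # wall-clock upper bound
  elif slot + slotsPerEpoch < wallSlot:
    "Slot cannot be more than an epoch in the past"  # attestation data only
  else:
    ""

when isMainModule:
  # finalized at slot 64, wall clock currently at slot 100
  doAssert checkProductionSlot(32, 64, 100) == "Slot already finalized"
  doAssert checkProductionSlot(101, 64, 100) == "Slot cannot be in the future"
  doAssert checkProductionSlot(65, 64, 100) ==
    "Slot cannot be more than an epoch in the past"
  doAssert checkProductionSlot(99, 64, 100) == ""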
@@ -213,15 +213,17 @@ proc addHeadBlock*(
   # by the time a new block reaches this point, the parent block will already
   # have "established" itself in the network to some degree at least.
   var cache = StateCache()
-  let clearanceBlock =
-    parent.atSlot(signedBlock.message.slot).toBlockslotId.expect("not nil")

+  # We've verified that the slot of the new block is newer than that of the
+  # parent, so we should now be able to create an appropriate clearance state
+  # onto which we can apply the new block
+  let clearanceBlock = BlockSlotId.init(parent.bid, signedBlock.message.slot)
   if not updateState(
       dag, dag.clearanceState, clearanceBlock, true, cache):
     # We should never end up here - the parent must be a block no older than and
     # rooted in the finalized checkpoint, hence we should always be able to
     # load its corresponding state
     error "Unable to load clearance state for parent block, database corrupt?",
       parent = shortLog(parent.atSlot(signedBlock.message.slot)),
       clearanceBlock = shortLog(clearanceBlock)
     return err(BlockError.MissingParent)
@@ -225,7 +225,7 @@ proc installNimbusApiHandlers*(router: var RestRouter, node: BeaconNode) =
     let wallSlot = node.beaconClock.now.slotOrZero
     let head =
       block:
-        let res = node.getCurrentHead(wallSlot)
+        let res = node.getSyncedHead(wallSlot)
         if res.isErr():
           return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError)
         res.get()
@@ -3,6 +3,7 @@ import std/[options, macros],
   ../spec/[forks],
   ../spec/eth2_apis/[rest_types, eth2_rest_serialization],
   ../beacon_node,
   ../validators/validator_duties,
+  ../consensus_object_pools/blockchain_dag,
   "."/[rest_constants, state_ttl_cache]
@@ -39,32 +40,31 @@ proc validate(key: string, value: string): int =
   else:
     1

-func getCurrentSlot*(node: BeaconNode, slot: Slot):
-    Result[Slot, cstring] =
-  if slot <= (node.dag.head.slot + (SLOTS_PER_EPOCH * 2)):
-    ok(slot)
-  else:
-    err("Requesting slot too far ahead of the current head")
+proc getSyncedHead*(node: BeaconNode, slot: Slot): Result[BlockRef, cstring] =
+  let head = node.dag.head

-proc getCurrentHead*(node: BeaconNode, slot: Slot): Result[BlockRef, cstring] =
-  let res = node.dag.head
-  # if not(node.isSynced(res)):
-  #   return err("Cannot fulfill request until node is synced")
-  if res.slot + uint64(2 * SLOTS_PER_EPOCH) < slot:
+  if slot > head.slot and not node.isSynced(head):
     return err("Requesting way ahead of the current head")
-  ok(res)
-
-proc getCurrentHead*(node: BeaconNode,
+  ok(head)
+
+proc getSyncedHead*(node: BeaconNode,
                      epoch: Epoch): Result[BlockRef, cstring] =
   if epoch > MaxEpoch:
     return err("Requesting epoch for which slot would overflow")
-  node.getCurrentHead(epoch.start_slot())
+  node.getSyncedHead(epoch.start_slot())

 proc getBlockSlotId*(node: BeaconNode,
                      stateIdent: StateIdent): Result[BlockSlotId, cstring] =
   case stateIdent.kind
   of StateQueryKind.Slot:
-    let bsi = node.dag.getBlockIdAtSlot(? node.getCurrentSlot(stateIdent.slot)).valueOr:
+    # Limit requests by state id to the next epoch with respect to the current
+    # head to avoid long empty slot replays (in particular a second epoch
+    # transition)
+    if stateIdent.slot.epoch > (node.dag.head.slot.epoch + 1):
+      return err("Requesting state too far ahead of current head")
+
+    let bsi = node.dag.getBlockIdAtSlot(stateIdent.slot).valueOr:
       return err("State for given slot not found, history not available?")

     ok(bsi)
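The getBlockSlotId change above caps state-by-slot requests at one epoch past the head, so at most one (mostly empty) epoch transition ever has to be replayed. A minimal illustration of that bound, using bare integers instead of the node's Slot/Epoch types and a hypothetical stateRequestAllowed helper:

type
  Slot = uint64
  Epoch = uint64

const slotsPerEpoch = 32'u64

func epoch(s: Slot): Epoch = s div slotsPerEpoch

func stateRequestAllowed(requested, head: Slot): bool =
  epoch(requested) <= epoch(head) + 1

when isMainModule:
  let head = Slot(100)                               # head sits in epoch 3
  doAssert stateRequestAllowed(Slot(159), head)      # epoch 4: allowed
  doAssert not stateRequestAllowed(Slot(160), head)  # epoch 5: rejected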
@@ -84,8 +84,12 @@ proc getBlockSlotId*(node: BeaconNode,
   of StateIdentType.Finalized:
     ok(node.dag.finalizedHead.toBlockSlotId().expect("not nil"))
   of StateIdentType.Justified:
-    ok(node.dag.head.atEpochStart(getStateField(
-      node.dag.headState, current_justified_checkpoint).epoch).toBlockSlotId().expect("not nil"))
+    # Take checkpoint-synced nodes into account
+    let justifiedEpoch =
+      max(
+        getStateField(node.dag.headState, current_justified_checkpoint).epoch,
+        node.dag.finalizedHead.slot.epoch)
+    ok(node.dag.head.atEpochStart(justifiedEpoch).toBlockSlotId().expect("not nil"))

 proc getBlockId*(node: BeaconNode, id: BlockIdent): Opt[BlockId] =
   case id.kind
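The max above exists because a checkpoint-synced node may regard its sync anchor as finalized while the head state still records an older justified checkpoint; the justified state served over REST should not fall behind what the node itself treats as final. A toy illustration of that clamping, with invented epoch numbers:

type Epoch = uint64

func justifiedEpochForApi(stateJustified, finalizedHead: Epoch): Epoch =
  max(stateJustified, finalizedHead)

when isMainModule:
  # Normal operation: justification runs ahead of finalization.
  doAssert justifiedEpochForApi(11, 10) == 11
  # Freshly checkpoint-synced from epoch 10: the head state may still carry an
  # older justified checkpoint, so clamp to the finalized anchor.
  doAssert justifiedEpochForApi(9, 10) == 10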
@@ -63,7 +63,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
           res
     let qhead =
       block:
-        let res = node.getCurrentHead(qepoch)
+        let res = node.getSyncedHead(qepoch)
         if res.isErr():
           return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError)
         res.get()
@@ -116,7 +116,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
           res
     let qhead =
       block:
-        let res = node.getCurrentHead(qepoch)
+        let res = node.getSyncedHead(qepoch)
         if res.isErr():
           return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError)
         res.get()
@@ -281,12 +281,26 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
     graffiti: Option[GraffitiBytes]) -> RestApiResponse:
     let message =
       block:
-        let qslot =
-          block:
-            if slot.isErr():
-              return RestApiResponse.jsonError(Http400, InvalidSlotValueError,
-                                               $slot.error())
-            slot.get()
+        let qslot = block:
+          if slot.isErr():
+            return RestApiResponse.jsonError(Http400, InvalidSlotValueError,
+                                             $slot.error())
+          let res = slot.get()
+
+          if res <= node.dag.finalizedHead.slot:
+            return RestApiResponse.jsonError(Http400, InvalidSlotValueError,
+                                             "Slot already finalized")
+          let
+            wallTime = node.beaconClock.now() + MAXIMUM_GOSSIP_CLOCK_DISPARITY
+          if res > wallTime.slotOrZero:
+            return RestApiResponse.jsonError(Http400, InvalidSlotValueError,
+                                             "Slot cannot be in the future")
+
+          if node.dag.cfg.blockForkAtEpoch(res.epoch) != BeaconBlockFork.Phase0:
+            return RestApiResponse.jsonError(Http400,
+                                             "Use v2 for Altair+ slots")
+
+          res
         let qrandao =
           if randao_reveal.isNone():
             return RestApiResponse.jsonError(Http400, MissingRandaoRevealValue)
@@ -309,13 +323,10 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
             res.get()
         let qhead =
           block:
-            let res = node.getCurrentHead(qslot)
+            let res = node.getSyncedHead(qslot)
             if res.isErr():
-              if not(node.isSynced(node.dag.head)):
-                return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError)
-              else:
-                return RestApiResponse.jsonError(Http400, NoHeadForSlotError,
-                                                 $res.error())
+              return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError,
+                                               $res.error())
             res.get()
         let proposer = node.dag.getProposer(qhead, qslot)
         if proposer.isNone():
@@ -331,7 +342,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
           RestApiResponse.jsonResponse(message.phase0Data)
         else:
           RestApiResponse.jsonError(Http400,
-            "Unable to produce block for altair fork")
+            "Use v2 for Altair+ slots")

   # https://ethereum.github.io/beacon-APIs/#/Validator/produceBlockV2
   router.api(MethodGet, "/eth/v2/validator/blocks/{slot}") do (
@@ -339,12 +350,21 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
     graffiti: Option[GraffitiBytes]) -> RestApiResponse:
     let message =
       block:
-        let qslot =
-          block:
-            if slot.isErr():
-              return RestApiResponse.jsonError(Http400, InvalidSlotValueError,
-                                               $slot.error())
-            slot.get()
+        let qslot = block:
+          if slot.isErr():
+            return RestApiResponse.jsonError(Http400, InvalidSlotValueError,
+                                             $slot.error())
+          let res = slot.get()
+
+          if res <= node.dag.finalizedHead.slot:
+            return RestApiResponse.jsonError(Http400, InvalidSlotValueError,
+                                             "Slot already finalized")
+          let
+            wallTime = node.beaconClock.now() + MAXIMUM_GOSSIP_CLOCK_DISPARITY
+          if res > wallTime.slotOrZero:
+            return RestApiResponse.jsonError(Http400, InvalidSlotValueError,
+                                             "Slot cannot be in the future")
+          res
         let qrandao =
           if randao_reveal.isNone():
             return RestApiResponse.jsonError(Http400, MissingRandaoRevealValue)
@@ -367,13 +387,10 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
             res.get()
         let qhead =
          block:
-            let res = node.getCurrentHead(qslot)
+            let res = node.getSyncedHead(qslot)
             if res.isErr():
-              if not(node.isSynced(node.dag.head)):
-                return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError)
-              else:
-                return RestApiResponse.jsonError(Http400, NoHeadForSlotError,
-                                                 $res.error())
+              return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError,
+                                               $res.error())
             res.get()
         let proposer = node.dag.getProposer(qhead, qslot)
         if proposer.isNone():
@@ -400,6 +417,20 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
           return RestApiResponse.jsonError(Http400, InvalidSlotValueError,
                                            $res.error())
         res.get()
+    if qslot <= node.dag.finalizedHead.slot:
+      return RestApiResponse.jsonError(Http400, InvalidSlotValueError,
+                                       "Slot already finalized")
+    let
+      wallTime = node.beaconClock.now()
+    if qslot > (wallTime + MAXIMUM_GOSSIP_CLOCK_DISPARITY).slotOrZero:
+      return RestApiResponse.jsonError(
+        Http400, InvalidSlotValueError, "Slot cannot be in the future")
+    if qslot + SLOTS_PER_EPOCH <
+        (wallTime - MAXIMUM_GOSSIP_CLOCK_DISPARITY).slotOrZero:
+      return RestApiResponse.jsonError(
+        Http400, InvalidSlotValueError,
+        "Slot cannot be more than an epoch in the past")
+
     let qindex =
       block:
         if committee_index.isNone():
|
|||
res.get()
|
||||
let qhead =
|
||||
block:
|
||||
let res = node.getCurrentHead(qslot)
|
||||
let res = node.getSyncedHead(qslot)
|
||||
if res.isErr():
|
||||
return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError)
|
||||
res.get()
|
||||
|
@@ -606,19 +637,22 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
       "/eth/v1/validator/sync_committee_contribution") do (
     slot: Option[Slot], subcommittee_index: Option[SyncSubCommitteeIndex],
     beacon_block_root: Option[Eth2Digest]) -> RestApiResponse:
-    let qslot =
+    let qslot = block:
       if slot.isNone():
         return RestApiResponse.jsonError(Http400, MissingSlotValueError)
-      else:
-        let res = slot.get()
-        if res.isErr():
-          return RestApiResponse.jsonError(Http400, InvalidSlotValueError,
-                                           $res.error())
-        let rslot = res.get()
-        if epoch(rslot) < node.dag.cfg.ALTAIR_FORK_EPOCH:
-          return RestApiResponse.jsonError(Http400,
-                                           SlotFromTheIncorrectForkError)
-        rslot
+
+      let res = slot.get()
+      if res.isErr():
+        return RestApiResponse.jsonError(Http400, InvalidSlotValueError,
+                                         $res.error())
+      let rslot = res.get()
+      if epoch(rslot) < node.dag.cfg.ALTAIR_FORK_EPOCH:
+        return RestApiResponse.jsonError(Http400,
+                                         SlotFromTheIncorrectForkError)
+      rslot
+    if qslot <= node.dag.finalizedHead.slot:
+      return RestApiResponse.jsonError(Http400, InvalidSlotValueError,
+                                       "Slot already finalized")
     let qindex =
       if subcommittee_index.isNone():
         return RestApiResponse.jsonError(Http400,
@@ -643,7 +677,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
         res.get()

     # Check if node is fully synced.
-    let sres = node.getCurrentHead(qslot)
+    let sres = node.getSyncedHead(qslot)
     if sres.isErr():
       return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError)

@@ -616,6 +616,16 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
       slot = shortLog(slot)
     return

+  if slot < node.dag.finalizedHead.slot:
+    # During checkpoint sync, we implicitly finalize the given slot even if the
+    # state transition does not yet consider it final - this is a sanity check
+    # mostly to ensure the `atSlot` below works as expected
+    warn "Skipping attestation - slot already finalized",
+      head = shortLog(head),
+      slot = shortLog(slot),
+      finalized = shortLog(node.dag.finalizedHead)
+    return
+
   let attestationHead = head.atSlot(slot)
   if head != attestationHead.blck:
     # In rare cases, such as when we're busy syncing or just slow, we'll be
@@ -2962,12 +2962,66 @@
         "status": {"operator": "equals", "value": "400"}
       }
     },
+    {
+      "topics": ["validator", "blocks"],
+      "request": {
+        "url": "/eth/v1/validator/blocks/0",
+        "headers": {"Accept": "application/json"}
+      },
+      "response": {
+        "status": {"operator": "equals", "value": "400"}
+      }
+    },
+    {
+      "topics": ["validator", "blocks"],
+      "request": {
+        "url": "/eth/v1/validator/blocks/1?randao_reveal=0x97897b5e8526b4d0f808e7b60bcd1942935b124720bd5156da54c54adc25fe458ef7c934b4e5018afe4659978b06e6510797e5cc7fc31f329035ec6a46889ee9aea375d57b22be71dd4ff181b7f1a07b9199e73c2b80e39e04ba904596d9e4db",
+        "headers": {"Accept": "application/json"}
+      },
+      "response": {
+        "status": {"operator": "equals", "value": "200"},
+        "headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}],
+        "body": [{"operator": "jstructcmps", "start": ["data"], "value": {"slot": "", "proposer_index": "", "parent_root": "", "state_root": "", "body": {"randao_reveal": "", "eth1_data": {"deposit_root": "", "deposit_count": "", "block_hash": ""}, "graffiti": "", "proposer_slashings": [], "attester_slashings": [], "attestations": [], "deposits": [], "voluntary_exits": []}}}]
+      }
+    },
+    {
+      "topics": ["validator", "blocksV2"],
+      "request": {
+        "url": "/eth/v2/validator/blocks/0",
+        "headers": {"Accept": "application/json"}
+      },
+      "response": {
+        "status": {"operator": "equals", "value": "400"}
+      }
+    },
+    {
+      "topics": ["validator", "blocksV2"],
+      "request": {
+        "url": "/eth/v2/validator/blocks/1?randao_reveal=0x97897b5e8526b4d0f808e7b60bcd1942935b124720bd5156da54c54adc25fe458ef7c934b4e5018afe4659978b06e6510797e5cc7fc31f329035ec6a46889ee9aea375d57b22be71dd4ff181b7f1a07b9199e73c2b80e39e04ba904596d9e4db",
+        "headers": {"Accept": "application/json"}
+      },
+      "response": {
+        "status": {"operator": "equals", "value": "200"},
+        "headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}],
+        "body": [{"operator": "jstructcmps", "start": ["data"], "value": {"slot": "", "proposer_index": "", "parent_root": "", "state_root": "", "body": {"randao_reveal": "", "eth1_data": {"deposit_root": "", "deposit_count": "", "block_hash": ""}, "graffiti": "", "proposer_slashings": [], "attester_slashings": [], "attestations": [], "deposits": [], "voluntary_exits": []}}}]
+      }
+    },
+    {
+      "topics": ["validator", "attestation_data"],
+      "request": {
+        "url": "/eth/v1/validator/attestation_data?slot=0&committee_index=0",
+        "headers": {"Accept": "application/json"}
+      },
+      "response": {
+        "status": {"operator": "equals", "value": "400"}
+      }
+    },
     {
       "topics": ["validator", "attestation_data"],
       "request": {
         "url": "/eth/v1/validator/attestation_data?slot=1&committee_index=0",
         "headers": {"Accept": "application/json"}
       },
       "response": {
         "status": {"operator": "equals", "value": "200"},
         "headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}],
@@ -189,19 +189,21 @@ cleanup() {
 }
 trap 'cleanup' SIGINT SIGTERM EXIT

-if [[ ! -f "${SNAPSHOT_FILE}" ]]; then
-  echo "Creating testnet genesis..."
-  ${NIMBUS_BEACON_NODE_BIN} \
-    --data-dir="${TEST_DIR}" \
-    createTestnet \
-    --deposits-file="${DEPOSITS_FILE}" \
-    --total-validators="${NUM_VALIDATORS}" \
-    --output-genesis="${SNAPSHOT_FILE}" \
-    --output-bootstrap-file="${NETWORK_BOOTSTRAP_FILE}" \
-    --netkey-file=network_key.json \
-    --insecure-netkey-password=true \
-    --genesis-offset=0 # Delay in seconds
-fi
+echo "Creating testnet genesis..."
+${NIMBUS_BEACON_NODE_BIN} \
+  --data-dir="${TEST_DIR}" \
+  createTestnet \
+  --deposits-file="${DEPOSITS_FILE}" \
+  --total-validators="${NUM_VALIDATORS}" \
+  --output-genesis="${SNAPSHOT_FILE}" \
+  --output-bootstrap-file="${NETWORK_BOOTSTRAP_FILE}" \
+  --netkey-file=network_key.json \
+  --insecure-netkey-password=true \
+  --genesis-offset=-12 # Chain that has already started allows testing empty slots
+
+# Make sure we use the newly generated genesis
+echo "Removing existing database..."
+rm -rf "${TEST_DIR}/db"
+
 DEPOSIT_CONTRACT_ADDRESS="0x0000000000000000000000000000000000000000"
 DEPOSIT_CONTRACT_BLOCK="0x0000000000000000000000000000000000000000000000000000000000000000"
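The --genesis-offset=-12 change places genesis slightly in the past, so the rest test starts against a chain that is already underway, which lets it exercise empty-slot and first-epoch boundary conditions. A back-of-the-envelope sketch of why, assuming 12-second slots (the test preset may use a different SECONDS_PER_SLOT):

const secondsPerSlot = 12'i64

func wallSlot(genesisTime, now: int64): int64 =
  if now < genesisTime: 0'i64
  else: (now - genesisTime) div secondsPerSlot

when isMainModule:
  let now = 1_700_000_000'i64
  # --genesis-offset=0: genesis coincides with startup, the node waits in slot 0
  doAssert wallSlot(now + 0, now) == 0
  # --genesis-offset=-12: genesis lies 12 s in the past, so the node starts
  # with the wall clock already in slot 1
  doAssert wallSlot(now - 12, now) == 1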