spec: Option -> Opt (#4488)
parent 713bdd317d
commit ba3db7aa5a
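The change is mechanical but wide-ranging: spec-facing code stops using Option[T] from std/options and switches to Opt[T] from stew/results, so optional values and error-style early returns share one idiom (Opt.some, Opt.none and valueOr) and the std/options import/export can be dropped. As a rough illustration of the pattern (not code from this diff; the proc names below are made up), the before/after reads like this in Nim:

    # Illustrative sketch only: assumes stew/results' Opt API
    # (Opt.some, Opt.none, valueOr); findEven/describe are hypothetical.
    import stew/results

    proc findEven(xs: seq[int]): Opt[int] =
      for x in xs:
        if x mod 2 == 0:
          return Opt.some(x)   # previously: some(x) from std/options
      Opt.none(int)            # previously: none(int)

    proc describe(xs: seq[int]): string =
      # valueOr unwraps the value or runs the block, which returns from
      # the enclosing proc - this replaces the old isNone()/get() pairs.
      let even = findEven(xs).valueOr:
        return "no even number"
      "first even: " & $even

The hunks below apply exactly this substitution: some(x) becomes Opt.some(x), none(T) becomes Opt.none(T), and isNone()/get() checks collapse into valueOr blocks.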
@@ -269,7 +269,7 @@ type

 checkpoints*: FinalityCheckpoints

-beacon_proposers*: array[SLOTS_PER_EPOCH, Option[ValidatorIndex]]
+beacon_proposers*: array[SLOTS_PER_EPOCH, Opt[ValidatorIndex]]
 proposer_dependent_root*: Eth2Digest

 shufflingRef*: ShufflingRef

@@ -11,7 +11,7 @@ else:
 {.push raises: [].}

 import
-std/[algorithm, options, sequtils, tables, sets],
+std/[algorithm, sequtils, tables, sets],
 stew/[assign2, byteutils, results],
 metrics, snappy, chronicles,
 ../spec/[beaconstate, eth2_merkleization, eth2_ssz_serialization, helpers,
@@ -135,7 +135,7 @@ proc updateFrontfillBlocks*(dag: ChainDAGRef) =
 reset(dag.frontfillBlocks)

 func validatorKey*(
-dag: ChainDAGRef, index: ValidatorIndex or uint64): Option[CookedPubKey] =
+dag: ChainDAGRef, index: ValidatorIndex or uint64): Opt[CookedPubKey] =
 ## Returns the validator pubkey for the index, assuming it's been observed
 ## at any point in time - this function may return pubkeys for indicies that
 ## are not (yet) part of the head state (if the key has been observed on a

@@ -2210,11 +2210,11 @@ proc preInit*(
 notice "Database initialized from checkpoint", blockRoot = $blockRoot

 proc getProposer*(
-dag: ChainDAGRef, head: BlockRef, slot: Slot): Option[ValidatorIndex] =
+dag: ChainDAGRef, head: BlockRef, slot: Slot): Opt[ValidatorIndex] =
 let
 epochRef = dag.getEpochRef(head.bid, slot.epoch(), false).valueOr:
 notice "Cannot load EpochRef for given head", head, slot, error
-return none(ValidatorIndex)
+return Opt.none(ValidatorIndex)

 slotInEpoch = slot.since_epoch_start()
@@ -2226,7 +2226,7 @@ proc getProposer*(
 # created validators in the state without updating the cache!
 warn "Proposer key not found",
 keys = dag.db.immutableValidators.lenu64(), proposer = proposer.get()
-return none(ValidatorIndex)
+return Opt.none(ValidatorIndex)

 proposer

@@ -2274,20 +2274,17 @@ proc aggregateAll*(
 return err("aggregate: no attesting keys")

 let
-firstKey = dag.validatorKey(validator_indices[0])
-
-if not firstKey.isSome():
-return err("aggregate: invalid validator index")
+firstKey = dag.validatorKey(validator_indices[0]).valueOr:
+return err("aggregate: invalid validator index")

 var aggregateKey{.noinit.}: AggregatePublicKey

-aggregateKey.init(firstKey.get())
+aggregateKey.init(firstKey)

 for i in 1 ..< validator_indices.len:
-let key = dag.validatorKey(validator_indices[i])
-if not key.isSome():
+let key = dag.validatorKey(validator_indices[i]).valueOr:
 return err("aggregate: invalid validator index")
-aggregateKey.aggregate(key.get())
+aggregateKey.aggregate(key)

 ok(finish(aggregateKey))
@@ -2304,14 +2301,13 @@ proc aggregateAll*(

 for i in 0..<bits.len():
 if bits[i]:
-let key = dag.validatorKey(validator_indices[i])
-if not key.isSome():
+let key = dag.validatorKey(validator_indices[i]).valueOr:
 return err("aggregate: invalid validator index")

 if inited:
-aggregateKey.aggregate(key.get)
+aggregateKey.aggregate(key)
 else:
-aggregateKey = AggregatePublicKey.init(key.get)
+aggregateKey = AggregatePublicKey.init(key)
 inited = true

 if not inited:
@@ -313,15 +313,14 @@ proc checkNextProposer(
 if not dag.isSynced(wallSlot):
 return Opt.none((ValidatorIndex, ValidatorPubKey))

-let proposer = dag.getProposer(dag.head, nextWallSlot)
-if proposer.isNone():
-return Opt.none((ValidatorIndex, ValidatorPubKey))
+let proposer = ? dag.getProposer(dag.head, nextWallSlot)

 if actionTracker.getNextProposalSlot(wallSlot) != nextWallSlot and
 dynamicFeeRecipientsStore[].getDynamicFeeRecipient(
-proposer.get, nextWallSlot.epoch).isNone:
+proposer, nextWallSlot.epoch).isNone:
 return Opt.none((ValidatorIndex, ValidatorPubKey))
-let proposerKey = dag.validatorKey(proposer.get).get().toPubKey
-Opt.some((proposer.get, proposerKey))
+let proposerKey = dag.validatorKey(proposer).get().toPubKey
+Opt.some((proposer, proposerKey))

 proc checkNextProposer*(self: ref ConsensusManager, wallSlot: Slot):
 Opt[(ValidatorIndex, ValidatorPubKey)] =
@@ -273,13 +273,6 @@ proc scheduleBatch(batchCrypto: ref BatchCrypto, fresh: bool) =
 # If there's a full batch, process it eagerly assuming the callback allows
 batchCrypto.processBatch()

-template orReturnErr(v: Option, error: cstring): untyped =
-## Returns with given error string if the option does not have a value
-let tmp = v
-if tmp.isNone:
-return err(error) # this exits the calling scope, as templates are inlined.
-tmp.unsafeGet()
-
 template withBatch(
 batchCrypto: ref BatchCrypto, name: cstring,
 body: untyped): Future[BatchResult] =
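With valueOr available from stew/results, the local orReturnErr template deleted above (and the similar loadOrExit helper removed later in this commit) has no remaining role: the unwrap-or-return-error step is written inline at each call site. A minimal, self-contained sketch of that usage, with a made-up Sig type, load and check procs standing in for the real signature loading:

    # Sketch only; Sig, load and check are stand-ins, not APIs from this repo.
    import stew/results

    type Sig = object
      bytes: array[4, byte]

    proc load(raw: seq[byte]): Opt[Sig] =
      ## Pretend parser that fails unless exactly 4 bytes are supplied.
      if raw.len == 4:
        var s: Sig
        for i, b in raw: s.bytes[i] = b
        Opt.some(s)
      else:
        Opt.none(Sig)

    proc check(raw: seq[byte]): Result[void, cstring] =
      # old style: let sig = raw.load().orReturnErr("cannot load signature")
      let sig = raw.load().valueOr:
        return err("cannot load signature")
      discard sig
      ok()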
@@ -328,7 +321,8 @@ proc scheduleAttestationCheck*(
 ## and a future with the deferred attestation check otherwise.
 ##
 let
-sig = signature.load().orReturnErr("attestation: cannot load signature")
+sig = signature.load().valueOr:
+return err("attestation: cannot load signature")
 fut = batchCrypto.withBatch("batch_validation.scheduleAttestationCheck"):
 attestation_signature_set(
 fork, genesis_validators_root, attestationData, pubkey, sig)

@@ -364,15 +358,15 @@ proc scheduleAggregateChecks*(
 # Do the eager steps first to avoid polluting batches with needlessly
 let
 aggregatorKey =
-dag.validatorKey(aggregate_and_proof.aggregator_index).orReturnErr(
-"SignedAggregateAndProof: invalid aggregator index")
-aggregatorSig = signedAggregateAndProof.signature.load().orReturnErr(
-"aggregateAndProof: invalid proof signature")
-slotSig = aggregate_and_proof.selection_proof.load().orReturnErr(
-"aggregateAndProof: invalid selection signature")
+dag.validatorKey(aggregate_and_proof.aggregator_index).valueOr:
+return err("SignedAggregateAndProof: invalid aggregator index")
+aggregatorSig = signedAggregateAndProof.signature.load().valueOr:
+return err("aggregateAndProof: invalid proof signature")
+slotSig = aggregate_and_proof.selection_proof.load().valueOr:
+return err("aggregateAndProof: invalid selection signature")
 aggregateKey = ? aggregateAll(dag, attesting_indices)
-aggregateSig = aggregate.signature.load().orReturnErr(
-"aggregateAndProof: invalid aggregate signature")
+aggregateSig = aggregate.signature.load().valueOr:
+return err("aggregateAndProof: invalid aggregate signature")

 let
 aggregatorFut = batchCrypto.withBatch("scheduleAggregateChecks.aggregator"):
@@ -406,8 +400,8 @@ proc scheduleSyncCommitteeMessageCheck*(
 ## and a future with the deferred attestation check otherwise.
 ##
 let
-sig = signature.load().orReturnErr(
-"SyncCommitteMessage: cannot load signature")
+sig = signature.load().valueOr:
+return err("SyncCommitteMessage: cannot load signature")
 fut = batchCrypto.withBatch("scheduleSyncCommitteeMessageCheck"):
 sync_committee_message_signature_set(
 fork, genesis_validators_root, slot, beacon_block_root, pubkey, sig)

@@ -438,14 +432,14 @@ proc scheduleContributionChecks*(
 # Do the eager steps first to avoid polluting batches with needlessly
 let
 aggregatorKey =
-dag.validatorKey(contribution_and_proof.aggregator_index).orReturnErr(
-"SignedAggregateAndProof: invalid contributor index")
-aggregatorSig = signedContributionAndProof.signature.load().orReturnErr(
-"SignedContributionAndProof: invalid proof signature")
-proofSig = contribution_and_proof.selection_proof.load().orReturnErr(
-"SignedContributionAndProof: invalid selection signature")
-contributionSig = contribution.signature.load().orReturnErr(
-"SignedContributionAndProof: invalid contribution signature")
+dag.validatorKey(contribution_and_proof.aggregator_index).valueOr:
+return err("SignedAggregateAndProof: invalid contributor index")
+aggregatorSig = signedContributionAndProof.signature.load().valueOr:
+return err("SignedContributionAndProof: invalid proof signature")
+proofSig = contribution_and_proof.selection_proof.load().valueOr:
+return err("SignedContributionAndProof: invalid selection signature")
+contributionSig = contribution.signature.load().valueOr:
+return err("SignedContributionAndProof: invalid contribution signature")

 contributionKey = ? aggregateAll(
 dag, dag.syncCommitteeParticipants(contribution.slot + 1, subcommitteeIdx),
@@ -371,13 +371,12 @@ proc validateBeaconBlock*(
 # processing while proposers for the block's branch are calculated -- in such
 # a case do not REJECT, instead IGNORE this message.
 let
-proposer = getProposer(dag, parent, signed_beacon_block.message.slot)
+proposer = getProposer(
+dag, parent, signed_beacon_block.message.slot).valueOr:
+warn "cannot compute proposer for message"
+return errIgnore("BeaconBlock: Cannot compute proposer") # internal issue

-if proposer.isNone:
-warn "cannot compute proposer for message"
-return errIgnore("BeaconBlock: Cannot compute proposer") # internal issue
-
-if uint64(proposer.get()) != signed_beacon_block.message.proposer_index:
+if uint64(proposer) != signed_beacon_block.message.proposer_index:
 quarantine[].addUnviable(signed_beacon_block.root)
 return errReject("BeaconBlock: Unexpected proposer proposer")

@@ -388,7 +387,7 @@ proc validateBeaconBlock*(
 getStateField(dag.headState, genesis_validators_root),
 signed_beacon_block.message.slot,
 signed_beacon_block.root,
-dag.validatorKey(proposer.get()).get(),
+dag.validatorKey(proposer).get(),
 signed_beacon_block.signature):
 quarantine[].addUnviable(signed_beacon_block.root)
@@ -579,8 +578,7 @@ proc validateAttestation*(
 attestation.data.target.epoch:
 return errIgnore("Attestation: Validator has already voted in epoch")

-let pubkey = pool.dag.validatorKey(validator_index)
-if pubkey.isNone():
+let pubkey = pool.dag.validatorKey(validator_index).valueOr:
 # can't happen, in theory, because we checked the aggregator index above
 return errIgnore("Attestation: cannot find validator pubkey")

@@ -592,7 +590,7 @@ proc validateAttestation*(
 let deferredCrypto = batchCrypto
 .scheduleAttestationCheck(
 fork, genesis_validators_root, attestation.data,
-pubkey.get(), attestation.signature)
+pubkey, attestation.signature)
 if deferredCrypto.isErr():
 return checkedReject(deferredCrypto.error)

@@ -608,10 +606,8 @@ proc validateAttestation*(
 of BatchResult.Valid:
 sig # keep going only in this case
 else:
-let sig = attestation.signature.load()
-if not sig.isSome():
+attestation.signature.load().valueOr:
 return checkedReject("Attestation: unable to load signature")
-sig.get()

 # Only valid attestations go in the list, which keeps validator_index
 # in range

@@ -803,10 +799,8 @@ proc validateAggregate*(
 discard
 sig
 else:
-let sig = aggregate.signature.load()
-if not sig.isSome():
+aggregate.signature.load().valueOr:
 return checkedReject("Aggregate: unable to load signature")
-sig.get()

 # The following rule follows implicitly from that we clear out any
 # unviable blocks from the chain dag:
@@ -949,10 +943,8 @@ proc validateSyncCommitteeMessage*(
 epoch = msg.slot.epoch
 fork = dag.forkAtEpoch(epoch)
 genesis_validators_root = dag.genesis_validators_root
-senderPubKey = dag.validatorKey(msg.validator_index)
-
-if senderPubKey.isNone():
-return errReject("SyncCommitteeMessage: invalid validator index")
+senderPubKey = dag.validatorKey(msg.validator_index).valueOr:
+return errReject("SyncCommitteeMessage: invalid validator index")

 let sig =
 if checkSignature:

@@ -961,7 +953,7 @@ proc validateSyncCommitteeMessage*(
 .scheduleSyncCommitteeMessageCheck(
 fork, genesis_validators_root,
 msg.slot, msg.beacon_block_root,
-senderPubKey.get(), msg.signature)
+senderPubKey, msg.signature)
 if deferredCrypto.isErr():
 return errReject(deferredCrypto.error)

@@ -979,10 +971,8 @@ proc validateSyncCommitteeMessage*(
 of BatchResult.Valid:
 sig # keep going only in this case
 else:
-let sig = msg.signature.load()
-if not sig.isSome():
+msg.signature.load().valueOr:
 return errReject("SyncCommitteeMessage: unable to load signature")
-sig.get()

 return ok((positionsInSubcommittee, sig))

@@ -1100,10 +1090,8 @@ proc validateContribution*(
 discard
 sig
 else:
-let sig = msg.message.contribution.signature.load()
-if not sig.isSome():
+msg.message.contribution.signature.load().valueOr:
 return errReject("SyncCommitteeMessage: unable to load signature")
-sig.get()

 return ok((sig, participants))
@@ -126,22 +126,20 @@ proc toString*(kind: ValidatorFilterKind): string =
 proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
 # https://github.com/ethereum/EIPs/blob/master/EIPS/eip-4881.md
 router.api(MethodGet, "/eth/v1/beacon/deposit_snapshot") do () -> RestApiResponse:
-let snapshotOpt = node.db.getDepositTreeSnapshot()
-if snapshotOpt.isSome():
-let snapshot = snapshotOpt.get()
-return RestApiResponse.jsonResponse(
-RestDepositSnapshot(
-finalized: snapshot.depositContractState.branch,
-deposit_root: snapshot.getDepositRoot(),
-deposit_count: snapshot.getDepositCountU64(),
-execution_block_hash: snapshot.eth1Block,
-execution_block_height: snapshot.blockHeight))
-else:
+let snapshot = node.db.getDepositTreeSnapshot().valueOr:
 # This can happen in a very short window after the client is started, but the
 # snapshot record still haven't been upgraded in the database. Returning 404
 # should be easy to handle for the clients - they just need to retry.
 return RestApiResponse.jsonError(Http404, NoFinalizedSnapshotAvailableError)
+
+return RestApiResponse.jsonResponse(
+RestDepositSnapshot(
+finalized: snapshot.depositContractState.branch,
+deposit_root: snapshot.getDepositRoot(),
+deposit_count: snapshot.getDepositCountU64(),
+execution_block_hash: snapshot.eth1Block,
+execution_block_height: snapshot.blockHeight))

 # https://ethereum.github.io/beacon-APIs/#/Beacon/getGenesis
 router.api(MethodGet, "/eth/v1/beacon/genesis") do () -> RestApiResponse:
 return RestApiResponse.jsonResponse(
@@ -1048,14 +1046,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
 # https://ethereum.github.io/beacon-APIs/#/Beacon/getPoolAttesterSlashings
 router.api(MethodGet, "/eth/v1/beacon/pool/attester_slashings") do (
 ) -> RestApiResponse:
-var res: seq[AttesterSlashing]
-if isNil(node.exitPool):
-return RestApiResponse.jsonResponse(res)
-let length = len(node.exitPool.attester_slashings)
-res = newSeqOfCap[AttesterSlashing](length)
-for item in node.exitPool.attester_slashings.items():
-res.add(item)
-return RestApiResponse.jsonResponse(res)
+return RestApiResponse.jsonResponse(toSeq(node.exitPool.attester_slashings))

 # https://ethereum.github.io/beacon-APIs/#/Beacon/submitPoolAttesterSlashings
 router.api(MethodPost, "/eth/v1/beacon/pool/attester_slashings") do (

@@ -1080,14 +1071,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
 # https://ethereum.github.io/beacon-APIs/#/Beacon/getPoolProposerSlashings
 router.api(MethodGet, "/eth/v1/beacon/pool/proposer_slashings") do (
 ) -> RestApiResponse:
-var res: seq[ProposerSlashing]
-if isNil(node.exitPool):
-return RestApiResponse.jsonResponse(res)
-let length = len(node.exitPool.proposer_slashings)
-res = newSeqOfCap[ProposerSlashing](length)
-for item in node.exitPool.proposer_slashings.items():
-res.add(item)
-return RestApiResponse.jsonResponse(res)
+return RestApiResponse.jsonResponse(toSeq(node.exitPool.proposer_slashings))

 # https://ethereum.github.io/beacon-APIs/#/Beacon/submitPoolProposerSlashings
 router.api(MethodPost, "/eth/v1/beacon/pool/proposer_slashings") do (

@@ -1143,14 +1127,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
 # https://ethereum.github.io/beacon-APIs/#/Beacon/getPoolVoluntaryExits
 router.api(MethodGet, "/eth/v1/beacon/pool/voluntary_exits") do (
 ) -> RestApiResponse:
-var res: seq[SignedVoluntaryExit]
-if isNil(node.exitPool):
-return RestApiResponse.jsonResponse(res)
-let length = len(node.exitPool.voluntary_exits)
-res = newSeqOfCap[SignedVoluntaryExit](length)
-for item in node.exitPool.voluntary_exits.items():
-res.add(item)
-return RestApiResponse.jsonResponse(res)
+return RestApiResponse.jsonResponse(toSeq(node.exitPool.voluntary_exits))

 # https://ethereum.github.io/beacon-APIs/#/Beacon/submitPoolVoluntaryExit
 router.api(MethodPost, "/eth/v1/beacon/pool/voluntary_exits") do (
@@ -313,9 +313,8 @@ proc verifyRandao*(
 node: BeaconNode, slot: Slot, proposer: ValidatorIndex,
 randao: ValidatorSig, skip_randao_verification: bool): bool =
 let
-proposer_pubkey = node.dag.validatorKey(proposer)
-if proposer_pubkey.isNone():
-return false
+proposer_pubkey = node.dag.validatorKey(proposer).valueOr:
+return false

 if skip_randao_verification:
 randao == ValidatorSig.infinity()

@@ -325,5 +324,4 @@ proc verifyRandao*(
 genesis_validators_root = node.dag.genesis_validators_root

 verify_epoch_signature(
-fork, genesis_validators_root, slot.epoch, proposer_pubkey.get(),
-randao)
+fork, genesis_validators_root, slot.epoch, proposer_pubkey, randao)
@@ -366,21 +366,20 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
 $res.error())
 res.get()
 let
-proposer = node.dag.getProposer(qhead, qslot)
-if proposer.isNone():
-return RestApiResponse.jsonError(Http400, ProposerNotFoundError)
+proposer = node.dag.getProposer(qhead, qslot).valueOr:
+return RestApiResponse.jsonError(Http400, ProposerNotFoundError)

 if not node.verifyRandao(
-qslot, proposer.get(), qrandao, qskip_randao_verification):
+qslot, proposer, qrandao, qskip_randao_verification):
 return RestApiResponse.jsonError(Http400, InvalidRandaoRevealValue)

 let res =
 if qslot.epoch >= node.dag.cfg.CAPELLA_FORK_EPOCH:
 await makeBeaconBlockForHeadAndSlot[capella.ExecutionPayload](
-node, qrandao, proposer.get(), qgraffiti, qhead, qslot)
+node, qrandao, proposer, qgraffiti, qhead, qslot)
 else:
 await makeBeaconBlockForHeadAndSlot[bellatrix.ExecutionPayload](
-node, qrandao, proposer.get(), qgraffiti, qhead, qslot)
+node, qrandao, proposer, qgraffiti, qhead, qslot)
 if res.isErr():
 return RestApiResponse.jsonError(Http400, res.error())
 res.get()

@@ -456,12 +455,11 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
 return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError,
 $res.error())
 res.get()
-let proposer = node.dag.getProposer(qhead, qslot)
-if proposer.isNone():
+let proposer = node.dag.getProposer(qhead, qslot).valueOr:
 return RestApiResponse.jsonError(Http400, ProposerNotFoundError)

 if not node.verifyRandao(
-qslot, proposer.get(), qrandao, qskip_randao_verification):
+qslot, proposer, qrandao, qskip_randao_verification):
 return RestApiResponse.jsonError(Http400, InvalidRandaoRevealValue)

 template responsePlain(response: untyped): untyped =

@@ -474,7 +472,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =

 if node.currentSlot().epoch() >= node.dag.cfg.BELLATRIX_FORK_EPOCH:
 let res = await makeBlindedBeaconBlockForHeadAndSlot(
-node, qrandao, proposer.get(), qgraffiti, qhead, qslot)
+node, qrandao, proposer, qgraffiti, qhead, qslot)
 if res.isErr():
 return RestApiResponse.jsonError(Http400, res.error())
 return responsePlain(ForkedBlindedBeaconBlock(

@@ -483,7 +481,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
 else:
 # Pre-Bellatrix, this endpoint will return a BeaconBlock
 let res = await makeBeaconBlockForHeadAndSlot[bellatrix.ExecutionPayload](
-node, qrandao, proposer.get(), qgraffiti, qhead, qslot)
+node, qrandao, proposer, qgraffiti, qhead, qslot)
 if res.isErr():
 return RestApiResponse.jsonError(Http400, res.error())
 return responsePlain(res.get())
@@ -204,19 +204,18 @@ proc slash_validator*(
 get_slashing_penalty(state, validator.effective_balance))

 # The rest doesn't make sense without there being any proposer index, so skip
-let proposer_index = get_beacon_proposer_index(state, cache)
-if proposer_index.isNone:
+let proposer_index = get_beacon_proposer_index(state, cache).valueOr:
 debug "No beacon proposer index and probably no active validators"
 return ok()

 # Apply proposer and whistleblower rewards
 let
 # Spec has whistleblower_index as optional param, but it's never used.
-whistleblower_index = proposer_index.get
+whistleblower_index = proposer_index
 whistleblower_reward = get_whistleblower_reward(validator.effective_balance)
 proposer_reward = get_proposer_reward(state, whistleblower_reward)

-increase_balance(state, proposer_index.get, proposer_reward)
+increase_balance(state, proposer_index, proposer_reward)
 # TODO: evaluate if spec bug / underflow can be triggered
 doAssert(whistleblower_reward >= proposer_reward, "Spec bug: underflow in slash_validator")
 increase_balance(

@@ -737,8 +736,7 @@ proc process_attestation*(
 # https://github.com/nim-lang/Nim/issues/18202 means that this being called
 # by process_operations() in state_transition_block fails that way.

-let proposer_index = get_beacon_proposer_index(state, cache)
-if proposer_index.isNone:
+let proposer_index = get_beacon_proposer_index(state, cache).valueOr:
 return err("process_attestation: no beacon proposer index and probably no active validators")

 ? check_attestation(state, attestation, flags, cache)

@@ -754,13 +752,13 @@ proc process_attestation*(
 assign(pa[].aggregation_bits, attestation.aggregation_bits)
 pa[].data = attestation.data
 pa[].inclusion_delay = state.slot - attestation.data.slot
-pa[].proposer_index = proposer_index.get().uint64
+pa[].proposer_index = proposer_index.uint64

 # Altair, Bellatrix, and Capella
 template updateParticipationFlags(epoch_participation: untyped) =
 let proposer_reward = get_proposer_reward(
 state, attestation, base_reward_per_increment, cache, epoch_participation)
-increase_balance(state, proposer_index.get, proposer_reward)
+increase_balance(state, proposer_index, proposer_reward)

 when state is phase0.BeaconState:
 doAssert base_reward_per_increment == 0.Gwei
@@ -27,8 +27,6 @@ else:
 {.push raises: [].}

 import
-# Standard library
-std/options,
 # Status
 stew/[endians2, objects, results, byteutils],
 blscurve,

@@ -42,7 +40,7 @@ from std/tables import Table, withValue, `[]=`

 from nimcrypto/utils import burnMem

-export options, results, blscurve, rand, json_serialization
+export results, blscurve, rand, json_serialization

 # Type definitions
 # ----------------------------------------------------------------------
@@ -122,21 +120,21 @@ func toPubKey*(pubKey: CookedPubKey): ValidatorPubKey =
 # Un-specced in either hash-to-curve or Eth2
 ValidatorPubKey(blob: pubKey.toRaw())

-func load*(v: ValidatorPubKey): Option[CookedPubKey] =
+func load*(v: ValidatorPubKey): Opt[CookedPubKey] =
 ## Parse signature blob - this may fail
 var val: blscurve.PublicKey
 if fromBytes(val, v.blob):
-some CookedPubKey(val)
+Opt.some CookedPubKey(val)
 else:
-none CookedPubKey
+Opt.none CookedPubKey

-func load*(v: UncompressedPubKey): Option[CookedPubKey] =
+func load*(v: UncompressedPubKey): Opt[CookedPubKey] =
 ## Parse signature blob - this may fail
 var val: blscurve.PublicKey
 if fromBytes(val, v.blob):
-some CookedPubKey(val)
+Opt.some CookedPubKey(val)
 else:
-none CookedPubKey
+Opt.none CookedPubKey

 func loadValid*(v: UncompressedPubKey | ValidatorPubKey): CookedPubKey {.noinit.} =
 ## Parse known-to-be-valid key - this is the case for any key that's passed

@@ -148,7 +146,7 @@ func loadValid*(v: UncompressedPubKey | ValidatorPubKey): CookedPubKey {.noinit.

 CookedPubKey(val)

-proc loadWithCache*(v: ValidatorPubKey): Option[CookedPubKey] =
+proc loadWithCache*(v: ValidatorPubKey): Opt[CookedPubKey] =
 ## Parse public key blob - this may fail - this function uses a cache to
 ## avoid the expensive deserialization - for now, external public keys only
 ## come from deposits in blocks - when more sources are added, the memory

@@ -158,7 +156,7 @@ proc loadWithCache*(v: ValidatorPubKey): Option[CookedPubKey] =
 # Try to get parse value from cache - if it's not in there, try to parse it -
 # if that's not possible, it's broken
 cache.withValue(v.blob, key) do:
-return some key[]
+return Opt.some key[]
 do:
 # Only valid keys are cached
 let cooked = v.load()

@@ -166,13 +164,13 @@ proc loadWithCache*(v: ValidatorPubKey): Option[CookedPubKey] =
 cache[v.blob] = cooked.get()
 return cooked

-func load*(v: ValidatorSig): Option[CookedSig] =
+func load*(v: ValidatorSig): Opt[CookedSig] =
 ## Parse signature blob - this may fail
 var parsed: blscurve.Signature
 if fromBytes(parsed, v.blob):
-some(CookedSig(parsed))
+Opt.some(CookedSig(parsed))
 else:
-none(CookedSig)
+Opt.none(CookedSig)

 func init*(agg: var AggregatePublicKey, pubkey: CookedPubKey) {.inline.}=
 ## Initializes an aggregate signature context
@@ -236,7 +236,7 @@ type
 ## Sync committees corresponding to the finalized header
 next_sync_committee*: SyncCommittee

-best_valid_update*: Option[LightClientUpdate]
+best_valid_update*: Opt[LightClientUpdate]
 ## Best available header to switch finalized head to if we see nothing else

 optimistic_header*: BeaconBlockHeader

@@ -408,7 +408,7 @@ type
 StateCache* = object
 total_active_balance*: Table[Epoch, Gwei]
 shuffled_active_validator_indices*: Table[Epoch, seq[ValidatorIndex]]
-beacon_proposer_indices*: Table[Slot, Option[ValidatorIndex]]
+beacon_proposer_indices*: Table[Slot, Opt[ValidatorIndex]]
 sync_committees*: Table[SyncCommitteePeriod, SyncCommitteeCache]

 # This matches the mutable state of the Solidity deposit contract

@@ -526,7 +526,7 @@ type
 current_epoch_effective_balance*: Gwei

 # True if the validator had an attestation included in the _previous_ epoch.
-is_previous_epoch_attester*: Option[InclusionInfo]
+is_previous_epoch_attester*: Opt[InclusionInfo]

 # Total rewards and penalties for this validator
 delta*: RewardDelta
@@ -893,11 +893,11 @@ proc readValue*(r: var JsonReader, T: type GraffitiBytes): T

 func load*(
 validators: openArray[ImmutableValidatorData2],
-index: ValidatorIndex | uint64): Option[CookedPubKey] =
+index: ValidatorIndex | uint64): Opt[CookedPubKey] =
 if validators.lenu64() <= index.uint64:
-none(CookedPubKey)
+Opt.none(CookedPubKey)
 else:
-some(validators[index.int].pubkey)
+Opt.some(validators[index.int].pubkey)

 template hash*(header: BeaconBlockHeader): Hash =
 hash(header.state_root)

@@ -223,7 +223,7 @@ proc process_light_client_update*(
 # if the timeout elapses
 if store.best_valid_update.isNone or
 is_better_update(update, store.best_valid_update.get):
-store.best_valid_update = some(update.toFull)
+store.best_valid_update = Opt.some(update.toFull)
 didProgress = true

 # Track the maximum number of active participants in the committee signatures
@@ -191,16 +191,16 @@ func getTargetGossipState*(
 doAssert len(targetForks) <= 2
 targetForks

-func nearSyncCommitteePeriod*(epoch: Epoch): Option[uint64] =
+func nearSyncCommitteePeriod*(epoch: Epoch): Opt[uint64] =
 # https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.0/specs/altair/validator.md#sync-committee-subnet-stability
 if epoch.is_sync_committee_period():
-return some 0'u64
+return Opt.some 0'u64
 let epochsBefore =
 EPOCHS_PER_SYNC_COMMITTEE_PERIOD - epoch.since_sync_committee_period_start()
 if epoch.is_sync_committee_period() or epochsBefore <= SYNC_COMMITTEE_SUBNET_COUNT:
-return some epochsBefore
+return Opt.some epochsBefore

-none(uint64)
+Opt.none(uint64)

 func getSyncSubnets*(
 nodeHasPubkey: proc(pubkey: ValidatorPubKey):

@@ -216,10 +216,9 @@ proc verify_deposit_signature*(preset: RuntimeConfig,
 # Deposits come with compressed public keys; uncompressing them is expensive.
 # `blsVerify` fills an internal cache when using `ValidatorPubKey`.
 # To avoid filling this cache unnecessarily, uncompress explicitly here.
-let pubkey = deposit.pubkey.load() # Loading the pubkey is slow!
-if pubkey.isNone:
+let pubkey = deposit.pubkey.load().valueOr: # Loading the pubkey is slow!
 return false
-verify_deposit_signature(preset, deposit, pubkey.get)
+verify_deposit_signature(preset, deposit, pubkey)

 func compute_voluntary_exit_signing_root*(
 fork: Fork, genesis_validators_root: Eth2Digest,
@@ -49,15 +49,6 @@ func `$`*(s: SignatureSet): string =
 # there is no guarantee that pubkeys and signatures received are valid
 # unlike when Nimbus did eager loading which ensured they were correct beforehand

-template loadOrExit(signature: ValidatorSig, error: cstring):
-untyped =
-## Load a BLS signature from a raw signature
-## Exits the **caller** with false if the signature is invalid
-let sig = signature.load()
-if sig.isNone:
-return err(error) # this exits the calling scope, as templates are inlined.
-sig.unsafeGet()
-
 func init(T: type SignatureSet,
 pubkey: CookedPubKey, signing_root: Eth2Digest,
 signature: CookedSig): T =
@@ -81,19 +72,16 @@ proc aggregateAttesters(
 return err("aggregateAttesters: no attesting indices")

 let
-firstKey = validatorKeys.load(validatorIndices[0])
-
-if not firstKey.isSome():
-return err("aggregateAttesters: invalid attesting index")
+firstKey = validatorKeys.load(validatorIndices[0]).valueOr:
+return err("aggregateAttesters: invalid attesting index")

 var attestersAgg{.noinit.}: AggregatePublicKey

-attestersAgg.init(firstKey.get())
+attestersAgg.init(firstKey)
 for i in 1 ..< validatorIndices.len:
-let key = validatorKeys.load(validatorIndices[i])
-if not key.isSome():
+let key = validatorKeys.load(validatorIndices[i]).valueOr:
 return err("aggregateAttesters: invalid attesting index")
-attestersAgg.aggregate(key.get())
+attestersAgg.aggregate(key)

 ok(finish(attestersAgg))

@@ -114,13 +102,12 @@ proc aggregateAttesters(
 var inited = false
 for i in 0..<bits.len:
 if bits[i]:
-let key = validatorKeys.load(validatorIndices[i])
-if not key.isSome():
+let key = validatorKeys.load(validatorIndices[i]).valueOr:
 return err("aggregateAttesters: invalid attesting index")
 if inited:
-attestersAgg.aggregate(key.get())
+attestersAgg.aggregate(key)
 else:
-attestersAgg = AggregatePublicKey.init(key.get)
+attestersAgg = AggregatePublicKey.init(key)
 inited = true

 if not inited:
@@ -249,26 +236,24 @@ proc collectSignatureSets*(

 let
 proposer_index = signed_block.message.proposer_index
-proposer_key = validatorKeys.load(proposer_index)
-if not proposer_key.isSome():
-return err("collectSignatureSets: invalid proposer index")
-
-let epoch = signed_block.message.slot.epoch()
+proposer_key = validatorKeys.load(proposer_index).valueOr:
+return err("collectSignatureSets: invalid proposer index")
+epoch = signed_block.message.slot.epoch()

 # 1. Block proposer
 # ----------------------------------------------------
 sigs.add block_signature_set(
 fork, genesis_validators_root,
 signed_block.message.slot, signed_block.root,
-proposer_key.get(), signed_block.signature.loadOrExit(
-"collectSignatureSets: cannot load signature"))
+proposer_key, signed_block.signature.load.valueOr do:
+return err("collectSignatureSets: cannot load signature"))

 # 2. Randao Reveal
 # ----------------------------------------------------
 sigs.add epoch_signature_set(
-fork, genesis_validators_root, epoch, proposer_key.get(),
-signed_block.message.body.randao_reveal.loadOrExit(
-"collectSignatureSets: cannot load randao"))
+fork, genesis_validators_root, epoch, proposer_key,
+signed_block.message.body.randao_reveal.load().valueOr do:
+return err("collectSignatureSets: cannot load randao"))

 # 3. Proposer slashings
 # ----------------------------------------------------
@@ -288,27 +273,27 @@ proc collectSignatureSets*(
 block:
 let
 header = slashing.signed_header_1
-key = validatorKeys.load(header.message.proposer_index)
-if not key.isSome():
-return err("collectSignatureSets: invalid slashing proposer index 1")
+key = validatorKeys.load(header.message.proposer_index).valueOr:
+return err("collectSignatureSets: invalid slashing proposer index 1")

 sigs.add block_signature_set(
 fork, genesis_validators_root, header.message.slot, header.message,
-key.get(), header.signature.loadOrExit(
-"collectSignatureSets: cannot load proposer slashing 1 signature"))
+key, header.signature.load().valueOr do:
+return err(
+"collectSignatureSets: cannot load proposer slashing 1 signature"))

 # Conflicting block 2
 block:
 let
 header = slashing.signed_header_2
-key = validatorKeys.load(header.message.proposer_index)
-if not key.isSome():
-return err("collectSignatureSets: invalid slashing proposer index 2")
+key = validatorKeys.load(header.message.proposer_index).valueOr:
+return err("collectSignatureSets: invalid slashing proposer index 2")

 sigs.add block_signature_set(
 fork, genesis_validators_root, header.message.slot, header.message,
-key.get(), header.signature.loadOrExit(
-"collectSignatureSets: cannot load proposer slashing 2 signature"))
+key, header.signature.load().valueOr do:
+return err(
+"collectSignatureSets: cannot load proposer slashing 2 signature"))

 # 4. Attester slashings
 # ----------------------------------------------------
@@ -329,7 +314,8 @@ proc collectSignatureSets*(
 let
 key = ? aggregateAttesters(
 slashing.attestation_1.attesting_indices.asSeq(), validatorKeys)
-sig = slashing.attestation_1.signature.loadOrExit("")
+sig = slashing.attestation_1.signature.load().valueOr:
+return err("Invalid attestation slashing signature 1")
 sigs.add attestation_signature_set(
 fork, genesis_validators_root, slashing.attestation_1.data, key, sig)

@@ -338,7 +324,8 @@ proc collectSignatureSets*(
 let
 key = ? aggregateAttesters(
 slashing.attestation_2.attesting_indices.asSeq(), validatorKeys)
-sig = slashing.attestation_2.signature.loadOrExit("")
+sig = slashing.attestation_2.signature.load().valueOr:
+return err("Invalid attestation slashing signature 2")
 sigs.add attestation_signature_set(
 fork, genesis_validators_root, slashing.attestation_2.data, key, sig)

@@ -359,7 +346,8 @@ proc collectSignatureSets*(
 get_attesting_indices(
 state, attestation.data, attestation.aggregation_bits, cache),
 validatorKeys)
-sig = attestation.signature.loadOrExit("")
+sig = attestation.signature.load().valueOr:
+return err("Invalid attestation signature")

 sigs.add attestation_signature_set(
 fork, genesis_validators_root, attestation.data, key, sig)
@@ -375,14 +363,14 @@ proc collectSignatureSets*(
 # due to https://github.com/nim-lang/Nim/issues/14421
 # fixed in 1.4.2
 template volex: untyped = signed_block.message.body.voluntary_exits[i]
-let key = validatorKeys.load(volex.message.validator_index)
-if not key.isSome():
+let key = validatorKeys.load(volex.message.validator_index).valueOr:
 return err("collectSignatureSets: invalid voluntary exit")

 sigs.add voluntary_exit_signature_set(
-fork, genesis_validators_root, volex.message, key.get(),
-volex.signature.loadOrExit(
-"collectSignatureSets: cannot load voluntary exit signature"))
+fork, genesis_validators_root, volex.message, key,
+volex.signature.load.valueOr do:
+return err(
+"collectSignatureSets: cannot load voluntary exit signature"))

 block:
 when signed_block is phase0.SignedBeaconBlock:

@@ -409,8 +397,8 @@ proc collectSignatureSets*(
 sigs.add sync_committee_message_signature_set(
 fork, genesis_validators_root, previous_slot, beacon_block_root,
 pubkey,
-signed_block.message.body.sync_aggregate.sync_committee_signature.loadOrExit(
-"collectSignatureSets: cannot load signature"))
+signed_block.message.body.sync_aggregate.sync_committee_signature.load().valueOr do:
+return err("collectSignatureSets: cannot load signature"))

 ok()
@@ -49,11 +49,10 @@ func process_block_header*(
 return err("process_block_header: block not newer than latest block header")

 # Verify that proposer index is the correct index
-let proposer_index = get_beacon_proposer_index(state, cache)
-if proposer_index.isNone:
+let proposer_index = get_beacon_proposer_index(state, cache).valueOr:
 return err("process_block_header: proposer missing")

-if not (blck.proposer_index == proposer_index.get):
+if not (blck.proposer_index == proposer_index):
 return err("process_block_header: proposer index incorrect")

 # Verify that the parent matches

@@ -84,16 +83,14 @@ proc process_randao(
 state: var ForkyBeaconState, body: SomeForkyBeaconBlockBody, flags: UpdateFlags,
 cache: var StateCache): Result[void, cstring] =
 let
-proposer_index = get_beacon_proposer_index(state, cache)
-
-if proposer_index.isNone:
-return err("process_randao: proposer index missing, probably along with any active validators")
+proposer_index = get_beacon_proposer_index(state, cache).valueOr:
+return err("process_randao: proposer index missing, probably along with any active validators")

 # Verify RANDAO reveal
 let epoch = state.get_current_epoch()

 if skipBlsValidation notin flags and body.randao_reveal isnot TrustedSig:
-let proposer_pubkey = state.validators.item(proposer_index.get).pubkey
+let proposer_pubkey = state.validators.item(proposer_index).pubkey

 # `state_transition.makeBeaconBlock` ensures this is run with a trusted
 # signature, but unless the full skipBlsValidation is specified, RANDAO
@@ -512,11 +509,9 @@ proc process_sync_aggregate*(
 let
 participant_reward = get_participant_reward(total_active_balance)
 proposer_reward = state_transition_block.get_proposer_reward(participant_reward)
-proposer_index = get_beacon_proposer_index(state, cache)
-
-if proposer_index.isNone:
-# We're processing a block, so this can't happen, in theory (!)
-return err("process_sync_aggregate: no proposer")
+proposer_index = get_beacon_proposer_index(state, cache).valueOr:
+# We're processing a block, so this can't happen, in theory (!)
+return err("process_sync_aggregate: no proposer")

 # Apply participant and proposer rewards
 let indices = get_sync_committee_cache(state, cache).current_sync_committee

@@ -528,7 +523,7 @@ proc process_sync_aggregate*(
 let participant_index = indices[i]
 if sync_aggregate.sync_committee_bits[i]:
 increase_balance(state, participant_index, participant_reward)
-increase_balance(state, proposer_index.get, proposer_reward)
+increase_balance(state, proposer_index, proposer_reward)
 else:
 decrease_balance(state, participant_index, participant_reward)
@@ -93,7 +93,7 @@ func process_attestation(
 # Collect information about the attestation
 var
 flags: set[RewardFlags]
-is_previous_epoch_attester: Option[InclusionInfo]
+is_previous_epoch_attester: Opt[InclusionInfo]

 if a.data.target.epoch == state.get_current_epoch():
 flags.incl RewardFlags.isCurrentEpochAttester

@@ -102,7 +102,7 @@ func process_attestation(
 flags.incl RewardFlags.isCurrentEpochTargetAttester

 elif a.data.target.epoch == state.get_previous_epoch():
-is_previous_epoch_attester = some(InclusionInfo(
+is_previous_epoch_attester = Opt.some(InclusionInfo(
 delay: a.inclusion_delay,
 proposer_index: a.proposer_index,
 ))

@@ -551,7 +551,7 @@ func get_head_delta*(validator: RewardStatus,

 func get_inclusion_delay_delta*(validator: RewardStatus,
 base_reward: uint64):
-(RewardDelta, Option[(uint64, RewardDelta)]) =
+(RewardDelta, Opt[(uint64, RewardDelta)]) =
 ## Return proposer and inclusion delay micro-rewards/penalties for each validator.
 if validator.is_previous_epoch_attester.isSome() and ((not validator.flags.contains(RewardFlags.isSlashed))):
 let

@@ -563,7 +563,7 @@ func get_inclusion_delay_delta*(validator: RewardStatus,
 max_attester_reward = base_reward - proposer_reward
 delta = RewardDelta(rewards: max_attester_reward div inclusion_info.delay)
 proposer_index = inclusion_info.proposer_index;
-return (delta, some((proposer_index, proposer_delta)))
+return (delta, Opt.some((proposer_index, proposer_delta)))

 func get_inactivity_penalty_delta*(validator: RewardStatus,
 base_reward: Gwei,
@@ -332,12 +332,12 @@ func compute_shuffled_index*(

 # https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.0/specs/phase0/beacon-chain.md#compute_proposer_index
 func compute_proposer_index(state: ForkyBeaconState,
-indices: seq[ValidatorIndex], seed: Eth2Digest): Option[ValidatorIndex] =
+indices: seq[ValidatorIndex], seed: Eth2Digest): Opt[ValidatorIndex] =
 ## Return from ``indices`` a random index sampled by effective balance.
 const MAX_RANDOM_BYTE = 255

 if len(indices) == 0:
-return none(ValidatorIndex)
+return Opt.none(ValidatorIndex)

 let seq_len = indices.lenu64

@@ -354,20 +354,20 @@ func compute_proposer_index(state: ForkyBeaconState,
 effective_balance = state.validators[candidate_index].effective_balance
 if effective_balance * MAX_RANDOM_BYTE >=
 MAX_EFFECTIVE_BALANCE * random_byte:
-return some(candidate_index)
+return Opt.some(candidate_index)
 i += 1

 # https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.0/specs/phase0/beacon-chain.md#get_beacon_proposer_index
 func get_beacon_proposer_index*(
 state: ForkyBeaconState, cache: var StateCache, slot: Slot):
-Option[ValidatorIndex] =
+Opt[ValidatorIndex] =
 let epoch = get_current_epoch(state)

 if slot.epoch() != epoch:
 # compute_proposer_index depends on `effective_balance`, therefore the
 # beacon proposer index can only be computed for the "current" epoch:
 # https://github.com/ethereum/consensus-specs/pull/772#issuecomment-475574357
-return none(ValidatorIndex)
+return Opt.none(ValidatorIndex)

 cache.beacon_proposer_indices.withValue(slot, proposer) do:
 return proposer[]
@@ -384,7 +384,7 @@ func get_beacon_proposer_index*(
 # quite a while
 indices = get_active_validator_indices(state, epoch)

-var res: Option[ValidatorIndex]
+var res: Opt[ValidatorIndex]
 for epoch_slot in epoch.slots():
 buffer[32..39] = uint_to_bytes(epoch_slot.asUInt64)
 let seed = eth2digest(buffer)

@@ -397,12 +397,12 @@ func get_beacon_proposer_index*(

 # https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.0/specs/phase0/beacon-chain.md#get_beacon_proposer_index
 func get_beacon_proposer_index*(state: ForkyBeaconState, cache: var StateCache):
-Option[ValidatorIndex] =
+Opt[ValidatorIndex] =
 get_beacon_proposer_index(state, cache, state.slot)

 func get_beacon_proposer_index*(state: ForkedHashedBeaconState,
 cache: var StateCache, slot: Slot):
-Option[ValidatorIndex] =
+Opt[ValidatorIndex] =
 withState(state):
 get_beacon_proposer_index(forkyState.data, cache, slot)
@@ -149,12 +149,9 @@ func init*(T: type KeystoreData,

 func init*(T: type KeystoreData, keystore: RemoteKeystore,
 handle: FileLockHandle): Result[T, cstring] {.raises: [Defect].} =
-let cookedKey =
-block:
-let res = keystore.pubkey.load()
-if res.isNone():
+let cookedKey = keystore.pubkey.load().valueOr:
 return err("Invalid validator's public key")
-res.get()

 ok(KeystoreData(
 kind: KeystoreKind.Remote,
 handle: handle,
@@ -670,13 +667,12 @@ iterator listLoadableKeys*(validatorsDir, secretsDir: string,
 continue
 let publicKey = kres.get()

-let cres = publicKey.load()
-if cres.isNone():
+let cres = publicKey.load().valueOr:
 # Skip folders which has invalid ValidatorPubKey
 # (point is not on curve).
 continue

-yield cres.get()
+yield cres

 except OSError as err:
 error "Validator keystores directory not accessible",

@@ -1208,14 +1204,10 @@ proc importKeystore*(pool: var ValidatorPool,
 keystoreFile = keystoreDir / RemoteKeystoreFileName

 # We check `publicKey`.
-let cookedKey =
-block:
-let res = publicKey.load()
-if res.isNone():
+let cookedKey = publicKey.load().valueOr:
 return err(
 AddValidatorFailure.init(AddValidatorStatus.failed,
 "Invalid validator's public key"))
-res.get()

 # We check `publicKey` in memory storage first.
 if publicKey in pool:
@@ -137,11 +137,9 @@ proc getAttachedValidator(node: BeaconNode,
 proc getValidatorForDuties*(
 node: BeaconNode,
 idx: ValidatorIndex, slot: Slot): Opt[AttachedValidator] =
-let key = node.dag.validatorKey(idx)
-if key.isNone:
-return Opt.none(AttachedValidator)
+let key = ? node.dag.validatorKey(idx)

-node.attachedValidators[].getValidatorForDuties(key.get().toPubKey(), slot)
+node.attachedValidators[].getValidatorForDuties(key.toPubKey(), slot)

 proc isSynced*(node: BeaconNode, head: BlockRef): SyncStatus =
 ## TODO This function is here as a placeholder for some better heurestics to
@@ -1122,20 +1120,18 @@ proc handleProposal(node: BeaconNode, head: BlockRef, slot: Slot):
 ## that is supposed to do so, given the shuffling at that slot for the given
 ## head - to compute the proposer, we need to advance a state to the given
 ## slot
-let proposer = node.dag.getProposer(head, slot)
-if proposer.isNone():
-return head
-
 let
-proposerKey = node.dag.validatorKey(proposer.get()).get().toPubKey
-validator = node.getValidatorForDuties(proposer.get(), slot).valueOr:
+proposer = node.dag.getProposer(head, slot).valueOr:
+return head
+proposerKey = node.dag.validatorKey(proposer).get().toPubKey
+validator = node.getValidatorForDuties(proposer, slot).valueOr:
 debug "Expecting block proposal", headRoot = shortLog(head.root),
 slot = shortLog(slot),
-proposer_index = proposer.get(),
+proposer_index = proposer,
 proposer = shortLog(proposerKey)
 return head

-return await proposeBlock(node, validator, proposer.get(), head, slot)
+return await proposeBlock(node, validator, proposer, head, slot)

 proc signAndSendAggregate(
 node: BeaconNode, validator: AttachedValidator, shufflingRef: ShufflingRef,
@@ -143,7 +143,7 @@ func initialize_light_client_store(state: auto): LightClientStore =
 finalized_header: BeaconBlockHeader(),
 current_sync_committee: state.current_sync_committee,
 next_sync_committee: state.next_sync_committee,
-best_valid_update: none(altair.LightClientUpdate),
+best_valid_update: Opt.none(altair.LightClientUpdate),
 optimistic_header: BeaconBlockHeader(),
 previous_max_active_participants: 0,
 current_max_active_participants: 0,

@@ -10,7 +10,7 @@
 import
 # Standard library
 os, strutils, streams, strformat,
-macros, sets,
+macros,
 # Third-party
 yaml,
 # Beacon chain internals
@@ -155,8 +155,8 @@ proc addTestBlockAux[EP: bellatrix.ExecutionPayload | capella.ExecutionPayload](

 let
 proposer_index = get_beacon_proposer_index(
-state, cache, getStateField(state, slot))
-privKey = MockPrivKeys[proposer_index.get]
+state, cache, getStateField(state, slot)).expect("valid proposer index")
+privKey = MockPrivKeys[proposer_index]
 randao_reveal =
 if skipBlsValidation notin flags:
 get_epoch_signature(

@@ -189,7 +189,7 @@ proc addTestBlockAux[EP: bellatrix.ExecutionPayload | capella.ExecutionPayload](
 message = makeBeaconBlock(
 cfg,
 state,
-proposer_index.get(),
+proposer_index,
 randao_reveal,
 # Keep deposit counts internally consistent.
 Eth1Data(