Mirror of https://github.com/status-im/nimbus-eth2.git
write uncompressed validator keys to database (#2639)
* write uncompressed validator keys to database

  Loading 150k+ validator keys on startup in compressed format takes a lot of time - better store them in uncompressed format which makes behaviour just after startup faster / more predictable.

* refactor cached validator key access
* fix isomorphic cast to work with non-var instances
* remove cooked pubkey cache - directly use database cache in chaindag as well (one less cache to keep in sync)
* bump blscurve, introduce loadValid for known-to-be-valid keys
This commit is contained in: parent 5974fb0e7d, commit d859bc12f0
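As a rough illustration of the change described in the commit message (not part of the commit itself - the proc name is made up), keys are validated once via the expensive compressed-path load, stored as 96-byte uncompressed blobs, and re-cooked cheaply with loadValid on later startups:

proc storeAndReload(validator: Validator): CookedPubKey =
  # One-time, slow: full on-curve/subgroup validation of the 48-byte blob
  let cooked = validator.pubkey.load()
  doAssert cooked.isSome, "state should never contain invalid keys"

  # What gets written to the immutable_validators2 table
  let row = ImmutableValidatorData2(
    pubkey: cooked.get().toUncompressed(),
    withdrawal_credentials: validator.withdrawal_credentials)

  # On every later startup: cheap, known-to-be-valid path
  row.pubkey.loadValid()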
@ -17,6 +17,8 @@ import
  ./ssz/[ssz_serialization, merkleization],
  ./filepath

export crypto

logScope: topics = "bc_db"

type
@ -26,7 +28,6 @@ type
    recordCount: int64

  DepositsSeq = DbSeq[DepositData]
  ImmutableValidatorsSeq = DbSeq[ImmutableValidatorData]

  DepositContractSnapshot* = object
    eth1Block*: Eth2Digest
@ -79,10 +80,10 @@ type
    preset*: RuntimePreset
    genesisDeposits*: DepositsSeq

    # ImmutableValidatorsSeq only stores the total count; it's a proxy for SQL
    # immutableValidatorsDb only stores the total count; it's a proxy for SQL
    # queries.
    immutableValidators*: ImmutableValidatorsSeq
    immutableValidatorsMem*: seq[ImmutableValidatorData]
    immutableValidatorsDb*: DbSeq[ImmutableValidatorData2]
    immutableValidators*: seq[ImmutableValidatorData2]

    checkpoint*: proc() {.gcsafe, raises: [Defect].}

@ -255,6 +256,10 @@ proc loadImmutableValidators(vals: DbSeq[ImmutableValidatorData]): seq[Immutable
  for i in 0 ..< vals.len:
    result.add vals.get(i)

proc loadImmutableValidators(vals: DbSeq[ImmutableValidatorData2]): seq[ImmutableValidatorData2] =
  for i in 0 ..< vals.len:
    result.add vals.get(i)

proc new*(T: type BeaconChainDB,
          preset: RuntimePreset,
          dir: string,
@ -289,8 +294,8 @@ proc new*(T: type BeaconChainDB,

    genesisDepositsSeq =
      DbSeq[DepositData].init(db, "genesis_deposits").expectDb()
    immutableValidatorsSeq =
      DbSeq[ImmutableValidatorData].init(db, "immutable_validators").expectDb()
    immutableValidatorsDb =
      DbSeq[ImmutableValidatorData2].init(db, "immutable_validators2").expectDb()

    # V1 - expected-to-be small rows get without rowid optimizations
    keyValues = kvStore db.openKvStore("key_values", true).expectDb()
@ -300,6 +305,24 @@ proc new*(T: type BeaconChainDB,
    stateDiffs = kvStore db.openKvStore("state_diffs").expectDb()
    summaries = kvStore db.openKvStore("beacon_block_summaries", true).expectDb()

  # `immutable_validators` stores validator keys in compressed format - this is
  # slow to load and has been superseded by `immutable_validators2` which uses
  # uncompressed keys instead. The migration is lossless but the old table
  # should not be removed until after altair, to permit downgrades.
  let immutableValidatorsDb1 =
    DbSeq[ImmutableValidatorData].init(db, "immutable_validators").expectDb()

  if immutableValidatorsDb.len() < immutableValidatorsDb1.len():
    notice "Migrating validator keys, this may take a minute",
      len = immutableValidatorsDb1.len()
    while immutableValidatorsDb.len() < immutableValidatorsDb1.len():
      let val = immutableValidatorsDb1.get(immutableValidatorsDb.len())
      immutableValidatorsDb.add(ImmutableValidatorData2(
        pubkey: val.pubkey.loadValid().toUncompressed(),
        withdrawal_credentials: val.withdrawal_credentials
      ))
  immutableValidatorsDb1.close()

  T(
    db: db,
    v0: BeaconChainDBV0(
@ -308,8 +331,8 @@ proc new*(T: type BeaconChainDB,
    ),
    preset: preset,
    genesisDeposits: genesisDepositsSeq,
    immutableValidators: immutableValidatorsSeq,
    immutableValidatorsMem: loadImmutableValidators(immutableValidatorsSeq),
    immutableValidatorsDb: immutableValidatorsDb,
    immutableValidators: loadImmutableValidators(immutableValidatorsDb),
    checkpoint: proc() = db.checkpoint(),
    keyValues: keyValues,
    blocks: blocks,
@ -429,7 +452,7 @@ proc close*(db: BeaconchainDB) =
  discard db.stateRoots.close()
  discard db.blocks.close()
  discard db.keyValues.close()
  db.immutableValidators.close()
  db.immutableValidatorsDb.close()
  db.genesisDeposits.close()
  db.v0.close()
  db.db.close()
@ -451,32 +474,24 @@ proc putBlock*(db: BeaconChainDB, value: TrustedSignedBeaconBlock) =
  db.blocks.putSnappySSZ(value.root.data, value)
  db.putBeaconBlockSummary(value.root, value.message.toBeaconBlockSummary())

proc updateImmutableValidators(
    db: BeaconChainDB, immutableValidators: var seq[ImmutableValidatorData],
    validators: auto) =
  let
    numValidators = validators.lenu64
    origNumImmutableValidators = immutableValidators.lenu64
proc updateImmutableValidators*(
    db: BeaconChainDB, validators: openArray[Validator]) =
  # Must be called before storing a state that references the new validators
  let numValidators = validators.len

  doAssert immutableValidators.len == db.immutableValidators.len

  if numValidators <= origNumImmutableValidators:
    return

  for validatorIndex in origNumImmutableValidators ..< numValidators:
    # This precedes state storage
  while db.immutableValidators.len() < numValidators:
    let immutableValidator =
      getImmutableValidatorData(validators[validatorIndex])
      getImmutableValidatorData(validators[db.immutableValidators.len()])
    db.immutableValidatorsDb.add immutableValidator
    db.immutableValidators.add immutableValidator
    immutableValidators.add immutableValidator

proc putState*(db: BeaconChainDB, key: Eth2Digest, value: var BeaconState) =
  db.updateImmutableValidators(db.immutableValidatorsMem, value.validators)
proc putState*(db: BeaconChainDB, key: Eth2Digest, value: BeaconState) =
  db.updateImmutableValidators(value.validators.asSeq())
  db.statesNoVal.putSnappySSZ(
    key.data,
    isomorphicCast[BeaconStateNoImmutableValidators](value))

proc putState*(db: BeaconChainDB, value: var BeaconState) =
proc putState*(db: BeaconChainDB, value: BeaconState) =
  db.putState(hash_tree_root(value), value)

func stateRootKey(root: Eth2Digest, slot: Slot): array[40, byte] =
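A small hedged sketch (the caller name is hypothetical) of what dropping `var` from the putState signature allows - states that are only readable, for example bound with `let`, can now be persisted directly:

proc persistSnapshot(db: BeaconChainDB, state: BeaconState) =
  # `state` is not `var` here - putState no longer requires mutable access
  # because isomorphicCast now works on non-var instances (see below)
  db.putState(state)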
@ -539,7 +554,7 @@ proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Opt[TrustedSignedBeaconBlock
  result.get().root = key

proc getStateOnlyMutableValidators(
    immutableValidatorsMem: openArray[ImmutableValidatorData],
    immutableValidators: openArray[ImmutableValidatorData2],
    store: KvStoreRef, key: openArray[byte], output: var BeaconState,
    rollback: RollbackProc): bool =
  ## Load state into `output` - BeaconState is large so we want to avoid
@ -557,16 +572,19 @@ proc getStateOnlyMutableValidators(
      key, isomorphicCast[BeaconStateNoImmutableValidators](output))
  of GetResult.found:
    let numValidators = output.validators.len
    doAssert immutableValidatorsMem.len >= numValidators
    doAssert immutableValidators.len >= numValidators

    for i in 0 ..< numValidators:
      let
        # Bypass hash cache invalidation
        dstValidator = addr output.validators.data[i]

      assign(dstValidator.pubkey, immutableValidatorsMem[i].pubkey)
      assign(dstValidator.withdrawal_credentials,
        immutableValidatorsMem[i].withdrawal_credentials)
      assign(
        dstValidator.pubkey,
        immutableValidators[i].pubkey.loadValid().toPubKey())
      assign(
        dstValidator.withdrawal_credentials,
        immutableValidators[i].withdrawal_credentials)

    output.validators.resetCache()

@ -579,7 +597,7 @@ proc getStateOnlyMutableValidators(

proc getState(
    db: BeaconChainDBV0,
    immutableValidatorsMem: openArray[ImmutableValidatorData],
    immutableValidators: openArray[ImmutableValidatorData2],
    key: Eth2Digest, output: var BeaconState,
    rollback: RollbackProc): bool =
  # Nimbus 1.0 reads and writes genesis BeaconState to `backend`
@ -589,11 +607,11 @@ proc getState(
  # and reads BeaconState from `backend` and BeaconStateNoImmutableValidators
  # from `stateStore`. We will try to read the state from all these locations.
  if getStateOnlyMutableValidators(
      immutableValidatorsMem, db.stateStore,
      immutableValidators, db.stateStore,
      subkey(BeaconStateNoImmutableValidators, key), output, rollback):
    return true
  if getStateOnlyMutableValidators(
      immutableValidatorsMem, db.backend,
      immutableValidators, db.backend,
      subkey(BeaconStateNoImmutableValidators, key), output, rollback):
    return true

@ -620,8 +638,8 @@ proc getState*(
  # TODO RVO is inefficient for large objects:
  # https://github.com/nim-lang/Nim/issues/13879
  if not getStateOnlyMutableValidators(
      db.immutableValidatorsMem, db.statesNoVal, key.data, output, rollback):
    db.v0.getState(db.immutableValidatorsMem, key, output, rollback)
      db.immutableValidators, db.statesNoVal, key.data, output, rollback):
    db.v0.getState(db.immutableValidators, key, output, rollback)
  else:
    true

@ -77,7 +77,7 @@ func getSizeofSig(x: auto, n: int = 0): seq[(string, int, int)] =
      result.add getSizeofSig(value, n + 1)
    result.add((name, sizeof(value), n))

template isomorphicCast*[T, U](x: var U): T =
template isomorphicCast*[T, U](x: U): T =
  # Each of these pairs of types has ABI-compatible memory representations, so
  # that the SSZ serialization can read and write directly from an object with
  # only mutable portions of BeaconState into a full BeaconState without using
@ -85,4 +85,4 @@ template isomorphicCast*[T, U](x: var U): T =
  static:
    doAssert sizeof(T) == sizeof(U)
    doAssert getSizeofSig(T()) == getSizeofSig(U())
  cast[ref T](addr x)[]
  cast[ptr T](unsafeAddr x)[]
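A brief illustration (not from the commit, types and names are made up) of what switching the template from `var U` plus `addr` to plain `U` plus `unsafeAddr` enables - the cast can now be applied to immutable, `let`-bound values of ABI-compatible types:

type
  A = object
    x: int
    y: int
  B = object
    x: int
    y: int

template isoCastDemo[T, U](v: U): T =
  # same idea as isomorphicCast: reinterpret an ABI-compatible object in place
  cast[ptr T](unsafeAddr v)[]

let a = A(x: 1, y: 2)       # a non-var instance; unsafeAddr does not need mutability
echo isoCastDemo[B](a).y    # prints 2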
@ -250,7 +250,7 @@ proc addRawBlockKnownParent(

  var sigs: seq[SignatureSet]
  if sigs.collectSignatureSets(
      signedBlock, dag.validatorKeys, dag.clearanceState, cache).isErr():
      signedBlock, dag.db.immutableValidators, dag.clearanceState, cache).isErr():
    # A PublicKey or Signature isn't on the BLS12-381 curve
    return err((ValidationResult.Reject, Invalid))
  if not quarantine.batchVerify(sigs):
@ -159,11 +159,6 @@ type
      ## block - we limit the number of held EpochRefs to put a cap on
      ## memory usage

    validatorKeys*: seq[CookedPubKey] ##\
      ## The deposit scheme in eth2 guarantees that validators are added in the
      ## same order regardless of which fork the chain takes - there may be more
      ## keys in the cache than there are validators in the head state however!

  EpochRef* = ref object
    dag*: ChainDAGRef
    epoch*: Epoch
@ -198,8 +193,6 @@ type
    blckRef: BlockRef, blck: TrustedSignedBeaconBlock,
    epochRef: EpochRef, state: HashedBeaconState) {.gcsafe, raises: [Defect].}

template validator_keys*(e: EpochRef): seq[CookedPubKey] = e.dag.validatorKeys

template head*(dag: ChainDagRef): BlockRef = dag.headState.blck

func shortLog*(v: BlockSlot): string =
@ -113,20 +113,28 @@ func get_effective_balances(validators: openArray[Validator], epoch: Epoch):
    result[i] = validator[].effective_balance

proc updateValidatorKeys*(dag: ChainDAGRef, validators: openArray[Validator]) =
  # Update validator key cache - must be called every time a state is loaded
  # from database or a block is applied successfully (from anywhere).
  # The validator key indexing is shared across all histories, and grows when a
  # validated block is added.
  while dag.validatorKeys.len() < validators.len():
    let key = validators[dag.validatorKeys.len()].pubkey.load()
    if not key.isSome():
      # State keys are verified when deposit is processed - a state should never
      # contain invalid keys
      fatal "Invalid pubkey in state",
        validator_index = dag.validatorKeys.len(),
        raw = toHex(validators[dag.validatorKeys.len()].pubkey.toRaw())
      quit 1
    dag.validatorKeys.add(key.get())
  # Update validator key cache - must be called every time a valid block is
  # applied to the state - this is important to ensure that when we sync blocks
  # without storing a state (non-epoch blocks essentially), the deposits from
  # those blocks are persisted to the in-database cache of immutable validator
  # data (but no earlier than that the whole block has been validated)
  dag.db.updateImmutableValidators(validators)

func validatorKey*(
    dag: ChainDAGRef, index: ValidatorIndex or uint64): Option[CookedPubKey] =
  ## Returns the validator pubkey for the index, assuming it's been observed
  ## at any point in time - this function may return pubkeys for indices that
  ## are not (yet) part of the head state (if the key has been observed on a
  ## non-head branch)!
  dag.db.immutableValidators.load(index)

func validatorKey*(
    epochRef: EpochRef, index: ValidatorIndex or uint64): Option[CookedPubKey] =
  ## Returns the validator pubkey for the index, assuming it's been observed
  ## at any point in time - this function may return pubkeys for indices that
  ## are not (yet) part of the head state (if the key has been observed on a
  ## non-head branch)!
  epochRef.dag.validatorKey(index)

func init*(
    T: type EpochRef, dag: ChainDAGRef, state: StateData,
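A small usage sketch (not part of the diff; the surrounding proc is hypothetical) showing the pattern call sites move to - instead of indexing a separate seq[CookedPubKey] cache, they ask the database-backed cache and handle the Option:

proc logProposerKey(dag: ChainDAGRef, index: uint64) =
  # validatorKey returns none() when the index has never been observed,
  # so bounds checks against a separate key cache are no longer needed
  let key = dag.validatorKey(index)
  if key.isSome():
    echo "proposer key: ", toHex(key.get().toPubKey())
  else:
    echo "unknown validator index: ", index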
@ -414,8 +422,6 @@ proc init*(T: type ChainDAGRef,

  doAssert dag.updateFlags in [{}, {verifyFinalization}]

  dag.updateValidatorKeys(getStateField(dag.headState, validators).asSeq())

  var cache: StateCache
  dag.updateStateData(dag.headState, headRef.atSlot(headRef.slot), false, cache)
  # We presently save states on the epoch boundary - it means that the latest
@ -512,11 +518,6 @@ proc getState(
  state.blck = blck
  state.data.root = stateRoot

  # In case a newer state is loaded from database than we previously knew - this
  # is in theory possible if the head state we load on init is older than
  # some other random known state in the database
  dag.updateValidatorKeys(getStateField(state, validators).asSeq())

  true

func stateCheckpoint*(bs: BlockSlot): BlockSlot =
@ -692,9 +693,6 @@ proc applyBlock(
  if ok:
    state.blck = blck.refs

    # New validators might have been added by block (on startup for example)
    dag.updateValidatorKeys(getStateField(state, validators).asSeq())

  ok

proc updateStateData*(
@ -1146,12 +1144,12 @@ proc getProposer*(

  let proposer = epochRef.beacon_proposers[slotInEpoch]
  if proposer.isSome():
    if proposer.get().uint64 >= dag.validatorKeys.lenu64():
    if proposer.get().uint64 >= dag.db.immutableValidators.lenu64():
      # Sanity check - it should never happen that the key cache doesn't contain
      # a key for the selected proposer - that would mean that we somehow
      # created validators in the state without updating the cache!
      warn "Proposer key not found",
        keys = dag.validatorKeys.lenu64(), proposer = proposer.get()
        keys = dag.db.immutableValidators.lenu64(), proposer = proposer.get()
      return none(ValidatorIndex)

  proposer
@ -132,7 +132,7 @@ proc is_valid_indexed_attestation*(
    pubkeys = newSeqOfCap[CookedPubKey](sigs)
  for index in get_attesting_indices(
      epochRef, attestation.data, attestation.aggregation_bits):
    pubkeys.add(epochRef.validator_keys[index])
    pubkeys.add(epochRef.validatorKey(index).get())

  if not verify_attestation_signature(
      fork, genesis_validators_root, attestation.data,
@ -286,14 +286,16 @@ proc scheduleAggregateChecks*(

  # Enqueue in the buffer
  # ------------------------------------------------------
  let aggregator = epochRef.validator_keys[aggregate_and_proof.aggregator_index]
  let aggregator = epochRef.validatorKey(aggregate_and_proof.aggregator_index)
  if not aggregator.isSome():
    return err("scheduleAggregateChecks: invalid aggregator index")
  block:
    if (let v = batch
          .pendingBuffer
          .addSlotSignature(
            fork, genesis_validators_root,
            aggregate.data.slot,
            aggregator,
            aggregator.get(),
            aggregate_and_proof.selection_proof
          ); v.isErr()):
      return err(v.error())
@ -309,7 +311,7 @@ proc scheduleAggregateChecks*(
          .addAggregateAndProofSignature(
            fork, genesis_validators_root,
            aggregate_and_proof,
            aggregator,
            aggregator.get(),
            signed_aggregate_and_proof.signature
          ); v.isErr()):
  batchCrypto.scheduleBatch(fresh)
@ -173,7 +173,7 @@ proc checkForPotentialDoppelganger(
    let epochRef = self.dag.getEpochRef(
      tgtBlck, attestation.data.target.epoch)
    for validatorIndex in attesterIndices:
      let validatorPubkey = epochRef.validator_keys[validatorIndex].toPubKey()
      let validatorPubkey = epochRef.validatorKey(validatorIndex).get().toPubKey()
      if self.doppelgangerDetectionEnabled and
          self.validatorPool[].getValidator(validatorPubkey) !=
            default(AttachedValidator):
@ -420,15 +420,12 @@ proc validateAggregate*(
    return err((ValidationResult.Reject, cstring(
      "Aggregator's validator index not in committee")))

  block:
    # 1. [REJECT] The aggregate_and_proof.selection_proof is a valid signature of the
    # aggregate.data.slot by the validator with index
    # aggregate_and_proof.aggregator_index.
    # get_slot_signature(state, aggregate.data.slot, privkey)
    # 2. [REJECT] The aggregator signature, signed_aggregate_and_proof.signature, is valid.
    # 3. [REJECT] The signature of aggregate is valid.
    if aggregate_and_proof.aggregator_index >= epochRef.validator_keys.lenu64:
      return err((ValidationResult.Reject, cstring("Invalid aggregator_index")))
  # 1. [REJECT] The aggregate_and_proof.selection_proof is a valid signature of the
  # aggregate.data.slot by the validator with index
  # aggregate_and_proof.aggregator_index.
  # get_slot_signature(state, aggregate.data.slot, privkey)
  # 2. [REJECT] The aggregator signature, signed_aggregate_and_proof.signature, is valid.
  # 3. [REJECT] The signature of aggregate is valid.

  let
    fork = getStateField(pool.dag.headState, fork)
@ -633,7 +630,7 @@ proc isValidBeaconBlock*(
      getStateField(dag.headState, genesis_validators_root),
      signed_beacon_block.message.slot,
      signed_beacon_block.message,
      dag.validatorKeys[proposer.get()],
      dag.validatorKey(proposer.get()).get(),
      signed_beacon_block.signature):
    debug "block failed signature verification",
      signature = shortLog(signed_beacon_block.signature)
@ -95,10 +95,10 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
      let committee = get_beacon_committee(
        epochRef, slot, committee_index.CommitteeIndex)
      for index_in_committee, validatorIdx in committee:
        if validatorIdx < epochRef.validator_keys.len.ValidatorIndex:
          let curr_val_pubkey = epochRef.validator_keys[validatorIdx].toPubKey()
          if public_keys.findIt(it == curr_val_pubkey) != -1:
            result.add((public_key: curr_val_pubkey,
        let curr_val_pubkey = epochRef.validatorKey(validatorIdx)
        if curr_val_pubkey.isSome():
          if public_keys.findIt(it == curr_val_pubkey.get().toPubKey()) != -1:
            result.add((public_key: curr_val_pubkey.get().toPubKey(),
                        validator_index: validatorIdx,
                        committee_index: committee_index.CommitteeIndex,
                        committee_length: committee.lenu64,
@ -113,7 +113,7 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
        epochRef = node.dag.getEpochRef(head, epoch)
      for i, bp in epochRef.beacon_proposers:
        if bp.isSome():
          result.add((public_key: epochRef.validator_keys[bp.get()].toPubKey(),
          result.add((public_key: epochRef.validatorKey(bp.get()).get().toPubKey(),
                      validator_index: bp.get(),
                      slot: compute_start_slot_at_epoch(epoch) + i))

@ -102,12 +102,12 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
            epochRef, slot, CommitteeIndex(committee_index)
          )
          for index_in_committee, validator_index in commitee:
            if validator_index < ValidatorIndex(len(epochRef.validator_keys)):
              let validator_key = epochRef.validator_keys[validator_index]
              if validator_index in indexList:
            if validator_index in indexList:
              let validator_key = epochRef.validatorKey(validator_index)
              if validator_key.isSome():
                res.add(
                  RestAttesterDuty(
                    pubkey: validator_key.toPubKey(),
                    pubkey: validator_key.get().toPubKey(),
                    validator_index: validator_index,
                    committee_index: CommitteeIndex(committee_index),
                    committee_length: lenu64(commitee),
@ -147,14 +147,16 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
        block:
          var res: seq[RestProposerDuty]
          let epochRef = node.dag.getEpochRef(qhead, qepoch)
          # Fix for https://github.com/status-im/nimbus-eth2/issues/2488
          # Slot(0) at Epoch(0) do not have a proposer.
          let startSlot = if qepoch == Epoch(0): 1'u64 else: 0'u64
          for i, bp in epochRef.beacon_proposers:
            if i == 0 and qepoch == 0:
              # Fix for https://github.com/status-im/nimbus-eth2/issues/2488
              # Slot(0) at Epoch(0) do not have a proposer.
              continue

            if bp.isSome():
              res.add(
                RestProposerDuty(
                  pubkey: epochRef.validator_keys[bp.get()].toPubKey(),
                  pubkey: epochRef.validatorKey(bp.get()).get().toPubKey(),
                  validator_index: bp.get(),
                  slot: compute_start_slot_at_epoch(qepoch) + i
                )
@ -35,7 +35,7 @@ import
  json_serialization,
  nimcrypto/utils as ncrutils

export results, json_serialization
export results, json_serialization, blscurve

# Type definitions
# ----------------------------------------------------------------------
@ -43,16 +43,22 @@ export results, json_serialization
const
  RawSigSize* = 96
  RawPubKeySize* = 48
  UncompressedPubKeySize* = 96
  # RawPrivKeySize* = 48 for Miracl / 32 for BLST

type
  # Raw serialized key bytes - this type is used so as to not eagerly
  # load keys - deserialization is slow, as are equality checks - however, it
  # is not guaranteed that the key is valid (except in some cases, like the
  # database state)
  ValidatorPubKey* = object
  ValidatorPubKey* = object ##\
    ## Compressed raw serialized key bytes - this type is used so as to not
    ## eagerly load keys - deserialization is slow, as are equality checks -
    ## however, it is not guaranteed that the key is valid (except in some
    ## cases, like the database state)
    blob*: array[RawPubKeySize, byte]

  UncompressedPubKey* = object
    ## Uncompressed variation of ValidatorPubKey - this type is faster to
    ## deserialize but doubles the storage footprint
    blob*: array[UncompressedPubKeySize, byte]

  CookedPubKey* = distinct blscurve.PublicKey ## Valid deserialized key

  ValidatorSig* = object
@ -90,6 +96,8 @@ func toPubKey*(privkey: ValidatorPrivKey): CookedPubKey =

template toRaw*(x: CookedPubKey): auto =
  PublicKey(x).exportRaw()
template toUncompressed*(x: CookedPubKey): auto =
  UncompressedPubKey(blob: PublicKey(x).exportUncompressed())

func toPubKey*(pubKey: CookedPubKey): ValidatorPubKey =
  ## Derive a public key from a private key
@ -104,6 +112,24 @@ proc load*(v: ValidatorPubKey): Option[CookedPubKey] =
  else:
    none CookedPubKey

proc load*(v: UncompressedPubKey): Option[CookedPubKey] =
  ## Parse public key blob - this may fail
  var val: blscurve.PublicKey
  if fromBytes(val, v.blob):
    some CookedPubKey(val)
  else:
    none CookedPubKey

func loadValid*(v: UncompressedPubKey | ValidatorPubKey): CookedPubKey {.noinit.} =
  ## Parse known-to-be-valid key - this is the case for any key that's passed
  ## parsing once and is the output of serialization, such as those keys we
  ## keep in the database or state.
  var val: blscurve.PublicKey
  let ok = fromBytesKnownOnCurve(val, v.blob)
  doAssert ok, "Valid key no longer parses, data corrupt? " & $v

  CookedPubKey(val)

proc loadWithCache*(v: ValidatorPubKey): Option[CookedPubKey] =
  ## Parse public key blob - this may fail - this function uses a cache to
  ## avoid the expensive deserialization - for now, external public keys only
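A minimal check (not part of the diff, assuming the constants and types from spec/crypto above) of the size tradeoff the doc comments describe - the uncompressed representation doubles the per-key storage from 48 to 96 bytes in exchange for the cheaper loadValid path:

static:
  doAssert sizeof(ValidatorPubKey) == RawPubKeySize              # 48 bytes, compressed
  doAssert sizeof(UncompressedPubKey) == UncompressedPubKeySize  # 96 bytes, uncompressed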
@ -32,7 +32,7 @@ import
  ./merge

export
  sszTypes, merge, presets, json_serialization
  crypto, sszTypes, merge, presets, json_serialization

# Presently, we're reusing the data types from the serialization (uint64) in the
# objects we pass around to the beacon chain logic, thus keeping the two
@ -271,11 +271,17 @@ type
  SomeSignedBeaconBlockHeader* = SignedBeaconBlockHeader | TrustedSignedBeaconBlockHeader
  SomeSignedVoluntaryExit* = SignedVoluntaryExit | TrustedSignedVoluntaryExit

  # Please note that this type is not part of the spec
  # Legacy database type, see BeaconChainDB
  ImmutableValidatorData* = object
    pubkey*: ValidatorPubKey
    withdrawal_credentials*: Eth2Digest

  # Non-spec type that represents the immutable part of a validator - an
  # uncompressed key serialization is used to speed up loading from database
  ImmutableValidatorData2* = object
    pubkey*: UncompressedPubKey
    withdrawal_credentials*: Eth2Digest

  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#validator
  Validator* = object
    pubkey*: ValidatorPubKey
@ -550,9 +556,12 @@ type
    statuses*: seq[RewardStatus]
    total_balances*: TotalBalances

func getImmutableValidatorData*(validator: Validator): ImmutableValidatorData =
  ImmutableValidatorData(
    pubkey: validator.pubkey,
func getImmutableValidatorData*(validator: Validator): ImmutableValidatorData2 =
  let cookedKey = validator.pubkey.load() # Loading the pubkey is slow!
  doAssert cookedKey.isSome,
    "Cannot parse validator key: " & toHex(validator.pubkey)
  ImmutableValidatorData2(
    pubkey: cookedKey.get().toUncompressed(),
    withdrawal_credentials: validator.withdrawal_credentials)

# TODO when https://github.com/nim-lang/Nim/issues/14440 lands in Status's Nim,
@ -918,6 +927,14 @@ proc readValue*(r: var JsonReader, T: type GraffitiBytes): T
template getStateField*(stateData, fieldName: untyped): untyped =
  stateData.data.data.fieldName

proc load*(
    validators: openArray[ImmutableValidatorData2],
    index: ValidatorIndex | uint64): Option[CookedPubKey] =
  if validators.lenu64() <= index.uint64:
    none(CookedPubKey)
  else:
    some(validators[index.int].pubkey.loadValid())

static:
  # Sanity checks - these types should be trivial enough to copy with memcpy
  doAssert supportsCopyMem(Validator)
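A small illustration (not in the diff; the surrounding proc is hypothetical) of how the `load` overload above behaves for indices that have not been observed yet:

proc describeKey(cache: seq[ImmutableValidatorData2], index: uint64): string =
  # Out-of-range indices yield none() rather than raising, so callers such as
  # dag.validatorKey can treat "unknown validator" as a normal condition
  let key = cache.load(index)
  if key.isSome():
    "known: " & toHex(key.get().toPubKey())
  else:
    "not observed yet"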
@ -69,7 +69,7 @@ func addSignatureSet[T](

proc aggregateAttesters(
      validatorIndices: openArray[uint64],
      validatorKeys: openArray[CookedPubKey],
      validatorKeys: auto,
      ): Result[CookedPubKey, cstring] =
  if validatorIndices.len == 0:
    # Aggregation spec requires non-empty collection
@ -78,22 +78,27 @@ proc aggregateAttesters(
  # - https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#is_valid_indexed_attestation
    return err("aggregateAttesters: no attesting indices")

  var attestersAgg{.noInit.}: AggregatePublicKey
  if validatorIndices[0] >= validatorKeys.lenu64():
  let
    firstKey = validatorKeys.load(validatorIndices[0])

  if not firstKey.isSome():
    return err("aggregateAttesters: invalid attesting index")

  attestersAgg.init(validatorKeys[validatorIndices[0].int])
  var attestersAgg{.noInit.}: AggregatePublicKey

  attestersAgg.init(firstKey.get())
  for i in 1 ..< validatorIndices.len:
    if validatorIndices[i] >= validatorKeys.lenu64():
    let key = validatorKeys.load(validatorIndices[i])
    if not key.isSome():
      return err("aggregateAttesters: invalid attesting index")
    attestersAgg.aggregate(validatorKeys[validatorIndices[i].int])
    attestersAgg.aggregate(key.get())

  ok(finish(attestersAgg))

proc addIndexedAttestation(
      sigs: var seq[SignatureSet],
      attestation: IndexedAttestation,
      validatorKeys: openArray[CookedPubKey],
      validatorKeys: auto,
      state: StateData,
      ): Result[void, cstring] =
  ## Add an indexed attestation for batched BLS verification
@ -118,7 +123,7 @@ proc addIndexedAttestation(
proc addAttestation(
      sigs: var seq[SignatureSet],
      attestation: Attestation,
      validatorKeys: openArray[CookedPubKey],
      validatorKeys: auto,
      state: StateData,
      cache: var StateCache
      ): Result[void, cstring] =
@ -130,10 +135,10 @@ proc addAttestation(
      cache
    ):
    if not inited: # first iteration
      attestersAgg.init(validatorKeys[valIndex.int])
      attestersAgg.init(validatorKeys.load(valIndex).get())
      inited = true
    else:
      attestersAgg.aggregate(validatorKeys[valIndex.int])
      attestersAgg.aggregate(validatorKeys.load(valIndex).get())

  if not inited:
    # There were no attesters
@ -169,7 +174,7 @@ proc addAttestation*(
  ## Returns true if the attestation was added to the batching buffer
  ## Returns false if sanity checks failed (non-empty, keys are valid)
  ## In that case the seq[SignatureSet] is unmodified
  mixin get_attesting_indices, validator_keys, pubkey
  mixin get_attesting_indices, validatorKey

  var inited = false
  var attestersAgg{.noInit.}: AggregatePublicKey
@ -177,10 +182,10 @@ proc addAttestation*(
      attestation.data,
      attestation.aggregation_bits):
    if not inited: # first iteration
      attestersAgg.init(epochRef.validator_keys[valIndex])
      attestersAgg.init(epochRef.validatorKey(valIndex).get())
      inited = true
    else:
      attestersAgg.aggregate(epochRef.validator_keys[valIndex])
      attestersAgg.aggregate(epochRef.validatorKey(valIndex).get())

  if not inited:
    # There were no attesters
@ -245,7 +250,7 @@ proc addAggregateAndProofSignature*(
proc collectSignatureSets*(
      sigs: var seq[SignatureSet],
      signed_block: SignedBeaconBlock,
      validatorKeys: openArray[CookedPubKey],
      validatorKeys: auto,
      state: StateData,
      cache: var StateCache): Result[void, cstring] =
  ## Collect all signatures in a single signed block.
@ -262,11 +267,12 @@ proc collectSignatureSets*(

  # Metadata
  # ----------------------------------------------------
  mixin load

  let
    proposer_index = signed_block.message.proposer_index
    validators = validatorKeys.lenu64
  if proposer_index >= validators:
    proposer_key = validatorKeys.load(proposer_index)
  if not proposer_key.isSome():
    return err("collectSignatureSets: invalid proposer index")

  let epoch = signed_block.message.slot.compute_epoch_at_slot()
@ -274,7 +280,7 @@ proc collectSignatureSets*(
  # 1. Block proposer
  # ----------------------------------------------------
  sigs.addSignatureSet(
    validatorKeys[proposer_index],
    proposer_key.get(),
    signed_block.message,
    signed_block.signature.loadOrExit(
      "collectSignatureSets: cannot load signature"),
@ -286,7 +292,7 @@ proc collectSignatureSets*(
  # 2. Randao Reveal
  # ----------------------------------------------------
  sigs.addSignatureSet(
    validatorKeys[proposer_index],
    proposer_key.get(),
    epoch,
    signed_block.message.body.randao_reveal.loadOrExit(
      "collectSignatureSets: cannot load randao"),
@ -311,13 +317,15 @@ proc collectSignatureSets*(

    # Proposed block 1
    block:
      let header_1 = slashing.signed_header_1
      if header_1.message.proposer_index >= validators:
      let
        header_1 = slashing.signed_header_1
        key_1 = validatorKeys.load(header_1.message.proposer_index)
      if not key_1.isSome():
        return err("collectSignatureSets: invalid slashing proposer index 1")

      let epoch1 = header_1.message.slot.compute_epoch_at_slot()
      sigs.addSignatureSet(
        validatorKeys[header_1.message.proposer_index],
        key_1.get(),
        header_1.message,
        header_1.signature.loadOrExit(
          "collectSignatureSets: cannot load proposer slashing 1 signature"),
@ -329,12 +337,14 @@ proc collectSignatureSets*(

    # Conflicting block 2
    block:
      let header_2 = slashing.signed_header_2
      if header_2.message.proposer_index >= validators:
      let
        header_2 = slashing.signed_header_2
        key_2 = validatorKeys.load(header_2.message.proposer_index)
      if not key_2.isSome():
        return err("collectSignatureSets: invalid slashing proposer index 2")
      let epoch2 = header_2.message.slot.compute_epoch_at_slot()
      sigs.addSignatureSet(
        validatorKeys[header_2.message.proposer_index],
        key_2.get(),
        header_2.message,
        header_2.signature.loadOrExit(
          "collectSignatureSets: cannot load proposer slashing 2 signature"),
@ -389,11 +399,12 @@ proc collectSignatureSets*(
      # due to https://github.com/nim-lang/Nim/issues/14421
      # fixed in 1.4.2
      template volex: untyped = signed_block.message.body.voluntary_exits[i]
      if volex.message.validator_index >= validators:
      let key = validatorKeys.load(volex.message.validator_index)
      if not key.isSome():
        return err("collectSignatureSets: invalid voluntary exit")

      sigs.addSignatureSet(
        validatorKeys[volex.message.validator_index],
        key.get(),
        volex.message,
        volex.signature.loadOrExit(
          "collectSignatureSets: cannot load voluntary exit signature"),
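The batch-verification helpers now take `validatorKeys: auto` and resolve `load` via `mixin`, so any key container that exposes `load(index): Option[CookedPubKey]` can be passed in. A rough sketch (the wrapper type is hypothetical) of what such a container needs to provide:

type KeyView = object
  ## Hypothetical wrapper around the database-backed key cache
  keys: seq[ImmutableValidatorData2]

func load(v: KeyView, index: uint64): Option[CookedPubKey] =
  # Delegates to the openArray[ImmutableValidatorData2] overload added in this
  # commit; this is the only operation collectSignatureSets and friends require.
  v.keys.load(index)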
@ -10,7 +10,7 @@
import
  stew/assign2,
  ./ssz/types,
  ./spec/[datatypes, digest, helpers]
  ./spec/[crypto, datatypes, digest, helpers]

func diffModIncEpoch[T, U](hl: HashArray[U, T], startSlot: uint64):
    array[SLOTS_PER_EPOCH, T] =
@ -32,7 +32,7 @@ func applyValidatorIdentities(
    hl: auto) =
  for item in hl:
    if not validators.add Validator(
        pubkey: item.pubkey,
        pubkey: item.pubkey.loadValid().toPubKey(),
        withdrawal_credentials: item.withdrawal_credentials):
      raiseAssert "cannot readd"

@ -150,7 +150,7 @@ func diffStates*(state0, state1: BeaconState): BeaconStateDiff =

func applyDiff*(
    state: var BeaconState,
    immutableValidators: openArray[ImmutableValidatorData],
    immutableValidators: openArray[ImmutableValidatorData2],
    stateDiff: BeaconStateDiff) =
  template assign[T, U](tgt: var HashList[T, U], src: List[T, U]) =
    assign(tgt.data, src)
@ -121,16 +121,17 @@ proc getAttachedValidator*(node: BeaconNode,
proc getAttachedValidator*(node: BeaconNode,
                           epochRef: EpochRef,
                           idx: ValidatorIndex): AttachedValidator =
  if idx < epochRef.validator_keys.len.ValidatorIndex:
    let validator = node.getAttachedValidator(epochRef.validator_keys[idx].toPubKey())
  let key = epochRef.validatorKey(idx)
  if key.isSome():
    let validator = node.getAttachedValidator(key.get().toPubKey())
    if validator != nil and validator.index != some(idx.ValidatorIndex):
      # Update index, in case the validator was activated!
      notice "Validator activated", pubkey = validator.pubkey, index = idx
      validator.index = some(idx.ValidatorIndex)
    validator
  else:
    warn "Validator index out of bounds",
      idx, epoch = epochRef.epoch, validators = epochRef.validator_keys.len
    warn "Validator key not found",
      idx, epoch = epochRef.epoch
    nil
@ -501,7 +502,7 @@ proc handleProposal(node: BeaconNode, head: BlockRef, slot: Slot):
    return head

  let
    proposerKey = node.dag.validatorKeys[proposer.get()].toPubKey()
    proposerKey = node.dag.validatorKey(proposer.get()).get().toPubKey()
    validator = node.attachedValidators[].getValidator(proposerKey)

  if validator != nil:
@ -14,7 +14,7 @@ import
  eth/keys,
  ../beacon_chain/spec/[datatypes, digest, helpers, state_transition, presets],
  ../beacon_chain/beacon_node_types,
  ../beacon_chain/ssz,
  ../beacon_chain/[beacon_chain_db, ssz],
  ../beacon_chain/consensus_object_pools/[
    blockchain_dag, block_quarantine, block_clearance, statedata_helpers],
  ./testutil, ./testdbutil, ./testblockutil
@ -154,6 +154,8 @@ suite "Block pool processing" & preset():
    let
      b2Add = dag.addRawBlock(quarantine, b2, nil)
      b2Get = dag.get(b2.root)
      er = dag.findEpochRef(b1Add[], b1Add[].slot.epoch)
      validators = getStateField(dag.headState, validators).lenu64()

    check:
      b2Get.isSome()
@ -161,11 +163,16 @@ suite "Block pool processing" & preset():
      b2Add[].root == b2Get.get().refs.root
      dag.heads.len == 1
      dag.heads[0] == b2Add[]
      not dag.findEpochRef(b1Add[], b1Add[].slot.epoch).isNil
      dag.findEpochRef(b1Add[], b1Add[].slot.epoch) ==
        dag.findEpochRef(b2Add[], b2Add[].slot.epoch)
      not er.isNil
      # Same epoch - same epochRef
      er == dag.findEpochRef(b2Add[], b2Add[].slot.epoch)
      # Different epoch that was never processed
      dag.findEpochRef(b1Add[], b1Add[].slot.epoch + 1).isNil

      er.validatorKey(0'u64).isSome()
      er.validatorKey(validators - 1).isSome()
      er.validatorKey(validators).isNone()

    # Skip one slot to get a gap
    check:
      process_slots(state[], state.data.slot + 1, cache, rewards)
@ -376,7 +383,7 @@ suite "chain DAG finalization tests" & preset():
      dag.heads.len() == 1

    check:
      dag.validatorKeys.len() == getStateField(dag.headState, validators).len()
      dag.db.immutableValidators.len() == getStateField(dag.headState, validators).len()

    let
      finalER = dag.findEpochRef(dag.finalizedHead.blck, dag.finalizedHead.slot.epoch)
@ -516,4 +523,3 @@ suite "chain DAG finalization tests" & preset():
      dag2.finalizedHead.blck.root == dag.finalizedHead.blck.root
      dag2.finalizedHead.slot == dag.finalizedHead.slot
      hash_tree_root(dag2.headState) == hash_tree_root(dag.headState)
      dag2.validatorKeys.len() == dag.validatorKeys.len()
vendor/nim-blscurve (vendored)
@ -1 +1 @@
Subproject commit fd4956f5d65129e9b475e654903a84303395eb92
Subproject commit e1b9392b802ef53407035bf78a1d2a3f3067d7d7