performance fixes (#2259)

* performance fixes

* don't mark tree cache as dirty on read-only List accesses
* store only blob in memory for keys and signatures, parse blob lazily
* compare public keys by blob instead of parsing / converting to raw
* compare Eth2Digest using non-constant-time comparison
* avoid some unnecessary validator copying

This branch will in particular speed up deposit processing, which has
been slowing down block replay.

Pre (mainnet, 1600 blocks):

```
All time are ms
     Average,       StdDev,          Min,          Max,      Samples,         Test
Validation is turned off meaning that no BLS operations are performed
    3450.269,        0.000,     3450.269,     3450.269,            1, Initialize DB
       0.417,        0.822,        0.036,       21.098,         1400, Load block from database
      16.521,        0.000,       16.521,       16.521,            1, Load state from database
      27.906,       50.846,        8.104,     1507.633,         1350, Apply block
      52.617,       37.029,       20.640,      135.938,           50, Apply epoch block
```

Post:

```
    3502.715,        0.000,     3502.715,     3502.715,            1, Initialize DB
       0.080,        0.560,        0.035,       21.015,         1400, Load block from database
      17.595,        0.000,       17.595,       17.595,            1, Load state from database
      15.706,       11.028,        8.300,      107.537,         1350, Apply block
      33.217,       12.622,       17.331,       60.580,           50, Apply epoch block
```

* more perf fixes

* load EpochRef cache into StateCache more aggressively
* point out security concern with public key cache
* reuse proposer index from state when processing block
* avoid genericAssign in a few more places
* don't parse key when signature is unparseable
* fix `==` overload for Eth2Digest
* preallocate validator list when getting active validators
* speed up proposer index calculation a little bit
* reuse cache when replaying blocks in ncli_db
* avoid a few more copying loops

```
     Average,       StdDev,          Min,          Max,      Samples,         Test
Validation is turned off meaning that no BLS operations are performed
    3279.158,        0.000,     3279.158,     3279.158,            1, Initialize DB
       0.072,        0.357,        0.035,       13.400,         1400, Load block from database
      17.295,        0.000,       17.295,       17.295,            1, Load state from database
       5.918,        9.896,        0.198,       98.028,         1350, Apply block
      15.888,       10.951,        7.902,       39.535,           50, Apply epoch block
       0.000,        0.000,        0.000,        0.000,            0, Database block store
```

* clear full balance cache before processing rewards and penalties

```
All time are ms
     Average,       StdDev,          Min,          Max,      Samples,         Test
Validation is turned off meaning that no BLS operations are performed
    3947.901,        0.000,     3947.901,     3947.901,            1, Initialize DB
       0.124,        0.506,        0.026,      202.370,       363345, Load block from database
      97.614,        0.000,       97.614,       97.614,            1, Load state from database
       0.186,        0.188,        0.012,       99.561,       357262, Advance slot, non-epoch
      14.161,        5.966,        1.099,      395.511,        11524, Advance slot, epoch
       1.372,        4.170,        0.017,      276.401,       363345, Apply block, no slot processing
       0.000,        0.000,        0.000,        0.000,            0, Database block store
```
This commit is contained in:
Jacek Sieka 2021-01-25 13:04:18 +01:00 committed by GitHub
parent 095b191aee
commit 5713a3ce4c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
24 changed files with 332 additions and 290 deletions

View File

@ -58,7 +58,7 @@ template withState*(
## TODO async transformations will lead to a race where stateData gets updated
## while waiting for future to complete - catch this here somehow?
var cache {.inject.} = blockSlot.blck.getStateCache(blockSlot.slot.epoch())
var cache {.inject.} = StateCache()
updateStateData(dag, stateData, blockSlot, false, cache)
template hashedState(): HashedBeaconState {.inject, used.} = stateData.data
@ -97,9 +97,9 @@ func get_effective_balances*(state: BeaconState): seq[Gwei] =
for i in 0 ..< result.len:
# All non-active validators have a 0 balance
template validator: Validator = state.validators[i]
if validator.is_active_validator(epoch):
result[i] = validator.effective_balance
let validator = unsafeAddr state.validators[i]
if validator[].is_active_validator(epoch):
result[i] = validator[].effective_balance
proc init*(
T: type EpochRef, state: BeaconState, cache: var StateCache,
@ -146,13 +146,13 @@ proc init*(
ret
if prevEpoch != nil and (
prevEpoch.validator_key_store[0] == hash_tree_root(state.validators) or
prevEpoch.validator_key_store[0] == validators_root or
sameKeys(prevEpoch.validator_key_store[1][], state.validators.asSeq)):
epochRef.validator_key_store =
(validators_root, prevEpoch.validator_key_store[1])
else:
epochRef.validator_key_store = (
hash_tree_root(state.validators),
validators_root,
newClone(mapIt(state.validators.toSeq, it.pubkey)))
# When fork choice runs, it will need the effective balance of the justified
@ -273,34 +273,36 @@ func epochAncestor*(blck: BlockRef, epoch: Epoch): BlockSlot =
blck.atEpochStart(epoch)
proc getStateCache*(blck: BlockRef, epoch: Epoch): StateCache =
func findEpochRef*(blck: BlockRef, epoch: Epoch): EpochRef = # may return nil!
let ancestor = blck.epochAncestor(epoch)
doAssert ancestor.blck != nil
for epochRef in ancestor.blck.epochRefs:
if epochRef.epoch == epoch:
return epochRef
proc loadStateCache*(cache: var StateCache, blck: BlockRef, epoch: Epoch) =
# When creating a state cache, we want the current and the previous epoch
# information to be preloaded as both of these are used in state transition
# functions
var res = StateCache()
template load(e: Epoch) =
let ancestor = blck.epochAncestor(epoch)
for epochRef in ancestor.blck.epochRefs:
if epochRef.epoch == e:
res.shuffled_active_validator_indices[epochRef.epoch] =
if epoch notin cache.shuffled_active_validator_indices:
let epochRef = blck.findEpochRef(epoch)
if epochRef != nil:
cache.shuffled_active_validator_indices[epochRef.epoch] =
epochRef.shuffled_active_validator_indices
if epochRef.epoch == epoch:
for i, idx in epochRef.beacon_proposers:
res.beacon_proposer_indices[
cache.beacon_proposer_indices[
epoch.compute_start_slot_at_epoch + i] =
if idx.isSome: some(idx.get()[0]) else: none(ValidatorIndex)
break
load(epoch)
if epoch > 0:
load(epoch - 1)
res
func init(T: type BlockRef, root: Eth2Digest, slot: Slot): BlockRef =
BlockRef(
root: root,
@ -443,13 +445,6 @@ proc init*(T: type ChainDAGRef,
res
proc findEpochRef*(blck: BlockRef, epoch: Epoch): EpochRef = # may return nil!
let ancestor = blck.epochAncestor(epoch)
doAssert ancestor.blck != nil
for epochRef in ancestor.blck.epochRefs:
if epochRef.epoch == epoch:
return epochRef
proc getEpochRef*(dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): EpochRef =
let epochRef = blck.findEpochRef(epoch)
if epochRef != nil:
@ -662,6 +657,8 @@ proc applyBlock(
doAssert (addr(statePtr.data) == addr v)
statePtr[] = dag.headState
loadStateCache(cache, blck.refs, blck.data.message.slot.epoch)
let ok = state_transition(
dag.runtimePreset, state.data, blck.data,
cache, flags + dag.updateFlags + {slotProcessed}, restore)
@ -783,6 +780,8 @@ proc updateStateData*(
dag.applyBlock(state, dag.get(ancestors[i]), {}, cache)
doAssert ok, "Blocks in database should never fail to apply.."
loadStateCache(cache, bs.blck, bs.slot.epoch)
# ...and make sure to process empty slots as requested
dag.advanceSlots(state, bs.slot, save, cache)

View File

@ -196,7 +196,7 @@ proc addRawBlock*(
# TODO if the block is from the future, we should not be resolving it (yet),
# but maybe we should use it as a hint that our clock is wrong?
var cache = getStateCache(parent, blck.slot.epoch)
var cache = StateCache()
updateStateData(
dag, dag.clearanceState, parent.atSlot(blck.slot), true, cache)

View File

@ -15,7 +15,10 @@ proc toJsonHex(data: openArray[byte]): string =
proc fromJson*(n: JsonNode, argName: string, result: var ValidatorPubKey) =
n.kind.expect(JString, argName)
result = initPubKey(ValidatorPubKey.fromHex(n.getStr()).tryGet().initPubKey())
var tmp = ValidatorPubKey.fromHex(n.getStr()).tryGet()
if not tmp.loadWithCache().isSome():
raise (ref ValueError)(msg: "Invalid public BLS key")
result = tmp
proc `%`*(pubkey: ValidatorPubKey): JsonNode =
newJString(toJsonHex(toRaw(pubkey)))

View File

@ -437,7 +437,7 @@ proc logEnrAddress(address: string) =
var forkid = SSZ.decode(eth2Data.get(), ENRForkID)
eth2fork_digest = $forkid.fork_digest
eth2next_fork_version = $forkid.next_fork_version
eth2next_fork_epoch = strutils.toHex(cast[uint64](forkid.next_fork_epoch))
eth2next_fork_epoch = strutils.toHex(uint64(forkid.next_fork_epoch))
except CatchableError:
eth2fork_digest = "Error"
eth2next_fork_version = "Error"

View File

@ -18,7 +18,7 @@ programMain:
# load and send all public keys so the BN knows for which ones to ping us
doAssert paramCount() == 2
for curr in validatorKeysFromDirs(paramStr(1), paramStr(2)):
validators[curr.toPubKey.initPubKey] = curr
validators[curr.toPubKey] = curr
echo curr.toPubKey
echo "end"
@ -27,6 +27,6 @@ programMain:
let args = stdin.readLine.split(" ")
doAssert args.len == 2
let privKey = validators[ValidatorPubKey.fromHex(args[0]).get().initPubKey()]
let privKey = validators[ValidatorPubKey.fromHex(args[0]).get()]
echo blsSign(privKey, Eth2Digest.fromHex(args[1]).data)

View File

@ -302,7 +302,7 @@ programMain:
# load all the validators from the data dir into memory
for curr in vc.config.validatorKeys:
vc.attachedValidators.addLocalValidator(
curr.toPubKey.initPubKey, curr, none(ValidatorIndex))
curr.toPubKey, curr, none(ValidatorIndex))
waitFor vc.client.connect($vc.config.rpcAddress, vc.config.rpcPort)
info "Connected to BN",

View File

@ -92,7 +92,7 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
epochRef, slot, committee_index.CommitteeIndex)
for index_in_committee, validatorIdx in committee:
if validatorIdx < epochRef.validator_keys.len.ValidatorIndex:
let curr_val_pubkey = epochRef.validator_keys[validatorIdx].initPubKey
let curr_val_pubkey = epochRef.validator_keys[validatorIdx]
if public_keys.findIt(it == curr_val_pubkey) != -1:
result.add((public_key: curr_val_pubkey,
validator_index: validatorIdx,
@ -109,7 +109,7 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
epochRef = node.chainDag.getEpochRef(head, epoch)
for i in 0 ..< SLOTS_PER_EPOCH:
if epochRef.beacon_proposers[i].isSome():
result.add((public_key: epochRef.beacon_proposers[i].get()[1].initPubKey(),
result.add((public_key: epochRef.beacon_proposers[i].get()[1],
slot: compute_start_slot_at_epoch(epoch) + i))
rpcServer.rpc("post_v1_validator_beacon_committee_subscriptions") do (

View File

@ -9,6 +9,7 @@
import
std/[tables, algorithm, math, sequtils, options],
stew/assign2,
json_serialization/std/sets,
chronicles,
../extras, ../ssz/merkleization,
@ -36,24 +37,32 @@ func is_valid_merkle_branch*(leaf: Eth2Digest, branch: openArray[Eth2Digest],
value == root
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/specs/phase0/beacon-chain.md#increase_balance
func increase_balance*(balance: var Gwei, delta: Gwei) =
balance += delta
func increase_balance*(
state: var BeaconState, index: ValidatorIndex, delta: Gwei) =
## Increase the validator balance at index ``index`` by ``delta``.
state.balances[index] += delta
if delta != 0: # avoid dirtying the balance cache if not needed
increase_balance(state.balances[index], delta)
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/specs/phase0/beacon-chain.md#decrease_balance
func decrease_balance*(balance: var Gwei, delta: Gwei) =
balance =
if delta > balance:
0'u64
else:
balance - delta
func decrease_balance*(
state: var BeaconState, index: ValidatorIndex, delta: Gwei) =
## Decrease the validator balance at index ``index`` by ``delta``, with
## underflow protection.
state.balances[index] =
if delta > state.balances[index]:
0'u64
else:
state.balances[index] - delta
if delta != 0: # avoid dirtying the balance cache if not needed
decrease_balance(state.balances[index], delta)
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/specs/phase0/beacon-chain.md#deposits
func get_validator_from_deposit(state: BeaconState, deposit: DepositData):
func get_validator_from_deposit(deposit: DepositData):
Validator =
let
amount = deposit.amount
@ -91,31 +100,36 @@ proc process_deposit*(preset: RuntimePreset,
let
pubkey = deposit.data.pubkey
pubkey_inited = pubkey.initPubKey
amount = deposit.data.amount
var index = -1
for i, validator in state.validators:
if pubkey_inited == validator.pubkey.initPubKey:
# This linear scan is unfortunate, but should be fairly fast as we do a simple
# byte comparison of the key. The alternative would be to build a Table, but
# given that each block can hold no more than 16 deposits, it's slower to
# build the table and use it for lookups than to scan it like this.
# Once we have a reusable, long-lived cache, this should be revisited
for i in 0..<state.validators.len():
if state.validators.asSeq()[i].pubkey == pubkey:
index = i
break
if index == -1:
# Verify the deposit signature (proof of possession) which is not checked
# by the deposit contract
if skipBLSValidation notin flags:
if not verify_deposit_signature(preset, deposit.data):
# It's ok that deposits fail - they get included in blocks regardless
trace "Skipping deposit with invalid signature",
deposit = shortLog(deposit.data)
return ok()
# Add validator and balance entries
state.validators.add(get_validator_from_deposit(state, deposit.data))
state.balances.add(amount)
else:
if index != -1:
# Increase balance by deposit amount
increase_balance(state, index.ValidatorIndex, amount)
else:
# Verify the deposit signature (proof of possession) which is not checked
# by the deposit contract
if skipBLSValidation in flags or verify_deposit_signature(preset, deposit.data):
# New validator! Add validator and balance entries
state.validators.add(get_validator_from_deposit(deposit.data))
state.balances.add(amount)
else:
# Deposits may come with invalid signatures - in that case, they are not
# turned into a validator but still get processed to keep the deposit
# index correct
trace "Skipping deposit with invalid signature",
deposit = shortLog(deposit.data)
ok()
@ -154,13 +168,16 @@ func initiate_validator_exit*(state: var BeaconState,
var exit_queue_epoch = compute_activation_exit_epoch(get_current_epoch(state))
# Compute max exit epoch
for v in state.validators:
if v.exit_epoch != FAR_FUTURE_EPOCH and v.exit_epoch > exit_queue_epoch:
exit_queue_epoch = v.exit_epoch
for idx in 0..<state.validators.len:
let exit_epoch = state.validators.asSeq()[idx].exit_epoch
if exit_epoch != FAR_FUTURE_EPOCH and exit_epoch > exit_queue_epoch:
exit_queue_epoch = exit_epoch
let
exit_queue_churn = countIt(
state.validators, it.exit_epoch == exit_queue_epoch)
var
exit_queue_churn: int
for idx in 0..<state.validators.len:
if state.validators.asSeq()[idx].exit_epoch == exit_queue_epoch:
exit_queue_churn += 1
if exit_queue_churn.uint64 >= get_validator_churn_limit(state, cache):
exit_queue_epoch += 1
@ -250,14 +267,7 @@ proc initialize_beacon_state_from_eth1*(
Eth1Data(block_hash: eth1_block_hash, deposit_count: uint64(len(deposits))),
latest_block_header:
BeaconBlockHeader(
body_root: hash_tree_root(BeaconBlockBody(
# This differs from the spec intentionally.
# We must specify the default value for `ValidatorSig`
# in order to get a correct `hash_tree_root`.
randao_reveal: ValidatorSig(kind: OpaqueBlob)
))
)
)
body_root: hash_tree_root(BeaconBlockBody())))
# Seed RANDAO with Eth1 entropy
state.randao_mixes.fill(eth1_block_hash)
@ -286,7 +296,7 @@ proc initialize_beacon_state_from_eth1*(
if skipBlsValidation in flags or
verify_deposit_signature(preset, deposit):
pubkeyToIndex[pubkey] = state.validators.len
state.validators.add(get_validator_from_deposit(state[], deposit))
state.validators.add(get_validator_from_deposit(deposit))
state.balances.add(amount)
else:
# Invalid deposits are perfectly possible
@ -296,7 +306,7 @@ proc initialize_beacon_state_from_eth1*(
# Process activations
for validator_index in 0 ..< state.validators.len:
let
balance = state.balances[validator_index]
balance = state.balances.asSeq()[validator_index]
validator = addr state.validators[validator_index]
validator.effective_balance = min(
@ -321,9 +331,8 @@ proc initialize_hashed_beacon_state_from_eth1*(
preset, eth1_block_hash, eth1_timestamp, deposits, flags)
HashedBeaconState(data: genesisState[], root: hash_tree_root(genesisState[]))
func emptyBeaconBlockBody(): BeaconBlockBody =
# TODO: This shouldn't be necessary if OpaqueBlob is the default
BeaconBlockBody(randao_reveal: ValidatorSig(kind: OpaqueBlob))
template emptyBeaconBlockBody(): BeaconBlockBody =
BeaconBlockBody()
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/specs/phase0/beacon-chain.md#genesis-block
func get_initial_beacon_block*(state: BeaconState): SignedBeaconBlock =
@ -398,21 +407,22 @@ proc process_registry_updates*(state: var BeaconState,
# the current epoch, 1 + MAX_SEED_LOOKAHEAD epochs ahead. Thus caches
# remain valid for this epoch through though this function along with
# the rest of the epoch transition.
for index, validator in state.validators:
if is_eligible_for_activation_queue(validator):
for index in 0..<state.validators.len():
if is_eligible_for_activation_queue(state.validators.asSeq()[index]):
state.validators[index].activation_eligibility_epoch =
get_current_epoch(state) + 1
if is_active_validator(validator, get_current_epoch(state)) and
validator.effective_balance <= EJECTION_BALANCE:
if is_active_validator(state.validators.asSeq()[index], get_current_epoch(state)) and
state.validators.asSeq()[index].effective_balance <= EJECTION_BALANCE:
initiate_validator_exit(state, index.ValidatorIndex, cache)
## Queue validators eligible for activation and not dequeued for activation
var activation_queue : seq[tuple[a: Epoch, b: int]] = @[]
for index, validator in state.validators:
if is_eligible_for_activation(state, validator):
for index in 0..<state.validators.len():
let validator = unsafeAddr state.validators.asSeq()[index]
if is_eligible_for_activation(state, validator[]):
activation_queue.add (
state.validators[index].activation_eligibility_epoch, index)
validator[].activation_eligibility_epoch, index)
activation_queue.sort(system.cmp)
@ -424,8 +434,7 @@ proc process_registry_updates*(state: var BeaconState,
break
let
(_, index) = epoch_and_index
validator = addr state.validators[index]
validator.activation_epoch =
state.validators[index].activation_epoch =
compute_activation_exit_epoch(get_current_epoch(state))
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/specs/phase0/beacon-chain.md#is_valid_indexed_attestation
@ -639,29 +648,20 @@ proc process_attestation*(
? check_attestation(state, attestation, flags, cache)
let
attestation_slot = attestation.data.slot
pending_attestation = PendingAttestation(
data: attestation.data,
aggregation_bits: attestation.aggregation_bits,
inclusion_delay: state.slot - attestation_slot,
proposer_index: proposer_index.get.uint64,
)
template addPendingAttestation(attestations: typed) =
# The genericSeqAssign generated by the compiler to copy the attestation
# data sadly is a processing hotspot - the business with the addDefault
# pointer is here simply to work around the poor codegen
var pa = attestations.addDefault()
assign(pa[].aggregation_bits, attestation.aggregation_bits)
pa[].data = attestation.data
pa[].inclusion_delay = state.slot - attestation.data.slot
pa[].proposer_index = proposer_index.get().uint64
if attestation.data.target.epoch == get_current_epoch(state):
trace "current_epoch_attestations.add",
attestation = shortLog(attestation),
pending_attestation = shortLog(pending_attestation),
indices = get_attesting_indices(
state, attestation.data, attestation.aggregation_bits, cache).len
state.current_epoch_attestations.add(pending_attestation)
addPendingAttestation(state.current_epoch_attestations)
else:
trace "previous_epoch_attestations.add",
attestation = shortLog(attestation),
pending_attestation = shortLog(pending_attestation),
indices = get_attesting_indices(
state, attestation.data, attestation.aggregation_bits, cache).len
state.previous_epoch_attestations.add(pending_attestation)
addPendingAttestation(state.previous_epoch_attestations)
ok()

View File

@ -46,31 +46,23 @@ const
# RawPrivKeySize* = 48 for Miracl / 32 for BLST
type
BlsValueType* = enum
Real
OpaqueBlob
# BLS deserialization is a bit slow, so we deserialize public keys and
# signatures lazily - this helps operations like comparisons and hashes to
# be fast (which is important), makes loading blocks and states fast, and
# allows invalid values in the SSZ byte stream, which is valid from an SSZ
# point of view - the invalid values are later processed to
ValidatorPubKey* = object
blob*: array[RawPubKeySize, byte]
BlsValue*[N: static int, T: blscurve.PublicKey or blscurve.Signature] = object
# Invalid BLS values may appear in SSZ blobs, their validity being checked
# in consensus rather than SSZ decoding, thus we use a variant to hold either
case kind*: BlsValueType
of Real:
blsValue*: T
of OpaqueBlob:
blob*: array[N, byte]
ValidatorPubKey* = BlsValue[RawPubKeySize, blscurve.PublicKey]
ValidatorSig* = object
blob*: array[RawSigSize, byte]
ValidatorPrivKey* = distinct blscurve.SecretKey
ValidatorSig* = BlsValue[RawSigSize, blscurve.Signature]
BlsCurveType* = ValidatorPrivKey | ValidatorPubKey | ValidatorSig
BlsResult*[T] = Result[T, cstring]
RandomSourceDepleted* = object of CatchableError
TrustedSig* = object
data*: array[RawSigSize, byte]
@ -83,52 +75,57 @@ export AggregateSignature
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/specs/phase0/beacon-chain.md#bls-signatures
func toPubKey*(privkey: ValidatorPrivKey): ValidatorPubKey =
## Create a private key from a public key
# Un-specced in either hash-to-curve or Eth2 result.kind = Real
let ok = result.blsValue.publicFromSecret(SecretKey privkey)
## Derive a public key from a private key
# Un-specced in either hash-to-curve or Eth2
var pubKey: blscurve.PublicKey
let ok = publicFromSecret(pubKey, SecretKey privkey)
doAssert ok, "The validator private key was a zero key. This should never happen."
proc toRealPubKey(pubkey: ValidatorPubKey): Option[ValidatorPubKey] =
var validatorKeyCache {.threadvar.}:
Table[array[RawPubKeySize, byte], Option[ValidatorPubKey]]
ValidatorPubKey(blob: pubKey.exportRaw())
case pubkey.kind:
of Real:
return some(pubkey)
of OpaqueBlob:
validatorKeyCache.withValue(pubkey.blob, key) do:
return key[]
proc loadWithCache*(v: ValidatorPubKey): Option[blscurve.PublicKey] =
## Parse public key blob - this may fail - this function uses a cache to
## avoid the expensive deserialization - for now, external public keys only
## come from deposits in blocks - when more sources are added, the memory
## usage of the cache should be considered
var cache {.threadvar.}: Table[typeof(v.blob), blscurve.PublicKey]
# Try to get parse value from cache - if it's not in there, try to parse it -
# if that's not possible, it's broken
cache.withValue(v.blob, key) do:
return some key[]
do:
# Only valid keys are cached
var val: blscurve.PublicKey
let maybeRealKey =
if fromBytes(val, pubkey.blob):
some ValidatorPubKey(kind: Real, blsValue: val)
return
if fromBytes(val, v.blob):
some cache.mGetOrPut(v.blob, val)
else:
none ValidatorPubKey
return validatorKeyCache.mGetOrPut(pubkey.blob, maybeRealKey)
none blscurve.PublicKey
proc initPubKey*(pubkey: ValidatorPubKey): ValidatorPubKey =
# Public keys are lazy-initialized, so this needs to be called before any
# other function using the public key is tried
let key = toRealPubKey(pubkey)
if key.isNone:
return ValidatorPubKey()
key.get
proc load*(v: ValidatorSig): Option[blscurve.Signature] =
## Parse signature blob - this may fail
var parsed: blscurve.Signature
if fromBytes(parsed, v.blob):
some(parsed)
else:
none(blscurve.Signature)
func init*(agg: var AggregateSignature, sig: ValidatorSig) {.inline.}=
## Initializes an aggregate signature context
## This assumes that the signature is valid
agg.init(sig.blsValue)
agg.init(sig.load().get())
func aggregate*(agg: var AggregateSignature, sig: ValidatorSig) {.inline.}=
## Aggregate two Validator Signatures
## This assumes that they are real signatures
agg.aggregate(sig.blsValue)
## Both signatures must be valid
agg.aggregate(sig.load.get())
func finish*(agg: AggregateSignature): ValidatorSig {.inline.}=
## Canonicalize an AggregateSignature into a signature
result.kind = Real
result.blsValue.finish(agg)
var sig: blscurve.Signature
sig.finish(agg)
ValidatorSig(blob: sig.exportRaw())
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/specs/phase0/beacon-chain.md#bls-signatures
proc blsVerify*(
@ -141,19 +138,25 @@ proc blsVerify*(
## The proof-of-possession MUST be verified before calling this function.
## It is recommended to use the overload that accepts a proof-of-possession
## to enforce correct usage.
if signature.kind != Real:
# Invalid signatures are possible in deposits (discussed with Danny)
return false
let realkey = toRealPubKey(pubkey)
if realkey.isNone:
# TODO: chronicles warning
return false
let
parsedSig = signature.load()
realkey.get.blsValue.verify(message, signature.blsValue)
if parsedSig.isNone():
false
else:
let
parsedKey = pubkey.loadWithCache()
# It may happen that signatures or keys fail to parse as invalid blobs may
# be passed around - for example, the deposit contract doesn't verify
# signatures, so the loading happens lazily at verification time instead!
parsedKey.isSome() and
parsedKey.get.verify(message, parsedSig.get())
func blsSign*(privkey: ValidatorPrivKey, message: openArray[byte]): ValidatorSig =
## Computes a signature from a secret key and a message
ValidatorSig(kind: Real, blsValue: SecretKey(privkey).sign(message))
let sig = SecretKey(privkey).sign(message)
ValidatorSig(blob: sig.exportRaw())
proc blsFastAggregateVerify*(
publicKeys: openArray[ValidatorPubKey],
@ -178,16 +181,17 @@ proc blsFastAggregateVerify*(
# in blscurve which already exists internally
# - or at network/databases/serialization boundaries we do not
# allow invalid BLS objects to pollute consensus routines
if signature.kind != Real:
let parsedSig = signature.load()
if not parsedSig.isSome():
return false
var unwrapped: seq[PublicKey]
for pubkey in publicKeys:
let realkey = toRealPubKey(pubkey)
let realkey = pubkey.loadWithCache()
if realkey.isNone:
return false
unwrapped.add realkey.get.blsValue
unwrapped.add realkey.get
fastAggregateVerify(unwrapped, message, signature.blsValue)
fastAggregateVerify(unwrapped, message, parsedSig.get())
proc toGaugeValue*(hash: Eth2Digest): int64 =
# Only the last 8 bytes are taken into consideration in accordance
@ -201,12 +205,8 @@ proc toGaugeValue*(hash: Eth2Digest): int64 =
func `$`*(x: ValidatorPrivKey): string =
"<private key>"
func `$`*(x: BlsValue): string =
# The prefix must be short
# due to the mechanics of the `shortLog` function.
case x.kind
of Real: x.blsValue.toHex()
of OpaqueBlob: "r:" & x.blob.toHex()
func `$`*(x: ValidatorPubKey | ValidatorSig): string =
x.blob.toHex()
func toRaw*(x: ValidatorPrivKey): array[32, byte] =
# TODO: distinct type - see https://github.com/status-im/nim-blscurve/pull/67
@ -217,13 +217,10 @@ func toRaw*(x: ValidatorPrivKey): array[32, byte] =
let raw = SecretKey(x).exportRaw()
result[0..32-1] = raw.toOpenArray(48-32, 48-1)
func toRaw*(x: BlsValue): auto =
if x.kind == Real:
x.blsValue.exportRaw()
else:
template toRaw*(x: ValidatorPubKey | ValidatorSig): auto =
x.blob
func toRaw*(x: TrustedSig): auto =
template toRaw*(x: TrustedSig): auto =
x.data
func toHex*(x: BlsCurveType): string =
@ -236,18 +233,12 @@ func fromRaw*(T: type ValidatorPrivKey, bytes: openArray[byte]): BlsResult[T] =
else:
err "bls: invalid private key"
func fromRaw*[N, T](BT: type BlsValue[N, T], bytes: openArray[byte]): BlsResult[BT] =
# This is a workaround, so that we can deserialize the serialization of a
# default-initialized BlsValue without raising an exception
when defined(ssz_testing) or BT is ValidatorPubKey:
ok BT(kind: OpaqueBlob, blob: toArray(N, bytes))
func fromRaw*(BT: type[ValidatorPubKey | ValidatorSig], bytes: openArray[byte]): BlsResult[BT] =
# Signatures and keys are deserialized lazily
if bytes.len() != sizeof(BT):
err "bls: invalid bls length"
else:
# Try if valid BLS value
var val: T
if fromBytes(val, bytes):
ok BT(kind: Real, blsValue: val)
else:
ok BT(kind: OpaqueBlob, blob: toArray(N, bytes))
ok BT(blob: toArray(sizeof(BT), bytes))
func fromHex*(T: type BlsCurveType, hexStr: string): BlsResult[T] {.inline.} =
## Initialize a BLSValue from its hex representation
@ -256,27 +247,17 @@ func fromHex*(T: type BlsCurveType, hexStr: string): BlsResult[T] {.inline.} =
except ValueError:
err "bls: cannot parse value"
func `==`*(a, b: BlsValue): bool =
# The assumption here is that converting to raw is mostly fast!
case a.kind
of Real:
if a.kind == b.kind:
a.blsValue == b.blsValue
else:
a.toRaw() == b.blob
of OpaqueBlob:
if a.kind == b.kind:
a.blob == b.blob
else:
a.blob == b.toRaw()
func `==`*(a, b: ValidatorPubKey | ValidatorSig): bool =
equalMem(unsafeAddr a.blob[0], unsafeAddr b.blob[0], sizeof(a.blob))
# Hashing
# ----------------------------------------------------------------------
template hash*(x: BlsCurveType): Hash =
# TODO: prevent using secret keys
bind toRaw
hash(toRaw(x))
template hash*(x: ValidatorPubKey | ValidatorSig): Hash =
static: doAssert sizeof(Hash) <= x.blob.len div 2
# We use rough "middle" of blob for the hash, assuming this is where most of
# the entropy is found
cast[ptr Hash](unsafeAddr x.blob[x.blob.len div 2])[]
# Serialization
# ----------------------------------------------------------------------
@ -323,7 +304,7 @@ proc readValue*(reader: var JsonReader, value: var ValidatorPrivKey)
# TODO: Can we provide better diagnostic?
raiseUnexpectedValue(reader, "Valid hex-encoded private key expected")
template fromSszBytes*(T: type BlsValue, bytes: openArray[byte]): auto =
template fromSszBytes*(T: type[ValidatorPubKey | ValidatorSig], bytes: openArray[byte]): auto =
let v = fromRaw(T, bytes)
if v.isErr:
raise newException(MalformedSszError, $v.error)
@ -332,15 +313,10 @@ template fromSszBytes*(T: type BlsValue, bytes: openArray[byte]): auto =
# Logging
# ----------------------------------------------------------------------
func shortLog*(x: BlsValue): string =
func shortLog*(x: ValidatorPubKey | ValidatorSig): string =
## Logging for wrapped BLS types
## that may contain valid or non-validated data
# The prefix must be short
# due to the mechanics of the `shortLog` function.
if x.kind == Real:
byteutils.toHex(x.blsValue.exportRaw().toOpenArray(0, 3))
else:
"r:" & byteutils.toHex(x.blob.toOpenArray(0, 3))
byteutils.toHex(x.blob.toOpenArray(0, 3))
func shortLog*(x: ValidatorPrivKey): string =
## Logging for raw unwrapped BLS types

View File

@ -538,9 +538,6 @@ type
broadcastStartEpoch*: Epoch
probeEpoch*: Epoch
func shortValidatorKey*(state: BeaconState, validatorIdx: int): string =
($state.validators[validatorIdx].pubkey)[0..7]
func getDepositMessage*(depositData: DepositData): DepositMessage =
result.pubkey = depositData.pubkey
result.amount = depositData.amount

View File

@ -99,10 +99,12 @@ template withEth2Hash*(body: untyped): Eth2Digest =
body
finish(h)
func hash*(x: Eth2Digest): Hash =
template hash*(x: Eth2Digest): Hash =
## Hash for digests for Nim hash tables
# Stub for BeaconChainDB
# digests are already good hashes
cast[ptr Hash](unsafeAddr x.data[0])[]
# We just slice the first 4 or 8 bytes of the block hash
# depending of if we are on a 32 or 64-bit platform
result = cast[ptr Hash](unsafeAddr x)[]
func `==`*(a, b: Eth2Digest): bool =
# nimcrypto uses a constant-time comparison for all MDigest types which for
# Eth2Digest is unnecessary - the type should never hold a secret!
equalMem(unsafeAddr a.data[0], unsafeAddr b.data[0], sizeof(a.data))

View File

@ -60,13 +60,14 @@ func is_active_validator*(validator: Validator, epoch: Epoch): bool =
func get_active_validator_indices*(state: BeaconState, epoch: Epoch):
seq[ValidatorIndex] =
## Return the sequence of active validator indices at ``epoch``.
for idx, val in state.validators:
if is_active_validator(val, epoch):
result = newSeqOfCap[ValidatorIndex](state.validators.len)
for idx in 0..<state.validators.len:
if is_active_validator(state.validators[idx], epoch):
result.add idx.ValidatorIndex
func get_active_validator_indices_len*(state: BeaconState, epoch: Epoch): uint64 =
for idx, val in state.validators:
if is_active_validator(val, epoch):
for idx in 0..<state.validators.len:
if is_active_validator(state.validators[idx], epoch):
inc result
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/specs/phase0/beacon-chain.md#get_current_epoch

View File

@ -16,6 +16,27 @@
# * When updating the code, add TODO sections to mark where there are clear
# improvements to be made - other than that, keep things similar to spec unless
# motivated by security or performance considerations
#
# Performance notes:
# * The state transition is used in two contexts: to verify that incoming blocks
# are correct and to replay existing blocks from database. Incoming blocks
# are processed one-by-one while replay happens multiple blocks at a time.
# * Although signature verification is the slowest operation in the state
# state transition, we skip it during replay - this is also when we repeatedly
# call the state transition, making the non-signature part of the code
# important from a performance point of view.
# * It's important to start with a prefilled cache - generating the shuffled
# list of active validators is generally very slow.
# * Throughout, the code is affected by inefficient for loop codegen, meaning
# that we have to iterate over indices and pick out the value manually:
# https://github.com/nim-lang/Nim/issues/14421
# * Throughout, we're affected by inefficient `let` borrowing, meaning we
# often have to take the address of a sequence item due to the above - look
# for `let ... = unsafeAddr sequence[idx]`
# * Throughout, we're affected by the overloading rules that prefer a `var`
# overload to a non-var overload - look for `asSeq()` - when the `var`
# overload is used, the hash tree cache is cleared, which, aside from being
# slow itself, causes additional processing to recalculate the merkle tree.
{.push raises: [Defect].}

View File

@ -52,7 +52,7 @@ func process_block_header*(
return err("process_block_header: previous block root mismatch")
# Verify proposer is not slashed
if state.validators[proposer_index.get].slashed:
if state.validators.asSeq()[blck.proposer_index].slashed:
return err("process_block_header: proposer slashed")
# Cache current block as the new latest block
@ -86,7 +86,7 @@ proc process_randao(
epoch = state.get_current_epoch()
if skipBLSValidation notin flags:
let proposer_pubkey = state.validators[proposer_index.get].pubkey
let proposer_pubkey = state.validators.asSeq()[proposer_index.get].pubkey
if not verify_epoch_signature(
state.fork, state.genesis_validators_root, epoch, proposer_pubkey,
@ -149,8 +149,8 @@ proc check_proposer_slashing*(
return err("check_proposer_slashing: headers not different")
# Verify the proposer is slashable
let proposer = state.validators[header_1.proposer_index]
if not is_slashable_validator(proposer, get_current_epoch(state)):
let proposer = unsafeAddr state.validators.asSeq()[header_1.proposer_index]
if not is_slashable_validator(proposer[], get_current_epoch(state)):
return err("check_proposer_slashing: slashed proposer")
# Verify signatures
@ -159,7 +159,7 @@ proc check_proposer_slashing*(
proposer_slashing.signed_header_2]:
if not verify_block_signature(
state.fork, state.genesis_validators_root, signed_header.message.slot,
signed_header.message, proposer.pubkey, signed_header.signature):
signed_header.message, proposer[].pubkey, signed_header.signature):
return err("check_proposer_slashing: invalid signature")
ok()
@ -214,7 +214,7 @@ proc check_attester_slashing*(
toHashSet(attestation_1.attesting_indices.asSeq),
toHashSet(attestation_2.attesting_indices.asSeq)).items), system.cmp):
if is_slashable_validator(
state.validators[index], get_current_epoch(state)):
state.validators.asSeq()[index], get_current_epoch(state)):
slashed_indices.add index.ValidatorIndex
if slashed_indices.len == 0:
return err("Attester slashing: Trying to slash participant(s) twice")
@ -241,7 +241,7 @@ proc process_attester_slashing*(
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/specs/phase0/beacon-chain.md#voluntary-exits
proc check_voluntary_exit*(
state: var BeaconState,
state: BeaconState,
signed_voluntary_exit: SignedVoluntaryExit,
flags: UpdateFlags): Result[void, cstring] {.nbench.} =
@ -251,14 +251,14 @@ proc check_voluntary_exit*(
if voluntary_exit.validator_index >= state.validators.lenu64:
return err("Exit: invalid validator index")
let validator = state.validators[voluntary_exit.validator_index]
let validator = unsafeAddr state.validators.asSeq()[voluntary_exit.validator_index]
# Verify the validator is active
if not is_active_validator(validator, get_current_epoch(state)):
if not is_active_validator(validator[], get_current_epoch(state)):
return err("Exit: validator not active")
# Verify exit has not been initiated
if validator.exit_epoch != FAR_FUTURE_EPOCH:
if validator[].exit_epoch != FAR_FUTURE_EPOCH:
return err("Exit: validator has exited")
# Exits must specify an epoch when they become valid; they are not valid
@ -267,7 +267,7 @@ proc check_voluntary_exit*(
return err("Exit: exit epoch not passed")
# Verify the validator has been active long enough
if not (get_current_epoch(state) >= validator.activation_epoch +
if not (get_current_epoch(state) >= validator[].activation_epoch +
SHARD_COMMITTEE_PERIOD):
return err("Exit: not in validator set long enough")
@ -275,7 +275,7 @@ proc check_voluntary_exit*(
if skipBlsValidation notin flags:
if not verify_voluntary_exit_signature(
state.fork, state.genesis_validators_root, voluntary_exit,
validator.pubkey, signed_voluntary_exit.signature):
validator[].pubkey, signed_voluntary_exit.signature):
return err("Exit: invalid signature")
# Initiate exit
@ -284,10 +284,10 @@ proc check_voluntary_exit*(
num_validators = state.validators.len,
epoch = voluntary_exit.epoch,
current_epoch = get_current_epoch(state),
validator_slashed = validator.slashed,
validator_withdrawable_epoch = validator.withdrawable_epoch,
validator_exit_epoch = validator.exit_epoch,
validator_effective_balance = validator.effective_balance
validator_slashed = validator[].slashed,
validator_withdrawable_epoch = validator[].withdrawable_epoch,
validator_exit_epoch = validator[].exit_epoch,
validator_effective_balance = validator[].effective_balance
ok()

View File

@ -131,19 +131,20 @@ template previous_epoch_head_attesters*(v: TotalBalances): Gwei =
func init*(T: type ValidatorStatuses, state: BeaconState): T =
result.statuses = newSeq[ValidatorStatus](state.validators.len)
for i, v in state.validators:
result.statuses[i].is_slashed = v.slashed
for i in 0..<state.validators.len:
let v = unsafeAddr state.validators[i]
result.statuses[i].is_slashed = v[].slashed
result.statuses[i].is_withdrawable_in_current_epoch =
state.get_current_epoch() >= v.withdrawable_epoch
result.statuses[i].current_epoch_effective_balance = v.effective_balance
state.get_current_epoch() >= v[].withdrawable_epoch
result.statuses[i].current_epoch_effective_balance = v[].effective_balance
if v.is_active_validator(state.get_current_epoch()):
if v[].is_active_validator(state.get_current_epoch()):
result.statuses[i].is_active_in_current_epoch = true
result.total_balances.current_epoch_raw += v.effective_balance
result.total_balances.current_epoch_raw += v[].effective_balance
if v.is_active_validator(state.get_previous_epoch()):
if v[].is_active_validator(state.get_previous_epoch()):
result.statuses[i].is_active_in_previous_epoch = true
result.total_balances.previous_epoch_raw += v.effective_balance
result.total_balances.previous_epoch_raw += v[].effective_balance
func add(a: var Delta, b: Delta) =
a.rewards += b.rewards
@ -515,9 +516,14 @@ func process_rewards_and_penalties(
get_attestation_deltas(state, validator_statuses)
# Here almost all balances are updated (assuming most validators are active) -
# clearing the cache becomes a bottleneck if done item by item because of the
# recursive nature of cache clearing - instead, we clear the whole cache then
# update the raw list directly
state.balances.clearCache()
for idx, v in validator_statuses.statuses:
increase_balance(state, idx.ValidatorIndex, v.delta.rewards)
decrease_balance(state, idx.ValidatorIndex, v.delta.penalties)
increase_balance(state.balances.asSeq()[idx], v.delta.rewards)
decrease_balance(state.balances.asSeq()[idx], v.delta.penalties)
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/specs/phase0/beacon-chain.md#slashings
func process_slashings*(state: var BeaconState, total_balance: Gwei) {.nbench.}=
@ -526,13 +532,14 @@ func process_slashings*(state: var BeaconState, total_balance: Gwei) {.nbench.}=
adjusted_total_slashing_balance =
min(sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER, total_balance)
for index, validator in state.validators:
if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR div 2 ==
validator.withdrawable_epoch:
for index in 0..<state.validators.len:
let validator = unsafeAddr state.validators.asSeq()[index]
if validator[].slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR div 2 ==
validator[].withdrawable_epoch:
let increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty
# numerator to avoid uint64 overflow
let penalty_numerator =
validator.effective_balance div increment *
validator[].effective_balance div increment *
adjusted_total_slashing_balance
let penalty = penalty_numerator div total_balance * increment
decrease_balance(state, index.ValidatorIndex, penalty)
@ -548,16 +555,17 @@ func process_final_updates*(state: var BeaconState) {.nbench.}=
state.eth1_data_votes = default(type state.eth1_data_votes)
# Update effective balances with hysteresis
for index, validator in state.validators:
let balance = state.balances[index]
for index in 0..<state.validators.len:
let balance = state.balances.asSeq()[index]
const
HYSTERESIS_INCREMENT =
EFFECTIVE_BALANCE_INCREMENT div HYSTERESIS_QUOTIENT
DOWNWARD_THRESHOLD =
HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER
UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER
if balance + DOWNWARD_THRESHOLD < validator.effective_balance or
validator.effective_balance + UPWARD_THRESHOLD < balance:
let effective_balance = state.validators.asSeq()[index].effective_balance
if balance + DOWNWARD_THRESHOLD < effective_balance or
effective_balance + UPWARD_THRESHOLD < balance:
state.validators[index].effective_balance =
min(
balance - balance mod EFFECTIVE_BALANCE_INCREMENT,
@ -579,9 +587,10 @@ func process_final_updates*(state: var BeaconState) {.nbench.}=
state.historical_roots.add hash_tree_root(
[hash_tree_root(state.block_roots), hash_tree_root(state.state_roots)])
# Rotate current/previous epoch attestations
state.previous_epoch_attestations = state.current_epoch_attestations
state.current_epoch_attestations = default(type state.current_epoch_attestations)
# Rotate current/previous epoch attestations - using swap avoids copying all
# elements using a slow genericSeqAssign
state.previous_epoch_attestations.clear()
swap(state.previous_epoch_attestations, state.current_epoch_attestations)
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/specs/phase0/beacon-chain.md#epoch-processing
proc process_epoch*(state: var BeaconState, updateFlags: UpdateFlags,

View File

@ -278,31 +278,28 @@ func compute_shuffled_index(
doAssert index < index_count
var
pivot_buffer: array[(32+1), byte]
source_buffer: array[(32+1+4), byte]
source_buffer {.noinit.}: array[(32+1+4), byte]
cur_idx_permuted = index
pivot_buffer[0..31] = seed.data
source_buffer[0..31] = seed.data
# Swap or not (https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf)
# See the 'generalized domain' algorithm on page 3
for current_round in 0'u8 ..< SHUFFLE_ROUND_COUNT.uint8:
pivot_buffer[32] = current_round
source_buffer[32] = current_round
let
# If using multiple indices, can amortize this
pivot =
bytes_to_uint64(eth2digest(pivot_buffer).data.toOpenArray(0, 7)) mod
bytes_to_uint64(eth2digest(source_buffer.toOpenArray(0, 32)).data.toOpenArray(0, 7)) mod
index_count
flip = ((index_count + pivot) - cur_idx_permuted) mod index_count
position = max(cur_idx_permuted.int, flip.int)
source_buffer[33..36] = uint_to_bytes4((position div 256).uint64)
position = max(cur_idx_permuted, flip)
source_buffer[33..36] = uint_to_bytes4((position shr 8))
let
source = eth2digest(source_buffer).data
byte_value = source[(position mod 256) div 8]
byte_value = source[(position mod 256) shr 3]
bit = (byte_value shr (position mod 8)) mod 2
cur_idx_permuted = if bit != 0: flip else: cur_idx_permuted

View File

@ -213,6 +213,11 @@ proc clearCaches*(a: var HashList, dataIdx: int64) =
clearCache(a.hashes[0])
proc clearCache*(a: var HashList) =
# Clear the full merkle tree, in anticipation of a complete rewrite of the
# contents
for c in a.hashes.mitems(): clearCache(c)
proc growHashes*(a: var HashList) =
# Ensure that the hash cache is big enough for the data in the list
let
@ -246,12 +251,19 @@ template add*(x: var HashList, val: auto) =
x.growHashes()
clearCaches(x, x.data.len() - 1)
proc addDefault*(x: var HashList): ptr x.T =
distinctBase(x.data).setLen(x.data.len + 1)
x.growHashes()
clearCaches(x, x.data.len() - 1)
addr x.data[^1]
template len*(x: HashList|HashArray): auto = len(x.data)
template low*(x: HashList|HashArray): auto = low(x.data)
template high*(x: HashList|HashArray): auto = high(x.data)
template `[]`*(x: HashList|HashArray, idx: auto): auto = x.data[idx]
proc `[]`*(a: var HashArray, b: auto): var a.T =
# Access item and clear cache - use asSeq when only reading!
clearCaches(a, b.Limit)
a.data[b]
@ -260,6 +272,7 @@ proc `[]=`*(a: var HashArray, b: auto, c: auto) =
a.data[b] = c
proc `[]`*(x: var HashList, idx: auto): var x.T =
# Access item and clear cache - use asSeq when only reading!
clearCaches(x, idx.int64)
x.data[idx]
@ -274,6 +287,16 @@ template `$`*(x: HashList): auto = $(x.data)
template items* (x: HashList|HashArray): untyped = items(x.data)
template pairs* (x: HashList|HashArray): untyped = pairs(x.data)
template swap*(a, b: var HashList) =
swap(a.data, b.data)
swap(a.hashes, b.hashes)
swap(a.indices, b.indices)
template clear*(a: var HashList) =
a.data.setLen(0)
a.hashes.setLen(0)
a.indices = default(type a.indices)
template fill*(a: var HashArray, c: auto) =
mixin fill
fill(a.data, c)

View File

@ -77,7 +77,7 @@ proc addRemoteValidators*(node: BeaconNode) =
while line != "end" and running(node.vcProcess):
if node.vcProcess.outputStream.readLine(line) and line != "end":
let
key = ValidatorPubKey.fromHex(line).get().initPubKey()
key = ValidatorPubKey.fromHex(line).get()
index = findValidator(node.chainDag.headState.data.data, key)
let v = AttachedValidator(pubKey: key,
@ -473,7 +473,7 @@ proc handleProposal(node: BeaconNode, head: BlockRef, slot: Slot):
headRoot = shortLog(head.root),
slot = shortLog(slot),
proposer_index = proposer.get()[0],
proposer = shortLog(proposer.get()[1].initPubKey())
proposer = shortLog(proposer.get()[1])
return head

View File

@ -44,7 +44,7 @@ proc addRemoteValidator*(pool: var ValidatorPool,
proc getValidator*(pool: ValidatorPool,
validatorKey: ValidatorPubKey): AttachedValidator =
pool.validators.getOrDefault(validatorKey.initPubKey)
pool.validators.getOrDefault(validatorKey)
proc signWithRemoteValidator(v: AttachedValidator, data: Eth2Digest):
Future[ValidatorSig] {.async.} =

View File

@ -13,8 +13,9 @@ type Timers = enum
tInit = "Initialize DB"
tLoadBlock = "Load block from database"
tLoadState = "Load state from database"
tApplyBlock = "Apply block"
tApplyEpochBlock = "Apply epoch block"
tAdvanceSlot = "Advance slot, non-epoch"
tAdvanceEpoch = "Advance slot, epoch"
tApplyBlock = "Apply block, no slot processing"
tDbStore = "Database block store"
type
@ -49,7 +50,12 @@ type
storeBlocks* {.
defaultValue: false
desc: "Store each read block back into a separate database".}: bool
printTimes* {.
defaultValue: true
desc: "Print csv of block processing time".}: bool
resetCache* {.
defaultValue: false
desc: "Process each block with a fresh cache".}: bool
of dumpState:
stateRoot* {.
argument
@ -122,16 +128,26 @@ proc cmdBench(conf: DbConf, runtimePreset: RuntimePreset) =
withTimer(timers[tLoadState]):
discard db.getState(state[].root, state[].data, noRollback)
for b in blocks:
let
isEpoch = state[].data.get_current_epoch() !=
b.message.slot.compute_epoch_at_slot
withTimer(timers[if isEpoch: tApplyEpochBlock else: tApplyBlock]):
var cache = StateCache()
if not state_transition(runtimePreset, state[], b, cache, {}, noRollback):
for b in blocks.mitems():
while state[].data.slot < b.message.slot:
let isEpoch = state[].data.slot.epoch() != (state[].data.slot + 1).epoch
withTimer(timers[if isEpoch: tAdvanceEpoch else: tAdvanceSlot]):
let ok = process_slots(state[], state[].data.slot + 1, cache, {})
doAssert ok, "Slot processing can't fail with correct inputs"
var start = Moment.now()
withTimer(timers[tApplyBlock]):
if conf.resetCache:
cache = StateCache()
if not state_transition(
runtimePreset, state[], b, cache, {slotProcessed}, noRollback):
dump("./", b)
echo "State transition failed (!)"
quit 1
if conf.printTimes:
echo b.message.slot, ",", toHex(b.root.data), ",", nanoseconds(Moment.now() - start)
if conf.storeBlocks:
withTimer(timers[tDbStore]):
dbBenchmark.putBlock(b)

View File

@ -27,7 +27,7 @@ proc newKeyPair(rng: var BrHmacDrbgContext): BlsResult[tuple[pub: ValidatorPubKe
sk: SecretKey
pk: blscurve.PublicKey
if keyGen(ikm, pk, sk):
ok((ValidatorPubKey(kind: Real, blsValue: pk), ValidatorPrivKey(sk)))
ok((ValidatorPubKey(blob: pk.exportRaw()), ValidatorPrivKey(sk)))
else:
err "bls: cannot generate keypair"

View File

@ -36,7 +36,7 @@ func fakeRoot(index: SomeInteger): Eth2Digest =
func fakeValidator(index: SomeInteger): ValidatorPubKey =
## Create fake validator public key
result = ValidatorPubKey(kind: OpaqueBlob)
result = ValidatorPubKey()
result.blob[0 ..< 8] = (1'u64 shl 48 + index.uint64).toBytesBE()
func hexToDigest(hex: string): Eth2Digest =

View File

@ -35,7 +35,7 @@ func fakeRoot(index: SomeInteger): Eth2Digest =
func fakeValidator(index: SomeInteger): ValidatorPubKey =
## Create fake validator public key
result = ValidatorPubKey(kind: OpaqueBlob)
result = ValidatorPubKey()
result.blob[0 ..< 8] = (1'u64 shl 48 + index.uint64).toBytesBE()
suiteReport "Slashing Protection DB" & preset():

View File

@ -38,9 +38,7 @@ suiteReport "Zero signature sanity checks":
timedTest "SSZ serialization roundtrip of SignedBeaconBlockHeader":
let defaultBlockHeader = SignedBeaconBlockHeader(
signature: ValidatorSig(kind: OpaqueBlob)
)
let defaultBlockHeader = SignedBeaconBlockHeader()
check:
block: