single validator key cache
Instead of keeping a validator key list per EpochRef, this PR introduces a single shared validator key list in ChainDAG and cleans up some other ChainDAG and key-related issues. The PR does not introduce the validator key list into the state transition - because we batch-check all signatures before entering the spec code, the spec code never hits the cache. A future refactor should _probably_ remove the threadvar altogether.

There are a few other small fixes in here that make the flow easier to read:

* fix `var ChainDAGRef` -> `ChainDAGRef`
* fix `var QuarantineRef` -> `QuarantineRef`
* consistent `dag` variable name
* avoid using the threadvar pubkey cache in most cases
* better error messages in batch signature checking
parent 201d9d4056
commit abe0d7b4ae
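
To make the change easier to follow, here is a minimal, self-contained Nim sketch of the idea behind the shared cache: keys are loaded once per validator index, in deposit order, so every history can index into the same ever-growing list. The `updateValidatorKeys`/`validatorKeys` names mirror the diff below, but the `CookedPubKey`, `Validator` and `ChainDAGRef` definitions here are simplified stand-ins, not the real nimbus-eth2 types.

```nim
import std/options

type
  CookedPubKey = object            # stand-in for the deserialized BLS key type
    raw: array[48, byte]
  Validator = object               # stand-in for the spec Validator record
    pubkey: array[48, byte]
  ChainDAGRef = ref object
    validatorKeys: seq[CookedPubKey]  # single shared, append-only key cache

proc load(raw: array[48, byte]): Option[CookedPubKey] =
  # placeholder for the real BLS deserialization, which can fail
  some(CookedPubKey(raw: raw))

proc updateValidatorKeys(dag: ChainDAGRef, validators: openArray[Validator]) =
  # Deposits are processed in the same order on every fork, so the cache only
  # ever grows: append keys for any validator index we have not seen yet.
  while dag.validatorKeys.len < validators.len:
    let key = validators[dag.validatorKeys.len].pubkey.load()
    if key.isNone:
      raise newException(ValueError, "invalid pubkey in state")
    dag.validatorKeys.add(key.get())

when isMainModule:
  let dag = ChainDAGRef()
  var vals = @[Validator(), Validator()]
  dag.updateValidatorKeys(vals)   # loads two keys
  vals.add Validator()
  dag.updateValidatorKeys(vals)   # only the new, third key is loaded
  echo dag.validatorKeys.len      # 3
```

Because the cache only ever appends, an EpochRef no longer needs its own key list - it can simply borrow the DAG's, as the `validator_keys` template in the diff does.
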
@@ -40,7 +40,7 @@ type
db*: BeaconChainDB
config*: BeaconNodeConf
attachedValidators*: ref ValidatorPool
-chainDag*: ChainDAGRef
+dag*: ChainDAGRef
quarantine*: QuarantineRef
attestationPool*: ref AttestationPool
exitPool*: ref ExitPool
@@ -67,7 +67,7 @@ type
## Generally, we keep attestations only until a slot has been finalized -
## after that, they may no longer affect fork choice.

-chainDag*: ChainDAGRef
+dag*: ChainDAGRef
quarantine*: QuarantineRef

forkChoice*: ForkChoice
@@ -97,7 +97,7 @@ type
prior_seen_voluntary_exit_indices*: IntSet ##\
## Records voluntary exit indices seen.

-chainDag*: ChainDAGRef
+dag*: ChainDAGRef
quarantine*: QuarantineRef

# #############################################
@@ -115,7 +115,7 @@ type
pubKeyStr*: string

AttachedValidator* = ref object
-pubKey*: ValidatorPubKey
+pubKey*: CookedPubKey

case kind*: ValidatorKind
of inProcess:
@@ -27,22 +27,22 @@ logScope: topics = "attpool"
declareGauge attestation_pool_block_attestation_packing_time,
"Time it took to create list of attestations for block"

-proc init*(T: type AttestationPool, chainDag: ChainDAGRef, quarantine: QuarantineRef): T =
-## Initialize an AttestationPool from the chainDag `headState`
+proc init*(T: type AttestationPool, dag: ChainDAGRef, quarantine: QuarantineRef): T =
+## Initialize an AttestationPool from the dag `headState`
## The `finalized_root` works around the finalized_checkpoint of the genesis block
## holding a zero_root.
-let finalizedEpochRef = chainDag.getFinalizedEpochRef()
+let finalizedEpochRef = dag.getFinalizedEpochRef()

var forkChoice = ForkChoice.init(
finalizedEpochRef,
-chainDag.finalizedHead.blck)
+dag.finalizedHead.blck)

# Feed fork choice with unfinalized history - during startup, block pool only
# keeps track of a single history so we just need to follow it
-doAssert chainDag.heads.len == 1, "Init only supports a single history"
+doAssert dag.heads.len == 1, "Init only supports a single history"

var blocks: seq[BlockRef]
-var cur = chainDag.head
+var cur = dag.head

# When the chain is finalizing, the votes between the head block and the
# finalized checkpoint should be enough for a stable fork choice - when the
@@ -52,7 +52,7 @@ proc init*(T: type AttestationPool, chainDag: ChainDAGRef, quarantine: Quarantin
# it takes to replay that many blocks during startup and thus miss _new_
# votes.
const ForkChoiceHorizon = 256
-while cur != chainDag.finalizedHead.blck:
+while cur != dag.finalizedHead.blck:
blocks.add cur
cur = cur.parent

@@ -75,21 +75,21 @@ proc init*(T: type AttestationPool, chainDag: ChainDAGRef, quarantine: Quarantin
epochRef.current_justified_checkpoint.epoch,
epochRef.finalized_checkpoint.epoch)
else:
-epochRef = chainDag.getEpochRef(blck, blck.slot.epoch)
+epochRef = dag.getEpochRef(blck, blck.slot.epoch)
forkChoice.process_block(
-chainDag, epochRef, blck, chainDag.get(blck).data.message, blck.slot)
+dag, epochRef, blck, dag.get(blck).data.message, blck.slot)

doAssert status.isOk(), "Error in preloading the fork choice: " & $status.error

info "Fork choice initialized",
justified_epoch = getStateField(
-chainDag.headState, current_justified_checkpoint).epoch,
+dag.headState, current_justified_checkpoint).epoch,
finalized_epoch = getStateField(
-chainDag.headState, finalized_checkpoint).epoch,
-finalized_root = shortlog(chainDag.finalizedHead.blck.root)
+dag.headState, finalized_checkpoint).epoch,
+finalized_root = shortlog(dag.finalizedHead.blck.root)

T(
-chainDag: chainDag,
+dag: dag,
quarantine: quarantine,
forkChoice: forkChoice
)
@@ -100,7 +100,7 @@ proc addForkChoiceVotes(
wallSlot: Slot) =
# Add attestation votes to fork choice
if (let v = pool.forkChoice.on_attestation(
-pool.chainDag, slot, block_root, attesting_indices, wallSlot);
+pool.dag, slot, block_root, attesting_indices, wallSlot);
v.isErr):
# This indicates that the fork choice and the chain dag are out of sync -
# this is most likely the result of a bug, but we'll try to keep going -
@@ -313,7 +313,7 @@ proc addForkChoice*(pool: var AttestationPool,
wallSlot: Slot) =
## Add a verified block to the fork choice context
let state = pool.forkChoice.process_block(
-pool.chainDag, epochRef, blckRef, blck, wallSlot)
+pool.dag, epochRef, blckRef, blck, wallSlot)

if state.isErr:
# This indicates that the fork choice and the chain dag are out of sync -
@@ -592,13 +592,13 @@ proc getAggregatedAttestation*(pool: var AttestationPool,
proc selectHead*(pool: var AttestationPool, wallSlot: Slot): BlockRef =
## Trigger fork choice and returns the new head block.
## Can return `nil`
-let newHead = pool.forkChoice.get_head(pool.chainDag, wallSlot)
+let newHead = pool.forkChoice.get_head(pool.dag, wallSlot)

if newHead.isErr:
error "Couldn't select head", err = newHead.error
nil
else:
-let ret = pool.chainDag.getRef(newHead.get())
+let ret = pool.dag.getRef(newHead.get())
if ret.isNil:
# This should normally not happen, but if the chain dag and fork choice
# get out of sync, we'll need to try to download the selected head - in
@@ -60,7 +60,7 @@ template asTrusted(x: SignedBeaconBlock or SigVerifiedBeaconBlock): TrustedSigne

cast[ptr TrustedSignedBeaconBlock](signedBlock.unsafeAddr)[]

-func getOrResolve*(dag: ChainDAGRef, quarantine: var QuarantineRef, root: Eth2Digest): BlockRef =
+func getOrResolve*(dag: ChainDAGRef, quarantine: QuarantineRef, root: Eth2Digest): BlockRef =
## Fetch a block ref, or nil if not found (will be added to list of
## blocks-to-resolve)
result = dag.getRef(root)
@@ -68,7 +68,7 @@ func getOrResolve*(dag: ChainDAGRef, quarantine: var QuarantineRef, root: Eth2Di
if result.isNil:
quarantine.addMissing(root)

-proc batchVerify(quarantine: var QuarantineRef, sigs: openArray[SignatureSet]): bool =
+proc batchVerify(quarantine: QuarantineRef, sigs: openArray[SignatureSet]): bool =
var secureRandomBytes: array[32, byte]
quarantine.rng[].brHmacDrbgGenerate(secureRandomBytes)

@@ -76,12 +76,12 @@ proc batchVerify(quarantine: var QuarantineRef, sigs: openArray[SignatureSet]):
return batchVerifySerial(quarantine.sigVerifCache, sigs, secureRandomBytes)

proc addRawBlock*(
-dag: var ChainDAGRef, quarantine: var QuarantineRef,
+dag: ChainDAGRef, quarantine: QuarantineRef,
signedBlock: SignedBeaconBlock, onBlockAdded: OnBlockAdded
): Result[BlockRef, (ValidationResult, BlockError)] {.gcsafe.}

proc addResolvedBlock(
-dag: var ChainDAGRef, quarantine: var QuarantineRef,
+dag: ChainDAGRef, quarantine: QuarantineRef,
state: var StateData, trustedBlock: TrustedSignedBeaconBlock,
parent: BlockRef, cache: var StateCache,
onBlockAdded: OnBlockAdded, stateDataDur, sigVerifyDur,
@@ -120,6 +120,12 @@ proc addResolvedBlock(
# been applied but the `blck` field was still set to the parent
state.blck = blockRef

+# Regardless of the chain we're on, the deposits come in the same order so
+# as soon as we import a block, we'll also update the shared public key
+# cache
+
+dag.updateValidatorKeys(getStateField(state, validators).asSeq())
+
# Getting epochRef with the state will potentially create a new EpochRef
let
epochRef = dag.getEpochRef(state, cache)
@@ -159,11 +165,10 @@ proc addResolvedBlock(
for v in resolved:
discard addRawBlock(dag, quarantine, v, onBlockAdded)

-proc addRawBlockCheckStateTransition(
-dag: ChainDAGRef, quarantine: var QuarantineRef,
-signedBlock: SomeSignedBeaconBlock, cache: var StateCache
-): (ValidationResult, BlockError) =
-## addRawBlock - Ensure block can be applied on a state
+proc checkStateTransition(
+dag: ChainDAGRef, signedBlock: SomeSignedBeaconBlock,
+cache: var StateCache): (ValidationResult, BlockError) =
+## Ensure block can be applied on a state
func restore(v: var HashedBeaconState) =
# TODO address this ugly workaround - there should probably be a
# `state_transition` that takes a `StateData` instead and updates
@@ -183,7 +188,7 @@ proc addRawBlockCheckStateTransition(
return (ValidationResult.Reject, Invalid)
return (ValidationResult.Accept, default(BlockError))

-proc advanceClearanceState*(dag: var ChainDagRef) =
+proc advanceClearanceState*(dag: ChainDagRef) =
# When the chain is synced, the most likely block to be produced is the block
# right after head - we can exploit this assumption and advance the state
# to that slot before the block arrives, thus allowing us to do the expensive
@@ -191,20 +196,21 @@ proc advanceClearanceState*(dag: var ChainDagRef) =
# Notably, we use the clearance state here because that's where the block will
# first be seen - later, this state will be copied to the head state!
if dag.clearanceState.blck.slot == getStateField(dag.clearanceState, slot):
-let next = dag.clearanceState.blck.atSlot(
-getStateField(dag.clearanceState, slot) + 1)
+let next =
+dag.clearanceState.blck.atSlot(dag.clearanceState.blck.slot + 1)

debug "Preparing clearance state for next block", next

var cache = StateCache()
-updateStateData(dag, dag.clearanceState,next, true, cache)
+updateStateData(dag, dag.clearanceState, next, true, cache)

proc addRawBlockKnownParent(
-dag: var ChainDAGRef, quarantine: var QuarantineRef,
+dag: ChainDAGRef, quarantine: QuarantineRef,
signedBlock: SignedBeaconBlock,
parent: BlockRef,
onBlockAdded: OnBlockAdded
): Result[BlockRef, (ValidationResult, BlockError)] =
-## addRawBlock - Block has a parent
+## Add a block whose parent is known, after performing validity checks

if parent.slot >= signedBlock.message.slot:
# A block whose parent is newer than the block itself is clearly invalid -
@@ -239,27 +245,26 @@ proc addRawBlockKnownParent(
# blocks we add to the database are clean for the given state
let startTick = Moment.now()

# TODO if the block is from the future, we should not be resolving it (yet),
# but maybe we should use it as a hint that our clock is wrong?
var cache = StateCache()
updateStateData(
dag, dag.clearanceState, parent.atSlot(signedBlock.message.slot), true, cache)
let stateDataTick = Moment.now()

-# First batch verify crypto
+# First, batch-verify all signatures in block
if skipBLSValidation notin dag.updateFlags:
# TODO: remove skipBLSValidation

var sigs: seq[SignatureSet]
-if not sigs.collectSignatureSets(signedBlock, dag.clearanceState, cache):
+if sigs.collectSignatureSets(
+signedBlock, dag.validatorKeys, dag.clearanceState, cache).isErr():
# A PublicKey or Signature isn't on the BLS12-381 curve
return err((ValidationResult.Reject, Invalid))
if not quarantine.batchVerify(sigs):
return err((ValidationResult.Reject, Invalid))

let sigVerifyTick = Moment.now()
-let (valRes, blockErr) = addRawBlockCheckStateTransition(
-dag, quarantine, signedBlock.asSigVerified(), cache)
+let (valRes, blockErr) = checkStateTransition(
+dag, signedBlock.asSigVerified(), cache)
if valRes != ValidationResult.Accept:
return err((valRes, blockErr))

@@ -278,8 +283,8 @@ proc addRawBlockKnownParent(
return ok dag.clearanceState.blck

proc addRawBlockUnresolved(
-dag: var ChainDAGRef,
-quarantine: var QuarantineRef,
+dag: ChainDAGRef,
+quarantine: QuarantineRef,
signedBlock: SignedBeaconBlock
): Result[BlockRef, (ValidationResult, BlockError)] =
## addRawBlock - Block is unresolved / has no parent
@@ -317,7 +322,7 @@ proc addRawBlockUnresolved(
return err((ValidationResult.Ignore, MissingParent))

proc addRawBlock*(
-dag: var ChainDAGRef, quarantine: var QuarantineRef,
+dag: ChainDAGRef, quarantine: QuarantineRef,
signedBlock: SignedBeaconBlock,
onBlockAdded: OnBlockAdded
): Result[BlockRef, (ValidationResult, BlockError)] =
@@ -159,19 +159,21 @@ type
## block - we limit the number of held EpochRefs to put a cap on
## memory usage

+validatorKeys*: seq[CookedPubKey] ##\
+## The deposit scheme in eth2 guarantees that validators are added in the
+## same order regardless of which fork the chain takes - there may be more
+## keys in the cache than there are validators in the head state however!
+
EpochRef* = ref object
+dag*: ChainDAGRef
epoch*: Epoch
current_justified_checkpoint*: Checkpoint
finalized_checkpoint*: Checkpoint
eth1_data*: Eth1Data
eth1_deposit_index*: uint64
beacon_proposers*: array[
-SLOTS_PER_EPOCH, Option[(ValidatorIndex, ValidatorPubKey)]]
+SLOTS_PER_EPOCH, Option[ValidatorIndex]]
shuffled_active_validator_indices*: seq[ValidatorIndex]
-# This is an expensive cache that is sometimes shared among epochref
-# instances - in particular, validators keep their keys and locations in the
-# validator list in each particular history.
-validator_key_store*: (Eth2Digest, ref seq[ValidatorPubKey])

# balances, as used in fork choice
effective_balances_bytes*: seq[byte]
@@ -196,9 +198,9 @@ type
blckRef: BlockRef, blck: TrustedSignedBeaconBlock,
epochRef: EpochRef, state: HashedBeaconState) {.gcsafe, raises: [Defect].}

-template validator_keys*(e: EpochRef): untyped = e.validator_key_store[1][]
+template validator_keys*(e: EpochRef): seq[CookedPubKey] = e.dag.validatorKeys

-template head*(v: ChainDagRef): BlockRef = v.headState.blck
+template head*(dag: ChainDagRef): BlockRef = dag.headState.blck

func shortLog*(v: BlockSlot): string =
try:
@@ -24,7 +24,7 @@ func init*(T: type QuarantineRef, rng: ref BrHmacDrbgContext): T =
result = T()
result.rng = rng

-func checkMissing*(quarantine: var QuarantineRef): seq[FetchRecord] =
+func checkMissing*(quarantine: QuarantineRef): seq[FetchRecord] =
## Return a list of blocks that we should try to resolve from other client -
## to be called periodically but not too often (once per slot?)
var done: seq[Eth2Digest]
@@ -58,7 +58,7 @@ func containsOrphan*(
quarantine: QuarantineRef, signedBlock: SignedBeaconBlock): bool =
(signedBlock.root, signedBlock.signature) in quarantine.orphans

-func addMissing*(quarantine: var QuarantineRef, root: Eth2Digest) =
+func addMissing*(quarantine: QuarantineRef, root: Eth2Digest) =
## Schedule the download a the given block
# Can only request by root, not by signature, so partial match suffices
if not anyIt(quarantine.orphans.keys, it[0] == root):
@@ -66,7 +66,7 @@ func addMissing*(quarantine: var QuarantineRef, root: Eth2Digest) =
discard quarantine.missing.hasKeyOrPut(root, MissingBlock())

func removeOrphan*(
-quarantine: var QuarantineRef, signedBlock: SignedBeaconBlock) =
+quarantine: QuarantineRef, signedBlock: SignedBeaconBlock) =
quarantine.orphans.del((signedBlock.root, signedBlock.signature))

func isViableOrphan(dag: ChainDAGRef, signedBlock: SignedBeaconBlock): bool =
@@ -74,7 +74,7 @@ func isViableOrphan(dag: ChainDAGRef, signedBlock: SignedBeaconBlock): bool =
# either is the finalized block or more recent
signedBlock.message.slot > dag.finalizedHead.slot

-func removeOldBlocks(quarantine: var QuarantineRef, dag: ChainDAGRef) =
+func removeOldBlocks(quarantine: QuarantineRef, dag: ChainDAGRef) =
var oldBlocks: seq[(Eth2Digest, ValidatorSig)]

for k, v in quarantine.orphans.pairs():
@@ -84,11 +84,11 @@ func removeOldBlocks(quarantine: var QuarantineRef, dag: ChainDAGRef) =
for k in oldBlocks:
quarantine.orphans.del k

-func clearQuarantine*(quarantine: var QuarantineRef) =
+func clearQuarantine*(quarantine: QuarantineRef) =
quarantine.orphans.clear()
quarantine.missing.clear()

-func add*(quarantine: var QuarantineRef, dag: ChainDAGRef,
+func add*(quarantine: QuarantineRef, dag: ChainDAGRef,
signedBlock: SignedBeaconBlock): bool =
## Adds block to quarantine's `orphans` and `missing` lists.

@@ -9,7 +9,7 @@

import
std/[options, sequtils, tables, sets],
-stew/assign2,
+stew/[assign2, byteutils],
metrics, snappy, chronicles,
../ssz/[ssz_serialization, merkleization], ../beacon_chain_db, ../extras,
../spec/[
@@ -44,7 +44,7 @@ declareGauge beacon_processed_deposits_total, "Number of total deposits included
logScope: topics = "chaindag"

proc putBlock*(
-dag: var ChainDAGRef, signedBlock: TrustedSignedBeaconBlock) =
+dag: ChainDAGRef, signedBlock: TrustedSignedBeaconBlock) =
dag.db.putBlock(signedBlock)

proc updateStateData*(
@@ -111,12 +111,29 @@ func get_effective_balances(validators: openArray[Validator], epoch: Epoch):
if validator[].is_active_validator(epoch):
result[i] = validator[].effective_balance

-func init(
-T: type EpochRef, state: StateData, cache: var StateCache,
-prevEpoch: EpochRef): T =
+proc updateValidatorKeys*(dag: ChainDAGRef, validators: openArray[Validator]) =
+# Update validator key cache - must be called every time a state is loaded
+# from database or a block is applied successfully (from anywhere).
+# The validator key indexing is shared across all histories, and grows when a
+# validated block is added.
+while dag.validatorKeys.len() < validators.len():
+let key = validators[dag.validatorKeys.len()].pubkey.load()
+if not key.isSome():
+# State keys are verified when deposit is processed - a state should never
+# contain invalid keys
+fatal "Invalid pubkey in state",
+validator_index = dag.validatorKeys.len(),
+raw = toHex(validators[dag.validatorKeys.len()].pubkey.toRaw())
+quit 1
+dag.validatorKeys.add(key.get())
+
+func init*(
+T: type EpochRef, dag: ChainDAGRef, state: StateData,
+cache: var StateCache): T =
let
epoch = state.get_current_epoch()
epochRef = EpochRef(
+dag: dag, # This gives access to the validator pubkeys through an EpochRef
epoch: epoch,
eth1_data: getStateField(state, eth1_data),
eth1_deposit_index: getStateField(state, eth1_deposit_index),
@@ -126,47 +143,8 @@ func init(
shuffled_active_validator_indices:
cache.get_shuffled_active_validator_indices(state, epoch))
for i in 0'u64..<SLOTS_PER_EPOCH:
-let idx = get_beacon_proposer_index(
+epochRef.beacon_proposers[i] = get_beacon_proposer_index(
state.data.data, cache, epoch.compute_start_slot_at_epoch() + i)
-if idx.isSome():
-epochRef.beacon_proposers[i] =
-some((idx.get(), getStateField(state, validators)[idx.get].pubkey))
-
-# Validator sets typically don't change between epochs - a more efficient
-# scheme could be devised where parts of the validator key set is reused
-# between epochs because in a single history, the validator set only
-# grows - this however is a trivially implementable compromise.
-
-# The validators root is cached in the state, so we can quickly compare
-# it to see if it remains unchanged - effective balances in the validator
-# information may however result in a different root, even if the public
-# keys are the same
-
-let validators_root = hash_tree_root(getStateField(state, validators))
-
-template sameKeys(a: openArray[ValidatorPubKey], b: openArray[Validator]): bool =
-if a.len != b.len:
-false
-else:
-block:
-var ret = true
-for i, key in a:
-if key != b[i].pubkey:
-ret = false
-break
-ret
-
-if prevEpoch != nil and (
-prevEpoch.validator_key_store[0] == validators_root or
-sameKeys(
-prevEpoch.validator_key_store[1][],
-getStateField(state, validators).asSeq)):
-epochRef.validator_key_store =
-(validators_root, prevEpoch.validator_key_store[1])
-else:
-epochRef.validator_key_store = (
-validators_root,
-newClone(mapIt(getStateField(state, validators).toSeq, it.pubkey)))

# When fork choice runs, it will need the effective balance of the justified
# checkpoint - we pre-load the balances here to avoid rewinding the justified
@@ -300,8 +278,7 @@ func loadStateCache(
if epochRef.epoch == epoch:
for i, idx in epochRef.beacon_proposers:
cache.beacon_proposer_indices[
-epoch.compute_start_slot_at_epoch + i] =
-if idx.isSome: some(idx.get()[0]) else: none(ValidatorIndex)
+epoch.compute_start_slot_at_epoch + i] = idx

load(epoch)

@@ -422,7 +399,7 @@ proc init*(T: type ChainDAGRef,
# would be a good recovery model?
raiseAssert "No state found in head history, database corrupt?"

-let res = ChainDAGRef(
+let dag = ChainDAGRef(
blocks: blocks,
tail: tailRef,
genesis: genesisRef,
@@ -438,10 +415,12 @@ proc init*(T: type ChainDAGRef,
runtimePreset: preset,
)

-doAssert res.updateFlags in [{}, {verifyFinalization}]
+doAssert dag.updateFlags in [{}, {verifyFinalization}]

+dag.updateValidatorKeys(getStateField(dag.headState, validators).asSeq())
+
var cache: StateCache
-res.updateStateData(res.headState, headRef.atSlot(headRef.slot), false, cache)
+dag.updateStateData(dag.headState, headRef.atSlot(headRef.slot), false, cache)
# We presently save states on the epoch boundary - it means that the latest
# state we loaded might be older than head block - nonetheless, it will be
# from the same epoch as the head, thus the finalized and justified slots are
@@ -449,22 +428,22 @@ proc init*(T: type ChainDAGRef,
# When we start from a snapshot state, the `finalized_checkpoint` in the
# snapshot will point to an even older state, but we trust the tail state
# (the snapshot) to be finalized, hence the `max` expression below.
-let finalizedEpoch = max(getStateField(res.headState, finalized_checkpoint).epoch,
+let finalizedEpoch = max(getStateField(dag.headState, finalized_checkpoint).epoch,
tailRef.slot.epoch)
-res.finalizedHead = headRef.atEpochStart(finalizedEpoch)
+dag.finalizedHead = headRef.atEpochStart(finalizedEpoch)

-res.clearanceState = res.headState
+dag.clearanceState = dag.headState

# Pruning metadata
-res.lastPrunePoint = res.finalizedHead
+dag.lastPrunePoint = dag.finalizedHead

info "Block dag initialized",
head = shortLog(headRef),
-finalizedHead = shortLog(res.finalizedHead),
+finalizedHead = shortLog(dag.finalizedHead),
tail = shortLog(tailRef),
totalBlocks = blocks.len

-res
+dag

func getEpochRef*(
dag: ChainDAGRef, state: StateData, cache: var StateCache): EpochRef =
@@ -474,12 +453,7 @@ func getEpochRef*(

var epochRef = dag.findEpochRef(blck, epoch)
if epochRef == nil:
-let
-ancestor = blck.epochAncestor(epoch)
-prevEpochRef = if epoch < 1: nil
-else: dag.findEpochRef(blck, epoch - 1)
-
-epochRef = EpochRef.init(state, cache, prevEpochRef)
+epochRef = EpochRef.init(dag, state, cache)

if epoch >= dag.finalizedHead.slot.epoch():
# Only cache epoch information for unfinalized blocks - earlier states
@@ -499,21 +473,10 @@ func getEpochRef*(
if candidate[1].epoch < dag.epochRefs[oldest][1].epoch:
oldest = x

+let
+ancestor = blck.epochAncestor(epoch)
dag.epochRefs[oldest] = (ancestor.blck, epochRef)

-# Because key stores are additive lists, we can use a newer list whereever an
-# older list is expected - all indices in the new list will be valid for the
-# old list also
-if epoch > 0:
-var cur = ancestor.blck.epochAncestor(epoch - 1)
-while cur.slot >= dag.finalizedHead.slot:
-let er = dag.findEpochRef(cur.blck, cur.slot.epoch)
-if er != nil:
-er.validator_key_store = epochRef.validator_key_store
-if cur.slot.epoch == 0:
-break
-cur = cur.blck.epochAncestor(cur.slot.epoch - 1)
-
epochRef

proc getEpochRef*(dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): EpochRef =
@@ -552,6 +515,11 @@ proc getState(
state.blck = blck
state.data.root = stateRoot

+# In case a newer state is loaded from database than we previously knew - this
+# is in theory possible if the head state we load on init is older than
+# some other random known state in the database
+dag.updateValidatorKeys(getStateField(state, validators).asSeq())
+
true

func stateCheckpoint*(bs: BlockSlot): BlockSlot =
@@ -725,6 +693,9 @@ proc applyBlock(
if ok:
state.blck = blck.refs

+# New validators might have been added by block (on startup for example)
+dag.updateValidatorKeys(getStateField(state, validators).asSeq())
+
ok

proc updateStateData*(
@@ -993,7 +964,7 @@ proc pruneStateCachesDAG*(dag: ChainDAGRef) =
proc updateHead*(
dag: ChainDAGRef,
newHead: BlockRef,
-quarantine: var QuarantineRef) =
+quarantine: QuarantineRef) =
## Update what we consider to be the current head, as given by the fork
## choice.
##
@@ -1168,10 +1139,19 @@ func getGenesisBlockSlot*(dag: ChainDAGRef): BlockSlot =
BlockSlot(blck: dag.genesis, slot: GENESIS_SLOT)

proc getProposer*(
-dag: ChainDAGRef, head: BlockRef, slot: Slot):
-Option[(ValidatorIndex, ValidatorPubKey)] =
+dag: ChainDAGRef, head: BlockRef, slot: Slot): Option[ValidatorIndex] =
let
epochRef = dag.getEpochRef(head, slot.compute_epoch_at_slot())
slotInEpoch = slot - slot.compute_epoch_at_slot().compute_start_slot_at_epoch()

-epochRef.beacon_proposers[slotInEpoch]
+let proposer = epochRef.beacon_proposers[slotInEpoch]
+if proposer.isSome():
+if proposer.get().uint64 >= dag.validatorKeys.lenu64():
+# Sanity check - it should never happen that the key cache doesn't contain
+# a key for the selected proposer - that would mean that we somehow
+# created validators in the state without updating the cache!
+warn "Proposer key not found",
+keys = dag.validatorKeys.lenu64(), proposer = proposer.get()
+return none(ValidatorIndex)
+
+proposer
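
As a rough illustration of what the `Option[ValidatorIndex]`-only proposer table means for callers (a sketch using the same kind of simplified stand-in types as earlier, not the actual nimbus-eth2 API): the pubkey is no longer stored per slot, so code that needs it resolves the index against the shared cache and treats a missing key as an error.

```nim
import std/options

type
  ValidatorIndex = distinct uint64
  CookedPubKey = object               # simplified stand-in for the cooked BLS key
    raw: array[48, byte]
  ChainDAGRef = ref object
    validatorKeys: seq[CookedPubKey]  # the single shared key cache

proc pubkeyForProposer(dag: ChainDAGRef,
                       proposer: Option[ValidatorIndex]): Option[CookedPubKey] =
  ## Resolve a proposer index against the shared key cache; a missing key
  ## would mean a validator was added to the state without updating the cache.
  if proposer.isNone:
    return none(CookedPubKey)
  let idx = int(uint64(proposer.get()))
  if idx >= dag.validatorKeys.len:
    return none(CookedPubKey)
  some(dag.validatorKeys[idx])
```
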
@@ -27,8 +27,8 @@ const
VOLUNTARY_EXITS_BOUND* = MAX_VOLUNTARY_EXITS * 2

proc init*(
-T: type ExitPool, chainDag: ChainDAGRef, quarantine: QuarantineRef): T =
-## Initialize an ExitPool from the chainDag `headState`
+T: type ExitPool, dag: ChainDAGRef, quarantine: QuarantineRef): T =
+## Initialize an ExitPool from the dag `headState`
T(
# Allow for filtering out some exit messages during block production
attester_slashings:
@@ -37,7 +37,7 @@ proc init*(
initDeque[ProposerSlashing](initialSize = PROPOSER_SLASHINGS_BOUND.int),
voluntary_exits:
initDeque[SignedVoluntaryExit](initialSize = VOLUNTARY_EXITS_BOUND.int),
-chainDag: chainDag,
+dag: dag,
quarantine: quarantine
)

@@ -111,7 +111,7 @@ func getExitMessagesForBlock[T](

if allIt(
getValidatorIndices(exit_message),
-getStateField(pool.chainDag.headState, validators)[it].exit_epoch !=
+getStateField(pool.dag.headState, validators)[it].exit_epoch !=
FAR_FUTURE_EPOCH):
# A beacon block exit message already targeted all these validators
continue
@@ -8,7 +8,7 @@
{.push raises: [Defect].}

import
-std/[algorithm, intsets, sequtils],
+std/[intsets],
chronicles,
../spec/[
crypto, datatypes, digest, helpers, network, presets, signatures,
@@ -112,61 +112,6 @@ func get_attesting_indices*(epochRef: EpochRef,
for idx in get_attesting_indices(epochRef, data, bits):
result.add(idx)

-# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#get_indexed_attestation
-func get_indexed_attestation*(epochRef: EpochRef, attestation: Attestation): IndexedAttestation =
-# Return the indexed attestation corresponding to ``attestation``.
-let
-attesting_indices =
-get_attesting_indices(
-epochRef, attestation.data, attestation.aggregation_bits)
-
-IndexedAttestation(
-attesting_indices:
-List[uint64, Limit MAX_VALIDATORS_PER_COMMITTEE].init(
-sorted(mapIt(attesting_indices, it.uint64), system.cmp)),
-data: attestation.data,
-signature: attestation.signature
-)
-
-# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#is_valid_indexed_attestation
-proc is_valid_indexed_attestation*(
-fork: Fork, genesis_validators_root: Eth2Digest,
-epochRef: EpochRef, indexed_attestation: SomeIndexedAttestation,
-flags: UpdateFlags): Result[void, cstring] =
-# Check if ``indexed_attestation`` is not empty, has sorted and unique
-# indices and has a valid aggregate signature.
-
-template is_sorted_and_unique(s: untyped): bool =
-var res = true
-for i in 1 ..< s.len:
-if s[i - 1].uint64 >= s[i].uint64:
-res = false
-break
-res
-
-if len(indexed_attestation.attesting_indices) == 0:
-return err("indexed_attestation: no attesting indices")
-
-# Not from spec, but this function gets used in front-line roles, not just
-# behind firewall.
-let num_validators = epochRef.validator_keys.lenu64
-if anyIt(indexed_attestation.attesting_indices, it >= num_validators):
-return err("indexed attestation: not all indices valid validators")
-
-if not is_sorted_and_unique(indexed_attestation.attesting_indices):
-return err("indexed attestation: indices not sorted and unique")
-
-# Verify aggregate signature
-if not (skipBLSValidation in flags or indexed_attestation.signature is TrustedSig):
-let pubkeys = mapIt(
-indexed_attestation.attesting_indices, epochRef.validator_keys[it])
-if not verify_attestation_signature(
-fork, genesis_validators_root, indexed_attestation.data,
-pubkeys, indexed_attestation.signature):
-return err("indexed attestation: signature verification failure")
-
-ok()
-
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#is_valid_indexed_attestation
proc is_valid_indexed_attestation*(
fork: Fork, genesis_validators_root: Eth2Digest,
@@ -184,7 +129,7 @@ proc is_valid_indexed_attestation*(
# Verify aggregate signature
if not (skipBLSValidation in flags or attestation.signature is TrustedSig):
var
-pubkeys = newSeqOfCap[ValidatorPubKey](sigs)
+pubkeys = newSeqOfCap[CookedPubKey](sigs)
for index in get_attesting_indices(
epochRef, attestation.data, attestation.aggregation_bits):
pubkeys.add(epochRef.validator_keys[index])
@@ -232,14 +232,12 @@ proc scheduleAttestationCheck*(

doAssert batch.pendingBuffer.len < BatchedCryptoSize

-let sig = batch
+let sig = ? batch
.pendingBuffer
.addAttestation(
fork, genesis_validators_root, epochRef,
attestation
)
-if not sig.isSome():
-return err("Attestation batch validation: no attester found or invalid pubkey found.")

let fut = newFuture[BatchResult](
"batch_validation.scheduleAttestationCheck"
@@ -249,7 +247,7 @@ proc scheduleAttestationCheck*(

batchCrypto.scheduleBatch(fresh)

-return ok((fut, sig.get()))
+return ok((fut, sig))

proc scheduleAggregateChecks*(
batchCrypto: ref BatchCrypto,
@@ -290,31 +288,32 @@ proc scheduleAggregateChecks*(
# ------------------------------------------------------
let aggregator = epochRef.validator_keys[aggregate_and_proof.aggregator_index]
block:
-if not batch
+if (let v = batch
.pendingBuffer
.addSlotSignature(
fork, genesis_validators_root,
aggregate.data.slot,
aggregator,
aggregate_and_proof.selection_proof
-):
-return err("Aggregate batch validation: invalid pubkey or signature in addSlotSignature.")
+); v.isErr()):
+return err(v.error())

let futSlot = newFuture[BatchResult](
"batch_validation.scheduleAggregateChecks.slotCheck"
)
batch.resultsBuffer.add(futSlot)

block:
-if not batch
+if (let v = batch
.pendingBuffer
.addAggregateAndProofSignature(
fork, genesis_validators_root,
aggregate_and_proof,
aggregator,
signed_aggregate_and_proof.signature
-):
+); v.isErr()):
batchCrypto.scheduleBatch(fresh)
-return err("Aggregate batch validation: invalid pubkey or signature in addAggregateAndProofSignature.")
+return err(v.error())

let futAggregator = newFuture[BatchResult](
"batch_validation.scheduleAggregateChecks.aggregatorCheck"
@@ -328,9 +327,9 @@ proc scheduleAggregateChecks*(
fork, genesis_validators_root, epochRef,
aggregate
)
-if not sig.isSome():
+if sig.isErr():
batchCrypto.scheduleBatch(fresh)
-return err("Attestation batch validation: no attester found or invalid pubkey found.")
+return err(sig.error())

let futAggregate = newFuture[BatchResult](
"batch_validation.scheduleAggregateChecks.aggregateCheck"
@@ -150,7 +150,7 @@ proc storeBlock(
let
attestationPool = self.consensusManager.attestationPool

-let blck = self.consensusManager.chainDag.addRawBlock(
+let blck = self.consensusManager.dag.addRawBlock(
self.consensusManager.quarantine, signedBlock) do (
blckRef: BlockRef, trustedBlock: TrustedSignedBeaconBlock,
epochRef: EpochRef, state: HashedBeaconState):
@@ -200,7 +200,7 @@ proc processBlock(self: var BlockProcessor, entry: BlockEntry) =
beacon_store_block_duration_seconds.observe(storeBlockDur.toFloatSeconds())

debug "Block processed",
-localHeadSlot = self.consensusManager.chainDag.head.slot,
+localHeadSlot = self.consensusManager.dag.head.slot,
blockSlot = entry.blck.message.slot,
validationDur = entry.validationDur,
queueDur, storeBlockDur, updateHeadDur
@@ -21,7 +21,7 @@ type

# Validated & Verified
# ----------------------------------------------------------------
-chainDag*: ChainDAGRef
+dag*: ChainDAGRef
attestationPool*: ref AttestationPool

# Missing info
@@ -32,12 +32,12 @@ type
# ------------------------------------------------------------------------------

proc new*(T: type ConsensusManager,
-chainDag: ChainDAGRef,
+dag: ChainDAGRef,
attestationPool: ref AttestationPool,
quarantine: QuarantineRef
): ref ConsensusManager =
(ref ConsensusManager)(
-chainDag: chainDag,
+dag: dag,
attestationPool: attestationPool,
quarantine: quarantine
)
@@ -49,7 +49,7 @@ proc checkExpectedBlock(self: var ConsensusManager) =
if self.expectedBlockReceived == nil:
return

-if self.chainDag.head.slot < self.expectedSlot:
+if self.dag.head.slot < self.expectedSlot:
return

self.expectedBlockReceived.complete(true)
@@ -82,12 +82,12 @@ proc updateHead*(self: var ConsensusManager, wallSlot: Slot) =
let newHead = self.attestationPool[].selectHead(wallSlot)
if newHead.isNil():
warn "Head selection failed, using previous head",
-head = shortLog(self.chainDag.head), wallSlot
+head = shortLog(self.dag.head), wallSlot
return

# Store the new head in the chain DAG - this may cause epochs to be
# justified and finalized
-self.chainDag.updateHead(newHead, self.quarantine)
+self.dag.updateHead(newHead, self.quarantine)

self.checkExpectedBlock()

@@ -98,6 +98,6 @@ proc pruneStateCachesAndForkChoice*(self: var ConsensusManager) =
## - the attestation pool/fork choice

# Cleanup DAG & fork choice if we have a finalized head
-if self.chainDag.needStateCachesAndForkChoicePruning():
-self.chainDag.pruneStateCachesDAG()
+if self.dag.needStateCachesAndForkChoicePruning():
+self.dag.pruneStateCachesDAG()
self.attestationPool[].prune()
@@ -51,7 +51,7 @@ type

# Local sources of truth for validation
# ----------------------------------------------------------------
-chainDag*: ChainDAGRef
+dag*: ChainDAGRef
attestationPool*: ref AttestationPool
validatorPool: ref ValidatorPool

@@ -79,7 +79,7 @@ type
proc new*(T: type Eth2Processor,
doppelGangerDetectionEnabled: bool,
blockProcessor: ref BlockProcessor,
-chainDag: ChainDAGRef,
+dag: ChainDAGRef,
attestationPool: ref AttestationPool,
exitPool: ref ExitPool,
validatorPool: ref ValidatorPool,
@@ -90,7 +90,7 @@ proc new*(T: type Eth2Processor,
doppelGangerDetectionEnabled: doppelGangerDetectionEnabled,
getWallTime: getWallTime,
blockProcessor: blockProcessor,
-chainDag: chainDag,
+dag: dag,
attestationPool: attestationPool,
exitPool: exitPool,
validatorPool: validatorPool,
@@ -123,7 +123,7 @@ proc blockValidator*(

let delay = wallTime - signedBlock.message.slot.toBeaconTime

-if signedBlock.root in self.chainDag:
+if signedBlock.root in self.dag:
# The gossip algorithm itself already does one round of hashing to find
# already-seen data, but it is fairly aggressive about forgetting about
# what it has seen already
@@ -134,7 +134,7 @@ proc blockValidator*(
# decoding at this stage, which may be significant
debug "Block received", delay

-let blck = self.chainDag.isValidBeaconBlock(
+let blck = self.dag.isValidBeaconBlock(
self.quarantine, signedBlock, wallTime, {})

self.blockProcessor[].dumpBlock(signedBlock, blck)
@@ -167,13 +167,13 @@ proc checkForPotentialDoppelganger(
return

if epoch < self.doppelgangerDetection.broadcastStartEpoch:
-let tgtBlck = self.chainDag.getRef(attestation.data.target.root)
+let tgtBlck = self.dag.getRef(attestation.data.target.root)
doAssert not tgtBlck.isNil # because attestation is valid above

-let epochRef = self.chainDag.getEpochRef(
+let epochRef = self.dag.getEpochRef(
tgtBlck, attestation.data.target.epoch)
for validatorIndex in attesterIndices:
-let validatorPubkey = epochRef.validator_keys[validatorIndex]
+let validatorPubkey = epochRef.validator_keys[validatorIndex].toPubKey()
if self.doppelgangerDetectionEnabled and
self.validatorPool[].getValidator(validatorPubkey) !=
default(AttachedValidator):
@@ -49,7 +49,7 @@ func check_attestation_block(
# equal, but then we're voting for an already-finalized block which is pretty
# useless - other blocks that are not rooted in the finalized chain will be
# pruned by the chain dag, and thus we can no longer get a BlockRef for them
-if not (blck.slot > pool.chainDag.finalizedHead.slot):
+if not (blck.slot > pool.dag.finalizedHead.slot):
return err((ValidationResult.Ignore, cstring(
"Voting for already-finalized block")))

@@ -101,7 +101,7 @@ func check_beacon_and_target_block(
# The target block is returned.
# We rely on the chain DAG to have been validated, so check for the existence
# of the block in the pool.
-let blck = pool.chainDag.getRef(data.beacon_block_root)
+let blck = pool.dag.getRef(data.beacon_block_root)
if blck.isNil:
pool.quarantine.addMissing(data.beacon_block_root)
return err((ValidationResult.Ignore, cstring("Attestation block unknown")))
@@ -217,7 +217,7 @@ proc validateAttestation*(
# compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)) ==
# store.finalized_checkpoint.root
let
-epochRef = pool.chainDag.getEpochRef(target, attestation.data.target.epoch)
+epochRef = pool.dag.getEpochRef(target, attestation.data.target.epoch)

# [REJECT] The committee index is within the expected range -- i.e.
# data.index < get_committee_count_per_slot(state, data.target.epoch).
@@ -249,9 +249,9 @@ proc validateAttestation*(
"validateAttestation: number of aggregation bits and committee size mismatch")))

let
-fork = getStateField(pool.chainDag.headState, fork)
+fork = getStateField(pool.dag.headState, fork)
genesis_validators_root =
-getStateField(pool.chainDag.headState, genesis_validators_root)
+getStateField(pool.dag.headState, genesis_validators_root)
attesting_index = get_attesting_indices_one(
epochRef, attestation.data, attestation.aggregation_bits)

@@ -404,7 +404,7 @@ proc validateAggregate*(
# aggregator for the slot -- i.e. is_aggregator(state, aggregate.data.slot,
# aggregate.data.index, aggregate_and_proof.selection_proof) returns True.
let
-epochRef = pool.chainDag.getEpochRef(target, aggregate.data.target.epoch)
+epochRef = pool.dag.getEpochRef(target, aggregate.data.target.epoch)

if not is_aggregator(
epochRef, aggregate.data.slot, aggregate.data.index.CommitteeIndex,
@@ -431,9 +431,9 @@ proc validateAggregate*(
return err((ValidationResult.Reject, cstring("Invalid aggregator_index")))

let
-fork = getStateField(pool.chainDag.headState, fork)
+fork = getStateField(pool.dag.headState, fork)
genesis_validators_root =
-getStateField(pool.chainDag.headState, genesis_validators_root)
+getStateField(pool.dag.headState, genesis_validators_root)

let deferredCrypto = batchCrypto
.scheduleAggregateChecks(
@@ -505,7 +505,7 @@ proc validateAggregate*(

# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/p2p-interface.md#beacon_block
proc isValidBeaconBlock*(
-dag: ChainDAGRef, quarantine: var QuarantineRef,
+dag: ChainDAGRef, quarantine: QuarantineRef,
signed_beacon_block: SignedBeaconBlock, wallTime: BeaconTime,
flags: UpdateFlags):
Result[void, (ValidationResult, BlockError)] =
@@ -621,10 +621,9 @@ proc isValidBeaconBlock*(
warn "cannot compute proposer for message"
return err((ValidationResult.Ignore, Invalid)) # internal issue

-if proposer.get()[0] !=
-ValidatorIndex(signed_beacon_block.message.proposer_index):
+if uint64(proposer.get()) != signed_beacon_block.message.proposer_index:
notice "block had unexpected proposer",
-expected_proposer = proposer.get()[0]
+expected_proposer = proposer.get()
return err((ValidationResult.Reject, Invalid))

# [REJECT] The proposer signature, signed_beacon_block.signature, is valid
@@ -634,7 +633,7 @@ proc isValidBeaconBlock*(
getStateField(dag.headState, genesis_validators_root),
signed_beacon_block.message.slot,
signed_beacon_block.message,
-proposer.get()[1],
+dag.validatorKeys[proposer.get()],
signed_beacon_block.signature):
debug "block failed signature verification",
signature = shortLog(signed_beacon_block.signature)
@@ -671,7 +670,7 @@ proc validateAttesterSlashing*(
# validation.
let attester_slashing_validity =
check_attester_slashing(
-pool.chainDag.headState.data.data, attester_slashing, {})
+pool.dag.headState.data.data, attester_slashing, {})
if attester_slashing_validity.isErr:
return err((ValidationResult.Reject, attester_slashing_validity.error))

@@ -701,7 +700,7 @@ proc validateProposerSlashing*(
# [REJECT] All of the conditions within process_proposer_slashing pass validation.
let proposer_slashing_validity =
check_proposer_slashing(
-pool.chainDag.headState.data.data, proposer_slashing, {})
+pool.dag.headState.data.data, proposer_slashing, {})
if proposer_slashing_validity.isErr:
return err((ValidationResult.Reject, proposer_slashing_validity.error))

@@ -719,11 +718,11 @@ proc validateVoluntaryExit*(
# [IGNORE] The voluntary exit is the first valid voluntary exit received for
# the validator with index signed_voluntary_exit.message.validator_index.
if signed_voluntary_exit.message.validator_index >=
-getStateField(pool.chainDag.headState, validators).lenu64:
+getStateField(pool.dag.headState, validators).lenu64:
return err((ValidationResult.Ignore, cstring(
"validateVoluntaryExit: validator index too high")))

-# Given that getStateField(pool.chainDag.headState, validators) is a seq,
+# Given that getStateField(pool.dag.headState, validators) is a seq,
# signed_voluntary_exit.message.validator_index.int is already valid, but
# check explicitly if one changes that data structure.
if signed_voluntary_exit.message.validator_index.int in
@@ -735,7 +734,7 @@ proc validateVoluntaryExit*(
# validation.
let voluntary_exit_validity =
check_voluntary_exit(
-pool.chainDag.headState.data.data, signed_voluntary_exit, {})
+pool.dag.headState.data.data, signed_voluntary_exit, {})
if voluntary_exit_validity.isErr:
return err((ValidationResult.Reject, voluntary_exit_validity.error))

@ -237,12 +237,12 @@ proc init*(T: type BeaconNode,
|
|||
let
|
||||
chainDagFlags = if config.verifyFinalization: {verifyFinalization}
|
||||
else: {}
|
||||
chainDag = ChainDAGRef.init(runtimePreset, db, chainDagFlags)
|
||||
dag = ChainDAGRef.init(runtimePreset, db, chainDagFlags)
|
||||
beaconClock =
|
||||
BeaconClock.init(getStateField(chainDag.headState, genesis_time))
|
||||
BeaconClock.init(getStateField(dag.headState, genesis_time))
|
||||
quarantine = QuarantineRef.init(rng)
|
||||
databaseGenesisValidatorsRoot =
|
||||
getStateField(chainDag.headState, genesis_validators_root)
|
||||
getStateField(dag.headState, genesis_validators_root)
|
||||
|
||||
if genesisStateContents.len != 0:
|
||||
let
|
||||
|
@ -260,14 +260,14 @@ proc init*(T: type BeaconNode,
|
|||
currentSlot = beaconClock.now.slotOrZero
|
||||
isCheckpointStale = not is_within_weak_subjectivity_period(
|
||||
currentSlot,
|
||||
chainDag.headState,
|
||||
dag.headState,
|
||||
config.weakSubjectivityCheckpoint.get)
|
||||
|
||||
if isCheckpointStale:
|
||||
error "Weak subjectivity checkpoint is stale",
|
||||
currentSlot,
|
||||
checkpoint = config.weakSubjectivityCheckpoint.get,
|
||||
headStateSlot = getStateField(chainDag.headState, slot)
|
||||
headStateSlot = getStateField(dag.headState, slot)
|
||||
quit 1
|
||||
|
||||
if checkpointState != nil:
|
||||
|
@ -278,7 +278,7 @@ proc init*(T: type BeaconNode,
|
|||
dataDir = config.dataDir
|
||||
quit 1
|
||||
|
||||
chainDag.setTailState(checkpointState[], checkpointBlock)
|
||||
dag.setTailState(checkpointState[], checkpointBlock)
|
||||
|
||||
if eth1Monitor.isNil and
|
||||
config.web3Urls.len > 0 and
|
||||
|
@ -308,13 +308,13 @@ proc init*(T: type BeaconNode,
|
|||
nickname = if config.nodeName == "auto": shortForm(netKeys)
|
||||
else: config.nodeName
|
||||
enrForkId = getENRForkID(
|
||||
getStateField(chainDag.headState, fork),
|
||||
getStateField(chainDag.headState, genesis_validators_root))
|
||||
getStateField(dag.headState, fork),
|
||||
getStateField(dag.headState, genesis_validators_root))
|
||||
topicBeaconBlocks = getBeaconBlocksTopic(enrForkId.fork_digest)
|
||||
topicAggregateAndProofs = getAggregateAndProofsTopic(enrForkId.fork_digest)
|
||||
network = createEth2Node(rng, config, netKeys, enrForkId)
|
||||
attestationPool = newClone(AttestationPool.init(chainDag, quarantine))
|
||||
exitPool = newClone(ExitPool.init(chainDag, quarantine))
|
||||
attestationPool = newClone(AttestationPool.init(dag, quarantine))
|
||||
exitPool = newClone(ExitPool.init(dag, quarantine))
|
||||
|
||||
case config.slashingDbKind
|
||||
of SlashingDbKind.v2:
|
||||
|
@ -331,12 +331,12 @@ proc init*(T: type BeaconNode,
|
|||
let
|
||||
slashingProtectionDB =
|
||||
SlashingProtectionDB.init(
|
||||
getStateField(chainDag.headState, genesis_validators_root),
|
||||
getStateField(dag.headState, genesis_validators_root),
|
||||
config.validatorsDir(), SlashingDbName)
|
||||
validatorPool = newClone(ValidatorPool.init(slashingProtectionDB))
|
||||
|
||||
consensusManager = ConsensusManager.new(
|
||||
chainDag, attestationPool, quarantine
|
||||
dag, attestationPool, quarantine
|
||||
)
|
||||
blockProcessor = BlockProcessor.new(
|
||||
config.dumpEnabled, config.dumpDirInvalid, config.dumpDirIncoming,
|
||||
|
@ -345,7 +345,7 @@ proc init*(T: type BeaconNode,
|
|||
processor = Eth2Processor.new(
|
||||
config.doppelgangerDetection,
|
||||
blockProcessor,
|
||||
chainDag, attestationPool, exitPool, validatorPool,
|
||||
dag, attestationPool, exitPool, validatorPool,
|
||||
quarantine,
|
||||
rng,
|
||||
proc(): BeaconTime = beaconClock.now())
|
||||
|
@ -358,7 +358,7 @@ proc init*(T: type BeaconNode,
|
|||
netKeys: netKeys,
|
||||
db: db,
|
||||
config: config,
|
||||
chainDag: chainDag,
|
||||
dag: dag,
|
||||
quarantine: quarantine,
|
||||
attestationPool: attestationPool,
|
||||
attachedValidators: validatorPool,
|
||||
|
@ -404,7 +404,7 @@ proc init*(T: type BeaconNode,
|
|||
|
||||
# This merely configures the BeaconSync
|
||||
# The traffic will be started when we join the network.
|
||||
network.initBeaconSync(chainDag, enrForkId.fork_digest)
|
||||
network.initBeaconSync(dag, enrForkId.fork_digest)
|
||||
|
||||
node.updateValidatorMetrics()
|
||||
|
||||
|
@ -422,7 +422,7 @@ func verifyFinalization(node: BeaconNode, slot: Slot) =
|
|||
# during testing.
|
||||
if epoch >= 4 and slot mod SLOTS_PER_EPOCH > SETTLING_TIME_OFFSET:
|
||||
let finalizedEpoch =
|
||||
node.chainDag.finalizedHead.slot.compute_epoch_at_slot()
|
||||
node.dag.finalizedHead.slot.compute_epoch_at_slot()
|
||||
# Finalization rule 234, that has the most lag slots among the cases, sets
|
||||
# state.finalized_checkpoint = old_previous_justified_checkpoint.epoch + 3
|
||||
# and then state.slot gets incremented, to increase the maximum offset, if
|
||||
|
@ -436,9 +436,9 @@ func toBitArray(stabilitySubnets: auto): BitArray[ATTESTATION_SUBNET_COUNT] =
|
|||
proc getAttachedValidators(node: BeaconNode):
|
||||
Table[ValidatorIndex, AttachedValidator] =
|
||||
for validatorIndex in 0 ..<
|
||||
getStateField(node.chainDag.headState, validators).len:
|
||||
getStateField(node.dag.headState, validators).len:
|
||||
let attachedValidator = node.getAttachedValidator(
|
||||
getStateField(node.chainDag.headState, validators),
|
||||
getStateField(node.dag.headState, validators),
|
||||
validatorIndex.ValidatorIndex)
|
||||
if attachedValidator.isNil:
|
||||
continue
|
||||
|
@ -453,13 +453,12 @@ proc updateSubscriptionSchedule(node: BeaconNode, epoch: Epoch) {.async.} =
|
|||
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#lookahead
|
||||
# Only subscribe when this node should aggregate; libp2p broadcasting works
|
||||
# on subnet topics regardless.
|
||||
let epochRef = node.chainDag.getEpochRef(node.chainDag.head, epoch)
|
||||
let epochRef = node.dag.getEpochRef(node.dag.head, epoch)
|
||||
|
||||
# Update proposals
|
||||
node.attestationSubnets.proposingSlots[epoch mod 2] = 0
|
||||
for i in 0 ..< SLOTS_PER_EPOCH:
|
||||
let beaconProposer = epochRef.beacon_proposers[i]
|
||||
if beaconProposer.isSome and beaconProposer.get()[0] in attachedValidators:
|
||||
for i, proposer in epochRef.beacon_proposers:
|
||||
if proposer.isSome and proposer.get() in attachedValidators:
|
||||
node.attestationsubnets.proposingSlots[epoch mod 2] =
|
||||
node.attestationsubnets.proposingSlots[epoch mod 2] or (1'u32 shl i)
|
||||
|
||||
|
@ -471,9 +470,9 @@ proc updateSubscriptionSchedule(node: BeaconNode, epoch: Epoch) {.async.} =
is_aggregator(
committeeLen,
await attachedValidators[it.ValidatorIndex].getSlotSig(
getStateField(node.chainDag.headState, fork),
getStateField(node.dag.headState, fork),
getStateField(
node.chainDag.headState, genesis_validators_root), slot)))
node.dag.headState, genesis_validators_root), slot)))

node.attestationSubnets.lastCalculatedEpoch = epoch
node.attestationSubnets.attestingSlots[epoch mod 2] = 0

@ -547,15 +546,15 @@ proc cycleAttestationSubnetsPerEpoch(
# calculating future attestation subnets.

# Only know RANDAO mix, which determines shuffling seed, one epoch in
# advance. When getStateField(node.chainDag.headState, slot).epoch is
# advance. When getStateField(node.dag.headState, slot).epoch is
# ahead of wallSlot, the clock's just incorrect. If the slot's behind
# wallSlot, it would have to look more than MIN_SEED_LOOKAHEAD epochs
# ahead to compute the shuffling determining the beacon committees.
static: doAssert MIN_SEED_LOOKAHEAD == 1
if getStateField(node.chainDag.headState, slot).epoch != wallSlot.epoch:
if getStateField(node.dag.headState, slot).epoch != wallSlot.epoch:
debug "Requested attestation subnets too far in advance",
wallSlot,
stateSlot = getStateField(node.chainDag.headState, slot)
stateSlot = getStateField(node.dag.headState, slot)
return prevStabilitySubnets

# This works so long as at least one block in an epoch provides a basis for

@ -636,7 +635,7 @@ proc getInitialAggregateSubnets(node: BeaconNode): Table[SubnetId, Slot] =
# TODO when https://github.com/nim-lang/Nim/issues/15972 and
# https://github.com/nim-lang/Nim/issues/16217 are fixed, in
# Nimbus's Nim, use (_, _, subnetIndex, slot).
let epochRef = node.chainDag.getEpochRef(node.chainDag.head, epoch)
let epochRef = node.dag.getEpochRef(node.dag.head, epoch)
for (_, ci, subnet_id, slot) in get_committee_assignments(
epochRef, epoch, validatorIndices):
result.withValue(subnet_id, v) do:

@ -823,7 +822,7 @@ proc updateGossipStatus(node: BeaconNode, slot: Slot) {.raises: [Defect, Catchab
# this into the node.cycleAttestationSubnets() call.
debug "Enabling topic subscriptions",
wallSlot = slot,
headSlot = node.chainDag.head.slot,
headSlot = node.dag.head.slot,
syncQueueLen

node.setupDoppelgangerDetection(slot)

@ -837,7 +836,7 @@ proc updateGossipStatus(node: BeaconNode, slot: Slot) {.raises: [Defect, Catchab
syncQueueLen < 2 * slot.uint64:
debug "Disabling topic subscriptions",
wallSlot = slot,
headSlot = node.chainDag.head.slot,
headSlot = node.dag.head.slot,
syncQueueLen
node.removeMessageHandlers()

@ -881,13 +880,13 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
# Things we do when slot processing has ended and we're about to wait for the
# next slot

if node.chainDag.needStateCachesAndForkChoicePruning():
if node.dag.needStateCachesAndForkChoicePruning():
if node.attachedValidators.validators.len > 0:
node.attachedValidators
.slashingProtection
# pruning is only done if the DB is set to pruning mode.
.pruneAfterFinalization(
node.chainDag.finalizedHead.slot.compute_epoch_at_slot()
node.dag.finalizedHead.slot.compute_epoch_at_slot()
)

# Delay part of pruning until latency critical duties are done.

@ -910,17 +909,6 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
# the database are synced with the filesystem.
node.db.checkpoint()

# When we're not behind schedule, we'll speculatively update the clearance
# state in anticipation of receiving the next block
if node.beaconClock.now() + 500.millis < (slot+1).toBeaconTime():
# This is not a perfect location to be calling advance since the block
# for the current slot may have not arrived yet, specially when running
# a node that is not attesting - there's a small chance we'll call
# advance twice for a block and not at all for the next because of these
# timing effect - this is fine, except for the missed opportunity to
# speculate
node.chainDag.advanceClearanceState()

# -1 is a more useful output than 18446744073709551615 as an indicator of
# no future attestation/proposal known.
template displayInt64(x: Slot): int64 =

@ -942,11 +930,11 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
info "Slot end",
slot = shortLog(slot),
nextSlot = shortLog(slot + 1),
head = shortLog(node.chainDag.head),
headEpoch = shortLog(node.chainDag.head.slot.compute_epoch_at_slot()),
finalizedHead = shortLog(node.chainDag.finalizedHead.blck),
head = shortLog(node.dag.head),
headEpoch = shortLog(node.dag.head.slot.compute_epoch_at_slot()),
finalizedHead = shortLog(node.dag.finalizedHead.blck),
finalizedEpoch =
shortLog(node.chainDag.finalizedHead.blck.slot.compute_epoch_at_slot()),
shortLog(node.dag.finalizedHead.blck.slot.compute_epoch_at_slot()),
nextAttestationSlot = displayInt64(nextAttestationSlot),
nextProposalSlot = displayInt64(nextProposalSlot),
nextActionWait =

@ -960,6 +948,20 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =

node.updateGossipStatus(slot)

# When we're not behind schedule, we'll speculatively update the clearance
# state in anticipation of receiving the next block - we do it after logging
# slot end since the nextActionWaitTime can be short
let
advanceCutoff = node.beaconClock.fromNow(
slot.toBeaconTime(chronos.seconds(int(SECONDS_PER_SLOT - 1))))
if advanceCutoff.inFuture:
# We wait until there's only a second left before the next slot begins, then
# we advance the clearance state to the next slot - this gives us a high
# probability of being prepared for the block that will arrive and the
# epoch processing that follows
await sleepAsync(advanceCutoff.offset)
node.dag.advanceClearanceState()

proc onSlotStart(
node: BeaconNode, wallTime: BeaconTime, lastSlot: Slot) {.async.} =
## Called at the beginning of a slot - usually every slot, but sometimes might
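The new onSlotEnd block above sleeps until roughly one second before the next slot and only then runs the speculative advanceClearanceState call. A rough, self-contained sketch of that wait-then-advance timing using chronos directly; the 12-second slot and the procedure below are illustrative assumptions, not the node's actual scheduler:

import chronos

const SecondsPerSlot = 12  # illustrative value

proc slotEndSketch(slotStart: Moment) {.async.} =
  # Cutoff one second before the next slot starts, as in the diff above.
  let cutoff = slotStart + chronos.seconds(SecondsPerSlot - 1)
  let now = Moment.now()
  if cutoff > now:
    await sleepAsync(cutoff - now)
    # Here the real node would call node.dag.advanceClearanceState().
    echo "advancing clearance state just before the next slot"

when isMainModule:
  waitFor slotEndSketch(Moment.now())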
@ -974,7 +976,7 @@ proc onSlotStart(
# If everything was working perfectly, the slot that we should be processing
expectedSlot = lastSlot + 1
finalizedEpoch =
node.chainDag.finalizedHead.blck.slot.compute_epoch_at_slot()
node.dag.finalizedHead.blck.slot.compute_epoch_at_slot()
delay = wallTime - expectedSlot.toBeaconTime()

info "Slot start",

@ -982,9 +984,9 @@ proc onSlotStart(
wallSlot = shortLog(wallSlot),
delay = shortLog(delay),
peers = len(node.network.peerPool),
head = shortLog(node.chainDag.head),
headEpoch = shortLog(node.chainDag.head.slot.compute_epoch_at_slot()),
finalized = shortLog(node.chainDag.finalizedHead.blck),
head = shortLog(node.dag.head),
headEpoch = shortLog(node.dag.head.slot.compute_epoch_at_slot()),
finalized = shortLog(node.dag.finalizedHead.blck),
finalizedEpoch = shortLog(finalizedEpoch),
sync =
if node.syncManager.inProgress: node.syncManager.syncStatus

@ -1106,13 +1108,13 @@ proc runOnSecondLoop(node: BeaconNode) {.async.} =

proc startSyncManager(node: BeaconNode) =
func getLocalHeadSlot(): Slot =
node.chainDag.head.slot
node.dag.head.slot

proc getLocalWallSlot(): Slot =
node.beaconClock.now().slotOrZero

func getFirstSlotAtFinalizedEpoch(): Slot =
node.chainDag.finalizedHead.slot
node.dag.finalizedHead.slot

proc scoreCheck(peer: Peer): bool =
if peer.score < PeerScoreLowLimit:

@ -1299,8 +1301,8 @@ func shouldWeStartWeb3(node: BeaconNode): bool =

proc start(node: BeaconNode) {.raises: [Defect, CatchableError].} =
let
head = node.chainDag.head
finalizedHead = node.chainDag.finalizedHead
head = node.dag.head
finalizedHead = node.dag.finalizedHead
genesisTime = node.beaconClock.fromNow(toBeaconTime(Slot 0))

notice "Starting beacon node",

@ -1356,8 +1358,8 @@ proc initStatusBar(node: BeaconNode) {.raises: [Defect, ValueError].} =
error "Couldn't enable colors", err = exc.msg

proc dataResolver(expr: string): string {.raises: [Defect].} =
template justified: untyped = node.chainDag.head.atEpochStart(
getStateField(node.chainDag.headState, current_justified_checkpoint).epoch)
template justified: untyped = node.dag.head.atEpochStart(
getStateField(node.dag.headState, current_justified_checkpoint).epoch)
# TODO:
# We should introduce a general API for resolving dot expressions
# such as `db.latest_block.slot` or `metrics.connected_peers`.

@ -1370,13 +1372,13 @@ proc initStatusBar(node: BeaconNode) {.raises: [Defect, ValueError].} =
$(node.connectedPeersCount)

of "head_root":
shortLog(node.chainDag.head.root)
shortLog(node.dag.head.root)
of "head_epoch":
$(node.chainDag.head.slot.epoch)
$(node.dag.head.slot.epoch)
of "head_epoch_slot":
$(node.chainDag.head.slot mod SLOTS_PER_EPOCH)
$(node.dag.head.slot mod SLOTS_PER_EPOCH)
of "head_slot":
$(node.chainDag.head.slot)
$(node.dag.head.slot)

of "justifed_root":
shortLog(justified.blck.root)

@ -1388,13 +1390,13 @@ proc initStatusBar(node: BeaconNode) {.raises: [Defect, ValueError].} =
$(justified.slot)

of "finalized_root":
shortLog(node.chainDag.finalizedHead.blck.root)
shortLog(node.dag.finalizedHead.blck.root)
of "finalized_epoch":
$(node.chainDag.finalizedHead.slot.epoch)
$(node.dag.finalizedHead.slot.epoch)
of "finalized_epoch_slot":
$(node.chainDag.finalizedHead.slot mod SLOTS_PER_EPOCH)
$(node.dag.finalizedHead.slot mod SLOTS_PER_EPOCH)
of "finalized_slot":
$(node.chainDag.finalizedHead.slot)
$(node.dag.finalizedHead.slot)

of "epoch":
$node.currentSlot.epoch

@ -22,7 +22,7 @@ programMain:
# load and send all public keys so the BN knows for which ones to ping us
doAssert paramCount() == 2
for curr in validatorKeysFromDirs(paramStr(1), paramStr(2)):
validators[curr.toPubKey] = curr
validators[curr.toPubKey.toPubKey()] = curr
echo curr.toPubKey
echo "end"
@ -160,15 +160,15 @@ proc getStatus(validator: Validator,
proc getBlockDataFromBlockId(node: BeaconNode, blockId: string): BlockData {.raises: [Defect, CatchableError].} =
result = case blockId:
of "head":
node.chainDag.get(node.chainDag.head)
node.dag.get(node.dag.head)
of "genesis":
node.chainDag.getGenesisBlockData()
node.dag.getGenesisBlockData()
of "finalized":
node.chainDag.get(node.chainDag.finalizedHead.blck)
node.dag.get(node.dag.finalizedHead.blck)
else:
if blockId.startsWith("0x"):
let blckRoot = parseRoot(blockId)
let blockData = node.chainDag.get(blckRoot)
let blockData = node.dag.get(blckRoot)
if blockData.isNone:
raise newException(CatchableError, "Block not found")
blockData.get()

@ -176,15 +176,15 @@ proc getBlockDataFromBlockId(node: BeaconNode, blockId: string): BlockData {.rai
let blockSlot = node.getBlockSlotFromString(blockId)
if blockSlot.blck.isNil:
raise newException(CatchableError, "Block not found")
node.chainDag.get(blockSlot.blck)
node.dag.get(blockSlot.blck)

proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
raises: [Exception].} = # TODO fix json-rpc
rpcServer.rpc("get_v1_beacon_genesis") do () -> BeaconGenesisTuple:
return (
genesis_time: getStateField(node.chainDag.headState, genesis_time),
genesis_time: getStateField(node.dag.headState, genesis_time),
genesis_validators_root:
getStateField(node.chainDag.headState, genesis_validators_root),
getStateField(node.dag.headState, genesis_validators_root),
genesis_fork_version: node.runtimePreset.GENESIS_FORK_VERSION
)

@ -210,7 +210,7 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
status: Option[seq[string]]) -> seq[BeaconStatesValidatorsTuple]:
var vquery: ValidatorQuery
var squery: StatusQuery
let current_epoch = getStateField(node.chainDag.headState, slot).epoch
let current_epoch = getStateField(node.dag.headState, slot).epoch

template statusCheck(status, statusQuery, vstatus, current_epoch): bool =
if status.isNone():

@ -280,7 +280,7 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.

rpcServer.rpc("get_v1_beacon_states_stateId_validators_validatorId") do (
stateId: string, validatorId: string) -> BeaconStatesValidatorsTuple:
let current_epoch = getStateField(node.chainDag.headState, slot).epoch
let current_epoch = getStateField(node.dag.headState, slot).epoch
let vqres = createIdQuery([validatorId])
if vqres.isErr:
raise newException(CatchableError, vqres.error)

@ -396,13 +396,13 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
result.header.message.state_root = tsbb.message.state_root
result.header.message.body_root = tsbb.message.body.hash_tree_root()

result.canonical = bd.refs.isAncestorOf(node.chainDag.head)
result.canonical = bd.refs.isAncestorOf(node.dag.head)

rpcServer.rpc("post_v1_beacon_blocks") do (blck: SignedBeaconBlock) -> int:
if not(node.syncManager.inProgress):
raise newException(CatchableError,
"Beacon node is currently syncing, try again later.")
let head = node.chainDag.head
let head = node.dag.head
if head.slot >= blck.message.slot:
node.network.broadcast(getBeaconBlocksTopic(node.forkDigest), blck)
# The block failed validation, but was successfully broadcast anyway.
@ -127,9 +127,9 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
router.api(MethodGet, "/api/eth/v1/beacon/genesis") do () -> RestApiResponse:
return RestApiResponse.jsonResponse(
(
genesis_time: getStateField(node.chainDag.headState, genesis_time),
genesis_time: getStateField(node.dag.headState, genesis_time),
genesis_validators_root:
getStateField(node.chainDag.headState, genesis_validators_root),
getStateField(node.dag.headState, genesis_validators_root),
genesis_fork_version: node.runtimePreset.GENESIS_FORK_VERSION
)
)

@ -268,7 +268,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
(res1, res2)

node.withStateForBlockSlot(bslot):
let current_epoch = get_current_epoch(node.chainDag.headState)
let current_epoch = get_current_epoch(node.dag.headState)
var res: seq[RestValidator]
for index, validator in getStateField(stateData, validators).pairs():
let includeFlag =

@ -309,7 +309,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
return RestApiResponse.jsonError(Http400, InvalidValidatorIdValueError,
$validator_id.error())
node.withStateForBlockSlot(bslot):
let current_epoch = get_current_epoch(node.chainDag.headState)
let current_epoch = get_current_epoch(node.dag.headState)
let vid = validator_id.get()
case vid.kind
of ValidatorQueryKind.Key:

@ -416,7 +416,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
res2.incl(vitem)
(res1, res2)
node.withStateForBlockSlot(bslot):
let current_epoch = get_current_epoch(node.chainDag.headState)
let current_epoch = get_current_epoch(node.dag.headState)
var res: seq[RestValidatorBalance]
for index, validator in getStateField(stateData, validators).pairs():
let includeFlag =

@ -533,7 +533,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
$rslot.error())
rslot.get()
else:
node.chainDag.head.slot
node.dag.head.slot

if parent_root.isSome():
let rroot = parent_root.get()

@ -554,12 +554,12 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
let blockSlot = head.atSlot(qslot)
if isNil(blockSlot.blck):
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
node.chainDag.get(blockSlot.blck)
node.dag.get(blockSlot.blck)

return RestApiResponse.jsonResponse(
(
root: bdata.data.root,
canonical: bdata.refs.isAncestorOf(node.chainDag.head),
canonical: bdata.refs.isAncestorOf(node.dag.head),
header: (
message: (
slot: bdata.data.message.slot,

@ -589,7 +589,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
return RestApiResponse.jsonResponse(
(
root: bdata.data.root,
canonical: bdata.refs.isAncestorOf(node.chainDag.head),
canonical: bdata.refs.isAncestorOf(node.dag.head),
header: (
message: (
slot: bdata.data.message.slot,

@ -620,7 +620,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
res.root = hash_tree_root(res.message)
res

let head = node.chainDag.head
let head = node.dag.head
if not(node.isSynced(head)):
return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError)
@ -33,7 +33,7 @@ func getDepositAddress(node: BeaconNode): string =
proc installConfigApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
raises: [Exception].} = # TODO fix json-rpc
rpcServer.rpc("get_v1_config_fork_schedule") do () -> seq[Fork]:
return @[getStateField(node.chainDag.headState, fork)]
return @[getStateField(node.dag.headState, fork)]

rpcServer.rpc("get_v1_config_spec") do () -> JsonNode:
return %*{

@ -28,7 +28,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
# TODO: Implemenation needs a fix, when forks infrastructure will be
# established.
return RestApiResponse.jsonResponse(
[getStateField(node.chainDag.headState, fork)]
[getStateField(node.dag.headState, fork)]
)

router.api(MethodGet,

@ -29,4 +29,4 @@ proc installDebugApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
return stateData.data.data

rpcServer.rpc("get_v1_debug_beacon_heads") do () -> seq[tuple[root: Eth2Digest, slot: Slot]]:
return node.chainDag.heads.mapIt((it.root, it.slot))
return node.dag.heads.mapIt((it.root, it.slot))

@ -29,7 +29,7 @@ proc installDebugApiHandlers*(router: var RestRouter, node: BeaconNode) =
router.api(MethodGet,
"/api/eth/v1/debug/beacon/heads") do () -> RestApiResponse:
return RestApiResponse.jsonResponse(
node.chainDag.heads.mapIt((root: it.root, slot: it.slot))
node.dag.heads.mapIt((root: it.root, slot: it.slot))
)

router.redirect(
@ -38,14 +38,14 @@ proc installNimbusApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
## Install non-standard api handlers - some of these are used by 3rd-parties
## such as eth2stats, pending a full REST api
rpcServer.rpc("getBeaconHead") do () -> Slot:
return node.chainDag.head.slot
return node.dag.head.slot

rpcServer.rpc("getChainHead") do () -> JsonNode:
let
head = node.chainDag.head
finalized = getStateField(node.chainDag.headState, finalized_checkpoint)
head = node.dag.head
finalized = getStateField(node.dag.headState, finalized_checkpoint)
justified =
getStateField(node.chainDag.headState, current_justified_checkpoint)
getStateField(node.dag.headState, current_justified_checkpoint)
return %* {
"head_slot": head.slot,
"head_block_root": head.root.data.toHex(),

@ -103,8 +103,8 @@ proc installNimbusApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
wallSlot = node.beaconClock.now.slotOrZero
head = node.doChecksAndGetCurrentHead(wallSlot)

let proposalState = assignClone(node.chainDag.headState)
node.chainDag.withState(proposalState[], head.atSlot(wallSlot)):
let proposalState = assignClone(node.dag.headState)
node.dag.withState(proposalState[], head.atSlot(wallSlot)):
return node.getBlockProposalEth1Data(stateData)

rpcServer.rpc("debug_getChronosFutures") do () -> seq[FutureInfo]:

@ -107,14 +107,14 @@ proc toNode(v: PubSubPeer, backoff: Moment): RestPubSubPeer =

proc installNimbusApiHandlers*(router: var RestRouter, node: BeaconNode) =
router.api(MethodGet, "/api/nimbus/v1/beacon/head") do () -> RestApiResponse:
return RestApiResponse.jsonResponse(node.chainDag.head.slot)
return RestApiResponse.jsonResponse(node.dag.head.slot)

router.api(MethodGet, "/api/nimbus/v1/chain/head") do() -> RestApiResponse:
let
head = node.chainDag.head
finalized = getStateField(node.chainDag.headState, finalized_checkpoint)
head = node.dag.head
finalized = getStateField(node.dag.headState, finalized_checkpoint)
justified =
getStateField(node.chainDag.headState, current_justified_checkpoint)
getStateField(node.dag.headState, current_justified_checkpoint)
return RestApiResponse.jsonResponse(
(
head_slot: head.slot,

@ -202,8 +202,8 @@ proc installNimbusApiHandlers*(router: var RestRouter, node: BeaconNode) =
if res.isErr():
return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError)
res.get()
let proposalState = assignClone(node.chainDag.headState)
node.chainDag.withState(proposalState[], head.atSlot(wallSlot)):
let proposalState = assignClone(node.dag.headState)
node.dag.withState(proposalState[], head.atSlot(wallSlot)):
return RestApiResponse.jsonResponse(
node.getBlockProposalEth1Data(stateData))
@ -469,7 +469,7 @@ proc getRouter*(): RestRouter =

proc getCurrentHead*(node: BeaconNode,
slot: Slot): Result[BlockRef, cstring] =
let res = node.chainDag.head
let res = node.dag.head
# if not(node.isSynced(res)):
# return err("Cannot fulfill request until node is synced")
if res.slot + uint64(2 * SLOTS_PER_EPOCH) < slot:

@ -495,21 +495,21 @@ proc getBlockSlot*(node: BeaconNode,
return err("Block not found")
ok(bslot)
of StateQueryKind.Root:
let blckRef = node.chainDag.getRef(stateIdent.root)
let blckRef = node.dag.getRef(stateIdent.root)
if isNil(blckRef):
return err("Block not found")
ok(blckRef.toBlockSlot())
of StateQueryKind.Named:
case stateIdent.value
of StateIdentType.Head:
ok(node.chainDag.head.toBlockSlot())
ok(node.dag.head.toBlockSlot())
of StateIdentType.Genesis:
ok(node.chainDag.getGenesisBlockSlot())
ok(node.dag.getGenesisBlockSlot())
of StateIdentType.Finalized:
ok(node.chainDag.finalizedHead)
ok(node.dag.finalizedHead)
of StateIdentType.Justified:
ok(node.chainDag.head.atEpochStart(
getStateField(node.chainDag.headState, current_justified_checkpoint).epoch))
ok(node.dag.head.atEpochStart(
getStateField(node.dag.headState, current_justified_checkpoint).epoch))

proc getBlockDataFromBlockIdent*(node: BeaconNode,
id: BlockIdent): Result[BlockData, cstring] =

@ -517,13 +517,13 @@ proc getBlockDataFromBlockIdent*(node: BeaconNode,
of BlockQueryKind.Named:
case id.value
of BlockIdentType.Head:
ok(node.chainDag.get(node.chainDag.head))
ok(node.dag.get(node.dag.head))
of BlockIdentType.Genesis:
ok(node.chainDag.getGenesisBlockData())
ok(node.dag.getGenesisBlockData())
of BlockIdentType.Finalized:
ok(node.chainDag.get(node.chainDag.finalizedHead.blck))
ok(node.dag.get(node.dag.finalizedHead.blck))
of BlockQueryKind.Root:
let res = node.chainDag.get(id.root)
let res = node.dag.get(id.root)
if res.isNone():
return err("Block not found")
ok(res.get())

@ -532,20 +532,20 @@ proc getBlockDataFromBlockIdent*(node: BeaconNode,
let blockSlot = head.atSlot(id.slot)
if isNil(blockSlot.blck):
return err("Block not found")
ok(node.chainDag.get(blockSlot.blck))
ok(node.dag.get(blockSlot.blck))

template withStateForBlockSlot*(node: BeaconNode,
blockSlot: BlockSlot, body: untyped): untyped =
template isState(state: StateData): bool =
state.blck.atSlot(getStateField(state, slot)) == blockSlot

if isState(node.chainDag.headState):
withStateVars(node.chainDag.headState):
if isState(node.dag.headState):
withStateVars(node.dag.headState):
var cache {.inject.}: StateCache
body
else:
let rpcState = assignClone(node.chainDag.headState)
node.chainDag.withState(rpcState[], blockSlot):
let rpcState = assignClone(node.dag.headState)
node.dag.withState(rpcState[], blockSlot):
body

proc toValidatorIndex*(value: RestValidatorIndex): Result[ValidatorIndex,
@ -23,13 +23,13 @@ template withStateForStateId*(stateId: string, body: untyped): untyped =
template isState(state: StateData): bool =
state.blck.atSlot(getStateField(state, slot)) == bs

if isState(node.chainDag.headState):
withStateVars(node.chainDag.headState):
if isState(node.dag.headState):
withStateVars(node.dag.headState):
var cache {.inject.}: StateCache
body
else:
let rpcState = assignClone(node.chainDag.headState)
node.chainDag.withState(rpcState[], bs):
let rpcState = assignClone(node.dag.headState)
node.dag.withState(rpcState[], bs):
body

proc toBlockSlot*(blckRef: BlockRef): BlockSlot =

@ -45,7 +45,7 @@ func checkEpochToSlotOverflow*(epoch: Epoch) {.raises: [Defect, ValueError].} =
ValueError, "Requesting epoch for which slot would overflow")

proc doChecksAndGetCurrentHead*(node: BeaconNode, slot: Slot): BlockRef {.raises: [Defect, CatchableError].} =
result = node.chainDag.head
result = node.dag.head
if not node.isSynced(result):
raise newException(CatchableError, "Cannot fulfill request until node is synced")
# TODO for now we limit the requests arbitrarily by up to 2 epochs into the future

@ -68,18 +68,18 @@ proc getBlockSlotFromString*(node: BeaconNode, slot: string): BlockSlot {.raises
proc stateIdToBlockSlot*(node: BeaconNode, stateId: string): BlockSlot {.raises: [Defect, CatchableError].} =
case stateId:
of "head":
node.chainDag.head.toBlockSlot()
node.dag.head.toBlockSlot()
of "genesis":
node.chainDag.getGenesisBlockSlot()
node.dag.getGenesisBlockSlot()
of "finalized":
node.chainDag.finalizedHead
node.dag.finalizedHead
of "justified":
node.chainDag.head.atEpochStart(
getStateField(node.chainDag.headState, current_justified_checkpoint).epoch)
node.dag.head.atEpochStart(
getStateField(node.dag.headState, current_justified_checkpoint).epoch)
else:
if stateId.startsWith("0x"):
let blckRoot = parseRoot(stateId)
let blckRef = node.chainDag.getRef(blckRoot)
let blckRef = node.dag.getRef(blckRoot)
if blckRef.isNil:
raise newException(CatchableError, "Block not found")
blckRef.toBlockSlot()
@ -37,11 +37,11 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
slot: Slot, graffiti: GraffitiBytes, randao_reveal: ValidatorSig) -> BeaconBlock:
debug "get_v1_validator_block", slot = slot
let head = node.doChecksAndGetCurrentHead(slot)
let proposer = node.chainDag.getProposer(head, slot)
let proposer = node.dag.getProposer(head, slot)
if proposer.isNone():
raise newException(CatchableError, "could not retrieve block for slot: " & $slot)
let message = await makeBeaconBlockForHeadAndSlot(
node, randao_reveal, proposer.get()[0], graffiti, head, slot)
node, randao_reveal, proposer.get(), graffiti, head, slot)
if message.isNone():
raise newException(CatchableError, "could not retrieve block for slot: " & $slot)
return message.get()

@ -64,7 +64,7 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
debug "get_v1_validator_attestation_data", slot = slot
let
head = node.doChecksAndGetCurrentHead(slot)
epochRef = node.chainDag.getEpochRef(head, slot.epoch)
epochRef = node.dag.getEpochRef(head, slot.epoch)
return makeAttestationData(epochRef, head.atSlot(slot), committee_index)

rpcServer.rpc("get_v1_validator_aggregate_attestation") do (

@ -87,7 +87,7 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
debug "get_v1_validator_duties_attester", epoch = epoch
let
head = node.doChecksAndGetCurrentHead(epoch)
epochRef = node.chainDag.getEpochRef(head, epoch)
epochRef = node.dag.getEpochRef(head, epoch)
committees_per_slot = get_committee_count_per_slot(epochRef)
for i in 0 ..< SLOTS_PER_EPOCH:
let slot = compute_start_slot_at_epoch(epoch) + i

@ -96,7 +96,7 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
epochRef, slot, committee_index.CommitteeIndex)
for index_in_committee, validatorIdx in committee:
if validatorIdx < epochRef.validator_keys.len.ValidatorIndex:
let curr_val_pubkey = epochRef.validator_keys[validatorIdx]
let curr_val_pubkey = epochRef.validator_keys[validatorIdx].toPubKey()
if public_keys.findIt(it == curr_val_pubkey) != -1:
result.add((public_key: curr_val_pubkey,
validator_index: validatorIdx,

@ -110,11 +110,11 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
debug "get_v1_validator_duties_proposer", epoch = epoch
let
head = node.doChecksAndGetCurrentHead(epoch)
epochRef = node.chainDag.getEpochRef(head, epoch)
for i in 0 ..< SLOTS_PER_EPOCH:
if epochRef.beacon_proposers[i].isSome():
result.add((public_key: epochRef.beacon_proposers[i].get()[1],
validator_index: epochRef.beacon_proposers[i].get()[0],
epochRef = node.dag.getEpochRef(head, epoch)
for i, bp in epochRef.beacon_proposers:
if bp.isSome():
result.add((public_key: epochRef.validator_keys[bp.get()].toPubKey(),
validator_index: bp.get(),
slot: compute_start_slot_at_epoch(epoch) + i))

rpcServer.rpc("post_v1_validator_beacon_committee_subscriptions") do (

@ -141,15 +141,15 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
"Slot requested not in current or next wall-slot epoch")

if not verify_slot_signature(
getStateField(node.chainDag.headState, fork),
getStateField(node.chainDag.headState, genesis_validators_root),
getStateField(node.dag.headState, fork),
getStateField(node.dag.headState, genesis_validators_root),
slot, validator_pubkey, slot_signature):
raise newException(CatchableError,
"Invalid slot signature")

let
head = node.doChecksAndGetCurrentHead(epoch)
epochRef = node.chainDag.getEpochRef(head, epoch)
epochRef = node.dag.getEpochRef(head, epoch)
subnet_id = compute_subnet_for_attestation(
get_committee_count_per_slot(epochRef), slot, committee_index)
@ -89,11 +89,11 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
if qepoch >= Epoch(2):
qhead.atSlot(compute_start_slot_at_epoch(qepoch - 1) - 1).blck.root
else:
node.chainDag.genesis.root
node.dag.genesis.root
let duties =
block:
var res: seq[RestAttesterDuty]
let epochRef = node.chainDag.getEpochRef(qhead, qepoch)
let epochRef = node.dag.getEpochRef(qhead, qepoch)
let committees_per_slot = get_committee_count_per_slot(epochRef)
for i in 0 ..< SLOTS_PER_EPOCH:
let slot = compute_start_slot_at_epoch(qepoch) + i

@ -107,7 +107,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
if validator_index in indexList:
res.add(
RestAttesterDuty(
pubkey: validator_key,
pubkey: validator_key.toPubKey(),
validator_index: validator_index,
committee_index: CommitteeIndex(committee_index),
committee_length: lenu64(commitee),

@ -142,21 +142,20 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
if qepoch >= Epoch(2):
qhead.atSlot(compute_start_slot_at_epoch(qepoch - 1) - 1).blck.root
else:
node.chainDag.genesis.root
node.dag.genesis.root
let duties =
block:
var res: seq[RestProposerDuty]
let epochRef = node.chainDag.getEpochRef(qhead, qepoch)
let epochRef = node.dag.getEpochRef(qhead, qepoch)
# Fix for https://github.com/status-im/nimbus-eth2/issues/2488
# Slot(0) at Epoch(0) do not have a proposer.
let startSlot = if qepoch == Epoch(0): 1'u64 else: 0'u64
for i in startSlot ..< SLOTS_PER_EPOCH:
if epochRef.beacon_proposers[i].isSome():
let proposer = epochRef.beacon_proposers[i].get()
for i, bp in epochRef.beacon_proposers:
if bp.isSome():
res.add(
RestProposerDuty(
pubkey: proposer[1],
validator_index: proposer[0],
pubkey: epochRef.validator_keys[bp.get()].toPubKey(),
validator_index: bp.get(),
slot: compute_start_slot_at_epoch(qepoch) + i
)
)

@ -200,17 +199,17 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
block:
let res = node.getCurrentHead(qslot)
if res.isErr():
if not(node.isSynced(node.chainDag.head)):
if not(node.isSynced(node.dag.head)):
return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError)
else:
return RestApiResponse.jsonError(Http400, NoHeadForSlotError,
$res.error())
res.get()
let proposer = node.chainDag.getProposer(qhead, qslot)
let proposer = node.dag.getProposer(qhead, qslot)
if proposer.isNone():
return RestApiResponse.jsonError(Http400, ProposerNotFoundError)
let res = await makeBeaconBlockForHeadAndSlot(
node, qrandao, proposer.get()[0], qgraffiti, qhead, qslot)
node, qrandao, proposer.get(), qgraffiti, qhead, qslot)
if res.isNone():
return RestApiResponse.jsonError(Http400, BlockProduceError)
res.get()

@ -249,7 +248,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
if res.isErr():
return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError)
res.get()
let epochRef = node.chainDag.getEpochRef(qhead, qslot.epoch)
let epochRef = node.dag.getEpochRef(qhead, qslot.epoch)
makeAttestationData(epochRef, qhead.atSlot(qslot), qindex)
return RestApiResponse.jsonResponse(adata)

@ -330,7 +329,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
InvalidSubscriptionRequestValueError,
$dres.error())
dres.get()
if not(node.isSynced(node.chainDag.head)):
if not(node.isSynced(node.dag.head)):
return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError)

for request in requests:

@ -341,10 +340,10 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
block:
let idx = request.validator_index
if uint64(idx) >=
lenu64(getStateField(node.chainDag.headState, validators)):
lenu64(getStateField(node.dag.headState, validators)):
return RestApiResponse.jsonError(Http400,
InvalidValidatorIndexValueError)
getStateField(node.chainDag.headState, validators)[idx].pubkey
getStateField(node.dag.headState, validators)[idx].pubkey

let wallSlot = node.beaconClock.now.slotOrZero
if wallSlot > request.slot + 1:

@ -360,7 +359,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
return RestApiResponse.jsonError(Http400, NoHeadForSlotError,
$res.error())
res.get()
let epochRef = node.chainDag.getEpochRef(head, epoch)
let epochRef = node.dag.getEpochRef(head, epoch)
let subnet = uint8(compute_subnet_for_attestation(
get_committee_count_per_slot(epochRef), request.slot,
request.committee_index)
@ -820,15 +820,13 @@ proc get_next_sync_committee*(state: altair.BeaconState): SyncCommittee =
# see signatures_batch, TODO shouldn't be here
# Deposit processing ensures all keys are valid
var
aggregate_pubkey: blscurve.PublicKey
attestersAgg: AggregatePublicKey
attestersAgg.init(pubkeys[0].loadWithCache().get)
for i in 1 ..< pubkeys.len:
attestersAgg.aggregate(pubkeys[i].loadWithCache().get)
aggregate_pubkey.finish(attestersAgg)
let aggregate_pubkey = finish(attestersAgg)

var res = SyncCommittee(aggregate_pubkey:
ValidatorPubKey(blob: aggregate_pubkey.exportRaw()))
var res = SyncCommittee(aggregate_pubkey: aggregate_pubkey.toPubKey())
for i in 0 ..< SYNC_COMMITTEE_SIZE:
# obviously ineffecient
res.pubkeys[i] = pubkeys[i]

@ -25,7 +25,7 @@

import
# Standard library
std/[options, hashes, tables],
std/[options, hashes, sequtils, tables],
# Internal
./digest,
# Status

@ -46,14 +46,15 @@ const
# RawPrivKeySize* = 48 for Miracl / 32 for BLST

type
# BLS deserialization is a bit slow, so we deserialize public keys and
# signatures lazily - this helps operations like comparisons and hashes to
# be fast (which is important), makes loading blocks and states fast, and
# allows invalid values in the SSZ byte stream, which is valid from an SSZ
# point of view - the invalid values are caught later
# Raw serialized key bytes - this type is used in so as to not eagerly
# load keys - deserialization is slow, as are equality checks - however, it
# is not guaranteed that the key is valid (except in some cases, like the
# database state)
ValidatorPubKey* = object
blob*: array[RawPubKeySize, byte]

CookedPubKey* = distinct blscurve.PublicKey ## Valid deserialized key

ValidatorSig* = object
blob*: array[RawSigSize, byte]

@ -78,21 +79,37 @@ export AggregateSignature
# ----------------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#bls-signatures

func toPubKey*(privkey: ValidatorPrivKey): ValidatorPubKey =
func toPubKey*(privkey: ValidatorPrivKey): CookedPubKey =
## Derive a public key from a private key
# Un-specced in either hash-to-curve or Eth2
var pubKey: blscurve.PublicKey
let ok = publicFromSecret(pubKey, SecretKey privkey)
doAssert ok, "The validator private key was a zero key. This should never happen."

ValidatorPubKey(blob: pubKey.exportRaw())
CookedPubKey(pubKey)

proc loadWithCache*(v: ValidatorPubKey): Option[blscurve.PublicKey] =
template toRaw*(x: CookedPubKey): auto =
PublicKey(x).exportRaw()

func toPubKey*(pubKey: CookedPubKey): ValidatorPubKey =
## Derive a public key from a private key
# Un-specced in either hash-to-curve or Eth2
ValidatorPubKey(blob: pubKey.toRaw())

proc load*(v: ValidatorPubKey): Option[CookedPubKey] =
## Parse signature blob - this may fail
var val: blscurve.PublicKey
if fromBytes(val, v.blob):
some CookedPubKey(val)
else:
none CookedPubKey

proc loadWithCache*(v: ValidatorPubKey): Option[CookedPubKey] =
## Parse public key blob - this may fail - this function uses a cache to
## avoid the expensive deserialization - for now, external public keys only
## come from deposits in blocks - when more sources are added, the memory
## usage of the cache should be considered
var cache {.threadvar.}: Table[typeof(v.blob), blscurve.PublicKey]
var cache {.threadvar.}: Table[typeof(v.blob), CookedPubKey]

# Try to get parse value from cache - if it's not in there, try to parse it -
# if that's not possible, it's broken

@ -100,12 +117,10 @@ proc loadWithCache*(v: ValidatorPubKey): Option[blscurve.PublicKey] =
return some key[]
do:
# Only valid keys are cached
var val: blscurve.PublicKey
return
if fromBytes(val, v.blob):
some cache.mGetOrPut(v.blob, val)
else:
none blscurve.PublicKey
let cooked = v.load()
if cooked.isSome():
cache[v.blob] = cooked.get()
return cooked

proc load*(v: ValidatorSig): Option[CookedSig] =
## Parse signature blob - this may fail
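The load/loadWithCache pair above is the core of the lazy-deserialization scheme: raw blobs travel through SSZ and the database, and a CookedPubKey is produced once, at a validation boundary. A self-contained sketch of that parse-once shape with stand-in types (none of the names below are the real nimbus-eth2 ones):

import std/options

type
  RawKey = object                # stand-in for ValidatorPubKey: unvalidated bytes
    blob: array[48, byte]
  CookedKey = distinct RawKey    # stand-in for CookedPubKey: known-valid form

proc load(v: RawKey): Option[CookedKey] =
  # The real code runs the expensive BLS deserialization here and returns
  # none() for invalid blobs; the sketch accepts every blob.
  some(CookedKey(v))

proc toRaw(v: CookedKey): RawKey =
  RawKey(v)

when isMainModule:
  let raw = RawKey()
  let cooked = raw.load()        # parse once, keep the cooked form around
  doAssert cooked.isSome()
  discard cooked.get().toRaw()   # convert back only at serialization points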
@ -115,6 +130,23 @@ proc load*(v: ValidatorSig): Option[CookedSig] =
else:
none(CookedSig)

func init*(agg: var AggregatePublicKey, pubkey: CookedPubKey) {.inline.}=
## Initializes an aggregate signature context
agg.init(blscurve.PublicKey(pubkey))

func init*(T: type AggregatePublicKey, pubkey: CookedPubKey): T =
result.init(pubkey)

proc aggregate*(agg: var AggregatePublicKey, pubkey: CookedPubKey) {.inline.}=
## Aggregate two valid Validator Public Keys
agg.aggregate(blscurve.PublicKey(pubkey))

func finish*(agg: AggregatePublicKey): CookedPubKey {.inline.} =
## Canonicalize an AggregatePublicKey into a signature
var pubkey: blscurve.PublicKey
pubkey.finish(agg)
CookedPubKey(pubkey)

func init*(agg: var AggregateSignature, sig: CookedSig) {.inline.}=
## Initializes an aggregate signature context
agg.init(blscurve.Signature(sig))
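The init/aggregate/finish helpers added above let callers fold a set of already-validated keys without touching raw blobs or the threadvar cache. A hedged sketch of how they compose (it assumes this module's CookedPubKey helpers and a non-empty key list; it is not code from the PR):

proc aggregateKeys(keys: openArray[CookedPubKey]): CookedPubKey =
  doAssert keys.len > 0    # aggregation over an empty set is undefined
  var agg: AggregatePublicKey
  agg.init(keys[0])        # seed the context with the first cooked key
  for i in 1 ..< keys.len:
    agg.aggregate(keys[i]) # fold in the remaining keys
  finish(agg)              # canonicalize into a single CookedPubKey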
@ -134,7 +166,7 @@ func finish*(agg: AggregateSignature): CookedSig {.inline.} =
|
|||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#bls-signatures
|
||||
proc blsVerify*(
|
||||
pubkey: ValidatorPubKey, message: openArray[byte],
|
||||
pubkey: CookedPubKey, message: openArray[byte],
|
||||
signature: CookedSig): bool =
|
||||
## Check that a signature is valid for a message
|
||||
## under the provided public key.
|
||||
|
@ -143,24 +175,32 @@ proc blsVerify*(
|
|||
## The proof-of-possession MUST be verified before calling this function.
|
||||
## It is recommended to use the overload that accepts a proof-of-possession
|
||||
## to enforce correct usage.
|
||||
PublicKey(pubkey).verify(message, blscurve.Signature(signature))
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#bls-signatures
|
||||
proc blsVerify*(
|
||||
pubkey: ValidatorPubKey, message: openArray[byte],
|
||||
signature: CookedSig): bool =
|
||||
## Check that a signature is valid for a message
|
||||
## under the provided public key.
|
||||
## returns `true` if the signature and the pubkey is valid, `false` otherwise.
|
||||
##
|
||||
## The proof-of-possession MUST be verified before calling this function.
|
||||
## It is recommended to use the overload that accepts a proof-of-possession
|
||||
## to enforce correct usage.
|
||||
let
|
||||
parsedKey = pubkey.loadWithCache()
|
||||
|
||||
# It may happen that signatures or keys fail to parse as invalid blobs may
|
||||
# be passed around - for example, the deposit contract doesn't verify
|
||||
# signatures, so the loading happens lazily at verification time instead!
|
||||
parsedKey.isSome() and
|
||||
parsedKey.get.verify(message, blscurve.Signature(signature))
|
||||
# Guard against invalid signature blobs that fail to parse
|
||||
parsedKey.isSome() and blsVerify(parsedKey.get(), message, signature)
|
||||
|
||||
proc blsVerify*(
|
||||
pubkey: ValidatorPubKey, message: openArray[byte],
|
||||
pubkey: ValidatorPubKey | CookedPubKey, message: openArray[byte],
|
||||
signature: ValidatorSig): bool =
|
||||
let parsedSig = signature.load()
|
||||
|
||||
if parsedSig.isNone():
|
||||
false
|
||||
else:
|
||||
blsVerify(pubkey, message, parsedSig.get())
|
||||
let
|
||||
parsedSig = signature.load()
|
||||
# Guard against invalid signature blobs that fail to parse
|
||||
parsedSig.isSome() and blsVerify(pubkey, message, parsedSig.get())
|
||||
|
||||
proc blsVerify*(sigSet: SignatureSet): bool =
|
||||
## Unbatched verification
|
||||
|
@ -177,7 +217,7 @@ func blsSign*(privkey: ValidatorPrivKey, message: openArray[byte]): CookedSig =
|
|||
CookedSig(SecretKey(privkey).sign(message))
|
||||
|
||||
proc blsFastAggregateVerify*(
|
||||
publicKeys: openArray[ValidatorPubKey],
|
||||
publicKeys: openArray[CookedPubKey],
|
||||
message: openArray[byte],
|
||||
signature: CookedSig
|
||||
): bool =
|
||||
|
@ -199,25 +239,38 @@ proc blsFastAggregateVerify*(
|
|||
# in blscurve which already exists internally
|
||||
# - or at network/databases/serialization boundaries we do not
|
||||
# allow invalid BLS objects to pollute consensus routines
|
||||
let keys = mapIt(publicKeys, PublicKey(it))
|
||||
fastAggregateVerify(keys, message, blscurve.Signature(signature))
|
||||
|
||||
proc blsFastAggregateVerify*(
|
||||
publicKeys: openArray[ValidatorPubKey],
|
||||
message: openArray[byte],
|
||||
signature: CookedSig
|
||||
): bool =
|
||||
var unwrapped: seq[PublicKey]
|
||||
for pubkey in publicKeys:
|
||||
let realkey = pubkey.loadWithCache()
|
||||
if realkey.isNone:
|
||||
return false
|
||||
unwrapped.add realkey.get
|
||||
unwrapped.add PublicKey(realkey.get)
|
||||
|
||||
fastAggregateVerify(unwrapped, message, blscurve.Signature(signature))
|
||||
|
||||
proc blsFastAggregateVerify*(
|
||||
publicKeys: openArray[CookedPubKey],
|
||||
message: openArray[byte],
|
||||
signature: ValidatorSig
|
||||
): bool =
|
||||
let parsedSig = signature.load()
|
||||
parsedSig.isSome and blsFastAggregateVerify(publicKeys, message, parsedSig.get())
|
||||
|
||||
proc blsFastAggregateVerify*(
|
||||
publicKeys: openArray[ValidatorPubKey],
|
||||
message: openArray[byte],
|
||||
signature: ValidatorSig
|
||||
): bool =
|
||||
let parsedSig = signature.load()
|
||||
if not parsedSig.isSome():
|
||||
false
|
||||
else:
|
||||
blsFastAggregateVerify(publicKeys, message, parsedSig.get())
|
||||
parsedSig.isSome and blsFastAggregateVerify(publicKeys, message, parsedSig.get())
|
||||
|
||||
proc toGaugeValue*(hash: Eth2Digest): int64 =
|
||||
# Only the last 8 bytes are taken into consideration in accordance
|
||||
|
@ -252,6 +305,12 @@ template toRaw*(x: TrustedSig): auto =
|
|||
func toHex*(x: BlsCurveType): string =
|
||||
toHex(toRaw(x))
|
||||
|
||||
func toHex*(x: CookedPubKey): string =
|
||||
toHex(x.toPubKey())
|
||||
|
||||
func `$`*(x: CookedPubKey): string =
|
||||
$(x.toPubKey())
|
||||
|
||||
func toValidatorSig*(x: CookedSig): ValidatorSig =
|
||||
ValidatorSig(blob: blscurve.Signature(x).exportRaw())
|
||||
|
||||
|
@ -293,7 +352,7 @@ template hash*(x: ValidatorPubKey | ValidatorSig): Hash =
|
|||
|
||||
{.pragma: serializationRaises, raises: [SerializationError, IOError, Defect].}
|
||||
|
||||
proc writeValue*(writer: var JsonWriter, value: ValidatorPubKey) {.
|
||||
proc writeValue*(writer: var JsonWriter, value: ValidatorPubKey | CookedPubKey) {.
|
||||
inline, raises: [IOError, Defect].} =
|
||||
writer.writeValue(value.toHex())
|
||||
|
||||
|
@ -347,6 +406,10 @@ func shortLog*(x: ValidatorPubKey | ValidatorSig): string =
|
|||
## that may contain valid or non-validated data
|
||||
byteutils.toHex(x.blob.toOpenArray(0, 3))
|
||||
|
||||
func shortLog*(x: CookedPubKey): string =
|
||||
let raw = x.toRaw()
|
||||
byteutils.toHex(raw.toOpenArray(0, 3))
|
||||
|
||||
func shortLog*(x: ValidatorPrivKey): string =
|
||||
## Logging for raw unwrapped BLS types
|
||||
"<private key>"
|
||||
|
|
|
@ -718,7 +718,7 @@ proc createKeystore*(kdfKind: KdfKind,
|
|||
|
||||
Keystore(
|
||||
crypto: cryptoField,
|
||||
pubkey: pubkey,
|
||||
pubkey: pubkey.toPubKey(),
|
||||
path: path,
|
||||
description: newClone(description),
|
||||
uuid: $uuid,
|
||||
|
@ -747,18 +747,22 @@ proc createWallet*(kdfKind: KdfKind,
|
|||
nextAccount: nextAccount.get(0))
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/deposit-contract.md#withdrawal-credentials
|
||||
proc makeWithdrawalCredentials*(k: ValidatorPubKey): Eth2Digest =
|
||||
func makeWithdrawalCredentials*(k: ValidatorPubKey): Eth2Digest =
|
||||
var bytes = eth2digest(k.toRaw())
|
||||
bytes.data[0] = BLS_WITHDRAWAL_PREFIX.uint8
|
||||
bytes
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/deposit-contract.md#withdrawal-credentials
|
||||
proc makeWithdrawalCredentials*(k: CookedPubKey): Eth2Digest =
|
||||
makeWithdrawalCredentials(k.toPubKey())
|
||||
|
||||
proc prepareDeposit*(preset: RuntimePreset,
|
||||
withdrawalPubKey: ValidatorPubKey,
|
||||
signingKey: ValidatorPrivKey, signingPubKey: ValidatorPubKey,
|
||||
withdrawalPubKey: CookedPubKey,
|
||||
signingKey: ValidatorPrivKey, signingPubKey: CookedPubKey,
|
||||
amount = MAX_EFFECTIVE_BALANCE.Gwei): DepositData =
|
||||
var res = DepositData(
|
||||
amount: amount,
|
||||
pubkey: signingPubKey,
|
||||
pubkey: signingPubKey.toPubKey(),
|
||||
withdrawal_credentials: makeWithdrawalCredentials(withdrawalPubKey))
|
||||
|
||||
res.signature = preset.get_deposit_signature(res, signingKey).toValidatorSig()
|
||||
|
|
|
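With the two toPubKey overloads above, a key derived from a ValidatorPrivKey now stays in cooked form and is only compressed back into a ValidatorPubKey blob at serialization points such as DepositData. A hedged usage sketch, assuming `privkey` is a valid ValidatorPrivKey already at hand:

let
  cooked = privkey.toPubKey()    # CookedPubKey: parsed and validated once
  wireForm = cooked.toPubKey()   # ValidatorPubKey: raw blob for SSZ/JSON
echo shortLog(cooked), " ", shortLog(wireForm)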
@ -40,7 +40,7 @@ func get_slot_signature*(
|
|||
|
||||
proc verify_slot_signature*(
|
||||
fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,
|
||||
pubkey: ValidatorPubKey, signature: SomeSig): bool =
|
||||
pubkey: ValidatorPubKey | CookedPubKey, signature: SomeSig): bool =
|
||||
withTrust(signature):
|
||||
let
|
||||
epoch = compute_epoch_at_slot(slot)
|
||||
|
@ -65,7 +65,7 @@ func get_epoch_signature*(
|
|||
|
||||
proc verify_epoch_signature*(
|
||||
fork: Fork, genesis_validators_root: Eth2Digest, epoch: Epoch,
|
||||
pubkey: ValidatorPubKey, signature: SomeSig): bool =
|
||||
pubkey: ValidatorPubKey | CookedPubKey, signature: SomeSig): bool =
|
||||
withTrust(signature):
|
||||
let
|
||||
domain = get_domain(fork, DOMAIN_RANDAO, epoch, genesis_validators_root)
|
||||
|
@ -91,8 +91,7 @@ func get_block_signature*(
|
|||
proc verify_block_signature*(
|
||||
fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,
|
||||
blck: Eth2Digest | SomeSomeBeaconBlock | BeaconBlockHeader,
|
||||
pubkey: ValidatorPubKey,
|
||||
signature: SomeSig): bool =
|
||||
pubkey: ValidatorPubKey | CookedPubKey, signature: SomeSig): bool =
|
||||
withTrust(signature):
|
||||
let
|
||||
epoch = compute_epoch_at_slot(slot)
|
||||
|
@ -118,9 +117,10 @@ func get_aggregate_and_proof_signature*(fork: Fork, genesis_validators_root: Eth
|
|||
blsSign(privKey, compute_aggregate_and_proof_root(fork, genesis_validators_root,
|
||||
aggregate_and_proof).data)
|
||||
|
||||
proc verify_aggregate_and_proof_signature*(fork: Fork, genesis_validators_root: Eth2Digest,
|
||||
aggregate_and_proof: AggregateAndProof,
|
||||
pubkey: ValidatorPubKey, signature: SomeSig): bool =
|
||||
proc verify_aggregate_and_proof_signature*(
|
||||
fork: Fork, genesis_validators_root: Eth2Digest,
|
||||
aggregate_and_proof: AggregateAndProof,
|
||||
pubkey: ValidatorPubKey | CookedPubKey, signature: SomeSig): bool =
|
||||
withTrust(signature):
|
||||
let
|
||||
epoch = compute_epoch_at_slot(aggregate_and_proof.aggregate.data.slot)
|
||||
|
@ -151,7 +151,7 @@ func get_attestation_signature*(
|
|||
proc verify_attestation_signature*(
|
||||
fork: Fork, genesis_validators_root: Eth2Digest,
|
||||
attestation_data: AttestationData,
|
||||
pubkeys: openArray[ValidatorPubKey],
|
||||
pubkeys: auto,
|
||||
signature: SomeSig): bool =
|
||||
withTrust(signature):
|
||||
let
|
||||
|
|
|
@ -10,11 +10,10 @@
|
|||
import
|
||||
# Status lib
|
||||
blscurve,
|
||||
stew/byteutils,
|
||||
stew/[byteutils, results],
|
||||
# Internal
|
||||
../ssz/merkleization,
|
||||
./crypto, ./datatypes, ./helpers, ./presets,
|
||||
./beaconstate, ./digest
|
||||
"."/[crypto, datatypes, helpers, presets, beaconstate, digest]
|
||||
|
||||
# Otherwise, error.
|
||||
import chronicles
|
||||
|
@ -32,31 +31,22 @@ func `$`*(s: SignatureSet): string =
|
|||
# there is no guarantee that pubkeys and signatures received are valid
|
||||
# unlike when Nimbus did eager loading which ensured they were correct beforehand
|
||||
|
||||
template loadOrExit(signature: ValidatorSig, failReturn: auto):
|
||||
CookedSig =
|
||||
template loadOrExit(signature: ValidatorSig, error: cstring):
|
||||
untyped =
|
||||
## Load a BLS signature from a raw signature
|
||||
## Exits the **caller** with false if the signature is invalid
|
||||
let sig = signature.load()
|
||||
if sig.isNone:
|
||||
return failReturn # this exits the calling scope, as templates are inlined.
|
||||
return err(error) # this exits the calling scope, as templates are inlined.
|
||||
sig.unsafeGet()
|
||||
|
||||
template loadWithCacheOrExit(pubkey: ValidatorPubKey, failReturn: auto):
|
||||
blscurve.PublicKey =
|
||||
## Load a BLS signature from a raw public key
|
||||
## Exits the **caller** with false if the public key is invalid
|
||||
let pk = pubkey.loadWithCache()
|
||||
if pk.isNone:
|
||||
return failReturn # this exits the calling scope, as templates are inlined.
|
||||
pk.unsafeGet()
|
||||
|
func addSignatureSet[T](
sigs: var seq[SignatureSet],
pubkey: blscurve.PublicKey,
pubkey: CookedPubKey,
sszObj: T,
signature: CookedSig,
genesis_validators_root: Eth2Digest,
fork: Fork,
genesis_validators_root: Eth2Digest,
epoch: Epoch,
domain: DomainType) =
## Add a new signature set triplet (pubkey, message, signature)

@ -72,83 +62,66 @@ func addSignatureSet[T](
).data

sigs.add((
pubkey,
blscurve.PublicKey(pubkey),
signing_root,
blscurve.Signature(signature)
))

proc aggregateAttesters(
aggPK: var blscurve.PublicKey,
attestation: IndexedAttestation,
validators: seq[Validator],
): bool =
doAssert attestation.attesting_indices.len > 0
var attestersAgg{.noInit.}: AggregatePublicKey
attestersAgg.init(validators[attestation.attesting_indices[0]]
.pubkey.loadWithCacheOrExit(false))
for i in 1 ..< attestation.attesting_indices.len:
attestersAgg.aggregate(validators[attestation.attesting_indices[i]]
.pubkey.loadWithCacheOrExit(false))
aggPK.finish(attestersAgg)
return true
validatorIndices: openArray[uint64],
validatorKeys: openArray[CookedPubKey],
): Result[CookedPubKey, cstring] =
if validatorIndices.len == 0:
# Aggregation spec requires non-empty collection
# - https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04
# Eth2 spec requires at least one attesting index in attestation
# - https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#is_valid_indexed_attestation
return err("aggregateAttesters: no attesting indices")

proc aggregateAttesters(
aggPK: var blscurve.PublicKey,
attestation: IndexedAttestation,
epochRef: auto
): bool =
mixin validator_keys

doAssert attestation.attesting_indices.len > 0
var attestersAgg{.noInit.}: AggregatePublicKey
attestersAgg.init(epochRef.validator_keys[attestation.attesting_indices[0]]
.pubkey.loadWithCacheOrExitFalse())
for i in 1 ..< attestation.attesting_indices.len:
attestersAgg.aggregate(epochRef.validator_keys[attestation.attesting_indices[i]]
.pubkey.loadWithCacheOrExitFalse())
aggPK.finish(attestersAgg)
return true
if validatorIndices[0] >= validatorKeys.lenu64():
return err("aggregateAttesters: invalid attesting index")

attestersAgg.init(validatorKeys[validatorIndices[0].int])
for i in 1 ..< validatorIndices.len:
if validatorIndices[i] >= validatorKeys.lenu64():
return err("aggregateAttesters: invalid attesting index")
attestersAgg.aggregate(validatorKeys[validatorIndices[i].int])

ok(finish(attestersAgg))
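
With aggregateAttesters now returning `Result[CookedPubKey, cstring]`, the callers below forward failures with the `?` operator instead of returning `false`. As a rough, self-contained illustration of that flow - assuming nim-stew's `results` module (the source of the `ok`/`err`/`?` style used in this diff); `checkIndex` and `pickTwo` are invented for the example:

import stew/results

func checkIndex(idx, len: int): Result[int, cstring] =
  if idx >= len:
    return err("checkIndex: out of bounds")
  ok(idx)

func pickTwo(a, b, len: int): Result[(int, int), cstring] =
  # `?` unwraps an ok value, or returns the error to *our* caller
  let
    x = ? checkIndex(a, len)
    y = ? checkIndex(b, len)
  ok((x, y))

when isMainModule:
  doAssert pickTwo(1, 2, 5).isOk()
  doAssert pickTwo(1, 7, 5).isErr()
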
proc addIndexedAttestation(
sigs: var seq[SignatureSet],
attestation: IndexedAttestation,
state: StateData
): bool =
validatorKeys: openArray[CookedPubKey],
state: StateData,
): Result[void, cstring] =
## Add an indexed attestation for batched BLS verification
## purposes
## This only verifies cryptography - checking that
## the indices are sorted and unique is not done, for example.
##
## Returns true if the indexed attestation was added to the batching buffer
## Returns false if sanity checks failed (non-empty, keys are valid)
if attestation.attesting_indices.len == 0:
# Aggregation spec requires non-empty collection
# - https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04
# Eth2 spec requires at least one attesting index in slashing
# - https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#is_valid_indexed_attestation
return false

var aggPK {.noInit.}: blscurve.PublicKey
if not aggPK.aggregateAttesters(
attestation, getStateField(state, validators).asSeq):
return false
let aggPk =
? aggregateAttesters(attestation.attesting_indices.asSeq(), validatorKeys)

sigs.addSignatureSet(
aggPK,
attestation.data,
attestation.signature.loadOrExit(false),
getStateField(state, genesis_validators_root),
attestation.signature.loadOrExit(
"addIndexedAttestation: cannot load signature"),
getStateField(state, fork),
getStateField(state, genesis_validators_root),
attestation.data.target.epoch,
DOMAIN_BEACON_ATTESTER)
return true
ok()

proc addAttestation(
sigs: var seq[SignatureSet],
attestation: Attestation,
validatorKeys: openArray[CookedPubKey],
state: StateData,
cache: var StateCache
): bool =
): Result[void, cstring] =
var inited = false
var attestersAgg{.noInit.}: AggregatePublicKey
for valIndex in state.data.data.get_attesting_indices(

@ -157,30 +130,28 @@ proc addAttestation(
cache
):
if not inited: # first iteration
attestersAgg.init(getStateField(state, validators)[valIndex]
.pubkey.loadWithCacheOrExit(false))
attestersAgg.init(validatorKeys[valIndex.int])
inited = true
else:
attestersAgg.aggregate(getStateField(state, validators)[valIndex]
.pubkey.loadWithCacheOrExit(false))
attestersAgg.aggregate(validatorKeys[valIndex.int])

if not inited:
# There were no attesters
return false
return err("addAttestation: no attesting indices")

var attesters{.noinit.}: blscurve.PublicKey
attesters.finish(attestersAgg)
let attesters = finish(attestersAgg)

sigs.addSignatureSet(
attesters,
attestation.data,
attestation.signature.loadOrExit(false),
getStateField(state, genesis_validators_root),
attestation.signature.loadOrExit(
"addAttestation: cannot load signature"),
getStateField(state, fork),
getStateField(state, genesis_validators_root),
attestation.data.target.epoch,
DOMAIN_BEACON_ATTESTER)

true
ok()

# Public API
# ------------------------------------------------------

@ -190,7 +161,7 @@ proc addAttestation*(
fork: Fork, genesis_validators_root: Eth2Digest,
epochRef: auto,
attestation: Attestation
): Option[CookedSig] =
): Result[CookedSig, cstring] =
## Add an attestation for batched BLS verification
## purposes
## This only verifies cryptography

@ -206,78 +177,77 @@ proc addAttestation*(
attestation.data,
attestation.aggregation_bits):
if not inited: # first iteration
attestersAgg.init(epochRef.validator_keys[valIndex]
.loadWithCacheOrExit(none(CookedSig)))
attestersAgg.init(epochRef.validator_keys[valIndex])
inited = true
else:
attestersAgg.aggregate(epochRef.validator_keys[valIndex]
.loadWithCacheOrExit(none(CookedSig)))
attestersAgg.aggregate(epochRef.validator_keys[valIndex])

if not inited:
# There were no attesters
return none(CookedSig)
return err("addAttestation: no attesting indices")

var attesters{.noinit.}: blscurve.PublicKey
attesters.finish(attestersAgg)

let cookedSig = attestation.signature.loadOrExit(none(CookedSig))
let
attesters = finish(attestersAgg)
cookedSig = attestation.signature.loadOrExit(
"addAttestation: cannot load signature")

sigs.addSignatureSet(
attesters,
attestation.data,
cookedSig,
genesis_validators_root,
fork,
genesis_validators_root,
attestation.data.target.epoch,
DOMAIN_BEACON_ATTESTER)

some(CookedSig(cookedSig))
ok(CookedSig(cookedSig))

proc addSlotSignature*(
sigs: var seq[SignatureSet],
fork: Fork, genesis_validators_root: Eth2Digest,
slot: Slot,
pubkey: ValidatorPubKey,
signature: ValidatorSig): bool =
pubkey: CookedPubKey,
signature: ValidatorSig): Result[void, cstring] =
let epoch = compute_epoch_at_slot(slot)
sigs.addSignatureSet(
pubkey.loadWithCacheOrExit(false),
pubkey,
sszObj = slot,
signature.loadOrExit(false),
genesis_validators_root,
signature.loadOrExit("addSlotSignature: cannot load signature"),
fork,
genesis_validators_root,
epoch,
DOMAIN_SELECTION_PROOF
)

true
ok()

proc addAggregateAndProofSignature*(
sigs: var seq[SignatureSet],
fork: Fork, genesis_validators_root: Eth2Digest,
aggregate_and_proof: AggregateAndProof,
pubkey: ValidatorPubKey,
pubkey: CookedPubKey,
signature: ValidatorSig
): bool =
): Result[void, cstring] =

let epoch = compute_epoch_at_slot(aggregate_and_proof.aggregate.data.slot)
sigs.addSignatureSet(
pubkey.loadWithCacheOrExit(false),
pubkey,
sszObj = aggregate_and_proof,
signature.loadOrExit(false),
genesis_validators_root,
signature.loadOrExit("addAggregateAndProofSignature: cannot load signature"),
fork,
genesis_validators_root,
epoch,
DOMAIN_AGGREGATE_AND_PROOF
)

true
ok()

proc collectSignatureSets*(
sigs: var seq[SignatureSet],
signed_block: SignedBeaconBlock,
validatorKeys: openArray[CookedPubKey],
state: StateData,
cache: var StateCache): bool =
cache: var StateCache): Result[void, cstring] =
## Collect all signatures in a single signed block.
## This includes
## - Block proposer

@ -295,32 +265,33 @@ proc collectSignatureSets*(

let
proposer_index = signed_block.message.proposer_index
if proposer_index >= getStateField(state, validators).lenu64:
return false
validators = validatorKeys.lenu64
if proposer_index >= validators:
return err("collectSignatureSets: invalid proposer index")

let pubkey = getStateField(state, validators)[proposer_index]
.pubkey.loadWithCacheOrExit(false)
let epoch = signed_block.message.slot.compute_epoch_at_slot()

# 1. Block proposer
# ----------------------------------------------------
sigs.addSignatureSet(
pubkey,
validatorKeys[proposer_index],
signed_block.message,
signed_block.signature.loadOrExit(false),
getStateField(state, genesis_validators_root),
signed_block.signature.loadOrExit(
"collectSignatureSets: cannot load signature"),
getStateField(state, fork),
getStateField(state, genesis_validators_root),
epoch,
DOMAIN_BEACON_PROPOSER)

# 2. Randao Reveal
# ----------------------------------------------------
sigs.addSignatureSet(
pubkey,
validatorKeys[proposer_index],
epoch,
signed_block.message.body.randao_reveal.loadOrExit(false),
getStateField(state, genesis_validators_root),
signed_block.message.body.randao_reveal.loadOrExit(
"collectSignatureSets: cannot load randao"),
getStateField(state, fork),
getStateField(state, genesis_validators_root),
epoch,
DOMAIN_RANDAO)

@ -341,15 +312,17 @@ proc collectSignatureSets*(
# Proposed block 1
block:
let header_1 = slashing.signed_header_1
let proposer1 =
getStateField(state, validators)[header_1.message.proposer_index]
if header_1.message.proposer_index >= validators:
return err("collectSignatureSets: invalid slashing proposer index 1")

let epoch1 = header_1.message.slot.compute_epoch_at_slot()
sigs.addSignatureSet(
proposer1.pubkey.loadWithCacheOrExit(false),
validatorKeys[header_1.message.proposer_index],
header_1.message,
header_1.signature.loadOrExit(false),
getStateField(state, genesis_validators_root),
header_1.signature.loadOrExit(
"collectSignatureSets: cannot load proposer slashing 1 signature"),
getStateField(state, fork),
getStateField(state, genesis_validators_root),
epoch1,
DOMAIN_BEACON_PROPOSER
)

@ -357,15 +330,16 @@ proc collectSignatureSets*(
# Conflicting block 2
block:
let header_2 = slashing.signed_header_2
let proposer2 =
getStateField(state, validators)[header_2.message.proposer_index]
if header_2.message.proposer_index >= validators:
return err("collectSignatureSets: invalid slashing proposer index 2")
let epoch2 = header_2.message.slot.compute_epoch_at_slot()
sigs.addSignatureSet(
proposer2.pubkey.loadWithCacheOrExit(false),
validatorKeys[header_2.message.proposer_index],
header_2.message,
header_2.signature.loadOrExit(false),
getStateField(state, genesis_validators_root),
header_2.signature.loadOrExit(
"collectSignatureSets: cannot load proposer slashing 2 signature"),
getStateField(state, fork),
getStateField(state, genesis_validators_root),
epoch2,
DOMAIN_BEACON_PROPOSER
)

@ -385,16 +359,10 @@ proc collectSignatureSets*(
template slashing: untyped = signed_block.message.body.attester_slashings[i]

# Attestation 1
if not sigs.addIndexedAttestation(
slashing.attestation_1,
state):
return false
? sigs.addIndexedAttestation(slashing.attestation_1, validatorKeys, state)

# Conflicting attestation 2
if not sigs.addIndexedAttestation(
slashing.attestation_2,
state):
return false
? sigs.addIndexedAttestation(slashing.attestation_2, validatorKeys, state)

# 5. Attestations
# ----------------------------------------------------

@ -406,10 +374,9 @@ proc collectSignatureSets*(
# don't use "items" for iterating over large type
# due to https://github.com/nim-lang/Nim/issues/14421
# fixed in 1.4.2
if not sigs.addAttestation(
signed_block.message.body.attestations[i],
state, cache):
return false
? sigs.addAttestation(
signed_block.message.body.attestations[i],
validatorKeys, state, cache)

# 6. VoluntaryExits
# ----------------------------------------------------

@ -422,15 +389,17 @@ proc collectSignatureSets*(
# due to https://github.com/nim-lang/Nim/issues/14421
# fixed in 1.4.2
template volex: untyped = signed_block.message.body.voluntary_exits[i]
if volex.message.validator_index >= validators:
return err("collectSignatureSets: invalid voluntary exit")

sigs.addSignatureSet(
getStateField(state, validators)[volex.message.validator_index]
.pubkey.loadWithCacheOrExit(false),
validatorKeys[volex.message.validator_index],
volex.message,
volex.signature.loadOrExit(false),
getStateField(state, genesis_validators_root),
volex.signature.loadOrExit(
"collectSignatureSets: cannot load voluntary exit signature"),
getStateField(state, fork),
getStateField(state, genesis_validators_root),
volex.message.epoch,
DOMAIN_VOLUNTARY_EXIT)

return true
ok()

@ -158,7 +158,8 @@ proc check_proposer_slashing*(
proposer_slashing.signed_header_2]:
if not verify_block_signature(
state.fork, state.genesis_validators_root, signed_header.message.slot,
signed_header.message, proposer[].pubkey, signed_header.signature):
signed_header.message, proposer[].pubkey,
signed_header.signature):
return err("check_proposer_slashing: invalid signature")

ok()

@ -100,7 +100,7 @@ func diffStates*(state0, state1: BeaconState): BeaconStateDiff =
doAssert state1.slot > state0.slot
doAssert state0.slot.isEpoch
doAssert state1.slot == state0.slot + SLOTS_PER_EPOCH
# TODO not here, but in chainDag, an isancestorof check
# TODO not here, but in dag, an isancestorof check

doAssert state0.genesis_time == state1.genesis_time
doAssert state0.genesis_validators_root == state1.genesis_validators_root

@ -47,7 +47,7 @@ type
BeaconBlockCallback* = proc(signedBlock: SignedBeaconBlock) {.gcsafe, raises: [Defect].}

BeaconSyncNetworkState* = ref object
chainDag*: ChainDAGRef
dag*: ChainDAGRef
forkDigest*: ForkDigest

BeaconSyncPeerState* = ref object

@ -79,15 +79,15 @@ func disconnectReasonName(reason: uint64): string =

proc getCurrentStatus*(state: BeaconSyncNetworkState): StatusMsg {.gcsafe.} =
let
chainDag = state.chainDag
headBlock = chainDag.head
dag = state.dag
headBlock = dag.head

StatusMsg(
forkDigest: state.forkDigest,
finalizedRoot:
getStateField(chainDag.headState, finalized_checkpoint).root,
getStateField(dag.headState, finalized_checkpoint).root,
finalizedEpoch:
getStateField(chainDag.headState, finalized_checkpoint).epoch,
getStateField(dag.headState, finalized_checkpoint).epoch,
headRoot: headBlock.root,
headSlot: headBlock.slot)

@ -159,14 +159,14 @@ p2pProtocol BeaconSync(version = 1,
if reqCount > 0'u64 and reqStep > 0'u64:
var blocks: array[MAX_REQUEST_BLOCKS, BlockRef]
let
chainDag = peer.networkState.chainDag
dag = peer.networkState.dag
# Limit number of blocks in response
count = int min(reqCount, blocks.lenu64)

let
endIndex = count - 1
startIndex =
chainDag.getBlockRange(startSlot, reqStep,
dag.getBlockRange(startSlot, reqStep,
blocks.toOpenArray(0, endIndex))
peer.updateRequestQuota(
blockByRangeLookupCost +

@ -177,7 +177,7 @@ p2pProtocol BeaconSync(version = 1,
doAssert not blocks[i].isNil, "getBlockRange should return non-nil blocks only"
trace "wrote response block",
slot = blocks[i].slot, root = shortLog(blocks[i].root)
await response.write(chainDag.get(blocks[i]).data)
await response.write(dag.get(blocks[i]).data)

debug "Block range request done",
peer, startSlot, count, reqStep, found = count - startIndex

@ -195,7 +195,7 @@ p2pProtocol BeaconSync(version = 1,
raise newException(InvalidInputsError, "No blocks requested")

let
chainDag = peer.networkState.chainDag
dag = peer.networkState.dag
count = blockRoots.len

peer.updateRequestQuota(count.float * blockByRootLookupCost)

@ -203,9 +203,9 @@ p2pProtocol BeaconSync(version = 1,

var found = 0
for i in 0..<count:
let blockRef = chainDag.getRef(blockRoots[i])
let blockRef = dag.getRef(blockRoots[i])
if not isNil(blockRef):
await response.write(chainDag.get(blockRef).data)
await response.write(dag.get(blockRef).data)
inc found

peer.updateRequestQuota(found.float * blockResponseCost)

@ -258,8 +258,8 @@ proc handleStatus(peer: Peer,
# we can add this peer to PeerPool.
await peer.handlePeer()

proc initBeaconSync*(network: Eth2Node, chainDag: ChainDAGRef,
proc initBeaconSync*(network: Eth2Node, dag: ChainDAGRef,
forkDigest: ForkDigest) =
var networkState = network.protocolState(BeaconSync)
networkState.chainDag = chainDag
networkState.dag = dag
networkState.forkDigest = forkDigest

@ -389,7 +389,7 @@ proc saveNetKeystore*(rng: var BrHmacDrbgContext, keyStorePath: string,

proc saveKeystore(rng: var BrHmacDrbgContext,
validatorsDir, secretsDir: string,
signingKey: ValidatorPrivKey, signingPubKey: ValidatorPubKey,
signingKey: ValidatorPrivKey, signingPubKey: CookedPubKey,
signingKeyPath: KeyPath): Result[void, KeystoreGenerationError] =
let
keyName = "0x" & $signingPubKey

@ -462,7 +462,8 @@ proc generateDeposits*(preset: RuntimePreset,
derivedKey, signingPubKey,
makeKeyPath(validatorIdx, signingKeyKind))

deposits.add preset.prepareDeposit(withdrawalPubKey, derivedKey, signingPubKey)
deposits.add preset.prepareDeposit(
withdrawalPubKey, derivedKey, signingPubKey)

ok deposits

@ -285,7 +285,7 @@ proc importInterchangeV5Impl*(

result = siPartial
continue
if key.get().loadWithCache().isNone():
if key.get().load().isNone():
# The bytes don't deserialize to a valid BLS G1 elliptic curve point.
# Deserialization is costly but done only once per validator.
# and SlashingDB import is a very rare event.

@ -71,11 +71,11 @@ proc addLocalValidator(node: BeaconNode,
let pubKey = privKey.toPubKey()
node.attachedValidators[].addLocalValidator(
pubKey, privKey,
findValidator(getStateField(stateData, validators), pubKey))
findValidator(getStateField(stateData, validators), pubKey.toPubKey()))

proc addLocalValidators*(node: BeaconNode) =
for validatorKey in node.config.validatorKeys:
node.addLocalValidator node.chainDag.headState, validatorKey
node.addLocalValidator node.dag.headState, validatorKey

proc addRemoteValidators*(node: BeaconNode) {.raises: [Defect, OSError, IOError].} =
# load all the validators from the child process - loop until `end`

@ -85,16 +85,19 @@ proc addRemoteValidators*(node: BeaconNode) {.raises: [Defect, OSError, IOError]
let
key = ValidatorPubKey.fromHex(line).get()
index = findValidator(
getStateField(node.chainDag.headState, validators), key)

let v = AttachedValidator(pubKey: key,
index: index,
kind: ValidatorKind.remote,
connection: ValidatorConnection(
inStream: node.vcProcess.inputStream,
outStream: node.vcProcess.outputStream,
pubKeyStr: $key))
node.attachedValidators[].addRemoteValidator(key, v)
getStateField(node.dag.headState, validators), key)
pk = key.load()
if pk.isSome():
let v = AttachedValidator(pubKey: pk.get(),
index: index,
kind: ValidatorKind.remote,
connection: ValidatorConnection(
inStream: node.vcProcess.inputStream,
outStream: node.vcProcess.outputStream,
pubKeyStr: $key))
node.attachedValidators[].addRemoteValidator(pk.get(), v)
else:
warn "Could not load public key", line

proc getAttachedValidator*(node: BeaconNode,
pubkey: ValidatorPubKey): AttachedValidator =

@ -119,7 +122,7 @@ proc getAttachedValidator*(node: BeaconNode,
epochRef: EpochRef,
idx: ValidatorIndex): AttachedValidator =
if idx < epochRef.validator_keys.len.ValidatorIndex:
let validator = node.getAttachedValidator(epochRef.validator_keys[idx])
let validator = node.getAttachedValidator(epochRef.validator_keys[idx].toPubKey())
if validator != nil and validator.index != some(idx.ValidatorIndex):
# Update index, in case the validator was activated!
notice "Validator activated", pubkey = validator.pubkey, index = idx

@ -194,12 +197,12 @@ proc sendProposerSlashing*(node: BeaconNode, slashing: ProposerSlashing) =
proc sendAttestation*(node: BeaconNode, attestation: Attestation): Future[bool] =
# For the validator API, which doesn't supply the subnet id.
let attestationBlck =
node.chainDag.getRef(attestation.data.beacon_block_root)
node.dag.getRef(attestation.data.beacon_block_root)
if attestationBlck.isNil:
debug "Attempt to send attestation without corresponding block"
return
let
epochRef = node.chainDag.getEpochRef(
epochRef = node.dag.getEpochRef(
attestationBlck, attestation.data.target.epoch)
subnet_id = compute_subnet_for_attestation(
get_committee_count_per_slot(epochRef), attestation.data.slot,

@ -227,7 +230,7 @@ proc createAndSendAttestation(node: BeaconNode,
return

if node.config.dumpEnabled:
dump(node.config.dumpDirOutgoing, attestation.data, validator.pubKey)
dump(node.config.dumpDirOutgoing, attestation.data, validator.pubKey.toPubKey())

let wallTime = node.beaconClock.now()
let deadline = attestationData.slot.toBeaconTime() +

@ -260,7 +263,7 @@ proc getBlockProposalEth1Data*(node: BeaconNode,
else:
result.vote = getStateField(stateData, eth1_data)
else:
let finalizedEpochRef = node.chainDag.getFinalizedEpochRef()
let finalizedEpochRef = node.dag.getFinalizedEpochRef()
result = node.eth1Monitor.getBlockProposalData(
stateData, finalizedEpochRef.eth1_data,
finalizedEpochRef.eth1_deposit_index)

@ -293,13 +296,13 @@ proc makeBeaconBlockForHeadAndSlot*(node: BeaconNode,
# Advance state to the slot that we're proposing for

let
proposalState = assignClone(node.chainDag.headState)
proposalState = assignClone(node.dag.headState)
proposalStateAddr = unsafeAddr proposalState[]

node.chainDag.withState(proposalState[], head.atSlot(slot)):
node.dag.withState(proposalState[], head.atSlot(slot)):
let
eth1Proposal = node.getBlockProposalEth1Data(stateData)
poolPtr = unsafeAddr node.chainDag # safe because restore is short-lived
poolPtr = unsafeAddr node.dag # safe because restore is short-lived

if eth1Proposal.hasMissingDeposits:
error "Eth1 deposits not available. Skipping block proposal", slot

@ -333,7 +336,7 @@ proc proposeSignedBlock*(node: BeaconNode,
head: BlockRef,
validator: AttachedValidator,
newBlock: SignedBeaconBlock): BlockRef =
let newBlockRef = node.chainDag.addRawBlock(node.quarantine, newBlock) do (
let newBlockRef = node.dag.addRawBlock(node.quarantine, newBlock) do (
blckRef: BlockRef, trustedBlock: TrustedSignedBeaconBlock,
epochRef: EpochRef, state: HashedBeaconState):
# Callback add to fork choice if signed block valid (and becomes trusted)

@ -377,9 +380,9 @@ proc proposeBlock(node: BeaconNode,
return head

let
fork = getStateField(node.chainDag.headState, fork)
fork = getStateField(node.dag.headState, fork)
genesis_validators_root =
getStateField(node.chainDag.headState, genesis_validators_root)
getStateField(node.dag.headState, genesis_validators_root)
randao = await validator.genRandaoReveal(
fork, genesis_validators_root, slot)
message = await makeBeaconBlockForHeadAndSlot(

@ -400,7 +403,7 @@ proc proposeBlock(node: BeaconNode,
fork, genesis_validators_root, slot, newBlock.root)
let notSlashable = node.attachedValidators
.slashingProtection
.registerBlock(validator_index, validator.pubkey, slot, signing_root)
.registerBlock(validator_index, validator.pubkey.toPubKey(), slot, signing_root)

if notSlashable.isErr:
warn "Slashing protection activated",

@ -448,12 +451,12 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
# using empty slots as fillers.
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#validator-assignments
let
epochRef = node.chainDag.getEpochRef(
epochRef = node.dag.getEpochRef(
attestationHead.blck, slot.compute_epoch_at_slot())
committees_per_slot = get_committee_count_per_slot(epochRef)
fork = getStateField(node.chainDag.headState, fork)
fork = getStateField(node.dag.headState, fork)
genesis_validators_root =
getStateField(node.chainDag.headState, genesis_validators_root)
getStateField(node.dag.headState, genesis_validators_root)

for committee_index in get_committee_indices(epochRef):
let committee = get_beacon_committee(epochRef, slot, committee_index)

@ -472,7 +475,7 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
.slashingProtection
.registerAttestation(
validator_index,
validator.pubkey,
validator.pubkey.toPubKey(),
data.source.epoch,
data.target.epoch,
signing_root)

@ -493,22 +496,22 @@ proc handleProposal(node: BeaconNode, head: BlockRef, slot: Slot):
## that is supposed to do so, given the shuffling at that slot for the given
## head - to compute the proposer, we need to advance a state to the given
## slot

let proposer = node.chainDag.getProposer(head, slot)
let proposer = node.dag.getProposer(head, slot)
if proposer.isNone():
return head

let validator =
node.attachedValidators[].getValidator(proposer.get()[1])
let
proposerKey = node.dag.validatorKeys[proposer.get()].toPubKey()
validator = node.attachedValidators[].getValidator(proposerKey)

if validator != nil:
return await proposeBlock(node, validator, proposer.get()[0], head, slot)
return await proposeBlock(node, validator, proposer.get(), head, slot)

debug "Expecting block proposal",
headRoot = shortLog(head.root),
slot = shortLog(slot),
proposer_index = proposer.get()[0],
proposer = shortLog(proposer.get()[1])
proposer_index = proposer.get(),
proposer = shortLog(proposerKey)

return head
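
handleProposal now resolves the proposer's key through the single dag-wide list (`node.dag.validatorKeys`, indexed by validator index) instead of carrying an (index, pubkey) tuple around. A toy sketch of that layout, with invented stand-in types rather than the real `ChainDAGRef`/`CookedPubKey`:

type
  ValidatorIndex = uint64
  CookedKey = object              # stand-in for CookedPubKey: deserialized once, reused everywhere
    point: int
  Dag = object
    validatorKeys: seq[CookedKey] # one shared list; EpochRefs and callers index into it

func getKey(dag: Dag, idx: ValidatorIndex): CookedKey =
  dag.validatorKeys[idx.int]

when isMainModule:
  let dag = Dag(validatorKeys: @[CookedKey(point: 1), CookedKey(point: 2)])
  doAssert dag.getKey(1).point == 2
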
@ -523,10 +526,10 @@ proc broadcastAggregatedAttestations(
# the corresponding one -- whatever they are, they match.

let
epochRef = node.chainDag.getEpochRef(aggregationHead, aggregationSlot.epoch)
fork = getStateField(node.chainDag.headState, fork)
epochRef = node.dag.getEpochRef(aggregationHead, aggregationSlot.epoch)
fork = getStateField(node.dag.headState, fork)
genesis_validators_root =
getStateField(node.chainDag.headState, genesis_validators_root)
getStateField(node.dag.headState, genesis_validators_root)
committees_per_slot = get_committee_count_per_slot(epochRef)

var

@ -585,14 +588,14 @@ proc updateValidatorMetrics*(node: BeaconNode) =
if v.index.isNone():
0.Gwei
elif v.index.get().uint64 >=
getStateField(node.chainDag.headState, balances).lenu64:
getStateField(node.dag.headState, balances).lenu64:
debug "Cannot get validator balance, index out of bounds",
pubkey = shortLog(v.pubkey), index = v.index.get(),
balances = getStateField(node.chainDag.headState, balances).len,
stateRoot = node.chainDag.headState.data.root
balances = getStateField(node.dag.headState, balances).len,
stateRoot = node.dag.headState.data.root
0.Gwei
else:
getStateField(node.chainDag.headState, balances)[v.index.get()]
getStateField(node.dag.headState, balances)[v.index.get()]

if i < 64:
attached_validator_balance.set(

@ -612,9 +615,9 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =
# Nothing to do because we have no validator attached
return

# The chainDag head might be updated by sync while we're working due to the
# The dag head might be updated by sync while we're working due to the
# await calls, thus we use a local variable to keep the logic straight here
var head = node.chainDag.head
var head = node.dag.head
if not node.isSynced(head):
notice "Syncing in progress; skipping validator duties for now",
slot, headSlot = head.slot

@ -702,7 +705,7 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =

# Time passed - we might need to select a new head in that case
node.consensusManager[].updateHead(slot)
head = node.chainDag.head
head = node.dag.head

handleAttestations(node, head, slot)

@ -726,6 +729,6 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =
await broadcastAggregatedAttestations(node, head, slot)

if node.eth1Monitor != nil and (slot mod SLOTS_PER_EPOCH) == 0:
let finalizedEpochRef = node.chainDag.getFinalizedEpochRef()
let finalizedEpochRef = node.dag.getFinalizedEpochRef()
discard node.eth1Monitor.trackFinalizedState(
finalizedEpochRef.eth1_data, finalizedEpochRef.eth1_deposit_index)

@ -33,22 +33,22 @@ template count*(pool: ValidatorPool): int =
pool.validators.len

proc addLocalValidator*(pool: var ValidatorPool,
pubKey: ValidatorPubKey,
pubKey: CookedPubKey,
privKey: ValidatorPrivKey,
index: Option[ValidatorIndex]) =
let v = AttachedValidator(pubKey: pubKey,
index: index,
kind: inProcess,
privKey: privKey)
pool.validators[pubKey] = v
pool.validators[pubKey.toPubKey()] = v
notice "Local validator attached", pubKey, validator = shortLog(v)

validators.set(pool.count().int64)

proc addRemoteValidator*(pool: var ValidatorPool,
pubKey: ValidatorPubKey,
pubKey: CookedPubKey,
v: AttachedValidator) =
pool.validators[pubKey] = v
pool.validators[pubKey.toPubKey()] = v
notice "Remote validator attached", pubKey, validator = shortLog(v)

validators.set(pool.count().int64)
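
Note the pattern in addLocalValidator/addRemoteValidator above: the pool now receives the cooked (deserialized) key, but `pool.validators` stays keyed by the compact serialized form via `toPubKey()`. A small, self-contained illustration with hypothetical stand-in types:

import std/tables

type
  CookedKey = object              # stand-in for CookedPubKey: costly to deserialize, cheap to reuse
    point: int

func toSerialized(k: CookedKey): string =
  # stand-in for CookedPubKey.toPubKey(): the compact form used for lookups and logging
  "key:" & $k.point

var pool = initTable[string, string]()   # keyed like pool.validators

let cooked = CookedKey(point: 7)
pool[cooked.toSerialized()] = "validator-7"
doAssert pool[cooked.toSerialized()] == "validator-7"
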
@ -72,15 +72,15 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
putInitialDepositContractSnapshot(db, depositContractSnapshot)

var
chainDag = ChainDAGRef.init(runtimePreset, db)
dag = ChainDAGRef.init(runtimePreset, db)
eth1Chain = Eth1Chain.init(runtimePreset, db)
merkleizer = depositContractSnapshot.createMerkleizer
quarantine = QuarantineRef.init(keys.newRng())
attPool = AttestationPool.init(chainDag, quarantine)
attPool = AttestationPool.init(dag, quarantine)
timers: array[Timers, RunningStat]
attesters: RunningStat
r = initRand(1)
tmpState = assignClone(chainDag.headState)
tmpState = assignClone(dag.headState)

eth1Chain.addBlock Eth1Block(
number: Eth1BlockNumber 1,

@ -89,13 +89,13 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
deposit_root: merkleizer.getDepositsRoot,
deposit_count: merkleizer.getChunkCount))

let replayState = assignClone(chainDag.headState)
let replayState = assignClone(dag.headState)

proc handleAttestations(slot: Slot) =
let
attestationHead = chainDag.head.atSlot(slot)
attestationHead = dag.head.atSlot(slot)

chainDag.withState(tmpState[], attestationHead):
dag.withState(tmpState[], attestationHead):
let committees_per_slot =
get_committee_count_per_slot(stateData, slot.epoch, cache)

@ -128,11 +128,11 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
return

let
head = chainDag.head
head = dag.head

chainDag.withState(tmpState[], head.atSlot(slot)):
dag.withState(tmpState[], head.atSlot(slot)):
let
finalizedEpochRef = chainDag.getFinalizedEpochRef()
finalizedEpochRef = dag.getFinalizedEpochRef()
proposerIdx = get_beacon_proposer_index(
stateData.data.data, cache).get()
privKey = hackPrivKey(

@ -177,16 +177,16 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
newBlock.message.slot,
blockRoot, privKey).toValidatorSig()

let added = chainDag.addRawBlock(quarantine, newBlock) do (
let added = dag.addRawBlock(quarantine, newBlock) do (
blckRef: BlockRef, signedBlock: TrustedSignedBeaconBlock,
epochRef: EpochRef, state: HashedBeaconState):
# Callback add to fork choice if valid
attPool.addForkChoice(epochRef, blckRef, signedBlock.message, blckRef.slot)

blck() = added[]
chainDag.updateHead(added[], quarantine)
if chainDag.needStateCachesAndForkChoicePruning():
chainDag.pruneStateCachesDAG()
dag.updateHead(added[], quarantine)
if dag.needStateCachesAndForkChoicePruning():
dag.pruneStateCachesDAG()
attPool.prune()

var

@ -235,7 +235,7 @@ cli do(slots = SLOTS_PER_EPOCH * 5,

# TODO if attestation pool was smarter, it would include older attestations
# too!
verifyConsensus(chainDag.headState, attesterRatio * blockRatio)
verifyConsensus(dag.headState, attesterRatio * blockRatio)

if t == tEpoch:
echo &". slot: {shortLog(slot)} ",

@ -247,9 +247,9 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
if replay:
withTimer(timers[tReplay]):
var cache = StateCache()
chainDag.updateStateData(
replayState[], chainDag.head.atSlot(Slot(slots)), false, cache)
dag.updateStateData(
replayState[], dag.head.atSlot(Slot(slots)), false, cache)

echo "Done!"

printTimers(chainDag.headState, attesters, true, timers)
printTimers(dag.headState, attesters, true, timers)

@ -42,7 +42,7 @@ proc genMockPrivKeys(privkeys: var openArray[ValidatorPrivKey]) =
func genMockPubKeys(pubkeys: var openArray[ValidatorPubKey],
privkeys: openArray[ValidatorPrivKey]) =
for i in 0 ..< privkeys.len:
pubkeys[i] = toPubKey(privkeys[i])
pubkeys[i] = toPubKey(privkeys[i]).toPubKey()

# Ref array necessary to limit stack usage / binary size
var MockPrivKeys* = newSeq[ValidatorPrivKey](defaultRuntimePreset.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT)

@ -61,10 +61,10 @@ suite "Attestation pool processing" & preset():
setup:
# Genesis state that results in 6 members per committee
var
chainDag = init(ChainDAGRef, defaultRuntimePreset, makeTestDB(SLOTS_PER_EPOCH * 6))
dag = init(ChainDAGRef, defaultRuntimePreset, makeTestDB(SLOTS_PER_EPOCH * 6))
quarantine = QuarantineRef.init(keys.newRng())
pool = newClone(AttestationPool.init(chainDag, quarantine))
state = newClone(chainDag.headState)
pool = newClone(AttestationPool.init(dag, quarantine))
state = newClone(dag.headState)
cache = StateCache()
rewards: RewardInfo
# Slot 0 is a finalized slot - won't be making attestations for it..

@ -363,8 +363,8 @@ suite "Attestation pool processing" & preset():
test "Fork choice returns latest block with no attestations":
var cache = StateCache()
let
b1 = addTestBlock(state.data, chainDag.tail.root, cache)
b1Add = chainDag.addRawBlock(quarantine, b1) do (
b1 = addTestBlock(state.data, dag.tail.root, cache)
b1Add = dag.addRawBlock(quarantine, b1) do (
blckRef: BlockRef, signedBlock: TrustedSignedBeaconBlock,
epochRef: EpochRef, state: HashedBeaconState):
# Callback add to fork choice if valid

@ -377,7 +377,7 @@ suite "Attestation pool processing" & preset():

let
b2 = addTestBlock(state.data, b1.root, cache)
b2Add = chainDag.addRawBlock(quarantine, b2) do (
b2Add = dag.addRawBlock(quarantine, b2) do (
blckRef: BlockRef, signedBlock: TrustedSignedBeaconBlock,
epochRef: EpochRef, state: HashedBeaconState):
# Callback add to fork choice if valid

@ -391,8 +391,8 @@ suite "Attestation pool processing" & preset():
test "Fork choice returns block with attestation":
var cache = StateCache()
let
b10 = makeTestBlock(state.data, chainDag.tail.root, cache)
b10Add = chainDag.addRawBlock(quarantine, b10) do (
b10 = makeTestBlock(state.data, dag.tail.root, cache)
b10Add = dag.addRawBlock(quarantine, b10) do (
blckRef: BlockRef, signedBlock: TrustedSignedBeaconBlock,
epochRef: EpochRef, state: HashedBeaconState):
# Callback add to fork choice if valid

@ -404,10 +404,10 @@ suite "Attestation pool processing" & preset():
head == b10Add[]

let
b11 = makeTestBlock(state.data, chainDag.tail.root, cache,
b11 = makeTestBlock(state.data, dag.tail.root, cache,
graffiti = GraffitiBytes [1'u8, 0, 0, 0 ,0 ,0 ,0 ,0 ,0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
)
b11Add = chainDag.addRawBlock(quarantine, b11) do (
b11Add = dag.addRawBlock(quarantine, b11) do (
blckRef: BlockRef, signedBlock: TrustedSignedBeaconBlock,
epochRef: EpochRef, state: HashedBeaconState):
# Callback add to fork choice if valid

@ -451,8 +451,8 @@ suite "Attestation pool processing" & preset():
test "Trying to add a block twice tags the second as an error":
var cache = StateCache()
let
b10 = makeTestBlock(state.data, chainDag.tail.root, cache)
b10Add = chainDag.addRawBlock(quarantine, b10) do (
b10 = makeTestBlock(state.data, dag.tail.root, cache)
b10Add = dag.addRawBlock(quarantine, b10) do (
blckRef: BlockRef, signedBlock: TrustedSignedBeaconBlock,
epochRef: EpochRef, state: HashedBeaconState):
# Callback add to fork choice if valid

@ -466,7 +466,7 @@ suite "Attestation pool processing" & preset():
# -------------------------------------------------------------
# Add back the old block to ensure we have a duplicate error
let b10_clone = b10 # Assumes deep copy
let b10Add_clone = chainDag.addRawBlock(quarantine, b10_clone) do (
let b10Add_clone = dag.addRawBlock(quarantine, b10_clone) do (
blckRef: BlockRef, signedBlock: TrustedSignedBeaconBlock,
epochRef: EpochRef, state: HashedBeaconState):
# Callback add to fork choice if valid

@ -477,11 +477,11 @@ suite "Attestation pool processing" & preset():
test "Trying to add a duplicate block from an old pruned epoch is tagged as an error":
# Note: very sensitive to stack usage

chainDag.updateFlags.incl {skipBLSValidation}
dag.updateFlags.incl {skipBLSValidation}
var cache = StateCache()
let
b10 = addTestBlock(state.data, chainDag.tail.root, cache)
b10Add = chainDag.addRawBlock(quarantine, b10) do (
b10 = addTestBlock(state.data, dag.tail.root, cache)
b10Add = dag.addRawBlock(quarantine, b10) do (
blckRef: BlockRef, signedBlock: TrustedSignedBeaconBlock,
epochRef: EpochRef, state: HashedBeaconState):
# Callback add to fork choice if valid

@ -509,7 +509,7 @@ suite "Attestation pool processing" & preset():
state.data, block_root, cache, attestations = attestations)

block_root = new_block.root
let blockRef = chainDag.addRawBlock(quarantine, new_block) do (
let blockRef = dag.addRawBlock(quarantine, new_block) do (
blckRef: BlockRef, signedBlock: TrustedSignedBeaconBlock,
epochRef: EpochRef, state: HashedBeaconState):
# Callback add to fork choice if valid

@ -517,8 +517,8 @@ suite "Attestation pool processing" & preset():

let head = pool[].selectHead(blockRef[].slot)
doAssert: head == blockRef[]
chainDag.updateHead(head, quarantine)
pruneAtFinalization(chainDag, pool[])
dag.updateHead(head, quarantine)
pruneAtFinalization(dag, pool[])

attestations.setlen(0)
for index in 0'u64 ..< committees_per_slot:

@ -545,13 +545,13 @@ suite "Attestation pool processing" & preset():
# -------------------------------------------------------------
# Prune

doAssert: chainDag.finalizedHead.slot != 0
doAssert: dag.finalizedHead.slot != 0

pool[].prune()
doAssert: b10.root notin pool.forkChoice.backend

# Add back the old block to ensure we have a duplicate error
let b10Add_clone = chainDag.addRawBlock(quarantine, b10_clone) do (
let b10Add_clone = dag.addRawBlock(quarantine, b10_clone) do (
blckRef: BlockRef, signedBlock: TrustedSignedBeaconBlock,
epochRef: EpochRef, state: HashedBeaconState):
# Callback add to fork choice if valid

@ -378,32 +378,21 @@ suite "chain DAG finalization tests" & preset():
check:
dag.heads.len() == 1

let
headER = dag.findEpochRef(dag.heads[0], dag.heads[0].slot.epoch)
finalER = dag.findEpochRef(dag.finalizedHead.blck, dag.finalizedHead.slot.epoch)
check:
dag.validatorKeys.len() == getStateField(dag.headState, validators).len()

# Epochrefs should share validator key set when the validator set is
# stable
not headER.isNil
not dag.findEpochRef(dag.heads[0], dag.heads[0].slot.epoch - 1).isNil
headER !=
dag.findEpochRef(dag.heads[0], dag.heads[0].slot.epoch - 1)
headER.validator_key_store[1] ==
dag.findEpochRef(dag.heads[0], dag.heads[0].slot.epoch - 1).validator_key_store[1]
let
finalER = dag.findEpochRef(dag.finalizedHead.blck, dag.finalizedHead.slot.epoch)

# The EpochRef for the finalized block is needed for eth1 voting, so we
# should never drop it!
check:
not finalER.isNil

block:
for er in dag.epochRefs:
check: er[1] == nil or er[1].epoch >= dag.finalizedHead.slot.epoch

if er[1] != nil:
# EpochRef validator keystores should back-propagate to all previous
# epochs
check (addr headER.validator_keys) == (addr er[1].validator_keys)
block:
# The late block is a block whose parent was finalized long ago and thus
# is no longer a viable head candidate

@ -514,3 +503,4 @@ suite "chain DAG finalization tests" & preset():
dag2.finalizedHead.blck.root == dag.finalizedHead.blck.root
dag2.finalizedHead.slot == dag.finalizedHead.slot
hash_tree_root(dag2.headState) == hash_tree_root(dag.headState)
dag2.validatorKeys.len() == dag.validatorKeys.len()

@ -14,9 +14,9 @@ import ../beacon_chain/consensus_object_pools/[block_quarantine, blockchain_dag,
import "."/[testutil, testdbutil]

proc getExitPool(): auto =
let chainDag =
let dag =
init(ChainDAGRef, defaultRuntimePreset, makeTestDB(SLOTS_PER_EPOCH * 3))
newClone(ExitPool.init(chainDag, QuarantineRef.init(keys.newRng())))
newClone(ExitPool.init(dag, QuarantineRef.init(keys.newRng())))

suite "Exit pool testing suite":
setup:

@ -34,10 +34,10 @@ suite "Gossip validation " & preset():
setup:
# Genesis state that results in 3 members per committee
var
chainDag = init(ChainDAGRef, defaultRuntimePreset, makeTestDB(SLOTS_PER_EPOCH * 3))
dag = init(ChainDAGRef, defaultRuntimePreset, makeTestDB(SLOTS_PER_EPOCH * 3))
quarantine = QuarantineRef.init(keys.newRng())
pool = newClone(AttestationPool.init(chainDag, quarantine))
state = newClone(chainDag.headState)
pool = newClone(AttestationPool.init(dag, quarantine))
state = newClone(dag.headState)
cache = StateCache()
rewards = RewardInfo()
batchCrypto = BatchCrypto.new(keys.newRng(), eager = proc(): bool = false)

@ -47,34 +47,34 @@ suite "Gossip validation " & preset():

test "Validation sanity":
# TODO: refactor tests to avoid skipping BLS validation
chainDag.updateFlags.incl {skipBLSValidation}
dag.updateFlags.incl {skipBLSValidation}

var
cache: StateCache
for blck in makeTestBlocks(
chainDag.headState.data, chainDag.head.root, cache,
dag.headState.data, dag.head.root, cache,
int(SLOTS_PER_EPOCH * 5), false):
let added = chainDag.addRawBlock(quarantine, blck) do (
let added = dag.addRawBlock(quarantine, blck) do (
blckRef: BlockRef, signedBlock: TrustedSignedBeaconBlock,
epochRef: EpochRef, state: HashedBeaconState):
# Callback add to fork choice if valid
pool[].addForkChoice(epochRef, blckRef, signedBlock.message, blckRef.slot)

check: added.isOk()
chainDag.updateHead(added[], quarantine)
pruneAtFinalization(chainDag, pool[])
dag.updateHead(added[], quarantine)
pruneAtFinalization(dag, pool[])

var
# Create attestations for slot 1
beacon_committee = get_beacon_committee(
chainDag.headState, chainDag.head.slot, 0.CommitteeIndex, cache)
dag.headState, dag.head.slot, 0.CommitteeIndex, cache)
att_1_0 = makeAttestation(
chainDag.headState, chainDag.head.root, beacon_committee[0], cache)
dag.headState, dag.head.root, beacon_committee[0], cache)
att_1_1 = makeAttestation(
chainDag.headState, chainDag.head.root, beacon_committee[1], cache)
dag.headState, dag.head.root, beacon_committee[1], cache)

committees_per_slot =
get_committee_count_per_slot(chainDag.headState,
get_committee_count_per_slot(dag.headState,
att_1_0.data.slot.epoch, cache)

subnet = compute_subnet_for_attestation(

@ -146,7 +146,8 @@ suite "Interop":

for i in 0..<64:
let privKey = makeInteropPrivKey(i)
deposits.add makeDeposit(defaultRuntimePreset, privKey.toPubKey(), privKey)
deposits.add makeDeposit(
defaultRuntimePreset, privKey.toPubKey().toPubKey(), privKey)

const genesis_time = 1570500000
var

@ -47,7 +47,7 @@ func makeDeposit*(i: int, flags: UpdateFlags = {}): DepositData =
withdrawal_credentials = makeFakeHash(i)

result = DepositData(
pubkey: pubkey,
pubkey: pubkey.toPubKey(),
withdrawal_credentials: withdrawal_credentials,
amount: MAX_EFFECTIVE_BALANCE)