Small fixes (#1165)

* random fixes

* create dump dir on startup
* don't crash on failure to write dump
* fix a few `uint64` instances being used when indexing arrays - this
should be a compile error but isn't due to compiler bugs
* fix standalone test_block_pool compilation
* add signed block processing in ncli

* reuse cache entry instead of allocating a new one

* allow for small clock disparities when validating blocks
This commit is contained in:
Jacek Sieka 2020-06-12 18:43:20 +02:00 committed by GitHub
parent c8f24ae3b8
commit 42832cefa8
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 83 additions and 46 deletions

View File

@@ -303,12 +303,13 @@ proc storeBlock(
let blck = node.blockPool.add(blockRoot, signedBlock)
if blck.isErr:
if blck.error == Invalid and node.config.dumpEnabled:
dump(node.config.dumpDir / "invalid", signedBlock, blockRoot)
let parent = node.blockPool.getRef(signedBlock.message.parent_root)
if parent != nil:
node.blockPool.withState(
node.blockPool.tmpState, parent.atSlot(signedBlock.message.slot - 1)):
let parentBs = parent.atSlot(signedBlock.message.slot - 1)
node.blockPool.withState(node.blockPool.tmpState, parentBs):
dump(node.config.dumpDir / "invalid", hashedState, parent)
dump(node.config.dumpDir / "invalid", signedBlock, blockRoot)
return err(blck.error)
@@ -1082,6 +1083,7 @@ programMain:
if config.dumpEnabled:
createDir(config.dumpDir)
createDir(config.dumpDir / "incoming")
createDir(config.dumpDir / "invalid")
var node = waitFor BeaconNode.init(config)

View File

@@ -313,12 +313,50 @@ proc getState(
func getStateCacheIndex(dag: CandidateChains, blockRoot: Eth2Digest, slot: Slot): int =
for i, cachedState in dag.cachedStates:
let (cacheBlockRoot, cacheSlot, state) = cachedState
let (cacheBlockRoot, cacheSlot, _) = cachedState
if cacheBlockRoot == blockRoot and cacheSlot == slot:
return i
-1
func putStateCache(
dag: CandidateChains, state: HashedBeaconState, blck: BlockRef) =
# Need to be able to efficiently access states for both attestation
# aggregation and to process block proposals going back to the last
# finalized slot. Ideally to avoid potential combinatorial forking
# storage and/or memory constraints could CoW, up to and including,
# in particular, hash_tree_root() which is expensive to do 30 times
# since the previous epoch, to efficiently state_transition back to
# desired slot. However, none of that's in place, so there are both
# expensive, repeated BeaconState copies as well as computationally
# time-consuming-near-end-of-epoch hash tree roots. The latter are,
# effectively, naïvely O(n^2) in slot number otherwise, so when the
# slots become in the mid-to-high-20s it's spending all its time in
# pointlessly repeated calculations of prefix-state-transitions. An
# intermediate time/memory workaround involves storing only mapping
# between BlockRefs, or BlockSlots, and the BeaconState tree roots,
# but that still involves tens of megabytes worth of copying, along
# with the concomitant memory allocator and GC load. Instead, use a
# more memory-intensive (but more conceptually straightforward, and
# faster) strategy to just store, for the most recent slots.
let stateCacheIndex = dag.getStateCacheIndex(blck.root, state.data.slot)
if stateCacheIndex == -1:
# Could use a deque or similar, but want simpler structure, and the data
# items are small and few.
const MAX_CACHE_SIZE = 32
let cacheLen = dag.cachedStates.len
doAssert cacheLen <= MAX_CACHE_SIZE
let entry =
if dag.cachedStates.len == MAX_CACHE_SIZE: dag.cachedStates.pop().state
else: (ref HashedBeaconState)()
entry[] = state
insert(dag.cachedStates, (blck.root, state.data.slot, entry))
trace "CandidateChains.putState(): state cache updated",
cacheLen, root = shortLog(blck.root), slot = state.data.slot
proc putState*(dag: CandidateChains, state: HashedBeaconState, blck: BlockRef) =
# TODO we save state at every epoch start but never remove them - we also
# potentially save multiple states per slot if reorgs happen, meaning
@@ -344,35 +382,7 @@ proc putState*(dag: CandidateChains, state: HashedBeaconState, blck: BlockRef) =
if not rootWritten:
dag.db.putStateRoot(blck.root, state.data.slot, state.root)
# Need to be able to efficiently access states for both attestation
# aggregation and to process block proposals going back to the last
# finalized slot. Ideally to avoid potential combinatorial forking
# storage and/or memory constraints could CoW, up to and including,
# in particular, hash_tree_root() which is expensive to do 30 times
# since the previous epoch, to efficiently state_transition back to
# desired slot. However, none of that's in place, so there are both
# expensive, repeated BeaconState copies as well as computationally
# time-consuming-near-end-of-epoch hash tree roots. The latter are,
# effectively, naïvely O(n^2) in slot number otherwise, so when the
# slots become in the mid-to-high-20s it's spending all its time in
# pointlessly repeated calculations of prefix-state-transitions. An
# intermediate time/memory workaround involves storing only mapping
# between BlockRefs, or BlockSlots, and the BeaconState tree roots,
# but that still involves tens of megabytes worth of copying, along
# with the concomitant memory allocator and GC load. Instead, use a
# more memory-intensive (but more conceptually straightforward, and
# faster) strategy to just store, for the most recent slots.
let stateCacheIndex = dag.getStateCacheIndex(blck.root, state.data.slot)
if stateCacheIndex == -1:
# Could use a deque or similar, but want simpler structure, and the data
# items are small and few.
const MAX_CACHE_SIZE = 32
insert(dag.cachedStates, (blck.root, state.data.slot, newClone(state)))
while dag.cachedStates.len > MAX_CACHE_SIZE:
discard dag.cachedStates.pop()
let cacheLen = dag.cachedStates.len
trace "CandidateChains.putState(): state cache updated", cacheLen
doAssert cacheLen > 0 and cacheLen <= MAX_CACHE_SIZE
putStateCache(dag, state, blck)
func getRef*(dag: CandidateChains, root: Eth2Digest): BlockRef =
## Retrieve a resolved block reference, if available

View File

@@ -266,7 +266,9 @@ proc isValidBeaconBlock*(
# The block is not from a future slot
# TODO allow `MAXIMUM_GOSSIP_CLOCK_DISPARITY` leniency, especially towards
# seemingly future slots.
if not (signed_beacon_block.message.slot <= current_slot):
# TODO using +1 here while this is being sorted - should queue these until
# they're within the DISPARITY limit
if not (signed_beacon_block.message.slot <= current_slot + 1):
debug "isValidBeaconBlock: block is from a future slot",
signed_beacon_block_message_slot = signed_beacon_block.message.slot,
current_slot = current_slot

View File

@@ -174,7 +174,7 @@ proc slash_validator*(state: var BeaconState, slashed_index: ValidatorIndex,
validator.slashed = true
validator.withdrawable_epoch =
max(validator.withdrawable_epoch, epoch + EPOCHS_PER_SLASHINGS_VECTOR)
state.slashings[epoch mod EPOCHS_PER_SLASHINGS_VECTOR] +=
state.slashings[int(epoch mod EPOCHS_PER_SLASHINGS_VECTOR)] +=
validator.effective_balance
decrease_balance(state, slashed_index,
validator.effective_balance div MIN_SLASHING_PENALTY_QUOTIENT)

View File

@@ -566,7 +566,7 @@ func process_final_updates*(state: var BeaconState) {.nbench.}=
MAX_EFFECTIVE_BALANCE)
# Reset slashings
state.slashings[next_epoch mod EPOCHS_PER_SLASHINGS_VECTOR] = 0.Gwei
state.slashings[int(next_epoch mod EPOCHS_PER_SLASHINGS_VECTOR)] = 0.Gwei
# Set randao mix
state.randao_mixes[next_epoch mod EPOCHS_PER_HISTORICAL_VECTOR] =

View File

@@ -1,24 +1,38 @@
{.push raises: [Defect].}
import
os, strformat,
os, strformat, chronicles,
ssz/ssz_serialization,
beacon_node_types,
./spec/[crypto, datatypes, digest]
# Dump errors are generally not fatal where used currently - the code calling
# these functions, like most code, is not exception safe
template logErrors(body: untyped) =
try:
body
except CatchableError as err:
notice "Failed to write SSZ", dir, msg = err.msg
proc dump*(dir: string, v: AttestationData, validator: ValidatorPubKey) =
SSZ.saveFile(dir / &"att-{v.slot}-{v.index}-{shortLog(validator)}.ssz", v)
logErrors:
SSZ.saveFile(dir / &"att-{v.slot}-{v.index}-{shortLog(validator)}.ssz", v)
proc dump*(dir: string, v: SignedBeaconBlock, root: Eth2Digest) =
SSZ.saveFile(dir / &"block-{v.message.slot}-{shortLog(root)}.ssz", v)
logErrors:
SSZ.saveFile(dir / &"block-{v.message.slot}-{shortLog(root)}.ssz", v)
proc dump*(dir: string, v: SignedBeaconBlock, blck: BlockRef) =
dump(dir, v, blck.root)
proc dump*(dir: string, v: HashedBeaconState, blck: BlockRef) =
SSZ.saveFile(
dir / &"state-{v.data.slot}-{shortLog(blck.root)}-{shortLog(v.root)}.ssz",
v.data)
logErrors:
SSZ.saveFile(
dir / &"state-{v.data.slot}-{shortLog(blck.root)}-{shortLog(v.root)}.ssz",
v.data)
proc dump*(dir: string, v: HashedBeaconState) =
SSZ.saveFile(
dir / &"state-{v.data.slot}-{shortLog(v.root)}.ssz",
v.data)
logErrors:
SSZ.saveFile(
dir / &"state-{v.data.slot}-{shortLog(v.root)}.ssz",
v.data)

View File

@@ -17,13 +17,17 @@ cli do(kind: string, file: string):
echo "Unknown file type: ", ext
quit 1
)
echo hash_tree_root(v[]).data.toHex()
when t is SignedBeaconBlock:
echo hash_tree_root(v.message).data.toHex()
else:
echo hash_tree_root(v[]).data.toHex()
let ext = splitFile(file).ext
case kind
of "attester_slashing": printit(AttesterSlashing)
of "attestation": printit(Attestation)
of "signed_block": printit(SignedBeaconBlock)
of "block": printit(BeaconBlock)
of "block_body": printit(BeaconBlockBody)
of "block_header": printit(BeaconBlockHeader)

View File

@@ -22,6 +22,7 @@ cli do(kind: string, file: string):
case kind
of "attester_slashing": printit(AttesterSlashing)
of "attestation": printit(Attestation)
of "signed_block": printit(SignedBeaconBlock)
of "block": printit(BeaconBlock)
of "block_body": printit(BeaconBlockBody)
of "block_header": printit(BeaconBlockHeader)

View File

@@ -13,6 +13,9 @@ import
../beacon_chain/spec/[datatypes, digest, validator],
../beacon_chain/[beacon_node_types, block_pool, state_transition, ssz]
when isMainModule:
import chronicles # or some random compile error happens...
suiteReport "BlockRef and helpers" & preset():
timedTest "isAncestorOf sanity" & preset():
let
@@ -367,3 +370,4 @@ when const_preset == "minimal": # These require some minutes in mainnet
hash_tree_root(pool.headState.data.data)
hash_tree_root(pool2.justifiedState.data.data) ==
hash_tree_root(pool.justifiedState.data.data)