fix invalid state root being written to database (#1493)

* fix invalid state root being written to database

When rewinding state data, the wrong block reference would be used when
saving the state root - this would cause state loading to fail by
loading a different state than expected, preventing blocks from being
applied.

* refactor state loading and saving to consistently use and set
StateData block
* avoid rollback when state is missing from database (as opposed to
being partially overwritten and therefore in need of rollback)
* don't store state roots for empty slots - previously, these were used
as a cache to avoid recalculating them in state transition, but this has
been superseded by hash tree root caching
* don't attempt loading states / state roots for non-epoch slots, these
are not saved to the database
* simplify rewinder and clean up functions after caches have been
reworked
* fix chaindag logscope
* add database reload metric
* re-enable clearance epoch tests

* names
This commit is contained in:
Jacek Sieka 2020-08-13 11:50:05 +02:00 committed by GitHub
parent ab34584f23
commit 58d77153fc
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 203 additions and 198 deletions

View File

@ -94,26 +94,31 @@ proc get(db: BeaconChainDB, key: openArray[byte], T: type Eth2Digest): Opt[T] =
res res
proc get(db: BeaconChainDB, key: openArray[byte], res: var auto): bool = type GetResult = enum
var found = false found
notFound
corrupted
proc get(db: BeaconChainDB, key: openArray[byte], output: var auto): GetResult =
var status = GetResult.notFound
# TODO address is needed because there's no way to express lifetimes in nim # TODO address is needed because there's no way to express lifetimes in nim
# we'll use unsafeAddr to find the code later # we'll use unsafeAddr to find the code later
var resPtr = unsafeAddr res # callback is local, ptr wont escape var outputPtr = unsafeAddr output # callback is local, ptr wont escape
proc decode(data: openArray[byte]) = proc decode(data: openArray[byte]) =
try: try:
resPtr[] = SSZ.decode(snappy.decode(data), type res) outputPtr[] = SSZ.decode(snappy.decode(data), type output)
found = true status = GetResult.found
except SerializationError as e: except SerializationError as e:
# If the data can't be deserialized, it could be because it's from a # If the data can't be deserialized, it could be because it's from a
# version of the software that uses a different SSZ encoding # version of the software that uses a different SSZ encoding
warn "Unable to deserialize data, old database?", warn "Unable to deserialize data, old database?",
err = e.msg, typ = name(type res), dataLen = data.len err = e.msg, typ = name(type output), dataLen = data.len
discard status = GetResult.corrupted
discard db.backend.get(key, decode).expect("working database") discard db.backend.get(key, decode).expect("working database")
found status
proc putBlock*(db: BeaconChainDB, value: SignedBeaconBlock) = proc putBlock*(db: BeaconChainDB, value: SignedBeaconBlock) =
db.put(subkey(type value, value.root), value) db.put(subkey(type value, value.root), value)
@ -152,7 +157,7 @@ proc putTailBlock*(db: BeaconChainDB, key: Eth2Digest) =
proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Opt[TrustedSignedBeaconBlock] = proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Opt[TrustedSignedBeaconBlock] =
# We only store blocks that we trust in the database # We only store blocks that we trust in the database
result.ok(TrustedSignedBeaconBlock(root: key)) result.ok(TrustedSignedBeaconBlock(root: key))
if not db.get(subkey(SignedBeaconBlock, key), result.get): if db.get(subkey(SignedBeaconBlock, key), result.get) != GetResult.found:
result.err() result.err()
proc getState*( proc getState*(
@ -162,15 +167,20 @@ proc getState*(
## re-allocating it if possible ## re-allocating it if possible
## Return `true` iff the entry was found in the database and `output` was ## Return `true` iff the entry was found in the database and `output` was
## overwritten. ## overwritten.
## Rollback will be called only if output was partially written - if it was
## not found at all, rollback will not be called
# TODO rollback is needed to deal with bug - use `noRollback` to ignore: # TODO rollback is needed to deal with bug - use `noRollback` to ignore:
# https://github.com/nim-lang/Nim/issues/14126 # https://github.com/nim-lang/Nim/issues/14126
# TODO RVO is inefficient for large objects: # TODO RVO is inefficient for large objects:
# https://github.com/nim-lang/Nim/issues/13879 # https://github.com/nim-lang/Nim/issues/13879
if not db.get(subkey(BeaconState, key), output): case db.get(subkey(BeaconState, key), output)
of GetResult.found:
true
of GetResult.notFound:
false
of GetResult.corrupted:
rollback(output) rollback(output)
false false
else:
true
proc getStateRoot*(db: BeaconChainDB, proc getStateRoot*(db: BeaconChainDB,
root: Eth2Digest, root: Eth2Digest,
@ -198,6 +208,6 @@ iterator getAncestors*(db: BeaconChainDB, root: Eth2Digest):
var res: TrustedSignedBeaconBlock var res: TrustedSignedBeaconBlock
res.root = root res.root = root
while db.get(subkey(SignedBeaconBlock, res.root), res): while db.get(subkey(SignedBeaconBlock, res.root), res) == GetResult.found:
yield res yield res
res.root = res.message.parent_root res.root = res.message.parent_root

View File

@ -24,8 +24,9 @@ export block_pools_types
declareCounter beacon_reorgs_total, "Total occurrences of reorganizations of the chain" # On fork choice declareCounter beacon_reorgs_total, "Total occurrences of reorganizations of the chain" # On fork choice
declareCounter beacon_state_data_cache_hits, "EpochRef hits" declareCounter beacon_state_data_cache_hits, "EpochRef hits"
declareCounter beacon_state_data_cache_misses, "EpochRef misses" declareCounter beacon_state_data_cache_misses, "EpochRef misses"
declareCounter beacon_state_rewinds, "State database rewinds"
logScope: topics = "hotdb" logScope: topics = "chaindag"
proc putBlock*( proc putBlock*(
dag: var ChainDAGRef, signedBlock: SignedBeaconBlock) = dag: var ChainDAGRef, signedBlock: SignedBeaconBlock) =
@ -382,11 +383,11 @@ proc getEpochRef*(dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): EpochRef =
getEpochInfo(blck, state, cache) getEpochInfo(blck, state, cache)
proc getState( proc getState(
dag: ChainDAGRef, db: BeaconChainDB, stateRoot: Eth2Digest, blck: BlockRef, dag: ChainDAGRef, state: var StateData, stateRoot: Eth2Digest,
output: var StateData): bool = blck: BlockRef): bool =
let outputAddr = unsafeAddr output # local scope let stateAddr = unsafeAddr state # local scope
func restore(v: var BeaconState) = func restore(v: var BeaconState) =
if outputAddr == (unsafeAddr dag.headState): if stateAddr == (unsafeAddr dag.headState):
# TODO seeing the headState in the restore shouldn't happen - we load # TODO seeing the headState in the restore shouldn't happen - we load
# head states only when updating the head position, and by that time # head states only when updating the head position, and by that time
# the database will have gone through enough sanity checks that # the database will have gone through enough sanity checks that
@ -394,40 +395,55 @@ proc getState(
# Nonetheless, this is an ugly workaround that needs to go away # Nonetheless, this is an ugly workaround that needs to go away
doAssert false, "Cannot alias headState" doAssert false, "Cannot alias headState"
assign(outputAddr[], dag.headState) assign(stateAddr[], dag.headState)
if not db.getState(stateRoot, output.data.data, restore): if not dag.db.getState(stateRoot, state.data.data, restore):
return false return false
output.blck = blck state.blck = blck
output.data.root = stateRoot state.data.root = stateRoot
true true
proc putState*(dag: ChainDAGRef, state: HashedBeaconState, blck: BlockRef) = proc getState(dag: ChainDAGRef, state: var StateData, bs: BlockSlot): bool =
## Load a state from the database given a block and a slot - this will first
## lookup the state root in the state root table then load the corresponding
## state, if it exists
if not bs.slot.isEpoch:
return false # We only ever save epoch states - no need to hit database
if (let stateRoot = dag.db.getStateRoot(bs.blck.root, bs.slot);
stateRoot.isSome()):
return dag.getState(state, stateRoot.get(), bs.blck)
false
proc putState*(dag: ChainDAGRef, state: StateData) =
# Store a state and its root
# TODO we save state at every epoch start but never remove them - we also # TODO we save state at every epoch start but never remove them - we also
# potentially save multiple states per slot if reorgs happen, meaning # potentially save multiple states per slot if reorgs happen, meaning
# we could easily see a state explosion # we could easily see a state explosion
logScope: pcs = "save_state_at_epoch_start" logScope: pcs = "save_state_at_epoch_start"
var rootWritten = false if not state.data.data.slot.isEpoch:
if state.data.slot != blck.slot: # As a policy, we only store epoch boundary states - the rest can be
# This is a state that was produced by a skip slot for which there is no # reconstructed by loading an epoch boundary state and applying the
# block - we'll save the state root in the database in case we need to # missing blocks
# replay the skip return
dag.db.putStateRoot(blck.root, state.data.slot, state.root)
rootWritten = true if dag.db.containsState(state.data.root):
return
if state.data.slot.isEpoch:
if not dag.db.containsState(state.root):
info "Storing state", info "Storing state",
blck = shortLog(blck), blck = shortLog(state.blck),
stateSlot = shortLog(state.data.slot), stateSlot = shortLog(state.data.data.slot),
stateRoot = shortLog(state.root) stateRoot = shortLog(state.data.root)
dag.db.putState(state.root, state.data) # Ideally we would save the state and the root lookup cache in a single
if not rootWritten: # transaction to prevent database inconsistencies, but the state loading code
dag.db.putStateRoot(blck.root, state.data.slot, state.root) # is resilient against one or the other going missing
dag.db.putState(state.data.root, state.data.data)
dag.db.putStateRoot(state.blck.root, state.data.data.slot, state.data.root)
func getRef*(dag: ChainDAGRef, root: Eth2Digest): BlockRef = func getRef*(dag: ChainDAGRef, root: Eth2Digest): BlockRef =
## Retrieve a resolved block reference, if available ## Retrieve a resolved block reference, if available
@ -500,122 +516,48 @@ proc get*(dag: ChainDAGRef, root: Eth2Digest): Option[BlockData] =
else: else:
none(BlockData) none(BlockData)
proc skipAndUpdateState( proc advanceSlots(
dag: ChainDAGRef, dag: ChainDAGRef, state: var StateData, slot: Slot, save: bool) =
state: var HashedBeaconState, blck: BlockRef, slot: Slot, save: bool) = # Given a state, advance it zero or more slots by applying empty slot
while state.data.slot < slot: # processing
doAssert state.data.data.slot <= slot
while state.data.data.slot < slot:
# Process slots one at a time in case afterUpdate needs to see empty states # Process slots one at a time in case afterUpdate needs to see empty states
var stateCache = getEpochCache(blck, state.data) var cache = getEpochCache(state.blck, state.data.data)
advance_slot(state, dag.updateFlags, stateCache) advance_slot(state.data, dag.updateFlags, cache)
if save: if save:
dag.putState(state, blck) dag.putState(state)
proc skipAndUpdateState( proc applyBlock(
dag: ChainDAGRef, dag: ChainDAGRef,
state: var StateData, blck: BlockData, flags: UpdateFlags, save: bool): bool = state: var StateData, blck: BlockData, flags: UpdateFlags, save: bool): bool =
# Apply a single block to the state - the state must be positioned at the
# parent of the block with a slot lower than the one of the block being
# applied
doAssert state.blck == blck.refs.parent
dag.skipAndUpdateState( # `state_transition` can handle empty slots, but we want to potentially save
state.data, blck.refs, blck.data.message.slot - 1, save) # some of the empty slot states
dag.advanceSlots(state, blck.data.message.slot - 1, save)
var statePtr = unsafeAddr state # safe because `restore` is locally scoped var statePtr = unsafeAddr state # safe because `restore` is locally scoped
func restore(v: var HashedBeaconState) = func restore(v: var HashedBeaconState) =
doAssert (addr(statePtr.data) == addr v) doAssert (addr(statePtr.data) == addr v)
statePtr[] = dag.headState statePtr[] = dag.headState
var stateCache = getEpochCache(blck.refs, state.data.data) var cache = getEpochCache(blck.refs, state.data.data)
let ok = state_transition( let ok = state_transition(
dag.runtimePreset, state.data, blck.data, dag.runtimePreset, state.data, blck.data,
stateCache, flags + dag.updateFlags, restore) cache, flags + dag.updateFlags, restore)
if ok:
if ok and save: state.blck = blck.refs
dag.putState(state.data, blck.refs) dag.putState(state)
ok ok
proc rewindState(
dag: ChainDAGRef, state: var StateData, bs: BlockSlot): seq[BlockRef] =
logScope:
blockSlot = shortLog(bs)
pcs = "replay_state"
var ancestors = @[bs.blck]
# Common case: the last block applied is the parent of the block to apply:
if not bs.blck.parent.isNil and state.blck.root == bs.blck.parent.root and
state.data.data.slot < bs.blck.slot:
return ancestors
# It appears that the parent root of the proposed new block is different from
# what we expected. We will have to rewind the state to a point along the
# chain of ancestors of the new block. We will do this by loading each
# successive parent block and checking if we can find the corresponding state
# in the database.
var
stateRoot = block:
let tmp = dag.db.getStateRoot(bs.blck.root, bs.slot)
if tmp.isSome() and dag.db.containsState(tmp.get()):
tmp
else:
# State roots are sometimes kept in database even though state is not
err(Opt[Eth2Digest])
curBs = bs
while stateRoot.isNone():
let parBs = curBs.parent()
if parBs.blck.isNil:
break # Bug probably!
if parBs.blck != curBs.blck:
ancestors.add(parBs.blck)
if (let tmp = dag.db.getStateRoot(parBs.blck.root, parBs.slot); tmp.isSome()):
if dag.db.containsState(tmp.get):
stateRoot = tmp
break
curBs = parBs
if stateRoot.isNone():
# TODO this should only happen if the database is corrupt - we walked the
# list of parent blocks and couldn't find a corresponding state in the
# database, which should never happen (at least we should have the
# tail state in there!)
fatal "Couldn't find ancestor state root!"
doAssert false, "Oh noes, we passed big bang!"
let
ancestor = ancestors.pop()
root = stateRoot.get()
found = dag.getState(dag.db, root, ancestor, state)
if not found:
# TODO this should only happen if the database is corrupt - we walked the
# list of parent blocks and couldn't find a corresponding state in the
# database, which should never happen (at least we should have the
# tail state in there!)
fatal "Couldn't find ancestor state or block parent missing!"
doAssert false, "Oh noes, we passed big bang!"
trace "Replaying state transitions",
stateSlot = shortLog(state.data.data.slot),
ancestors = ancestors.len
ancestors
proc getStateDataCached(
dag: ChainDAGRef, state: var StateData, bs: BlockSlot): bool =
# This pointedly does not run rewindState or state_transition, but otherwise
# mostly matches updateStateData(...), because it's too expensive to run the
# rewindState(...)/skipAndUpdateState(...)/state_transition(...) procs, when
# each hash_tree_root(...) consumes a nontrivial fraction of a second.
# In-memory caches didn't hit. Try main block pool database. This is slower
# than the caches due to SSZ (de)serializing and disk I/O, so prefer them.
if (let tmp = dag.db.getStateRoot(bs.blck.root, bs.slot); tmp.isSome()):
return dag.getState(dag.db, tmp.get(), bs.blck, state)
false
proc updateStateData*( proc updateStateData*(
dag: ChainDAGRef, state: var StateData, bs: BlockSlot) = dag: ChainDAGRef, state: var StateData, bs: BlockSlot) =
## Rewind or advance state such that it matches the given block and slot - ## Rewind or advance state such that it matches the given block and slot -
@ -624,56 +566,72 @@ proc updateStateData*(
## If slot is higher than blck.slot, replay will fill in with empty/non-block ## If slot is higher than blck.slot, replay will fill in with empty/non-block
## slots, else it is ignored ## slots, else it is ignored
# We need to check the slot because the state might have moved forwards # First, see if we're already at the requested block. If we are, also check
# without blocks # that the state has not been advanced past the desired block - if it has,
if state.blck.root == bs.blck.root and state.data.data.slot <= bs.slot: # an earlier state must be loaded since there's no way to undo the slot
if state.data.data.slot != bs.slot: # transitions
# Might be that we're moving to the same block but later slot if state.blck == bs.blck and state.data.data.slot <= bs.slot:
dag.skipAndUpdateState(state.data, bs.blck, bs.slot, true) # The block is the same and we're at an early enough slot - advance the
# state with empty slot processing until the slot is correct
dag.advanceSlots(state, bs.slot, true)
return # State already at the right spot
if dag.getStateDataCached(state, bs):
return return
let ancestors = rewindState(dag, state, bs) # Either the state is too new or was created by applying a different block.
# We'll now resort to loading the state from the database then reapplying
# blocks until we reach the desired point in time.
# If we come this far, we found the state root. The last block on the stack var
# is the one that produced this particular state, so we can pop it ancestors: seq[BlockRef]
# TODO it might be possible to use the latest block hashes from the state to cur = bs
# do this more efficiently.. whatever! # Look for a state in the database and load it - as long as it cannot be
# found, keep track of the blocks that are needed to reach it from the
# state that eventually will be found
while not dag.getState(state, cur):
# There's no state saved for this particular BlockSlot combination, keep
# looking...
if cur.slot == cur.blck.slot:
# This is not an empty slot, so the block will need to be applied to
# eventually reach bs
ancestors.add(cur.blck)
# Time to replay all the blocks between then and now. We skip one because # Moves back slot by slot, in case a state for an empty slot was saved
# it's the one that we found the state with, and it has already been cur = cur.parent
# applied. Pathologically quadratic in slot number, naïvely.
# Time to replay all the blocks between then and now
for i in countdown(ancestors.len - 1, 0): for i in countdown(ancestors.len - 1, 0):
# Because the ancestors are in the database, there's no need to persist them # Because the ancestors are in the database, there's no need to persist them
# again. Also, because we're applying blocks that were loaded from the # again. Also, because we're applying blocks that were loaded from the
# database, we can skip certain checks that have already been performed # database, we can skip certain checks that have already been performed
# before adding the block to the database. In particular, this means that # before adding the block to the database.
# no state root calculation will take place here, because we can load
# the final state root from the block itself.
let ok = let ok =
dag.skipAndUpdateState(state, dag.get(ancestors[i]), {}, false) dag.applyBlock(state, dag.get(ancestors[i]), {}, false)
doAssert ok, "Blocks in database should never fail to apply.." doAssert ok, "Blocks in database should never fail to apply.."
# We save states here - blocks were guaranteed to have passed through the save # We save states here - blocks were guaranteed to have passed through the save
# function once at least, but not so for empty slots! # function once at least, but not so for empty slots!
dag.skipAndUpdateState(state.data, bs.blck, bs.slot, true) dag.advanceSlots(state, bs.slot, true)
state.blck = bs.blck beacon_state_rewinds.inc()
debug "State reloaded from database",
blocks = ancestors.len, stateRoot = shortLog(state.data.root),
blck = shortLog(bs)
proc loadTailState*(dag: ChainDAGRef): StateData = proc loadTailState*(dag: ChainDAGRef): StateData =
## Load the state associated with the current tail in the dag ## Load the state associated with the current tail in the dag
let stateRoot = dag.db.getBlock(dag.tail.root).get().message.state_root let stateRoot = dag.db.getBlock(dag.tail.root).get().message.state_root
let found = dag.getState(dag.db, stateRoot, dag.tail, result) let found = dag.getState(result, stateRoot, dag.tail)
# TODO turn into regular error, this can happen # TODO turn into regular error, this can happen
doAssert found, "Failed to load tail state, database corrupt?" doAssert found, "Failed to load tail state, database corrupt?"
proc delState(dag: ChainDAGRef, bs: BlockSlot) = proc delState(dag: ChainDAGRef, bs: BlockSlot) =
# Delete state state and mapping for a particular block+slot # Delete state state and mapping for a particular block+slot
if not bs.slot.isEpoch:
return # We only ever save epoch states
if (let root = dag.db.getStateRoot(bs.blck.root, bs.slot); root.isSome()): if (let root = dag.db.getStateRoot(bs.blck.root, bs.slot); root.isSome()):
dag.db.delState(root.get()) dag.db.delState(root.get())
dag.db.delStateRoot(bs.blck.root, bs.slot)
proc updateHead*(dag: ChainDAGRef, newHead: BlockRef) = proc updateHead*(dag: ChainDAGRef, newHead: BlockRef) =
## Update what we consider to be the current head, as given by the fork ## Update what we consider to be the current head, as given by the fork

View File

@ -216,7 +216,7 @@ proc addRawBlock*(
onBlockAdded onBlockAdded
) )
dag.putState(dag.clearanceState.data, dag.clearanceState.blck) dag.putState(dag.clearanceState)
return ok dag.clearanceState.blck return ok dag.clearanceState.blck

View File

@ -351,45 +351,82 @@ suiteReport "chain DAG finalization tests" & preset():
hash_tree_root(dag2.headState.data.data) == hash_tree_root(dag2.headState.data.data) ==
hash_tree_root(dag.headState.data.data) hash_tree_root(dag.headState.data.data)
# timedTest "init with gaps" & preset(): timedTest "orphaned epoch block" & preset():
# var cache = StateCache() var prestate = (ref HashedBeaconState)()
# for i in 0 ..< (SLOTS_PER_EPOCH * 6 - 2): for i in 0 ..< SLOTS_PER_EPOCH:
# var if i == SLOTS_PER_EPOCH - 1:
# blck = makeTestBlock( assign(prestate[], dag.headState.data)
# dag.headState.data, pool.head.blck.root, cache,
# attestations = makeFullAttestations(
# dag.headState.data.data, pool.head.blck.root,
# dag.headState.data.data.slot, cache, {}))
# let added = dag.addRawBlock(quarantine, hash_tree_root(blck.message), blck) do (validBlock: BlockRef): let blck = makeTestBlock(
# discard dag.headState.data, dag.head.root, cache)
# check: added.isOk() let added = dag.addRawBlock(quarantine, blck, nil)
# dag.updateHead(added[]) check: added.isOk()
dag.updateHead(added[])
# # Advance past epoch so that the epoch transition is gapped check:
# check: dag.heads.len() == 1
# process_slots(
# dag.headState.data, Slot(SLOTS_PER_EPOCH * 6 + 2) )
# var blck = makeTestBlock( advance_slot(prestate[], {}, cache)
# dag.headState.data, pool.head.blck.root, cache,
# attestations = makeFullAttestations(
# dag.headState.data.data, pool.head.blck.root,
# dag.headState.data.data.slot, cache, {}))
# let added = dag.addRawBlock(quarantine, hash_tree_root(blck.message), blck) do (validBlock: BlockRef): # create another block, orphaning the head
# discard let blck = makeTestBlock(
# check: added.isOk() prestate[], dag.head.parent.root, cache)
# dag.updateHead(added[])
# let # Add block, but don't update head
# pool2 = BlockPool.init(db) let added = dag.addRawBlock(quarantine, blck, nil)
check: added.isOk()
# # check that the state reloaded from database resembles what we had before var
# check: dag2 = init(ChainDAGRef, defaultRuntimePreset, db)
# pool2.dag.tail.root == dag.tail.root
# pool2.dag.head.blck.root == dag.head.blck.root # check that we can apply the block after the orphaning
# pool2.dag.finalizedHead.blck.root == dag.finalizedHead.blck.root let added2 = dag2.addRawBlock(quarantine, blck, nil)
# pool2.dag.finalizedHead.slot == dag.finalizedHead.slot check: added2.isOk()
# hash_tree_root(pool2.headState.data.data) ==
# hash_tree_root(dag.headState.data.data) suiteReport "chain DAG finalization tests" & preset():
setup:
var
db = makeTestDB(SLOTS_PER_EPOCH)
dag = init(ChainDAGRef, defaultRuntimePreset, db)
quarantine = QuarantineRef()
cache = StateCache()
timedTest "init with gaps" & preset():
for i in 0 ..< (SLOTS_PER_EPOCH * 6 - 2):
var
blck = makeTestBlock(
dag.headState.data, dag.head.root, cache,
attestations = makeFullAttestations(
dag.headState.data.data, dag.head.root,
dag.headState.data.data.slot, cache, {}))
let added = dag.addRawBlock(quarantine, blck, nil)
check: added.isOk()
dag.updateHead(added[])
# Advance past epoch so that the epoch transition is gapped
check:
process_slots(
dag.headState.data, Slot(SLOTS_PER_EPOCH * 6 + 2) )
var blck = makeTestBlock(
dag.headState.data, dag.head.root, cache,
attestations = makeFullAttestations(
dag.headState.data.data, dag.head.root,
dag.headState.data.data.slot, cache, {}))
let added = dag.addRawBlock(quarantine, blck, nil)
check: added.isOk()
dag.updateHead(added[])
let
dag2 = init(ChainDAGRef, defaultRuntimePreset, db)
# check that the state reloaded from database resembles what we had before
check:
dag2.tail.root == dag.tail.root
dag2.head.root == dag.head.root
dag2.finalizedHead.blck.root == dag.finalizedHead.blck.root
dag2.finalizedHead.slot == dag.finalizedHead.slot
hash_tree_root(dag2.headState.data.data) ==
hash_tree_root(dag.headState.data.data)