fix comments based on PR feedback
parent 97e54b85d6
commit 85f6712363
@@ -86,7 +86,7 @@ def with_custom_state(balances_fn: Callable[[Any], Sequence[int]],
                 state = _prepare_state(balances_fn, threshold_fn, spec, phases)
                 _custom_state_cache_dict[key] = state.get_backing()

-            # Take a copy out of the LRU cache result.
+            # Take an entry out of the LRU.
+            # No copy is necessary, as we wrap the immutable backing with a new view.
             state = spec.BeaconState(backing=_custom_state_cache_dict[key])
             kw['state'] = state
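The no-copy claim in the new comment relies on the persistent tree backing behind the pyspec's SSZ views: wrapping the cached backing in a fresh `spec.BeaconState` view gives each test its own handle, and mutating that view path-copies nodes instead of touching the cached tree. A minimal illustrative sketch, not part of the commit, assuming `spec`, `key`, and `_custom_state_cache_dict` are as in the hunk above:

    # Two views over the same cached backing (illustrative sketch).
    state = spec.BeaconState(backing=_custom_state_cache_dict[key])
    original_slot = state.slot
    state.slot = original_slot + 1           # path-copies nodes for this view only
    untouched = spec.BeaconState(backing=_custom_state_cache_dict[key])
    assert untouched.slot == original_slot   # the cached backing is unchanged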
@@ -382,13 +382,14 @@ def cached_prepare_state_with_attestations(spec, state):
     Cached version of prepare_state_with_attestations,
     but does not return anything, and does not support a participation fn argument
     """
-    # If the pre-state is not already known in the LRU, then take it, make it leaking, and put it in the LRU.
-    # The input state is likely already cached, so the hash-tree-root is fine.
+    # If the pre-state is not already known in the LRU, then take it,
+    # prepare it with attestations, and put it in the LRU.
+    # The input state is likely already cached, so the hash-tree-root does not affect speed.
     key = (spec.fork, state.hash_tree_root())
     global _prep_state_cache_dict
     if key not in _prep_state_cache_dict:
         prepare_state_with_attestations(spec, state)
-        _prep_state_cache_dict[key] = state.get_backing()
+        _prep_state_cache_dict[key] = state.get_backing()  # cache the tree structure, not the view wrapping it.

     # Put the LRU cache result into the state view, as if we transitioned the original view
     state.set_backing(_prep_state_cache_dict[key])
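The cache only pays off when the same pre-state is prepared more than once; the key is the pair of `spec.fork` and the pre-state's `hash_tree_root()`. A hedged usage sketch (the helper name `run_twice` and the use of `state.copy()` are illustrative, not part of this commit):

    def run_twice(spec, state):
        pre = state.copy()                                     # keep a second view of the same pre-state
        cached_prepare_state_with_attestations(spec, state)    # first call: runs the preparation, fills the LRU
        cached_prepare_state_with_attestations(spec, pre)      # second call: same key, only set_backing() runs
        assert pre.hash_tree_root() == state.hash_tree_root()  # both views now wrap the cached backing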
@@ -20,15 +20,16 @@ def leaking(epochs=None):

     def deco(fn):
         def entry(*args, spec, state, **kw):
-            # If the pre-state is not already known in the LRU, then take it, make it leaking, and put it in the LRU.
-            # The input state is likely already cached, so the hash-tree-root is fine.
+            # If the pre-state is not already known in the LRU, then take it,
+            # transition it to leak, and put it in the LRU.
+            # The input state is likely already cached, so the hash-tree-root does not affect speed.
             key = (state.hash_tree_root(), spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY, spec.SLOTS_PER_EPOCH, epochs)
             global _cache_dict
             if key not in _cache_dict:
                 transition_state_to_leak(spec, state, epochs=epochs)
-                _cache_dict[key] = state.get_backing()
+                _cache_dict[key] = state.get_backing()  # cache the tree structure, not the view wrapping it.

-            # Take a copy out of the LRU cache result.
+            # Take an entry out of the LRU.
+            # No copy is necessary, as we wrap the immutable backing with a new view.
             state = spec.BeaconState(backing=_cache_dict[key])
             return fn(*args, spec=spec, state=state, **kw)
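The `leaking` decorator wraps reward/penalty tests so the expensive leak transition is computed once per distinct pre-state and then served from the LRU. A sketch of how it could be applied (the test name and body are illustrative, not part of this commit; in the real suite it would be combined with the usual spec/state test decorators):

    @leaking()
    def test_rewards_during_leak(spec, state):
        # `state` arrives already transitioned past MIN_EPOCHS_TO_INACTIVITY_PENALTY
        # epochs without finality, so the inactivity leak is active for the test body.
        ...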