avoid memory allocations and copies when loading states (#942)

* rolls back some of the `ref` changes so that `HashedBeaconState.data` is a plain `BeaconState` again
* adds a utility to calculate stack sizes of the spec types (`research/stackSizes.nim`)
* works around bugs in Nim exception handling and RVO (see the sketch below)
Jacek Sieka 2020-04-28 10:08:32 +02:00 committed by GitHub
parent 80b538452e
commit 28d6cd2524
16 changed files with 336 additions and 256 deletions
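
The central pattern in this change is loading large objects in place through a `var` parameter plus a rollback callback, instead of returning them by value (where Nim's RVO can still copy, nim-lang/Nim#13879) or keeping them behind `ref`s. A minimal sketch of the idea with stand-in names (`BigState`, `RollbackCb`, `loadInto` and `noRestore` are hypothetical, not the project API):

```nim
type
  BigState = object
    ## Stand-in for a large flat object such as BeaconState
    balances: array[16_384, uint64]

  RollbackCb = proc(v: var BigState) {.gcsafe, raises: [Defect].}

proc noRestore(v: var BigState) =
  discard # caller accepts a partially written value

proc loadInto(data: openArray[byte], output: var BigState,
              rollback: RollbackCb): bool =
  ## Decode `data` into `output` in place; on failure, hand the half-written
  ## value to `rollback` so the caller can restore or reset it.
  try:
    if data.len < 1:
      raise newException(ValueError, "no data")
    # real code would SSZ-decode here; this just touches one field
    output.balances[0] = uint64(data[0])
    result = true
  except CatchableError:
    rollback(output)
    result = false

var state: BigState # allocated once, reused for every load
if loadInto([1'u8, 2, 3], state, noRestore):
  echo "loaded, first balance = ", state.balances[0]
```

The rollback callback exists because an exception can leave the output half-written (nim-lang/Nim#14126); `beacon_chain_db.nim` and `block_pool.nim` below use exactly this shape, with `noRollback` for the don't-care case.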

View File

@ -52,9 +52,10 @@ OK: 7/7 Fail: 0/7 Skip: 0/7
OK: 5/5 Fail: 0/5 Skip: 0/5
## BlockPool finalization tests [Preset: minimal]
```diff
+ init with gaps [Preset: minimal] OK
+ prune heads on finalization [Preset: minimal] OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
OK: 2/2 Fail: 0/2 Skip: 0/2
## BlockRef and helpers [Preset: minimal]
```diff
+ getAncestorAt sanity [Preset: minimal] OK
@ -257,4 +258,4 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
OK: 8/8 Fail: 0/8 Skip: 0/8
---TOTAL---
OK: 156/159 Fail: 3/159 Skip: 0/159
OK: 157/160 Fail: 3/160 Skip: 0/160

View File

@ -22,7 +22,8 @@ TOOLS := \
ncli_hash_tree_root \
ncli_pretty \
ncli_transition \
process_dashboard
process_dashboard \
stackSizes
# bench_bls_sig_agggregation TODO reenable after bls v0.10.1 changes
TOOLS_DIRS := \
beacon_chain \

View File

@ -180,7 +180,7 @@ proc addResolved(pool: var AttestationPool, blck: BlockRef, attestation: Attesta
pool.blockPool, pool.blockPool.tmpState,
BlockSlot(blck: blck, slot: attestation.data.slot))
template state(): BeaconState = pool.blockPool.tmpState.data.data[]
template state(): BeaconState = pool.blockPool.tmpState.data.data
if not validate(state, attestation):
notice "Invalid attestation",
@ -456,7 +456,7 @@ proc selectHead*(pool: AttestationPool): BlockRef =
justifiedHead = pool.blockPool.latestJustifiedBlock()
let newHead =
lmdGhost(pool, pool.blockPool.justifiedState.data.data[], justifiedHead.blck)
lmdGhost(pool, pool.blockPool.justifiedState.data.data, justifiedHead.blck)
newHead
@ -529,9 +529,9 @@ proc isValidAttestation*(
# as it supports aggregated attestations (which this can't be)
var cache = get_empty_per_epoch_cache()
if not is_valid_indexed_attestation(
pool.blockPool.headState.data.data[],
pool.blockPool.headState.data.data,
get_indexed_attestation(
pool.blockPool.headState.data.data[], attestation, cache), {}):
pool.blockPool.headState.data.data, attestation, cache), {}):
debug "isValidAttestation: signature verification failed"
return false

View File

@ -1,10 +1,10 @@
{.push raises: [Defect].}
import
options, typetraits, stew/[results, endians2],
typetraits, stew/[results, endians2],
serialization, chronicles,
spec/[datatypes, digest, crypto],
eth/db/kvstore, ssz
eth/db/kvstore, ssz, state_transition
type
BeaconChainDB* = ref object
@ -45,7 +45,7 @@ func subkey[N: static int](kind: DbKeyKind, key: array[N, byte]):
result[0] = byte ord(kind)
result[1 .. ^1] = key
func subkey(kind: type BeaconStateRef, key: Eth2Digest): auto =
func subkey(kind: type BeaconState, key: Eth2Digest): auto =
subkey(kHashToState, key.data)
func subkey(kind: type SignedBeaconBlock, key: Eth2Digest): auto =
@ -86,13 +86,13 @@ proc get(db: BeaconChainDB, key: openArray[byte], T: typedesc): Opt[T] =
proc putBlock*(db: BeaconChainDB, key: Eth2Digest, value: SignedBeaconBlock) =
db.put(subkey(type value, key), value)
proc putState*(db: BeaconChainDB, key: Eth2Digest, value: BeaconStateRef) =
proc putState*(db: BeaconChainDB, key: Eth2Digest, value: BeaconState) =
# TODO prune old states - this is less easy than it seems as we never know
# when or if a particular state will become finalized.
db.put(subkey(type value, key), value)
proc putState*(db: BeaconChainDB, value: BeaconStateRef) =
proc putState*(db: BeaconChainDB, value: BeaconState) =
db.putState(hash_tree_root(value), value)
proc putStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot,
@ -108,7 +108,7 @@ proc delBlock*(db: BeaconChainDB, key: Eth2Digest) =
"working database")
proc delState*(db: BeaconChainDB, key: Eth2Digest) =
db.backend.del(subkey(BeaconStateRef, key)).expect("working database")
db.backend.del(subkey(BeaconState, key)).expect("working database")
proc delStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot) =
db.backend.del(subkey(root, slot)).expect("working database")
@ -122,8 +122,31 @@ proc putTailBlock*(db: BeaconChainDB, key: Eth2Digest) =
proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Opt[SignedBeaconBlock] =
db.get(subkey(SignedBeaconBlock, key), SignedBeaconBlock)
proc getState*(db: BeaconChainDB, key: Eth2Digest): Opt[BeaconStateRef] =
db.get(subkey(BeaconStateRef, key), BeaconStateRef)
proc getState*(
db: BeaconChainDB, key: Eth2Digest, output: var BeaconState,
rollback: RollbackProc): bool =
## Load state into `output` - BeaconState is large so we want to avoid
## re-allocating it if possible
## Return `true` iff the entry was found in the database and `output` was
## overwritten.
# TODO rollback is needed to deal with bug - use `noRollback` to ignore:
# https://github.com/nim-lang/Nim/issues/14126
# TODO RVO is inefficient for large objects:
# https://github.com/nim-lang/Nim/issues/13879
# TODO address is needed because there's no way to express lifetimes in nim
# we'll use unsafeAddr to find the code later
let outputAddr = unsafeAddr output # callback is local
proc decode(data: openArray[byte]) =
try:
# TODO can't write to output directly..
outputAddr[] = SSZ.decode(data, BeaconState)
except SerializationError as e:
# If the data can't be deserialized, it could be because it's from a
# version of the software that uses a different SSZ encoding
warn "Unable to deserialize data, old database?", err = e.msg
rollback(outputAddr[])
db.backend.get(subkey(BeaconState, key), decode).expect("working database")
proc getStateRoot*(db: BeaconChainDB,
root: Eth2Digest,
@ -140,7 +163,7 @@ proc containsBlock*(db: BeaconChainDB, key: Eth2Digest): bool =
db.backend.contains(subkey(SignedBeaconBlock, key)).expect("working database")
proc containsState*(db: BeaconChainDB, key: Eth2Digest): bool =
db.backend.contains(subkey(BeaconStateRef, key)).expect("working database")
db.backend.contains(subkey(BeaconState, key)).expect("working database")
iterator getAncestors*(db: BeaconChainDB, root: Eth2Digest):
tuple[root: Eth2Digest, blck: SignedBeaconBlock] =
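
A call-site sketch for the new `getState`, modelled on the `getStateRef` helper added to the tests further down; `db` (an open `BeaconChainDB`) and `root` (the `Eth2Digest` of a stored state) are assumed to exist:

```nim
let loaded = BeaconStateRef() # heap allocation that can be reused across loads
if db.getState(root, loaded[], noRollback):
  echo "loaded state at slot ", shortLog(loaded[].slot)
else:
  echo "no (decodable) state stored under ", shortLog(root)
```

Compared to the old `Opt[BeaconStateRef]` return, the output buffer is owned by the caller, so repeated loads can reuse the same allocation.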

View File

@ -23,7 +23,7 @@ import
attestation_pool, block_pool, eth2_network, eth2_discovery,
beacon_node_types, mainchain_monitor, version, ssz, ssz/dynamic_navigator,
sync_protocol, request_manager, validator_keygen, interop, statusbar,
attestation_aggregation, sync_manager
attestation_aggregation, sync_manager, state_transition
const
genesisFile = "genesis.ssz"
@ -192,7 +192,7 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async
let tailBlock = get_initial_beacon_block(genesisState[])
try:
BlockPool.preInit(db, genesisState, tailBlock)
BlockPool.preInit(db, genesisState[], tailBlock)
doAssert BlockPool.isInitialized(db), "preInit should have initialized db"
except CatchableError as e:
error "Failed to initialize database", err = e.msg
@ -219,7 +219,7 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async
nil
let
enrForkId = enrForkIdFromState(blockPool.headState.data.data[])
enrForkId = enrForkIdFromState(blockPool.headState.data.data)
topicBeaconBlocks = getBeaconBlocksTopic(enrForkId.forkDigest)
topicAggregateAndProofs = getAggregateAndProofsTopic(enrForkId.forkDigest)
network = await createEth2Node(conf, enrForkId)
@ -235,7 +235,7 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async
blockPool: blockPool,
attestationPool: AttestationPool.init(blockPool),
mainchainMonitor: mainchainMonitor,
beaconClock: BeaconClock.init(blockPool.headState.data.data[]),
beaconClock: BeaconClock.init(blockPool.headState.data.data),
rpcServer: rpcServer,
forkDigest: enrForkId.forkDigest,
topicBeaconBlocks: topicBeaconBlocks,
@ -410,15 +410,15 @@ proc proposeBlock(node: BeaconNode,
(get_eth1data_stub(state.eth1_deposit_index, slot.compute_epoch_at_slot()),
newSeq[Deposit]())
else:
node.mainchainMonitor.getBlockProposalData(state[])
node.mainchainMonitor.getBlockProposalData(state)
let message = makeBeaconBlock(
state[],
state,
head.root,
validator.genRandaoReveal(state.fork, state.genesis_validators_root, slot),
eth1data,
Eth2Digest(),
node.attestationPool.getAttestationsForBlock(state[]),
node.attestationPool.getAttestationsForBlock(state),
deposits)
if not message.isSome():
@ -546,7 +546,7 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
slot = shortLog(slot)
return
let attestationHead = head.findAncestorBySlot(slot)
let attestationHead = head.atSlot(slot)
if head != attestationHead.blck:
# In rare cases, such as when we're busy syncing or just slow, we'll be
# attesting to a past state - we must then recreate the world as it looked
@ -576,16 +576,16 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
# version here that calculates the committee for a single slot only
node.blockPool.withState(node.blockPool.tmpState, attestationHead):
var cache = get_empty_per_epoch_cache()
let committees_per_slot = get_committee_count_at_slot(state[], slot)
let committees_per_slot = get_committee_count_at_slot(state, slot)
for committee_index in 0'u64..<committees_per_slot:
let committee = get_beacon_committee(
state[], slot, committee_index.CommitteeIndex, cache)
state, slot, committee_index.CommitteeIndex, cache)
for index_in_committee, validatorIdx in committee:
let validator = node.getAttachedValidator(state[], validatorIdx)
let validator = node.getAttachedValidator(state, validatorIdx)
if validator != nil:
let ad = makeAttestationData(state[], slot, committee_index, blck.root)
let ad = makeAttestationData(state, slot, committee_index, blck.root)
attestations.add((ad, committee.len, index_in_committee, validator))
for a in attestations:
@ -649,21 +649,21 @@ proc broadcastAggregatedAttestations(
let bs = BlockSlot(blck: aggregationHead, slot: aggregationSlot)
node.blockPool.withState(node.blockPool.tmpState, bs):
let
committees_per_slot = get_committee_count_at_slot(state[], aggregationSlot)
committees_per_slot = get_committee_count_at_slot(state, aggregationSlot)
var cache = get_empty_per_epoch_cache()
for committee_index in 0'u64..<committees_per_slot:
let committee = get_beacon_committee(
state[], aggregationSlot, committee_index.CommitteeIndex, cache)
state, aggregationSlot, committee_index.CommitteeIndex, cache)
for index_in_committee, validatorIdx in committee:
let validator = node.getAttachedValidator(state[], validatorIdx)
let validator = node.getAttachedValidator(state, validatorIdx)
if validator != nil:
# This is slightly strange/inverted control flow, since really it's
# going to happen once per slot, but this is the best way to get at
# the validator index and private key pair. TODO verify it only has
# one isSome() with test.
let aggregateAndProof =
aggregate_attestations(node.attestationPool, state[],
aggregate_attestations(node.attestationPool, state,
committee_index.CommitteeIndex,
# TODO https://github.com/status-im/nim-beacon-chain/issues/545
# this assumes in-process private keys
@ -1060,13 +1060,13 @@ proc installBeaconApiHandlers(rpcServer: RpcServer, node: BeaconNode) =
requireOneOf(slot, root)
if slot.isSome:
let blk = node.blockPool.head.blck.atSlot(slot.get)
var tmpState = emptyStateData()
node.blockPool.withState(tmpState, blk):
node.blockPool.withState(node.blockPool.tmpState, blk):
return jsonResult(state)
else:
let state = node.db.getState(root.get)
if state.isSome:
return jsonResult(state.get)
let tmp = BeaconStateRef() # TODO use tmpState - but load the entire StateData!
let state = node.db.getState(root.get, tmp[], noRollback)
if state:
return jsonResult(tmp[])
else:
return StringOfJson("null")
@ -1193,7 +1193,7 @@ proc start(node: BeaconNode) =
bs = BlockSlot(blck: head.blck, slot: head.blck.slot)
node.blockPool.withState(node.blockPool.tmpState, bs):
node.addLocalValidators(state[])
node.addLocalValidators(state)
node.run()
@ -1282,7 +1282,7 @@ when hasPrompt:
# TODO slow linear scan!
for idx, b in node.blockPool.headState.data.data.balances:
if node.getAttachedValidator(
node.blockPool.headState.data.data[], ValidatorIndex(idx)) != nil:
node.blockPool.headState.data.data, ValidatorIndex(idx)) != nil:
balance += b
formatGwei(balance)

View File

@ -1,7 +1,7 @@
{.push raises: [Defect].}
import
deques, tables, options,
deques, tables,
stew/[endians2, byteutils], chronicles,
spec/[datatypes, crypto, digest],
beacon_chain_db
@ -225,24 +225,16 @@ type
root*: Eth2Digest
historySlots*: uint64
func emptyStateData*: StateData =
StateData(
data: HashedBeaconState(
# Please note that this initialization is needed in order
# to allocate memory for the BeaconState:
data: BeaconStateRef(),
root: default(Eth2Digest)
),
blck: default(BlockRef))
func clone*(other: StateData): StateData =
StateData(data: clone(other.data),
blck: other.blck)
proc shortLog*(v: AttachedValidator): string = shortLog(v.pubKey)
chronicles.formatIt BlockSlot:
it.blck.root.data[0..3].toHex() & ":" & $it.slot
proc shortLog*(v: BlockSlot): string =
if v.blck.slot == v.slot:
v.blck.root.data[0..3].toHex() & ":" & $v.blck.slot
else: # There was a gap - log it
v.blck.root.data[0..3].toHex() & ":" & $v.blck.slot & "@" & $v.slot
chronicles.formatIt BlockRef:
it.root.data[0..3].toHex() & ":" & $it.slot
proc shortLog*(v: BlockRef): string =
v.root.data[0..3].toHex() & ":" & $v.slot
chronicles.formatIt BlockSlot: shortLog(it)
chronicles.formatIt BlockRef: shortLog(it)

View File

@ -33,7 +33,7 @@ template withState*(
updateStateData(pool, cache, blockSlot)
template hashedState(): HashedBeaconState {.inject, used.} = cache.data
template state(): BeaconStateRef {.inject, used.} = cache.data.data
template state(): BeaconState {.inject, used.} = cache.data.data
template blck(): BlockRef {.inject, used.} = cache.blck
template root(): Eth2Digest {.inject, used.} = cache.data.root
@ -139,16 +139,6 @@ func init*(T: type BlockRef, root: Eth2Digest, slot: Slot): BlockRef =
func init*(T: type BlockRef, root: Eth2Digest, blck: BeaconBlock): BlockRef =
BlockRef.init(root, blck.slot)
func findAncestorBySlot*(blck: BlockRef, slot: Slot): BlockSlot =
## Find the first ancestor that has a slot number less than or equal to `slot`
doAssert(not blck.isNil)
var ret = blck
while ret.parent != nil and ret.slot > slot:
ret = ret.parent
BlockSlot(blck: ret, slot: slot)
proc init*(T: type BlockPool, db: BeaconChainDB): BlockPool =
# TODO we require that the db contains both a head and a tail block -
# asserting here doesn't seem like the right way to go about it however..
@ -168,7 +158,6 @@ proc init*(T: type BlockPool, db: BeaconChainDB): BlockPool =
var
blocks = {tailRef.root: tailRef}.toTable()
latestStateRoot = Option[tuple[stateRoot: Eth2Digest, blckRef: BlockRef]]()
headRef: BlockRef
if headRoot != tailRoot:
@ -191,34 +180,54 @@ proc init*(T: type BlockPool, db: BeaconChainDB): BlockPool =
blocks[curRef.root] = curRef
trace "Populating block pool", key = curRef.root, val = curRef
if latestStateRoot.isNone() and db.containsState(blck.message.state_root):
latestStateRoot = some((blck.message.state_root, curRef))
doAssert curRef == tailRef,
"head block does not lead to tail, database corrupt?"
else:
headRef = tailRef
if latestStateRoot.isNone():
doAssert db.containsState(tailBlock.message.state_root),
"state data missing for tail block, database corrupt?"
latestStateRoot = some((tailBlock.message.state_root, tailRef))
var
bs = headRef.atSlot(headRef.slot)
tmpState = (ref StateData)()
# We're only saving epoch boundary states in the database right now, so when
# we're loading the head block, the corresponding state does not necessarily
# exist in the database - we'll load this latest state we know about and use
# that as finalization point.
let stateOpt = db.getState(latestStateRoot.get().stateRoot)
doAssert stateOpt.isSome, "failed to obtain latest state. database corrupt?"
let tmpState = stateOpt.get
# Now that we have a head block, we need to find the most recent state that
# we have saved in the database
while bs.blck != nil:
let root = db.getStateRoot(bs.blck.root, bs.slot)
if root.isSome():
# TODO load StateData from BeaconChainDB
let loaded = db.getState(root.get(), tmpState.data.data, noRollback)
if not loaded:
# TODO We don't write state root and state atomically, so we need to be
# lenient here in case of dirty shutdown - transactions would be
# nice!
warn "State root, but no state - database corrupt?",
stateRoot = root.get(), blockRoot = bs.blck.root, blockSlot = bs.slot
continue
tmpState.data.root = root.get()
tmpState.blck = bs.blck
break
bs = bs.parent() # Iterate slot by slot in case there's a gap!
if tmpState.blck == nil:
warn "No state found in head history, database corrupt?"
# TODO Potentially we could recover from here instead of crashing - what
# would be a good recovery model?
raiseAssert "No state found in head history, database corrupt?"
# We presently save states on the epoch boundary - it means that the latest
# state we loaded might be older than head block - nonetheless, it will be
# from the same epoch as the head, thus the finalized and justified slots are
# the same - these only change on epoch boundaries.
let
finalizedSlot =
tmpState.finalized_checkpoint.epoch.compute_start_slot_at_epoch()
finalizedHead = headRef.findAncestorBySlot(finalizedSlot)
tmpState.data.data.finalized_checkpoint.epoch.compute_start_slot_at_epoch()
finalizedHead = headRef.atSlot(finalizedSlot)
justifiedSlot =
tmpState.current_justified_checkpoint.epoch.compute_start_slot_at_epoch()
justifiedHead = headRef.findAncestorBySlot(justifiedSlot)
tmpState.data.data.current_justified_checkpoint.epoch.compute_start_slot_at_epoch()
justifiedHead = headRef.atSlot(justifiedSlot)
head = Head(blck: headRef, justified: justifiedHead)
justifiedBlock = db.getBlock(justifiedHead.blck.root).get()
justifiedStateRoot = justifiedBlock.message.state_root
@ -226,22 +235,6 @@ proc init*(T: type BlockPool, db: BeaconChainDB): BlockPool =
doAssert justifiedHead.slot >= finalizedHead.slot,
"justified head comes before finalized head - database corrupt?"
debug "Block pool initialized",
head = head.blck, finalizedHead, tail = tailRef,
totalBlocks = blocks.len
let headState = StateData(
data: HashedBeaconState(
data: tmpState, root: latestStateRoot.get().stateRoot),
blck: latestStateRoot.get().blckRef)
let justifiedState = db.getState(justifiedStateRoot)
doAssert justifiedState.isSome,
"failed to obtain latest justified state. database corrupt?"
# For the initialization of `tmpState` below.
# Please note that it's initialized few lines below
{.push warning[UnsafeDefault]: off.}
let res = BlockPool(
pending: initTable[Eth2Digest, SignedBeaconBlock](),
missing: initTable[Eth2Digest, MissingBlock](),
@ -260,17 +253,18 @@ proc init*(T: type BlockPool, db: BeaconChainDB): BlockPool =
finalizedHead: finalizedHead,
db: db,
heads: @[head],
headState: headState,
justifiedState: StateData(
data: HashedBeaconState(data: justifiedState.get, root: justifiedStateRoot),
blck: justifiedHead.blck),
tmpState: default(StateData)
headState: tmpState[],
justifiedState: tmpState[], # This is wrong but we'll update it below
tmpState: tmpState[]
)
{.pop.}
res.updateStateData(res.headState, BlockSlot(blck: head.blck,
slot: head.blck.slot))
res.tmpState = clone(res.headState)
res.updateStateData(res.justifiedState, justifiedHead)
res.updateStateData(res.headState, headRef.atSlot(headRef.slot))
info "Block pool initialized",
head = head.blck, justifiedHead, finalizedHead, tail = tailRef,
totalBlocks = blocks.len
res
proc addResolvedBlock(
@ -297,7 +291,7 @@ proc addResolvedBlock(
for head in pool.heads.mitems():
if head.blck.isAncestorOf(blockRef):
if head.justified.slot != justifiedSlot:
head.justified = blockRef.findAncestorBySlot(justifiedSlot)
head.justified = blockRef.atSlot(justifiedSlot)
head.blck = blockRef
@ -307,14 +301,13 @@ proc addResolvedBlock(
if foundHead.isNone():
foundHead = some(Head(
blck: blockRef,
justified: blockRef.findAncestorBySlot(justifiedSlot)))
justified: blockRef.atSlot(justifiedSlot)))
pool.heads.add(foundHead.get())
info "Block resolved",
blck = shortLog(signedBlock.message),
blockRoot = shortLog(blockRoot),
justifiedRoot = shortLog(foundHead.get().justified.blck.root),
justifiedSlot = shortLog(foundHead.get().justified.slot),
justifiedHead = foundHead.get().justified,
heads = pool.heads.len(),
cat = "filtering"
@ -339,6 +332,29 @@ proc addResolvedBlock(
keepGoing = pool.pending.len < retries.len
blockRef
proc getState(
pool: BlockPool, db: BeaconChainDB, stateRoot: Eth2Digest, blck: BlockRef,
output: var StateData): bool =
let outputAddr = unsafeAddr output # local scope
proc rollback(v: var BeaconState) =
if outputAddr == (unsafeAddr pool.headState):
# TODO seeing the headState in the rollback shouldn't happen - we load
# head states only when updating the head position, and by that time
# the database will have gone through enough sanity checks that
# SSZ exceptions shouldn't happen, which is when rollback happens.
# Nonetheless, this is an ugly workaround that needs to go away
doAssert false, "Cannot alias headState"
outputAddr[] = pool.headState
if not db.getState(stateRoot, output.data.data, rollback):
return false
output.blck = blck
output.data.root = stateRoot
true
proc putState(pool: BlockPool, state: HashedBeaconState, blck: BlockRef) =
# TODO we save state at every epoch start but never remove them - we also
# potentially save multiple states per slot if reorgs happen, meaning
@ -350,8 +366,7 @@ proc putState(pool: BlockPool, state: HashedBeaconState, blck: BlockRef) =
if state.data.slot mod SLOTS_PER_EPOCH == 0:
if not pool.db.containsState(state.root):
info "Storing state",
blockRoot = shortLog(blck.root),
blockSlot = shortLog(blck.slot),
blck = shortLog(blck),
stateSlot = shortLog(state.data.slot),
stateRoot = shortLog(state.root),
cat = "caching"
@ -425,7 +440,8 @@ proc add*(
if blck.slot <= pool.finalizedHead.slot:
debug "Old block, dropping",
blck = shortLog(blck),
tailSlot = shortLog(pool.tail.slot),
finalizedHead = shortLog(pool.finalizedHead),
tail = shortLog(pool.tail),
blockRoot = shortLog(blockRoot),
cat = "filtering"
@ -439,8 +455,7 @@ proc add*(
notice "Invalid block slot",
blck = shortLog(blck),
blockRoot = shortLog(blockRoot),
parentRoot = shortLog(parent.root),
parentSlot = shortLog(parent.slot)
parentBlock = shortLog(parent)
return
@ -475,7 +490,7 @@ proc add*(
# Careful, tmpState.data has been updated but not blck - we need to create
# the BlockRef first!
pool.tmpState.blck = pool.addResolvedBlock(
pool.tmpState.data.data[], blockRoot, signedBlock, parent)
pool.tmpState.data.data, blockRoot, signedBlock, parent)
pool.putState(pool.tmpState.data, pool.tmpState.blck)
return pool.tmpState.blck
@ -573,7 +588,7 @@ proc getBlockRange*(
func getBlockBySlot*(pool: BlockPool, slot: Slot): BlockRef =
## Retrieves the first block in the current canonical chain
## with slot number less or equal to `slot`.
pool.head.blck.findAncestorBySlot(slot).blck
pool.head.blck.atSlot(slot).blck
func getBlockByPreciseSlot*(pool: BlockPool, slot: Slot): BlockRef =
## Retrieves a block from the canonical chain with a slot
@ -642,12 +657,12 @@ proc skipAndUpdateState(
pool.skipAndUpdateState(state.data, blck.refs, blck.data.message.slot - 1)
var statePtr = unsafeAddr state # safe because `restore` is locally scoped
proc restore(v: var HashedBeaconState) =
var statePtr = unsafeAddr state # safe because `rollback` is locally scoped
proc rollback(v: var HashedBeaconState) =
doAssert (addr(statePtr.data) == addr v)
statePtr[] = pool.headState
let ok = state_transition(state.data, blck.data, flags, restore)
let ok = state_transition(state.data, blck.data, flags, rollback)
if ok:
pool.putState(state.data, blck.refs)
@ -713,39 +728,35 @@ proc rewindState(pool: BlockPool, state: var StateData, bs: BlockSlot):
let
ancestor = ancestors.pop()
root = stateRoot.get()
ancestorState =
if pool.db.containsState(root):
pool.db.getState(root)
elif pool.cachedStates[0].containsState(root):
pool.cachedStates[0].getState(root)
else:
pool.cachedStates[1].getState(root)
if ancestorState.isNone():
# TODO this should only happen if the database is corrupt - we walked the
# list of parent blocks and couldn't find a corresponding state in the
# database, which should never happen (at least we should have the
# tail state in there!)
error "Couldn't find ancestor state or block parent missing!",
blockRoot = shortLog(bs.blck.root),
blockSlot = shortLog(bs.blck.slot),
slot = shortLog(bs.slot),
cat = "crash"
doAssert false, "Oh noes, we passed big bang!"
if pool.cachedStates[0].containsState(root):
doAssert pool.getState(pool.cachedStates[0], root, ancestor.refs, state)
elif pool.cachedStates[1].containsState(root):
doAssert pool.getState(pool.cachedStates[1], root, ancestor.refs, state)
else:
let found = pool.getState(pool.db, root, ancestor.refs, state)
if not found:
# TODO this should only happen if the database is corrupt - we walked the
# list of parent blocks and couldn't find a corresponding state in the
# database, which should never happen (at least we should have the
# tail state in there!)
error "Couldn't find ancestor state or block parent missing!",
blockRoot = shortLog(bs.blck.root),
blockSlot = shortLog(bs.blck.slot),
slot = shortLog(bs.slot),
cat = "crash"
doAssert false, "Oh noes, we passed big bang!"
trace "Replaying state transitions",
stateSlot = shortLog(state.data.data.slot),
ancestorStateRoot = shortLog(ancestor.data.message.state_root),
ancestorStateSlot = shortLog(ancestorState.get().slot),
ancestorStateSlot = shortLog(state.data.data.slot),
slot = shortLog(bs.slot),
blockRoot = shortLog(bs.blck.root),
ancestors = ancestors.len,
cat = "replay_state"
state.data.data[] = ancestorState.get()[]
state.data.root = stateRoot.get()
state.blck = ancestor.refs
ancestors
proc getStateDataCached(pool: BlockPool, state: var StateData, bs: BlockSlot): bool =
@ -755,17 +766,15 @@ proc getStateDataCached(pool: BlockPool, state: var StateData, bs: BlockSlot): b
# each hash_tree_root(...) consumes a nontrivial fraction of a second.
for db in [pool.db, pool.cachedStates[0], pool.cachedStates[1]]:
if (let tmp = db.getStateRoot(bs.blck.root, bs.slot); tmp.isSome()):
if not db.containsState(tmp.get):
let found = pool.getState(db, tmp.get(), bs.blck, state)
if not found:
# TODO We don't write state root and state atomically, so we need to be
# lenient here in case of dirty shutdown - transactions would be
# nice!
warn "State root, but no state - cache corrupt?",
stateRoot = tmp.get(), blockRoot = bs.blck.root, blockSlot = bs.slot
continue
let
root = tmp.get()
ancestorState = db.getState(root)
doAssert ancestorState.isSome()
state.data.data = ancestorState.get()
state.data.root = root
state.blck = pool.get(bs.blck).refs
return true
false
@ -813,12 +822,9 @@ proc updateStateData*(pool: BlockPool, state: var StateData, bs: BlockSlot) =
proc loadTailState*(pool: BlockPool): StateData =
## Load the state associated with the current tail in the pool
let stateRoot = pool.db.getBlock(pool.tail.root).get().message.state_root
StateData(
data: HashedBeaconState(
data: pool.db.getState(stateRoot).get(),
root: stateRoot),
blck: pool.tail
)
let found = pool.getState(pool.db, stateRoot, pool.tail, result)
# TODO turn into regular error, this can happen
doAssert found, "Failed to load tail state, database corrupt?"
proc delState(pool: BlockPool, bs: BlockSlot) =
# Delete state state and mapping for a particular block+slot
@ -838,8 +844,7 @@ proc updateHead*(pool: BlockPool, newHead: BlockRef) =
if pool.head.blck == newHead:
info "No head block update",
headBlockRoot = shortLog(newHead.root),
headBlockSlot = shortLog(newHead.slot),
head = shortLog(newHead),
cat = "fork_choice"
return
@ -857,7 +862,7 @@ proc updateHead*(pool: BlockPool, newHead: BlockRef) =
.current_justified_checkpoint
.epoch
.compute_start_slot_at_epoch()
justifiedBS = newHead.findAncestorBySlot(justifiedSlot)
justifiedBS = newHead.atSlot(justifiedSlot)
pool.head = Head(blck: newHead, justified: justifiedBS)
updateStateData(pool, pool.justifiedState, justifiedBS)
@ -865,10 +870,10 @@ proc updateHead*(pool: BlockPool, newHead: BlockRef) =
# TODO isAncestorOf may be expensive - too expensive?
if not lastHead.blck.isAncestorOf(newHead):
info "Updated head block (new parent)",
lastHeadRoot = shortLog(lastHead.blck.root),
parentRoot = shortLog(newHead.parent.root),
lastHead = shortLog(lastHead.blck),
headParent = shortLog(newHead.parent),
stateRoot = shortLog(pool.headState.data.root),
headBlockRoot = shortLog(pool.headState.blck.root),
headBlock = shortLog(pool.headState.blck),
stateSlot = shortLog(pool.headState.data.data.slot),
justifiedEpoch = shortLog(pool.headState.data.data.current_justified_checkpoint.epoch),
finalizedEpoch = shortLog(pool.headState.data.data.finalized_checkpoint.epoch),
@ -882,7 +887,7 @@ proc updateHead*(pool: BlockPool, newHead: BlockRef) =
else:
info "Updated head block",
stateRoot = shortLog(pool.headState.data.root),
headBlockRoot = shortLog(pool.headState.blck.root),
headBlock = shortLog(pool.headState.blck),
stateSlot = shortLog(pool.headState.data.data.slot),
justifiedEpoch = shortLog(pool.headState.data.data.current_justified_checkpoint.epoch),
finalizedEpoch = shortLog(pool.headState.data.data.finalized_checkpoint.epoch),
@ -893,7 +898,7 @@ proc updateHead*(pool: BlockPool, newHead: BlockRef) =
pool.headState.data.data.finalized_checkpoint.epoch.
compute_start_slot_at_epoch()
# TODO there might not be a block at the epoch boundary - what then?
finalizedHead = newHead.findAncestorBySlot(finalizedEpochStartSlot)
finalizedHead = newHead.atSlot(finalizedEpochStartSlot)
doAssert (not finalizedHead.blck.isNil),
"Block graph should always lead to a finalized block"
@ -949,10 +954,8 @@ proc updateHead*(pool: BlockPool, newHead: BlockRef) =
pool.heads.del(n)
info "Finalized block",
finalizedBlockRoot = shortLog(finalizedHead.blck.root),
finalizedBlockSlot = shortLog(finalizedHead.slot),
headBlockRoot = shortLog(newHead.root),
headBlockSlot = shortLog(newHead.slot),
finalizedHead = shortLog(finalizedHead),
head = shortLog(newHead),
heads = pool.heads.len,
cat = "fork_choice"
@ -994,7 +997,7 @@ proc isInitialized*(T: type BlockPool, db: BeaconChainDB): bool =
return true
proc preInit*(
T: type BlockPool, db: BeaconChainDB, state: BeaconStateRef,
T: type BlockPool, db: BeaconChainDB, state: BeaconState,
signedBlock: SignedBeaconBlock) =
# write a genesis state, the way the BlockPool expects it to be stored in
# database
@ -1023,14 +1026,14 @@ proc getProposer*(pool: BlockPool, head: BlockRef, slot: Slot): Option[Validator
var cache = get_empty_per_epoch_cache()
# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#validator-assignments
let proposerIdx = get_beacon_proposer_index(state[], cache)
let proposerIdx = get_beacon_proposer_index(state, cache)
if proposerIdx.isNone:
warn "Missing proposer index",
slot=slot,
epoch=slot.compute_epoch_at_slot,
num_validators=state.validators.len,
active_validators=
get_active_validator_indices(state[], slot.compute_epoch_at_slot),
get_active_validator_indices(state, slot.compute_epoch_at_slot),
balances=state.balances
return
@ -1132,7 +1135,7 @@ proc isValidBeaconBlock*(pool: var BlockPool,
pool.withState(pool.tmpState, bs):
let
blockRoot = hash_tree_root(signed_beacon_block.message)
domain = get_domain(pool.headState.data.data[], DOMAIN_BEACON_PROPOSER,
domain = get_domain(pool.headState.data.data, DOMAIN_BEACON_PROPOSER,
compute_epoch_at_slot(signed_beacon_block.message.slot))
signing_root = compute_signing_root(blockRoot, domain)
proposer_index = signed_beacon_block.message.proposer_index
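
A usage sketch for the updated `withState` template at the top of this file: `state` now binds a plain `BeaconState`, so the `[]` dereferences disappear at every call site (as the `beacon_node.nim` hunks above show). `pool` and `bs` are assumed from context:

```nim
pool.withState(pool.tmpState, bs):
  # `state`, `blck` and `root` are injected by the template
  echo "slot ", shortLog(state.slot),
       " block ", shortLog(blck),
       " state root ", shortLog(root)
```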

View File

@ -395,7 +395,7 @@ type
# TODO to be replaced with some magic hash caching
HashedBeaconState* = object
data*: BeaconStateRef
data*: BeaconState
root*: Eth2Digest # hash_tree_root(data)
StateCache* = object
@ -575,18 +575,14 @@ template readValue*(reader: var JsonReader, value: var BitList) =
template writeValue*(writer: var JsonWriter, value: BitList) =
writeValue(writer, BitSeq value)
func newClone*[T](x: T): ref T not nil =
new result
result[] = x
template newClone*[T: not ref](x: T): ref T =
# TODO not nil in return type: https://github.com/nim-lang/Nim/issues/14146
let res = new typeof(x) # TODO safe to do noinit here?
res[] = x
res
func newClone*[T](x: ref T): ref T not nil =
new result
result[] = x[]
func clone*(other: HashedBeaconState): HashedBeaconState =
HashedBeaconState(
data: newClone(other.data),
root: other.root)
template newClone*[T](x: ref T not nil): ref T =
newClone(x[])
template init*(T: type BitList, len: int): auto = T init(BitSeq, len)
template len*(x: BitList): auto = len(BitSeq(x))
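
`newClone` (defined in the hunk above) heap-allocates a copy of its argument, which is how the tests below keep a full `BeaconState` off the stack, e.g. `state = newClone(loadTailState(blockPool))`. A small sketch, assuming that template is in scope:

```nim
var big: array[1_000_000, byte] # stand-in for a large flat object
let onHeap = newClone(big)      # ref array[1_000_000, byte], one copy made
onHeap[][0] = 42                # mutates only the heap copy
doAssert big[0] == 0
```

The template form also sidesteps the `not nil` return-type issue referenced in the TODO (nim-lang/Nim#14146).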

View File

@ -485,7 +485,7 @@ proc makeBeaconBlock*(
deposits: deposits)
)
var tmpState = newClone(state)
let tmpState = newClone(state)
let ok = process_block(tmpState[], blck, {skipBlsValidation}, cache)
if not ok:

View File

@ -139,7 +139,7 @@ proc verifyStateRoot(state: BeaconState, blck: BeaconBlock): bool =
true
type
RollbackProc* = proc(v: var BeaconState) {.gcsafe.}
RollbackProc* = proc(v: var BeaconState) {.gcsafe, raises: [Defect].}
proc noRollback*(state: var BeaconState) =
trace "Skipping rollback of broken state"
@ -247,14 +247,14 @@ proc process_slots*(state: var HashedBeaconState, slot: Slot) =
if is_epoch_transition:
# Note: Genesis epoch = 0, no need to test if before Genesis
try:
beacon_previous_validators.set(get_epoch_validator_count(state.data[]))
beacon_previous_validators.set(get_epoch_validator_count(state.data))
except Exception as e: # TODO https://github.com/status-im/nim-metrics/pull/22
trace "Couldn't update metrics", msg = e.msg
process_epoch(state.data[])
process_epoch(state.data)
state.data.slot += 1
if is_epoch_transition:
try:
beacon_current_validators.set(get_epoch_validator_count(state.data[]))
beacon_current_validators.set(get_epoch_validator_count(state.data))
except Exception as e: # TODO https://github.com/status-im/nim-metrics/pull/22
trace "Couldn't update metrics", msg = e.msg
state.root = hash_tree_root(state.data)
@ -272,18 +272,18 @@ proc state_transition*(
process_slots(state, signedBlock.message.slot)
if skipBLSValidation in flags or
verify_block_signature(state.data[], signedBlock):
verify_block_signature(state.data, signedBlock):
var per_epoch_cache = get_empty_per_epoch_cache()
if processBlock(state.data[], signedBlock.message, flags, per_epoch_cache):
if skipStateRootValidation in flags or verifyStateRoot(state.data[], signedBlock.message):
if processBlock(state.data, signedBlock.message, flags, per_epoch_cache):
if skipStateRootValidation in flags or verifyStateRoot(state.data, signedBlock.message):
# State root is what it should be - we're done!
# TODO when creating a new block, state_root is not yet set.. comparing
# with zero hash here is a bit fragile however, but this whole thing
# should go away with proper hash caching
state.root =
if signedBlock.message.state_root == Eth2Digest(): hash_tree_root(state.data[])
if signedBlock.message.state_root == Eth2Digest(): hash_tree_root(state.data)
else: signedBlock.message.state_root
return true
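
When the caller will throw the state away on failure anyway, `noRollback` lets it skip the restore work; `ncli_transition` below does exactly this. A sketch, with `stateY` (a `HashedBeaconState`) and `blckX` (a `SignedBeaconBlock`) assumed from context:

```nim
if not state_transition(stateY, blckX, {skipStateRootValidation}, noRollback):
  echo "transition failed; stateY may have been partially mutated"
```

Callers that keep using the state on failure pass a real rollback closure instead, as `skipAndUpdateState` in `block_pool.nim` does above.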

View File

@ -9,7 +9,7 @@ cli do(pre: string, blck: string, post: string, verifyStateRoot = false):
blckX = SSZ.loadFile(blck, SignedBeaconBlock)
flags = if verifyStateRoot: {skipStateRootValidation} else: {}
var stateY = HashedBeaconState(data: stateX, root: hash_tree_root(stateX))
var stateY = HashedBeaconState(data: stateX[], root: hash_tree_root(stateX[]))
if not state_transition(stateY, blckX, flags, noRollback):
error "State transition failed"
else:

research/stackSizes.nim (new file, 24 lines)
View File

@ -0,0 +1,24 @@
import ../beacon_chain/spec/datatypes
import typetraits, strformat, strutils
proc print(t: auto, n: string, indent: int) =
echo fmt"{sizeof(t):>8} {spaces(indent)}{n}: {typeof(t).name}"
when t is object|tuple:
for n, p in t.fieldPairs:
print(p, n, indent + 1)
print(BeaconState(), "state", 0)
echo ""
print(SignedBeaconBlock(), "block", 0)
echo ""
print(Validator(), "validator", 0)
echo ""
print(Attestation(), "attestation", 0)
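
Since the Makefile change above adds `stackSizes` to `TOOLS`, the tool can presumably be built the same way as the other tools (e.g. `make stackSizes`, assuming `research` is among `TOOLS_DIRS`). Running it prints `sizeof` for every recursively expanded field of `BeaconState`, `SignedBeaconBlock`, `Validator` and `Attestation`, which makes it easy to spot the nested objects that dominate stack usage.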

View File

@ -29,7 +29,7 @@ when const_preset == "minimal": # Too much stack space used on mainnet
var
blockPool = BlockPool.init(makeTestDB(SLOTS_PER_EPOCH * 3))
pool = AttestationPool.init(blockPool)
state = loadTailState(blockPool)
state = newClone(loadTailState(blockPool))
# Slot 0 is a finalized slot - won't be making attestations for it..
process_slots(state.data, state.data.data.slot + 1)
@ -38,15 +38,15 @@ when const_preset == "minimal": # Too much stack space used on mainnet
let
# Create an attestation for slot 1!
beacon_committee = get_beacon_committee(
state.data.data[], state.data.data.slot, 0.CommitteeIndex, cache)
state.data.data, state.data.data.slot, 0.CommitteeIndex, cache)
attestation = makeAttestation(
state.data.data[], state.blck.root, beacon_committee[0], cache)
state.data.data, state.blck.root, beacon_committee[0], cache)
pool.add(attestation)
process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1)
let attestations = pool.getAttestationsForBlock(state.data.data[])
let attestations = pool.getAttestationsForBlock(state.data.data)
check:
attestations.len == 1
@ -56,17 +56,17 @@ when const_preset == "minimal": # Too much stack space used on mainnet
let
# Create an attestation for slot 1!
bc0 = get_beacon_committee(
state.data.data[], state.data.data.slot, 0.CommitteeIndex, cache)
state.data.data, state.data.data.slot, 0.CommitteeIndex, cache)
attestation0 = makeAttestation(
state.data.data[], state.blck.root, bc0[0], cache)
state.data.data, state.blck.root, bc0[0], cache)
process_slots(state.data, state.data.data.slot + 1)
let
bc1 = get_beacon_committee(state.data.data[],
bc1 = get_beacon_committee(state.data.data,
state.data.data.slot, 0.CommitteeIndex, cache)
attestation1 = makeAttestation(
state.data.data[], state.blck.root, bc1[0], cache)
state.data.data, state.blck.root, bc1[0], cache)
# test reverse order
pool.add(attestation1)
@ -74,7 +74,7 @@ when const_preset == "minimal": # Too much stack space used on mainnet
process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1)
let attestations = pool.getAttestationsForBlock(state.data.data[])
let attestations = pool.getAttestationsForBlock(state.data.data)
check:
attestations.len == 1
@ -84,18 +84,18 @@ when const_preset == "minimal": # Too much stack space used on mainnet
let
# Create an attestation for slot 1!
bc0 = get_beacon_committee(
state.data.data[], state.data.data.slot, 0.CommitteeIndex, cache)
state.data.data, state.data.data.slot, 0.CommitteeIndex, cache)
attestation0 = makeAttestation(
state.data.data[], state.blck.root, bc0[0], cache)
state.data.data, state.blck.root, bc0[0], cache)
attestation1 = makeAttestation(
state.data.data[], state.blck.root, bc0[1], cache)
state.data.data, state.blck.root, bc0[1], cache)
pool.add(attestation0)
pool.add(attestation1)
process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1)
let attestations = pool.getAttestationsForBlock(state.data.data[])
let attestations = pool.getAttestationsForBlock(state.data.data)
check:
attestations.len == 1
@ -106,11 +106,11 @@ when const_preset == "minimal": # Too much stack space used on mainnet
var
# Create an attestation for slot 1!
bc0 = get_beacon_committee(
state.data.data[], state.data.data.slot, 0.CommitteeIndex, cache)
state.data.data, state.data.data.slot, 0.CommitteeIndex, cache)
attestation0 = makeAttestation(
state.data.data[], state.blck.root, bc0[0], cache)
state.data.data, state.blck.root, bc0[0], cache)
attestation1 = makeAttestation(
state.data.data[], state.blck.root, bc0[1], cache)
state.data.data, state.blck.root, bc0[1], cache)
attestation0.combine(attestation1, {})
@ -119,7 +119,7 @@ when const_preset == "minimal": # Too much stack space used on mainnet
process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1)
let attestations = pool.getAttestationsForBlock(state.data.data[])
let attestations = pool.getAttestationsForBlock(state.data.data)
check:
attestations.len == 1
@ -128,12 +128,12 @@ when const_preset == "minimal": # Too much stack space used on mainnet
var cache = get_empty_per_epoch_cache()
var
# Create an attestation for slot 1!
bc0 = get_beacon_committee(state.data.data[],
bc0 = get_beacon_committee(state.data.data,
state.data.data.slot, 0.CommitteeIndex, cache)
attestation0 = makeAttestation(
state.data.data[], state.blck.root, bc0[0], cache)
state.data.data, state.blck.root, bc0[0], cache)
attestation1 = makeAttestation(
state.data.data[], state.blck.root, bc0[1], cache)
state.data.data, state.blck.root, bc0[1], cache)
attestation0.combine(attestation1, {})
@ -142,14 +142,14 @@ when const_preset == "minimal": # Too much stack space used on mainnet
process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1)
let attestations = pool.getAttestationsForBlock(state.data.data[])
let attestations = pool.getAttestationsForBlock(state.data.data)
check:
attestations.len == 1
timedTest "Fork choice returns latest block with no attestations":
let
b1 = addTestBlock(state.data.data[], blockPool.tail.root)
b1 = addTestBlock(state.data.data, blockPool.tail.root)
b1Root = hash_tree_root(b1.message)
b1Add = blockPool.add(b1Root, b1)
head = pool.selectHead()
@ -158,7 +158,7 @@ when const_preset == "minimal": # Too much stack space used on mainnet
head == b1Add
let
b2 = addTestBlock(state.data.data[], b1Root)
b2 = addTestBlock(state.data.data, b1Root)
b2Root = hash_tree_root(b2.message)
b2Add = blockPool.add(b2Root, b2)
head2 = pool.selectHead()
@ -169,7 +169,7 @@ when const_preset == "minimal": # Too much stack space used on mainnet
timedTest "Fork choice returns block with attestation":
var cache = get_empty_per_epoch_cache()
let
b10 = makeTestBlock(state.data.data[], blockPool.tail.root)
b10 = makeTestBlock(state.data.data, blockPool.tail.root)
b10Root = hash_tree_root(b10.message)
b10Add = blockPool.add(b10Root, b10)
head = pool.selectHead()
@ -178,15 +178,15 @@ when const_preset == "minimal": # Too much stack space used on mainnet
head == b10Add
let
b11 = makeTestBlock(state.data.data[], blockPool.tail.root,
b11 = makeTestBlock(state.data.data, blockPool.tail.root,
graffiti = Eth2Digest(data: [1'u8, 0, 0, 0 ,0 ,0 ,0 ,0 ,0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
)
b11Root = hash_tree_root(b11.message)
b11Add = blockPool.add(b11Root, b11)
bc1 = get_beacon_committee(
state.data.data[], state.data.data.slot, 1.CommitteeIndex, cache)
attestation0 = makeAttestation(state.data.data[], b10Root, bc1[0], cache)
state.data.data, state.data.data.slot, 1.CommitteeIndex, cache)
attestation0 = makeAttestation(state.data.data, b10Root, bc1[0], cache)
pool.add(attestation0)
@ -197,8 +197,8 @@ when const_preset == "minimal": # Too much stack space used on mainnet
head2 == b10Add
let
attestation1 = makeAttestation(state.data.data[], b11Root, bc1[1], cache)
attestation2 = makeAttestation(state.data.data[], b11Root, bc1[2], cache)
attestation1 = makeAttestation(state.data.data, b11Root, bc1[1], cache)
attestation2 = makeAttestation(state.data.data, b11Root, bc1[2], cache)
pool.add(attestation1)
let head3 = pool.selectHead()

View File

@ -8,23 +8,25 @@
{.used.}
import options, unittest, sequtils,
../beacon_chain/[beacon_chain_db, extras, interop, ssz],
../beacon_chain/[beacon_chain_db, extras, interop, ssz, state_transition],
../beacon_chain/spec/[beaconstate, datatypes, digest, crypto],
eth/db/kvstore,
# test utilies
./testutil, ./testblockutil
proc getStateRef(db: BeaconChainDB, root: Eth2Digest): NilableBeaconStateRef =
# load beaconstate the way BlockPool does it - into an existing instance
let res = BeaconStateRef()
if db.getState(root, res[], noRollback):
return res
suiteReport "Beacon chain DB" & preset():
timedTest "empty database" & preset():
var
db = init(BeaconChainDB, kvStore MemStoreRef.init())
check:
when const_preset=="minimal":
db.getState(Eth2Digest()).isNone and db.getBlock(Eth2Digest()).isNone
else:
# TODO re-check crash here in mainnet
true
db.getStateRef(Eth2Digest()).isNil
db.getBlock(Eth2Digest()).isNone
timedTest "sanity check blocks" & preset():
var
@ -52,11 +54,11 @@ suiteReport "Beacon chain DB" & preset():
state = BeaconStateRef()
root = hash_tree_root(state)
db.putState(state)
db.putState(state[])
check:
db.containsState(root)
db.getState(root).get[] == state[]
db.getStateRef(root)[] == state[]
timedTest "find ancestors" & preset():
var
@ -103,8 +105,8 @@ suiteReport "Beacon chain DB" & preset():
eth1BlockHash, 0, makeInitialDeposits(SLOTS_PER_EPOCH), {skipBlsValidation})
root = hash_tree_root(state)
db.putState(state)
db.putState(state[])
check:
db.containsState(root)
db.getState(root).get[] == state[]
db.getStateRef(root)[] == state[]

View File

@ -94,7 +94,7 @@ when const_preset == "minimal": # Too much stack space used on mainnet
var
db = makeTestDB(SLOTS_PER_EPOCH)
pool = BlockPool.init(db)
state = pool.loadTailState().data.data
state = newClone(pool.loadTailState().data.data)
b1 = addTestBlock(state[], pool.tail.root)
b1Root = hash_tree_root(b1.message)
b2 = addTestBlock(state[], b1Root)
@ -242,42 +242,42 @@ when const_preset == "minimal": # Too much stack space used on mainnet
bs1_3 = b1Add.atSlot(3.Slot)
bs2_3 = b2Add.atSlot(3.Slot)
var tmpState = clone(pool.headState)
var tmpState = newClone(pool.headState)
# move to specific block
pool.updateStateData(tmpState, bs1)
pool.updateStateData(tmpState[], bs1)
check:
tmpState.blck == b1Add
tmpState.data.data.slot == bs1.slot
# Skip slots
pool.updateStateData(tmpState, bs1_3) # skip slots
pool.updateStateData(tmpState[], bs1_3) # skip slots
check:
tmpState.blck == b1Add
tmpState.data.data.slot == bs1_3.slot
# Move back slots, but not blocks
pool.updateStateData(tmpState, bs1_3.parent())
pool.updateStateData(tmpState[], bs1_3.parent())
check:
tmpState.blck == b1Add
tmpState.data.data.slot == bs1_3.parent().slot
# Move to different block and slot
pool.updateStateData(tmpState, bs2_3)
pool.updateStateData(tmpState[], bs2_3)
check:
tmpState.blck == b2Add
tmpState.data.data.slot == bs2_3.slot
# Move back slot and block
pool.updateStateData(tmpState, bs1)
pool.updateStateData(tmpState[], bs1)
check:
tmpState.blck == b1Add
tmpState.data.data.slot == bs1.slot
# Move back to genesis
pool.updateStateData(tmpState, bs1.parent())
pool.updateStateData(tmpState[], bs1.parent())
check:
tmpState.blck == b1Add.parent
tmpState.data.data.slot == bs1.parent.slot
@ -292,7 +292,7 @@ when const_preset == "minimal": # Too much stack space used on mainnet
block:
# Create a fork that will not be taken
var
blck = makeTestBlock(pool.headState.data.data[], pool.head.blck.root)
blck = makeTestBlock(pool.headState.data.data, pool.head.blck.root)
discard pool.add(hash_tree_root(blck.message), blck)
for i in 0 ..< (SLOTS_PER_EPOCH * 6):
@ -304,9 +304,9 @@ when const_preset == "minimal": # Too much stack space used on mainnet
var
cache = get_empty_per_epoch_cache()
blck = makeTestBlock(
pool.headState.data.data[], pool.head.blck.root,
pool.headState.data.data, pool.head.blck.root,
attestations = makeFullAttestations(
pool.headState.data.data[], pool.head.blck.root,
pool.headState.data.data, pool.head.blck.root,
pool.headState.data.data.slot, cache, {}))
let added = pool.add(hash_tree_root(blck.message), blck)
pool.updateHead(added)
@ -329,3 +329,41 @@ when const_preset == "minimal": # Too much stack space used on mainnet
hash_tree_root(pool.headState.data.data)
hash_tree_root(pool2.justifiedState.data.data) ==
hash_tree_root(pool.justifiedState.data.data)
timedTest "init with gaps" & preset():
var cache = get_empty_per_epoch_cache()
for i in 0 ..< (SLOTS_PER_EPOCH * 6 - 2):
var
blck = makeTestBlock(
pool.headState.data.data, pool.head.blck.root,
attestations = makeFullAttestations(
pool.headState.data.data, pool.head.blck.root,
pool.headState.data.data.slot, cache, {}))
let added = pool.add(hash_tree_root(blck.message), blck)
pool.updateHead(added)
# Advance past epoch so that the epoch transition is gapped
process_slots(pool.headState.data.data, Slot(SLOTS_PER_EPOCH * 6 + 2) )
var blck = makeTestBlock(
pool.headState.data.data, pool.head.blck.root,
attestations = makeFullAttestations(
pool.headState.data.data, pool.head.blck.root,
pool.headState.data.data.slot, cache, {}))
let added = pool.add(hash_tree_root(blck.message), blck)
pool.updateHead(added)
let
pool2 = BlockPool.init(db)
# check that the state reloaded from database resembles what we had before
check:
pool2.tail.root == pool.tail.root
pool2.head.blck.root == pool.head.blck.root
pool2.finalizedHead.blck.root == pool.finalizedHead.blck.root
pool2.finalizedHead.slot == pool.finalizedHead.slot
hash_tree_root(pool2.headState.data.data) ==
hash_tree_root(pool.headState.data.data)
hash_tree_root(pool2.justifiedState.data.data) ==
hash_tree_root(pool.justifiedState.data.data)

View File

@ -93,7 +93,7 @@ template timedTest*(name, body) =
# TODO noto thread-safe as-is
testTimes.add (f, name)
proc makeTestDB*(tailState: BeaconStateRef, tailBlock: SignedBeaconBlock): BeaconChainDB =
proc makeTestDB*(tailState: BeaconState, tailBlock: SignedBeaconBlock): BeaconChainDB =
result = init(BeaconChainDB, kvStore MemStoreRef.init())
BlockPool.preInit(result, tailState, tailBlock)
@ -104,6 +104,6 @@ proc makeTestDB*(validators: int): BeaconChainDB =
makeInitialDeposits(validators, flags = {skipBlsValidation}),
{skipBlsValidation})
genBlock = get_initial_beacon_block(genState[])
makeTestDB(genState, genBlock)
makeTestDB(genState[], genBlock)
export inMicroseconds