2019-02-21 17:20:50 +00:00
|
|
|
# Nimbus
|
2022-01-04 09:45:38 +00:00
|
|
|
# Copyright (c) 2018-2022 Status Research & Development GmbH
|
2019-02-21 17:20:50 +00:00
|
|
|
# Licensed under either of
|
2019-11-25 15:30:02 +00:00
|
|
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0)
|
|
|
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT)
|
2019-02-21 17:20:50 +00:00
|
|
|
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
|
|
|
|
2019-11-14 10:47:55 +00:00
|
|
|
{.used.}
|
|
|
|
|
2021-04-28 16:41:02 +00:00
|
|
|
import
|
2022-12-14 23:12:29 +00:00
|
|
|
unittest2,
|
2021-08-18 18:57:58 +00:00
|
|
|
../beacon_chain/[beacon_chain_db, interop],
|
2021-08-12 13:08:20 +00:00
|
|
|
../beacon_chain/spec/[beaconstate, forks, state_transition],
|
2022-01-12 14:50:30 +00:00
|
|
|
../beacon_chain/spec/datatypes/[phase0, altair, bellatrix],
|
2021-03-15 14:11:51 +00:00
|
|
|
../beacon_chain/consensus_object_pools/blockchain_dag,
|
Snappy revamp (#3564)
This PR makes the necessary adjustments to deal with the revamped snappy
API.
In practical terms for nimbus-eth2, there are performance increases to
gossip processing, database reading and writing as well as era file
processing. Exporting `.era` files for example, a snappy-heavy
operation, almost halves in total processing time:
Pre:
```
Average, StdDev, Min, Max, Samples, Test
39.088, 8.735, 23.619, 53.301, 50, tState
237.079, 46.692, 165.620, 355.481, 49, tBlocks
```
Post:
```
All time are ms
Average, StdDev, Min, Max, Samples, Test
25.350, 5.303, 15.351, 41.856, 50, tState
141.238, 24.164, 99.990, 199.329, 49, tBlocks
```
2022-04-15 07:44:06 +00:00
|
|
|
eth/db/kvstore,
|
2019-05-27 12:48:13 +00:00
|
|
|
# test utilies
|
2021-04-28 16:41:02 +00:00
|
|
|
./testutil, ./testdbutil, ./testblockutil, ./teststateutil
|
2021-03-15 14:11:51 +00:00
|
|
|
|
2022-12-14 23:12:29 +00:00
|
|
|
from std/algorithm import sort
|
|
|
|
from std/sequtils import toSeq
|
|
|
|
from snappy import encodeFramed, uncompressedLenFramed
|
|
|
|
|
2021-03-15 14:11:51 +00:00
|
|
|
when isMainModule:
  # Workaround import: without it the standalone build of this test module
  # fails to compile (see the original author's trailing note).
  import chronicles # or some random compile error happens...
|
2019-02-21 17:20:50 +00:00
|
|
|
|
2021-06-24 07:11:47 +00:00
|
|
|
proc getPhase0StateRef(db: BeaconChainDB, root: Eth2Digest):
    phase0.NilableBeaconStateRef =
  ## Look up a phase0 beacon state by `root`, loading it the same way the
  ## block pool does: deserializing into a pre-allocated instance.
  ## Returns nil when no matching state exists in `db`.
  let stateRef = (phase0.BeaconStateRef)()
  if db.getState(root, stateRef[], noRollback):
    return stateRef
  # fall through: `result` stays nil (default for a nilable ref)
|
|
|
|
|
2021-06-24 07:11:47 +00:00
|
|
|
proc getAltairStateRef(db: BeaconChainDB, root: Eth2Digest):
    altair.NilableBeaconStateRef =
  ## Look up an Altair beacon state by `root`, loading it the same way the
  ## block pool does: deserializing into a pre-allocated instance.
  ## Returns nil when no matching state exists in `db`.
  let stateRef = (altair.BeaconStateRef)()
  if db.getState(root, stateRef[], noRollback):
    return stateRef
  # fall through: `result` stays nil (default for a nilable ref)
|
|
|
|
|
2022-02-21 11:55:56 +00:00
|
|
|
proc getBellatrixStateRef(db: BeaconChainDB, root: Eth2Digest):
    bellatrix.NilableBeaconStateRef =
  ## Look up a Bellatrix beacon state by `root`, loading it the same way the
  ## block pool does: deserializing into a pre-allocated instance.
  ## Returns nil when no matching state exists in `db`.
  let stateRef = (bellatrix.BeaconStateRef)()
  if db.getState(root, stateRef[], noRollback):
    return stateRef
  # fall through: `result` stays nil (default for a nilable ref)
|
|
|
|
|
2022-11-09 17:32:10 +00:00
|
|
|
from ../beacon_chain/spec/datatypes/capella import
|
|
|
|
BeaconStateRef, NilableBeaconStateRef
|
|
|
|
|
|
|
|
proc getCapellaStateRef(db: BeaconChainDB, root: Eth2Digest):
    capella.NilableBeaconStateRef =
  ## Look up a Capella beacon state by `root`, loading it the same way the
  ## block pool does: deserializing into a pre-allocated instance.
  ## Returns nil when no matching state exists in `db`.
  let stateRef = (capella.BeaconStateRef)()
  if db.getState(root, stateRef[], noRollback):
    return stateRef
  # fall through: `result` stays nil (default for a nilable ref)
|
|
|
|
|
2022-12-13 00:56:50 +00:00
|
|
|
from ../beacon_chain/spec/datatypes/eip4844 import TrustedSignedBeaconBlock
|
|
|
|
|
|
|
|
proc getEIP4844StateRef(db: BeaconChainDB, root: Eth2Digest):
    eip4844.NilableBeaconStateRef =
  ## Look up an EIP-4844 beacon state by `root`, loading it the same way the
  ## block pool does: deserializing into a pre-allocated instance.
  ## Returns nil when no matching state exists in `db`.
  let stateRef = (eip4844.BeaconStateRef)()
  if db.getState(root, stateRef[], noRollback):
    return stateRef
  # fall through: `result` stays nil (default for a nilable ref)
|
|
|
|
|
2021-06-24 07:11:47 +00:00
|
|
|
func withDigest(blck: phase0.TrustedBeaconBlock):
    phase0.TrustedSignedBeaconBlock =
  ## Wrap a trusted phase0 block in its signed-block envelope, caching the
  ## block's hash tree root alongside it.
  let blockRoot = hash_tree_root(blck)
  phase0.TrustedSignedBeaconBlock(message: blck, root: blockRoot)
|
|
|
|
|
|
|
|
func withDigest(blck: altair.TrustedBeaconBlock):
    altair.TrustedSignedBeaconBlock =
  ## Wrap a trusted Altair block in its signed-block envelope, caching the
  ## block's hash tree root alongside it.
  let blockRoot = hash_tree_root(blck)
  altair.TrustedSignedBeaconBlock(message: blck, root: blockRoot)
|
|
|
|
|
2022-01-12 14:50:30 +00:00
|
|
|
func withDigest(blck: bellatrix.TrustedBeaconBlock):
    bellatrix.TrustedSignedBeaconBlock =
  ## Wrap a trusted Bellatrix block in its signed-block envelope, caching the
  ## block's hash tree root alongside it.
  let blockRoot = hash_tree_root(blck)
  bellatrix.TrustedSignedBeaconBlock(message: blck, root: blockRoot)
|
|
|
|
|
2022-11-09 17:32:10 +00:00
|
|
|
func withDigest(blck: capella.TrustedBeaconBlock):
    capella.TrustedSignedBeaconBlock =
  ## Wrap a trusted Capella block in its signed-block envelope, caching the
  ## block's hash tree root alongside it.
  let blockRoot = hash_tree_root(blck)
  capella.TrustedSignedBeaconBlock(message: blck, root: blockRoot)
|
|
|
|
|
2022-12-13 00:56:50 +00:00
|
|
|
func withDigest(blck: eip4844.TrustedBeaconBlock):
    eip4844.TrustedSignedBeaconBlock =
  ## Wrap a trusted EIP-4844 block in its signed-block envelope, caching the
  ## block's hash tree root alongside it.
  let blockRoot = hash_tree_root(blck)
  eip4844.TrustedSignedBeaconBlock(message: blck, root: blockRoot)
|
|
|
|
|
2021-09-30 01:07:24 +00:00
|
|
|
proc getTestStates(stateFork: BeaconStateFork): auto =
  ## Generate a sequence of test states for `stateFork`, produced from a
  ## fresh test database and chain DAG.
  let
    db = makeTestDB(SLOTS_PER_EPOCH)
    monitor = newClone(ValidatorMonitor.init())
    dag = init(ChainDAGRef, defaultRuntimeConfig, db, monitor, {})
  var states = getTestStates(dag.headState, stateFork)

  # Reorder by state-root string so that consecutive states differ by more
  # than just added validators and increased slots - this exercises a wider
  # range of state transitions in the tests below.
  sort(states, proc(x, y: ref ForkedHashedBeaconState): int =
    cmp($getStateRoot(x[]), $getStateRoot(y[])))

  states
|
|
|
|
|
2022-12-14 23:12:29 +00:00
|
|
|
# Each set of states gets used twice, so scope them to module
let
  testStatesPhase0 = getTestStates(BeaconStateFork.Phase0)
  testStatesAltair = getTestStates(BeaconStateFork.Altair)
  testStatesBellatrix = getTestStates(BeaconStateFork.Bellatrix)
  testStatesCapella = getTestStates(BeaconStateFork.Capella)
  testStatesEIP4844 = getTestStates(BeaconStateFork.EIP4844)

# Sanity check: state generation must produce enough states per fork for the
# per-fork "sanity check ... states" tests below to be meaningful.
doAssert len(testStatesPhase0) > 8
doAssert len(testStatesAltair) > 8
doAssert len(testStatesBellatrix) > 8
doAssert len(testStatesCapella) > 8
doAssert len(testStatesEIP4844) > 8
|
2021-09-27 14:22:58 +00:00
|
|
|
|
2021-04-28 16:41:02 +00:00
|
|
|
suite "Beacon chain DB" & preset():
|
|
|
|
  test "empty database" & preset():
    var
      db = BeaconChainDB.new("", inMemory = true)
    check:
      # A freshly created in-memory database holds neither states nor blocks
      db.getPhase0StateRef(ZERO_HASH).isNil
      db.getBlock(ZERO_HASH, phase0.TrustedSignedBeaconBlock).isNone
|
2019-02-21 17:20:50 +00:00
|
|
|
|
2021-06-24 07:11:47 +00:00
|
|
|
  test "sanity check phase 0 blocks" & preset():
    let db = BeaconChainDB.new("", inMemory = true)

    let
      signedBlock = withDigest((phase0.TrustedBeaconBlock)())
      root = hash_tree_root(signedBlock.message)

    db.putBlock(signedBlock)

    var tmp, tmp2: seq[byte]
    check:
      # The block must be visible under the phase0 fork only
      db.containsBlock(root)
      db.containsBlock(root, phase0.TrustedSignedBeaconBlock)
      not db.containsBlock(root, altair.TrustedSignedBeaconBlock)
      not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock)
      not db.containsBlock(root, capella.TrustedSignedBeaconBlock)
      not db.containsBlock(root, eip4844.TrustedSignedBeaconBlock)
      db.getBlock(root, phase0.TrustedSignedBeaconBlock).get() == signedBlock
      # Raw accessors: SSZ bytes and snappy-framed (SZ) bytes must round-trip
      db.getBlockSSZ(root, tmp, phase0.TrustedSignedBeaconBlock)
      db.getBlockSZ(root, tmp2, phase0.TrustedSignedBeaconBlock)
      tmp == SSZ.encode(signedBlock)
      tmp2 == encodeFramed(tmp)
      uncompressedLenFramed(tmp2).isSome

    check:
      # After deletion, the block is gone from every fork and accessor
      db.delBlock(BeaconBlockFork.Phase0, root)
      not db.containsBlock(root)
      not db.containsBlock(root, phase0.TrustedSignedBeaconBlock)
      not db.containsBlock(root, altair.TrustedSignedBeaconBlock)
      not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock)
      not db.containsBlock(root, capella.TrustedSignedBeaconBlock)
      not db.containsBlock(root, eip4844.TrustedSignedBeaconBlock)
      db.getBlock(root, phase0.TrustedSignedBeaconBlock).isErr()
      not db.getBlockSSZ(root, tmp, phase0.TrustedSignedBeaconBlock)
      not db.getBlockSZ(root, tmp2, phase0.TrustedSignedBeaconBlock)

    # State-root index: (block root, slot) -> state root, with a second,
    # distinct root stored at slot + 1
    db.putStateRoot(root, signedBlock.message.slot, root)
    var root2 = root
    root2.data[0] = root.data[0] + 1
    db.putStateRoot(root, signedBlock.message.slot + 1, root2)

    check:
      db.getStateRoot(root, signedBlock.message.slot).get() == root
      db.getStateRoot(root, signedBlock.message.slot + 1).get() == root2

    db.close()
|
|
|
|
|
|
|
|
  test "sanity check Altair blocks" & preset():
    let db = BeaconChainDB.new("", inMemory = true)

    let
      signedBlock = withDigest((altair.TrustedBeaconBlock)())
      root = hash_tree_root(signedBlock.message)

    db.putBlock(signedBlock)

    var tmp, tmp2: seq[byte]
    check:
      # The block must be visible under the Altair fork only
      db.containsBlock(root)
      not db.containsBlock(root, phase0.TrustedSignedBeaconBlock)
      db.containsBlock(root, altair.TrustedSignedBeaconBlock)
      not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock)
      not db.containsBlock(root, capella.TrustedSignedBeaconBlock)
      not db.containsBlock(root, eip4844.TrustedSignedBeaconBlock)
      db.getBlock(root, altair.TrustedSignedBeaconBlock).get() == signedBlock
      # Raw accessors: SSZ bytes and snappy-framed (SZ) bytes must round-trip
      db.getBlockSSZ(root, tmp, altair.TrustedSignedBeaconBlock)
      db.getBlockSZ(root, tmp2, altair.TrustedSignedBeaconBlock)
      tmp == SSZ.encode(signedBlock)
      tmp2 == encodeFramed(tmp)
      uncompressedLenFramed(tmp2).isSome

    check:
      # After deletion, the block is gone from every fork and accessor
      db.delBlock(BeaconBlockFork.Altair, root)
      not db.containsBlock(root)
      not db.containsBlock(root, phase0.TrustedSignedBeaconBlock)
      not db.containsBlock(root, altair.TrustedSignedBeaconBlock)
      not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock)
      not db.containsBlock(root, capella.TrustedSignedBeaconBlock)
      not db.containsBlock(root, eip4844.TrustedSignedBeaconBlock)
      db.getBlock(root, altair.TrustedSignedBeaconBlock).isErr()
      not db.getBlockSSZ(root, tmp, altair.TrustedSignedBeaconBlock)
      not db.getBlockSZ(root, tmp2, altair.TrustedSignedBeaconBlock)

    # State-root index: (block root, slot) -> state root, with a second,
    # distinct root stored at slot + 1
    db.putStateRoot(root, signedBlock.message.slot, root)
    var root2 = root
    root2.data[0] = root.data[0] + 1
    db.putStateRoot(root, signedBlock.message.slot + 1, root2)

    check:
      db.getStateRoot(root, signedBlock.message.slot).get() == root
      db.getStateRoot(root, signedBlock.message.slot + 1).get() == root2

    db.close()
|
|
|
|
|
2022-01-26 12:21:29 +00:00
|
|
|
  test "sanity check Bellatrix blocks" & preset():
    let db = BeaconChainDB.new("", inMemory = true)

    let
      signedBlock = withDigest((bellatrix.TrustedBeaconBlock)())
      root = hash_tree_root(signedBlock.message)

    db.putBlock(signedBlock)

    var tmp, tmp2: seq[byte]
    check:
      # The block must be visible under the Bellatrix fork only
      db.containsBlock(root)
      not db.containsBlock(root, phase0.TrustedSignedBeaconBlock)
      not db.containsBlock(root, altair.TrustedSignedBeaconBlock)
      db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock)
      not db.containsBlock(root, capella.TrustedSignedBeaconBlock)
      not db.containsBlock(root, eip4844.TrustedSignedBeaconBlock)
      db.getBlock(root, bellatrix.TrustedSignedBeaconBlock).get() == signedBlock
      # Raw accessors: SSZ bytes and snappy-framed (SZ) bytes must round-trip
      db.getBlockSSZ(root, tmp, bellatrix.TrustedSignedBeaconBlock)
      db.getBlockSZ(root, tmp2, bellatrix.TrustedSignedBeaconBlock)
      tmp == SSZ.encode(signedBlock)
      tmp2 == encodeFramed(tmp)
      uncompressedLenFramed(tmp2).isSome

    check:
      # After deletion, the block is gone from every fork and accessor
      db.delBlock(BeaconBlockFork.Bellatrix, root)
      not db.containsBlock(root)
      not db.containsBlock(root, phase0.TrustedSignedBeaconBlock)
      not db.containsBlock(root, altair.TrustedSignedBeaconBlock)
      not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock)
      not db.containsBlock(root, capella.TrustedSignedBeaconBlock)
      not db.containsBlock(root, eip4844.TrustedSignedBeaconBlock)
      db.getBlock(root, bellatrix.TrustedSignedBeaconBlock).isErr()
      not db.getBlockSSZ(root, tmp, bellatrix.TrustedSignedBeaconBlock)
      not db.getBlockSZ(root, tmp2, bellatrix.TrustedSignedBeaconBlock)

    # State-root index: (block root, slot) -> state root, with a second,
    # distinct root stored at slot + 1
    db.putStateRoot(root, signedBlock.message.slot, root)
    var root2 = root
    root2.data[0] = root.data[0] + 1
    db.putStateRoot(root, signedBlock.message.slot + 1, root2)

    check:
      db.getStateRoot(root, signedBlock.message.slot).get() == root
      db.getStateRoot(root, signedBlock.message.slot + 1).get() == root2

    db.close()
|
|
|
|
|
2022-11-09 17:32:10 +00:00
|
|
|
  test "sanity check Capella blocks" & preset():
    let db = BeaconChainDB.new("", inMemory = true)

    let
      signedBlock = withDigest((capella.TrustedBeaconBlock)())
      root = hash_tree_root(signedBlock.message)

    db.putBlock(signedBlock)

    var tmp, tmp2: seq[byte]
    check:
      # The block must be visible under the Capella fork only
      # (note: the eip4844 check precedes the capella one in this test)
      db.containsBlock(root)
      not db.containsBlock(root, phase0.TrustedSignedBeaconBlock)
      not db.containsBlock(root, altair.TrustedSignedBeaconBlock)
      not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock)
      not db.containsBlock(root, eip4844.TrustedSignedBeaconBlock)
      db.containsBlock(root, capella.TrustedSignedBeaconBlock)
      db.getBlock(root, capella.TrustedSignedBeaconBlock).get() == signedBlock
      # Raw accessors: SSZ bytes and snappy-framed (SZ) bytes must round-trip
      db.getBlockSSZ(root, tmp, capella.TrustedSignedBeaconBlock)
      db.getBlockSZ(root, tmp2, capella.TrustedSignedBeaconBlock)
      tmp == SSZ.encode(signedBlock)
      tmp2 == encodeFramed(tmp)
      uncompressedLenFramed(tmp2).isSome

    check:
      # After deletion, the block is gone from every fork and accessor
      db.delBlock(BeaconBlockFork.Capella, root)
      not db.containsBlock(root)
      not db.containsBlock(root, phase0.TrustedSignedBeaconBlock)
      not db.containsBlock(root, altair.TrustedSignedBeaconBlock)
      not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock)
      not db.containsBlock(root, capella.TrustedSignedBeaconBlock)
      not db.containsBlock(root, eip4844.TrustedSignedBeaconBlock)
      db.getBlock(root, capella.TrustedSignedBeaconBlock).isErr()
      not db.getBlockSSZ(root, tmp, capella.TrustedSignedBeaconBlock)
      not db.getBlockSZ(root, tmp2, capella.TrustedSignedBeaconBlock)

    # State-root index: (block root, slot) -> state root, with a second,
    # distinct root stored at slot + 1
    db.putStateRoot(root, signedBlock.message.slot, root)
    var root2 = root
    root2.data[0] = root.data[0] + 1
    db.putStateRoot(root, signedBlock.message.slot + 1, root2)

    check:
      db.getStateRoot(root, signedBlock.message.slot).get() == root
      db.getStateRoot(root, signedBlock.message.slot + 1).get() == root2

    db.close()
|
|
|
|
|
2022-12-13 00:56:50 +00:00
|
|
|
  test "sanity check EIP4844 blocks" & preset():
    let db = BeaconChainDB.new("", inMemory = true)

    let
      signedBlock = withDigest((eip4844.TrustedBeaconBlock)())
      root = hash_tree_root(signedBlock.message)

    db.putBlock(signedBlock)

    var tmp, tmp2: seq[byte]
    check:
      # The block must be visible under the EIP-4844 fork only
      db.containsBlock(root)
      not db.containsBlock(root, phase0.TrustedSignedBeaconBlock)
      not db.containsBlock(root, altair.TrustedSignedBeaconBlock)
      not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock)
      not db.containsBlock(root, capella.TrustedSignedBeaconBlock)
      db.containsBlock(root, eip4844.TrustedSignedBeaconBlock)
      db.getBlock(root, eip4844.TrustedSignedBeaconBlock).get() == signedBlock
      # Raw accessors: SSZ bytes and snappy-framed (SZ) bytes must round-trip
      db.getBlockSSZ(root, tmp, eip4844.TrustedSignedBeaconBlock)
      db.getBlockSZ(root, tmp2, eip4844.TrustedSignedBeaconBlock)
      tmp == SSZ.encode(signedBlock)
      tmp2 == encodeFramed(tmp)
      uncompressedLenFramed(tmp2).isSome

    check:
      # After deletion, the block is gone from every fork and accessor
      db.delBlock(BeaconBlockFork.EIP4844, root)
      not db.containsBlock(root)
      not db.containsBlock(root, phase0.TrustedSignedBeaconBlock)
      not db.containsBlock(root, altair.TrustedSignedBeaconBlock)
      not db.containsBlock(root, bellatrix.TrustedSignedBeaconBlock)
      not db.containsBlock(root, capella.TrustedSignedBeaconBlock)
      not db.containsBlock(root, eip4844.TrustedSignedBeaconBlock)
      db.getBlock(root, eip4844.TrustedSignedBeaconBlock).isErr()
      not db.getBlockSSZ(root, tmp, eip4844.TrustedSignedBeaconBlock)
      not db.getBlockSZ(root, tmp2, eip4844.TrustedSignedBeaconBlock)

    # State-root index: (block root, slot) -> state root, with a second,
    # distinct root stored at slot + 1
    db.putStateRoot(root, signedBlock.message.slot, root)
    var root2 = root
    root2.data[0] = root.data[0] + 1
    db.putStateRoot(root, signedBlock.message.slot + 1, root2)

    check:
      db.getStateRoot(root, signedBlock.message.slot).get() == root
      db.getStateRoot(root, signedBlock.message.slot + 1).get() == root2

    db.close()
|
|
|
|
|
2021-06-24 07:11:47 +00:00
|
|
|
  test "sanity check phase 0 states" & preset():
    let db = makeTestDB(SLOTS_PER_EPOCH)

    for state in testStatesPhase0:
      let root = state[].phase0Data.root
      db.putState(root, state[].phase0Data.data)

      check:
        # Stored state round-trips: loading it back yields the same root
        db.containsState(root)
        hash_tree_root(db.getPhase0StateRef(root)[]) == root

      db.delState(BeaconStateFork.Phase0, root)
      check:
        # After deletion the state is gone and the loader returns nil
        not db.containsState(root)
        db.getPhase0StateRef(root).isNil

    db.close()
|
|
|
|
|
2021-06-24 07:11:47 +00:00
|
|
|
  test "sanity check Altair states" & preset():
    let db = makeTestDB(SLOTS_PER_EPOCH)

    for state in testStatesAltair:
      let root = state[].altairData.root
      db.putState(root, state[].altairData.data)

      check:
        # Stored state round-trips: loading it back yields the same root
        db.containsState(root)
        hash_tree_root(db.getAltairStateRef(root)[]) == root

      db.delState(BeaconStateFork.Altair, root)
      check:
        # After deletion the state is gone and the loader returns nil
        not db.containsState(root)
        db.getAltairStateRef(root).isNil

    db.close()
|
|
|
|
|
2022-01-26 12:21:29 +00:00
|
|
|
  test "sanity check Bellatrix states" & preset():
    let db = makeTestDB(SLOTS_PER_EPOCH)

    for state in testStatesBellatrix:
      let root = state[].bellatrixData.root
      db.putState(root, state[].bellatrixData.data)

      check:
        # Stored state round-trips: loading it back yields the same root
        db.containsState(root)
        hash_tree_root(db.getBellatrixStateRef(root)[]) == root

      db.delState(BeaconStateFork.Bellatrix, root)
      check:
        # After deletion the state is gone and the loader returns nil
        not db.containsState(root)
        db.getBellatrixStateRef(root).isNil

    db.close()
|
|
|
|
|
2022-12-14 23:12:29 +00:00
|
|
|
test "sanity check Capella states" & preset():
|
|
|
|
let db = makeTestDB(SLOTS_PER_EPOCH)
|
|
|
|
|
|
|
|
for state in testStatesCapella:
|
|
|
|
let root = state[].capellaData.root
|
|
|
|
db.putState(root, state[].capellaData.data)
|
|
|
|
|
|
|
|
check:
|
|
|
|
db.containsState(root)
|
|
|
|
hash_tree_root(db.getCapellaStateRef(root)[]) == root
|
|
|
|
|
History pruning (fixes #4419) (#4445)
Introduce (optional) pruning of historical data - a pruned node will
continue to answer queries for historical data up to
`MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs, or roughly 5 months, capping
typical database usage at around 60-70gb.
To enable pruning, add `--history=prune` to the command line - on the
first start, old data will be cleared (which may take a while) - after
that, data is pruned continuously.
When pruning an existing database, the database will not shrink -
instead, the freed space is recycled as the node continues to run - to
free up space, perform a trusted node sync with a fresh database.
When switching on archive mode in a pruned node, history is retained
from that point onwards.
History pruning is scheduled to be enabled by default in a future
release.
In this PR, `minimal` mode from #4419 is not implemented meaning
retention periods for states and blocks are always the same - depending
on user demand, a future PR may implement `minimal` as well.
2023-01-07 10:02:15 +00:00
|
|
|
db.delState(BeaconStateFork.Capella, root)
|
2022-12-14 23:12:29 +00:00
|
|
|
check:
|
|
|
|
not db.containsState(root)
|
|
|
|
db.getCapellaStateRef(root).isNil
|
|
|
|
|
|
|
|
db.close()
|
|
|
|
|
|
|
|
test "sanity check EIP4844 states" & preset():
|
|
|
|
let db = makeTestDB(SLOTS_PER_EPOCH)
|
|
|
|
|
|
|
|
for state in testStatesEIP4844:
|
|
|
|
let root = state[].eip4844Data.root
|
|
|
|
db.putState(root, state[].eip4844Data.data)
|
|
|
|
|
|
|
|
check:
|
|
|
|
db.containsState(root)
|
|
|
|
hash_tree_root(db.getEIP4844StateRef(root)[]) == root
|
|
|
|
|
History pruning (fixes #4419) (#4445)
Introduce (optional) pruning of historical data - a pruned node will
continue to answer queries for historical data up to
`MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs, or roughly 5 months, capping
typical database usage at around 60-70gb.
To enable pruning, add `--history=prune` to the command line - on the
first start, old data will be cleared (which may take a while) - after
that, data is pruned continuously.
When pruning an existing database, the database will not shrink -
instead, the freed space is recycled as the node continues to run - to
free up space, perform a trusted node sync with a fresh database.
When switching on archive mode in a pruned node, history is retained
from that point onwards.
History pruning is scheduled to be enabled by default in a future
release.
In this PR, `minimal` mode from #4419 is not implemented meaning
retention periods for states and blocks are always the same - depending
on user demand, a future PR may implement `minimal` as well.
2023-01-07 10:02:15 +00:00
|
|
|
db.delState(BeaconStateFork.EIP4844, root)
|
2022-12-14 23:12:29 +00:00
|
|
|
check:
|
|
|
|
not db.containsState(root)
|
|
|
|
db.getEIP4844StateRef(root).isNil
|
|
|
|
|
|
|
|
db.close()
|
|
|
|
|
2021-06-24 07:11:47 +00:00
|
|
|
test "sanity check phase 0 states, reusing buffers" & preset():
|
2022-02-20 20:13:06 +00:00
|
|
|
let db = makeTestDB(SLOTS_PER_EPOCH)
|
2021-06-24 07:11:47 +00:00
|
|
|
let stateBuffer = (phase0.BeaconStateRef)()
|
2021-03-15 14:11:51 +00:00
|
|
|
|
2021-09-27 14:22:58 +00:00
|
|
|
for state in testStatesPhase0:
|
2021-11-18 12:02:43 +00:00
|
|
|
let root = state[].phase0Data.root
|
|
|
|
db.putState(root, state[].phase0Data.data)
|
2021-03-15 14:11:51 +00:00
|
|
|
|
|
|
|
check:
|
|
|
|
db.getState(root, stateBuffer[], noRollback)
|
|
|
|
db.containsState(root)
|
|
|
|
hash_tree_root(stateBuffer[]) == root
|
|
|
|
|
History pruning (fixes #4419) (#4445)
Introduce (optional) pruning of historical data - a pruned node will
continue to answer queries for historical data up to
`MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs, or roughly 5 months, capping
typical database usage at around 60-70gb.
To enable pruning, add `--history=prune` to the command line - on the
first start, old data will be cleared (which may take a while) - after
that, data is pruned continuously.
When pruning an existing database, the database will not shrink -
instead, the freed space is recycled as the node continues to run - to
free up space, perform a trusted node sync with a fresh database.
When switching on archive mode in a pruned node, history is retained
from that point onwards.
History pruning is scheduled to be enabled by default in a future
release.
In this PR, `minimal` mode from #4419 is not implemented meaning
retention periods for states and blocks are always the same - depending
on user demand, a future PR may implement `minimal` as well.
2023-01-07 10:02:15 +00:00
|
|
|
db.delState(BeaconStateFork.Phase0, root)
|
2021-06-24 07:11:47 +00:00
|
|
|
check:
|
|
|
|
not db.containsState(root)
|
|
|
|
not db.getState(root, stateBuffer[], noRollback)
|
|
|
|
|
|
|
|
db.close()
|
|
|
|
|
|
|
|
test "sanity check Altair states, reusing buffers" & preset():
|
2022-02-20 20:13:06 +00:00
|
|
|
let db = makeTestDB(SLOTS_PER_EPOCH)
|
2021-06-24 07:11:47 +00:00
|
|
|
let stateBuffer = (altair.BeaconStateRef)()
|
|
|
|
|
2021-09-27 14:22:58 +00:00
|
|
|
for state in testStatesAltair:
|
2021-11-18 12:02:43 +00:00
|
|
|
let root = state[].altairData.root
|
|
|
|
db.putState(root, state[].altairData.data)
|
2021-06-24 07:11:47 +00:00
|
|
|
|
|
|
|
check:
|
2021-11-05 07:34:34 +00:00
|
|
|
db.getState(root, stateBuffer[], noRollback)
|
2021-06-24 07:11:47 +00:00
|
|
|
db.containsState(root)
|
|
|
|
hash_tree_root(stateBuffer[]) == root
|
|
|
|
|
History pruning (fixes #4419) (#4445)
Introduce (optional) pruning of historical data - a pruned node will
continue to answer queries for historical data up to
`MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs, or roughly 5 months, capping
typical database usage at around 60-70gb.
To enable pruning, add `--history=prune` to the command line - on the
first start, old data will be cleared (which may take a while) - after
that, data is pruned continuously.
When pruning an existing database, the database will not shrink -
instead, the freed space is recycled as the node continues to run - to
free up space, perform a trusted node sync with a fresh database.
When switching on archive mode in a pruned node, history is retained
from that point onwards.
History pruning is scheduled to be enabled by default in a future
release.
In this PR, `minimal` mode from #4419 is not implemented meaning
retention periods for states and blocks are always the same - depending
on user demand, a future PR may implement `minimal` as well.
2023-01-07 10:02:15 +00:00
|
|
|
db.delState(BeaconStateFork.Altair, root)
|
2021-06-24 07:11:47 +00:00
|
|
|
check:
|
|
|
|
not db.containsState(root)
|
2021-11-05 07:34:34 +00:00
|
|
|
not db.getState(root, stateBuffer[], noRollback)
|
2021-03-15 14:11:51 +00:00
|
|
|
|
|
|
|
db.close()
|
|
|
|
|
2022-01-26 12:21:29 +00:00
|
|
|
test "sanity check Bellatrix states, reusing buffers" & preset():
|
2022-02-20 20:13:06 +00:00
|
|
|
let db = makeTestDB(SLOTS_PER_EPOCH)
|
2022-01-12 14:50:30 +00:00
|
|
|
let stateBuffer = (bellatrix.BeaconStateRef)()
|
2021-09-30 01:07:24 +00:00
|
|
|
|
2022-01-04 09:45:38 +00:00
|
|
|
for state in testStatesBellatrix:
|
2022-01-24 16:23:13 +00:00
|
|
|
let root = state[].bellatrixData.root
|
|
|
|
db.putState(root, state[].bellatrixData.data)
|
2021-09-30 01:07:24 +00:00
|
|
|
|
|
|
|
check:
|
2021-11-05 07:34:34 +00:00
|
|
|
db.getState(root, stateBuffer[], noRollback)
|
2021-09-30 01:07:24 +00:00
|
|
|
db.containsState(root)
|
|
|
|
hash_tree_root(stateBuffer[]) == root
|
|
|
|
|
History pruning (fixes #4419) (#4445)
Introduce (optional) pruning of historical data - a pruned node will
continue to answer queries for historical data up to
`MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs, or roughly 5 months, capping
typical database usage at around 60-70gb.
To enable pruning, add `--history=prune` to the command line - on the
first start, old data will be cleared (which may take a while) - after
that, data is pruned continuously.
When pruning an existing database, the database will not shrink -
instead, the freed space is recycled as the node continues to run - to
free up space, perform a trusted node sync with a fresh database.
When switching on archive mode in a pruned node, history is retained
from that point onwards.
History pruning is scheduled to be enabled by default in a future
release.
In this PR, `minimal` mode from #4419 is not implemented meaning
retention periods for states and blocks are always the same - depending
on user demand, a future PR may implement `minimal` as well.
2023-01-07 10:02:15 +00:00
|
|
|
db.delState(BeaconStateFork.Bellatrix, root)
|
2021-09-30 01:07:24 +00:00
|
|
|
check:
|
|
|
|
not db.containsState(root)
|
2021-11-05 07:34:34 +00:00
|
|
|
not db.getState(root, stateBuffer[], noRollback)
|
2021-09-30 01:07:24 +00:00
|
|
|
|
|
|
|
db.close()
|
|
|
|
|
2022-12-14 23:12:29 +00:00
|
|
|
test "sanity check Capella states, reusing buffers" & preset():
|
|
|
|
let db = makeTestDB(SLOTS_PER_EPOCH)
|
|
|
|
let stateBuffer = (capella.BeaconStateRef)()
|
|
|
|
|
|
|
|
for state in testStatesCapella:
|
|
|
|
let root = state[].capellaData.root
|
|
|
|
db.putState(root, state[].capellaData.data)
|
|
|
|
|
|
|
|
check:
|
|
|
|
db.getState(root, stateBuffer[], noRollback)
|
|
|
|
db.containsState(root)
|
|
|
|
hash_tree_root(stateBuffer[]) == root
|
|
|
|
|
History pruning (fixes #4419) (#4445)
Introduce (optional) pruning of historical data - a pruned node will
continue to answer queries for historical data up to
`MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs, or roughly 5 months, capping
typical database usage at around 60-70gb.
To enable pruning, add `--history=prune` to the command line - on the
first start, old data will be cleared (which may take a while) - after
that, data is pruned continuously.
When pruning an existing database, the database will not shrink -
instead, the freed space is recycled as the node continues to run - to
free up space, perform a trusted node sync with a fresh database.
When switching on archive mode in a pruned node, history is retained
from that point onwards.
History pruning is scheduled to be enabled by default in a future
release.
In this PR, `minimal` mode from #4419 is not implemented meaning
retention periods for states and blocks are always the same - depending
on user demand, a future PR may implement `minimal` as well.
2023-01-07 10:02:15 +00:00
|
|
|
db.delState(BeaconStateFork.Capella, root)
|
2022-12-14 23:12:29 +00:00
|
|
|
check:
|
|
|
|
not db.containsState(root)
|
|
|
|
not db.getState(root, stateBuffer[], noRollback)
|
|
|
|
|
|
|
|
db.close()
|
|
|
|
|
|
|
|
test "sanity check EIP4844 states, reusing buffers" & preset():
|
|
|
|
let db = makeTestDB(SLOTS_PER_EPOCH)
|
|
|
|
let stateBuffer = (eip4844.BeaconStateRef)()
|
|
|
|
|
|
|
|
for state in testStatesEIP4844:
|
|
|
|
let root = state[].eip4844Data.root
|
|
|
|
db.putState(root, state[].eip4844Data.data)
|
|
|
|
|
|
|
|
check:
|
|
|
|
db.getState(root, stateBuffer[], noRollback)
|
|
|
|
db.containsState(root)
|
|
|
|
hash_tree_root(stateBuffer[]) == root
|
|
|
|
|
History pruning (fixes #4419) (#4445)
Introduce (optional) pruning of historical data - a pruned node will
continue to answer queries for historical data up to
`MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs, or roughly 5 months, capping
typical database usage at around 60-70gb.
To enable pruning, add `--history=prune` to the command line - on the
first start, old data will be cleared (which may take a while) - after
that, data is pruned continuously.
When pruning an existing database, the database will not shrink -
instead, the freed space is recycled as the node continues to run - to
free up space, perform a trusted node sync with a fresh database.
When switching on archive mode in a pruned node, history is retained
from that point onwards.
History pruning is scheduled to be enabled by default in a future
release.
In this PR, `minimal` mode from #4419 is not implemented meaning
retention periods for states and blocks are always the same - depending
on user demand, a future PR may implement `minimal` as well.
2023-01-07 10:02:15 +00:00
|
|
|
db.delState(BeaconStateFork.EIP4844, root)
|
2022-12-14 23:12:29 +00:00
|
|
|
check:
|
|
|
|
not db.containsState(root)
|
|
|
|
not db.getState(root, stateBuffer[], noRollback)
|
|
|
|
|
|
|
|
db.close()
|
|
|
|
|
2021-06-29 15:09:29 +00:00
|
|
|
test "sanity check phase 0 getState rollback" & preset():
|
|
|
|
var
|
|
|
|
db = makeTestDB(SLOTS_PER_EPOCH)
|
2021-12-20 19:20:31 +00:00
|
|
|
validatorMonitor = newClone(ValidatorMonitor.init())
|
|
|
|
dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
|
2021-06-29 15:09:29 +00:00
|
|
|
state = (ref ForkedHashedBeaconState)(
|
2021-10-18 16:37:27 +00:00
|
|
|
kind: BeaconStateFork.Phase0,
|
|
|
|
phase0Data: phase0.HashedBeaconState(data: phase0.BeaconState(
|
2021-06-29 15:09:29 +00:00
|
|
|
slot: 10.Slot)))
|
|
|
|
root = Eth2Digest()
|
|
|
|
|
2022-02-21 11:55:56 +00:00
|
|
|
db.putCorruptState(BeaconStateFork.Phase0, root)
|
2021-06-29 15:09:29 +00:00
|
|
|
|
|
|
|
let restoreAddr = addr dag.headState
|
|
|
|
|
|
|
|
func restore() =
|
2022-03-16 07:20:40 +00:00
|
|
|
assign(state[], restoreAddr[])
|
2021-06-29 15:09:29 +00:00
|
|
|
|
|
|
|
check:
|
2021-10-18 16:37:27 +00:00
|
|
|
state[].phase0Data.data.slot == 10.Slot
|
|
|
|
not db.getState(root, state[].phase0Data.data, restore)
|
|
|
|
state[].phase0Data.data.slot != 10.Slot
|
2021-06-29 15:09:29 +00:00
|
|
|
|
|
|
|
test "sanity check Altair and cross-fork getState rollback" & preset():
|
|
|
|
var
|
|
|
|
db = makeTestDB(SLOTS_PER_EPOCH)
|
2021-12-20 19:20:31 +00:00
|
|
|
validatorMonitor = newClone(ValidatorMonitor.init())
|
|
|
|
dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
|
2021-06-29 15:09:29 +00:00
|
|
|
state = (ref ForkedHashedBeaconState)(
|
2021-10-18 16:37:27 +00:00
|
|
|
kind: BeaconStateFork.Altair,
|
|
|
|
altairData: altair.HashedBeaconState(data: altair.BeaconState(
|
2021-06-29 15:09:29 +00:00
|
|
|
slot: 10.Slot)))
|
|
|
|
root = Eth2Digest()
|
|
|
|
|
2022-02-21 11:55:56 +00:00
|
|
|
db.putCorruptState(BeaconStateFork.Altair, root)
|
2021-06-29 15:09:29 +00:00
|
|
|
|
|
|
|
let restoreAddr = addr dag.headState
|
|
|
|
|
|
|
|
func restore() =
|
2022-03-16 07:20:40 +00:00
|
|
|
assign(state[], restoreAddr[])
|
2021-06-29 15:09:29 +00:00
|
|
|
|
|
|
|
check:
|
2021-10-18 16:37:27 +00:00
|
|
|
state[].altairData.data.slot == 10.Slot
|
2021-11-05 07:34:34 +00:00
|
|
|
not db.getState(root, state[].altairData.data, restore)
|
2021-06-29 15:09:29 +00:00
|
|
|
|
|
|
|
# assign() has switched the case object fork
|
2021-10-18 16:37:27 +00:00
|
|
|
state[].kind == BeaconStateFork.Phase0
|
|
|
|
state[].phase0Data.data.slot != 10.Slot
|
2021-06-29 15:09:29 +00:00
|
|
|
|
2022-01-26 12:21:29 +00:00
|
|
|
test "sanity check Bellatrix and cross-fork getState rollback" & preset():
|
2021-09-30 01:07:24 +00:00
|
|
|
var
|
|
|
|
db = makeTestDB(SLOTS_PER_EPOCH)
|
2021-12-20 19:20:31 +00:00
|
|
|
validatorMonitor = newClone(ValidatorMonitor.init())
|
|
|
|
dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
|
2021-09-30 01:07:24 +00:00
|
|
|
state = (ref ForkedHashedBeaconState)(
|
2022-01-04 09:45:38 +00:00
|
|
|
kind: BeaconStateFork.Bellatrix,
|
2022-01-24 16:23:13 +00:00
|
|
|
bellatrixData: bellatrix.HashedBeaconState(data: bellatrix.BeaconState(
|
2021-09-30 01:07:24 +00:00
|
|
|
slot: 10.Slot)))
|
|
|
|
root = Eth2Digest()
|
|
|
|
|
2022-02-21 11:55:56 +00:00
|
|
|
db.putCorruptState(BeaconStateFork.Bellatrix, root)
|
2021-09-30 01:07:24 +00:00
|
|
|
|
|
|
|
let restoreAddr = addr dag.headState
|
|
|
|
|
|
|
|
func restore() =
|
2022-03-16 07:20:40 +00:00
|
|
|
assign(state[], restoreAddr[])
|
2021-09-30 01:07:24 +00:00
|
|
|
|
|
|
|
check:
|
2022-01-24 16:23:13 +00:00
|
|
|
state[].bellatrixData.data.slot == 10.Slot
|
|
|
|
not db.getState(root, state[].bellatrixData.data, restore)
|
2021-09-30 01:07:24 +00:00
|
|
|
|
|
|
|
# assign() has switched the case object fork
|
2021-10-18 16:37:27 +00:00
|
|
|
state[].kind == BeaconStateFork.Phase0
|
|
|
|
state[].phase0Data.data.slot != 10.Slot
|
2022-12-14 23:12:29 +00:00
|
|
|
|
|
|
|
test "sanity check Capella and cross-fork getState rollback" & preset():
|
|
|
|
var
|
|
|
|
db = makeTestDB(SLOTS_PER_EPOCH)
|
|
|
|
validatorMonitor = newClone(ValidatorMonitor.init())
|
|
|
|
dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
|
|
|
|
state = (ref ForkedHashedBeaconState)(
|
|
|
|
kind: BeaconStateFork.Capella,
|
|
|
|
capellaData: capella.HashedBeaconState(data: capella.BeaconState(
|
|
|
|
slot: 10.Slot)))
|
|
|
|
root = Eth2Digest()
|
|
|
|
|
|
|
|
db.putCorruptState(BeaconStateFork.Capella, root)
|
|
|
|
|
|
|
|
let restoreAddr = addr dag.headState
|
|
|
|
|
|
|
|
func restore() =
|
|
|
|
assign(state[], restoreAddr[])
|
|
|
|
|
|
|
|
check:
|
|
|
|
state[].capellaData.data.slot == 10.Slot
|
|
|
|
not db.getState(root, state[].capellaData.data, restore)
|
|
|
|
|
|
|
|
# assign() has switched the case object fork
|
|
|
|
state[].kind == BeaconStateFork.Phase0
|
|
|
|
state[].phase0Data.data.slot != 10.Slot
|
|
|
|
|
|
|
|
test "sanity check EIP4844 and cross-fork getState rollback" & preset():
|
|
|
|
var
|
|
|
|
db = makeTestDB(SLOTS_PER_EPOCH)
|
|
|
|
validatorMonitor = newClone(ValidatorMonitor.init())
|
|
|
|
dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
|
|
|
|
state = (ref ForkedHashedBeaconState)(
|
|
|
|
kind: BeaconStateFork.EIP4844,
|
|
|
|
eip4844Data: eip4844.HashedBeaconState(data: eip4844.BeaconState(
|
|
|
|
slot: 10.Slot)))
|
|
|
|
root = Eth2Digest()
|
|
|
|
|
|
|
|
db.putCorruptState(BeaconStateFork.EIP4844, root)
|
|
|
|
|
|
|
|
let restoreAddr = addr dag.headState
|
|
|
|
|
|
|
|
func restore() =
|
|
|
|
assign(state[], restoreAddr[])
|
|
|
|
|
|
|
|
check:
|
|
|
|
state[].eip4844Data.data.slot == 10.Slot
|
|
|
|
not db.getState(root, state[].eip4844Data.data, restore)
|
|
|
|
|
|
|
|
# assign() has switched the case object fork
|
|
|
|
state[].kind == BeaconStateFork.Phase0
|
|
|
|
state[].phase0Data.data.slot != 10.Slot
|
2021-09-30 01:07:24 +00:00
|
|
|
|
2021-04-28 16:41:02 +00:00
|
|
|
test "find ancestors" & preset():
|
2019-03-08 16:40:17 +00:00
|
|
|
var
|
2021-07-13 14:27:10 +00:00
|
|
|
db = BeaconChainDB.new("", inMemory = true)
|
2019-02-21 17:20:50 +00:00
|
|
|
|
|
|
|
let
|
2020-07-16 13:16:51 +00:00
|
|
|
a0 = withDigest(
|
2021-06-24 07:11:47 +00:00
|
|
|
(phase0.TrustedBeaconBlock)(slot: GENESIS_SLOT + 0))
|
2020-07-16 13:16:51 +00:00
|
|
|
a1 = withDigest(
|
2021-06-24 07:11:47 +00:00
|
|
|
(phase0.TrustedBeaconBlock)(slot: GENESIS_SLOT + 1, parent_root: a0.root))
|
2020-07-16 13:16:51 +00:00
|
|
|
a2 = withDigest(
|
2021-06-24 07:11:47 +00:00
|
|
|
(phase0.TrustedBeaconBlock)(slot: GENESIS_SLOT + 2, parent_root: a1.root))
|
2019-02-21 17:20:50 +00:00
|
|
|
|
2020-11-03 22:30:43 +00:00
|
|
|
doAssert toSeq(db.getAncestorSummaries(a0.root)).len == 0
|
|
|
|
doAssert toSeq(db.getAncestorSummaries(a2.root)).len == 0
|
2022-01-30 16:51:04 +00:00
|
|
|
doAssert db.getBeaconBlockSummary(a2.root).isNone()
|
2020-11-03 22:30:43 +00:00
|
|
|
|
2019-02-21 21:38:26 +00:00
|
|
|
db.putBlock(a2)
|
2019-02-21 17:20:50 +00:00
|
|
|
|
2020-11-03 22:30:43 +00:00
|
|
|
doAssert toSeq(db.getAncestorSummaries(a0.root)).len == 0
|
|
|
|
doAssert toSeq(db.getAncestorSummaries(a2.root)).len == 1
|
2022-01-30 16:51:04 +00:00
|
|
|
doAssert db.getBeaconBlockSummary(a2.root).get().slot == a2.message.slot
|
2020-11-03 22:30:43 +00:00
|
|
|
|
2019-02-21 21:38:26 +00:00
|
|
|
db.putBlock(a1)
|
2019-02-21 17:20:50 +00:00
|
|
|
|
2020-11-03 22:30:43 +00:00
|
|
|
doAssert toSeq(db.getAncestorSummaries(a0.root)).len == 0
|
|
|
|
doAssert toSeq(db.getAncestorSummaries(a2.root)).len == 2
|
|
|
|
|
2019-02-21 21:38:26 +00:00
|
|
|
db.putBlock(a0)
|
2019-02-21 17:20:50 +00:00
|
|
|
|
2020-11-03 22:30:43 +00:00
|
|
|
doAssert toSeq(db.getAncestorSummaries(a0.root)).len == 1
|
|
|
|
doAssert toSeq(db.getAncestorSummaries(a2.root)).len == 3
|
|
|
|
|
2021-04-28 16:41:02 +00:00
|
|
|
test "sanity check genesis roundtrip" & preset():
|
2019-09-05 14:27:28 +00:00
|
|
|
# This is a really dumb way of checking that we can roundtrip a genesis
|
|
|
|
# state. We've been bit by this because we've had a bug in the BLS
|
|
|
|
# serialization where an all-zero default-initialized bls signature could
|
|
|
|
# not be deserialized because the deserialization was too strict.
|
|
|
|
var
|
2021-07-13 14:27:10 +00:00
|
|
|
db = BeaconChainDB.new("", inMemory = true)
|
2019-09-05 14:27:28 +00:00
|
|
|
|
|
|
|
let
|
2021-11-18 12:02:43 +00:00
|
|
|
state = newClone(initialize_hashed_beacon_state_from_eth1(
|
Implement split preset/config support (#2710)
* Implement split preset/config support
This is the initial bulk refactor to introduce runtime config values in
a number of places, somewhat replacing the existing mechanism of loading
network metadata.
It still needs more work, this is the initial refactor that introduces
runtime configuration in some of the places that need it.
The PR changes the way presets and constants work, to match the spec. In
particular, a "preset" now refers to the compile-time configuration
while a "cfg" or "RuntimeConfig" is the dynamic part.
A single binary can support either mainnet or minimal, but not both.
Support for other presets has been removed completely (can be readded,
in case there's need).
There's a number of outstanding tasks:
* `SECONDS_PER_SLOT` still needs fixing
* loading custom runtime configs needs redoing
* checking constants against YAML file
* yeerongpilly support
`build/nimbus_beacon_node --network=yeerongpilly --discv5:no --log-level=DEBUG`
* load fork epoch from config
* fix fork digest sent in status
* nicer error string for request failures
* fix tools
* one more
* fixup
* fixup
* fixup
* use "standard" network definition folder in local testnet
Files are loaded from their standard locations, including genesis etc,
to conform to the format used in the `eth2-networks` repo.
* fix launch scripts, allow unknown config values
* fix base config of rest test
* cleanups
* bundle mainnet config using common loader
* fix spec links and names
* only include supported preset in binary
* drop yeerongpilly, add altair-devnet-0, support boot_enr.yaml
2021-07-12 13:01:38 +00:00
|
|
|
defaultRuntimeConfig, eth1BlockHash, 0,
|
2021-11-18 12:02:43 +00:00
|
|
|
makeInitialDeposits(SLOTS_PER_EPOCH), {skipBlsValidation}))
|
2019-09-05 14:27:28 +00:00
|
|
|
|
2021-11-18 12:02:43 +00:00
|
|
|
db.putState(state[].root, state[].data)
|
2019-09-05 14:27:28 +00:00
|
|
|
|
2021-11-18 12:02:43 +00:00
|
|
|
check db.containsState(state[].root)
|
|
|
|
let state2 = db.getPhase0StateRef(state[].root)
|
History pruning (fixes #4419) (#4445)
Introduce (optional) pruning of historical data - a pruned node will
continue to answer queries for historical data up to
`MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs, or roughly 5 months, capping
typical database usage at around 60-70gb.
To enable pruning, add `--history=prune` to the command line - on the
first start, old data will be cleared (which may take a while) - after
that, data is pruned continuously.
When pruning an existing database, the database will not shrink -
instead, the freed space is recycled as the node continues to run - to
free up space, perform a trusted node sync with a fresh database.
When switching on archive mode in a pruned node, history is retained
from that point onwards.
History pruning is scheduled to be enabled by default in a future
release.
In this PR, `minimal` mode from #4419 is not implemented meaning
retention periods for states and blocks are always the same - depending
on user demand, a future PR may implement `minimal` as well.
2023-01-07 10:02:15 +00:00
|
|
|
db.delState(BeaconStateFork.Phase0, state[].root)
|
2021-11-18 12:02:43 +00:00
|
|
|
check not db.containsState(state[].root)
|
2021-01-18 20:34:41 +00:00
|
|
|
db.close()
|
|
|
|
|
|
|
|
check:
|
2021-11-18 12:02:43 +00:00
|
|
|
hash_tree_root(state2[]) == state[].root
|
2021-01-18 20:34:41 +00:00
|
|
|
|
2021-04-28 16:41:02 +00:00
|
|
|
test "sanity check state diff roundtrip" & preset():
|
2021-01-18 20:34:41 +00:00
|
|
|
var
|
2021-07-13 14:27:10 +00:00
|
|
|
db = BeaconChainDB.new("", inMemory = true)
|
2021-01-18 20:34:41 +00:00
|
|
|
|
|
|
|
# TODO htr(diff) probably not interesting/useful, but stand-in
|
|
|
|
let
|
|
|
|
stateDiff = BeaconStateDiff()
|
|
|
|
root = hash_tree_root(stateDiff)
|
|
|
|
|
|
|
|
db.putStateDiff(root, stateDiff)
|
|
|
|
|
|
|
|
let state2 = db.getStateDiff(root)
|
|
|
|
db.delStateDiff(root)
|
2021-05-17 16:37:26 +00:00
|
|
|
check db.getStateDiff(root).isNone()
|
2020-09-12 05:35:58 +00:00
|
|
|
db.close()
|
2020-05-28 08:28:14 +00:00
|
|
|
|
2019-09-05 14:27:28 +00:00
|
|
|
check:
|
2020-05-28 08:28:14 +00:00
|
|
|
hash_tree_root(state2[]) == root
|
2022-01-30 16:51:04 +00:00
|
|
|
|
|
|
|
suite "FinalizedBlocks" & preset():
|
|
|
|
test "Basic ops" & preset():
|
|
|
|
var
|
|
|
|
db = SqStoreRef.init("", "test", inMemory = true).expect(
|
|
|
|
"working database (out of memory?)")
|
|
|
|
|
|
|
|
var s = FinalizedBlocks.init(db, "finalized_blocks").get()
|
|
|
|
|
|
|
|
check:
|
|
|
|
s.low.isNone
|
|
|
|
s.high.isNone
|
|
|
|
|
2022-06-18 04:57:37 +00:00
|
|
|
s.insert(Slot 0, ZERO_HASH)
|
2022-01-30 16:51:04 +00:00
|
|
|
check:
|
|
|
|
s.low.get() == Slot 0
|
|
|
|
s.high.get() == Slot 0
|
|
|
|
|
2022-06-18 04:57:37 +00:00
|
|
|
s.insert(Slot 5, ZERO_HASH)
|
2022-01-30 16:51:04 +00:00
|
|
|
check:
|
|
|
|
s.low.get() == Slot 0
|
|
|
|
s.high.get() == Slot 5
|
|
|
|
|
|
|
|
var items = 0
|
|
|
|
for k, v in s:
|
|
|
|
check: k in [Slot 0, Slot 5]
|
|
|
|
items += 1
|
|
|
|
|
|
|
|
check: items == 2
|