Revert writing backfill root to database (#3215)

The backfill root, introduced in #3171, turns out to be unnecessary: we can
just follow the block headers to achieve the same effect

* leaves the constant in the code so as to avoid confusion when reading a
database that already had the constant written (such as the fleet nodes and
other `unstable` users)
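
The mechanism that replaces the stored root, condensed from the `ChainDAGRef.init` hunk further down (all names are the diff's own; this is a sketch of the idea rather than the verbatim patch, and the `backfillBlocks` bookkeeping is elided):

var backfill = BeaconBlockSummary(slot: GENESIS_SLOT)
for blck in db.getAncestorSummaries(headRoot):
  # summaries are walked from the head towards genesis, so the last one at
  # or below the tail slot is the oldest block we hold - i.e. the backfill
  if blck.summary.slot <= tailRef.slot:
    backfill = blck.summary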
Jacek Sieka 2021-12-21 11:40:14 +01:00 committed by GitHub
parent c270ec21e4
commit 1021e3324e
12 changed files with 51 additions and 70 deletions

View File

@@ -137,9 +137,7 @@ type
## only recent contract state data (i.e. only recent `deposit_roots`).
kHashToStateDiff # Obsolete
kHashToStateOnlyMutableValidators
kBackfillBlock
## Pointer to the earliest block that we have backfilled - if this is not
## set, backfill == tail
kBackfillBlock # Obsolete, was in `unstable` for a while, but never released
BeaconBlockSummary* = object
## Cache of beacon block summaries - during startup when we construct the
@@ -477,7 +475,7 @@ proc close*(db: BeaconChainDB) =
db.db = nil
func toBeaconBlockSummary(v: SomeSomeBeaconBlock): BeaconBlockSummary =
func toBeaconBlockSummary*(v: SomeSomeBeaconBlock): BeaconBlockSummary =
BeaconBlockSummary(
slot: v.slot,
parent_root: v.parent_root,
@@ -591,9 +589,6 @@ proc putTailBlock*(db: BeaconChainDB, key: Eth2Digest) =
proc putGenesisBlock*(db: BeaconChainDB, key: Eth2Digest) =
db.keyValues.putRaw(subkey(kGenesisBlock), key)
proc putBackfillBlock*(db: BeaconChainDB, key: Eth2Digest) =
db.keyValues.putRaw(subkey(kBackfillBlock), key)
proc putEth2FinalizedTo*(db: BeaconChainDB,
eth1Checkpoint: DepositContractSnapshot) =
db.keyValues.putSnappySSZ(subkey(kDepositsFinalizedByEth2), eth1Checkpoint)
@@ -797,9 +792,6 @@ proc getGenesisBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
db.keyValues.getRaw(subkey(kGenesisBlock), Eth2Digest) or
db.v0.getGenesisBlock()
proc getBackfillBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
db.keyValues.getRaw(subkey(kBackfillBlock), Eth2Digest)
proc getEth2FinalizedTo(db: BeaconChainDBV0): Opt[DepositContractSnapshot] =
result.ok(DepositContractSnapshot())
let r = db.backend.getSnappySSZ(subkey(kDepositsFinalizedByEth2), result.get)
@@ -856,7 +848,7 @@ iterator getAncestors*(db: BeaconChainDB, root: Eth2Digest):
yield res
root = res.message.parent_root
proc loadSummaries(db: BeaconChainDB): Table[Eth2Digest, BeaconBlockSummary] =
proc loadSummaries*(db: BeaconChainDB): Table[Eth2Digest, BeaconBlockSummary] =
# Load summaries into table - there's no telling what order they're in so we
# load them all - bugs in nim prevent this code from living in the iterator.
var summaries = initTable[Eth2Digest, BeaconBlockSummary](1024*1024)
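
The first hunk above keeps `kBackfillBlock` around even though nothing writes it any more. A hedged sketch of why, assuming (as in this file) that the on-disk subkey is derived from the enum's ordinal value - the real `DbKeyKind` and `subkey` are larger, this only illustrates the mechanism:

type
  DbKeyKind = enum
    # earlier members elided - real ordinal values differ
    kHashToStateOnlyMutableValidators
    kBackfillBlock # Obsolete, but reserves the value old `unstable` databases wrote

func subkey(kind: DbKeyKind): array[1, byte] =
  # the key byte is the ordinal; dropping the member would let a future key
  # reuse the value that old `unstable` databases already wrote for backfill
  result[0] = uint8(ord(kind))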

View File

@@ -273,7 +273,7 @@ proc addBackfillBlock*(
logScope:
blockRoot = shortLog(signedBlock.root)
blck = shortLog(signedBlock.message)
backfill = (dag.backfill.slot, shortLog(dag.backfill.root))
backfill = (dag.backfill.slot, shortLog(dag.backfill.parent_root))
template blck(): untyped = signedBlock.message # shortcuts without copy
template blockRoot(): untyped = signedBlock.root
@@ -292,7 +292,7 @@ proc addBackfillBlock*(
debug "Block unviable or duplicate"
return err(BlockError.UnviableFork)
if dag.backfill.root != signedBlock.root:
if dag.backfill.parent_root != signedBlock.root:
debug "Block does not match expected backfill root"
return err(BlockError.MissingParent) # MissingChild really, but ..
@@ -319,8 +319,7 @@ proc addBackfillBlock*(
return err(BlockError.Invalid)
dag.putBlock(signedBlock.asTrusted())
dag.db.putBackfillBlock(signedBlock.root)
dag.backfill = (blck.slot, blck.parent_root)
dag.backfill = blck.toBeaconBlockSummary()
# Invariants maintained on startup
doAssert dag.backfillBlocks.lenu64 == dag.tail.slot.uint64

View File

@@ -113,14 +113,9 @@ type
## go - the tail block is unique in that its parent is set to `nil`, even
## in the case where a later genesis block exists.
backfill*: tuple[slot: Slot, root: Eth2Digest] ##\
## The backfill is the root of the parent of the earliest block that we
## have synced, when performing a checkpoint sync start. Because the
## `tail` BlockRef does not have a parent, we store here the root of the
## block we're expecting during backfill.
## When starting a checkpoint sync, `backfill` == `tail.parent_root` - we
## then sync backwards, moving the backfill (but not tail!) until we hit
## genesis at which point we set backfill to the zero hash.
backfill*: BeaconBlockSummary ##\
## The backfill points to the oldest block that we have, in the database -
## when backfilling, we'll be fetching its parent first
heads*: seq[BlockRef] ##\
## Candidate heads of candidate chains
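
Because `backfill` now carries the oldest stored block's `parent_root` directly, a backfill driver only ever has to request that root and hand the result to `addBackfillBlock`, which advances the summary on success. A hedged sketch of such a loop - `requestBlockByRoot` is a hypothetical fetch helper, not something this patch adds:

while dag.backfill.slot > GENESIS_SLOT:
  # hypothetical network fetch; the real sync machinery drives this
  let signedBlock = requestBlockByRoot(dag.backfill.parent_root)
  if dag.addBackfillBlock(signedBlock).isErr():
    break # MissingParent, UnviableFork, ... - see addBackfillBlock above
  # on success dag.backfill is the accepted block's summary, i.e. the
  # pointer has moved one block closer to genesis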

View File

@@ -354,7 +354,6 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
let
tailBlockRoot = db.getTailBlock()
headBlockRoot = db.getHeadBlock()
backfillBlockRoot = db.getBackfillBlock()
doAssert tailBlockRoot.isSome(), "Missing tail block, database corrupt?"
doAssert headBlockRoot.isSome(), "Missing head block, database corrupt?"
@@ -375,18 +374,6 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
"preInit should have initialized the database with a genesis block")
withBlck(genesisBlock): BlockRef.init(genesisBlockRoot, blck.message)
let backfill =
if backfillBlockRoot.isSome():
let backfillBlock = db.getForkedBlock(backfillBlockRoot.get()).expect(
"backfill block must be present in database, database corrupt?")
(getForkedBlockField(backfillBlock, slot),
getForkedBlockField(backfillBlock, parentRoot))
elif tailRef.slot > GENESIS_SLOT:
(getForkedBlockField(tailBlock, slot),
getForkedBlockField(tailBlock, parentRoot))
else:
(GENESIS_SLOT, Eth2Digest())
var
blocks: HashSet[KeyedBlockRef]
headRef: BlockRef
@@ -399,11 +386,15 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
var
backfillBlocks = newSeq[Eth2Digest](tailRef.slot.int)
curRef: BlockRef
backfill = BeaconBlockSummary(slot: GENESIS_SLOT)
for blck in db.getAncestorSummaries(headRoot):
if blck.summary.slot < tailRef.slot:
backfillBlocks[blck.summary.slot.int] = blck.root
backfill = blck.summary
elif blck.summary.slot == tailRef.slot:
backfill = blck.summary
if curRef == nil:
curRef = tailRef
headRef = tailRef
@@ -564,7 +555,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
finalizedHead = shortLog(dag.finalizedHead),
tail = shortLog(dag.tail),
totalBlocks = dag.blocks.len(),
backfill = (dag.backfill.slot, shortLog(dag.backfill.root))
backfill = (dag.backfill.slot, shortLog(dag.backfill.parent_root))
dag
@@ -1386,33 +1377,45 @@ proc updateHead*(
dag.finalizedHead.slot.epoch)
dag.onFinHappened(data)
proc isInitialized*(T: type ChainDAGRef, db: BeaconChainDB): bool =
proc isInitialized*(T: type ChainDAGRef, db: BeaconChainDB): Result[void, cstring] =
# Lightweight check to see if we have the minimal information needed to
# load up a database - we don't check head here - if something is wrong with
# head, it's likely an initialized, but corrupt database - init will detect
# that
let
genesisBlockRoot = db.getGenesisBlock()
tailBlockRoot = db.getTailBlock()
if not (genesisBlockRoot.isSome() and tailBlockRoot.isSome()):
return false
if not genesisBlockRoot.isSome():
return err("Genesis block root missing")
let
genesisBlock = db.getForkedBlock(genesisBlockRoot.get())
tailBlock = db.getForkedBlock(tailBlockRoot.get())
if not genesisBlock.isSome():
return err("Genesis block missing")
if not (genesisBlock.isSome() and tailBlock.isSome()):
return false
let
genesisStateRoot = withBlck(genesisBlock.get()): blck.message.state_root
if not db.containsState(genesisStateRoot):
return err("Genesis state missing")
let
tailBlockRoot = db.getTailBlock()
if not tailBlockRoot.isSome():
return err("Tail block root missing")
let
tailBlock = db.getForkedBlock(tailBlockRoot.get())
if not tailBlock.isSome():
return err("Tail block missing")
let
tailStateRoot = withBlck(tailBlock.get()): blck.message.state_root
if not (
db.containsState(genesisStateRoot) and db.containsState(tailStateRoot)):
return false
if not db.containsState(tailStateRoot):
return err("Tail state missing")
true
ok()
proc preInit*(
T: type ChainDAGRef, db: BeaconChainDB,
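
With `isInitialized` returning `Result[void, cstring]` instead of a bare `bool`, callers can report which prerequisite is missing; the hunks below all switch to this pattern. A minimal sketch of a call site, matching the diff's usage:

let check = ChainDAGRef.isInitialized(db)
if check.isErr():
  echo "Database not initialized: ", check.error()
  quit 1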

View File

@@ -199,7 +199,7 @@ proc init(T: type BeaconNode,
quit 1
var eth1Monitor: Eth1Monitor
if not ChainDAGRef.isInitialized(db):
if not ChainDAGRef.isInitialized(db).isOk():
var
tailState: ref ForkedHashedBeaconState
tailBlock: ForkedTrustedSignedBeaconBlock
@@ -279,7 +279,7 @@ proc init(T: type BeaconNode,
try:
ChainDAGRef.preInit(db, genesisState[], tailState[], tailBlock)
doAssert ChainDAGRef.isInitialized(db), "preInit should have initialized db"
doAssert ChainDAGRef.isInitialized(db).isOk(), "preInit should have initialized db"
except CatchableError as exc:
error "Failed to initialize database", err = exc.msg
quit 1

View File

@@ -5,6 +5,3 @@
@if release:
-d:"chronicles_line_numbers:0"
@end
# Use only `secp256k1` public key cryptography as an identity in LibP2P.
-d:"libp2p_pki_schemes=secp256k1"

View File

@@ -5,6 +5,3 @@
@if release:
-d:"chronicles_line_numbers:0"
@end
# Use only `secp256k1` public key cryptography as an identity in LibP2P.
-d:"libp2p_pki_schemes=secp256k1"

View File

@@ -5,6 +5,3 @@
@if release:
-d:"chronicles_line_numbers:0"
@end
# Use only `secp256k1` public key cryptography as an identity in LibP2P.
-d:"libp2p_pki_schemes=secp256k1"

View File

@@ -195,8 +195,8 @@ proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =
db.close()
dbBenchmark.close()
if not ChainDAGRef.isInitialized(db):
echo "Database not initialized"
if (let v = ChainDAGRef.isInitialized(db); v.isErr()):
echo "Database not initialized: ", v.error()
quit 1
echo "Initializing block pool..."
@@ -470,8 +470,8 @@ proc cmdRewindState(conf: DbConf, cfg: RuntimeConfig) =
let db = BeaconChainDB.new(conf.databaseDir.string)
defer: db.close()
if not ChainDAGRef.isInitialized(db):
echo "Database not initialized"
if (let v = ChainDAGRef.isInitialized(db); v.isErr()):
echo "Database not initialized: ", v.error()
quit 1
echo "Initializing block pool..."
@@ -501,8 +501,8 @@ proc cmdExportEra(conf: DbConf, cfg: RuntimeConfig) =
let db = BeaconChainDB.new(conf.databaseDir.string)
defer: db.close()
if not ChainDAGRef.isInitialized(db):
echo "Database not initialized"
if (let v = ChainDAGRef.isInitialized(db); v.isErr()):
echo "Database not initialized: ", v.error()
quit 1
echo "Initializing block pool..."
@@ -565,8 +565,8 @@ proc cmdValidatorPerf(conf: DbConf, cfg: RuntimeConfig) =
defer:
db.close()
if not ChainDAGRef.isInitialized(db):
echo "Database not initialized"
if (let v = ChainDAGRef.isInitialized(db); v.isErr()):
echo "Database not initialized: ", v.error()
quit 1
echo "# Initializing block pool..."
@@ -705,8 +705,8 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
defer:
db.close()
if not ChainDAGRef.isInitialized(db):
echo "Database not initialized"
if (let v = ChainDAGRef.isInitialized(db); v.isErr()):
echo "Database not initialized: ", v.error()
quit 1
echo "Initializing block pool..."

tests/nim.cfg (new file)
View File

@@ -0,0 +1,2 @@
# Use only `secp256k1` public key cryptography as an identity in LibP2P.
-d:"libp2p_pki_schemes=secp256k1"

View File

@@ -1 +0,0 @@
-d:"libp2p_pki_schemes=secp256k1"