Make trusted node sync era-aware (#4283)

This PR removes a bunch of code and makes trusted node sync (TNS) aware of era
files, avoiding a duplicated backfill when era files are available.

* reuse chaindag for loading backfill state, replacing the TNS homebrew
* fix era block iteration to skip empty slots
* add tests for `can_advance_slots`
Jacek Sieka 2022-11-10 11:44:47 +01:00 committed by GitHub
parent a63ec842bc
commit 09ade6d33d
8 changed files with 250 additions and 303 deletions
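
The change in a nutshell, as a minimal runnable sketch (stand-in names, not the
actual nimbus-eth2 API): backfill now only needs to cover the gap between the
era-file frontfill point and the database backfill point, so history that era
files already provide is never downloaded a second time.

```nim
# Illustrative sketch only - stand-in types, not the nimbus-eth2 API.
type Slot = uint64

func missingBackfillSlots(backfillSlot, frontfillSlot: Slot): Slot =
  ## Blocks in (frontfillSlot, backfillSlot) are in neither the database nor
  ## the era files; only these still need downloading.
  if backfillSlot > frontfillSlot: backfillSlot - frontfillSlot
  else: 0

when isMainModule:
  # era files reach the checkpoint: no duplicated backfill
  doAssert missingBackfillSlots(backfillSlot = 1000, frontfillSlot = 1000) == 0
  # era files end earlier: only the gap is fetched
  doAssert missingBackfillSlots(backfillSlot = 1000, frontfillSlot = 400) == 600
```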


@@ -47,13 +47,14 @@ OK: 17/17 Fail: 0/17 Skip: 0/17
## Beacon state [Preset: mainnet]
```diff
+ Smoke test initialize_beacon_state_from_eth1 [Preset: mainnet] OK
+ can_advance_slots OK
+ dependent_root OK
+ get_beacon_proposer_index OK
+ latest_block_root OK
+ merklizer state roundtrip OK
+ process_slots OK
```
-OK: 6/6 Fail: 0/6 Skip: 0/6
+OK: 7/7 Fail: 0/7 Skip: 0/7
## Beacon time
```diff
+ basics OK
@@ -607,4 +608,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
OK: 9/9 Fail: 0/9 Skip: 0/9
---TOTAL---
-OK: 336/341 Fail: 0/341 Skip: 5/341
+OK: 337/342 Fail: 0/342 Skip: 5/342


@@ -302,7 +302,8 @@ proc addHeadBlock*(
proc addBackfillBlock*(
dag: ChainDAGRef,
-signedBlock: ForkySignedBeaconBlock): Result[void, BlockError] =
+signedBlock: ForkySignedBeaconBlock | ForkySigVerifiedSignedBeaconBlock):
+Result[void, BlockError] =
## When performing checkpoint sync, we need to backfill historical blocks
## in order to respond to GetBlocksByRange requests. Backfill blocks are
## added in backwards order, one by one, based on the `parent_root` of the
@@ -322,34 +323,35 @@ proc addBackfillBlock*(
template checkSignature =
# If the hash is correct, the block itself must be correct, but the root does
# not cover the signature, which we check next
if blck.slot == GENESIS_SLOT:
# The genesis block must have an empty signature (since there's no proposer)
if signedBlock.signature != ValidatorSig():
info "Invalid genesis block signature"
return err(BlockError.Invalid)
else:
let proposerKey = dag.validatorKey(blck.proposer_index)
if proposerKey.isNone():
# We've verified that the block root matches our expectations by following
# the chain of parents all the way from checkpoint. If all those blocks
# were valid, the proposer_index in this block must also be valid, and we
# should have a key for it but we don't: this is either a bug on our end from
# which we cannot recover, or an invalid checkpoint state was given in which
# case we're in trouble.
fatal "Invalid proposer in backfill block - checkpoint state corrupt?",
head = shortLog(dag.head), tail = shortLog(dag.tail)
when signedBlock.signature isnot TrustedSig:
if blck.slot == GENESIS_SLOT:
# The genesis block must have an empty signature (since there's no proposer)
if signedBlock.signature != ValidatorSig():
info "Invalid genesis block signature"
return err(BlockError.Invalid)
else:
let proposerKey = dag.validatorKey(blck.proposer_index)
if proposerKey.isNone():
# We've verified that the block root matches our expectations by following
# the chain of parents all the way from checkpoint. If all those blocks
# were valid, the proposer_index in this block must also be valid, and we
# should have a key for it but we don't: this is either a bug on our end from
# which we cannot recover, or an invalid checkpoint state was given in which
# case we're in trouble.
fatal "Invalid proposer in backfill block - checkpoint state corrupt?",
head = shortLog(dag.head), tail = shortLog(dag.tail)
quit 1
quit 1
if not verify_block_signature(
dag.forkAtEpoch(blck.slot.epoch),
getStateField(dag.headState, genesis_validators_root),
blck.slot,
signedBlock.root,
proposerKey.get(),
signedBlock.signature):
info "Block signature verification failed"
return err(BlockError.Invalid)
if not verify_block_signature(
dag.forkAtEpoch(blck.slot.epoch),
getStateField(dag.headState, genesis_validators_root),
blck.slot,
signedBlock.root,
proposerKey.get(),
signedBlock.signature):
info "Block signature verification failed"
return err(BlockError.Invalid)
let startTick = Moment.now()
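
The new `when signedBlock.signature isnot TrustedSig:` guard removes the whole
signature check at compile time for block types whose signatures were already
verified, as trusted-node-sync blocks now are. A self-contained sketch of that
compile-time dispatch pattern (stand-in types, not the real nimbus-eth2 ones):

```nim
# Sketch of compile-time dispatch on the signature type - stand-in types.
type
  TrustedSig = object
  ValidatorSig = object
  SignedBlock[SigType] = object
    signature: SigType

var verifications = 0

proc addBackfill[SigType](blck: SignedBlock[SigType]) =
  when blck.signature isnot TrustedSig:
    # this branch is only instantiated for untrusted signature types
    inc verifications

addBackfill(SignedBlock[ValidatorSig]())
addBackfill(SignedBlock[TrustedSig]())  # check compiled out entirely
doAssert verifications == 1
```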


@@ -279,7 +279,7 @@ proc getForkedBlock*(
result.err()
return
-proc getBlockId(db: BeaconChainDB, root: Eth2Digest): Opt[BlockId] =
+proc getBlockId*(db: BeaconChainDB, root: Eth2Digest): Opt[BlockId] =
block: # We might have a summary in the database
let summary = db.getBeaconBlockSummary(root)
if summary.isOk():
@@ -623,6 +623,23 @@ proc getState(
db.getState(cfg.stateForkAtEpoch(slot.epoch), state_root, state, rollback)
proc getState*(
db: BeaconChainDB, cfg: RuntimeConfig, block_root: Eth2Digest,
slots: Slice[Slot], state: var ForkedHashedBeaconState,
rollback: RollbackProc): bool =
var slot = slots.b
while slot >= slots.a:
let state_root = db.getStateRoot(block_root, slot)
if state_root.isSome() and
db.getState(
cfg.stateForkAtEpoch(slot.epoch), state_root.get(), state, rollback):
return true
if slot == slots.a: # avoid underflow at genesis
break
slot -= 1
false
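
The new `getState` overload scans the requested slot range from newest to
oldest and loads the first state whose root is stored. A stripped-down sketch
of just that loop, with a predicate standing in for the two database lookups:

```nim
# Sketch of the newest-first scan - `hasState` stands in for the db lookup.
type Slot = uint64

proc findState(hasState: proc(s: Slot): bool, slots: Slice[Slot]): bool =
  var slot = slots.b
  while slot >= slots.a:
    if hasState(slot):
      return true
    if slot == slots.a:  # avoid uint64 underflow at genesis
      break
    dec slot
  false

doAssert findState(proc(s: Slot): bool = s == 3, Slot(0) .. Slot(5))
doAssert not findState(proc(s: Slot): bool = false, Slot(0) .. Slot(5))
```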
proc getState(
dag: ChainDAGRef, bsi: BlockSlotId, state: var ForkedHashedBeaconState): bool =
## Load a state from the database given a block and a slot - this will first
@@ -676,16 +693,8 @@ proc getStateByParent(
func rollback() =
assign(v[], rollbackAddr[])
while true:
if dag.db.getState(dag.cfg, summary.parent_root, slot, state, rollback):
return true
if slot == parentMinSlot:
return false
slot -= 1
return false
dag.db.getState(
dag.cfg, summary.parent_root, parentMinSlot..slot, state, rollback)
proc currentSyncCommitteeForPeriod*(
dag: ChainDAGRef,
@@ -938,15 +947,9 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
dag.forkBlocks.incl(KeyedBlockRef.init(curRef))
if not foundHeadState:
while slot >= blck.summary.slot:
# Try loading state from database - we need the head state early on to
# establish the (real) finalized checkpoint
if db.getState(cfg, blck.root, slot, dag.headState, noRollback):
foundHeadState = true
break
slot -= 1
slot += 1
foundHeadState = db.getState(
cfg, blck.root, blck.summary.slot..slot, dag.headState, noRollback)
slot = blck.summary.slot
if not foundHeadState:
# When the database has been written with a pre-fork version of the
@@ -1090,37 +1093,33 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
var
blocks = 0
parent: Eth2Digest
# Here, we'll build up the slot->root mapping in memory for the range of
# blocks from genesis to backfill, if possible.
for summary in dag.era.getBlockIds(historical_roots, Slot(0)):
if summary.slot >= dag.backfill.slot:
for bid in dag.era.getBlockIds(historical_roots, Slot(0), Eth2Digest()):
if bid.slot >= dag.backfill.slot:
# If we end up in here, we failed the root comparison just below in
# an earlier iteration
fatal "Era summaries don't lead up to backfill, database or era files corrupt?",
slot = summary.slot
bid
quit 1
# In BeaconState.block_roots, empty slots are filled with the root of
# the previous block - in our data structure, we use a zero hash instead
if summary.root != parent:
dag.frontfillBlocks.setLen(summary.slot.int + 1)
dag.frontfillBlocks[summary.slot.int] = summary.root
dag.frontfillBlocks.setLen(bid.slot.int + 1)
dag.frontfillBlocks[bid.slot.int] = bid.root
if summary.root == dag.backfill.parent_root:
# We've reached the backfill point, meaning blocks are available
# in the sqlite database from here onwards - remember this point in
# time so that we can write summaries to the database - it's a lot
# faster to load from database than to iterate over era files with
# the current naive era file reader.
reset(dag.backfill)
if bid.root == dag.backfill.parent_root:
# We've reached the backfill point, meaning blocks are available
# in the sqlite database from here onwards - remember this point in
# time so that we can write summaries to the database - it's a lot
# faster to load from database than to iterate over era files with
# the current naive era file reader.
reset(dag.backfill)
dag.updateFrontfillBlocks()
dag.updateFrontfillBlocks()
break
parent = summary.root
break
blocks += 1
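
A small sketch of the slot-to-root mapping being built here (assumed
semantics): `setLen` zero-fills new entries, so slots without a block keep an
all-zero root, which is exactly the "zero hash instead" convention the comment
above describes.

```nim
# Sketch of the frontfill mapping - a 4-byte stand-in for Eth2Digest.
type Root = array[4, byte]

var frontfillBlocks: seq[Root]
# (slot, root) pairs as the era iteration would yield them; slot 1 is empty
for (slot, root) in [(0, Root([1'u8, 0, 0, 0])), (2, Root([2'u8, 0, 0, 0]))]:
  frontfillBlocks.setLen(slot + 1)
  frontfillBlocks[slot] = root

doAssert frontfillBlocks[1] == default(Root)  # empty slot stays zeroed
doAssert frontfillBlocks[2] == Root([2'u8, 0, 0, 0])
```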


@@ -352,10 +352,15 @@ proc getPartialState(
false
iterator getBlockIds*(
-db: EraDB, historical_roots: openArray[Eth2Digest], start_slot: Slot): BlockId =
+db: EraDB, historical_roots: openArray[Eth2Digest],
+start_slot: Slot, prev_root: Eth2Digest): BlockId =
## Iterate over block roots starting from the given slot - `prev_root` must
## point to the last block added to the chain before `start_slot` such that
## empty slots can be filtered out correctly
var
state = (ref PartialBeaconState)() # avoid stack overflow
slot = start_slot
prev_root = prev_root
while true:
# `case` ensures we're on a fork for which the `PartialBeaconState`
@@ -372,11 +377,14 @@ iterator getBlockIds*(
break
let
x = slot.int mod state[].block_roots.len
for i in x..<state[].block_roots.len():
# TODO these are not actually valid BlockId instances in the case where
# the slot is missing a block - use index to filter..
yield BlockId(root: state[].block_roots.data[i], slot: slot)
x = slot.uint64 mod state[].block_roots.lenu64
for i in x..<state[].block_roots.lenu64():
# When no block is included for a particular slot, the block root is
# repeated
if slot == 0 or prev_root != state[].block_roots.data[i]:
yield BlockId(root: state[].block_roots.data[i], slot: slot)
prev_root = state[].block_roots.data[i]
slot += 1
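
In `BeaconState.block_roots` an empty slot simply repeats the previous block's
root, so yielding only when the root changes (seeded with `prev_root` for the
first entry) recovers exactly the slots that contain blocks. A toy version of
the filter, with plain ints standing in for roots:

```nim
# Toy version of the empty-slot filter - ints stand in for block roots.
iterator presentSlots(roots: openArray[int], startSlot: int,
                      prevRoot: int): (int, int) =
  var prev = prevRoot
  for i, root in roots:
    let slot = startSlot + i
    if slot == 0 or root != prev:  # repeated root means the slot was empty
      yield (slot, root)
    prev = root

var got: seq[(int, int)]
for sr in presentSlots([10, 10, 11, 11, 12], startSlot = 0, prevRoot = 0):
  got.add sr
doAssert got == @[(0, 10), (2, 11), (4, 12)]
```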
proc new*(
@@ -399,31 +407,50 @@ when isMainModule:
db = EraDB.new(
defaultRuntimeConfig, dbPath,
Eth2Digest(
data: array[32, byte].initCopyFrom([byte 0x4b, 0x36, 0x3d, 0xb9])))
Eth2Digest.fromHex(
"0x4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95"))
historical_roots = [
Eth2Digest(
data: array[32, byte].initCopyFrom([byte 0x40, 0xcf, 0x2f, 0x3c]))]
Eth2Digest.fromHex(
"0x40cf2f3cffd63d9ffeb89999ee359926abfa07ca5eb3fe2a70bc9d6b15720b8c"),
Eth2Digest.fromHex(
"0x74a3850f3cbccce2271f7c99e53ab07dae55cd8022c937c2dde7a20c5a2b83f9")]
var got8191 = false
var slot4: Eth2Digest
for bid in db.getBlockIds(historical_roots, Era(0)):
if bid.slot == Slot(1):
var
got0 = false
got8191 = false
got8192 = false
got8193 = false
for bid in db.getBlockIds(historical_roots, Slot(0), Eth2Digest()):
if bid.slot == Slot(0):
doAssert bid.root == Eth2Digest.fromHex(
"0x4d611d5b93fdab69013a7f0a2f961caca0c853f87cfe9595fe50038163079360")
got0 = true
elif bid.slot == Slot(1):
doAssert bid.root == Eth2Digest.fromHex(
"0xbacd20f09da907734434f052bd4c9503aa16bab1960e89ea20610d08d064481c")
elif bid.slot == Slot(4):
slot4 = bid.root
elif bid.slot == Slot(5) and bid.root != slot4:
raiseAssert "this slot was skipped, should have same root"
elif bid.slot == Slot(5):
raiseAssert "this slot was skipped, should not be iterated over"
elif bid.slot == Slot(8191):
doAssert bid.root == Eth2Digest.fromHex(
"0x48ea23af46320b0290eae668b0c3e6ae3e0534270f897db0e83a57f51a22baca")
got8191 = true
elif bid.slot == Slot(8192):
doAssert bid.root == Eth2Digest.fromHex(
"0xa7d379a9cbf87ae62127ddee8660ddc08a83a788087d23eaddd852fd8c408ef1")
got8192 = true
elif bid.slot == Slot(8193):
doAssert bid.root == Eth2Digest.fromHex(
"0x0934b14ec4ec9d45f4a2a7c3e4f6bb12d35444c74de8e30c13138c4d41b393aa")
got8193 = true
break
doAssert got0
doAssert got8191
doAssert got8192
doAssert got8193
doAssert db.getBlock(
historical_roots, Slot(1), Opt[Eth2Digest].err(),
phase0.TrustedSignedBeaconBlock).get().root ==
Eth2Digest.fromHex(
"0xbacd20f09da907734434f052bd4c9503aa16bab1960e89ea20610d08d064481c")
doAssert got8191
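
The asserted slots bracket an era-file boundary: an era file holds 8192 slots,
so 8191 is the last slot served from era 0 and 8192 the first from era 1, and
the test exercises the hand-off between files (8193 then confirms iteration
continues past the boundary). In short:

```nim
const slotsPerEra = 8192'u64  # slots per era file (SLOTS_PER_HISTORICAL_ROOT)
doAssert 8191'u64 div slotsPerEra == 0  # last slot read from era file 0
doAssert 8192'u64 div slotsPerEra == 1  # first slot read from era file 1
```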


@@ -1999,6 +1999,7 @@ proc handleStartUpCmd(config: var BeaconNodeConf) {.raises: [Defect, CatchableEr
waitFor doTrustedNodeSync(
cfg,
config.databaseDir,
config.eraDir,
config.trustedNodeUrl,
config.stateId,
config.backfillBlocks,


@@ -1158,7 +1158,7 @@ func matches_block_slot*(
func can_advance_slots*(
state: ForkyHashedBeaconState, block_root: Eth2Digest, target_slot: Slot): bool =
## Return true iff we can reach the given block/slot combination simply by
-## advancing slots
+## advancing 0 or more slots
target_slot >= state.data.slot and block_root == state.latest_block_root
func can_advance_slots*(
state: ForkedHashedBeaconState, block_root: Eth2Digest, target_slot: Slot): bool =
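
The predicate is just two comparisons; a stand-alone sketch with plain
integers as stand-ins for the state slot and block roots, to make the
semantics concrete:

```nim
# Stand-alone restatement of the predicate - integer stand-ins only.
func canAdvanceSlots(stateSlot, targetSlot: uint64,
                     latestBlockRoot, blockRoot: int): bool =
  targetSlot >= stateSlot and blockRoot == latestBlockRoot

doAssert canAdvanceSlots(5, 5, 1, 1)      # advancing 0 slots is allowed
doAssert canAdvanceSlots(5, 9, 1, 1)      # any later slot on the same head
doAssert not canAdvanceSlots(5, 4, 1, 1)  # slots cannot be rewound
doAssert not canAdvanceSlots(5, 9, 1, 2)  # different head block: replay needed
```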


@@ -13,60 +13,14 @@ import
stew/base10,
chronicles, chronos,
./sync/sync_manager,
-./consensus_object_pools/blockchain_dag,
+./consensus_object_pools/[block_clearance, blockchain_dag],
./spec/eth2_apis/rest_beacon_client,
./spec/[beaconstate, eth2_merkleization, forks, presets, state_transition],
"."/[beacon_clock, beacon_chain_db]
type
DbCache = object
summaries: Table[Eth2Digest, BeaconBlockSummary]
slots: seq[Option[Eth2Digest]]
proc updateSlots(cache: var DbCache, slot: Slot) =
if cache.slots.lenu64() < slot:
cache.slots.setLen(slot.int + 1)
proc updateSlots(cache: var DbCache, root: Eth2Digest, slot: Slot) =
# The slots mapping stores one linear block history - we construct it by
# starting from a given root/slot and walking the known parents as far back
# as possible which ensures that all blocks belong to the same history
cache.updateSlots(slot)
var
root = root
lastSlot = slot
while true:
cache.summaries.withValue(root, v) do:
let slot = v[].slot
for i in slot.int + 1..<lastSlot.int: # Avoid re-querying known gaps
cache.slots[i] = some(ZERO_HASH)
cache.slots[slot.int] = some(root)
if slot == 0:
return
root = v[].parent_root
lastSlot = slot
do:
return
proc update(cache: var DbCache, blck: ForkySignedBeaconBlock) =
if blck.root notin cache.summaries:
cache.summaries[blck.root] = blck.message.toBeaconBlockSummary()
cache.updateSlots(blck.root, blck.message.slot)
proc isKnown(cache: DbCache, slot: Slot): bool =
slot < cache.slots.lenu64 and cache.slots[slot.int].isSome()
"."/[beacon_clock, beacon_chain_db, era_db]
proc doTrustedNodeSync*(
-cfg: RuntimeConfig, databaseDir: string, restUrl: string,
-stateId: string, backfill: bool, reindex: bool,
+cfg: RuntimeConfig, databaseDir, eraDir, restUrl, stateId: string,
+backfill: bool, reindex: bool,
genesisState: ref ForkedHashedBeaconState = nil) {.async.} =
logScope:
restUrl
@@ -75,57 +29,16 @@ proc doTrustedNodeSync*(
notice "Starting trusted node sync",
databaseDir, backfill, reindex
var
client = RestClientRef.new(restUrl).valueOr:
error "Cannot connect to server", error = error
quit 1
let
db = BeaconChainDB.new(databaseDir, inMemory = false)
defer:
db.close()
var
dbCache = DbCache(summaries: db.loadSummaries())
let
dbHead = db.getHeadBlock()
headSlot = if dbHead.isSome():
if dbHead.get() notin dbCache.summaries:
# This can happen with pre-blocksummary database - it's better to start
# over in this case
error "Database missing head block summary - database too old or corrupt"
quit 1
let slot = dbCache.summaries[dbHead.get()].slot
dbCache.updateSlots(dbHead.get(), slot)
slot
else:
# When we don't have a head, we'll use the given checkpoint as head
FAR_FUTURE_SLOT
var client = RestClientRef.new(restUrl).valueOr:
error "Cannot connect to server", error = error
quit 1
proc downloadBlock(slot: Slot):
Future[Option[ref ForkedSignedBeaconBlock]] {.async.} =
# Download block at given slot, retrying a few times
var lastError: ref CatchableError
for i in 0..<3:
try:
return await client.getBlockV2(BlockIdent.init(slot), cfg)
except RestResponseError as exc:
lastError = exc
notice "Server does not support block downloads / backfilling",
msg = exc.msg
break
except CatchableError as exc:
# We'll assume this may be a connectivity error or something similar
lastError = exc
warn "Retrying download of block", slot, err = exc.msg
client = RestClientRef.new(restUrl).valueOr:
error "Cannot connect to server", url = restUrl, error = error
quit 1
raise lastError
# If possible, we'll store the genesis state in the database - this is not
# strictly necessary but renders the resulting database compatible with
# versions prior to 22.11 and makes reindexing possible
@@ -170,7 +83,21 @@ proc doTrustedNodeSync*(
restUrl
tmp
let (checkpointSlot, checkpointRoot) = if dbHead.isNone:
let
dbHead = db.getHeadBlock()
head = if dbHead.isSome():
let
bid = db.getBlockId(dbHead.get()).valueOr:
error "Database missing head block summary - database too old or corrupt",
headRoot = dbHead.get()
quit 1
Opt.some bid
else:
# When we don't have a head, we'll use the given checkpoint as head
Opt.none(BlockId)
if head.isNone:
notice "Downloading checkpoint state"
let
@@ -210,48 +137,34 @@ proc doTrustedNodeSync*(
rootInGenesis = getStateField(genesisState[], genesis_validators_root)
quit 1
withState(genesisState[]):
let blck = get_initial_beacon_block(forkyState)
dbCache.update(blck.asSigned())
ChainDAGRef.preInit(db, genesisState[])
if getStateField(genesisState[], slot) != getStateField(state[], slot):
ChainDAGRef.preInit(db, state[])
else:
ChainDAGRef.preInit(db, state[])
let latest_bid = state[].latest_block_id()
(latest_bid.slot, latest_bid.root)
else:
notice "Skipping checkpoint download, database already exists (remove db directory to get a fresh snapshot)",
databaseDir, head = shortLog(dbHead.get())
(headSlot, dbHead.get())
databaseDir, head = shortLog(head.get())
# Coming this far, we've done what ChainDAGRef.preInit would normally do -
# Let's do a sanity check and start backfilling blocks from the trusted node
if (let v = ChainDAGRef.isInitialized(db); v.isErr()):
error "Database not initialized after checkpoint sync, report bug",
err = v.error()
quit 1
dbCache.updateSlots(checkpointSlot)
# we can now load a ChainDAG to start backfilling it
let
missingSlots = block:
var total = 0
for slot in Slot(0)..<checkpointSlot:
if not dbCache.isKnown(slot):
total += 1
total
validatorMonitor = newClone(ValidatorMonitor.init(false, false))
dag = ChainDAGRef.init(cfg, db, validatorMonitor, {}, eraPath = eraDir)
backfillSlot = dag.backfill.slot
frontfill = dag.frontfill.valueOr(BlockId())
let canReindex = if missingSlots == 0:
let canReindex = if backfillSlot <= frontfill.slot:
info "Database backfilled"
true
elif backfill:
# +1 because we need to download the frontfill slot for the frontfill match
# detection to kick in, in addBackfillBlock
let missingSlots = dag.backfill.slot - frontfill.slot + 1
notice "Downloading historical blocks - you can interrupt this process at any time and it automatically be completed when you start the beacon node",
checkpointSlot, missingSlots
backfillSlot, frontfill, missingSlots
var # Same averaging as SyncManager
syncCount = 0
@@ -259,99 +172,79 @@ proc doTrustedNodeSync*(
avgSyncSpeed = 0.0
stamp = SyncMoment.now(0)
proc downloadBlock(slot: Slot):
Future[Option[ref ForkedSignedBeaconBlock]] {.async.} =
# Download block at given slot, retrying a few times
var lastError: ref CatchableError
for i in 0..<3:
try:
return await client.getBlockV2(BlockIdent.init(slot), cfg)
except RestResponseError as exc:
lastError = exc
notice "Server does not support block downloads / backfilling - blocks will be downloaded later",
msg = exc.msg
break
except CatchableError as exc:
# We'll assume this may be a connectivity error or something similar
lastError = exc
warn "Retrying download of block", slot, err = exc.msg
client = RestClientRef.new(restUrl).valueOr:
error "Cannot connect to server", url = restUrl, error = error
quit 1
raise lastError
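
The download helper retries a few times and, on a connectivity-style failure,
rebuilds the client before the next attempt, while a `RestResponseError` (the
server saying it cannot serve blocks at all) breaks out immediately. A generic
sketch of that retry-with-reconnect shape, with stand-in callbacks:

```nim
# Generic sketch of retry-with-reconnect - `attempt`/`reconnect` are stand-ins.
proc fetchWithRetry[T](attempt: proc(): T, reconnect: proc()): T =
  var lastError: ref CatchableError
  for _ in 0 ..< 3:
    try:
      return attempt()
    except CatchableError as exc:
      lastError = exc  # assume a transient connectivity problem
      reconnect()      # e.g. recreate the REST client
  raise lastError

var calls = 0
proc flaky(): int =
  inc calls
  if calls < 3:
    raise newException(IOError, "transient")
  42

doAssert fetchWithRetry[int](flaky, proc() = discard) == 42
```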
# Download several blocks in parallel but process them serially
var gets: array[16, Future[Option[ref ForkedSignedBeaconBlock]]]
proc processBlock(
fut: Future[Option[ref ForkedSignedBeaconBlock]], slot: Slot) {.async.} =
proc processBlock(blck: Option[ref ForkedSignedBeaconBlock]) =
let newStamp = SyncMoment.now(processed)
if newStamp.stamp - stamp.stamp > 12.seconds:
syncCount += 1
let
remaining = dag.backfill.slot - frontfill.slot
slotsPerSec = speed(stamp, newStamp)
avgSyncSpeed = avgSyncSpeed + (slotsPerSec - avgSyncSpeed) / float(syncCount)
info "Backfilling",
timeleft = toTimeLeftString(
if avgSyncSpeed >= 0.001:
Duration.fromFloatSeconds(remaining.float / avgSyncSpeed)
else: InfiniteDuration),
slotsPerSecond = avgSyncSpeed,
remainingSlots = remaining
stamp = newStamp
processed += 1
var blck = await fut
if blck.isNone():
dbCache.slots[slot.int] = some ZERO_HASH
return
if blck.isSome():
let
data = blck.get()
let data = blck.get()
withBlck(data[]):
debug "Processing",
blck = shortLog(blck.message),
blockRoot = shortLog(blck.root)
if blck.message.slot == checkpointSlot:
if blck.root != checkpointRoot:
error "Downloaded block does not match checkpoint history",
blck = shortLog(blck),
expectedRoot = shortLog(checkpointRoot)
quit 1
else:
var childSlot = blck.message.slot + 1
while true:
if childSlot >= dbCache.slots.lenu64():
error "Downloaded block does not match checkpoint history"
withBlck(data[]):
if (let res = dag.addBackfillBlock(blck.asSigVerified()); res.isErr()):
case res.error()
of BlockError.Invalid, BlockError.MissingParent, BlockError.UnviableFork:
error "Got invalid block from trusted node - is it on the right network?",
blck = shortLog(blck), err = res.error()
quit 1
of BlockError.Duplicate:
discard
if not dbCache.slots[childSlot.int].isSome():
# Should never happen - we download slots backwards
error "Downloaded block does not match checkpoint history"
quit 1
let knownRoot = dbCache.slots[childSlot.int].get()
if knownRoot == ZERO_HASH:
childSlot += 1
continue
dbCache.summaries.withValue(knownRoot, summary):
if summary[].parent_root != blck.root:
error "Downloaded block does not match checkpoint history",
blockRoot = shortLog(blck.root),
expectedRoot = shortLog(summary[].parent_root)
quit 1
break
# This shouldn't happen - we should have downloaded the child and
# updated knownBlocks before here
error "Expected child block not found in checkpoint history"
quit 1
if blck.root notin dbCache.summaries:
db.putBlock(blck.asTrusted())
dbCache.update(blck)
let newStamp = SyncMoment.now(processed)
if newStamp.stamp - stamp.stamp > 12.seconds:
syncCount += 1
let
remaining = blck.message.slot.int
slotsPerSec = speed(stamp, newStamp)
avgSyncSpeed = avgSyncSpeed + (slotsPerSec - avgSyncSpeed) / float(syncCount)
info "Backfilling",
timeleft = toTimeLeftString(
if avgSyncSpeed >= 0.001:
Duration.fromFloatSeconds(remaining.float / avgSyncSpeed)
else: InfiniteDuration),
slotsPerSecond = avgSyncSpeed,
remainingSlots = remaining
stamp = newStamp
# Download blocks backwards from the checkpoint slot, skipping the ones we
# already have in the database. We'll do a few downloads in parallel which
# risks having some redundant downloads going on, but speeds things up
# Download blocks backwards from the backfill slot, i.e. the first slot for
# which we don't have a block, when walking backwards from the head
try:
for i in 0'u64..<(checkpointSlot.uint64 + gets.lenu64()):
if not isNil(gets[int(i mod gets.lenu64)]):
await processBlock(
gets[int(i mod gets.lenu64)],
checkpointSlot + gets.lenu64() - uint64(i))
gets[int(i mod gets.lenu64)] = nil
var
gets: array[16, Future[Option[ref ForkedSignedBeaconBlock]]]
if i < checkpointSlot:
let slot = checkpointSlot - i
if dbCache.isKnown(slot):
continue
for i in 0.uint64..missingSlots + gets.lenu64:
if i >= gets.lenu64():
let
fut = gets[int(i mod gets.lenu64)]
processBlock(await fut)
if i <= backfillSlot:
let slot = backfillSlot - i
gets[int(i mod gets.lenu64)] = downloadBlock(slot)
if i mod 1024 == 0:
@@ -361,6 +254,7 @@ proc doTrustedNodeSync*(
notice "Backfilling incomplete - blocks will be downloaded when starting the node", msg = exc.msg
false
else:
let missingSlots = dag.backfill.slot - frontfill.slot
notice "Database initialized, historical blocks will be backfilled when starting the node",
missingSlots
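
The download loop above keeps a ring of 16 futures in flight: once warmed up,
each iteration awaits the oldest outstanding request before issuing a new one,
so downloads overlap while results are still processed strictly in order. A
runnable miniature of that pattern using std/asyncdispatch (the real code uses
chronos and a ring of 16):

```nim
# Miniature of the ring-buffer download pipeline - stand-in fetch function.
import std/asyncdispatch

proc download(slot: int): Future[int] {.async.} =
  return slot * 2  # stand-in for the network round-trip

proc backfill(top: int) {.async.} =
  var gets: array[4, Future[int]]  # 16 in the real code
  for i in 0 .. top + gets.len:
    if i >= gets.len:
      # await the oldest request in the ring before reusing its slot
      let res = await gets[i mod gets.len]
      doAssert res == (top - i + gets.len) * 2
    if i <= top:
      # issue the next request, walking backwards from the newest slot
      gets[i mod gets.len] = download(top - i)

waitFor backfill(10)
```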
@@ -370,22 +264,18 @@ proc doTrustedNodeSync*(
notice "Reindexing historical state lookup tables (you can interrupt this process at any time)"
# Build a DAG
let
validatorMonitor = newClone(ValidatorMonitor.init(false, false))
dag = ChainDAGRef.init(cfg, db, validatorMonitor, {})
dag.rebuildIndex()
notice "Done, your beacon node is ready to serve you! Don't forget to check that you're on the canonical chain by comparing the checkpoint root with other online sources. See https://nimbus.guide/trusted-node-sync.html for more information.",
checkpointRoot
checkpoint = dag.head
when isMainModule:
import
std/[os],
networking/network_metadata
-let backfill = os.paramCount() > 4 and os.paramStr(5) == "true"
+let backfill = os.paramCount() > 5 and os.paramStr(6) == "true"
waitFor doTrustedNodeSync(
getRuntimeConfig(some os.paramStr(1)), os.paramStr(2), os.paramStr(3),
-os.paramStr(4), backfill, false)
+os.paramStr(4), os.paramStr(5), backfill, false)


@@ -130,3 +130,30 @@ suite "Beacon state" & preset():
check:
dcs == merkleizer.toDepositContractState()
test "can_advance_slots":
var
state = (ref ForkedHashedBeaconState)(
kind: BeaconStateFork.Phase0,
phase0Data: initialize_hashed_beacon_state_from_eth1(
defaultRuntimeConfig, ZERO_HASH, 0,
makeInitialDeposits(SLOTS_PER_EPOCH, {}), {skipBlsValidation}))
genBlock = get_initial_beacon_block(state[])
cache: StateCache
info: ForkedEpochInfo
check:
state[].can_advance_slots(genBlock.root, Slot(0))
state[].can_advance_slots(genBlock.root, Slot(1))
state[].can_advance_slots(genBlock.root, Slot(2))
let blck = addTestBlock(
state[], cache, flags = {skipBlsValidation})
check:
not state[].can_advance_slots(genBlock.root, Slot(0))
not state[].can_advance_slots(genBlock.root, Slot(1))
not state[].can_advance_slots(genBlock.root, Slot(2))
not state[].can_advance_slots(blck.root, Slot(0))
state[].can_advance_slots(blck.root, Slot(1))
state[].can_advance_slots(blck.root, Slot(2))