Make trusted node sync era-aware (#4283)

This PR removes a bunch of code to make TNS aware of era files, avoiding a duplicated backfill when era files are available.

* reuse chaindag for loading backfill state, replacing the TNS homebrew
* fix era block iteration to skip empty slots
* add tests for `can_advance_slots`

parent a63ec842bc
commit 09ade6d33d
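With era files supplying the old history, backfill only has to cover the gap between the era-file frontfill point and the checkpoint. A back-of-envelope sketch of the completion arithmetic used further down in this diff (the slot values are made up for illustration; the `+ 1` mirrors the code comment below - the frontfill slot itself must be downloaded so that `addBackfillBlock` can detect the match):

```nim
let
  frontfillSlot = 4_700_000'u64 # newest slot recoverable from era files (example value)
  backfillSlot = 4_700_123'u64  # oldest slot already fetched from the trusted node (example value)

# Backfill walks backwards from the checkpoint; once it meets the era-file
# frontfill, nothing is left to download and reindexing can start
let missingSlots =
  if backfillSlot <= frontfillSlot: 0'u64
  else: backfillSlot - frontfillSlot + 1

echo missingSlots # 124
```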
@@ -47,13 +47,14 @@ OK: 17/17 Fail: 0/17 Skip: 0/17
 ## Beacon state [Preset: mainnet]
 ```diff
 + Smoke test initialize_beacon_state_from_eth1 [Preset: mainnet] OK
++ can_advance_slots OK
 + dependent_root OK
 + get_beacon_proposer_index OK
 + latest_block_root OK
 + merklizer state roundtrip OK
 + process_slots OK
 ```
-OK: 6/6 Fail: 0/6 Skip: 0/6
+OK: 7/7 Fail: 0/7 Skip: 0/7
 ## Beacon time
 ```diff
 + basics OK
@@ -607,4 +608,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
 OK: 9/9 Fail: 0/9 Skip: 0/9

 ---TOTAL---
-OK: 336/341 Fail: 0/341 Skip: 5/341
+OK: 337/342 Fail: 0/342 Skip: 5/342
@@ -302,7 +302,8 @@ proc addHeadBlock*(

 proc addBackfillBlock*(
     dag: ChainDAGRef,
-    signedBlock: ForkySignedBeaconBlock): Result[void, BlockError] =
+    signedBlock: ForkySignedBeaconBlock | ForkySigVerifiedSignedBeaconBlock):
+    Result[void, BlockError] =
   ## When performing checkpoint sync, we need to backfill historical blocks
   ## in order to respond to GetBlocksByRange requests. Backfill blocks are
   ## added in backwards order, one by one, based on the `parent_root` of the
@@ -322,34 +323,35 @@ proc addBackfillBlock*(
   template checkSignature =
     # If the hash is correct, the block itself must be correct, but the root does
     # not cover the signature, which we check next
-    if blck.slot == GENESIS_SLOT:
-      # The genesis block must have an empty signature (since there's no proposer)
-      if signedBlock.signature != ValidatorSig():
-        info "Invalid genesis block signature"
-        return err(BlockError.Invalid)
-    else:
-      let proposerKey = dag.validatorKey(blck.proposer_index)
-      if proposerKey.isNone():
-        # We've verified that the block root matches our expectations by following
-        # the chain of parents all the way from checkpoint. If all those blocks
-        # were valid, the proposer_index in this block must also be valid, and we
-        # should have a key for it but we don't: this is either a bug on our from
-        # which we cannot recover, or an invalid checkpoint state was given in which
-        # case we're in trouble.
-        fatal "Invalid proposer in backfill block - checkpoint state corrupt?",
-          head = shortLog(dag.head), tail = shortLog(dag.tail)
-
-        quit 1
-
-      if not verify_block_signature(
-          dag.forkAtEpoch(blck.slot.epoch),
-          getStateField(dag.headState, genesis_validators_root),
-          blck.slot,
-          signedBlock.root,
-          proposerKey.get(),
-          signedBlock.signature):
-        info "Block signature verification failed"
-        return err(BlockError.Invalid)
+    when signedBlock.signature isnot TrustedSig:
+      if blck.slot == GENESIS_SLOT:
+        # The genesis block must have an empty signature (since there's no proposer)
+        if signedBlock.signature != ValidatorSig():
+          info "Invalid genesis block signature"
+          return err(BlockError.Invalid)
+      else:
+        let proposerKey = dag.validatorKey(blck.proposer_index)
+        if proposerKey.isNone():
+          # We've verified that the block root matches our expectations by following
+          # the chain of parents all the way from checkpoint. If all those blocks
+          # were valid, the proposer_index in this block must also be valid, and we
+          # should have a key for it but we don't: this is either a bug on our from
+          # which we cannot recover, or an invalid checkpoint state was given in which
+          # case we're in trouble.
+          fatal "Invalid proposer in backfill block - checkpoint state corrupt?",
+            head = shortLog(dag.head), tail = shortLog(dag.tail)
+
+          quit 1
+
+        if not verify_block_signature(
+            dag.forkAtEpoch(blck.slot.epoch),
+            getStateField(dag.headState, genesis_validators_root),
+            blck.slot,
+            signedBlock.root,
+            proposerKey.get(),
+            signedBlock.signature):
+          info "Block signature verification failed"
+          return err(BlockError.Invalid)

   let startTick = Moment.now()
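The `when signedBlock.signature isnot TrustedSig:` wrapper makes the entire signature check a compile-time decision: for the sig-verified block type the branch is simply not compiled in. A minimal self-contained sketch of the pattern (the types here are stand-ins, not the nimbus-eth2 definitions):

```nim
type
  ValidatorSig = object # signature that still needs verification
  TrustedSig = object   # signature vouched for elsewhere
  SignedBlock[S] = object
    signature: S

proc addBlock[S](blck: SignedBlock[S]) =
  when blck.signature isnot TrustedSig:
    # Only instantiated for the unverified variant
    echo "verifying signature at runtime"
  else:
    echo "signature statically trusted - no runtime check emitted"

addBlock(SignedBlock[ValidatorSig]()) # verifying signature at runtime
addBlock(SignedBlock[TrustedSig]())   # signature statically trusted - no runtime check emitted
```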
@@ -279,7 +279,7 @@ proc getForkedBlock*(
     result.err()
     return

-proc getBlockId(db: BeaconChainDB, root: Eth2Digest): Opt[BlockId] =
+proc getBlockId*(db: BeaconChainDB, root: Eth2Digest): Opt[BlockId] =
   block: # We might have a summary in the database
     let summary = db.getBeaconBlockSummary(root)
     if summary.isOk():
@@ -623,6 +623,23 @@ proc getState(
   db.getState(cfg.stateForkAtEpoch(slot.epoch), state_root, state, rollback)

+proc getState*(
+    db: BeaconChainDB, cfg: RuntimeConfig, block_root: Eth2Digest,
+    slots: Slice[Slot], state: var ForkedHashedBeaconState,
+    rollback: RollbackProc): bool =
+  var slot = slots.b
+  while slot >= slots.a:
+    let state_root = db.getStateRoot(block_root, slot)
+    if state_root.isSome() and
+        db.getState(
+          cfg.stateForkAtEpoch(slot.epoch), state_root.get(), state, rollback):
+      return true
+
+    if slot == slots.a: # avoid underflow at genesis
+      break
+    slot -= 1
+
+  false
+
 proc getState(
     dag: ChainDAGRef, bsi: BlockSlotId, state: var ForkedHashedBeaconState): bool =
   ## Load a state from the database given a block and a slot - this will first
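The new overload scans the requested slot range from the newest slot downwards and returns the first (most recent) state it can load; the explicit `slots.a` check avoids wrapping below zero, since `Slot` is unsigned. A standalone sketch of that loop shape, with plain `uint64` standing in for `Slot` and a predicate standing in for the database lookup:

```nim
proc findNewest(slots: Slice[uint64], hit: proc(s: uint64): bool): int64 =
  ## Return the highest slot in `slots` accepted by `hit`, or -1 if none is
  var slot = slots.b
  while slot >= slots.a:
    if hit(slot):
      return slot.int64
    if slot == slots.a: # avoid underflow when slots.a == 0
      break
    slot -= 1
  -1

echo findNewest(0'u64..10'u64, proc(s: uint64): bool = s in [3'u64, 7'u64]) # 7
echo findNewest(0'u64..10'u64, proc(s: uint64): bool = false)               # -1
```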
@@ -676,16 +693,8 @@ proc getStateByParent(
   func rollback() =
     assign(v[], rollbackAddr[])

-  while true:
-    if dag.db.getState(dag.cfg, summary.parent_root, slot, state, rollback):
-      return true
-
-    if slot == parentMinSlot:
-      return false
-
-    slot -= 1
-
-  return false
+  dag.db.getState(
+    dag.cfg, summary.parent_root, parentMinSlot..slot, state, rollback)

 proc currentSyncCommitteeForPeriod*(
     dag: ChainDAGRef,
@@ -938,15 +947,9 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
       dag.forkBlocks.incl(KeyedBlockRef.init(curRef))

     if not foundHeadState:
-      while slot >= blck.summary.slot:
-        # Try loading state from database - we need the head state early on to
-        # establish the (real) finalized checkpoint
-        if db.getState(cfg, blck.root, slot, dag.headState, noRollback):
-          foundHeadState = true
-          break
-        slot -= 1
-
-      slot += 1
+      foundHeadState = db.getState(
+        cfg, blck.root, blck.summary.slot..slot, dag.headState, noRollback)
+      slot = blck.summary.slot

     if not foundHeadState:
       # When the database has been written with a pre-fork version of the
@@ -1090,37 +1093,33 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,

     var
       blocks = 0
-      parent: Eth2Digest

     # Here, we'll build up the slot->root mapping in memory for the range of
     # blocks from genesis to backfill, if possible.
-    for summary in dag.era.getBlockIds(historical_roots, Slot(0)):
-      if summary.slot >= dag.backfill.slot:
+    for bid in dag.era.getBlockIds(historical_roots, Slot(0), Eth2Digest()):
+      if bid.slot >= dag.backfill.slot:
         # If we end up in here, we failed the root comparison just below in
         # an earlier iteration
         fatal "Era summaries don't lead up to backfill, database or era files corrupt?",
-          slot = summary.slot
+          bid
         quit 1

       # In BeaconState.block_roots, empty slots are filled with the root of
       # the previous block - in our data structure, we use a zero hash instead
-      if summary.root != parent:
-        dag.frontfillBlocks.setLen(summary.slot.int + 1)
-        dag.frontfillBlocks[summary.slot.int] = summary.root
+      dag.frontfillBlocks.setLen(bid.slot.int + 1)
+      dag.frontfillBlocks[bid.slot.int] = bid.root

-        if summary.root == dag.backfill.parent_root:
-          # We've reached the backfill point, meaning blocks are available
-          # in the sqlite database from here onwards - remember this point in
-          # time so that we can write summaries to the database - it's a lot
-          # faster to load from database than to iterate over era files with
-          # the current naive era file reader.
-          reset(dag.backfill)
+      if bid.root == dag.backfill.parent_root:
+        # We've reached the backfill point, meaning blocks are available
+        # in the sqlite database from here onwards - remember this point in
+        # time so that we can write summaries to the database - it's a lot
+        # faster to load from database than to iterate over era files with
+        # the current naive era file reader.
+        reset(dag.backfill)

-          dag.updateFrontfillBlocks()
+        dag.updateFrontfillBlocks()

-          break
-
-        parent = summary.root
+        break

       blocks += 1
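`frontfillBlocks` is effectively a slot-indexed array of roots in which slots without a block keep a zero value. A toy version of the mapping being built here (strings stand in for `Eth2Digest`, an empty string for the zero hash):

```nim
var frontfillBlocks: seq[string]

proc record(slot: int, root: string) =
  # Mirrors the setLen-then-assign pattern above: growing the seq fills the
  # gap slots with the default (empty) value
  if frontfillBlocks.len < slot + 1:
    frontfillBlocks.setLen(slot + 1)
  frontfillBlocks[slot] = root

record(0, "genesis")
record(3, "c") # slots 1 and 2 stay empty, i.e. no block in those slots

doAssert frontfillBlocks == @["genesis", "", "", "c"]
```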
@@ -352,10 +352,15 @@ proc getPartialState(
   false

 iterator getBlockIds*(
-    db: EraDB, historical_roots: openArray[Eth2Digest], start_slot: Slot): BlockId =
+    db: EraDB, historical_roots: openArray[Eth2Digest],
+    start_slot: Slot, prev_root: Eth2Digest): BlockId =
+  ## Iterate over block roots starting from the given slot - `prev_root` must
+  ## point out the last block added to the chain before `start_slot` such that
+  ## empty slots can be filtered out correctly
   var
     state = (ref PartialBeaconState)() # avoid stack overflow
     slot = start_slot
+    prev_root = prev_root

   while true:
     # `case` ensures we're on a fork for which the `PartialBeaconState`
@@ -372,11 +377,14 @@ iterator getBlockIds*(
       break

     let
-      x = slot.int mod state[].block_roots.len
-    for i in x..<state[].block_roots.len():
-      # TODO these are not actually valid BlockId instances in the case where
-      # the slot is missing a block - use index to filter..
-      yield BlockId(root: state[].block_roots.data[i], slot: slot)
+      x = slot.uint64 mod state[].block_roots.lenu64
+    for i in x..<state[].block_roots.lenu64():
+      # When no block is included for a particular slot, the block root is
+      # repeated
+      if slot == 0 or prev_root != state[].block_roots.data[i]:
+        yield BlockId(root: state[].block_roots.data[i], slot: slot)
+        prev_root = state[].block_roots.data[i]

       slot += 1

 proc new*(
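In `BeaconState.block_roots`, a slot without a block repeats the previous block's root, so consecutive duplicates mark empty slots - that is what the new `prev_root` filtering exploits. A self-contained model of the rule (plain strings and `uint64` stand in for the real types):

```nim
type MiniBlockId = object
  root: string
  slot: uint64

iterator filledSlots(blockRoots: openArray[string], startSlot: uint64,
                     prevRoot: string): MiniBlockId =
  ## Yield one id per slot that actually had a block, skipping slots whose
  ## root merely repeats the previous one
  var prev = prevRoot
  for i, root in blockRoots:
    let slot = startSlot + i.uint64
    if slot == 0 or root != prev:
      yield MiniBlockId(root: root, slot: slot)
    prev = root

when isMainModule:
  # Slot 2 repeats slot 1's root, so it was empty and is skipped
  for bid in filledSlots(["a", "b", "b", "c"], 0, ""):
    echo bid.slot, " ", bid.root # 0 a / 1 b / 3 c
```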
@@ -399,31 +407,50 @@ when isMainModule:

     db = EraDB.new(
       defaultRuntimeConfig, dbPath,
-      Eth2Digest(
-        data: array[32, byte].initCopyFrom([byte 0x4b, 0x36, 0x3d, 0xb9])))
+      Eth2Digest.fromHex(
+        "0x4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95"))
     historical_roots = [
-      Eth2Digest(
-        data: array[32, byte].initCopyFrom([byte 0x40, 0xcf, 0x2f, 0x3c]))]
+      Eth2Digest.fromHex(
+        "0x40cf2f3cffd63d9ffeb89999ee359926abfa07ca5eb3fe2a70bc9d6b15720b8c"),
+      Eth2Digest.fromHex(
+        "0x74a3850f3cbccce2271f7c99e53ab07dae55cd8022c937c2dde7a20c5a2b83f9")]

-  var got8191 = false
-  var slot4: Eth2Digest
-  for bid in db.getBlockIds(historical_roots, Era(0)):
-    if bid.slot == Slot(1):
+  var
+    got0 = false
+    got8191 = false
+    got8192 = false
+    got8193 = false
+  for bid in db.getBlockIds(historical_roots, Slot(0), Eth2Digest()):
+    if bid.slot == Slot(0):
+      doAssert bid.root == Eth2Digest.fromHex(
+        "0x4d611d5b93fdab69013a7f0a2f961caca0c853f87cfe9595fe50038163079360")
+      got0 = true
+    elif bid.slot == Slot(1):
       doAssert bid.root == Eth2Digest.fromHex(
         "0xbacd20f09da907734434f052bd4c9503aa16bab1960e89ea20610d08d064481c")
-    elif bid.slot == Slot(4):
-      slot4 = bid.root
-    elif bid.slot == Slot(5) and bid.root != slot4:
-      raiseAssert "this slot was skipped, should have same root"
+    elif bid.slot == Slot(5):
+      raiseAssert "this slot was skipped, should not be iterated over"
     elif bid.slot == Slot(8191):
       doAssert bid.root == Eth2Digest.fromHex(
         "0x48ea23af46320b0290eae668b0c3e6ae3e0534270f897db0e83a57f51a22baca")
       got8191 = true
+    elif bid.slot == Slot(8192):
+      doAssert bid.root == Eth2Digest.fromHex(
+        "0xa7d379a9cbf87ae62127ddee8660ddc08a83a788087d23eaddd852fd8c408ef1")
+      got8192 = true
+    elif bid.slot == Slot(8193):
+      doAssert bid.root == Eth2Digest.fromHex(
+        "0x0934b14ec4ec9d45f4a2a7c3e4f6bb12d35444c74de8e30c13138c4d41b393aa")
+      got8193 = true
+      break
+
+  doAssert got0
+  doAssert got8191
+  doAssert got8192
+  doAssert got8193

   doAssert db.getBlock(
     historical_roots, Slot(1), Opt[Eth2Digest].err(),
     phase0.TrustedSignedBeaconBlock).get().root ==
       Eth2Digest.fromHex(
         "0xbacd20f09da907734434f052bd4c9503aa16bab1960e89ea20610d08d064481c")
-
-  doAssert got8191
@@ -1999,6 +1999,7 @@ proc handleStartUpCmd(config: var BeaconNodeConf) {.raises: [Defect, CatchableError].} =
     waitFor doTrustedNodeSync(
       cfg,
       config.databaseDir,
+      config.eraDir,
       config.trustedNodeUrl,
       config.stateId,
       config.backfillBlocks,
@@ -1158,7 +1158,7 @@ func matches_block_slot*(
 func can_advance_slots*(
     state: ForkyHashedBeaconState, block_root: Eth2Digest, target_slot: Slot): bool =
   ## Return true iff we can reach the given block/slot combination simply by
-  ## advancing slots
+  ## advancing 0 or more slots
   target_slot >= state.data.slot and block_root == state.latest_block_root

 func can_advance_slots*(
     state: ForkedHashedBeaconState, block_root: Eth2Digest, target_slot: Slot): bool =
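Advancing through empty slots changes `state.slot` but leaves `latest_block_root` untouched, which is why the predicate reduces to one comparison per field. A toy model with stand-in types (not the spec definitions):

```nim
type MiniState = object
  slot: uint64
  latestBlockRoot: string

func canAdvanceSlots(state: MiniState, blockRoot: string,
                     targetSlot: uint64): bool =
  # Same shape as the spec helper above: the target must not be in the past,
  # and the block must already be the newest one applied to the state
  targetSlot >= state.slot and blockRoot == state.latestBlockRoot

let st = MiniState(slot: 32, latestBlockRoot: "r")
doAssert st.canAdvanceSlots("r", 40)     # same history, later slot
doAssert not st.canAdvanceSlots("r", 31) # cannot rewind
doAssert not st.canAdvanceSlots("x", 40) # different block history
```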
@@ -13,60 +13,14 @@ import
   stew/base10,
   chronicles, chronos,
   ./sync/sync_manager,
-  ./consensus_object_pools/blockchain_dag,
+  ./consensus_object_pools/[block_clearance, blockchain_dag],
   ./spec/eth2_apis/rest_beacon_client,
   ./spec/[beaconstate, eth2_merkleization, forks, presets, state_transition],
-  "."/[beacon_clock, beacon_chain_db]
-
-type
-  DbCache = object
-    summaries: Table[Eth2Digest, BeaconBlockSummary]
-    slots: seq[Option[Eth2Digest]]
-
-proc updateSlots(cache: var DbCache, slot: Slot) =
-  if cache.slots.lenu64() < slot:
-    cache.slots.setLen(slot.int + 1)
-
-proc updateSlots(cache: var DbCache, root: Eth2Digest, slot: Slot) =
-  # The slots mapping stores one linear block history - we construct it by
-  # starting from a given root/slot and walking the known parents as far back
-  # as possible which ensures that all blocks belong to the same history
-
-  cache.updateSlots(slot)
-
-  var
-    root = root
-    lastSlot = slot
-
-  while true:
-    cache.summaries.withValue(root, v) do:
-      let slot = v[].slot
-
-      for i in slot.int + 1..<lastSlot.int: # Avoid re-querying known gaps
-        cache.slots[i] = some(ZERO_HASH)
-
-      cache.slots[slot.int] = some(root)
-
-      if slot == 0:
-        return
-
-      root = v[].parent_root
-      lastSlot = slot
-    do:
-      return
-
-proc update(cache: var DbCache, blck: ForkySignedBeaconBlock) =
-  if blck.root notin cache.summaries:
-    cache.summaries[blck.root] = blck.message.toBeaconBlockSummary()
-
-  cache.updateSlots(blck.root, blck.message.slot)
-
-proc isKnown(cache: DbCache, slot: Slot): bool =
-  slot < cache.slots.lenu64 and cache.slots[slot.int].isSome()
+  "."/[beacon_clock, beacon_chain_db, era_db]

 proc doTrustedNodeSync*(
-    cfg: RuntimeConfig, databaseDir: string, restUrl: string,
-    stateId: string, backfill: bool, reindex: bool,
+    cfg: RuntimeConfig, databaseDir, eraDir, restUrl, stateId: string,
+    backfill: bool, reindex: bool,
     genesisState: ref ForkedHashedBeaconState = nil) {.async.} =
   logScope:
     restUrl
@@ -75,57 +29,16 @@ proc doTrustedNodeSync*(
   notice "Starting trusted node sync",
     databaseDir, backfill, reindex

+  var
+    client = RestClientRef.new(restUrl).valueOr:
+      error "Cannot connect to server", error = error
+      quit 1
+
   let
     db = BeaconChainDB.new(databaseDir, inMemory = false)
   defer:
     db.close()

-  var
-    dbCache = DbCache(summaries: db.loadSummaries())
-
-  let
-    dbHead = db.getHeadBlock()
-    headSlot = if dbHead.isSome():
-      if dbHead.get() notin dbCache.summaries:
-        # This can happen with pre-blocksummary database - it's better to start
-        # over in this case
-        error "Database missing head block summary - database too old or corrupt"
-        quit 1
-
-      let slot = dbCache.summaries[dbHead.get()].slot
-      dbCache.updateSlots(dbHead.get(), slot)
-      slot
-    else:
-      # When we don't have a head, we'll use the given checkpoint as head
-      FAR_FUTURE_SLOT
-
-  var client = RestClientRef.new(restUrl).valueOr:
-    error "Cannot connect to server", error = error
-    quit 1
-
-  proc downloadBlock(slot: Slot):
-      Future[Option[ref ForkedSignedBeaconBlock]] {.async.} =
-    # Download block at given slot, retrying a few times,
-    var lastError: ref CatchableError
-    for i in 0..<3:
-      try:
-        return await client.getBlockV2(BlockIdent.init(slot), cfg)
-      except RestResponseError as exc:
-        lastError = exc
-        notice "Server does not support block downloads / backfilling",
-          msg = exc.msg
-        break
-      except CatchableError as exc:
-        # We'll assume this may be a connectivity error or something similar
-        lastError = exc
-
-        warn "Retrying download of block", slot, err = exc.msg
-        client = RestClientRef.new(restUrl).valueOr:
-          error "Cannot connect to server", url = restUrl, error = error
-          quit 1
-
-    raise lastError
-
   # If possible, we'll store the genesis state in the database - this is not
   # strictly necessary but renders the resulting database compatible with
   # versions prior to 22.11 and makes reindexing possible
@@ -170,7 +83,21 @@ proc doTrustedNodeSync*(
       restUrl
     tmp

-  let (checkpointSlot, checkpointRoot) = if dbHead.isNone:
+  let
+    dbHead = db.getHeadBlock()
+    head = if dbHead.isSome():
+      let
+        bid = db.getBlockId(dbHead.get()).valueOr:
+          error "Database missing head block summary - database too old or corrupt",
+            headRoot = dbHead.get()
+          quit 1
+
+      Opt.some bid
+    else:
+      # When we don't have a head, we'll use the given checkpoint as head
+      Opt.none(BlockId)
+
+  if head.isNone:
     notice "Downloading checkpoint state"

     let
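The head lookup leans on the `valueOr` early-exit idiom: the happy path yields the wrapped value, while the block body runs (and here quits) when the option is empty. A minimal sketch, assuming the stew/nim-results package that nimbus-eth2 builds on:

```nim
import results # https://github.com/arnetheduck/nim-results

proc lookupHead(found: bool): Opt[int] =
  if found: Opt.some 42 else: Opt.none(int)

let bid = lookupHead(true).valueOr:
  echo "Database missing head block summary - database too old or corrupt"
  quit 1

echo bid # 42
```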
|
@ -210,48 +137,34 @@ proc doTrustedNodeSync*(
|
||||||
rootInGenesis = getStateField(genesisState[], genesis_validators_root)
|
rootInGenesis = getStateField(genesisState[], genesis_validators_root)
|
||||||
quit 1
|
quit 1
|
||||||
|
|
||||||
withState(genesisState[]):
|
|
||||||
let blck = get_initial_beacon_block(forkyState)
|
|
||||||
dbCache.update(blck.asSigned())
|
|
||||||
|
|
||||||
ChainDAGRef.preInit(db, genesisState[])
|
ChainDAGRef.preInit(db, genesisState[])
|
||||||
|
|
||||||
if getStateField(genesisState[], slot) != getStateField(state[], slot):
|
if getStateField(genesisState[], slot) != getStateField(state[], slot):
|
||||||
ChainDAGRef.preInit(db, state[])
|
ChainDAGRef.preInit(db, state[])
|
||||||
else:
|
else:
|
||||||
ChainDAGRef.preInit(db, state[])
|
ChainDAGRef.preInit(db, state[])
|
||||||
|
|
||||||
let latest_bid = state[].latest_block_id()
|
|
||||||
|
|
||||||
(latest_bid.slot, latest_bid.root)
|
|
||||||
else:
|
else:
|
||||||
notice "Skipping checkpoint download, database already exists (remove db directory to get a fresh snapshot)",
|
notice "Skipping checkpoint download, database already exists (remove db directory to get a fresh snapshot)",
|
||||||
databaseDir, head = shortLog(dbHead.get())
|
databaseDir, head = shortLog(head.get())
|
||||||
(headSlot, dbHead.get())
|
|
||||||
|
|
||||||
# Coming this far, we've done what ChainDAGRef.preInit would normally do -
|
# Coming this far, we've done what ChainDAGRef.preInit would normally do -
|
||||||
# Let's do a sanity check and start backfilling blocks from the trusted node
|
# we can now load a ChainDAG to start backfilling it
|
||||||
if (let v = ChainDAGRef.isInitialized(db); v.isErr()):
|
|
||||||
error "Database not initialized after checkpoint sync, report bug",
|
|
||||||
err = v.error()
|
|
||||||
quit 1
|
|
||||||
|
|
||||||
dbCache.updateSlots(checkpointSlot)
|
|
||||||
|
|
||||||
let
|
let
|
||||||
missingSlots = block:
|
validatorMonitor = newClone(ValidatorMonitor.init(false, false))
|
||||||
var total = 0
|
dag = ChainDAGRef.init(cfg, db, validatorMonitor, {}, eraPath = eraDir)
|
||||||
for slot in Slot(0)..<checkpointSlot:
|
backfillSlot = dag.backfill.slot
|
||||||
if not dbCache.isKnown(slot):
|
frontfill = dag.frontfill.valueOr(BlockId())
|
||||||
total += 1
|
|
||||||
total
|
|
||||||
|
|
||||||
let canReindex = if missingSlots == 0:
|
let canReindex = if backfillSlot <= frontfill.slot:
|
||||||
info "Database backfilled"
|
info "Database backfilled"
|
||||||
true
|
true
|
||||||
elif backfill:
|
elif backfill:
|
||||||
|
# +1 because we need to download the frontfill slot for the frontfill match
|
||||||
|
# detection to kick in, in addBackfillBlock
|
||||||
|
let missingSlots = dag.backfill.slot - frontfill.slot + 1
|
||||||
|
|
||||||
notice "Downloading historical blocks - you can interrupt this process at any time and it automatically be completed when you start the beacon node",
|
notice "Downloading historical blocks - you can interrupt this process at any time and it automatically be completed when you start the beacon node",
|
||||||
checkpointSlot, missingSlots
|
backfillSlot, frontfill, missingSlots
|
||||||
|
|
||||||
var # Same averaging as SyncManager
|
var # Same averaging as SyncManager
|
||||||
syncCount = 0
|
syncCount = 0
|
||||||
|
@@ -259,99 +172,79 @@ proc doTrustedNodeSync*(
     avgSyncSpeed = 0.0
     stamp = SyncMoment.now(0)

+    proc downloadBlock(slot: Slot):
+        Future[Option[ref ForkedSignedBeaconBlock]] {.async.} =
+      # Download block at given slot, retrying a few times,
+      var lastError: ref CatchableError
+      for i in 0..<3:
+        try:
+          return await client.getBlockV2(BlockIdent.init(slot), cfg)
+        except RestResponseError as exc:
+          lastError = exc
+          notice "Server does not support block downloads / backfilling - blocks will be downloaded later",
+            msg = exc.msg
+          break
+        except CatchableError as exc:
+          # We'll assume this may be a connectivity error or something similar
+          lastError = exc
+
+          warn "Retrying download of block", slot, err = exc.msg
+          client = RestClientRef.new(restUrl).valueOr:
+            error "Cannot connect to server", url = restUrl, error = error
+            quit 1
+
+      raise lastError
+
     # Download several blocks in parallel but process them serially
-    var gets: array[16, Future[Option[ref ForkedSignedBeaconBlock]]]
-    proc processBlock(
-        fut: Future[Option[ref ForkedSignedBeaconBlock]], slot: Slot) {.async.} =
-      processed += 1
-      var blck = await fut
-      if blck.isNone():
-        dbCache.slots[slot.int] = some ZERO_HASH
-        return
-
-      let data = blck.get()
-      withBlck(data[]):
-        debug "Processing",
-          blck = shortLog(blck.message),
-          blockRoot = shortLog(blck.root)
-
-        if blck.message.slot == checkpointSlot:
-          if blck.root != checkpointRoot:
-            error "Downloaded block does not match checkpoint history",
-              blck = shortLog(blck),
-              expectedRoot = shortLog(checkpointRoot)
-
-            quit 1
-        else:
-          var childSlot = blck.message.slot + 1
-          while true:
-            if childSlot >= dbCache.slots.lenu64():
-              error "Downloaded block does not match checkpoint history"
-              quit 1
-
-            if not dbCache.slots[childSlot.int].isSome():
-              # Should never happen - we download slots backwards
-              error "Downloaded block does not match checkpoint history"
-              quit 1
-
-            let knownRoot = dbCache.slots[childSlot.int].get()
-            if knownRoot == ZERO_HASH:
-              childSlot += 1
-              continue
-
-            dbCache.summaries.withValue(knownRoot, summary):
-              if summary[].parent_root != blck.root:
-                error "Downloaded block does not match checkpoint history",
-                  blockRoot = shortLog(blck.root),
-                  expectedRoot = shortLog(summary[].parent_root)
-                quit 1
-
-              break
-
-            # This shouldn't happen - we should have downloaded the child and
-            # updated knownBlocks before here
-            error "Expected child block not found in checkpoint history"
-            quit 1
-
-        if blck.root notin dbCache.summaries:
-          db.putBlock(blck.asTrusted())
-
-        dbCache.update(blck)
-
-      let newStamp = SyncMoment.now(processed)
-      if newStamp.stamp - stamp.stamp > 12.seconds:
-        syncCount += 1
-
-        let
-          remaining = blck.message.slot.int
-          slotsPerSec = speed(stamp, newStamp)
-        avgSyncSpeed = avgSyncSpeed + (slotsPerSec - avgSyncSpeed) / float(syncCount)
-
-        info "Backfilling",
-          timeleft = toTimeLeftString(
-            if avgSyncSpeed >= 0.001:
-              Duration.fromFloatSeconds(remaining.float / avgSyncSpeed)
-            else: InfiniteDuration),
-          slotsPerSecond = avgSyncSpeed,
-          remainingSlots = remaining
-        stamp = newStamp
-
-    # Download blocks backwards from the checkpoint slot, skipping the ones we
-    # already have in the database. We'll do a few downloads in parallel which
-    # risks having some redundant downloads going on, but speeds things up
+    proc processBlock(blck: Option[ref ForkedSignedBeaconBlock]) =
+      let newStamp = SyncMoment.now(processed)
+      if newStamp.stamp - stamp.stamp > 12.seconds:
+        syncCount += 1
+
+        let
+          remaining = dag.backfill.slot - frontfill.slot
+          slotsPerSec = speed(stamp, newStamp)
+        avgSyncSpeed = avgSyncSpeed + (slotsPerSec - avgSyncSpeed) / float(syncCount)
+
+        info "Backfilling",
+          timeleft = toTimeLeftString(
+            if avgSyncSpeed >= 0.001:
+              Duration.fromFloatSeconds(remaining.float / avgSyncSpeed)
+            else: InfiniteDuration),
+          slotsPerSecond = avgSyncSpeed,
+          remainingSlots = remaining
+        stamp = newStamp
+
+      processed += 1
+      if blck.isSome():
+        let
+          data = blck.get()
+
+        withBlck(data[]):
+          if (let res = dag.addBackfillBlock(blck.asSigVerified()); res.isErr()):
+            case res.error()
+            of BlockError.Invalid, BlockError.MissingParent, BlockError.UnviableFork:
+              error "Got invalid block from trusted node - is it on the right network?",
+                blck = shortLog(blck), err = res.error()
+              quit 1
+            of BlockError.Duplicate:
+              discard
+
+    # Download blocks backwards from the backfill slot, ie the first slot for
+    # which we don't have a block, when walking backwards from the head
     try:
-      for i in 0'u64..<(checkpointSlot.uint64 + gets.lenu64()):
-        if not isNil(gets[int(i mod gets.lenu64)]):
-          await processBlock(
-            gets[int(i mod gets.lenu64)],
-            checkpointSlot + gets.lenu64() - uint64(i))
-          gets[int(i mod gets.lenu64)] = nil
-
-        if i < checkpointSlot:
-          let slot = checkpointSlot - i
-          if dbCache.isKnown(slot):
-            continue
-
+      var
+        gets: array[16, Future[Option[ref ForkedSignedBeaconBlock]]]
+
+      for i in 0.uint64..missingSlots + gets.lenu64:
+        if i >= gets.lenu64():
+          let
+            fut = gets[int(i mod gets.lenu64)]
+
+          processBlock(await fut)
+
+        if i <= backfillSlot:
+          let slot = backfillSlot - i
           gets[int(i mod gets.lenu64)] = downloadBlock(slot)

         if i mod 1024 == 0:
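The rewritten loop keeps a fixed window of requests in flight while consuming results strictly in order - indexing by `i mod N` turns the futures array into a ring buffer. A minimal chronos-free sketch of the same shape using std/asyncdispatch (`fetch` is a made-up stand-in for `downloadBlock`):

```nim
import std/asyncdispatch

proc fetch(slot: uint64): Future[uint64] {.async.} =
  await sleepAsync(10) # simulated network latency
  return slot

proc pipeline(slots: uint64) {.async.} =
  const depth = 4 # the real code keeps 16 downloads in flight
  var gets: array[depth, Future[uint64]]
  for i in 0'u64 ..< slots + depth.uint64:
    if i >= depth.uint64:
      # Drain the oldest in-flight request before reusing its ring slot
      let done = await gets[int(i mod depth.uint64)]
      echo "processed slot ", done
    if i < slots:
      gets[int(i mod depth.uint64)] = fetch(i)

waitFor pipeline(8)
```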
@@ -361,6 +254,7 @@ proc doTrustedNodeSync*(
     notice "Backfilling incomplete - blocks will be downloaded when starting the node", msg = exc.msg
     false
   else:
+    let missingSlots = dag.backfill.slot - frontfill.slot
     notice "Database initialized, historical blocks will be backfilled when starting the node",
       missingSlots
@@ -370,22 +264,18 @@ proc doTrustedNodeSync*(
     notice "Reindexing historical state lookup tables (you can interrupt this process at any time)"

     # Build a DAG
-    let
-      validatorMonitor = newClone(ValidatorMonitor.init(false, false))
-      dag = ChainDAGRef.init(cfg, db, validatorMonitor, {})
-
     dag.rebuildIndex()

   notice "Done, your beacon node is ready to serve you! Don't forget to check that you're on the canonical chain by comparing the checkpoint root with other online sources. See https://nimbus.guide/trusted-node-sync.html for more information.",
-    checkpointRoot
+    checkpoint = dag.head

 when isMainModule:
   import
     std/[os],
     networking/network_metadata

-  let backfill = os.paramCount() > 4 and os.paramStr(5) == "true"
+  let backfill = os.paramCount() > 5 and os.paramStr(6) == "true"

   waitFor doTrustedNodeSync(
     getRuntimeConfig(some os.paramStr(1)), os.paramStr(2), os.paramStr(3),
-    os.paramStr(4), backfill, false)
+    os.paramStr(4), os.paramStr(5), backfill, false)
@@ -130,3 +130,30 @@ suite "Beacon state" & preset():

     check:
       dcs == merkleizer.toDepositContractState()
+
+  test "can_advance_slots":
+    var
+      state = (ref ForkedHashedBeaconState)(
+        kind: BeaconStateFork.Phase0,
+        phase0Data: initialize_hashed_beacon_state_from_eth1(
+          defaultRuntimeConfig, ZERO_HASH, 0,
+          makeInitialDeposits(SLOTS_PER_EPOCH, {}), {skipBlsValidation}))
+      genBlock = get_initial_beacon_block(state[])
+      cache: StateCache
+      info: ForkedEpochInfo
+
+    check:
+      state[].can_advance_slots(genBlock.root, Slot(0))
+      state[].can_advance_slots(genBlock.root, Slot(1))
+      state[].can_advance_slots(genBlock.root, Slot(2))
+
+    let blck = addTestBlock(
+      state[], cache, flags = {skipBlsValidation})
+
+    check:
+      not state[].can_advance_slots(genBlock.root, Slot(0))
+      not state[].can_advance_slots(genBlock.root, Slot(1))
+      not state[].can_advance_slots(genBlock.root, Slot(2))
+      not state[].can_advance_slots(blck.root, Slot(0))
+      state[].can_advance_slots(blck.root, Slot(1))
+      state[].can_advance_slots(blck.root, Slot(2))