Trusted node sync (#3209)
* Trusted node sync Trusted node sync, aka checkpoint sync, allows syncing the chain from a trusted node instead of relying on a full sync from genesis. Features include: * sync from any slot, including the latest finalized slot * backfill blocks either from the REST api (default) or p2p (#3263) Future improvements: * top up blocks between head in database and some other node - this makes for an efficient backup tool * recreate historical state to enable historical queries * fixes * load genesis from network metadata * check checkpoint block root against state * fix invalid block root in rest json decoding * odds and ends * retry looking for epoch-boundary checkpoint blocks
This commit is contained in:
parent
ebde027262
commit
68247f81b3
|
@ -37,7 +37,7 @@ const
|
|||
defaultSigningNodeRequestTimeout* = 60
|
||||
|
||||
type
|
||||
BNStartUpCmd* = enum
|
||||
BNStartUpCmd* {.pure.} = enum
|
||||
noCommand
|
||||
createTestnet
|
||||
deposits
|
||||
|
@ -45,6 +45,7 @@ type
|
|||
record
|
||||
web3
|
||||
slashingdb
|
||||
trustedNodeSync
|
||||
|
||||
WalletsCmd* {.pure.} = enum
|
||||
create = "Creates a new EIP-2386 wallet"
|
||||
|
@ -177,9 +178,9 @@ type
|
|||
|
||||
case cmd* {.
|
||||
command
|
||||
defaultValue: noCommand }: BNStartUpCmd
|
||||
defaultValue: BNStartUpCmd.noCommand }: BNStartUpCmd
|
||||
|
||||
of noCommand:
|
||||
of BNStartUpCmd.noCommand:
|
||||
bootstrapNodes* {.
|
||||
desc: "Specifies one or more bootstrap nodes to use when connecting to the network"
|
||||
abbr: "b"
|
||||
|
@ -417,7 +418,7 @@ type
|
|||
defaultValue: false
|
||||
name: "validator-monitor-totals" }: bool
|
||||
|
||||
of createTestnet:
|
||||
of BNStartUpCmd.createTestnet:
|
||||
testnetDepositsFile* {.
|
||||
desc: "A LaunchPad deposits file for the genesis state validators"
|
||||
name: "deposits-file" }: InputFile
|
||||
|
@ -451,7 +452,7 @@ type
|
|||
desc: "Output file with list of bootstrap nodes for the network"
|
||||
name: "output-bootstrap-file" }: OutFile
|
||||
|
||||
of wallets:
|
||||
of BNStartUpCmd.wallets:
|
||||
case walletsCmd* {.command.}: WalletsCmd
|
||||
of WalletsCmd.create:
|
||||
nextAccount* {.
|
||||
|
@ -484,7 +485,7 @@ type
|
|||
of WalletsCmd.list:
|
||||
discard
|
||||
|
||||
of deposits:
|
||||
of BNStartUpCmd.deposits:
|
||||
case depositsCmd* {.command.}: DepositsCmd
|
||||
of DepositsCmd.createTestnetDeposits:
|
||||
totalDeposits* {.
|
||||
|
@ -543,7 +544,7 @@ type
|
|||
name: "epoch"
|
||||
desc: "The desired exit epoch" }: Option[uint64]
|
||||
|
||||
of record:
|
||||
of BNStartUpCmd.record:
|
||||
case recordCmd* {.command.}: RecordCmd
|
||||
of RecordCmd.create:
|
||||
ipExt* {.
|
||||
|
@ -573,7 +574,7 @@ type
|
|||
desc: "ENR URI of the record to print"
|
||||
name: "enr" .}: Record
|
||||
|
||||
of web3:
|
||||
of BNStartUpCmd.web3:
|
||||
case web3Cmd* {.command.}: Web3Cmd
|
||||
of Web3Cmd.test:
|
||||
web3TestUrl* {.
|
||||
|
@ -581,7 +582,7 @@ type
|
|||
desc: "The web3 provider URL to test"
|
||||
name: "url" }: Uri
|
||||
|
||||
of slashingdb:
|
||||
of BNStartUpCmd.slashingdb:
|
||||
case slashingdbCmd* {.command.}: SlashProtCmd
|
||||
of SlashProtCmd.`import`:
|
||||
importedInterchangeFile* {.
|
||||
|
@ -597,6 +598,23 @@ type
|
|||
desc: "EIP-3076 slashing protection interchange file to export"
|
||||
argument }: OutFile
|
||||
|
||||
of BNStartUpCmd.trustedNodeSync:
|
||||
trustedNodeUrl* {.
|
||||
desc: "URL of the REST API to sync from"
|
||||
defaultValue: "http://localhost:5052"
|
||||
name: "trusted-node-url"
|
||||
.}: string
|
||||
|
||||
blockId* {.
|
||||
desc: "Block id to sync to - this can be a block root, slot number, \"finalized\" or \"head\""
|
||||
defaultValue: "finalized"
|
||||
.}: string
|
||||
|
||||
backfillBlocks* {.
|
||||
desc: "Backfill blocks directly from REST server instead of fetching via API"
|
||||
defaultValue: true
|
||||
name: "backfill"}: bool
|
||||
|
||||
ValidatorClientConf* = object
|
||||
logLevel* {.
|
||||
desc: "Sets the log level"
|
||||
|
|
|
@ -26,7 +26,7 @@ import
|
|||
# Local modules
|
||||
"."/[
|
||||
beacon_clock, beacon_chain_db, beacon_node, beacon_node_status,
|
||||
conf, filepath, interop, nimbus_binary_common, statusbar,
|
||||
conf, filepath, interop, nimbus_binary_common, statusbar, trusted_node_sync,
|
||||
version],
|
||||
./networking/[eth2_discovery, eth2_network, network_metadata],
|
||||
./gossip_processing/[eth2_processor, block_processor, consensus_manager],
|
||||
|
@ -1874,7 +1874,6 @@ proc doSlashingImport(conf: BeaconNodeConf) {.raises: [SerializationError, IOErr
|
|||
echo "Import finished: '", interchange, "' into '", dir/filetrunc & ".sqlite3", "'"
|
||||
|
||||
proc doSlashingInterchange(conf: BeaconNodeConf) {.raises: [Defect, CatchableError].} =
|
||||
doAssert conf.cmd == slashingdb
|
||||
case conf.slashingdbCmd
|
||||
of SlashProtCmd.`export`:
|
||||
conf.doSlashingExport()
|
||||
|
@ -1921,10 +1920,28 @@ programMain:
|
|||
let rng = keys.newRng()
|
||||
|
||||
case config.cmd
|
||||
of createTestnet: doCreateTestnet(config, rng[])
|
||||
of noCommand: doRunBeaconNode(config, rng)
|
||||
of deposits: doDeposits(config, rng[])
|
||||
of wallets: doWallets(config, rng[])
|
||||
of record: doRecord(config, rng[])
|
||||
of web3: doWeb3Cmd(config)
|
||||
of slashingdb: doSlashingInterchange(config)
|
||||
of BNStartUpCmd.createTestnet: doCreateTestnet(config, rng[])
|
||||
of BNStartUpCmd.noCommand: doRunBeaconNode(config, rng)
|
||||
of BNStartUpCmd.deposits: doDeposits(config, rng[])
|
||||
of BNStartUpCmd.wallets: doWallets(config, rng[])
|
||||
of BNStartUpCmd.record: doRecord(config, rng[])
|
||||
of BNStartUpCmd.web3: doWeb3Cmd(config)
|
||||
of BNStartUpCmd.slashingdb: doSlashingInterchange(config)
|
||||
of BNStartupCmd.trustedNodeSync:
|
||||
let
|
||||
network = loadEth2Network(config)
|
||||
cfg = network.cfg
|
||||
genesis =
|
||||
if network.genesisData.len > 0:
|
||||
newClone(readSszForkedHashedBeaconState(
|
||||
cfg,
|
||||
network.genesisData.toOpenArrayByte(0, network.genesisData.high())))
|
||||
else: nil
|
||||
|
||||
waitFor doTrustedNodeSync(
|
||||
cfg,
|
||||
config.databaseDir,
|
||||
config.trustedNodeUrl,
|
||||
config.blockId,
|
||||
config.backfillBlocks,
|
||||
genesis)
|
||||
|
|
|
@ -795,6 +795,8 @@ proc readValue*(reader: var JsonReader[RestJson],
|
|||
if res.isNone():
|
||||
reader.raiseUnexpectedValue("Incorrect merge block format")
|
||||
value = ForkedSignedBeaconBlock.init(res.get())
|
||||
withBlck(value):
|
||||
blck.root = hash_tree_root(blck.message)
|
||||
|
||||
proc writeValue*(writer: var JsonWriter[RestJson],
|
||||
value: ForkedSignedBeaconBlock) {.
|
||||
|
|
|
@ -471,7 +471,7 @@ proc guardTask[A, B](man: SyncManager[A, B]) {.async.} =
|
|||
man.workers[index].future = future
|
||||
pending[index] = future
|
||||
|
||||
proc toTimeLeftString(d: Duration): string =
|
||||
proc toTimeLeftString*(d: Duration): string =
|
||||
if d == InfiniteDuration:
|
||||
"--h--m"
|
||||
else:
|
||||
|
|
|
@ -0,0 +1,411 @@
|
|||
# Copyright (c) 2018-2022 Status Research & Development GmbH
|
||||
# Licensed and distributed under either of
|
||||
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
|
||||
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
||||
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
||||
|
||||
{.push raises: [Defect].}
|
||||
|
||||
import
|
||||
std/[os],
|
||||
stew/[assign2, base10],
|
||||
chronicles, chronos,
|
||||
./sync/sync_manager,
|
||||
./consensus_object_pools/blockchain_dag,
|
||||
./spec/eth2_apis/rest_beacon_client,
|
||||
./spec/[beaconstate, eth2_merkleization, forks, presets, state_transition],
|
||||
"."/[beacon_clock, beacon_chain_db]
|
||||
|
||||
type
|
||||
DbCache = object
|
||||
summaries: Table[Eth2Digest, BeaconBlockSummary]
|
||||
slots: seq[Option[Eth2Digest]]
|
||||
|
||||
const
|
||||
emptyHash = Eth2Digest()
|
||||
|
||||
proc updateSlots(cache: var DbCache, root: Eth2Digest, slot: Slot) =
|
||||
# The slots mapping stores one linear block history - we construct it by
|
||||
# starting from a given root/slot and walking the known parents as far back
|
||||
# as possible which ensures that all blocks belong to the same history
|
||||
|
||||
if cache.slots.len() < slot.int + 1:
|
||||
cache.slots.setLen(slot.int + 1)
|
||||
|
||||
var
|
||||
root = root
|
||||
lastSlot = slot
|
||||
|
||||
while true:
|
||||
cache.summaries.withValue(root, v) do:
|
||||
let slot = v[].slot
|
||||
|
||||
for i in slot.int + 1..<lastSlot.int: # Avoid re-querying known gaps
|
||||
cache.slots[i] = some(emptyHash)
|
||||
|
||||
cache.slots[slot.int] = some(root)
|
||||
|
||||
if slot == 0:
|
||||
return
|
||||
|
||||
root = v[].parent_root
|
||||
lastSlot = slot
|
||||
do:
|
||||
return
|
||||
|
||||
proc update(cache: var DbCache, blck: ForkySignedBeaconBlock) =
|
||||
let
|
||||
slot = blck.message.slot
|
||||
|
||||
if blck.root notin cache.summaries:
|
||||
cache.summaries[blck.root] = blck.message.toBeaconBlockSummary()
|
||||
|
||||
cache.updateSlots(blck.root, blck.message.slot)
|
||||
|
||||
proc isKnown(cache: DbCache, slot: Slot): bool =
|
||||
slot < cache.slots.lenu64 and cache.slots[slot.int].isSome()
|
||||
|
||||
proc doTrustedNodeSync*(
|
||||
cfg: RuntimeConfig, databaseDir: string, restUrl: string,
|
||||
blockId: string, backfill: bool,
|
||||
genesisState: ref ForkedHashedBeaconState = nil) {.async.} =
|
||||
notice "Starting trusted node sync",
|
||||
databaseDir, restUrl, blockId, backfill
|
||||
|
||||
let
|
||||
db = BeaconChainDB.new(databaseDir, inMemory = false)
|
||||
|
||||
var
|
||||
dbCache = DbCache(summaries: db.loadSummaries())
|
||||
|
||||
let
|
||||
dbHead = db.getHeadBlock()
|
||||
headSlot = if dbHead.isSome():
|
||||
if dbHead.get() notin dbCache.summaries:
|
||||
# This can happen with pre-blocksummary database - it's better to start
|
||||
# over in this case
|
||||
error "Database missing head block summary - database too old or corrupt"
|
||||
quit 1
|
||||
|
||||
let slot = dbCache.summaries[dbHead.get()].slot
|
||||
dbCache.updateSlots(dbHead.get(), slot)
|
||||
slot
|
||||
else:
|
||||
# When we don't have a head, we'll use the given checkpoint as head
|
||||
FAR_FUTURE_SLOT
|
||||
|
||||
var client = RestClientRef.new(restUrl).get()
|
||||
|
||||
proc downloadBlock(slot: Slot):
|
||||
Future[Option[ForkedSignedBeaconBlock]] {.async.} =
|
||||
# Download block at given slot, retrying a few times,
|
||||
var lastError: ref CatchableError
|
||||
for i in 0..<3:
|
||||
try:
|
||||
return await client.getBlockV2(BlockIdent.init(slot), cfg)
|
||||
except CatchableError as exc:
|
||||
lastError = exc
|
||||
warn "Retrying download of block", slot, err = exc.msg
|
||||
client = RestClientRef.new(restUrl).get()
|
||||
|
||||
error "Unable to download block - backfill incomplete, but will resume when you start the beacon node",
|
||||
slot, error = lastError.msg, url = client.address
|
||||
|
||||
quit 1
|
||||
|
||||
let
|
||||
dbGenesis = db.getGenesisBlock()
|
||||
localGenesisRoot = if dbGenesis.isSome():
|
||||
dbGenesis.get()
|
||||
else:
|
||||
let genesisState = if genesisState != nil:
|
||||
genesisState
|
||||
else:
|
||||
notice "Downloading genesis state", restUrl
|
||||
let state = try:
|
||||
await client.getStateV2(
|
||||
StateIdent.init(StateIdentType.Genesis), cfg)
|
||||
except CatchableError as exc:
|
||||
error "Unable to download genesis state",
|
||||
error = exc.msg, restUrl
|
||||
quit 1
|
||||
|
||||
if isNil(state):
|
||||
error "Server is missing genesis state",
|
||||
restUrl
|
||||
quit 1
|
||||
state
|
||||
|
||||
withState(genesisState[]):
|
||||
info "Writing genesis state",
|
||||
stateRoot = shortLog(state.root),
|
||||
genesis_validators_root = shortLog(state.data.genesis_validators_root)
|
||||
|
||||
db.putStateRoot(state.latest_block_root(), state.data.slot, state.root)
|
||||
db.putState(state.root, state.data)
|
||||
|
||||
let blck = get_initial_beacon_block(state)
|
||||
|
||||
info "Writing genesis block",
|
||||
blockRoot = shortLog(blck.root),
|
||||
blck = shortLog(blck.message)
|
||||
db.putBlock(blck)
|
||||
db.putGenesisBlock(blck.root)
|
||||
|
||||
dbCache.update(blck.asSigned())
|
||||
blck.root
|
||||
|
||||
remoteGenesisRoot = try:
|
||||
(await client.getBlockRoot(
|
||||
BlockIdent.init(BlockIdentType.Genesis))).data.data.root
|
||||
except CatchableError as exc:
|
||||
error "Unable to download genesis block root",
|
||||
error = exc.msg, restUrl
|
||||
quit 1
|
||||
|
||||
if remoteGenesisRoot != localGenesisRoot:
|
||||
error "Server genesis block root does not match local genesis, is the server serving the same chain?",
|
||||
localGenesisRoot = shortLog(localGenesisRoot),
|
||||
remoteGenesisRoot = shortLog(remoteGenesisRoot)
|
||||
quit 1
|
||||
|
||||
notice "Downloading checkpoint block", restUrl, blockId
|
||||
|
||||
let checkpointBlock = block:
|
||||
# Finding a checkpoint block is tricky: we need the block to fall on an
|
||||
# epoch boundary and when making the first request, we don't know exactly
|
||||
# what slot we'll get - to find it, we'll keep walking backwards for a
|
||||
# reasonable number of tries
|
||||
var
|
||||
checkpointBlock: ForkedSignedBeaconBlock
|
||||
id = BlockIdent.decodeString(blockId).valueOr:
|
||||
error "Cannot decode checkpoint block id, must be a slot, hash, 'finalized' or 'head'",
|
||||
blockId
|
||||
quit 1
|
||||
found = false
|
||||
|
||||
for i in 0..<10:
|
||||
let blck = try:
|
||||
await client.getBlockV2(id, cfg)
|
||||
except CatchableError as exc:
|
||||
error "Unable to download checkpoint block",
|
||||
error = exc.msg, restUrl
|
||||
quit 1
|
||||
|
||||
if blck.isNone():
|
||||
# Server returned 404 - no block was found at the given id, so we need
|
||||
# to try an earlier slot - assuming we know of one!
|
||||
if id.kind == BlockQueryKind.Slot:
|
||||
let slot = id.slot
|
||||
id = BlockIdent.init((id.slot.epoch() - 1).start_slot)
|
||||
|
||||
info "No block found at given slot, trying an earlier epoch",
|
||||
slot, id
|
||||
continue
|
||||
else:
|
||||
error "Cannot find a block at given block id, and cannot compute an earlier slot",
|
||||
id, blockId
|
||||
quit 1
|
||||
|
||||
checkpointBlock = blck.get()
|
||||
|
||||
let checkpointSlot = getForkedBlockField(checkpointBlock, slot)
|
||||
if checkpointSlot > headSlot:
|
||||
# When the checkpoint is newer than the head, we run into trouble: the
|
||||
# current backfill in ChainDAG does not support filling in arbitrary gaps.
|
||||
# If we were to update the backfill pointer in this case, the ChainDAG
|
||||
# backfiller would re-download the entire backfill history.
|
||||
# For now, we'll abort and let the user choose what to do.
|
||||
error "Checkpoint block is newer than head slot - start with a new database or use a checkpoint no more recent than the head",
|
||||
checkpointSlot, checkpointRoot = shortLog(checkpointBlock.root), headSlot
|
||||
quit 1
|
||||
|
||||
if checkpointSlot.is_epoch():
|
||||
found = true
|
||||
break
|
||||
|
||||
id = BlockIdent.init((checkpointSlot.epoch() - 1).start_slot)
|
||||
|
||||
info "Downloaded checkpoint block does not fall on epoch boundary, trying an earlier epoch",
|
||||
checkpointSlot, id
|
||||
|
||||
if not found:
|
||||
# The ChainDAG requires that the tail falls on an epoch boundary, or it
|
||||
# will be unable to load the corresponding state - this could be fixed, but
|
||||
# for now, we ask the user to fix it instead
|
||||
error "A checkpoint block from the first slot of an epoch could not be found with the given block id - pass an epoch slot with a block using the --block-id parameter",
|
||||
blockId
|
||||
quit 1
|
||||
checkpointBlock
|
||||
|
||||
let checkpointSlot = getForkedBlockField(checkpointBlock, slot)
|
||||
if checkpointBlock.root in dbCache.summaries:
|
||||
notice "Checkpoint block is already known, skipping checkpoint state download"
|
||||
|
||||
withBlck(checkpointBlock):
|
||||
dbCache.updateSlots(blck.root, blck.message.slot)
|
||||
|
||||
else:
|
||||
notice "Downloading checkpoint state", restUrl, checkpointSlot
|
||||
|
||||
let
|
||||
state = try:
|
||||
await client.getStateV2(StateIdent.init(checkpointSlot), cfg)
|
||||
except CatchableError as exc:
|
||||
error "Unable to download checkpoint state",
|
||||
error = exc.msg, restUrl, checkpointSlot
|
||||
quit 1
|
||||
|
||||
if isNil(state):
|
||||
notice "No state found at given checkpoint", checkpointSlot
|
||||
quit 1
|
||||
|
||||
withState(state[]):
|
||||
let latest_block_root = state.latest_block_root
|
||||
|
||||
if latest_block_root != checkpointBlock.root:
|
||||
error "Checkpoint state does not match checkpoint block, server error?",
|
||||
blockRoot = shortLog(checkpointBlock.root),
|
||||
blck = shortLog(checkpointBlock),
|
||||
stateBlockRoot = shortLog(latest_block_root)
|
||||
quit 1
|
||||
|
||||
info "Writing checkpoint state",
|
||||
stateRoot = shortLog(state.root)
|
||||
db.putStateRoot(state.latest_block_root(), state.data.slot, state.root)
|
||||
db.putState(state.root, state.data)
|
||||
|
||||
withBlck(checkpointBlock):
|
||||
info "Writing checkpoint block",
|
||||
blockRoot = shortLog(blck.root),
|
||||
blck = shortLog(blck.message)
|
||||
|
||||
db.putBlock(blck.asTrusted())
|
||||
db.putHeadBlock(blck.root)
|
||||
db.putTailBlock(blck.root)
|
||||
|
||||
dbCache.update(blck)
|
||||
|
||||
# Coming this far, we've done what ChainDAGRef.preInit would normally do -
|
||||
# Let's do a sanity check and start backfilling blocks from the trusted node
|
||||
if (let v = ChainDAGRef.isInitialized(db); v.isErr()):
|
||||
error "Database not initialized after checkpoint sync, report bug",
|
||||
err = v.error()
|
||||
quit 1
|
||||
|
||||
let missingSlots = block:
|
||||
var total = 0
|
||||
for i in 0..<checkpointSlot.int:
|
||||
if dbCache.slots[i].isNone():
|
||||
total += 1
|
||||
total
|
||||
|
||||
if missingSlots == 0:
|
||||
info "Database fully backfilled"
|
||||
elif backfill:
|
||||
notice "Downloading historical blocks - you can interrupt this process at any time and it automatically be completed when you start the beacon node",
|
||||
checkpointSlot, missingSlots
|
||||
|
||||
var # Same averaging as SyncManager
|
||||
syncCount = 0
|
||||
processed = 0'u64
|
||||
avgSyncSpeed = 0.0
|
||||
stamp = SyncMoment.now(0)
|
||||
|
||||
# Download several blocks in parallel but process them serially
|
||||
var gets: array[8, Future[Option[ForkedSignedBeaconBlock]]]
|
||||
proc processBlock(fut: Future[Option[ForkedSignedBeaconBlock]], slot: Slot) {.async.} =
|
||||
processed += 1
|
||||
var blck = await fut
|
||||
if blck.isNone():
|
||||
dbCache.slots[slot.int] = some emptyHash
|
||||
return
|
||||
|
||||
let data = blck.get()
|
||||
withBlck(data):
|
||||
debug "Processing",
|
||||
blck = shortLog(blck.message),
|
||||
blockRoot = shortLog(blck.root)
|
||||
|
||||
var childSlot = blck.message.slot + 1
|
||||
while true:
|
||||
if childSlot >= dbCache.slots.lenu64():
|
||||
error "Downloaded block does not match checkpoint history"
|
||||
quit 1
|
||||
|
||||
if not dbCache.slots[childSlot.int].isSome():
|
||||
# Should never happen - we download slots backwards
|
||||
error "Downloaded block does not match checkpoint history"
|
||||
quit 1
|
||||
|
||||
let knownRoot = dbCache.slots[childSlot.int].get()
|
||||
if knownRoot == emptyHash:
|
||||
childSlot += 1
|
||||
continue
|
||||
|
||||
dbCache.summaries.withValue(knownRoot, summary):
|
||||
if summary[].parent_root != blck.root:
|
||||
error "Downloaded block does not match checkpoint history",
|
||||
blockRoot = shortLog(blck.root),
|
||||
expectedRoot = shortLog(summary[].parent_root)
|
||||
quit 1
|
||||
|
||||
break
|
||||
|
||||
# This shouldn't happen - we should have downloaded the child and
|
||||
# updated knownBlocks before here
|
||||
error "Expected child block not found in checkpoint history"
|
||||
quit 1
|
||||
|
||||
if blck.root notin dbCache.summaries:
|
||||
db.putBlock(blck.asTrusted())
|
||||
|
||||
dbCache.update(blck)
|
||||
|
||||
let newStamp = SyncMoment.now(processed)
|
||||
if newStamp.stamp - stamp.stamp > 12.seconds:
|
||||
syncCount += 1
|
||||
|
||||
let
|
||||
remaining = blck.message.slot.int.float
|
||||
slotsPerSec = speed(stamp, newStamp)
|
||||
avgSyncSpeed = avgSyncSpeed + (slotsPerSec - avgSyncSpeed) / float(syncCount)
|
||||
|
||||
info "Backfilling",
|
||||
timeleft = toTimeLeftString(
|
||||
if avgSyncSpeed >= 0.001:
|
||||
Duration.fromFloatSeconds(remaining / avgSyncSpeed)
|
||||
else: InfiniteDuration),
|
||||
avgSyncSpeed,
|
||||
remaining
|
||||
stamp = newStamp
|
||||
|
||||
# Download blocks backwards from the checkpoint slot, skipping the ones we
|
||||
# already have in the database. We'll do a few downloads in parallel which
|
||||
# risks having some redundant downloads going on, but speeds things up
|
||||
for i in 0'u64..<(checkpointSlot.uint64 + gets.lenu64()):
|
||||
if not isNil(gets[int(i mod gets.lenu64)]):
|
||||
await processBlock(
|
||||
gets[int(i mod gets.lenu64)],
|
||||
checkpointSlot + gets.lenu64() - uint64(i))
|
||||
gets[int(i mod gets.lenu64)] = nil
|
||||
|
||||
if i < checkpointSlot:
|
||||
let slot = checkpointSlot - i
|
||||
if dbCache.isKnown(slot):
|
||||
continue
|
||||
|
||||
gets[int(i mod gets.lenu64)] = downloadBlock(slot)
|
||||
else:
|
||||
notice "Database initialized, historical blocks will be backfilled when starting the node",
|
||||
missingSlots
|
||||
|
||||
notice "Done, your beacon node is ready to serve you! Don't forget to check that you're on the canoncial chain by comparing the checkpoint root with other online sources. See https://nimbus.guide/trusted-node-sync.html for more information.",
|
||||
checkpointRoot = checkpointBlock.root
|
||||
|
||||
when isMainModule:
|
||||
let backfill = os.paramCount() > 3 and os.paramStr(4) == "true"
|
||||
|
||||
waitFor doTrustedNodeSync(
|
||||
defaultRuntimeConfig, os.paramStr(1), os.paramStr(2), os.paramStr(3),
|
||||
backfill)
|
|
@ -0,0 +1,75 @@
|
|||
# Trusted node sync
|
||||
|
||||
When you start the beacon node for the first time, it will connect to the beacon chain network and start syncing automatically, a process that can take several days.
|
||||
|
||||
Trusted node sync allows getting started more quickly by syncing with a single trusted node.
|
||||
|
||||
To use trusted node sync, you must have access to a node that you trust completely that exposes the REST HTTP API. Should this node or your connection to it be compromised, your node will not be able to detect an attack, thus it is important that you use a node and a connection that you trust, for example a locally running node or an SSH tunnel.
|
||||
|
||||
## Verifying that you synced the correct chain
|
||||
|
||||
When performing a trusted node sync, you can manually verify that the correct chain was synced by comparing the head hash with other sources, such as friends, forums, chats and web sites. You can retrieve the current head from the node using:
|
||||
|
||||
```
|
||||
# Make sure to enable the `--rest` option when running your node:
|
||||
|
||||
curl http://localhost:5052/eth/v1/beacon/blocks/head/root
|
||||
```
|
||||
|
||||
The `head` root is also printed in the log output at regular intervals.
|
||||
|
||||
## Performing a trusted node sync
|
||||
|
||||
**Prater (testnet)**
|
||||
|
||||
```bash
|
||||
build/nimbus_beacon_node trustedNodeSync --network:prater \
|
||||
--data-dir=build/data/shared_prater_0 \
|
||||
--trusted-node-url=http://localhost:5052
|
||||
```
|
||||
|
||||
**Mainnet**
|
||||
|
||||
```bash
|
||||
build/nimbus_beacon_node trustedNodeSync --network:mainnet \
|
||||
--data-dir=build/data/shared_mainnet_0 \
|
||||
--trusted-node-url=http://localhost:5052
|
||||
```
|
||||
|
||||
**NOTE**
|
||||
|
||||
Because trusted node sync by default copies all blocks via REST, if you use a service such as Infura, you might hit API limits - see the `--backfill` option.
|
||||
|
||||
## Block history
|
||||
|
||||
By default, both the state and the full block history will be downloaded from the trusted node.
|
||||
|
||||
It is possible to get started more quickly by delaying the backfill of the block history using the `--backfill=false` parameter. In this case, the beacon node will first sync to the current head so that it can start performing its duties, then backfill the blocks from the network.
|
||||
|
||||
While backfilling blocks from the network, the node will not be conforming to the protocol and may be disconnected or lose reputation with other nodes.
|
||||
|
||||
## Sync point
|
||||
|
||||
By default, the node will sync up to the latest finalized checkpoint of the node that you're syncing with. You can choose a different sync point using a block hash or a slot number - this block must fall on an epoch boundary:
|
||||
|
||||
```
|
||||
build/nimbus_beacon_node trustedNodeSync --blockId:0x239940f2537f5bbee1a3829f9058f4c04f49897e4d325145153ca89838dfc9e2 ...
|
||||
|
||||
```
|
||||
|
||||
## Sync from checkpoint files
|
||||
|
||||
If you have a state and a block file available, you can instead start the node using the finalized checkpoint options:
|
||||
|
||||
```
|
||||
# Obtain a state and a block from a REST API - these must be in SSZ format:
|
||||
|
||||
wget -O state.32000.ssz http://localhost:5052/eth/v2/debug/beacon/states/32000
|
||||
wget -O block.32000.ssz http://localhost:5052/eth/v2/beacon/blocks/32000
|
||||
|
||||
build/nimbus_beacon_node --data-dir:trusted --finalized-checkpoint-block=block.32000.ssz --finalized-checkpoint-state=state.32000.ssz
|
||||
```
|
||||
|
||||
## Caveats
|
||||
|
||||
A node synced using trusted node sync will not be able to serve historical requests from before the checkpoint. Future versions will resolve this issue.
|
Loading…
Reference in New Issue