# Nimbus
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.push raises: [].}

import
  chronicles,
  metrics,
  chronos/timer,
  std/[strformat, strutils],
  stew/io2,
  beacon_chain/era_db,
  beacon_chain/networking/network_metadata,
  ./config,
  ./common/common,
  ./core/chain,
  ./db/era1_db,
  ./utils/era_helpers

declareGauge nec_import_block_number, "Latest imported block number"
declareCounter nec_imported_blocks, "Blocks processed during import"
declareCounter nec_imported_transactions, "Transactions processed during import"
declareCounter nec_imported_gas, "Gas processed during import"
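
# Set to false by the Ctrl-C handler to request a graceful stop of the import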
var running {.volatile.} = true

proc openCsv(name: string): File =
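  ## Open the CSV statistics file in append mode, writing the column header
  ## if the file is new or empty.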
  try:
    let f = open(name, fmAppend)
    let pos = f.getFileSize()
    if pos == 0:
      f.writeLine("block_number,blocks,slot,txs,gas,time")
    f
  except IOError as exc:
    fatal "Could not open statistics output file", file = name, err = exc.msg
    quit(QuitFailure)

proc getMetadata(networkId: NetworkId): auto =
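  ## Return the chain config, genesis validators root, last pre-merge block
  ## number, and first post-merge slot for the given network.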
  # Network-specific configurations
  # TODO: the merge block number could be fetched from the era1 file instead,
  #       especially if the accumulator is added to the chain metadata
  case networkId
  of MainNet:
    (
      getMetadataForNetwork("mainnet").cfg,
      # Mainnet Validators Root
      Eth2Digest.fromHex(
        "0x4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95"
      ),
      15537393'u64, # Last pre-merge block number
      4700013'u64, # First post-merge slot
    )
  of SepoliaNet:
    (
      getMetadataForNetwork("sepolia").cfg,
      Eth2Digest.fromHex(
        "0xd8ea171f3c94aea21ebc42a1ed61052acf3f9209c00e4efbaaddac09ed9b8078"
      ),
      1450408'u64, # Last pre-merge block number
      115193'u64, # First post-merge slot
    )
  of HoleskyNet:
    (
      getMetadataForNetwork("holesky").cfg,
      Eth2Digest.fromHex(
        "0x9143aa7c615a7f7115e2b6aac319c03529df8242ae705fba9df39b79c59fa8b1"
      ),
      0'u64, # Last pre-merge block number
      0'u64, # First post-merge slot
    )
  else:
    fatal "Unsupported network", network = networkId
    quit(QuitFailure)

template boolFlag(flags, b): PersistBlockFlags =
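  ## Return `flags` when `b` is true, and the empty flag set otherwise.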
  if b:
    flags
  else:
    {}

proc importBlocks*(conf: NimbusConf, com: CommonRef) =
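  ## Import pre-merge blocks from era1 files and post-merge blocks from era
  ## files, resuming from the last block saved in the database.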
  proc controlCHandler() {.noconv.} =
    when defined(windows):
      # workaround for https://github.com/nim-lang/Nim/issues/4057
      setupForeignThreadGc()
    running = false

  setControlCHook(controlCHandler)

  let
    start = com.db.getSavedStateBlockNumber() + 1
    chain = com.newChain()
    (cfg, genesis_validators_root, lastEra1Block, firstSlotAfterMerge) =
      getMetadata(conf.networkId)
    time0 = Moment.now()

  # These variables are used from closures on purpose, so as to place them on
  # the heap rather than the stack
  var
    slot = 1'u64
    time1 = Moment.now() # time at start of chunk
    csv =
      if conf.csvStats.isSome:
        openCsv(conf.csvStats.get())
      else:
        File(nil)
    flags =
      boolFlag({PersistBlockFlag.NoValidation}, conf.noValidation) +
      boolFlag({PersistBlockFlag.NoFullValidation}, not conf.fullValidation) +
      boolFlag(NoPersistBodies, not conf.storeBodies) +
      boolFlag({PersistBlockFlag.NoPersistReceipts}, not conf.storeReceipts) +
      boolFlag({PersistBlockFlag.NoPersistSlotHashes}, not conf.storeSlotHashes)
    blk: Block
    persister = Persister.init(chain, flags)
    cstats: PersistStats # stats at start of chunk

  defer:
    if csv != nil:
      close(csv)

  template blockNumber(): uint64 =
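    ## Number of the next block to be imported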
    start + uint64 persister.stats.blocks

  nec_import_block_number.set(start.int64)

  func f(value: float): string =
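    ## Format a value with roughly four significant digits for compact logging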
    if value >= 1000:
      &"{int(value)}"
    elif value >= 100:
      &"{value:4.1f}"
    elif value >= 10:
      &"{value:4.2f}"
    else:
      &"{value:4.3f}"

  proc persistBlock() =
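    ## Persist the current `blk`, aborting the import on failure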
    persister.persistBlock(blk).isOkOr:
      fatal "Could not persist block", blockNumber = blk.header.number, error
      quit(QuitFailure)

  proc checkpoint(force: bool = false) =
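    ## Write a database checkpoint at chunk boundaries (or when `force` is
    ## set), then log and export statistics for the completed chunk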
    let (blocks, txs, gas) = persister.stats

    if not force and blocks.uint64 mod conf.chunkSize != 0:
      return

    persister.checkpoint().isOkOr:
      fatal "Could not write database checkpoint", error
      quit(QuitFailure)

    let (cblocks, ctxs, cgas) =
      (blocks - cstats.blocks, txs - cstats.txs, gas - cstats.gas)

    if cblocks == 0:
      return

    cstats = persister.stats

    let
      time2 = Moment.now()
      diff1 = (time2 - time1).nanoseconds().float / 1000000000
      diff0 = (time2 - time0).nanoseconds().float / 1000000000

    info "Imported blocks",
      blockNumber,
      slot,
      blocks,
      txs,
      mgas = f(gas.float / 1000000),
      bps = f(cblocks.float / diff1),
      tps = f(ctxs.float / diff1),
      mgps = f(cgas.float / 1000000 / diff1),
      avgBps = f(blocks.float / diff0),
      avgTps = f(txs.float / diff0),
      avgMGps = f(gas.float / 1000000 / diff0),
      elapsed = toString(time2 - time0, 3)

    metrics.set(nec_import_block_number, int64(blockNumber))
    nec_imported_blocks.inc(cblocks)
    nec_imported_transactions.inc(ctxs)
    nec_imported_gas.inc(int64 cgas)

    if csv != nil:
      # In the CSV, we store a line for every chunk of blocks processed so
      # that the file can meaningfully be appended to when restarting the
      # process - this way, each sample is independent
      try:
        csv.writeLine(
          [$blockNumber, $cblocks, $slot, $ctxs, $cgas, $(time2 - time1).nanoseconds()].join(
            ","
          )
        )
        csv.flushFile()
      except IOError as exc:
        warn "Could not write csv", err = exc.msg

    time1 = time2

  # Finds the slot number from which to resume the import process.
  # First it sets the initial lower bound to `firstSlotAfterMerge` plus the
  # number of blocks imported beyond era1. It then iterates over the slots,
  # shrinking the search space at each step: the difference between the target
  # `blockNumber` and the `block_number` of the slot's execution payload is
  # added to `slot`, which pushes the lower bound up and keeps the search
  # short.
  proc updateLastImportedSlot(
      era: EraDB,
      historical_roots: openArray[Eth2Digest],
      historical_summaries: openArray[HistoricalSummary],
      endSlot: Slot,
  ): bool =
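    ## Returns false when the available era files contain no blocks newer than
    ## the ones already imported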
    # Checks whether the Nimbus block number is ahead of the era block number.
    # First we load the last era number and get its first slot number.
    # Since empty slots cannot be predicted, we iterate until a block is found
    # and check whether its number is greater than the current block number.
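    # Era files span 8192 slots, so begin the scan one era before the start of
    # the last era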
    var
      lastEra = era(endSlot - 1)
      startSlot = start_slot(lastEra) - 8192
    debug "Finding slot number to resume import", startSlot, endSlot

    while startSlot < endSlot:
      if not getEthBlockFromEra(
        era, historical_roots, historical_summaries, startSlot, cfg, blk
      ):
        startSlot += 1
        if startSlot == endSlot - 1:
          error "No blocks found in the last era file"
          return false

        continue

      startSlot += 1
      if blk.header.number < blockNumber:
        notice "Available `era` files are already imported",
          stateBlockNumber = blockNumber, eraBlockNumber = blk.header.number
        return false
      break

    if blockNumber > 1:
      # Setting the initial lower bound
      slot = (blockNumber - lastEra1Block) + firstSlotAfterMerge
      debug "Finding slot number after resuming import", slot

      # Block-number-based slot finding
      var clNum = 0'u64

      while clNum < blockNumber:
        if not getEthBlockFromEra(
          era, historical_roots, historical_summaries, Slot(slot), cfg, blk
        ):
          slot += 1
          continue

        clNum = blk.header.number
        # Narrow the gap by raising the lower bound with each iteration
        slot += blockNumber - clNum

      notice "Matched block to slot number", blockNumber, slot
    return true

  if lastEra1Block > 0 and start <= lastEra1Block:
    let
      era1Name =
        case conf.networkId
        of MainNet:
          "mainnet"
        of SepoliaNet:
          "sepolia"
        else:
          raiseAssert "Other networks are unsupported or do not have an era1"
      db = Era1DbRef.init(conf.era1Dir.string, era1Name).valueOr:
        fatal "Could not open era1 database", era1Dir = conf.era1Dir, era1Name, error
        quit(QuitFailure)

    notice "Importing era1 archive",
      start, dataDir = conf.dataDir.string, era1Dir = conf.era1Dir.string

    defer:
      db.dispose()

    proc loadEra1Block(blockNumber: uint64): bool =
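      ## Load the given block from the era1 archive into `blk`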
      db.getEthBlock(blockNumber, blk).isOkOr:
        return false
      true

    while running and persister.stats.blocks.uint64 < conf.maxBlocks and
        blockNumber <= lastEra1Block:
      if not loadEra1Block(blockNumber):
        notice "No more `era1` blocks to import", blockNumber, slot
        break
      persistBlock()
      checkpoint()

  block eraImport:
    if blockNumber > lastEra1Block:
      if not isDir(conf.eraDir.string):
        if blockNumber == 0:
          fatal "`era` directory not found, cannot start import",
            blockNumber, eraDir = conf.eraDir.string
          quit(QuitFailure)
        else:
          notice "`era` directory not found, stopping import at merge boundary",
            blockNumber, eraDir = conf.eraDir.string
          break eraImport

      notice "Importing era archive",
        blockNumber, dataDir = conf.dataDir.string, eraDir = conf.eraDir.string

      let
        eraDB = EraDB.new(cfg, conf.eraDir.string, genesis_validators_root)
        (historical_roots, historical_summaries, endSlot) = loadHistoricalRootsFromEra(
          conf.eraDir.string, cfg
        ).valueOr:
          fatal "Could not load historical summaries",
            eraDir = conf.eraDir.string, error
          quit(QuitFailure)

      # Load the last slot number
      var moreEraAvailable = true
      if blockNumber > lastEra1Block + 1:
        moreEraAvailable = updateLastImportedSlot(
          eraDB, historical_roots.asSeq(), historical_summaries.asSeq(), endSlot
        )

      if slot < firstSlotAfterMerge and firstSlotAfterMerge != 0:
        # On a fresh start, begin at the first post-merge slot; when resuming,
        # `slot` has already been set above
        slot = firstSlotAfterMerge

      proc loadEraBlock(): bool =
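        ## Load the block at the current `slot` from the era archive into `blk`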
        # Separate proc to reduce stack usage of blk
        if not getEthBlockFromEra(
          eraDB,
          historical_roots.asSeq(),
          historical_summaries.asSeq(),
          Slot(slot),
          cfg,
          blk,
        ):
          return false

        true

      while running and moreEraAvailable and
          persister.stats.blocks.uint64 < conf.maxBlocks and slot < endSlot:
        if not loadEraBlock():
          slot += 1
          continue
        slot += 1

        persistBlock()
        checkpoint()

  checkpoint(true)

  notice "Import complete",
    blockNumber,
    slot,
    blocks = persister.stats.blocks,
    txs = persister.stats.txs,
    mgas = f(persister.stats.gas.float / 1000000)