# Nimbus
# Copyright (c) 2024-2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.push raises: [].}

import
  chronicles,
  metrics,
  chronos/timer,
  std/[strformat, strutils],
  stew/io2,
  beacon_chain/era_db,
  beacon_chain/networking/network_metadata,
  ./config,
  ./common/common,
  ./core/chain,
  ./db/era1_db,
  ./utils/era_helpers

declareGauge nec_import_block_number, "Latest imported block number"

declareCounter nec_imported_blocks, "Blocks processed during import"

declareCounter nec_imported_transactions, "Transactions processed during import"

declareCounter nec_imported_gas, "Gas processed during import"

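# Set to false from the Ctrl-C handler below to request a graceful shutdown of
# the import loops; marked {.volatile.} since it is written from a signal
# handler context.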
var running {.volatile.} = true

proc openCsv(name: string): File =
  try:
    let f = open(name, fmAppend)
    let pos = f.getFileSize()
    if pos == 0:
      f.writeLine("block_number,blocks,slot,txs,gas,time")
    f
  except IOError as exc:
    fatal "Could not open statistics output file", file = name, err = exc.msg
    quit(QuitFailure)

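# Returns a (cfg, genesis_validators_root, lastEra1Block, firstSlotAfterMerge)
# tuple for the given network, matching the destructuring in `importBlocks`
# below.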
proc getMetadata(networkId: NetworkId): auto =
  # Network-specific configurations
  # TODO: the merge block number could be fetched from the era1 file instead,
  #       especially if the accumulator is added to the chain metadata
  case networkId
  of MainNet:
    (
      getMetadataForNetwork("mainnet").cfg,
      # Mainnet Validators Root
      Eth2Digest.fromHex(
        "0x4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95"
      ),
      15537393'u64, # Last pre-merge block
      4700013'u64, # First post-merge slot
    )
  of SepoliaNet:
    (
      getMetadataForNetwork("sepolia").cfg,
      Eth2Digest.fromHex(
        "0xd8ea171f3c94aea21ebc42a1ed61052acf3f9209c00e4efbaaddac09ed9b8078"
      ),
      1450408'u64, # Last pre-merge block number
      115193'u64, # First post-merge slot
    )
  of HoleskyNet:
    (
      getMetadataForNetwork("holesky").cfg,
      Eth2Digest.fromHex(
        "0x9143aa7c615a7f7115e2b6aac319c03529df8242ae705fba9df39b79c59fa8b1"
      ),
      0'u64, # Last pre-merge block number
      0'u64, # First post-merge slot
    )
  else:
    fatal "Unsupported network", network = networkId
    quit(QuitFailure)

template boolFlag(flags, b): PersistBlockFlags =
  if b:
    flags
  else:
    {}

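# Imports pre-merge blocks from `era1` files and post-merge blocks from `era`
# files, resuming from the last saved state and checkpointing the database
# every `conf.chunkSize` blocks.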
proc importBlocks*(conf: NimbusConf, com: CommonRef) =
  proc controlCHandler() {.noconv.} =
    when defined(windows):
      # workaround for https://github.com/nim-lang/Nim/issues/4057
      setupForeignThreadGc()
    running = false

  setControlCHook(controlCHandler)

  let
    start = com.db.baseTxFrame().getSavedStateBlockNumber() + 1
    (cfg, genesis_validators_root, lastEra1Block, firstSlotAfterMerge) =
      getMetadata(conf.networkId)
    time0 = Moment.now()

  # These variables are used from closures on purpose, so as to place them on
  # the heap rather than the stack
  var
    slot = 1'u64
    time1 = Moment.now() # time at start of chunk
    csv =
      if conf.csvStats.isSome:
        openCsv(conf.csvStats.get())
      else:
        File(nil)
    flags =
      boolFlag({PersistBlockFlag.NoValidation}, conf.noValidation) +
      boolFlag({PersistBlockFlag.NoFullValidation}, not conf.fullValidation) +
      boolFlag(NoPersistBodies, not conf.storeBodies) +
      boolFlag({PersistBlockFlag.NoPersistReceipts}, not conf.storeReceipts) +
      boolFlag({PersistBlockFlag.NoPersistSlotHashes}, not conf.storeSlotHashes)
    blk: Block
    persister = Persister.init(com, flags)
    cstats: PersistStats # stats at start of chunk

  defer:
    if csv != nil:
      close(csv)

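  # `persister.stats.blocks` grows as blocks are persisted, so this template
  # always evaluates to the current head of the import.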
  template blockNumber(): uint64 =
    start + uint64 persister.stats.blocks

  nec_import_block_number.set(start.int64)

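  # Format a float to roughly four significant digits for compact log output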
  func f(value: float): string =
    if value >= 1000:
      &"{int(value)}"
    elif value >= 100:
      &"{value:4.1f}"
    elif value >= 10:
      &"{value:4.2f}"
    else:
      &"{value:4.3f}"

  proc persistBlock() =
    persister.persistBlock(blk).isOkOr:
      fatal "Could not persist block", blockNumber = blk.header.number, error
      quit(QuitFailure)

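  # Writes a database checkpoint every `conf.chunkSize` blocks (or when
  # forced) and logs per-chunk and cumulative import statistics.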
  proc checkpoint(force: bool = false) =
    let (blocks, txs, gas) = persister.stats

    if not force and blocks.uint64 mod conf.chunkSize != 0:
      return

    persister.checkpoint().isOkOr:
      fatal "Could not write database checkpoint", error
      quit(QuitFailure)

    let (cblocks, ctxs, cgas) =
      (blocks - cstats.blocks, txs - cstats.txs, gas - cstats.gas)

    if cblocks == 0:
      return

    cstats = persister.stats

    let
      time2 = Moment.now()
      diff1 = (time2 - time1).nanoseconds().float / 1000000000
      diff0 = (time2 - time0).nanoseconds().float / 1000000000

info "Imported blocks",
|
|
|
|
blockNumber,
|
2024-12-18 13:21:20 +01:00
|
|
|
slot,
|
|
|
|
blocks,
|
2024-07-09 18:58:01 +05:30
|
|
|
txs,
|
|
|
|
mgas = f(gas.float / 1000000),
|
2024-12-18 13:21:20 +01:00
|
|
|
bps = f(cblocks.float / diff1),
|
|
|
|
tps = f(ctxs.float / diff1),
|
|
|
|
mgps = f(cgas.float / 1000000 / diff1),
|
|
|
|
avgBps = f(blocks.float / diff0),
|
2024-07-09 18:58:01 +05:30
|
|
|
avgTps = f(txs.float / diff0),
|
|
|
|
avgMGps = f(gas.float / 1000000 / diff0),
|
2024-10-03 13:42:24 +02:00
|
|
|
elapsed = toString(time2 - time0, 3)
|
2024-07-09 18:58:01 +05:30
|
|
|
|
|
|
|
    metrics.set(nec_import_block_number, int64(blockNumber))
    nec_imported_blocks.inc(cblocks)
    nec_imported_transactions.inc(ctxs)
    nec_imported_gas.inc(int64 cgas)

    if csv != nil:
      # In the CSV, we store a line for every chunk of blocks processed so
      # that the file can meaningfully be appended to when restarting the
      # process - this way, each sample is independent
      try:
        csv.writeLine(
          [$blockNumber, $cblocks, $slot, $ctxs, $cgas, $(time2 - time1).nanoseconds()].join(
            ","
          )
        )
        csv.flushFile()
      except IOError as exc:
        warn "Could not write csv", err = exc.msg

    time1 = time2

  # Finds the slot number from which to resume the import process.
  # First it sets the initial lower bound to `firstSlotAfterMerge` plus the
  # number of blocks imported after era1. It then iterates over the slots to
  # find the current slot number, shrinking the search space along the way by
  # taking the difference between `blockNumber` and the `block_number` from
  # the slot's executionPayload and adding it to the imported slot. This
  # pushes the lower bound up and makes the search much smaller.
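  # Worked example with the mainnet numbers from `getMetadata`: resuming at
  # block 16_000_000 gives an initial slot guess of
  # (16_000_000 - 15_537_393) + 4_700_013 = 5_162_620; empty or missed slots
  # then push the guess upward until the block numbers match.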
  proc updateLastImportedSlot(
      era: EraDB,
      historical_roots: openArray[Eth2Digest],
      historical_summaries: openArray[HistoricalSummary],
      endSlot: Slot,
  ): bool =
    # Checks whether the Nimbus block number is ahead of the era block number.
    # First we load the last era number and get its first slot number.
    # Since slot emptiness cannot be predicted, we iterate over the slots to
    # find a block and check whether its block number is greater than the
    # current block number.
    var
      lastEra = era(endSlot - 1)
      startSlot = start_slot(lastEra) - 8192
    debug "Finding slot number to resume import", startSlot, endSlot

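    # An era file spans 8192 slots; the scan starts one era before `endSlot`
    # and walks forward until a block is found.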
    while startSlot < endSlot:
      if not getEthBlockFromEra(
        era, historical_roots, historical_summaries, startSlot, cfg, blk
      ):
        startSlot += 1
        if startSlot == endSlot - 1:
          error "No blocks found in the last era file"
          return false

        continue

      startSlot += 1
      if blk.header.number < blockNumber:
        notice "Available `era` files are already imported",
          stateBlockNumber = blockNumber, eraBlockNumber = blk.header.number
        return false
      break

    if blockNumber > 1:
      # Setting the initial lower bound
      slot = (blockNumber - lastEra1Block) + firstSlotAfterMerge
      debug "Finding slot number after resuming import", slot

      # Block-number-based slot finding
      var clNum = 0'u64

      while clNum < blockNumber:
        if not getEthBlockFromEra(
          era, historical_roots, historical_summaries, Slot(slot), cfg, blk
        ):
          slot += 1
          continue

        clNum = blk.header.number
        # The remaining search space shrinks with each iteration: skip ahead
        # by the distance between the target block and the block just found
        slot += blockNumber - clNum

      notice "Matched block to slot number", blockNumber, slot
    return true

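  # Import the pre-merge chain from `era1` files first, while the resume
  # point is still at or below the merge block.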
  if lastEra1Block > 0 and start <= lastEra1Block:
    let
      era1Name =
        case conf.networkId
        of MainNet:
          "mainnet"
        of SepoliaNet:
          "sepolia"
        else:
          raiseAssert "Other networks are unsupported or do not have an era1"
      db = Era1DbRef.init(conf.era1Dir.string, era1Name).valueOr:
        fatal "Could not open era1 database",
          era1Dir = conf.era1Dir, era1Name = era1Name, error = error
        quit(QuitFailure)

notice "Importing era1 archive",
|
|
|
|
start, dataDir = conf.dataDir.string, era1Dir = conf.era1Dir.string
|
|
|
|
|
|
|
|
defer:
|
|
|
|
db.dispose()
|
|
|
|
|
|
|
|
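    # Separate proc to reduce stack usage, mirroring `loadEra1Block` in the
    # `era` import section below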
    proc loadEraBlock(blockNumber: uint64): bool =
      db.getEthBlock(blockNumber, blk).isOkOr:
        return false
      true

    while running and persister.stats.blocks.uint64 < conf.maxBlocks and
        blockNumber <= lastEra1Block:
      if not loadEraBlock(blockNumber):
        notice "No more `era1` blocks to import", blockNumber, slot
        break
      persistBlock()
      checkpoint()

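  # Post-merge import from `era` files; the `era1Import` block label is only
  # used to break out early when the `era` directory is missing.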
  block era1Import:
    if blockNumber > lastEra1Block:
      if not isDir(conf.eraDir.string):
        if blockNumber == 0:
          fatal "`era` directory not found, cannot start import",
            blockNumber, eraDir = conf.eraDir.string
          quit(QuitFailure)
        else:
          notice "`era` directory not found, stopping import at merge boundary",
            blockNumber, eraDir = conf.eraDir.string
          break era1Import

notice "Importing era archive",
|
2024-10-04 07:07:50 +05:30
|
|
|
blockNumber, dataDir = conf.dataDir.string, eraDir = conf.eraDir.string
|
2024-07-09 18:58:01 +05:30
|
|
|
|
|
|
|
      let
        eraDB = EraDB.new(cfg, conf.eraDir.string, genesis_validators_root)
        (historical_roots, historical_summaries, endSlot) = loadHistoricalRootsFromEra(
          conf.eraDir.string, cfg
        ).valueOr:
          fatal "Could not load historical summaries",
            eraDir = conf.eraDir.string, error
          quit(QuitFailure)

      # Load the last slot number
      var moreEraAvailable = true
      if blockNumber > lastEra1Block + 1:
        moreEraAvailable = updateLastImportedSlot(
          eraDB, historical_roots.asSeq(), historical_summaries.asSeq(), endSlot
        )

      if slot < firstSlotAfterMerge and firstSlotAfterMerge != 0:
        # if resuming the import, we do not update the slot
        slot = firstSlotAfterMerge

      proc loadEra1Block(): bool =
        # Separate proc to reduce stack usage of blk
        if not getEthBlockFromEra(
          eraDB,
          historical_roots.asSeq(),
          historical_summaries.asSeq(),
          Slot(slot),
          cfg,
          blk,
        ):
          return false

        true

      while running and moreEraAvailable and
          persister.stats.blocks.uint64 < conf.maxBlocks and slot < endSlot:
        if not loadEra1Block():
          slot += 1
          continue
        slot += 1

        persistBlock()
        checkpoint()

  # If there were no blocks written, we will not have loaded the block number
  # and therefore should not call checkpoint().
  if 0 < persister.stats.blocks:
    checkpoint(true)

notice "Import complete",
|
|
|
|
blockNumber,
|
|
|
|
slot,
|
|
|
|
blocks = persister.stats.blocks,
|
|
|
|
txs = persister.stats.txs,
|
|
|
|
mgas = f(persister.stats.gas.float / 1000000)
|