# Nimbus
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import
  ../nimbus/compile_info

import
  std/[os, strutils, net],
  chronicles,
  eth/net/nat,
  metrics,
  metrics/chronicles_support,
  kzg4844/kzg,
  stew/byteutils,
  ./rpc,
  ./version,
  ./constants,
  ./nimbus_desc,
  ./nimbus_import,
  ./core/eip4844,
  ./db/core_db/persistent,
  ./db/storage_types,
  ./sync/handlers,
  ./common/chain_config_hash
from beacon_chain/nimbus_binary_common import setupFileLimits

when defined(evmc_enabled):
  import transaction/evmc_dynamic_loader
## TODO:
## * No IPv6 support
## * No multiple bind addresses support
## * No database support

proc basicServices(nimbus: NimbusNode,
                   conf: NimbusConf,
                   com: CommonRef) =
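  ## Wire up the core block-processing services: the transaction pool, the
  ## forked-chain view and the beacon engine that drives them.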
  nimbus.txPool = TxPoolRef.new(com)

  # The txPool must be informed of the active head so that it can access
  # the latest account state, e.g. sender nonces.
  nimbus.chainRef = ForkedChainRef.init(com)
  doAssert nimbus.txPool.smartHead(nimbus.chainRef.latestHeader, nimbus.chainRef)

  nimbus.beaconEngine = BeaconEngineRef.new(nimbus.txPool, nimbus.chainRef)
proc manageAccounts(nimbus: NimbusNode, conf: NimbusConf) =
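  ## Load keystores from the configured directory and optionally import a
  ## plain private key; a failure of either terminates the client.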
  if string(conf.keyStore).len > 0:
    let res = nimbus.ctx.am.loadKeystores(string conf.keyStore)
    if res.isErr:
      fatal "Load keystore error", msg = res.error()
      quit(QuitFailure)

  if string(conf.importKey).len > 0:
    let res = nimbus.ctx.am.importPrivateKey(string conf.importKey)
    if res.isErr:
      fatal "Import private key error", msg = res.error()
      quit(QuitFailure)

proc setupP2P(nimbus: NimbusNode, conf: NimbusConf,
              com: CommonRef) =
  ## Create the P2P server
  let kpres = nimbus.ctx.getNetKeys(conf.netKey, conf.dataDir.string)
  if kpres.isErr:
    fatal "Get network keys error", msg = kpres.error
    quit(QuitFailure)

  let keypair = kpres.get()
|
2024-09-29 12:37:09 +00:00
|
|
|
var address = enode.Address(
|
2021-09-11 14:58:01 +00:00
|
|
|
ip: conf.listenAddress,
|
|
|
|
tcpPort: conf.tcpPort,
|
|
|
|
udpPort: conf.udpPort
|
|
|
|
)

  if conf.nat.hasExtIp:
    # any required port redirection is assumed to be done by hand
    address.ip = conf.nat.extIp
  else:
    # automated NAT traversal
    let extIP = getExternalIP(conf.nat.nat)
|
2019-04-17 23:17:06 +00:00
|
|
|
# This external IP only appears in the logs, so don't worry about dynamic
|
|
|
|
# IPs. Don't remove it either, because the above call does initialisation
|
|
|
|
# and discovery for NAT-related objects.
|
2019-04-17 01:56:28 +00:00
|
|
|
if extIP.isSome:
|
|
|
|
address.ip = extIP.get()
|
|
|
|
let extPorts = redirectPorts(tcpPort = address.tcpPort,
|
2021-09-11 14:58:01 +00:00
|
|
|
udpPort = address.udpPort,
|
2022-03-27 11:21:15 +00:00
|
|
|
description = NimbusName & " " & NimbusVersion)
|
2019-04-17 01:56:28 +00:00
|
|
|
if extPorts.isSome:
|
|
|
|
(address.tcpPort, address.udpPort) = extPorts.get()
|
2018-06-20 17:27:32 +00:00
|
|
|
|
2022-03-16 08:01:35 +00:00
|
|
|
let bootstrapNodes = conf.getBootNodes()
|
|
|
|
|
|
|
|
nimbus.ethNode = newEthereumNode(
|
2022-10-10 02:31:28 +00:00
|
|
|
keypair, address, conf.networkId, conf.agentString,
|
2022-03-16 08:01:35 +00:00
|
|
|
addAllCapabilities = false, minPeers = conf.maxPeers,
|
|
|
|
bootstrapNodes = bootstrapNodes,
|
|
|
|
bindUdpPort = conf.udpPort, bindTcpPort = conf.tcpPort,
|
2022-07-27 16:07:54 +00:00
|
|
|
bindIp = conf.listenAddress,
|
|
|
|
rng = nimbus.ctx.rng)

  # Add protocol capabilities
  nimbus.ethNode.addEthHandlerCapability(
    nimbus.ethNode.peerPool, nimbus.chainRef, nimbus.txPool)

  # Always initialise beacon syncer
  nimbus.beaconSyncRef = BeaconSyncRef.init(
    nimbus.ethNode, nimbus.chainRef, conf.maxPeers, conf.beaconChunkSize)
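  # Note: the syncer is only started later, in run(), and only when peers
  # are allowed and the engine API server is enabled.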
|
2022-05-09 14:04:48 +00:00
|
|
|
|
2021-09-16 15:59:46 +00:00
|
|
|
# Connect directly to the static nodes
|
|
|
|
let staticPeers = conf.getStaticPeers()
|
2022-08-26 09:36:04 +00:00
|
|
|
if staticPeers.len > 0:
|
|
|
|
nimbus.peerManager = PeerManagerRef.new(
|
|
|
|
nimbus.ethNode.peerPool,
|
|
|
|
conf.reconnectInterval,
|
|
|
|
conf.reconnectMaxRetry,
|
|
|
|
staticPeers
|
|
|
|
)
|
|
|
|
nimbus.peerManager.start()

  # Start Eth node
  if conf.maxPeers > 0:
    nimbus.networkLoop = nimbus.ethNode.connectToNetwork(
      enableDiscovery = conf.discovery != DiscoveryType.None,
      waitForPeers = true)

proc setupMetrics(nimbus: NimbusNode, conf: NimbusConf) =
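  ## Set up the optional periodic metrics logger and the metrics HTTP
  ## server, as selected by the configuration.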
|
2021-09-16 15:59:46 +00:00
|
|
|
# metrics logging
|
|
|
|
if conf.logMetricsEnabled:
|
|
|
|
# https://github.com/nim-lang/Nim/issues/17369
|
2023-05-10 16:04:35 +00:00
|
|
|
var logMetrics: proc(udata: pointer) {.gcsafe, raises: [].}
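    # The closure below re-arms itself via setTimer, so a metrics snapshot
    # is logged every logMetricsInterval seconds for the process lifetime.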
    logMetrics = proc(udata: pointer) =
      {.gcsafe.}:
        let registry = defaultRegistry
        info "metrics", registry
      discard setTimer(Moment.fromNow(conf.logMetricsInterval.seconds), logMetrics)
    discard setTimer(Moment.fromNow(conf.logMetricsInterval.seconds), logMetrics)

  # metrics server
  if conf.metricsEnabled:
    info "Starting metrics HTTP server", address = conf.metricsAddress, port = conf.metricsPort
    let res = MetricsHttpServerRef.new($conf.metricsAddress, conf.metricsPort)
    if res.isErr:
      fatal "Failed to create metrics server", msg=res.error
      quit(QuitFailure)

    nimbus.metricsServer = res.get
    waitFor nimbus.metricsServer.start()

proc preventLoadingDataDirForTheWrongNetwork(db: CoreDbRef; conf: NimbusConf) =
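  ## Guard against reusing a data directory that was initialised for a
  ## different network: a hash of the network id and params is written to
  ## the database on first start and checked on every later start.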
  let
    kvt = db.ctx.getKvt()
    calculatedId = calcHash(conf.networkId, conf.networkParams)
    dataDirIdBytes = kvt.get(dataDirIdKey().toOpenArray).valueOr:
      # an empty database
      info "Writing data dir ID", ID=calculatedId
      kvt.put(dataDirIdKey().toOpenArray, calculatedId.data).isOkOr:
        fatal "Cannot write data dir ID", ID=calculatedId
        quit(QuitFailure)
      return
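
  # Reaching this point means an ID was already stored; compare it against
  # the one derived from the current configuration.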
  if calculatedId.data != dataDirIdBytes:
    fatal "Data dir already initialized with other network configuration",
      get=dataDirIdBytes.toHex,
      expected=calculatedId
    quit(QuitFailure)

proc run(nimbus: NimbusNode, conf: NimbusConf) =
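  ## Configure logging, database and services, then either perform a
  ## one-shot block import or run the main event loop until shutdown.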
|
2021-09-16 15:59:46 +00:00
|
|
|
## logging
|
|
|
|
setLogLevel(conf.logLevel)
|
|
|
|
if conf.logFile.isSome:
|
|
|
|
let logFile = string conf.logFile.get()
|
|
|
|
defaultChroniclesStream.output.outFile = nil # to avoid closing stdout
|
|
|
|
discard defaultChroniclesStream.output.open(logFile, fmAppend)
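
  # Raise the process file-descriptor limit before opening the database; the
  # RocksDB backend alone may keep thousands of SST files open.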
  setupFileLimits()

  info "Launching execution client",
    version = FullVersionStr,
    conf

  when defined(evmc_enabled):
    evmcSetLibraryPath(conf.evm)
  # Trusted setup is needed for processing Cancun+ blocks
  if conf.trustedSetupFile.isSome:
    let fileName = conf.trustedSetupFile.get()
    let res = loadTrustedSetup(fileName, 0)
    if res.isErr:
      fatal "Cannot load Kzg trusted setup from file", msg=res.error
      quit(QuitFailure)
  else:
    let res = loadKzgTrustedSetup()
    if res.isErr:
      fatal "Cannot load baked in Kzg trusted setup", msg=res.error
      quit(QuitFailure)
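
  # Illustrative only (the exact flag spelling is defined by NimbusConf): a
  # custom setup would typically be supplied on the command line, e.g.
  #   nimbus --trusted-setup-file:/path/to/trusted_setup.txt
  # while the setup baked into the binary is used when the option is absent.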

  createDir(string conf.dataDir)
  let coreDB =
    # Resolve statically for database type
    case conf.chainDbMode:
    of Aristo, AriPrune:
      AristoDbRocks.newCoreDbRef(
        string conf.dataDir,
        conf.dbOptions(noKeyCache = conf.cmd == NimbusCmd.`import`))
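  # Both modes use the RocksDB-backed Aristo store; AriPrune additionally
  # prunes history, which is reflected in the pruneHistory flag below.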

  preventLoadingDataDirForTheWrongNetwork(coreDB, conf)
  setupMetrics(nimbus, conf)

  let com = CommonRef.new(
    db = coreDB,
    pruneHistory = (conf.chainDbMode == AriPrune),
    networkId = conf.networkId,
    params = conf.networkParams)

  if conf.extraData.len > 32:
    warn "ExtraData exceeds the 32-byte limit, truncating",
      extraData=conf.extraData,
      len=conf.extraData.len

  com.extraData = conf.extraData

  defer:
    com.db.finish()

  case conf.cmd
  of NimbusCmd.`import`:
    importBlocks(conf, com)
  else:
    basicServices(nimbus, conf, com)
    manageAccounts(nimbus, conf)
    setupP2P(nimbus, conf, com)
    setupRpc(nimbus, conf, com)

    if conf.maxPeers > 0 and conf.engineApiServerEnabled():
      # Not starting syncer if there is definitely no way to run it. This
      # avoids polling (i.e. waiting for instructions) and some logging.
      if not nimbus.beaconSyncRef.start():
        nimbus.beaconSyncRef = BeaconSyncRef(nil)

    if nimbus.state == NimbusState.Starting:
      # it might have been set to "Stopping" with Ctrl+C
      nimbus.state = NimbusState.Running

    # Main event loop
    while nimbus.state == NimbusState.Running:
      try:
        poll()
      except CatchableError as e:
        debug "Exception in poll()", exc = e.name, err = e.msg
        discard e # silence warning when chronicles not activated

    # Stop loop
    waitFor nimbus.stop(conf)

when isMainModule:
  var nimbus = NimbusNode(state: NimbusState.Starting, ctx: newEthContext())

  ## Ctrl+C handling
  proc controlCHandler() {.noconv.} =
    when defined(windows):
      # workaround for https://github.com/nim-lang/Nim/issues/4057
      setupForeignThreadGc()
    nimbus.state = NimbusState.Stopping
    echo "\nCtrl+C pressed. Waiting for a graceful shutdown."
  setControlCHook(controlCHandler)

  ## Show logs on stdout until we get the user's logging choice
  discard defaultChroniclesStream.output.open(stdout)

  ## Processing command line arguments
  let conf = makeConfig()

  nimbus.run(conf)