nimbus-eth1/nimbus/nimbus.nim

# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import
  ../nimbus/vm_compile_info

import
  std/[os, strutils, net],
  chronicles,
  chronos,
  eth/[keys, net/nat],
  eth/p2p as eth_p2p,
  json_rpc/rpcserver,
  metrics,
  metrics/[chronos_httpserver, chronicles_support],
  stew/shims/net as stewNet,
  websock/websock as ws,
  kzg4844/kzg_ex as kzg,
  ./core/eip4844,
  "."/[config, constants, version, rpc, common],
  ./db/[core_db/persistent, select_backend],
  ./graphql/ethapi,
  ./core/[chain, sealer, clique/clique_desc,
    clique/clique_sealer, tx_pool, block_import],
  ./beacon/beacon_engine,
  ./sync/[beacon, legacy, full, protocol, snap, stateless,
    protocol/les_protocol, handlers, peers],
  ./evm/async/data_sources/json_rpc_data_source

when defined(evmc_enabled):
  import transaction/evmc_dynamic_loader

## TODO:
## * No IPv6 support
## * No multiple bind addresses support
## * No database support
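
# NimbusState tracks the coarse lifecycle of the node, while NimbusNode
# gathers the long-lived service handles (RPC servers, sync workers, tx
# pool, sealing engine) so that start/stop can manage them in one place.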
type
  NimbusState = enum
    Starting, Running, Stopping

  NimbusNode = ref object
    rpcServer: RpcHttpServer
    engineApiServer: RpcHttpServer
    engineApiWsServer: RpcWebSocketServer
    ethNode: EthereumNode
    state: NimbusState
    graphqlServer: GraphqlHttpServerRef
    wsRpcServer: RpcWebSocketServer
    sealingEngine: SealingEngineRef
    ctx: EthContext
    chainRef: ChainRef
    txPool: TxPoolRef
    networkLoop: Future[void]
    peerManager: PeerManagerRef
    legaSyncRef: LegacySyncRef
    snapSyncRef: SnapSyncRef
    fullSyncRef: FullSyncRef
    beaconSyncRef: BeaconSyncRef
    statelessSyncRef: StatelessSyncRef
    beaconEngine: BeaconEngineRef

proc importBlocks(conf: NimbusConf, com: CommonRef) =
  if string(conf.blocksFile).len > 0:
    # success or not, we quit after importing blocks
    if not importRlpBlock(string conf.blocksFile, com):
      quit(QuitFailure)
    else:
      quit(QuitSuccess)
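
# basicServices wires up the tx pool, the chain (block processing) and the
# beacon engine; these are needed regardless of which servers are enabled.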
proc basicServices(nimbus: NimbusNode,
                   conf: NimbusConf,
                   com: CommonRef) =
  nimbus.txPool = TxPoolRef.new(com, conf.engineSigner)

  # txPool must be informed of active head
  # so it can know the latest account state
  # e.g. sender nonce, etc
  let head = com.db.getCanonicalHead()
  doAssert nimbus.txPool.smartHead(head)

  # chainRef: some name to avoid module-name/field/function misunderstandings
  nimbus.chainRef = newChain(com)
  if conf.verifyFrom.isSome:
    let verifyFrom = conf.verifyFrom.get()
    nimbus.chainRef.extraValidation = 0 < verifyFrom
    nimbus.chainRef.verifyFrom = verifyFrom

  nimbus.beaconEngine = BeaconEngineRef.new(nimbus.txPool, nimbus.chainRef)
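
# manageAccounts loads the keystore directory and/or imports a single
# private key, exiting on failure since later stages may rely on them.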
proc manageAccounts(nimbus: NimbusNode, conf: NimbusConf) =
  if string(conf.keyStore).len > 0:
    let res = nimbus.ctx.am.loadKeystores(string conf.keyStore)
    if res.isErr:
      fatal "Load keystore error", msg = res.error()
      quit(QuitFailure)

  if string(conf.importKey).len > 0:
    let res = nimbus.ctx.am.importPrivateKey(string conf.importKey)
    if res.isErr:
      fatal "Import private key error", msg = res.error()
      quit(QuitFailure)
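
# setupP2P resolves the external address (manually configured or via NAT
# traversal), instantiates the devp2p node with the requested protocol
# capabilities (eth/les/snap), prepares the configured sync mode and
# finally connects to the network.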
proc setupP2P(nimbus: NimbusNode, conf: NimbusConf,
              com: CommonRef, protocols: set[ProtocolFlag]) =
  ## Creating P2P Server
  let kpres = nimbus.ctx.getNetKeys(conf.netKey, conf.dataDir.string)
  if kpres.isErr:
    fatal "Get network keys error", msg = kpres.error
    quit(QuitFailure)

  let keypair = kpres.get()
  var address = Address(
    ip: conf.listenAddress,
    tcpPort: conf.tcpPort,
    udpPort: conf.udpPort
  )

  if conf.nat.hasExtIp:
    # any required port redirection is assumed to be done by hand
    address.ip = conf.nat.extIp
  else:
    # automated NAT traversal
    let extIP = getExternalIP(conf.nat.nat)
    # This external IP only appears in the logs, so don't worry about dynamic
    # IPs. Don't remove it either, because the above call does initialisation
    # and discovery for NAT-related objects.
    if extIP.isSome:
      address.ip = extIP.get()
      let extPorts = redirectPorts(tcpPort = address.tcpPort,
                                   udpPort = address.udpPort,
                                   description = NimbusName & " " & NimbusVersion)
      if extPorts.isSome:
        (address.tcpPort, address.udpPort) = extPorts.get()

  let bootstrapNodes = conf.getBootNodes()

  nimbus.ethNode = newEthereumNode(
    keypair, address, conf.networkId, conf.agentString,
    addAllCapabilities = false, minPeers = conf.maxPeers,
    bootstrapNodes = bootstrapNodes,
    bindUdpPort = conf.udpPort, bindTcpPort = conf.tcpPort,
    bindIp = conf.listenAddress,
    rng = nimbus.ctx.rng)

  # Add protocol capabilities based on protocol flags
  for w in protocols:
    case w: # handle all possibilities
    of ProtocolFlag.Eth:
      nimbus.ethNode.addEthHandlerCapability(
        nimbus.ethNode.peerPool,
        nimbus.chainRef,
        nimbus.txPool)
    of ProtocolFlag.Les:
      nimbus.ethNode.addCapability les
    of ProtocolFlag.Snap:
      nimbus.ethNode.addSnapHandlerCapability(
        nimbus.ethNode.peerPool,
        nimbus.chainRef)

  # Cannot do without minimal `eth` capability
  if ProtocolFlag.Eth notin protocols:
    nimbus.ethNode.addEthHandlerCapability(
      nimbus.ethNode.peerPool,
      nimbus.chainRef)

  # Early-initialise "--snap-sync" before starting any network connections.
  block:
    let
      exCtrlFile = if conf.syncCtrlFile.isNone: none(string)
                   else: some(conf.syncCtrlFile.get)
      tickerOK = conf.logLevel in {
        LogLevel.INFO, LogLevel.DEBUG, LogLevel.TRACE}

    case conf.syncMode:
    of SyncMode.Full:
      nimbus.fullSyncRef = FullSyncRef.init(
        nimbus.ethNode, nimbus.chainRef, nimbus.ctx.rng, conf.maxPeers,
        tickerOK, exCtrlFile)
    of SyncMode.Snap:
      # Minimal capability needed for sync only
      if ProtocolFlag.Snap notin protocols:
        nimbus.ethNode.addSnapHandlerCapability(
          nimbus.ethNode.peerPool)
      nimbus.snapSyncRef = SnapSyncRef.init(
        nimbus.ethNode, nimbus.chainRef, nimbus.ctx.rng, conf.maxPeers,
        tickerOK, exCtrlFile)
    of SyncMode.Stateless:
      # FIXME-Adam: what needs to go here?
      nimbus.statelessSyncRef = StatelessSyncRef.init()
    of SyncMode.Default:
      if com.forkGTE(MergeFork):
        nimbus.beaconSyncRef = BeaconSyncRef.init(
          nimbus.ethNode, nimbus.chainRef, nimbus.ctx.rng, conf.maxPeers,
        )
      else:
        nimbus.legaSyncRef = LegacySyncRef.new(
          nimbus.ethNode, nimbus.chainRef)

  # Connect directly to the static nodes
  let staticPeers = conf.getStaticPeers()
  if staticPeers.len > 0:
    nimbus.peerManager = PeerManagerRef.new(
      nimbus.ethNode.peerPool,
      conf.reconnectInterval,
      conf.reconnectMaxRetry,
      staticPeers
    )
    nimbus.peerManager.start()

  # Start Eth node
  if conf.maxPeers > 0:
    var waitForPeers = true
    case conf.syncMode:
    of SyncMode.Snap, SyncMode.Stateless:
      waitForPeers = false
    of SyncMode.Full, SyncMode.Default:
      discard
    nimbus.networkLoop = nimbus.ethNode.connectToNetwork(
      enableDiscovery = conf.discovery != DiscoveryType.None,
      waitForPeers = waitForPeers)
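
# Only used in stateless mode: state data is fetched on demand from peers
# and a remote RPC endpoint instead of the local database.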
proc maybeStatelessAsyncDataSource*(nimbus: NimbusNode, conf: NimbusConf): Option[AsyncDataSource] =
  if conf.syncMode == SyncMode.Stateless:
    let rpcClient = waitFor(makeAnRpcClient(conf.statelessModeDataSourceUrl))
    let asyncDataSource = realAsyncDataSource(nimbus.ethNode.peerPool, rpcClient, false)
    some(asyncDataSource)
  else:
    none[AsyncDataSource]()
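
# localServices brings up everything that serves local clients: HTTP and
# WebSocket RPC, GraphQL, the engine API, the clique sealer (pre-merge
# networks only) and the metrics server.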
proc localServices(nimbus: NimbusNode, conf: NimbusConf,
                   com: CommonRef, protocols: set[ProtocolFlag]) =
  # metrics logging
  if conf.logMetricsEnabled:
    # https://github.com/nim-lang/Nim/issues/17369
    var logMetrics: proc(udata: pointer) {.gcsafe, raises: [].}
    logMetrics = proc(udata: pointer) =
      {.gcsafe.}:
        let registry = defaultRegistry
      info "metrics", registry
      discard setTimer(Moment.fromNow(conf.logMetricsInterval.seconds), logMetrics)
    discard setTimer(Moment.fromNow(conf.logMetricsInterval.seconds), logMetrics)

  # Create or load the JWT shared secret used to authenticate engine API calls
  let jwtKey = block:
    let rc = nimbus.ctx.rng.jwtSharedSecret(conf)
    if rc.isErr:
      fatal "Failed create or load shared secret",
        msg = $(rc.unsafeError) # avoid side effects
      quit(QuitFailure)
    rc.value

  let allowedOrigins = conf.getAllowedOrigins()

  # JWT authentication and CORS hooks for the HTTP servers
  let httpJwtAuthHook = httpJwtAuth(jwtKey)
  let httpCorsHook = httpCors(allowedOrigins)
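
  # Note: when the engine API is configured to share a port with the regular
  # RPC or WebSocket server, the JWT auth hook is attached to that shared
  # server instead of a dedicated one (see the enableAuthHook checks below).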

  # Creating RPC Server
  if conf.rpcEnabled:
    let enableAuthHook = conf.engineApiEnabled and
                         conf.engineApiPort == conf.rpcPort
    let hooks = if enableAuthHook:
                  @[httpJwtAuthHook, httpCorsHook]
                else:
                  @[httpCorsHook]
    nimbus.rpcServer = newRpcHttpServerWithParams(
      initTAddress(conf.rpcAddress, conf.rpcPort),
      authHooks = hooks
    )
    setupCommonRpc(nimbus.ethNode, conf, nimbus.rpcServer)

    # Enable RPC APIs based on RPC flags and protocol flags
    let rpcFlags = conf.getRpcFlags()
    if (RpcFlag.Eth in rpcFlags and ProtocolFlag.Eth in protocols) or
       (conf.engineApiPort == conf.rpcPort):
      setupEthRpc(nimbus.ethNode, nimbus.ctx, com, nimbus.txPool, nimbus.rpcServer)
    if RpcFlag.Debug in rpcFlags:
      setupDebugRpc(com, nimbus.rpcServer)
    if RpcFlag.Exp in rpcFlags:
      setupExpRpc(com, nimbus.rpcServer)

    nimbus.rpcServer.rpc("admin_quit") do() -> string:
      {.gcsafe.}:
        nimbus.state = Stopping
      result = "EXITING"

    nimbus.rpcServer.start()

  # JWT authentication and CORS hooks for the WebSocket servers
  let wsJwtAuthHook = wsJwtAuth(jwtKey)
  let wsCorsHook = wsCors(allowedOrigins)

  # Creating Websocket RPC Server
  if conf.wsEnabled:
    let enableAuthHook = conf.engineApiWsEnabled and
                         conf.engineApiWsPort == conf.wsPort
    let hooks = if enableAuthHook:
                  @[wsJwtAuthHook, wsCorsHook]
                else:
                  @[wsCorsHook]
    # Construct server object
    nimbus.wsRpcServer = newRpcWebSocketServer(
      initTAddress(conf.wsAddress, conf.wsPort),
      authHooks = hooks,
      # yuck, we should remove this ugly cast when
      # we fix nim-websock
      rng = cast[ws.Rng](nimbus.ctx.rng)
    )
    setupCommonRpc(nimbus.ethNode, conf, nimbus.wsRpcServer)

    # Enable Websocket RPC APIs based on RPC flags and protocol flags
    let wsFlags = conf.getWsFlags()
    if (RpcFlag.Eth in wsFlags and ProtocolFlag.Eth in protocols) or
       (conf.engineApiWsPort == conf.wsPort):
      setupEthRpc(nimbus.ethNode, nimbus.ctx, com, nimbus.txPool, nimbus.wsRpcServer)
    if RpcFlag.Debug in wsFlags:
      setupDebugRpc(com, nimbus.wsRpcServer)
    if RpcFlag.Exp in wsFlags:
      setupExpRpc(com, nimbus.wsRpcServer)

    nimbus.wsRpcServer.start()

  if conf.graphqlEnabled:
    nimbus.graphqlServer = setupGraphqlHttpServer(
      conf,
      com,
      nimbus.ethNode,
      nimbus.txPool,
      @[httpCorsHook]
    )
    nimbus.graphqlServer.start()

  if conf.engineSigner != ZERO_ADDRESS and not com.forkGTE(MergeFork):
    let res = nimbus.ctx.am.getAccount(conf.engineSigner)
    if res.isErr:
      error "Failed to get account",
        msg = res.error,
        hint = "--key-store or --import-key"
      quit(QuitFailure)

    let rs = validateSealer(conf, nimbus.ctx, nimbus.chainRef)
    if rs.isErr:
      fatal "Engine signer validation error", msg = rs.error
      quit(QuitFailure)

    proc signFunc(signer: EthAddress, message: openArray[byte]): Result[RawSignature, cstring] {.gcsafe.} =
      let
        hashData = keccakHash(message)
        acc = nimbus.ctx.am.getAccount(signer).tryGet()
        rawSign = sign(acc.privateKey, SkMessage(hashData.data)).toRaw
      ok(rawSign)

    nimbus.chainRef.clique.authorize(conf.engineSigner, signFunc)

  # disable sealing engine if beacon engine enabled
  if not com.forkGTE(MergeFork):
    nimbus.sealingEngine = SealingEngineRef.new(
      nimbus.chainRef, nimbus.ctx, conf.engineSigner,
      nimbus.txPool, EngineStopped
    )
    # only run sealing engine if there is a signer
    if conf.engineSigner != ZERO_ADDRESS:
      nimbus.sealingEngine.start()

  if conf.engineApiEnabled:
    #let maybeAsyncDataSource = maybeStatelessAsyncDataSource(nimbus, conf)
    if conf.engineApiPort != conf.rpcPort:
      nimbus.engineApiServer = newRpcHttpServerWithParams(
        initTAddress(conf.engineApiAddress, conf.engineApiPort),
        authHooks = @[httpJwtAuthHook, httpCorsHook]
      )
      setupEngineAPI(nimbus.beaconEngine, nimbus.engineApiServer)
      setupEthRpc(nimbus.ethNode, nimbus.ctx, com, nimbus.txPool, nimbus.engineApiServer)
      nimbus.engineApiServer.start()
    else:
      setupEngineAPI(nimbus.beaconEngine, nimbus.rpcServer)

    info "Starting engine API server", port = conf.engineApiPort

  if conf.engineApiWsEnabled:
    #let maybeAsyncDataSource = maybeStatelessAsyncDataSource(nimbus, conf)
    if conf.engineApiWsPort != conf.wsPort:
      nimbus.engineApiWsServer = newRpcWebSocketServer(
        initTAddress(conf.engineApiWsAddress, conf.engineApiWsPort),
        authHooks = @[wsJwtAuthHook, wsCorsHook]
      )
      setupEngineAPI(nimbus.beaconEngine, nimbus.engineApiWsServer)
      setupEthRpc(nimbus.ethNode, nimbus.ctx, com, nimbus.txPool, nimbus.engineApiWsServer)
      nimbus.engineApiWsServer.start()
    else:
      setupEngineAPI(nimbus.beaconEngine, nimbus.wsRpcServer)

    info "Starting WebSocket engine API server", port = conf.engineApiWsPort

  # metrics server
  if conf.metricsEnabled:
    info "Starting metrics HTTP server", address = conf.metricsAddress, port = conf.metricsPort
    startMetricsHttpServer($conf.metricsAddress, conf.metricsPort)
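
# start: one-shot initialisation driven by the parsed configuration --
# logging, database, KZG trusted setup (EIP-4844), then either a block
# import run or the full set of node services.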
proc start(nimbus: NimbusNode, conf: NimbusConf) =
  ## logging
  setLogLevel(conf.logLevel)
  if conf.logFile.isSome:
    let logFile = string conf.logFile.get()
    defaultChroniclesStream.output.outFile = nil # to avoid closing stdout
    discard defaultChroniclesStream.output.open(logFile, fmAppend)

  when defined(evmc_enabled):
    evmcSetLibraryPath(conf.evm)

  createDir(string conf.dataDir)
  let com = CommonRef.new(
    newCoreDbRef(LegacyDbPersistent, string conf.dataDir),
    conf.pruneMode == PruneMode.Full,
    conf.networkId,
    conf.networkParams
  )

  com.initializeEmptyDb()
  com.db.compensateLegacySetup()

  let protocols = conf.getProtocolFlags()

  if conf.trustedSetupFile.isSome:
    let fileName = conf.trustedSetupFile.get()
    let res = Kzg.loadTrustedSetup(fileName)
    if res.isErr:
      fatal "Cannot load Kzg trusted setup from file", msg=res.error
      quit(QuitFailure)
  else:
    let res = loadKzgTrustedSetup()
    if res.isErr:
      fatal "Cannot load baked in Kzg trusted setup", msg=res.error
      quit(QuitFailure)

  case conf.cmd
  of NimbusCmd.`import`:
    importBlocks(conf, com)
  else:
    basicServices(nimbus, conf, com)
    manageAccounts(nimbus, conf)
    setupP2P(nimbus, conf, com, protocols)
    localServices(nimbus, conf, com, protocols)

    if conf.maxPeers > 0:
      case conf.syncMode:
      of SyncMode.Default:
        if com.forkGTE(MergeFork):
          nimbus.beaconSyncRef.start
        else:
          nimbus.legaSyncRef.start
          nimbus.ethNode.setEthHandlerNewBlocksAndHashes(
            legacy.newBlockHandler,
            legacy.newBlockHashesHandler,
            cast[pointer](nimbus.legaSyncRef))
      of SyncMode.Full:
        nimbus.fullSyncRef.start
      of SyncMode.Stateless:
        nimbus.statelessSyncRef.start
      of SyncMode.Snap:
        nimbus.snapSyncRef.start

    if nimbus.state == Starting:
      # it might have been set to "Stopping" with Ctrl+C
      nimbus.state = Running
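
# stop: tear services down in roughly the reverse order they were started;
# servers that were never created (nil) are skipped.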
proc stop*(nimbus: NimbusNode, conf: NimbusConf) {.async, gcsafe.} =
  trace "Graceful shutdown"
  if conf.rpcEnabled:
    await nimbus.rpcServer.stop()
  # nimbus.engineApiServer can be nil if conf.engineApiPort == conf.rpcPort
  if conf.engineApiEnabled and nimbus.engineApiServer.isNil.not:
    await nimbus.engineApiServer.stop()
  if conf.wsEnabled:
    nimbus.wsRpcServer.stop()
  # nimbus.engineApiWsServer can be nil if conf.engineApiWsPort == conf.wsPort
  if conf.engineApiWsEnabled and nimbus.engineApiWsServer.isNil.not:
    nimbus.engineApiWsServer.stop()
  if conf.graphqlEnabled:
    await nimbus.graphqlServer.stop()
  if conf.engineSigner != ZERO_ADDRESS and nimbus.sealingEngine.isNil.not:
    await nimbus.sealingEngine.stop()
  if conf.maxPeers > 0:
    await nimbus.networkLoop.cancelAndWait()
  if nimbus.peerManager.isNil.not:
    await nimbus.peerManager.stop()
  if nimbus.statelessSyncRef.isNil.not:
    nimbus.statelessSyncRef.stop()
  if nimbus.snapSyncRef.isNil.not:
    nimbus.snapSyncRef.stop()
  if nimbus.fullSyncRef.isNil.not:
    nimbus.fullSyncRef.stop()
  if nimbus.beaconSyncRef.isNil.not:
    nimbus.beaconSyncRef.stop()
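
# process: drive the chronos event loop until the state leaves Running
# (RPC admin_quit or Ctrl+C), then shut down gracefully.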
proc process*(nimbus: NimbusNode, conf: NimbusConf) =
  # Main event loop
  while nimbus.state == Running:
    try:
      poll()
    except CatchableError as e:
      debug "Exception in poll()", exc = e.name, err = e.msg
      discard e # silence warning when chronicles not activated

  # Stop loop
  waitFor nimbus.stop(conf)
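
# Program entry point: construct the node, install the Ctrl+C hook, parse
# the command line and run.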
when isMainModule:
  var nimbus = NimbusNode(state: Starting, ctx: newEthContext())

  ## Ctrl+C handling
  proc controlCHandler() {.noconv.} =
    when defined(windows):
      # workaround for https://github.com/nim-lang/Nim/issues/4057
      setupForeignThreadGc()
    nimbus.state = Stopping
    echo "\nCtrl+C pressed. Waiting for a graceful shutdown."
  setControlCHook(controlCHandler)

  ## Show logs on stdout until we get the user's logging choice
  discard defaultChroniclesStream.output.open(stdout)

  ## Processing command line arguments
  let conf = makeConfig()

  nimbus.start(conf)
  nimbus.process(conf)
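
# Example invocations (a sketch only -- flag names follow ./config.nim and
# may differ between releases; check `nimbus --help` on your build):
#
#   # run a node with HTTP RPC and metrics enabled
#   nimbus --data-dir:/var/lib/nimbus --rpc --metrics
#
#   # import blocks from an RLP dump, then exit (NimbusCmd.`import` above)
#   nimbus import --data-dir:/var/lib/nimbus blocks.rlp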