# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.push raises: [].}

import
  std/[
    options,
    strutils,
    os,
    uri,
    net
  ],
  pkg/[
    chronos/transports/common,
    chronicles,
    confutils,
    confutils/defs,
    confutils/std/net
  ],
  eth/[common, net/utils, net/nat, p2p/bootnodes, p2p/enode, p2p/discoveryv5/enr],
  "."/[constants, compile_info, version],
  common/chain_config,
  db/opts

export net, defs

const
  # e.g.: Copyright (c) 2018-2021 Status Research & Development GmbH
  NimbusCopyright* = "Copyright (c) 2018-" &
    CompileDate.split('-')[0] &
    " Status Research & Development GmbH"

  # e.g.:
  # nimbus/v0.1.0-abcdef/os-cpu/nim-a.b.c/emvc
  # Copyright (c) 2018-2021 Status Research & Development GmbH
  NimbusBuild* = "$#\p$#" % [
    ClientId,
    NimbusCopyright,
  ]

  NimbusHeader* = "$#\p\pNim version $#" % [
    NimbusBuild,
    NimVersion
  ]

func defaultDataDir*(): string =
  when defined(windows):
    getHomeDir() / "AppData" / "Roaming" / "Nimbus"
  elif defined(macosx):
    getHomeDir() / "Library" / "Application Support" / "Nimbus"
  else:
    getHomeDir() / ".cache" / "nimbus"
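
# Note (illustrative): defaultDataDir() resolves to "$HOME/.cache/nimbus" on
# Linux, "<home>/AppData/Roaming/Nimbus" on Windows and
# "<home>/Library/Application Support/Nimbus" on macOS.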

func defaultKeystoreDir*(): string =
  defaultDataDir() / "keystore"

func getLogLevels(): string =
  var logLevels: seq[string]
  for level in LogLevel:
    if level < enabledLogLevel:
      continue
    logLevels.add($level)
  join(logLevels, ", ")
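  # e.g. (illustrative): the result is a comma-separated list such as
  # "TRACE, DEBUG, INFO, NOTICE, WARN, ERROR, FATAL"; levels below chronicles'
  # compile-time `enabledLogLevel` are omitted.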

const
  defaultDataDirDesc = defaultDataDir()
  defaultPort = 30303
  defaultMetricsServerPort = 9093
  defaultHttpPort = 8545
  # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.4/src/engine/authentication.md#jwt-specifications
  defaultEngineApiPort = 8551
  defaultAdminListenAddress = (static parseIpAddress("127.0.0.1"))
  defaultAdminListenAddressDesc = $defaultAdminListenAddress & ", meaning local host only"
  logLevelDesc = getLogLevels()

let
  defaultListenAddress = getAutoAddress(Port(0)).toIpAddress()
  defaultListenAddressDesc = $defaultListenAddress & ", meaning all network interfaces"

# `when` around an option doesn't work with confutils; it fails to compile.
# Workaround that by setting the `ignore` pragma on EVMC-specific options.
when defined(evmc_enabled):
  {.pragma: includeIfEvmc.}
else:
  {.pragma: includeIfEvmc, ignore.}

const sharedLibText = if defined(linux): " (*.so, *.so.N)"
                      elif defined(windows): " (*.dll)"
                      elif defined(macosx): " (*.dylib)"
                      else: ""

type
  ChainDbMode* {.pure.} = enum
    Aristo
    AriPrune

  NimbusCmd* {.pure.} = enum
    noCommand
    `import`

  RpcFlag* {.pure.} = enum
    ## RPC flags
    Eth   ## enable eth_ set of RPC API
    Debug ## enable debug_ set of RPC API

  DiscoveryType* {.pure.} = enum
    None
    V4
    V5

  NimbusConf* = object of RootObj
    ## Main Nimbus configuration object

    dataDir* {.
      separator: "ETHEREUM OPTIONS:"
      desc: "The directory where nimbus will store all blockchain data"
      defaultValue: defaultDataDir()
      defaultValueDesc: $defaultDataDirDesc
      abbr: "d"
      name: "data-dir" }: OutDir

    era1DirOpt* {.
      desc: "Directory where era1 (pre-merge) archive can be found"
      defaultValueDesc: "<data-dir>/era1"
      name: "era1-dir" }: Option[OutDir]

    eraDirOpt* {.
      desc: "Directory where era (post-merge) archive can be found"
      defaultValueDesc: "<data-dir>/era"
      name: "era-dir" }: Option[OutDir]

    keyStore* {.
      desc: "Load one or more keystore files from this directory"
      defaultValue: defaultKeystoreDir()
      defaultValueDesc: "inside datadir"
      abbr: "k"
      name: "key-store" }: OutDir

    chainDbMode* {.
      desc: "Blockchain database"
      longDesc:
        "- Aristo -- Single state DB, full node\n" &
        "- AriPrune -- Aristo with curbed block history (for testing)\n" &
        ""
      defaultValue: ChainDbMode.Aristo
      defaultValueDesc: $ChainDbMode.Aristo
      abbr: "p"
      name: "chaindb" }: ChainDbMode

    importKey* {.
      desc: "Import unencrypted 32 bytes hex private key from a file"
      defaultValue: ""
      abbr: "e"
      name: "import-key" }: InputFile

    verifyFrom* {.
      desc: "Enable extra verification when current block number greater than verify-from"
      defaultValueDesc: ""
      name: "verify-from" }: Option[uint64]

    evm* {.
      desc: "Load alternative EVM from EVMC-compatible shared library" & sharedLibText
      defaultValue: ""
      name: "evm"
      includeIfEvmc }: string
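
    # Note (from the original EVMC integration): the library is selected with
    # `--evm:<path>`; test programs that do not parse command line options can
    # select it via the `NIMBUS_EVM` environment variable instead.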

    trustedSetupFile* {.
      desc: "Load EIP-4844 trusted setup file"
      defaultValue: none(string)
      defaultValueDesc: "Baked in trusted setup"
      name: "trusted-setup-file" .}: Option[string]

    extraData* {.
      desc: "Value of the extraData field when assembling a block (max 32 bytes)"
      defaultValue: ShortClientId
      defaultValueDesc: $ShortClientId
      name: "extra-data" .}: string

    network {.
      separator: "\pETHEREUM NETWORK OPTIONS:"
      desc: "Name or id number of Ethereum network(mainnet(1), sepolia(11155111), holesky(17000), other=custom)"
      longDesc:
        "- mainnet: Ethereum main network\n" &
        "- sepolia: Test network\n" &
        "- holesky: The holesovice post-merge testnet"
      defaultValue: "" # the default value is set in makeConfig
      defaultValueDesc: "mainnet(1)"
      abbr: "i"
      name: "network" }: string

    customNetwork {.
      desc: "Use custom genesis block for private Ethereum Network (as /path/to/genesis.json)"
      defaultValueDesc: ""
      abbr: "c"
      name: "custom-network" }: Option[NetworkParams]

    networkId* {.
      ignore # this field is not processed by confutils
      defaultValue: MainNet # the default value is set by `makeConfig`
      name: "network-id"}: NetworkId

    networkParams* {.
      ignore # this field is not processed by confutils
      defaultValue: NetworkParams() # the default value is set by `makeConfig`
      name: "network-params"}: NetworkParams

    logLevel* {.
      separator: "\pLOGGING AND DEBUGGING OPTIONS:"
      desc: "Sets the log level for process and topics (" & logLevelDesc & ")"
      defaultValue: LogLevel.INFO
      defaultValueDesc: $LogLevel.INFO
      name: "log-level" }: LogLevel

    logFile* {.
      desc: "Specifies a path for the written Json log file"
      name: "log-file" }: Option[OutFile]

    logMetricsEnabled* {.
      desc: "Enable metrics logging"
      defaultValue: false
      name: "log-metrics" .}: bool

    logMetricsInterval* {.
      desc: "Interval at which to log metrics, in seconds"
      defaultValue: 10
      name: "log-metrics-interval" .}: int

    metricsEnabled* {.
      desc: "Enable the built-in metrics HTTP server"
      defaultValue: false
      name: "metrics" }: bool

    metricsPort* {.
      desc: "Listening port of the built-in metrics HTTP server"
      defaultValue: defaultMetricsServerPort
      defaultValueDesc: $defaultMetricsServerPort
      name: "metrics-port" }: Port

    metricsAddress* {.
      desc: "Listening IP address of the built-in metrics HTTP server"
      defaultValue: defaultAdminListenAddress
      defaultValueDesc: $defaultAdminListenAddressDesc
      name: "metrics-address" }: IpAddress

    bootstrapNodes {.
      separator: "\pNETWORKING OPTIONS:"
      desc: "Specifies one or more bootstrap nodes(as enode URL) to use when connecting to the network"
      defaultValue: @[]
      defaultValueDesc: ""
      abbr: "b"
      name: "bootstrap-node" }: seq[string]

    bootstrapFile {.
      desc: "Specifies a line-delimited file of bootstrap Ethereum network addresses(enode URL). " &
            "By default, addresses will be added to the bootstrap node list. " &
            "If the first line is the word `override`, the file contents will replace the built-in list"
      defaultValue: ""
      name: "bootstrap-file" }: InputFile

    bootstrapEnrs {.
      desc: "ENR URI of node to bootstrap discovery from. Argument may be repeated"
      defaultValue: @[]
      defaultValueDesc: ""
      name: "bootstrap-enr" }: seq[enr.Record]

    staticPeers {.
      desc: "Connect to one or more trusted peers(as enode URL)"
      defaultValue: @[]
      defaultValueDesc: ""
      name: "static-peers" }: seq[string]

    staticPeersFile {.
      desc: "Specifies a line-delimited file of trusted peer addresses (enode URL) " &
            "to be added to the --static-peers list. If the first line is the word `override`, " &
            "the file contents will replace the --static-peers list"
      defaultValue: ""
      name: "static-peers-file" }: InputFile

    staticPeersEnrs {.
      desc: "ENR URI of node to connect to as trusted peer. Argument may be repeated"
      defaultValue: @[]
      defaultValueDesc: ""
      name: "static-peer-enr" }: seq[enr.Record]

    reconnectMaxRetry* {.
      desc: "Specifies max number of retries if static peers disconnected/not connected. " &
            "0 = infinite."
      defaultValue: 0
      name: "reconnect-max-retry" }: int

    reconnectInterval* {.
      desc: "Interval in seconds before next attempt to reconnect to static peers. Min 5 seconds."
      defaultValue: 15
      name: "reconnect-interval" }: int

    listenAddress* {.
      desc: "Listening IP address for Ethereum P2P and Discovery traffic"
      defaultValue: defaultListenAddress
      defaultValueDesc: $defaultListenAddressDesc
      name: "listen-address" }: IpAddress

    tcpPort* {.
      desc: "Ethereum P2P network listening TCP port"
      defaultValue: defaultPort
      defaultValueDesc: $defaultPort
      name: "tcp-port" }: Port

    udpPort* {.
      desc: "Ethereum P2P network listening UDP port"
      defaultValue: 0 # set udpPort defaultValue in `makeConfig`
      defaultValueDesc: "default to --tcp-port"
      name: "udp-port" }: Port

    maxPeers* {.
      desc: "Maximum number of peers to connect to"
      defaultValue: 25
      name: "max-peers" }: int

    nat* {.
      desc: "Specify method to use for determining public address. " &
            "Must be one of: any, none, upnp, pmp, extip:<IP>"
      defaultValue: NatConfig(hasExtIp: false, nat: NatAny)
      defaultValueDesc: "any"
      name: "nat" .}: NatConfig

    discovery* {.
      desc: "Specify method to find suitable peer in an Ethereum network (None, V4, V5)"
      longDesc:
        "- None: Disables the peer discovery mechanism (manual peer addition)\n" &
        "- V4  : Node Discovery Protocol v4 (default)\n" &
        "- V5  : Node Discovery Protocol v5"
      defaultValue: DiscoveryType.V4
      defaultValueDesc: $DiscoveryType.V4
      name: "discovery" .}: DiscoveryType

    netKey* {.
      desc: "P2P ethereum node (secp256k1) private key (random, path, hex)"
      longDesc:
        "- random: generate random network key for this node instance\n" &
        "- path  : path to where the private key will be loaded or auto generated\n" &
        "- hex   : 32 bytes hex of network private key"
      defaultValue: "random"
      name: "net-key" .}: string

    agentString* {.
      desc: "Node agent string which is used as identifier in network"
      defaultValue: ClientId
      defaultValueDesc: $ClientId
      name: "agent-string" .}: string

    beaconChunkSize* {.
      hidden
      desc: "Number of blocks per database transaction for beacon sync"
      defaultValue: 0
      name: "debug-beacon-chunk-size" .}: int

    rocksdbMaxOpenFiles {.
      hidden
      defaultValue: defaultMaxOpenFiles
      defaultValueDesc: $defaultMaxOpenFiles
      name: "debug-rocksdb-max-open-files".}: int

    rocksdbWriteBufferSize {.
      hidden
      defaultValue: defaultWriteBufferSize
      defaultValueDesc: $defaultWriteBufferSize
      name: "debug-rocksdb-write-buffer-size".}: int

    rocksdbRowCacheSize {.
      hidden
      defaultValue: defaultRowCacheSize
      defaultValueDesc: $defaultRowCacheSize
      name: "debug-rocksdb-row-cache-size".}: int

    rocksdbBlockCacheSize {.
      hidden
      defaultValue: defaultBlockCacheSize
      defaultValueDesc: $defaultBlockCacheSize
      name: "debug-rocksdb-block-cache-size".}: int

    rdbKeyCacheSize {.
      hidden
      defaultValue: defaultRdbKeyCacheSize
      defaultValueDesc: $defaultRdbKeyCacheSize
      name: "debug-rdb-key-cache-size".}: int

    rdbVtxCacheSize {.
      hidden
      defaultValue: defaultRdbVtxCacheSize
      defaultValueDesc: $defaultRdbVtxCacheSize
      name: "debug-rdb-vtx-cache-size".}: int

    rdbPrintStats {.
      hidden
      desc: "Print RDB statistics at exit"
      name: "debug-rdb-print-stats".}: bool

    case cmd* {.
      command
      defaultValue: NimbusCmd.noCommand }: NimbusCmd

    of noCommand:
      httpPort* {.
        separator: "\pLOCAL SERVICES OPTIONS:"
        desc: "Listening port of the HTTP server(rpc, ws, graphql)"
        defaultValue: defaultHttpPort
        defaultValueDesc: $defaultHttpPort
        name: "http-port" }: Port

      httpAddress* {.
        desc: "Listening IP address of the HTTP server(rpc, ws, graphql)"
        defaultValue: defaultAdminListenAddress
        defaultValueDesc: $defaultAdminListenAddressDesc
        name: "http-address" }: IpAddress

      rpcEnabled* {.
        desc: "Enable the JSON-RPC server"
        defaultValue: false
        name: "rpc" }: bool

      rpcApi {.
        desc: "Enable specific set of RPC API (available: eth, debug)"
        defaultValue: @[]
        defaultValueDesc: $RpcFlag.Eth
        name: "rpc-api" }: seq[string]

      wsEnabled* {.
        desc: "Enable the Websocket JSON-RPC server"
        defaultValue: false
        name: "ws" }: bool

      wsApi {.
        desc: "Enable specific set of Websocket RPC API (available: eth, debug)"
        defaultValue: @[]
        defaultValueDesc: $RpcFlag.Eth
        name: "ws-api" }: seq[string]

      graphqlEnabled* {.
        desc: "Enable the GraphQL HTTP server"
        defaultValue: false
        name: "graphql" }: bool

      engineApiEnabled* {.
        desc: "Enable the Engine API"
        defaultValue: false
        name: "engine-api" .}: bool

      engineApiPort* {.
        desc: "Listening port for the Engine API(http and ws)"
        defaultValue: defaultEngineApiPort
        defaultValueDesc: $defaultEngineApiPort
        name: "engine-api-port" .}: Port

      engineApiAddress* {.
        desc: "Listening address for the Engine API(http and ws)"
        defaultValue: defaultAdminListenAddress
        defaultValueDesc: $defaultAdminListenAddressDesc
        name: "engine-api-address" .}: IpAddress

      engineApiWsEnabled* {.
        desc: "Enable the WebSocket Engine API"
        defaultValue: false
        name: "engine-api-ws" .}: bool

      allowedOrigins* {.
        desc: "Comma separated list of domains from which to accept cross origin requests"
        defaultValue: @[]
        defaultValueDesc: "*"
        name: "allowed-origins" .}: seq[string]

      # github.com/ethereum/execution-apis/
      # /blob/v1.0.0-alpha.8/src/engine/authentication.md#key-distribution
      jwtSecret* {.
        desc: "Path to a file containing a 32 byte hex-encoded shared secret" &
          " needed for websocket authentication. By default, the secret key" &
          " is auto-generated."
        defaultValueDesc: "\"jwt.hex\" in the data directory (see --data-dir)"
        name: "jwt-secret" .}: Option[InputFile]

    of `import`:
      blocksFile* {.
        argument
        desc: "One or more RLP encoded block(s) files"
        name: "blocks-file" }: seq[InputFile]

      maxBlocks* {.
        desc: "Maximum number of blocks to import"
        defaultValue: uint64.high()
        name: "max-blocks" .}: uint64

      chunkSize* {.
        desc: "Number of blocks per database transaction"
        defaultValue: 8192
        name: "chunk-size" .}: uint64

      csvStats* {.
        hidden
        desc: "Save performance statistics to CSV"
        name: "debug-csv-stats".}: Option[string]

      # TODO validation and storage options should be made non-hidden when the
      # UX has stabilised and era1 storage is in the app
      fullValidation* {.
        hidden
        desc: "Enable full per-block validation (slow)"
        defaultValue: false
        name: "debug-full-validation".}: bool

      noValidation* {.
        hidden
        desc: "Disable per-chunk validation"
        defaultValue: true
        name: "debug-no-validation".}: bool

      storeBodies* {.
        hidden
        desc: "Store block bodies in database"
        defaultValue: false
        name: "debug-store-bodies".}: bool

      # TODO this option should probably only cover the redundant parts, ie
      # those that are in era1 files - era files presently do not store
      # receipts
      storeReceipts* {.
        hidden
        desc: "Store receipts in database"
        defaultValue: false
        name: "debug-store-receipts".}: bool

      storeSlotHashes* {.
        hidden
        desc: "Store reverse slot hashes in database"
        defaultValue: false
        name: "debug-store-slot-hashes".}: bool

func parseCmdArg(T: type NetworkId, p: string): T
    {.gcsafe, raises: [ValueError].} =
  parseBiggestUInt(p).T
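  # e.g. (illustrative): `--network-id:11155111` parses to NetworkId 11155111,
  # the same id that the --network help text lists for "sepolia".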

func completeCmdArg(T: type NetworkId, val: string): seq[string] =
  return @[]

func parseCmdArg*(T: type enr.Record, p: string): T {.raises: [ValueError].} =
  result = fromURI(enr.Record, p).valueOr:
    raise newException(ValueError, "Invalid ENR")

func completeCmdArg*(T: type enr.Record, val: string): seq[string] =
  return @[]

func processList(v: string, o: var seq[string]) =
  ## Process comma-separated list of strings.
  if len(v) > 0:
    for n in v.split({' ', ','}):
      if len(n) > 0:
        o.add(n)
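  # e.g. (illustrative): both "eth,debug" and "eth debug" append the two items
  # "eth" and "debug" to `o`; empty fragments are dropped.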

proc parseCmdArg(T: type NetworkParams, p: string): T
    {.gcsafe, raises: [ValueError].} =
  try:
    if not loadNetworkParams(p, result):
      raise newException(ValueError, "failed to load customNetwork")
  except CatchableError:
    raise newException(ValueError, "failed to load customNetwork")

func completeCmdArg(T: type NetworkParams, val: string): seq[string] =
  return @[]

func setBootnodes(output: var seq[ENode], nodeUris: openArray[string]) =
  output = newSeqOfCap[ENode](nodeUris.len)
  for item in nodeUris:
    output.add(ENode.fromString(item).expect("valid hardcoded ENode"))

iterator repeatingList(listOfList: openArray[string]): string =
  for strList in listOfList:
    var list = newSeq[string]()
    processList(strList, list)
    for item in list:
      yield item
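  # e.g. (illustrative): repeatingList(["a,b", "c d"]) yields "a", "b", "c",
  # "d", so repeated and comma-separated option values flatten to one stream.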

proc append(output: var seq[ENode], nodeUris: openArray[string]) =
  for item in repeatingList(nodeUris):
    let res = ENode.fromString(item)
    if res.isErr:
      warn "Ignoring invalid bootstrap address", address=item
      continue
    output.add res.get()

iterator strippedLines(filename: string): (int, string)
    {.gcsafe, raises: [IOError].} =
  var i = 0
  for line in lines(filename):
    let stripped = strip(line)
    if stripped.startsWith('#'): # Comments
      continue

    if stripped.len > 0:
      yield (i, stripped)
      inc i

proc loadEnodeFile(fileName: string; output: var seq[ENode]; info: string) =
  if fileName.len == 0:
    return

  try:
    for i, ln in strippedLines(fileName):
      if cmpIgnoreCase(ln, "override") == 0 and i == 0:
        # override built-in list if the first line is 'override'
        output = newSeq[ENode]()
        continue

      let res = ENode.fromString(ln)
      if res.isErr:
        warn "Ignoring invalid address", address=ln, line=i, file=fileName, purpose=info
        continue

      output.add res.get()

  except IOError as e:
    error "Could not read file", msg = e.msg, purpose = info
    quit 1
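
# e.g. (illustrative) bootstrap/static-peers file accepted by loadEnodeFile:
#
#   override
#   # my own nodes
#   enode://<hex node id>@10.0.0.1:30303
#   enode://<hex node id>@10.0.0.2:30303
#
# `override` on the first non-comment line replaces the built-in list instead
# of extending it.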

proc loadBootstrapFile(fileName: string, output: var seq[ENode]) =
  fileName.loadEnodeFile(output, "bootstrap")

proc loadStaticPeersFile(fileName: string, output: var seq[ENode]) =
  fileName.loadEnodeFile(output, "static peers")

proc getNetworkId(conf: NimbusConf): Option[NetworkId] =
  if conf.network.len == 0:
    return none NetworkId

  let network = toLowerAscii(conf.network)
  case network
  of "mainnet": return some MainNet
  of "sepolia": return some SepoliaNet
  of "holesky": return some HoleskyNet
  else:
    try:
      some parseBiggestUInt(network).NetworkId
    except CatchableError:
      error "Failed to parse network name or id", network
      quit QuitFailure

proc getRpcFlags(api: openArray[string]): set[RpcFlag] =
  if api.len == 0:
    return {RpcFlag.Eth}

  for item in repeatingList(api):
    case item.toLowerAscii()
    of "eth": result.incl RpcFlag.Eth
    of "debug": result.incl RpcFlag.Debug
    else:
      error "Unknown RPC API: ", name=item
      quit QuitFailure
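  # e.g. (illustrative): getRpcFlags(["eth,debug"]) == {RpcFlag.Eth, RpcFlag.Debug},
  # while an empty --rpc-api/--ws-api list falls back to {RpcFlag.Eth}.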

proc getRpcFlags*(conf: NimbusConf): set[RpcFlag] =
  getRpcFlags(conf.rpcApi)

proc getWsFlags*(conf: NimbusConf): set[RpcFlag] =
  getRpcFlags(conf.wsApi)

func fromEnr*(T: type ENode, r: enr.Record): ENodeResult[ENode] =
  let
    # TODO: there must always be a public key, else no signature verification
    # could have been done and no Record would exist here.
    # TypedRecord should be reworked not to have public key as an option.
    pk = r.get(PublicKey).get()
    tr = TypedRecord.fromRecord(r) #.expect("id in valid record")

  if tr.ip.isNone():
    return err(IncorrectIP)
  if tr.udp.isNone():
    return err(IncorrectDiscPort)
  if tr.tcp.isNone():
    return err(IncorrectPort)

  ok(ENode(
    pubkey: pk,
    address: enode.Address(
      ip: utils.ipv4(tr.ip.get()),
      udpPort: Port(tr.udp.get()),
      tcpPort: Port(tr.tcp.get())
    )
  ))

proc getBootNodes*(conf: NimbusConf): seq[ENode] =
  var bootstrapNodes: seq[ENode]
  # Ignore standard bootnodes if customNetwork is loaded
  if conf.customNetwork.isNone:
    case conf.networkId
    of MainNet:
      bootstrapNodes.setBootnodes(MainnetBootnodes)
    of SepoliaNet:
      bootstrapNodes.setBootnodes(SepoliaBootnodes)
    of HoleskyNet:
      bootstrapNodes.setBootnodes(HoleskyBootnodes)
    else:
      # custom network id
      discard

  # always allow bootstrap nodes provided by the user
  if conf.bootstrapNodes.len > 0:
    bootstrapNodes.append(conf.bootstrapNodes)

  # bootstrap nodes loaded from file might append or
  # override built-in bootnodes
  loadBootstrapFile(string conf.bootstrapFile, bootstrapNodes)

  # Bootstrap nodes provided as ENRs
  for enr in conf.bootstrapEnrs:
    let enode = ENode.fromEnr(enr).valueOr:
      fatal "Invalid bootstrap ENR provided", error
      quit 1

    bootstrapNodes.add(enode)

  bootstrapNodes

proc getStaticPeers*(conf: NimbusConf): seq[ENode] =
  var staticPeers: seq[ENode]
  staticPeers.append(conf.staticPeers)
  loadStaticPeersFile(string conf.staticPeersFile, staticPeers)

  # Static peers provided as ENRs
  for enr in conf.staticPeersEnrs:
    let enode = ENode.fromEnr(enr).valueOr:
      fatal "Invalid static peer ENR provided", error
      quit 1

    staticPeers.add(enode)

  staticPeers

func getAllowedOrigins*(conf: NimbusConf): seq[Uri] =
  for item in repeatingList(conf.allowedOrigins):
    result.add parseUri(item)

func engineApiServerEnabled*(conf: NimbusConf): bool =
  conf.engineApiEnabled or conf.engineApiWsEnabled

func shareServerWithEngineApi*(conf: NimbusConf): bool =
  conf.engineApiServerEnabled and
    conf.engineApiPort == conf.httpPort

func httpServerEnabled*(conf: NimbusConf): bool =
  conf.graphqlEnabled or
    conf.wsEnabled or
    conf.rpcEnabled
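
# e.g. (illustrative): with `--rpc --engine-api --engine-api-port:8545
# --http-port:8545`, both engineApiServerEnabled() and httpServerEnabled()
# hold and shareServerWithEngineApi() is true, signalling that the Engine API
# should share the regular HTTP server rather than open its own port.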

func era1Dir*(conf: NimbusConf): OutDir =
  conf.era1DirOpt.get(OutDir(conf.dataDir.string & "/era1"))

func eraDir*(conf: NimbusConf): OutDir =
  conf.eraDirOpt.get(OutDir(conf.dataDir.string & "/era"))

func dbOptions*(conf: NimbusConf, noKeyCache = false): DbOptions =
  DbOptions.init(
    maxOpenFiles = conf.rocksdbMaxOpenFiles,
    writeBufferSize = conf.rocksdbWriteBufferSize,
    rowCacheSize = conf.rocksdbRowCacheSize,
    blockCacheSize = conf.rocksdbBlockCacheSize,
    rdbKeyCacheSize =
      if noKeyCache: 0 else: conf.rdbKeyCacheSize,
    rdbVtxCacheSize =
      # The import command does not use the key cache - better give it to vtx
      if noKeyCache: conf.rdbKeyCacheSize + conf.rdbVtxCacheSize
      else: conf.rdbVtxCacheSize,
    rdbPrintStats = conf.rdbPrintStats,
  )

# KLUDGE: The `load()` template does currently not work within any exception
# annotated environment.
{.pop.}

proc makeConfig*(cmdLine = commandLineParams()): NimbusConf
    {.raises: [CatchableError].} =
  ## Note: this function is not gc-safe

  # The try/catch clause can go away when `load()` is clean
  try:
    {.push warning[ProveInit]: off.}
    result = NimbusConf.load(
      cmdLine,
      version = NimbusBuild,
      copyrightBanner = NimbusHeader
    )
    {.pop.}
  except CatchableError as e:
    raise e

  var networkId = result.getNetworkId()

  if result.customNetwork.isSome:
    result.networkParams = result.customNetwork.get()
    if networkId.isNone:
      # WARNING: networkId and chainId are two distinct things,
      # and their usage should not be mixed in other places.
      # We only set networkId to chainId if networkId is not given on the
      # command line and --custom-network is set.
      # If chainId is not defined in the config file, that is ok because
      # zero means CustomNet.
      networkId = some(NetworkId(result.networkParams.config.chainId))

  if networkId.isNone:
    # bootnodes is set via getBootNodes
    networkId = some MainNet

  result.networkId = networkId.get()

  if result.customNetwork.isNone:
    result.networkParams = networkParams(result.networkId)

  if result.cmd == noCommand:
    if result.udpPort == Port(0):
      # default to the TCP port if --udp-port was not given
      result.udpPort = result.tcpPort

  # see issue #1346
  if result.keyStore.string == defaultKeystoreDir() and
     result.dataDir.string != defaultDataDir():
    result.keyStore = OutDir(result.dataDir.string / "keystore")

when isMainModule:
  # for testing purpose
  discard makeConfig()
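  # e.g. (illustrative): a specific command line can be exercised here as well,
  #   discard makeConfig(@["--network:sepolia", "--rpc", "--rpc-api:eth,debug"])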