# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import
  ../nimbus/vm_compile_info

import
  std/[os, strutils, net, options],
  chronicles,
  chronos,
  eth/[keys, net/nat, trie/db],
  eth/common as eth_common,
  eth/p2p as eth_p2p,
  eth/p2p/[peer_pool, rlpx_protocols/les_protocol],
  json_rpc/rpcserver,
  metrics,
  metrics/[chronos_httpserver, chronicles_support],
  stew/shims/net as stewNet,
  websock/types as ws,
  "."/[conf_utils, config, constants, context, genesis, sealer, utils, version],
  ./db/[storage_types, db_chain, select_backend],
  ./graphql/ethapi,
  ./p2p/[chain, blockchain_sync],
  ./p2p/clique/[clique_desc, clique_sealer],
  ./rpc/[common, debug, engine_api, jwt_auth, p2p],
  ./sync/protocol_ethxx,
  ./utils/tx_pool

when defined(evmc_enabled):
  import transaction/evmc_dynamic_loader

## TODO:
## * No IPv6 support
## * No multiple bind addresses support
## * No database support

type
  NimbusState = enum
    Starting, Running, Stopping

  NimbusNode = ref object
    rpcServer: RpcHttpServer
    engineApiServer: RpcHttpServer
    engineApiWsServer: RpcWebSocketServer
    ethNode: EthereumNode
    state: NimbusState
    graphqlServer: GraphqlHttpServerRef
    wsRpcServer: RpcWebSocketServer
    sealingEngine: SealingEngineRef
    ctx: EthContext
    chainRef: Chain
    txPool: TxPoolRef

proc importBlocks(conf: NimbusConf, chainDB: BaseChainDB) =
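  ## Import blocks from the RLP dump file given by `blocksFile`, then quit
  ## with a success or failure exit code.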
  if string(conf.blocksFile).len > 0:
    # success or not, we quit after importing blocks
    if not importRlpBlock(string conf.blocksFile, chainDB):
      quit(QuitFailure)
    else:
      quit(QuitSuccess)

proc manageAccounts(nimbus: NimbusNode, conf: NimbusConf) =
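  ## Load keystores and/or import a private key into the account manager,
  ## as requested on the command line; abort on error.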
  if string(conf.keyStore).len > 0:
    let res = nimbus.ctx.am.loadKeystores(string conf.keyStore)
    if res.isErr:
      echo res.error()
      quit(QuitFailure)

  if string(conf.importKey).len > 0:
    let res = nimbus.ctx.am.importPrivateKey(string conf.importKey)
    if res.isErr:
      echo res.error()
      quit(QuitFailure)

proc setupP2P(nimbus: NimbusNode, conf: NimbusConf,
              chainDB: BaseChainDB, protocols: set[ProtocolFlag]) =
  ## Create the P2P server.
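  ## The node identity comes from the configured node key; the advertised
  ## address is either the manually configured external IP or the one
  ## found by NAT traversal.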
  let kpres = nimbus.ctx.hexToKeyPair(conf.nodeKeyHex)
  if kpres.isErr:
    echo kpres.error()
    quit(QuitFailure)
  let keypair = kpres.get()

  var address = Address(
    ip: conf.listenAddress,
    tcpPort: conf.tcpPort,
    udpPort: conf.udpPort
  )

  if conf.nat.hasExtIp:
    # any required port redirection is assumed to be done by hand
    address.ip = conf.nat.extIp
  else:
    # automated NAT traversal
    let extIP = getExternalIP(conf.nat.nat)
    # This external IP only appears in the logs, so don't worry about dynamic
    # IPs. Don't remove it either, because the above call does initialisation
    # and discovery for NAT-related objects.
    if extIP.isSome:
      address.ip = extIP.get()
      let extPorts = redirectPorts(tcpPort = address.tcpPort,
                                   udpPort = address.udpPort,
                                   description = NimbusName & " " & NimbusVersion)
      if extPorts.isSome:
        (address.tcpPort, address.udpPort) = extPorts.get()

  let bootstrapNodes = conf.getBootNodes()

  nimbus.ethNode = newEthereumNode(
    keypair, address, conf.networkId, nil, conf.agentString,
    addAllCapabilities = false, minPeers = conf.maxPeers,
    bootstrapNodes = bootstrapNodes,
    bindUdpPort = conf.udpPort, bindTcpPort = conf.tcpPort,
    bindIp = conf.listenAddress)

  # Add protocol capabilities based on protocol flags
  if ProtocolFlag.Eth in protocols:
    nimbus.ethNode.addCapability eth
  if ProtocolFlag.Les in protocols:
    nimbus.ethNode.addCapability les

  # chainRef: some name to avoid module-name/field/function misunderstandings
  nimbus.chainRef = newChain(chainDB)
  nimbus.ethNode.chain = nimbus.chainRef
  if conf.verifyFrom.isSome:
    let verifyFrom = conf.verifyFrom.get()
    nimbus.chainRef.extraValidation = 0 < verifyFrom
    nimbus.chainRef.verifyFrom = verifyFrom

  # Connect directly to the static nodes
  let staticPeers = conf.getStaticPeers()
  for enode in staticPeers:
    asyncCheck nimbus.ethNode.peerPool.connectToNode(newNode(enode))

  # Start Eth node
  if conf.maxPeers > 0:
    waitFor nimbus.ethNode.connectToNetwork(
      enableDiscovery = conf.discovery != DiscoveryType.None)

proc localServices(nimbus: NimbusNode, conf: NimbusConf,
                   chainDB: BaseChainDB, protocols: set[ProtocolFlag]) =
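  ## Start the in-process services: transaction pool, optional metrics
  ## logging, the HTTP/WebSocket RPC and GraphQL servers and, if an engine
  ## signer is configured, the sealing engine and its engine API servers.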
  # app wide TxPool singleton
  # TODO: disable some of txPool internal mechanism if
  # the engineSigner is zero.
  nimbus.txPool = TxPoolRef.new(chainDB, conf.engineSigner)

  # metrics logging
  if conf.logMetricsEnabled:
    # https://github.com/nim-lang/Nim/issues/17369
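    # Declared first, assigned afterwards: the closure re-arms itself via
    # setTimer, so the variable must be visible inside its own body.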
    var logMetrics: proc(udata: pointer) {.gcsafe, raises: [Defect].}
    logMetrics = proc(udata: pointer) =
      {.gcsafe.}:
        let registry = defaultRegistry
        info "metrics", registry
      discard setTimer(Moment.fromNow(conf.logMetricsInterval.seconds), logMetrics)
    discard setTimer(Moment.fromNow(conf.logMetricsInterval.seconds), logMetrics)

  # Creating RPC Server
  if conf.rpcEnabled:
    nimbus.rpcServer = newRpcHttpServer([initTAddress(conf.rpcAddress, conf.rpcPort)])
    setupCommonRpc(nimbus.ethNode, conf, nimbus.rpcServer)

    # Enable RPC APIs based on RPC flags and protocol flags
    let rpcFlags = conf.getRpcFlags()
    if (RpcFlag.Eth in rpcFlags and ProtocolFlag.Eth in protocols) or
       (conf.engineApiPort == conf.rpcPort):
      setupEthRpc(nimbus.ethNode, nimbus.ctx, chainDB, nimbus.txPool, nimbus.rpcServer)
    if RpcFlag.Debug in rpcFlags:
      setupDebugRpc(chainDB, nimbus.rpcServer)

nimbus.rpcServer.rpc("admin_quit") do() -> string:
{.gcsafe.}:
nimbus.state = Stopping
result = "EXITING"
    nimbus.rpcServer.start()

  # Provide JWT authentication handler for websockets
  let jwtHook = block:
    # Create or load shared secret
    let rc = nimbus.ctx.rng.jwtSharedSecret(conf)
    if rc.isErr:
      error "Failed to create or load shared secret",
        msg = $(rc.unsafeError) # avoid side effects
      quit(QuitFailure)
    # Authentication handler constructor
    some(rc.value.jwtAuthHandler)

  # Creating Websocket RPC Server
  if conf.wsEnabled:
    # Construct server object
    nimbus.wsRpcServer = newRpcWebSocketServer(
      initTAddress(conf.wsAddress, conf.wsPort),
      authHandler = jwtHook)
    setupCommonRpc(nimbus.ethNode, conf, nimbus.wsRpcServer)

    # Enable Websocket RPC APIs based on RPC flags and protocol flags
    let wsFlags = conf.getWsFlags()
    if (RpcFlag.Eth in wsFlags and ProtocolFlag.Eth in protocols) or
       (conf.engineApiWsPort == conf.wsPort):
      setupEthRpc(nimbus.ethNode, nimbus.ctx, chainDB, nimbus.txPool, nimbus.wsRpcServer)
    if RpcFlag.Debug in wsFlags:
      setupDebugRpc(chainDB, nimbus.wsRpcServer)

    nimbus.wsRpcServer.start()

  if conf.graphqlEnabled:
    nimbus.graphqlServer = setupGraphqlHttpServer(conf, chainDB, nimbus.ethNode, nimbus.txPool)
    nimbus.graphqlServer.start()

  if conf.engineSigner != ZERO_ADDRESS:
    let res = nimbus.ctx.am.getAccount(conf.engineSigner)
    if res.isErr:
      error "Failed to get account",
        msg = res.error,
        hint = "--key-store or --import-key"
      quit(QuitFailure)

    let rs = validateSealer(conf, nimbus.ctx, nimbus.chainRef)
    if rs.isErr:
      echo rs.error
      quit(QuitFailure)

    proc signFunc(signer: EthAddress, message: openArray[byte]): Result[RawSignature, cstring] {.gcsafe.} =
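      ## Sign callback for the Clique sealer: sign the Keccak hash of
      ## `message` with the signer's private key from the account manager.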
      let
        hashData = keccakHash(message)
        acc = nimbus.ctx.am.getAccount(signer).tryGet()
        rawSign = sign(acc.privateKey, SkMessage(hashData.data)).toRaw

      ok(rawSign)

    # TODO: There should be a better place to initialize this
    nimbus.chainRef.clique.authorize(conf.engineSigner, signFunc)

    var initialState = EngineStopped
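    # A total difficulty above the terminal total difficulty (ttd) means
    # the merge has already happened, so the sealer starts in post-merge
    # mode.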
    if chainDB.totalDifficulty > chainDB.ttd:
      initialState = EnginePostMerge
    nimbus.sealingEngine = SealingEngineRef.new(
      nimbus.chainRef, nimbus.ctx, conf.engineSigner,
      nimbus.txPool, initialState
    )
    nimbus.sealingEngine.start()

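    # If the engine API port coincides with the regular RPC/WS port, the
    # engine API is mounted on the already configured server instead of a
    # new one.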
    if conf.engineApiEnabled:
      if conf.engineApiPort != conf.rpcPort:
        nimbus.engineApiServer = newRpcHttpServer([
          initTAddress(conf.engineApiAddress, conf.engineApiPort)
        ])
        setupEngineAPI(nimbus.sealingEngine, nimbus.engineApiServer)
        setupEthRpc(nimbus.ethNode, nimbus.ctx, chainDB, nimbus.txPool, nimbus.engineApiServer)
        nimbus.engineApiServer.start()
      else:
        setupEngineAPI(nimbus.sealingEngine, nimbus.rpcServer)

      info "Starting engine API server", port = conf.engineApiPort

    if conf.engineApiWsEnabled:
      if conf.engineApiWsPort != conf.wsPort:
        nimbus.engineApiWsServer = newRpcWebSocketServer(
          initTAddress(conf.engineApiWsAddress, conf.engineApiWsPort),
          authHandler = jwtHook)
        setupEngineAPI(nimbus.sealingEngine, nimbus.engineApiWsServer)
        setupEthRpc(nimbus.ethNode, nimbus.ctx, chainDB, nimbus.txPool, nimbus.engineApiWsServer)
        nimbus.engineApiWsServer.start()
      else:
        setupEngineAPI(nimbus.sealingEngine, nimbus.wsRpcServer)

      info "Starting WebSocket engine API server", port = conf.engineApiWsPort

  else:
    if conf.engineApiEnabled or conf.engineApiWsEnabled:
      warn "Cannot enable engine API without sealing engine",
        hint = "use --engine-signer to enable sealing engine"

  # metrics server
  if conf.metricsEnabled:
    info "Starting metrics HTTP server", address = conf.metricsAddress, port = conf.metricsPort
    startMetricsHttpServer($conf.metricsAddress, conf.metricsPort)

proc start(nimbus: NimbusNode, conf: NimbusConf) =
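  ## One-time startup: set up logging, open the database, initialise the
  ## genesis state if needed and dispatch on the selected sub-command.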
  # logging
  setLogLevel(conf.logLevel)
  if conf.logFile.isSome:
    let logFile = string conf.logFile.get()
    defaultChroniclesStream.output.outFile = nil # to avoid closing stdout
    discard defaultChroniclesStream.output.open(logFile, fmAppend)

  when defined(evmc_enabled):
    evmcSetLibraryPath(conf.evm)

  createDir(string conf.dataDir)
  let trieDB = trieDB newChainDb(string conf.dataDir)

  var chainDB = newBaseChainDB(trieDB,
    conf.pruneMode == PruneMode.Full,
    conf.networkId,
    conf.networkParams
  )
  chainDB.populateProgress()

  if canonicalHeadHashKey().toOpenArray notin trieDB:
    initializeEmptyDb(chainDB)
    doAssert(canonicalHeadHashKey().toOpenArray in trieDB)

  let protocols = conf.getProtocolFlags()

  case conf.cmd
  of NimbusCmd.`import`:
    importBlocks(conf, chainDB)
  else:
    manageAccounts(nimbus, conf)
    setupP2P(nimbus, conf, chainDB, protocols)
    localServices(nimbus, conf, chainDB, protocols)

    if ProtocolFlag.Eth in protocols:
      # TODO: temp code until the CLI/RPC interface is fleshed out
      let status = waitFor nimbus.ethNode.fastBlockchainSync()
      if status != syncSuccess:
        debug "Block sync failed", status

    if nimbus.state == Starting:
      # it might have been set to "Stopping" with Ctrl+C
      nimbus.state = Running

proc stop*(nimbus: NimbusNode, conf: NimbusConf) {.async, gcsafe.} =
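  ## Stop every server and service that was started.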
trace "Graceful shutdown"
if conf.rpcEnabled:
    await nimbus.rpcServer.stop()
  if conf.engineApiEnabled:
    await nimbus.engineApiServer.stop()
  if conf.wsEnabled:
    nimbus.wsRpcServer.stop()
  if conf.engineApiWsEnabled:
    nimbus.engineApiWsServer.stop()
  if conf.graphqlEnabled:
    await nimbus.graphqlServer.stop()
  if conf.engineSigner != ZERO_ADDRESS:
    await nimbus.sealingEngine.stop()

proc process*(nimbus: NimbusNode, conf: NimbusConf) =
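  ## Run the chronos event loop until the node state leaves `Running`,
  ## then shut down gracefully.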
  # Main event loop
  while nimbus.state == Running:
    try:
      poll()
    except CatchableError as e:
      debug "Exception in poll()", exc = e.name, err = e.msg
      discard e # silence warning when chronicles not activated

  # Stop loop
  waitFor nimbus.stop(conf)

when isMainModule:
  var nimbus = NimbusNode(state: Starting, ctx: newEthContext())

  ## Ctrl+C handling
  proc controlCHandler() {.noconv.} =
    when defined(windows):
      # workaround for https://github.com/nim-lang/Nim/issues/4057
      setupForeignThreadGc()
    nimbus.state = Stopping
    echo "\nCtrl+C pressed. Waiting for a graceful shutdown."
  setControlCHook(controlCHandler)

  ## Show logs on stdout until we get the user's logging choice
  discard defaultChroniclesStream.output.open(stdout)

  ## Processing command line arguments
  let conf = makeConfig()

  nimbus.start(conf)
  nimbus.process(conf)