2020-05-22 17:04:52 +00:00
|
|
|
# beacon_chain
|
2023-01-20 14:14:37 +00:00
|
|
|
# Copyright (c) 2018-2023 Status Research & Development GmbH
|
2020-05-22 17:04:52 +00:00
|
|
|
# Licensed and distributed under either of
|
|
|
|
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
|
|
|
|
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
|
|
|
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
|
|
|
|
2023-01-20 14:14:37 +00:00
|
|
|
{.push raises: [].}
|
2021-03-26 06:52:01 +00:00
|
|
|
|
2020-05-27 17:06:28 +00:00
|
|
|
# Common routines for a BeaconNode and a ValidatorClient
|
2020-05-22 17:04:52 +00:00
|
|
|
|
|
|
|
import
|
|
|
|
# Standard library
|
2022-06-09 08:50:36 +00:00
|
|
|
std/[tables, strutils, terminal, typetraits],
|
2020-05-22 17:04:52 +00:00
|
|
|
|
|
|
|
# Nimble packages
|
2022-11-30 10:47:11 +00:00
|
|
|
chronos, confutils, presto, toml_serialization, metrics,
|
2020-10-27 09:00:57 +00:00
|
|
|
chronicles, chronicles/helpers as chroniclesHelpers, chronicles/topics_registry,
|
2020-08-27 18:23:41 +00:00
|
|
|
stew/io2,
|
2020-05-22 17:04:52 +00:00
|
|
|
|
|
|
|
# Local modules
|
2023-02-16 17:25:48 +00:00
|
|
|
./spec/[helpers, keystore],
|
2021-08-12 13:08:20 +00:00
|
|
|
./spec/datatypes/base,
|
2022-11-30 10:47:11 +00:00
|
|
|
"."/[beacon_clock, beacon_node_status, conf, version]
|
2020-05-22 17:04:52 +00:00
|
|
|
|
2021-03-16 08:06:45 +00:00
|
|
|
when defined(posix):
|
|
|
|
import termios
|
|
|
|
|
2022-11-30 10:47:11 +00:00
|
|
|
# Build/version identification exported as constant-valued metrics: the gauge
# value is always 1 and the interesting data rides in the labels, so a metrics
# scrape can tell exactly which binary (and Nim toolchain) is running.
declareGauge versionGauge, "Nimbus version info (as metric labels)", ["version", "commit"], name = "version"
versionGauge.set(1, labelValues=[fullVersionStr, gitRevision])

declareGauge nimVersionGauge, "Nim version info", ["version", "nim_commit"], name = "nim_version"
nimVersionGauge.set(1, labelValues=[NimVersion, getNimGitHash()])
|
|
|
|
|
2022-03-05 02:33:15 +00:00
|
|
|
export
|
|
|
|
confutils, toml_serialization, beacon_clock, beacon_node_status, conf
|
2021-10-19 14:09:26 +00:00
|
|
|
|
2021-07-13 11:15:07 +00:00
|
|
|
type
  # Callback invoked once per slot by `runSlotLoop`; the returned future
  # resolves to `true` when the loop should terminate.
  SlotStartProc*[T] = proc(node: T, wallTime: BeaconTime,
                           lastSlot: Slot): Future[bool] {.gcsafe,
    raises: [Defect].}
|
|
|
|
|
2021-11-02 17:06:36 +00:00
|
|
|
# silly chronicles, colors is a compile-time property
|
|
|
|
proc stripAnsi(v: string): string =
  ## Returns `v` with ANSI SGR escape sequences (`ESC '[' <digits/semicolons> 'm'`)
  ## removed. Anything that merely looks like the start of such a sequence but
  ## does not terminate in `'m'` is copied through unchanged.
  var cleaned = newStringOfCap(v.len)
  var pos = 0
  while pos < v.len:
    if v[pos] == '\x1b' and pos + 1 < v.len and v[pos + 1] == '[':
      # Candidate escape sequence: consume the parameter bytes and check
      # whether it closes with the SGR terminator 'm'.
      var scan = pos + 2
      while scan < v.len and v[scan] in {'0'..'9', ';'}:
        inc scan
      if scan < v.len and v[scan] == 'm':
        # Complete sequence - drop it entirely and resume after the 'm'.
        pos = scan + 1
        continue
    # Ordinary character (or incomplete escape) - keep it.
    cleaned.add v[pos]
    inc pos
  cleaned
|
2020-05-22 17:04:52 +00:00
|
|
|
|
2021-03-26 06:52:01 +00:00
|
|
|
proc updateLogLevel*(logLevel: string) {.raises: [Defect, ValueError].} =
  ## Applies a ";"-separated log-level directive string. The first entry sets
  ## the global chronicles log level; any remaining entries are per-topic
  ## directives handed to `parseTopicDirectives`. Raises `ValueError` when the
  ## global level is not a recognized `LogLevel` name.
  # Updates log levels (without clearing old ones)
  let directives = logLevel.split(";")
  try:
    # `capitalizeAscii` lets users write e.g. "debug" or "DEBUG"
    setLogLevel(parseEnum[LogLevel](directives[0].capitalizeAscii()))
  except ValueError:
    # Re-raise with a user-facing message listing the accepted values
    raise (ref ValueError)(msg: "Please specify one of TRACE, DEBUG, INFO, NOTICE, WARN, ERROR or FATAL")

  if directives.len > 1:
    for topicName, settings in parseTopicDirectives(directives[1..^1]):
      if not setTopicState(topicName, settings.state, settings.logLevel):
        # Unknown topics are reported but not fatal
        warn "Unrecognized logging topic", topic = topicName
|
|
|
|
|
2022-05-30 08:25:27 +00:00
|
|
|
proc detectTTY*(stdoutKind: StdoutLogKind): StdoutLogKind =
  ## Resolves `StdoutLogKind.Auto` into a concrete output mode based on
  ## whether stdout is attached to a terminal; any explicitly chosen mode is
  ## returned unchanged.
  case stdoutKind
  of StdoutLogKind.Auto:
    if isatty(stdout):
      # Interactive terminal - ANSI colors are safe and pleasant
      StdoutLogKind.Colors
    else:
      # Not a TTY: emit plain text. This matches what released binaries did
      # before auto-detection existed and keeps systemd-captured journals
      # readable.
      StdoutLogKind.NoColors
  else:
    stdoutKind
|
|
|
|
|
2022-06-09 08:50:36 +00:00
|
|
|
when defaultChroniclesStream.outputs.type.arity == 2:
|
|
|
|
from std/os import splitFile
|
2022-09-26 17:14:24 +00:00
|
|
|
from "."/filepath import secureCreatePath
|
2022-06-09 08:50:36 +00:00
|
|
|
|
2021-11-02 17:06:36 +00:00
|
|
|
proc setupLogging*(
    logLevel: string, stdoutKind: StdoutLogKind, logFile: Option[OutFile]) =
  ## Wires up the two chronicles outputs (stdout at index 0, file at index 1)
  ## according to the requested stdout mode and optional log file, then applies
  ## `logLevel`. Exits the process with code 1 when `logLevel` is invalid.
  # In the cfg file for nimbus, we create two formats: textlines and json.
  # Here, we either write those logs to an output, or not, depending on the
  # given configuration.
  # Arguably, if we don't use a format, chronicles should not create it.

  when defaultChroniclesStream.outputs.type.arity != 2:
    # Build was compiled with a different chronicles sink configuration, so
    # the runtime options below cannot be applied.
    warn "Logging configuration options not enabled in the current build"
  else:
    # Naive approach where chronicles will form a string and we will discard
    # it, even if it could have skipped the formatting phase

    # Writer that swallows everything - used for disabled outputs
    proc noOutput(logLevel: LogLevel, msg: LogOutputStr) = discard
    proc writeAndFlush(f: File, msg: LogOutputStr) =
      try:
        f.write(msg)
        f.flushFile()
      except IOError as err:
        # Can't log a logging failure through chronicles - use its fallback
        logLoggingFailure(cstring(msg), err)

    proc stdoutFlush(logLevel: LogLevel, msg: LogOutputStr) =
      writeAndFlush(stdout, msg)

    proc noColorsFlush(logLevel: LogLevel, msg: LogOutputStr) =
      writeAndFlush(stdout, stripAnsi(msg))

    # Output 1 (the file sink): open the requested log file for appending,
    # falling back to a no-op writer on any failure.
    let fileWriter =
      if logFile.isSome():
        let
          logFile = logFile.get.string
          logFileDir = splitFile(logFile).dir
          lres = secureCreatePath(logFileDir)
        if lres.isOk():
          try:
            let
              f = open(logFile, fmAppend)
              x = proc(logLevel: LogLevel, msg: LogOutputStr) =
                writeAndFlush(f, msg) # will close when program terminates
            x
          except CatchableError as exc:
            error "Failed to create log file", logFile, msg = exc.msg
            noOutput
        else:
          error "Failed to create directory for log file",
            path = logFileDir, err = ioErrorMsg(lres.error)
          noOutput
      else:
        noOutput

    defaultChroniclesStream.outputs[1].writer = fileWriter

    # Output 0 (the stdout sink): pick a writer for the resolved mode
    let tmp = detectTTY(stdoutKind)

    case tmp
    of StdoutLogKind.Auto: raiseAssert "checked above"
    of StdoutLogKind.Colors:
      defaultChroniclesStream.outputs[0].writer = stdoutFlush
    of StdoutLogKind.NoColors:
      defaultChroniclesStream.outputs[0].writer = noColorsFlush
    of StdoutLogKind.Json:
      defaultChroniclesStream.outputs[0].writer = noOutput

      # In JSON mode, chain the (JSON-formatted) output 1 to stdout as well,
      # preserving whatever file writer was installed above.
      let prevWriter = defaultChroniclesStream.outputs[1].writer
      defaultChroniclesStream.outputs[1].writer =
        proc(logLevel: LogLevel, msg: LogOutputStr) =
          stdoutFlush(logLevel, msg)
          prevWriter(logLevel, msg)
    of StdoutLogKind.None:
      defaultChroniclesStream.outputs[0].writer = noOutput

  if logFile.isSome():
    warn "The --log-file option is deprecated. Consider redirecting the standard output to a file instead"

  try:
    updateLogLevel(logLevel)
  except ValueError as err:
    try:
      stderr.write "Invalid value for --log-level. " & err.msg
    except IOError:
      # stderr unavailable - last resort via stdout
      echo "Invalid value for --log-level. " & err.msg
    quit 1
|
|
|
|
|
2020-05-27 17:06:28 +00:00
|
|
|
template makeBannerAndConfig*(clientId: string, ConfType: type): untyped =
  ## Builds the `--version` banner string and loads the process configuration
  ## of type `ConfType` from the command line (plus an optional TOML config
  ## file). Exits with code 1 when the configuration cannot be parsed; the
  ## loaded config is the value of the template expression.
  let
    version = clientId & "\p" & copyrights & "\p\p" &
      "eth2 specification v" & SPEC_VERSION & "\p\p" &
      nimBanner

  # TODO for some reason, copyrights are printed when doing `--help`
  {.push warning[ProveInit]: off.}
  let config = try:
    ConfType.load(
      version = version, # but a short version string makes more sense...
      copyrightBanner = clientId,
      # When --config-file is given, layer TOML values under CLI options
      secondarySources = proc (config: ConfType, sources: auto) =
        if config.configFile.isSome:
          sources.addConfigFile(Toml, config.configFile.get)
    )
  except CatchableError as err:
    # We need to log to stderr here, because logging hasn't been configured yet
    stderr.write "Failure while loading the configuration:\n"
    stderr.write err.msg
    stderr.write "\n"

    # Special-case a common TOML mistake: supplying a single value where a
    # repeatable option requires an array
    if err[] of ConfigurationError and
       err.parent != nil and
       err.parent[] of TomlFieldReadingError:
      let fieldName = ((ref TomlFieldReadingError)(err.parent)).field
      if fieldName in ["web3-url", "bootstrap-node",
                       "direct-peer", "validator-monitor-pubkey"]:
        stderr.write "Since the '" & fieldName & "' option is allowed to " &
                     "have more than one value, please make sure to supply " &
                     "a properly formatted TOML array\n"
    quit 1
  {.pop.}
  config
|
2020-06-05 09:57:40 +00:00
|
|
|
|
2022-07-13 14:43:57 +00:00
|
|
|
proc checkIfShouldStopAtEpoch*(scheduledSlot: Slot,
                               stopAtEpoch: uint64): bool =
  ## Returns `true` (and logs) once the configured stop epoch has been
  ## reached; `false` when `stopAtEpoch` is 0 (disabled) or not yet reached.
  # Offset backwards slightly to allow this epoch's finalization check to occur
  if scheduledSlot <= 3 or stopAtEpoch == 0'u64:
    return false
  if (scheduledSlot - 3).epoch() < stopAtEpoch:
    return false

  info "Stopping at pre-chosen epoch",
    chosenEpoch = stopAtEpoch,
    epoch = scheduledSlot.epoch(),
    slot = scheduledSlot
  true
|
2021-03-16 08:06:45 +00:00
|
|
|
|
|
|
|
proc resetStdin*() =
  ## Re-enables terminal echo on stdin, which an interactive password prompt
  ## may have disabled. No-op on non-POSIX platforms.
  when defined(posix):
    # restore echoing, in case it was disabled by a password prompt
    let fd = stdin.getFileHandle()
    var settings: Termios
    discard fd.tcGetAttr(settings.addr)
    settings.c_lflag = settings.c_lflag or Cflag(ECHO)
    discard fd.tcSetAttr(TCSANOW, settings.addr)
|
|
|
|
|
2023-02-16 17:25:48 +00:00
|
|
|
proc runKeystoreCachePruningLoop*(cache: KeystoreCacheRef) {.async.} =
  ## Periodically (once a minute) evicts expired entries from `cache`.
  ## When the sleep is interrupted (e.g. the future is cancelled), the whole
  ## cache is cleared and the loop terminates.
  while true:
    try:
      await sleepAsync(60.seconds)
    except CatchableError:
      # Interrupted - wipe everything and stop pruning
      cache.clear()
      break
    cache.pruneExpiredKeys()
|
|
|
|
|
2021-07-13 11:15:07 +00:00
|
|
|
proc runSlotLoop*[T](node: T, startTime: BeaconTime,
                     slotProc: SlotStartProc[T]) {.async.} =
  ## Drives `slotProc` once per slot, starting from `startTime`, while
  ## compensating for system clock adjustments in both directions. Exits when
  ## `slotProc` returns `true`, or sets `bnStatus` to `Stopping` and returns
  ## when the clock moved backwards by more than one slot.
  var
    curSlot = startTime.slotOrZero()
    nextSlot = curSlot + 1 # No earlier than GENESIS_SLOT + 1
    timeToNextSlot = nextSlot.start_beacon_time() - startTime

  info "Scheduling first slot action",
    startTime = shortLog(startTime),
    nextSlot = shortLog(nextSlot),
    timeToNextSlot = shortLog(timeToNextSlot)

  while true:
    # Start by waiting for the time when the slot starts. Sleeping relinquishes
    # control to other tasks which may or may not finish within the allotted
    # time, so below, we need to be wary that the ship might have sailed
    # already.
    await sleepAsync(timeToNextSlot)

    let
      wallTime = node.beaconClock.now()
      wallSlot = wallTime.slotOrZero() # Always > GENESIS!

    if wallSlot < nextSlot:
      # While we were sleeping, the system clock changed and time moved
      # backwards!
      if wallSlot + 1 < nextSlot:
        # This is a critical condition where it's hard to reason about what
        # to do next - we'll call the attention of the user here by shutting
        # down.
        fatal "System time adjusted backwards significantly - clock may be inaccurate - shutting down",
          nextSlot = shortLog(nextSlot),
          wallSlot = shortLog(wallSlot)
        bnStatus = BeaconNodeStatus.Stopping
        return

      # Time moved back by a single slot - this could be a minor adjustment,
      # for example when NTP does its thing after not working for a while
      warn "System time adjusted backwards, rescheduling slot actions",
        wallTime = shortLog(wallTime),
        nextSlot = shortLog(nextSlot),
        wallSlot = shortLog(wallSlot)

      # cur & next slot remain the same
      timeToNextSlot = nextSlot.start_beacon_time() - wallTime
      continue

    if wallSlot > nextSlot + SLOTS_PER_EPOCH:
      # Time moved forwards by more than an epoch - either the clock was reset
      # or we've been stuck in processing for a long time - either way, we will
      # skip ahead so that we only process the events of the last
      # SLOTS_PER_EPOCH slots
      warn "Time moved forwards by more than an epoch, skipping ahead",
        curSlot = shortLog(curSlot),
        nextSlot = shortLog(nextSlot),
        wallSlot = shortLog(wallSlot)

      curSlot = wallSlot - SLOTS_PER_EPOCH

    elif wallSlot > nextSlot:
      notice "Missed expected slot start, catching up",
        delay = shortLog(wallTime - nextSlot.start_beacon_time()),
        curSlot = shortLog(curSlot),
        # NOTE(review): this logs curSlot under the "nextSlot" key - possibly
        # intended to be shortLog(nextSlot); verify before changing
        nextSlot = shortLog(curSlot)

    let breakLoop = await slotProc(node, wallTime, curSlot)
    if breakLoop:
      break

    # Schedule the next iteration relative to the wall clock as observed
    # before slotProc ran
    curSlot = wallSlot
    nextSlot = wallSlot + 1
    timeToNextSlot = nextSlot.start_beacon_time() - node.beaconClock.now()
|
2022-08-19 10:30:07 +00:00
|
|
|
|
|
|
|
proc init*(T: type RestServerRef,
           ip: ValidIpAddress,
           port: Port,
           allowedOrigin: Option[string],
           validateFn: PatternCallback,
           config: AnyConf): T =
  ## Creates a presto REST server bound to `ip:port` with timeouts and size
  ## limits taken from `config`. Returns `nil` (after logging a notice) when
  ## the server cannot be created.
  # NOTE(review): `allowedOrigin` is not referenced in this body - confirm
  # whether it should be forwarded to the server
  let address = initTAddress(ip, port)
  let serverFlags = {HttpServerFlags.QueryCommaSeparatedArray,
                     HttpServerFlags.NotifyDisconnect}
  # We increase default timeout to help validator clients who poll our server
  # at least once per slot (12.seconds).
  let
    headersTimeout =
      if config.restRequestTimeout == 0:
        chronos.InfiniteDuration
      else:
        seconds(int64(config.restRequestTimeout))
    maxHeadersSize = config.restMaxRequestHeadersSize * 1024
    maxRequestBodySize = config.restMaxRequestBodySize * 1024

  let res = try:
    RestServerRef.new(RestRouter.init(validateFn),
                      address, serverFlags = serverFlags,
                      httpHeadersTimeout = headersTimeout,
                      maxHeadersSize = maxHeadersSize,
                      maxRequestBodySize = maxRequestBodySize)
  except CatchableError as err:
    notice "Rest server could not be started", address = $address,
           reason = err.msg
    return nil

  if res.isErr():
    notice "Rest server could not be started", address = $address,
           reason = res.error()
    nil
  else:
    notice "Starting REST HTTP server",
      url = "http://" & $ip & ":" & $port & "/"

    res.get()
|
|
|
|
|
|
|
|
type
  KeymanagerInitResult* = object
    ## Result of `initKeymanagerServer`: the REST server that will host the
    ## Keymanager API (nil when disabled or creation failed) and the API
    ## token read from the token file (empty when disabled).
    server*: RestServerRef
    token*: string
|
|
|
|
|
|
|
|
proc initKeymanagerServer*(
    config: AnyConf,
    existingRestServer: RestServerRef = nil): KeymanagerInitResult
    {.raises: [Defect].} =
  ## Sets up the Keymanager API server when `config.keymanagerEnabled` is set:
  ## reads and validates the API token file, then either reuses
  ## `existingRestServer` (beacon node, when keymanager address/port match the
  ## main REST endpoint) or creates a dedicated server. Exits the process with
  ## code 1 on missing/unreadable/empty token files.

  var token: string
  let keymanagerServer = if config.keymanagerEnabled:
    if config.keymanagerTokenFile.isNone:
      echo "To enable the Keymanager API, you must also specify " &
        "the --keymanager-token-file option."
      quit 1

    let
      tokenFilePath = config.keymanagerTokenFile.get.string
      tokenFileReadRes = readAllChars(tokenFilePath)

    if tokenFileReadRes.isErr:
      fatal "Failed to read the keymanager token file",
        error = $tokenFileReadRes.error
      quit 1

    # Surrounding whitespace/newlines in the token file are ignored
    token = tokenFileReadRes.value.strip
    if token.len == 0:
      fatal "The keymanager token should not be empty", tokenFilePath
      quit 1

    when config is BeaconNodeConf:
      # Share the main REST server when the keymanager endpoint coincides
      # with it; otherwise start a separate one
      if existingRestServer != nil and
         config.restAddress == config.keymanagerAddress and
         config.restPort == config.keymanagerPort:
        existingRestServer
      else:
        RestServerRef.init(config.keymanagerAddress, config.keymanagerPort,
                           config.keymanagerAllowedOrigin,
                           validateKeymanagerApiQueries,
                           config)
    else:
      # Validator client builds have no main REST server to share
      RestServerRef.init(config.keymanagerAddress, config.keymanagerPort,
                         config.keymanagerAllowedOrigin,
                         validateKeymanagerApiQueries,
                         config)
  else:
    nil

  KeymanagerInitResult(server: keymanagerServer, token: token)
|
2022-12-09 16:05:55 +00:00
|
|
|
|
|
|
|
proc quitDoppelganger*() =
  ## Logs a fatal doppelganger warning and terminates the process with a
  ## dedicated exit code, so supervisors can distinguish this condition and
  ## keep the affected validators shut down.
  # Avoid colliding with
  # https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Process%20Exit%20Codes
  # This error code is used to permanently shut down validators
  const QuitDoppelganger = 129

  fatal "Doppelganger detection triggered! It appears a validator loaded into " &
    "this process is already live on the network - the validator is at high " &
    "risk of being slashed due to the same keys being used in two setups. " &
    "See https://nimbus.guide/doppelganger-detection.html for more information!"

  quit QuitDoppelganger
|