Logging and startup improvements (#3038)

* Logging and startup improvements

Color support for released binaries!

* startup scripts no longer log to a file by default - this only affects
  source builds, since released binaries don't support file logging
* add `--log-stdout` option to control logging to stdout (colors, json) - see
  the usage sketch after the log samples below
* detect TTYs vs redirected output and log accordingly
* add option to disable log colors at runtime
* simplify several "common" logs, showing the most important information
earlier and more clearly
* remove line numbers / file information / tid - these take up space and
are of little use to end users
  * still enabled in debug builds and tools
* remove `testnet_servers_image` compile-time option
* server images, released binaries and compile-from-source now offer
the same behaviour and features
* fixes https://github.com/status-im/nimbus-eth2/issues/2326
* fixes https://github.com/status-im/nimbus-eth2/issues/1794
* remove instantaneous block speed from the sync message, keeping only the
  average

before:

```
INF 2021-10-28 16:45:59.000+02:00 Slot start                                 topics="beacnde" tid=386429 file=nimbus_beacon_node.nim:884 lastSlot=2384027 wallSlot=2384028 delay=461us84ns peers=0 head=75a10ee5:3348 headEpoch=104 finalized=cd6804ba:3264 finalizedEpoch=102 sync="wwwwwwwwww:0:0.0000:0.0000:00h00m (3348)"
INF 2021-10-28 16:45:59.046+02:00 Slot end                                   topics="beacnde" tid=386429 file=nimbus_beacon_node.nim:821 slot=2384028 nextSlot=2384029 head=75a10ee5:3348 headEpoch=104 finalizedHead=cd6804ba:3264 finalizedEpoch=102 nextAttestationSlot=-1 nextProposalSlot=-1 nextActionWait=n/a
```

after:

```
INF 2021-10-28 22:43:23.033+02:00 Slot start                                 topics="beacnde" slot=2385815 epoch=74556 sync="DDPDDPUDDD:10:5.2258:01h19m (2361088)" peers=37 head=eacd2dae:2361096 finalized=73782:a4751487 delay=33ms687us715ns
INF 2021-10-28 22:43:23.291+02:00 Slot end                                   topics="beacnde" slot=2385815 nextActionWait=n/a nextAttestationSlot=-1 nextProposalSlot=-1 head=eacd2dae:2361096
```
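
For reference, a quick usage sketch of the new flag. The values come from the
`--log-stdout` option added in this change; `run-mainnet-beacon-node.sh` is the
wrapper script already used in the docs, and the file name is just an example:

```sh
# auto (default): colors on a TTY, plain text when output is redirected
./run-mainnet-beacon-node.sh --log-stdout=auto

# force plain-text logs, e.g. for systemd/journald capture
./run-mainnet-beacon-node.sh --log-stdout=nocolors

# machine-readable output: one JSON object per line, redirected to a file
./run-mainnet-beacon-node.sh --log-stdout=json > filename.jsonl

# disable stdout logging entirely
./run-mainnet-beacon-node.sh --log-stdout=none
```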

* fix comment

* documentation updates

* mention `--log-file` may be deprecated in the future
* update various docs
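
A sketch of the migration hinted at above, mirroring the commands in the new
logging documentation (the file name is only an example):

```sh
# historical behaviour - may be deprecated in a future release
./run-mainnet-beacon-node.sh --log-file=filename.jsonl

# suggested replacement: JSON on stdout, redirected (and rotated) externally
./run-mainnet-beacon-node.sh --log-stdout=json > filename.jsonl
```
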
Authored by Jacek Sieka on 2021-11-02 18:06:36 +01:00, committed by GitHub
(commit 233d756518, parent 56c6d6b583).
26 changed files with 286 additions and 157 deletions.

@@ -33,7 +33,6 @@ build_script:
 test_script:
 - mingw32-make -j2 ARCH_OVERRIDE=%PLATFORM% LOG_LEVEL=TRACE
-- mingw32-make -j2 ARCH_OVERRIDE=%PLATFORM% LOG_LEVEL=TRACE NIMFLAGS="-d:testnet_servers_image" nimbus_beacon_node
 - mingw32-make -j2 ARCH_OVERRIDE=%PLATFORM% DISABLE_TEST_FIXTURES_SCRIPT=1 test
 deploy: off

@@ -274,7 +274,7 @@ jobs:
         if [[ "${{ runner.os }}" == "macOS" ]]; then
           ulimit -n 1024
         fi
-        make -j$ncpu ARCH_OVERRIDE=$PLATFORM LOG_LEVEL=TRACE NIMFLAGS="-d:testnet_servers_image" nimbus_beacon_node nimbus_validator_client
+        make -j$ncpu ARCH_OVERRIDE=$PLATFORM LOG_LEVEL=TRACE nimbus_beacon_node nimbus_validator_client
     - name: Run nimbus-eth2 tests
       if: matrix.target.TEST_KIND == 'unit-tests'

@@ -56,5 +56,4 @@ script:
 # Building Nim-1.0.4 takes up to 10 minutes on Travis - the time limit after which jobs are cancelled for having no output
 - make -j${NPROC} NIMFLAGS="--parallelBuild:2" V=1 update # to allow a newer Nim version to be detected
 - make -j${NPROC} NIMFLAGS="--parallelBuild:2" LOG_LEVEL=TRACE
-- make -j${NPROC} NIMFLAGS="--parallelBuild:2 -d:testnet_servers_image" LOG_LEVEL=TRACE nimbus_beacon_node
 - make -j${NPROC} NIMFLAGS="--parallelBuild:2" DISABLE_TEST_FIXTURES_SCRIPT=1 test

Jenkinsfile

@@ -36,7 +36,7 @@ def runStages() {
       sh """#!/bin/bash
       set -e
       make -j${env.NPROC}
-      make -j${env.NPROC} LOG_LEVEL=TRACE NIMFLAGS='-d:testnet_servers_image' nimbus_beacon_node
+      make -j${env.NPROC} LOG_LEVEL=TRACE
       """
     }

@@ -64,10 +64,6 @@ jobs:
       git config --global core.autocrlf false
       mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} CI_CACHE=NimBinaries update
       mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} LOG_LEVEL=TRACE
-      if [[ $PLATFORM == "x64" ]]; then
-        # everything builds more slowly on 32-bit, since there's no libbacktrace support
-        mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} LOG_LEVEL=TRACE NIMFLAGS="-d:testnet_servers_image" nimbus_beacon_node
-      fi
       file build/nimbus_beacon_node
       # fail fast
       export NIMTEST_ABORT_ON_ERROR=1

@@ -70,6 +70,13 @@ type
     v2
     both

+  StdoutLogKind* {.pure.} = enum
+    Auto = "auto"
+    Colors = "colors"
+    NoColors = "nocolors"
+    Json = "json"
+    None = "none"
+
   SlashProtCmd* = enum
     `import` = "Import a EIP-3076 slashing protection interchange file"
     `export` = "Export a EIP-3076 slashing protection interchange file"
@@ -82,6 +89,12 @@ type
       defaultValue: "INFO"
       name: "log-level" }: string

+    logStdout* {.
+      desc: "Specifies what kind of logs should be written to stdout (auto, colors, nocolors, json)"
+      defaultValueDesc: "auto"
+      defaultValue: StdoutLogKind.Auto
+      name: "log-stdout" }: StdoutLogKind
+
     logFile* {.
       desc: "Specifies a path for the written Json log file"
       name: "log-file" }: Option[OutFile]
@@ -518,6 +531,12 @@ type
       defaultValue: "INFO"
      name: "log-level" }: string

+    logStdout* {.
+      desc: "Specifies what kind of logs should be written to stdout (auto, colors, nocolors, json)"
+      defaultValueDesc: "auto"
+      defaultValue: StdoutLogKind.Auto
+      name: "log-stdout" }: StdoutLogKind
+
     logFile* {.
       desc: "Specifies a path for the written Json log file"
       name: "log-file" }: Option[OutFile]

@@ -1220,9 +1220,8 @@ proc updateHead*(
       beacon_reorgs_total.inc()
     else:
       debug "Updated head block",
+        head = shortLog(dag.headState.blck),
         stateRoot = shortLog(getStateRoot(dag.headState.data)),
-        headBlock = shortLog(dag.headState.blck),
-        stateSlot = shortLog(getStateField(dag.headState.data, slot)),
         justified = shortLog(getStateField(
           dag.headState.data, current_justified_checkpoint)),
         finalized = shortLog(getStateField(
@@ -1285,9 +1284,13 @@ proc updateHead*(
     beacon_current_active_validators.set(number_of_active_validators)

   if finalizedHead != dag.finalizedHead:
-    notice "Reached new finalization checkpoint",
-      newFinalizedHead = shortLog(finalizedHead),
-      oldFinalizedHead = shortLog(dag.finalizedHead)
+    info "Reached new finalization checkpoint",
+      head = shortLog(dag.headState.blck),
+      stateRoot = shortLog(getStateRoot(dag.headState.data)),
+      justified = shortLog(getStateField(
+        dag.headState.data, current_justified_checkpoint)),
+      finalized = shortLog(getStateField(
+        dag.headState.data, finalized_checkpoint))

     dag.finalizedHead = finalizedHead

@@ -836,19 +836,14 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
   info "Slot end",
     slot = shortLog(slot),
-    nextSlot = shortLog(slot + 1),
-    head = shortLog(node.dag.head),
-    headEpoch = shortLog(node.dag.head.slot.compute_epoch_at_slot()),
-    finalizedHead = shortLog(node.dag.finalizedHead.blck),
-    finalizedEpoch =
-      shortLog(node.dag.finalizedHead.blck.slot.compute_epoch_at_slot()),
-    nextAttestationSlot = displayInt64(nextAttestationSlot),
-    nextProposalSlot = displayInt64(nextProposalSlot),
     nextActionWait =
       if nextAttestationSlot == FAR_FUTURE_SLOT:
         "n/a"
       else:
-        shortLog(nextActionWaitTime)
+        shortLog(nextActionWaitTime),
+    nextAttestationSlot = displayInt64(nextAttestationSlot),
+    nextProposalSlot = displayInt64(nextProposalSlot),
+    head = shortLog(node.dag.head)

   if nextAttestationSlot != FAR_FUTURE_SLOT:
     next_action_wait.set(nextActionWaitTime.toFloatSeconds)
@@ -898,17 +893,16 @@ proc onSlotStart(
     delay = wallTime - expectedSlot.toBeaconTime()

   info "Slot start",
-    lastSlot = shortLog(lastSlot),
-    wallSlot = shortLog(wallSlot),
-    delay = shortLog(delay),
-    peers = len(node.network.peerPool),
-    head = shortLog(node.dag.head),
-    headEpoch = shortLog(node.dag.head.slot.compute_epoch_at_slot()),
-    finalized = shortLog(node.dag.finalizedHead.blck),
-    finalizedEpoch = shortLog(finalizedEpoch),
+    slot = shortLog(wallSlot),
+    epoch = shortLog(wallSlot.epoch),
     sync =
       if node.syncManager.inProgress: node.syncManager.syncStatus
-      else: "synced"
+      else: "synced",
+    peers = len(node.network.peerPool),
+    head = shortLog(node.dag.head),
+    finalized = shortLog(getStateField(
+      node.dag.headState.data, finalized_checkpoint)),
+    delay = shortLog(delay)

   # Check before any re-scheduling of onSlotStart()
   checkIfShouldStopAtEpoch(wallSlot, node.config.stopAtEpoch)
@@ -1166,6 +1160,10 @@ proc start(node: BeaconNode) {.raises: [Defect, CatchableError].} =
     timeSinceFinalization =
       node.beaconClock.now() - finalizedHead.slot.toBeaconTime(),
     head = shortLog(head),
+    justified = shortLog(getStateField(
+      node.dag.headState.data, current_justified_checkpoint)),
+    finalized = shortLog(getStateField(
+      node.dag.headState.data, finalized_checkpoint)),
     finalizedHead = shortLog(finalizedHead),
     SLOTS_PER_EPOCH,
     SECONDS_PER_SLOT,
@@ -1292,14 +1290,16 @@ proc initStatusBar(node: BeaconNode) {.raises: [Defect, ValueError].} =
     node.config.statusBarContents,
     dataResolver)

-  when compiles(defaultChroniclesStream.output.writer):
-    defaultChroniclesStream.output.writer =
+  when compiles(defaultChroniclesStream.outputs[0].writer):
+    let tmp = defaultChroniclesStream.outputs[0].writer
+
+    defaultChroniclesStream.outputs[0].writer =
       proc (logLevel: LogLevel, msg: LogOutputStr) {.raises: [Defect].} =
         try:
           # p.hidePrompt
           erase statusBar
           # p.writeLine msg
-          stdout.write msg
+          tmp(logLevel, msg)
           render statusBar
           # p.showPrompt
         except Exception as e: # render raises Exception
@@ -1827,14 +1827,12 @@ programMain:
   var
     config = makeBannerAndConfig(clientId, BeaconNodeConf)

-  setupStdoutLogging(config.logLevel)
-
   if not(checkAndCreateDataDir(string(config.dataDir))):
     # We are unable to access/create data folder or data folder's
     # permissions are insecure.
     quit QuitFailure

-  setupLogging(config.logLevel, config.logFile)
+  setupLogging(config.logLevel, config.logStdout, config.logFile)

   ## This Ctrl+C handler exits the program in non-graceful way.
   ## It's responsible for handling Ctrl+C in sub-commands such

@@ -1,10 +1,10 @@
--d:"chronicles_sinks=textlines,json[file]"
+-d:"chronicles_sinks=textlines[dynamic],json[dynamic]"
 -d:"chronicles_runtime_filtering=on"
--d:"chronicles_default_output_device=dynamic"
+-d:"chronicles_disable_thread_id"
+@if release:
+  -d:"chronicles_line_numbers:0"
+@end

 # Use only `secp256k1` public key cryptography as an identity in LibP2P.
 -d:"libp2p_pki_schemes=secp256k1"
-
-@if testnet_servers_image:
-  -d:"chronicles_sinks=json"
-@end

@@ -11,7 +11,7 @@
 import
   # Standard library
-  std/[os, tables, strutils, typetraits],
+  std/[os, tables, strutils, terminal, typetraits],

   # Nimble packages
   chronos, confutils/defs,
@@ -21,7 +21,7 @@ import
   # Local modules
   ./spec/[helpers],
   ./spec/datatypes/base,
-  "."/[beacon_clock, filepath, beacon_node_status]
+  "."/[beacon_clock, conf, filepath, beacon_node_status]

 when defined(posix):
   import termios
@@ -33,15 +33,41 @@ type
     lastSlot: Slot): Future[void] {.gcsafe,
   raises: [Defect].}

-proc setupStdoutLogging*(logLevel: string) =
-  when compiles(defaultChroniclesStream.output.writer):
-    defaultChroniclesStream.outputs[0].writer =
-      proc (logLevel: LogLevel, msg: LogOutputStr) {.gcsafe, raises: [Defect].} =
-        try:
-          stdout.write(msg)
-          stdout.flushFile()
-        except IOError as err:
-          logLoggingFailure(cstring(msg), err)
+# silly chronicles, colors is a compile-time property
+proc stripAnsi(v: string): string =
+  var
+    res = newStringOfCap(v.len)
+    i: int
+
+  while i < v.len:
+    let c = v[i]
+    if c == '\x1b':
+      var
+        x = i + 1
+        found = false
+
+      while x < v.len: # look for [..m
+        let c2 = v[x]
+        if x == i + 1:
+          if c2 != '[':
+            break
+        else:
+          if c2 in {'0'..'9'} + {';'}:
+            discard # keep looking
+          elif c2 == 'm':
+            i = x + 1
+            found = true
+            break
+          else:
+            break
+        inc x
+
+      if found: # skip adding c
+        continue
+    res.add c
+    inc i
+
+  res

 proc updateLogLevel*(logLevel: string) {.raises: [Defect, ValueError].} =
   # Updates log levels (without clearing old ones)
@@ -56,27 +82,90 @@ proc updateLogLevel*(logLevel: string) {.raises: [Defect, ValueError].} =
     if not setTopicState(topicName, settings.state, settings.logLevel):
       warn "Unrecognized logging topic", topic = topicName

-proc setupLogging*(logLevel: string, logFile: Option[OutFile]) =
-  if logFile.isSome:
-    when defaultChroniclesStream.outputs.type.arity > 1:
-      block openLogFile:
+proc setupLogging*(
+    logLevel: string, stdoutKind: StdoutLogKind, logFile: Option[OutFile]) =
+  # In the cfg file for nimbus, we create two formats: textlines and json.
+  # Here, we either write those logs to an output, or not, depending on the
+  # given configuration.
+  # Arguably, if we don't use a format, chronicles should not create it.
+  when defaultChroniclesStream.outputs.type.arity != 2:
+    warn "Logging configuration options not enabled in the current build"
+  else:
+    # Naive approach where chronicles will form a string and we will discard
+    # it, even if it could have skipped the formatting phase
+    proc noOutput(logLevel: LogLevel, msg: LogOutputStr) = discard
+    proc writeAndFlush(f: File, msg: LogOutputStr) =
+      try:
+        f.write(msg)
+        f.flushFile()
+      except IOError as err:
+        logLoggingFailure(cstring(msg), err)
+
+    proc stdoutFlush(logLevel: LogLevel, msg: LogOutputStr) =
+      writeAndFlush(stdout, msg)
+
+    proc noColorsFlush(logLevel: LogLevel, msg: LogOutputStr) =
+      writeAndFlush(stdout, stripAnsi(msg))
+
+    let fileWriter =
+      if logFile.isSome():
         let
           logFile = logFile.get.string
           logFileDir = splitFile(logFile).dir
-        let lres = secureCreatePath(logFileDir)
-        if lres.isErr():
+          lres = secureCreatePath(logFileDir)
+
+        if lres.isOk():
+          try:
+            let
+              f = open(logFile, fmAppend)
+              x = proc(logLevel: LogLevel, msg: LogOutputStr) =
+                writeAndFlush(f, msg) # will close when program terminates
+            x
+          except CatchableError as exc:
+            error "Failed to create log file", logFile, msg = exc.msg
+            noOutput
+        else:
           error "Failed to create directory for log file",
             path = logFileDir, err = ioErrorMsg(lres.error)
-          break openLogFile
-
-        try:
-          if not defaultChroniclesStream.outputs[1].open(logFile):
-            error "Failed to create log file", logFile
-        except CatchableError as exc:
-          # TODO why is there both exception and bool?
-          error "Failed to create log file", logFile, msg = exc.msg
-  else:
-    warn "The --log-file option is not active in the current build"
+          noOutput
+      else:
+        noOutput
+
+    defaultChroniclesStream.outputs[1].writer = fileWriter
+
+    let tmp =
+      if stdoutKind == StdoutLogKind.Auto:
+        if logFile.isSome():
+          # No stdout logging by default, when file logging is enabled
+          StdoutLogKind.None
+        elif isatty(stdout):
+          # On a TTY, let's be fancy
+          StdoutLogKind.Colors
+        else:
+          # When there's no TTY, we output no colors because this matches what
+          # released binaries were doing before auto-detection was around and
+          # looks decent in systemd-captured journals.
+          StdoutLogKind.NoColors
+      else:
+        stdoutKind
+
+    case tmp
+    of StdoutLogKind.Auto: raiseAssert "checked above"
+    of StdoutLogKind.Colors:
+      defaultChroniclesStream.outputs[0].writer = stdoutFlush
+    of StdoutLogKind.NoColors:
+      defaultChroniclesStream.outputs[0].writer = noColorsFlush
+    of StdoutLogKind.Json:
+      defaultChroniclesStream.outputs[0].writer = noOutput
+
+      let prevWriter = defaultChroniclesStream.outputs[1].writer
+      defaultChroniclesStream.outputs[1].writer =
+        proc(logLevel: LogLevel, msg: LogOutputStr) =
+          stdoutFlush(logLevel, msg)
+          prevWriter(logLevel, msg)
+    of StdoutLogKind.None:
+      defaultChroniclesStream.outputs[0].writer = noOutput

   try:
     updateLogLevel(logLevel)

@@ -146,11 +146,10 @@ proc onSlotStart(vc: ValidatorClientRef, wallTime: BeaconTime,
   checkIfShouldStopAtEpoch(wallSlot.slot, vc.config.stopAtEpoch)

   info "Slot start",
-    lastSlot = shortLog(lastSlot),
-    wallSlot = shortLog(wallSlot.slot),
-    delay = shortLog(delay),
+    slot = shortLog(wallSlot.slot),
     attestationIn = vc.getDurationToNextAttestation(wallSlot.slot),
-    blockIn = vc.getDurationToNextBlock(wallSlot.slot)
+    blockIn = vc.getDurationToNextBlock(wallSlot.slot),
+    delay = shortLog(delay)

 proc asyncRun*(vc: ValidatorClientRef) {.async.} =
   vc.fallbackService.start()
@@ -164,8 +163,7 @@ programMain:
   let config = makeBannerAndConfig("Nimbus validator client " & fullVersionStr,
                                    ValidatorClientConf)

-  setupStdoutLogging(config.logLevel)
-  setupLogging(config.logLevel, config.logFile)
+  setupLogging(config.logLevel, config.logStdout, config.logFile)

   case config.cmd
   of VCNoCommand:

@@ -1,11 +1,10 @@
--d:"chronicles_sinks=textlines,json[file]"
+-d:"chronicles_sinks=textlines[dynamic],json[dynamic]"
 -d:"chronicles_runtime_filtering=on"
--d:"chronicles_default_output_device=dynamic"
+-d:"chronicles_disable_thread_id"
+@if release:
+  -d:"chronicles_line_numbers:0"
+@end

 # Use only `secp256k1` public key cryptography as an identity in LibP2P.
 -d:"libp2p_pki_schemes=secp256k1"
-
-@if testnet_servers_image:
-  -d:"chronicles_sinks=json"
-@end

@@ -1151,7 +1151,6 @@ proc syncLoop[A, B](man: SyncManager[A, B]) {.async.} =
       # Update status string
       man.syncStatus = map & ":" & $pending & ":" &
-          man.insSyncSpeed.formatBiggestFloat(ffDecimal, 4) & ":" &
           man.avgSyncSpeed.formatBiggestFloat(ffDecimal, 4) & ":" &
           man.timeLeft.toTimeLeftString() &
           " (" & $man.queue.outSlot & ")"

@@ -71,7 +71,7 @@ switch("passL", "-fno-omit-frame-pointer")
 --excessiveStackTrace:on
 # enable metric collection
 --define:metrics
---define:chronicles_line_numbers
+--define:chronicles_line_numbers # These are disabled for release binaries
 # for heap-usage-by-instance-type metrics and object base-type strings
 --define:nimTypeNames

@@ -20,9 +20,8 @@ PLATFORM="${1}"
 BINARIES="nimbus_beacon_node nimbus_signing_process"

 #- we need to build everything against libraries available inside this container, including the Nim compiler
-#- we disable the log file and log colours; the user only has to worry about logging stdout now
 make clean
-NIMFLAGS_COMMON="-d:disableMarchNative -d:chronicles_sinks=textlines -d:chronicles_colors=none --gcc.options.debug:'-g1' --clang.options.debug:'-gline-tables-only'"
+NIMFLAGS_COMMON="-d:disableMarchNative --gcc.options.debug:'-g1' --clang.options.debug:'-gline-tables-only'"

 if [[ "${PLATFORM}" == "Windows_amd64" ]]; then
   # Cross-compilation using the MXE distribution of Mingw-w64
   export PATH="/usr/lib/mxe/usr/bin:${PATH}"

@@ -33,7 +33,7 @@
 - [Set up log rotation](./log-rotate.md)
 - [Verify the integrity of Nimbus](./checksums.md)
 - [Back up your database](./database-backup.md)
-- [Change log level](./log-levels.md)
+- [Set up logging](./logging.md)
 - [Set up email notifications](./email-notifications.md)
 - [Graffiti the blockchain](./graffiti.md)
 - [Optimise for profitability](./profits.md)
@@ -64,3 +64,4 @@
 - [Prater: What you should know](./prater.md)
 - [Security Audit](./audit.md)
 - [FAQ](./faq.md)

@@ -13,9 +13,3 @@
 We've designed the build process to be reproducible. In practice, this means that anyone can verify that these exact binaries were produced from the corresponding source code commits. For more about the philosophy and importance of this feature see [reproducible-builds.org](https://reproducible-builds.org/).

 For instructions on how to reproduce those binaries, see "README.md" inside the archive.
-
-> **N.B.** These binaries are meant to run as a service. As such, they do not
-support the `log-file` [option](./options.md) - duplicating stdout to a
-log file would complicate things. We recommend capturing
-their output (stdout and stderr) with a [log rotation
-helper](./log-rotate.md) instead.

@@ -8,9 +8,6 @@ These images are simply the contents of [release tarballs](./binaries.md) inside
 The unpacked archive is in `/home/user/nimbus-eth2` which is also the default *WORKDIR*. The default *ENTRYPOINT* is the binary itself: `/home/user/nimbus-eth2/build/nimbus\_beacon\_node`

-> **N.B.** Docker images do not support the `log-file` [option](./options.md) - The way we recommend setting up docker is to capture its console output with a [log rotation helper](./log-rotate.md).
-
 ## Usage

 You need to create an external data directory and mount it as a volume inside the container, with mounting point: `/home/user/nimbus-eth2/build/data`

@@ -1,26 +1 @@
-# Change log level
-
-You can customise Nimbus' verbosity with the `--log-level` option.
-
-For example:
-
-```
-./run-mainnet-beacon-node.sh --log-level=WARN
-```
-
-The default value is `INFO`.
-
-Possible values (in order of decreasing verbosity) are:
-
-```
-TRACE
-DEBUG
-INFO
-NOTICE
-WARN
-ERROR
-FATAL
-NONE
-```
+This information has moved to [logging](./logging.md).

@@ -1,10 +1,10 @@
 # Log rotation

-Nimbus logs are written to stdout, and optionally to a file. Writing to a file for a long-running process may lead to difficulties when the file grows large. This is typically solved with a *log rotator*. A log rotator is responsible for switching the written-to file, as well as compressing and removing old logs.
+Nimbus logs are written to `stdout`, and can be redirected to a file. Writing to a file for a long-running process may lead to difficulties when the file grows large. This is typically solved with a *log rotator*. A log rotator is responsible for switching the written-to file, as well as compressing and removing old logs.

 ## Using `logrotate`

-[logrotate](https://github.com/logrotate/logrotate) provides log rotation and compression. The corresponding package will install its Cron hooks (or Systemd timer) -- all you have to do is add a configuration file for Nimbus-eth2 in "/etc/logrotate.d/nimbus-eth2":
+[logrotate](https://github.com/logrotate/logrotate) provides log rotation and compression. The corresponding package will install its Cron hooks (or Systemd timer) -- all you have to do is add a configuration file for Nimbus in "/etc/logrotate.d/nimbus-eth2":

 ```text
 /var/log/nimbus-eth2/*.log {
@@ -14,7 +14,7 @@
 }
 ```

-The above assumes you've configured Nimbus-eth2 to write its logs to "/var/log/nimbus-eth2/" (usually by redirecting stout and stderr from your init script).
+The above assumes you've configured Nimbus to write its logs to "/var/log/nimbus-eth2/" (usually by redirecting `stdout` and `stderr` from your init script).

 "copytruncate" is required because, when it comes to moving the log file, `logrotate`'s default behaviour requires application support for re-opening that log file at runtime (something which is currently lacking). So, instead of a move, we tell `logrotate` to do a copy and a truncation of the existing file. A few log lines may be lost in the process.
@@ -29,13 +29,15 @@ rotate 7
 ## Using `rotatelogs`

-[rotatelogs](https://httpd.apache.org/docs/2.4/programs/rotatelogs.html) is available on most servers and can be used with `Docker`, `Systemd` and manual setups to write rotated logs files.
-In particular, when `Systemd` and its accompanying `Journald` log daemon are used, this setup avoids clogging the system log by keeping the Nimbus logs in a separate location.
+[rotatelogs](https://httpd.apache.org/docs/2.4/programs/rotatelogs.html) captures `stdout` logging and redirects it to a file, rotating and compressing on the fly.
+
+It is available on most servers and can be used with `Docker`, `Systemd` and manual setups to write rotated logs files.
+In particular, when `systemd` and its accompanying `journald` log daemon are used, this setup avoids clogging the system log by keeping the Nimbus logs in a separate location.

 ### Compression

-`rotatelogs` works by reading stdin and redirecting it to a file based on a name pattern. Whenever the log is about to be rotated, the application invokes a shell script with the old and new log files. Our aim is to compress the log file to save space. The [Nimbus-eth2 repo](https://github.com/status-im/nimbus-eth2/tree/unstable/scripts/rotatelogs-compress.sh) provides a helper script that does this:
+`rotatelogs` works by reading `stdin` and redirecting it to a file based on a name pattern. Whenever the log is about to be rotated, the application invokes a shell script with the old and new log files. Our aim is to compress the log file to save space. The [Nimbus-eth2 repo](https://github.com/status-im/nimbus-eth2/tree/unstable/scripts/rotatelogs-compress.sh) provides a helper script that does this:

 ```bash
 # Create a rotation script for rotatelogs
@@ -53,19 +55,6 @@ EOF
 chmod +x rotatelogs-compress.sh
 ```

-### Build
-
-Logs in files generally don't benefit from colors. To avoid colors being written to the file, additional flags can be added to the Nimbus [build process](./build.md) -- these flags are best saved in a build script to which one can add more options. Future versions of Nimbus will support disabling colors at runtime.
-
-```bash
-# Build nimbus with colors disabled
-cat << EOF > build-nbc-text.sh
-#!/bin/bash
-make NIMFLAGS="-d:chronicles_colors=off -d:chronicles_sinks=textlines" nimbus_beacon_node
-EOF
-```
-
 ### Run

 The final step is to redirect logs to `rotatelogs` using a pipe when starting Nimbus:

@@ -0,0 +1,58 @@
+# Logging
+
+Nimbus offers several options for logging - by default, logs are written to stdout using the [chronicles](https://github.com/status-im/nim-chronicles#introduction) `textlines` format which is convenient to read and can be used with tooling for [heroku/logfmt](https://brandur.org/logfmt).
+
+## Change log level
+
+You can customise Nimbus' verbosity with the `--log-level` option.
+
+For example:
+
+```
+./run-mainnet-beacon-node.sh --log-level=WARN
+```
+
+The default value is `INFO`.
+
+Possible values (in order of decreasing verbosity) are:
+
+```
+TRACE
+DEBUG
+INFO
+NOTICE
+WARN
+ERROR
+FATAL
+NONE
+```
+
+## Change logging style
+
+Nimbus supports 3 styles of logging: `colors`, `nocolors` and `json`. In `auto` mode, logs will be printed using either `colors` or `nocolors`.
+
+You can choose a log style with the `--log-stdout` option, which also understands `auto` and `none`:
+
+```
+./run-mainnet-beacon-node.sh --log-stdout=none # disable logging to std out
+./run-mainnet-beacon-node.sh --log-stdout=json # print json logs, one line per item
+```
+
+## Logging to a file
+
+To sending logs to a file, it is recommended to use a redirect of the stdout logs:
+
+```
+# log json to filename.jsonl
+./run-mainnet-beacon-node.sh --log-stdout=json > filename.jsonl
+```
+
+Keep an eye on the growth of this file with a [log rotator](./log-rotate.md). Logs will be written in the "JSON Lines" format - one `json` entry per line.
+
+Nimbus also supports writing logs to a log file using the `--log-file` option - this is provided for historical reasons and may be removed in future releases.
+
+```
+./run-mainnet-beacon-node.sh --log-file=filename.jsonl # write json logs to the given filename
+```
+
+When the `--log-file` option is enabled, stdout logs will by default be disabled - enable them with the `--log-stdout` option choosing a format other than `auto`.

@@ -23,21 +23,28 @@ The following options are available:
   --log-level Sets the log level for process and topics (e.g. "DEBUG;
         TRACE:discv5,libp2p; REQUIRED:none; DISABLED:none") [=INFO].
+  --log-stdout Specifies what kind of logs should be written to stdout
+        (auto, colors, nocolors, json) [=auto].
   --log-file Specifies a path for the written Json log file.
   --network The Eth2 network to join [=mainnet].
   -d, --data-dir The directory where nimbus will store all blockchain data.
   --validators-dir A directory containing validator keystores.
   --secrets-dir A directory containing validator keystore passwords.
   --wallets-dir A directory containing wallet files.
-  --web3-url URL of the Web3 server to observe Eth1.
+  --web3-url One or more Web3 provider URLs used for obtaining deposit
+        contract data.
   --non-interactive Do not display interative prompts. Quit on missing
         configuration.
   --netkey-file Source of network (secp256k1) private key file
         (random|<path>) [=random].
   --insecure-netkey-password Use pre-generated INSECURE password for network private key
         file [=false].
-  --agent-string Node agent string which is used as identifier in network.
-  --subscribe-all-subnets Subscribe to all attestation subnet topics when gossiping.
+  --agent-string Node agent string which is used as identifier in network
+        [=nimbus].
+  --subscribe-all-subnets Subscribe to all attestation subnet topics when gossiping
+        [=false].
+  --num-threads Number of worker threads (set this to 0 to use as many
+        threads as there are CPU cores available) [=1].
   -b, --bootstrap-node Specifies one or more bootstrap nodes to use when connecting
         to the network.
   --bootstrap-file Specifies a line-delimited file of bootstrap Ethereum network
@@ -48,11 +55,11 @@ The following options are available:
   --udp-port Listening UDP port for node discovery [=9000].
   --max-peers The maximum number of peers to connect to [=160].
   --nat Specify method to use for determining public address. Must be
-        one of: any, none, upnp, pmp, extip:<IP>.
+        one of: any, none, upnp, pmp, extip:<IP> [=any].
   --enr-auto-update Discovery can automatically update its ENR with the IP
         address and UDP port as seen by other nodes it communicates
-        with. This option allows to enable/disable this
-        functionality.
+        with. This option allows to enable/disable this functionality
+        [=false].
   --weak-subjectivity-checkpoint Weak subjectivity checkpoint in the format
         block_root:epoch_number.
   --finalized-checkpoint-state SSZ file specifying a recent finalized state.
@@ -64,19 +71,26 @@ The following options are available:
         can use a 0x-prefixed hex encoded string to specify raw
         bytes.
   --verify-finalization Specify whether to verify finalization occurs on schedule,
-        for testing.
-  --stop-at-epoch A positive epoch selects the epoch at which to stop.
+        for testing [=false].
+  --stop-at-epoch A positive epoch selects the epoch at which to stop [=0].
   --metrics Enable the metrics server [=false].
   --metrics-address Listening address of the metrics server [=127.0.0.1].
   --metrics-port Listening HTTP port of the metrics server [=8008].
-  --status-bar Display a status bar at the bottom of the terminal screen.
+  --status-bar Display a status bar at the bottom of the terminal screen
+        [=true].
   --status-bar-contents Textual template for the contents of the status bar.
   --rpc Enable the JSON-RPC server [=false].
   --rpc-port HTTP port for the JSON-RPC service [=9190].
   --rpc-address Listening address of the RPC server [=127.0.0.1].
+  --rest Enable the REST server [=false].
+  --rest-port Port for the REST server [=5052].
+  --rest-address Listening address of the REST server [=127.0.0.1].
+  --validator-api Enable the REST (BETA version) validator keystore management
+        API [=false].
   --in-process-validators Disable the push model (the beacon node tells a signing
         process with the private keys of the validators what to sign
-        and when) and load the validators in the beacon node itself.
+        and when) and load the validators in the beacon node itself
+        [=true].
   --discv5 Enable Discovery v5 [=true].
   --dump Write SSZ dumps of blocks, attestations and states to data
         dir [=false].
@@ -86,8 +100,12 @@ The following options are available:
         /ip4/<address>/tcp/<port>/p2p/<peerId-public-key>. Peering
         agreements are established out of band and must be
         reciprocal..
-  --doppelganger-detection Whether to detect whether another validator is be running the
-        same validator keys [=true].
+  --doppelganger-detection If enabled, the beacon node prudently listens for 2 epochs
+        for attestations from a validator with the same index (a
+        doppelganger), before sending an attestation itself. This
+        protects against slashing (due to double-voting) but means
+        you will miss two attestations when restarting. [=true].
 ...
 ```

@@ -228,7 +228,7 @@ if [[ "$ENABLE_LOGTRACE" == "1" ]]; then
   BINARIES="${BINARIES} logtrace"
 fi

-$MAKE -j ${NPROC} LOG_LEVEL=TRACE NIMFLAGS="${NIMFLAGS} -d:testnet_servers_image -d:local_testnet -d:const_preset=${CONST_PRESET}" ${BINARIES}
+$MAKE -j ${NPROC} LOG_LEVEL=TRACE NIMFLAGS="${NIMFLAGS} -d:local_testnet -d:const_preset=${CONST_PRESET}" ${BINARIES}

 # Kill child processes on Ctrl-C/SIGTERM/exit, passing the PID of this shell
 # instance as the parent and the target process name as a pattern to the

@@ -93,7 +93,6 @@ fi
 exec ${WINPTY} build/${NBC_BINARY} \
   --network=${NETWORK} \
   --data-dir="${DATA_DIR}" \
-  --log-file="${DATA_DIR}/nbc_bn_$(date +"%Y%m%d%H%M%S").log" \
   --tcp-port=$(( ${BASE_P2P_PORT} + ${NODE_ID} )) \
   --udp-port=$(( ${BASE_P2P_PORT} + ${NODE_ID} )) \
   --rest \

@@ -49,7 +49,7 @@ mkdir -p "$SECRETS_DIR"
 cd "$GIT_ROOT"

-CUSTOM_NIMFLAGS="${NIMFLAGS} -d:useSysAsserts -d:chronicles_sinks:textlines,json[file] -d:const_preset=mainnet -d:local_testnet"
+CUSTOM_NIMFLAGS="${NIMFLAGS} -d:useSysAsserts -d:const_preset=mainnet -d:local_testnet"
 GANACHE_BLOCK_TIME=5

 # Run with "SLOTS_PER_EPOCH=8 ./start.sh" to change these

@@ -1 +1 @@
-Subproject commit ba2817f1a9f5ddb59274295f3fe9a54fbf1a5a96
+Subproject commit c277f795ed998bc67312991e944254abed99eafc