rm beacon_chain/nimbus_binary_common.nim
This commit is contained in:
parent a7652d6dc0
commit 3c79aab250
@@ -1,20 +1,36 @@
import
  std/os,
  chronos,
  stew/io2,
  ./validators/beacon_validators,
  "."/nimbus_binary_common
  ./validators/beacon_validators

import
  "."/[beacon_clock, conf],
  ./spec/forks
import "."/beacon_clock
import "."/spec/beacon_time

proc runSlotLoop*[T](node: T, startTime: BeaconTime) {.async.} =
  var
    curSlot = startTime.slotOrZero()
    nextSlot = curSlot + 1 # No earlier than GENESIS_SLOT + 1
    timeToNextSlot = nextSlot.start_beacon_time() - startTime

  while true:
    let
      wallTime = node.beaconClock.now()
      wallSlot = wallTime.slotOrZero() # Always > GENESIS!

    if false:
      timeToNextSlot = nextSlot.start_beacon_time() - wallTime
      continue

    await proposeBlock(getBlockRef2(static(default(Eth2Digest))).get, wallSlot)
    quit 0

import ./conf

type
  RuntimeConfig = object
  BeaconNode = ref object
    beaconClock: BeaconClock
    cfg: RuntimeConfig
    genesisState: ref ForkedHashedBeaconState

proc init(T: type BeaconNode,
          config: BeaconNodeConf,
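Note: the slot arithmetic in runSlotLoop above boils down to "sleep until the start of the next slot". A minimal, self-contained sketch of that computation, assuming 12-second slots (mainnet SECONDS_PER_SLOT) and using std/times Duration in place of BeaconTime:

import std/times

const secondsPerSlot = 12'u64  # assumption: mainnet SECONDS_PER_SLOT

proc slotOrZero(t: Duration): uint64 =
  # negative wall time clamps to slot 0, mirroring slotOrZero above
  max(t.inSeconds, 0'i64).uint64 div secondsPerSlot

proc startBeaconTime(slot: uint64): Duration =
  initDuration(seconds = int64(slot * secondsPerSlot))

let
  startTime = initDuration(seconds = 25)  # 25 s after genesis
  curSlot = startTime.slotOrZero()        # slot 2
  nextSlot = curSlot + 1                  # slot 3
echo startBeaconTime(nextSlot) - startTime  # 11 seconds until slot 3 starts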
@@ -1,155 +0,0 @@
import
  std/[tables, strutils, terminal, typetraits],
  chronicles, chronos, confutils, presto,
  chronicles/helpers as chroniclesHelpers, chronicles/topics_registry,
  stew/io2,
  "."/[beacon_clock, conf]

export
  confutils, beacon_clock, conf

proc updateLogLevel*(logLevel: string) {.raises: [ValueError].} =
  # Updates log levels (without clearing old ones)
  let directives = logLevel.split(";")
  try:
    setLogLevel(parseEnum[LogLevel](directives[0].capitalizeAscii()))
  except ValueError:
    raise (ref ValueError)(msg: "Please specify one of TRACE, DEBUG, INFO, NOTICE, WARN, ERROR or FATAL")

  if directives.len > 1:
    for topicName, settings in parseTopicDirectives(directives[1..^1]):
      if not setTopicState(topicName, settings.state, settings.logLevel):
        warn "Unrecognized logging topic", topic = topicName

proc detectTTY*(stdoutKind: StdoutLogKind): StdoutLogKind =
  if stdoutKind == StdoutLogKind.Auto:
    if isatty(stdout):
      # On a TTY, let's be fancy
      StdoutLogKind.Colors
    else:
      # When there's no TTY, we output no colors because this matches what
      # released binaries were doing before auto-detection was around and
      # looks decent in systemd-captured journals.
      StdoutLogKind.NoColors
  else:
    stdoutKind

when defaultChroniclesStream.outputs.type.arity == 2:
  from std/os import splitFile

proc setupFileLimits*() =
  when not defined(windows):
    # In addition to databases and sockets, we need a file descriptor for every
    # validator - setting it to 16k should provide sufficient margin
    let
      limit = getMaxOpenFiles2().valueOr(16384)

    if limit < 16384:
      setMaxOpenFiles2(16384).isOkOr:
        warn "Cannot increase open file limit", err = osErrorMsg(error)

proc setupLogging*(
    logLevel: string, stdoutKind: StdoutLogKind, logFile: Option[OutFile]) =
  # In the cfg file for nimbus, we create two formats: textlines and json.
  # Here, we either write those logs to an output, or not, depending on the
  # given configuration.
  # Arguably, if we don't use a format, chronicles should not create it.

  when defaultChroniclesStream.outputs.type.arity != 2:
    warn "Logging configuration options not enabled in the current build"
  else:
    # Naive approach where chronicles will form a string and we will discard
    # it, even if it could have skipped the formatting phase

    proc noOutput(logLevel: LogLevel, msg: LogOutputStr) = discard
    proc writeAndFlush(f: File, msg: LogOutputStr) =
      try:
        f.write(msg)
        f.flushFile()
      except IOError as err:
        logLoggingFailure(cstring(msg), err)

    proc stdoutFlush(logLevel: LogLevel, msg: LogOutputStr) =
      writeAndFlush(stdout, msg)

    proc noColorsFlush(logLevel: LogLevel, msg: LogOutputStr) =
      writeAndFlush(stdout, msg)

    let fileWriter =
      if logFile.isSome():
        let
          logFile = logFile.get.string
          logFileDir = splitFile(logFile).dir
          lres = createPath(logFileDir, 0o700)
        if lres.isOk():
          try:
            let
              f = open(logFile, fmAppend)
              x = proc(logLevel: LogLevel, msg: LogOutputStr) =
                writeAndFlush(f, msg) # will close when program terminates
            x
          except CatchableError as exc:
            error "Failed to create log file", logFile, msg = exc.msg
            noOutput
        else:
          error "Failed to create directory for log file",
            path = logFileDir, err = ioErrorMsg(lres.error)
          noOutput
      else:
        noOutput

    defaultChroniclesStream.outputs[1].writer = fileWriter

    let tmp = detectTTY(stdoutKind)

    case tmp
    of StdoutLogKind.Auto: raiseAssert "checked above"
    of StdoutLogKind.Colors:
      defaultChroniclesStream.outputs[0].writer = stdoutFlush
    of StdoutLogKind.NoColors:
      defaultChroniclesStream.outputs[0].writer = noColorsFlush
    of StdoutLogKind.Json:
      defaultChroniclesStream.outputs[0].writer = noOutput

      let prevWriter = defaultChroniclesStream.outputs[1].writer
      defaultChroniclesStream.outputs[1].writer =
        proc(logLevel: LogLevel, msg: LogOutputStr) =
          stdoutFlush(logLevel, msg)
          prevWriter(logLevel, msg)
    of StdoutLogKind.None:
      defaultChroniclesStream.outputs[0].writer = noOutput

  if logFile.isSome():
    warn "The --log-file option is deprecated. Consider redirecting the standard output to a file instead"
  try:
    updateLogLevel(logLevel)
  except ValueError as err:
    try:
      stderr.write "Invalid value for --log-level. " & err.msg
    except IOError:
      echo "Invalid value for --log-level. " & err.msg
    quit 1

import "."/spec/beacon_time

proc runSlotLoop*[T](node: T, startTime: BeaconTime) {.async.} =
  var
    curSlot = startTime.slotOrZero()
    nextSlot = curSlot + 1 # No earlier than GENESIS_SLOT + 1
    timeToNextSlot = nextSlot.start_beacon_time() - startTime

  while true:
    let
      wallTime = node.beaconClock.now()
      wallSlot = wallTime.slotOrZero() # Always > GENESIS!

    if false:
      if false:
        return

      # cur & next slot remain the same
      timeToNextSlot = nextSlot.start_beacon_time() - wallTime
      continue

    await proposeBlock(getBlockRef2(static(default(Eth2Digest))).get, wallSlot)
    quit 0
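Note: the removed updateLogLevel splits its argument on ';', taking the first element as the global log level and the rest as per-topic directives. A std-library-only sketch of that parsing step (the sample string "info;libp2p:debug;sync" is illustrative):

import std/strutils

let directives = "info;libp2p:debug;sync".split(";")
echo "global level: ", directives[0].capitalizeAscii()  # "Info", fed to parseEnum[LogLevel]
for topicDirective in directives[1..^1]:
  echo "topic directive: ", topicDirective              # "libp2p:debug", "sync"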
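Note: setupFileLimits relies on getMaxOpenFiles2/setMaxOpenFiles2 from stew/io2. As a rough equivalent, and assuming a POSIX target, the same 16k floor can be sketched directly against std/posix's rlimit API (a sketch, not the stew implementation):

when not defined(windows):
  import std/posix

  var limits: RLimit
  # raise the soft open-file limit to 16384 if it is currently lower
  if getrlimit(RLIMIT_NOFILE, limits) == 0 and limits.rlim_cur < 16384:
    limits.rlim_cur = min(16384, limits.rlim_max)
    if setrlimit(RLIMIT_NOFILE, limits) != 0:
      echo "could not raise the open-file limit"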
@@ -1,6 +1,7 @@
import
  results,
  stew/[bitseqs, endians2, objects, byteutils],
  stew/byteutils,
  stew/objects,
  blscurve

from std/tables import Table, withValue, `[]=`
@@ -31,8 +31,6 @@ proc getBlockSignature(): Future[SignatureResult]
    {.async: (raises: [CancelledError]).} =
  SignatureResult.ok(default(ValidatorSig))

import stint

type
  KzgProofs = seq[int]
  Blobs = seq[int]
@@ -42,11 +40,11 @@ type
    blobs: Blobs

  EngineBid = tuple[
    blockValue: UInt256,
    blockValue: uint64,
    blobsBundleOpt: Opt[BlobsBundle]]

  BuilderBid[SBBB] = tuple[
    blindedBlckPart: SBBB, blockValue: UInt256]
    blindedBlckPart: SBBB, blockValue: uint64]

  ForkedBlockResult =
    Result[EngineBid, string]
@@ -125,7 +123,7 @@ proc makeBeaconBlockForHeadAndSlot(

  var blobsBundleOpt = Opt.none(BlobsBundle)
  return if blck.isOk:
    ok((0.u256, blobsBundleOpt))
    ok((0'u64, blobsBundleOpt))
  else:
    err(blck.error)
@@ -158,7 +156,7 @@ proc getBlindedBlockParts[EPH](
    head: BlockRef,
    pubkey: ValidatorPubKey, slot: uint64,
    validator_index: int32):
    Future[Result[(UInt256, ForkedBeaconBlock), string]]
    Future[Result[(uint64, ForkedBeaconBlock), string]]
    {.async: (raises: [CancelledError]).} =
  return err("")
@@ -254,15 +252,8 @@ proc collectBids(
    builderBid: builderBid)

func builderBetterBid(
    localBlockValueBoost: uint8, builderValue: UInt256, engineValue: UInt256): bool =
  const scalingBits = 10
  static: doAssert 1 shl scalingBits >
    high(typeof(localBlockValueBoost)).uint16 + 100
  let
    scaledBuilderValue = (builderValue shr scalingBits) * 100
    scaledEngineValue = engineValue shr scalingBits
  scaledBuilderValue >
    scaledEngineValue * (localBlockValueBoost.uint16 + 100).u256
    localBlockValueBoost: uint8, builderValue: uint64, engineValue: uint64): bool =
  false

import chronicles
import
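Note: the removed builderBetterBid body compares a boosted builder bid against the engine bid after shifting both down by scalingBits, so the later percentage multiplications cannot overflow. A self-contained sketch of the same comparison using uint64 instead of UInt256 (stint), with illustrative values:

func builderBetterBid(localBlockValueBoost: uint8,
                      builderValue, engineValue: uint64): bool =
  const scalingBits = 10
  let
    # scale both values down first to leave headroom for the *100 / *(boost+100) products
    scaledBuilderValue = (builderValue shr scalingBits) * 100
    scaledEngineValue = engineValue shr scalingBits
  scaledBuilderValue > scaledEngineValue * (localBlockValueBoost.uint64 + 100)

# With a 10% boost, the builder bid must beat the engine bid by more than ~10%.
echo builderBetterBid(10, 1_150_000, 1_000_000)  # true
echo builderBetterBid(10, 1_050_000, 1_000_000)  # false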
@@ -1,98 +1,6 @@
import
  serialization,
  ../consensus_object_pools/block_dag
import ../consensus_object_pools/block_dag

type
  SPDIR* = object
    ## Slashing Protection Database Interchange Format
    metadata*: SPDIR_Meta
    data*: seq[SPDIR_Validator]

  Eth2Digest0x* = distinct Eth2Digest
    ## The spec mandates "0x" prefix on serialization
    ## So we need to set custom read/write

  PubKeyBytes* = array[48, byte]
    ## This is the serialized byte representation
    ## of a Validator Public Key.
    ## Portable between backend implementations
    ## and limits serialization/deserialization call

  PubKey0x* = distinct PubKeyBytes
    ## The spec mandates "0x" prefix on serialization
    ## So we need to set custom read/write
    ## We also assume that pubkeys in the database
    ## are valid points on the BLS12-381 G1 curve
    ## (so we skip fromRaw/serialization checks)

  EpochString* = distinct uint64
    ## The spec mandates string serialization for wide compatibility (javascript)

  SPDIR_Meta* = object
    interchange_format_version*: string
    genesis_validators_root*: Eth2Digest0x

  SPDIR_Validator* = object
    pubkey*: PubKey0x
    signed_blocks*: seq[SPDIR_SignedBlock]
    signed_attestations*: seq[SPDIR_SignedAttestation]

  SPDIR_SignedBlock* = object
    slot*: EpochString
    signing_root*: Option[Eth2Digest0x] # compute_signing_root(block, domain)

  SPDIR_SignedAttestation* = object
    source_epoch*: EpochString
    target_epoch*: EpochString
    signing_root*: Option[Eth2Digest0x] # compute_signing_root(attestation, domain)

  # Slashing Protection types
  # --------------------------------------------

  SlashingImportStatus* = enum
    siSuccess
    siFailure
    siPartial

  BadVoteKind* = enum
    ## Attestation bad vote kind
    # h: height (i.e. epoch for attestation, slot for blocks)
    # t: target
    # s: source
    # 1: existing attestations
    # 2: candidate attestation

    # Spec slashing condition
    DoubleVote # h(t1) == h(t2)
    SurroundVote # h(s1) < h(s2) < h(t2) < h(t1) or h(s2) < h(s1) < h(t1) < h(t2)

    # Non-spec, should never happen in a well functioning client
    TargetPrecedesSource # h(t1) < h(s1) - current epoch precedes last justified epoch

    # EIP-3067 (https://eips.ethereum.org/EIPS/eip-3076)
    MinSourceViolation # h(s2) < h(s1) - EIP3067 condition 4 (strict inequality)
    MinTargetViolation # h(t2) <= h(t1) - EIP3067 condition 5
    DatabaseError # Cannot read/write the slashing protection db

  BadVote* {.pure.} = object
    case kind*: BadVoteKind
    of DoubleVote:
      existingAttestation*: Eth2Digest
    of SurroundVote:
      existingAttestationRoot*: Eth2Digest # Many roots might be in conflict
      sourceExisting*, targetExisting*: uint64
      sourceSlashable*, targetSlashable*: uint64
    of TargetPrecedesSource:
      discard
    of MinSourceViolation:
      minSource*: uint64
      candidateSource*: uint64
    of MinTargetViolation:
      minTarget*: uint64
      candidateTarget*: uint64
    of BadVoteKind.DatabaseError:
      message*: string

  BadProposalKind* {.pure.} = enum
    # Spec slashing condition
    DoubleProposal # h(t1) == h(t2)
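Note: the SPDIR types above model the EIP-3076 slashing-protection interchange document, with string-encoded integers and "0x"-prefixed hex as the doc comments say. A sketch of that document shape built with std/json; the concrete values are placeholders, not taken from the diff:

import std/[json, strutils]

let interchange = %*{
  "metadata": {
    "interchange_format_version": "5",
    # 32-byte root -> 64 hex chars (placeholder)
    "genesis_validators_root": "0x" & repeat('0', 64)
  },
  "data": [{
    # 48-byte pubkey -> 96 hex chars, matching PubKeyBytes above (placeholder)
    "pubkey": "0x" & repeat('a', 96),
    "signed_blocks": [{"slot": "81952"}],
    "signed_attestations": [{"source_epoch": "2290", "target_epoch": "3007"}]
  }]
}
echo interchange.pretty()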
@@ -108,4 +16,4 @@ type
      minSlot*: uint64
      candidateSlot*: uint64
    of BadProposalKind.DatabaseError:
      message*: string
      message: string