Merge branch 'devel'
commit c5a58555ef
@@ -82,7 +82,7 @@ jobs:
     - name: Cancel Previous Runs (except master/devel)
       if: >
-        steps.get_branch.outputs.branch_name != 'master' ||
+        steps.get_branch.outputs.branch_name != 'master' &&
         steps.get_branch.outputs.branch_name != 'devel'
       uses: styfle/cancel-workflow-action@0.5.0
       with:
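A note on the `||` → `&&` flip above: the old guard was true for every branch, since any branch name differs from at least one of 'master' and 'devel', so the cancel step was never skipped. The `&&` form expresses the intended "neither master nor devel". A quick sanity check of the boolean logic, sketched in Nim (the branch names are made up):

```nim
for branch in ["master", "devel", "feature/x"]:
  let orGuard  = branch != "master" or  branch != "devel"  # always true
  let andGuard = branch != "master" and branch != "devel"  # true only for feature/x
  echo branch, ": or=", orGuard, " and=", andGuard
```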
@@ -130,10 +130,22 @@ OK: 6/6 Fail: 0/6 Skip: 0/6
 OK: 11/11 Fail: 0/11 Skip: 0/11
 ## Official - Operations - Proposer slashing [Preset: mainnet]
 ```diff
-+ [Invalid] identifier OK
-+ [Valid] identifier OK
++ [Invalid] epochs_are_different OK
++ [Invalid] headers_are_same_sigs_are_different OK
++ [Invalid] headers_are_same_sigs_are_same OK
++ [Invalid] invalid_different_proposer_indices OK
++ [Invalid] invalid_proposer_index OK
++ [Invalid] invalid_sig_1 OK
++ [Invalid] invalid_sig_1_and_2 OK
++ [Invalid] invalid_sig_1_and_2_swap OK
++ [Invalid] invalid_sig_2 OK
++ [Invalid] proposer_is_not_activated OK
++ [Invalid] proposer_is_slashed OK
++ [Invalid] proposer_is_withdrawn OK
++ [Valid] success OK
++ [Valid] success_slashed_and_proposer_index_the_same OK
 ```
-OK: 2/2 Fail: 0/2 Skip: 0/2
+OK: 14/14 Fail: 0/14 Skip: 0/14
 ## Official - Operations - Voluntary exit [Preset: mainnet]
 ```diff
 + [Invalid] invalid_signature OK

@@ -242,4 +254,4 @@ OK: 32/32 Fail: 0/32 Skip: 0/32
 OK: 5/5 Fail: 0/5 Skip: 0/5
 
 ---TOTAL---
-OK: 184/184 Fail: 0/184 Skip: 0/184
+OK: 196/196 Fail: 0/196 Skip: 0/196
Makefile
@@ -11,7 +11,7 @@ SHELL := bash # the shell used internally by "make"
 BUILD_SYSTEM_DIR := vendor/nimbus-build-system
 
 # we set its default value before LOG_LEVEL is used in "variables.mk"
-LOG_LEVEL := DEBUG
+LOG_LEVEL := INFO
 
 # we don't want an error here, so we can handle things later, in the ".DEFAULT" target
 -include $(BUILD_SYSTEM_DIR)/makefiles/variables.mk
@@ -226,6 +226,11 @@ proc validateAttestation*(
     attesting_indices = get_attesting_indices(
       epochRef, attestation.data, attestation.aggregation_bits)
 
+  if attesting_indices.len == 0:
+    # an extension of the check_aggregation_count() check
+    const err_str: cstring =
+      "validateAttestation: inconsistent aggregation and committee length"
+    return err((EVRESULT_REJECT, err_str))
   doAssert attesting_indices.len == 1, "Per bits check above"
   let validator_index = toSeq(attesting_indices)[0]
 
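The new rejection path above returns a compile-time `cstring` constant; plausibly this keeps the hot validation path free of GC-managed string allocations. A minimal sketch of the same Result-with-error-tuple shape, using stew/results (the type name and code values here are invented, not the codebase's actual ones):

```nim
import stew/results

type VRes = Result[void, (int, cstring)]

proc checkAggregation(nIndices: int): VRes =
  # A compile-time cstring constant: no string is built at runtime.
  const errStr: cstring = "inconsistent aggregation and committee length"
  if nIndices == 0:
    return VRes.err((2, errStr))  # 2 standing in for a REJECT code
  VRes.ok()
```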
@@ -55,7 +55,7 @@ proc init*(T: type AttestationPool, chainDag: ChainDAGRef, quarantine: Quarantin
 
   doAssert status.isOk(), "Error in preloading the fork choice: " & $status.error
 
-  info "Fork choice initialized",
+  debug "Fork choice initialized",
     justified_epoch = chainDag.headState.data.data.current_justified_checkpoint.epoch,
     finalized_epoch = chainDag.headState.data.data.finalized_checkpoint.epoch,
     finalized_root = shortlog(chainDag.finalizedHead.blck.root)

@@ -184,7 +184,7 @@ proc addAttestation*(pool: var AttestationPool,
       attestation.data.slot, participants, attestation.data.beacon_block_root,
       wallSlot)
 
-    info "Attestation resolved",
+    debug "Attestation resolved",
       attestation = shortLog(attestation),
       validations = a.validations.len()
 

@@ -201,7 +201,7 @@ proc addAttestation*(pool: var AttestationPool,
       attestation.data.slot, participants, attestation.data.beacon_block_root,
       wallSlot)
 
-    info "Attestation resolved",
+    debug "Attestation resolved",
       attestation = shortLog(attestation),
       validations = 1
 
@@ -186,7 +186,7 @@ proc init*(T: type BeaconNode,
   if bnStatus == BeaconNodeStatus.Stopping:
     return nil
 
-  info "Eth2 genesis state detected",
+  notice "Eth2 genesis state detected",
     genesisTime = genesisState.genesisTime,
     eth1Block = genesisState.eth1_data.block_hash,
     totalDeposits = genesisState.eth1_data.deposit_count

@@ -619,7 +619,7 @@ proc onSlotStart(node: BeaconNode, lastSlot, scheduledSlot: Slot) {.async.} =
 proc handleMissingBlocks(node: BeaconNode) =
   let missingBlocks = node.quarantine.checkMissing()
   if missingBlocks.len > 0:
-    info "Requesting detected missing blocks", blocks = shortLog(missingBlocks)
+    debug "Requesting detected missing blocks", blocks = shortLog(missingBlocks)
     node.requestManager.fetchAncestorBlocks(missingBlocks)
 
 proc onSecond(node: BeaconNode) =

@@ -639,7 +639,7 @@ proc runOnSecondLoop(node: BeaconNode) {.async.} =
     let finished = chronos.now(chronos.Moment)
     let processingTime = finished - afterSleep
     ticks_delay.set(sleepTime.nanoseconds.float / nanosecondsIn1s)
-    debug "onSecond task completed", sleepTime, processingTime
+    trace "onSecond task completed", sleepTime, processingTime
 
 proc startSyncManager(node: BeaconNode) =
   func getLocalHeadSlot(): Slot =
@@ -845,12 +845,12 @@ proc installMessageValidators(node: BeaconNode) =
 
 proc stop*(node: BeaconNode) =
   bnStatus = BeaconNodeStatus.Stopping
-  info "Graceful shutdown"
+  notice "Graceful shutdown"
   if not node.config.inProcessValidators:
     node.vcProcess.close()
   waitFor node.network.stop()
   node.db.close()
-  info "Database closed"
+  notice "Database closed"
 
 proc run*(node: BeaconNode) =
   if bnStatus == BeaconNodeStatus.Starting:

@@ -907,7 +907,7 @@ proc initializeNetworking(node: BeaconNode) {.async.} =
 
   await node.network.start()
 
-  info "Networking initialized",
+  notice "Networking initialized",
     enr = node.network.announcedENR.toURI,
     libp2p = shortLog(node.network.switch.peerInfo)
 

@@ -917,7 +917,7 @@ proc start(node: BeaconNode) =
     finalizedHead = node.chainDag.finalizedHead
     genesisTime = node.beaconClock.fromNow(toBeaconTime(Slot 0))
 
-  info "Starting beacon node",
+  notice "Starting beacon node",
     version = fullVersionStr,
     nim = shortNimBanner(),
     timeSinceFinalization =
@@ -1202,14 +1202,14 @@ programMain:
     when defined(windows):
       # workaround for https://github.com/nim-lang/Nim/issues/4057
       setupForeignThreadGc()
-    info "Shutting down after having received SIGINT"
+    notice "Shutting down after having received SIGINT"
     bnStatus = BeaconNodeStatus.Stopping
   setControlCHook(controlCHandler)
 
   when useInsecureFeatures:
     if config.metricsEnabled:
       let metricsAddress = config.metricsAddress
-      info "Starting metrics HTTP server",
+      notice "Starting metrics HTTP server",
         address = metricsAddress, port = config.metricsPort
       metrics.startHttpServer($metricsAddress, config.metricsPort)
 

@@ -1282,7 +1282,7 @@ programMain:
     mapIt(deposits.value, LaunchPadDeposit.init(config.runtimePreset, it))
 
     Json.saveFile(depositDataPath, launchPadDeposits)
-    info "Deposit data written", filename = depositDataPath
+    notice "Deposit data written", filename = depositDataPath
 
   walletPath.wallet.nextAccount += deposits.value.len
   let status = saveWallet(walletPath)
@@ -185,7 +185,7 @@ func isAncestorOf*(a, b: BlockRef): bool =
     b = b.parent
 
 func get_ancestor*(blck: BlockRef, slot: Slot): BlockRef =
-  ## https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/fork-choice.md#get_ancestor
+  ## https://github.com/ethereum/eth2.0-specs/blob/v0.12.3/specs/phase0/fork-choice.md#get_ancestor
   ## Return the most recent block as of the time at `slot` that not more recent
   ## than `blck` itself
   doAssert not blck.isNil

@@ -487,7 +487,7 @@ proc putState*(dag: ChainDAGRef, state: StateData) =
   if dag.db.containsState(state.data.root):
     return
 
-  info "Storing state"
+  debug "Storing state"
   # Ideally we would save the state and the root lookup cache in a single
   # transaction to prevent database inconsistencies, but the state loading code
   # is resilient against one or the other going missing
@@ -626,7 +626,7 @@ proc updateStateData*(
 
     return
 
-  debug "UpdateStateData miss",
+  debug "UpdateStateData cache miss",
     bs, stateBlock = state.blck, stateSlot = state.data.data.slot
 
   # Either the state is too new or was created by applying a different block.

@@ -676,7 +676,7 @@ proc updateStateData*(
 
   beacon_state_rewinds.inc()
 
-  debug "State reloaded from database",
+  trace "State reloaded from database",
     blocks = ancestors.len,
     slots = state.data.data.slot - startSlot,
     stateRoot = shortLog(state.data.root),
@@ -707,7 +707,7 @@ proc updateHead*(
     newHead = shortLog(newHead)
 
   if dag.head == newHead:
-    debug "No head block update"
+    trace "No head block update"
 
     return
 

@@ -730,7 +730,7 @@ proc updateHead*(
   dag.head = newHead
 
   if not lastHead.isAncestorOf(newHead):
-    info "Updated head block with reorg",
+    notice "Updated head block with chain reorg",
       lastHead = shortLog(lastHead),
       headParent = shortLog(newHead.parent),
       stateRoot = shortLog(dag.headState.data.root),

@@ -743,7 +743,7 @@ proc updateHead*(
     quarantine.clearQuarantine()
     beacon_reorgs_total.inc()
   else:
-    info "Updated head block",
+    debug "Updated head block",
       stateRoot = shortLog(dag.headState.data.root),
       headBlock = shortLog(dag.headState.blck),
       stateSlot = shortLog(dag.headState.data.data.slot),

@@ -818,7 +818,7 @@ proc updateHead*(
 
     dag.finalizedHead = finalizedHead
 
-    info "Reached new finalization checkpoint",
+    notice "Reached new finalization checkpoint",
       finalizedHead = shortLog(finalizedHead),
       heads = dag.heads.len
 
@@ -88,7 +88,7 @@ proc addResolvedBlock(
       foundHead = blockRef
       dag.heads.add(foundHead)
 
-  info "Block resolved",
+  debug "Block resolved",
     blck = shortLog(signedBlock.message),
     blockRoot = shortLog(blockRoot),
     heads = dag.heads.len()

@@ -165,7 +165,7 @@ proc addRawBlock*(
   if parent.slot >= blck.slot:
     # A block whose parent is newer than the block itself is clearly invalid -
     # discard it immediately
-    notice "Invalid block slot",
+    debug "Invalid block slot",
       parentBlock = shortLog(parent)
 
     return err((EVRESULT_REJECT, Invalid))

@@ -211,7 +211,7 @@ proc addRawBlock*(
 
   if not state_transition(dag.runtimePreset, dag.clearanceState.data, signedBlock,
       cache, dag.updateFlags + {slotProcessed}, restore):
-    notice "Invalid block"
+    info "Invalid block"
 
     return err((EVRESULT_REJECT, Invalid))
 
@@ -329,7 +329,7 @@ proc isValidBeaconBlock*(
         signed_beacon_block.message.proposer_index and
       blck.message.slot == signed_beacon_block.message.slot and
       blck.signature.toRaw() != signed_beacon_block.signature.toRaw():
-    debug "block isn't first block with valid signature received for the proposer",
+    notice "block isn't first block with valid signature received for the proposer",
       blckRef = slotBlockRef,
       existing_block = shortLog(blck.message)
     return err((EVRESULT_IGNORE, Invalid))

@@ -348,7 +348,7 @@ proc isValidBeaconBlock*(
     debug "parent unknown, putting block in quarantine",
       current_slot = shortLog(current_slot)
     if not quarantine.add(dag, signed_beacon_block):
-      debug "Block quarantine full"
+      warn "Block quarantine full"
     return err((EVRESULT_IGNORE, MissingParent))
 
   # [REJECT] The current finalized_checkpoint is an ancestor of block -- i.e.

@@ -378,12 +378,12 @@ proc isValidBeaconBlock*(
     proposer = getProposer(dag, parent_ref, signed_beacon_block.message.slot)
 
   if proposer.isNone:
-    notice "cannot compute proposer for message"
+    warn "cannot compute proposer for message"
     return err((EVRESULT_IGNORE, Invalid)) # basically an internal issue
 
   if proposer.get()[0] !=
       ValidatorIndex(signed_beacon_block.message.proposer_index):
-    debug "block had unexpected proposer",
+    notice "block had unexpected proposer",
       expected_proposer = proposer.get()[0]
     return err((EVRESULT_REJECT, Invalid))
 
@@ -139,7 +139,7 @@ proc sendDeposits*(deposits: seq[LaunchPadDeposit],
                    web3Url, privateKey: string,
                    depositContractAddress: Eth1Address,
                    delayGenerator: DelayGenerator = nil) {.async.} =
-  info "Sending deposits",
+  notice "Sending deposits",
     web3 = web3Url,
     depositContract = depositContractAddress
 

@@ -197,7 +197,7 @@ proc main() {.async.} =
     mapIt(deposits.value, LaunchPadDeposit.init(runtimePreset, it))
 
     Json.saveFile(string cfg.outDepositsFile, launchPadDeposits)
-    info "Deposit data written", filename = cfg.outDepositsFile
+    notice "Deposit data written", filename = cfg.outDepositsFile
     quit 0
 
   var deposits: seq[LaunchPadDeposit]
@@ -822,7 +822,7 @@ proc runDiscoveryLoop*(node: Eth2Node) {.async.} =
           debug "Failed to decode discovery's node address",
                 node = $discnode, errMsg = res.error
 
-    debug "Discovery tick", wanted_peers = node.wantedPeers,
+    trace "Discovery tick", wanted_peers = node.wantedPeers,
           space = node.peerPool.shortLogSpace(),
           acquired = node.peerPool.shortLogAcquired(),
           available = node.peerPool.shortLogAvailable(),

@@ -832,9 +832,10 @@ proc runDiscoveryLoop*(node: Eth2Node) {.async.} =
           new_peers = newPeers
 
     if newPeers == 0:
-      warn "Could not discover any new nodes in network, waiting",
-           discovered = len(discoveredNodes), new_peers = newPeers,
-           wanted_peers = node.wantedPeers
+      if node.peerPool.lenSpace() <= node.wantedPeers shr 2:
+        warn "Less than 25% wanted peers and could not discover new nodes",
+             discovered = len(discoveredNodes), new_peers = newPeers,
+             wanted_peers = node.wantedPeers
       await sleepAsync(5.seconds)
     else:
       await sleepAsync(1.seconds)
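The new condition warns only once free space in the peer pool has dropped to a quarter of the wanted peer count; `shr 2` is an integer divide-by-four. A worked example in Nim (the number is made up):

```nim
let wantedPeers = 160
doAssert wantedPeers shr 2 == 40  # warn once <= 25% of the target remains
```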
@@ -980,7 +981,7 @@ proc start*(node: Eth2Node) {.async.} =
     node.discovery.start()
     traceAsyncErrors node.runDiscoveryLoop()
   else:
-    debug "Discovery disabled, trying bootstrap nodes",
+    notice "Discovery disabled; trying bootstrap nodes",
       nodes = node.discovery.bootstrapRecords.len
     for enr in node.discovery.bootstrapRecords:
       let tr = enr.toTypedRecord()

@@ -1232,7 +1233,7 @@ proc createEth2Node*(
     announcedAddresses = if extIp.isNone(): @[]
                          else: @[tcpEndPoint(extIp.get(), extTcpPort)]
 
-  info "Initializing networking", hostAddress,
+  notice "Initializing networking", hostAddress,
     announcedAddresses
 
   let keys = getPersistentNetKeys(rng[], conf)
@@ -301,7 +301,7 @@ proc attestationValidator*(
 
   # Potential under/overflows are fine; would just create odd metrics and logs
   let delay = wallTime - attestation.data.slot.toBeaconTime
-  debug "Attestation received", delay
+  trace "Attestation received", delay
   let v = self.attestationPool[].validateAttestation(
     attestation, wallTime, committeeIndex)
   if v.isErr():

@@ -343,7 +343,7 @@ proc find_head*(
     ? self.proto_array.find_head(new_head, justified_root)
 
   {.noSideEffect.}:
-    debug "Fork choice requested",
+    trace "Fork choice requested",
       justified_epoch = justified_epoch,
       justified_root = shortLog(justified_root),
       finalized_epoch = finalized_epoch,
@@ -157,7 +157,7 @@ proc generateDeposits*(preset: RuntimePreset,
                        secretsDir: string): Result[seq[DepositData], KeystoreGenerationError] =
   var deposits: seq[DepositData]
 
-  info "Generating deposits", totalNewValidators, validatorsDir, secretsDir
+  notice "Generating deposits", totalNewValidators, validatorsDir, secretsDir
 
   let withdrawalKeyPath = makeKeyPath(0, withdrawalKeyKind)
   # TODO: Explain why we are using an empty password

@@ -256,7 +256,7 @@ proc importKeystoresFromDir*(rng: var BrHmacDrbgContext,
     let keystore = try:
       Json.loadFile(file, Keystore)
     except SerializationError as e:
-      trace "Invalid keystore", err = e.formatMsg(file)
+      warn "Invalid keystore", err = e.formatMsg(file)
       continue
     except IOError as e:
       warn "Failed to read keystore file", file, err = e.msg

@@ -292,7 +292,7 @@ proc importKeystoresFromDir*(rng: var BrHmacDrbgContext,
                                  privKey.value, pubKey,
                                  keystore.path)
       if status.isOk:
-        info "Keystore imported", file
+        notice "Keystore imported", file
       else:
         error "Failed to import keystore", file, err = status.error
     else:
@@ -398,7 +398,7 @@ proc pickPasswordAndSaveWallet(rng: var BrHmacDrbgContext,
       if status.isErr:
         return err("failure to create wallet file due to " & status.error)
 
-      info "Wallet file written", path = outWalletFile
+      notice "Wallet file written", path = outWalletFile
       return ok WalletPathPair(wallet: wallet, path: outWalletFile)
   finally:
     burnMem(password)
@@ -319,7 +319,7 @@ proc getBlockNumber(p: DataProviderRef, hash: BlockHash): Future[Eth1BlockNumber
       return 0
     return Eth1BlockNumber(blk.number)
   except CatchableError as exc:
-    notice "Failed to get Eth1 block number from hash",
+    debug "Failed to get Eth1 block number from hash",
       hash = $hash, err = exc.msg
     raise exc
 

@@ -374,7 +374,7 @@ proc readJsonDeposits(depositsList: JsonNode): seq[Eth1Block] =
 method fetchDepositData*(p: Web3DataProviderRef,
                          fromBlock, toBlock: Eth1BlockNumber): Future[seq[Eth1Block]]
                         {.async, locks: 0.} =
-  info "Obtaining deposit log events", fromBlock, toBlock
+  debug "Obtaining deposit log events", fromBlock, toBlock
   return readJsonDeposits(await p.ns.getJsonLogs(DepositEvent,
     fromBlock = some blockId(fromBlock),
     toBlock = some blockId(toBlock)))

@@ -413,7 +413,7 @@ method onBlockHeaders*(p: Web3DataProviderRef,
   if p.blockHeadersSubscription != nil:
     await p.blockHeadersSubscription.unsubscribe()
 
-  info "Waiting for new Eth1 block headers"
+  debug "Waiting for new Eth1 block headers"
 
   p.blockHeadersSubscription = await p.web3.subscribeForBlockHeaders(
     blockHeaderHandler, errorHandler)
@@ -554,7 +554,7 @@ proc findGenesisBlockInRange(m: MainchainMonitor,
     let candidateGenesisTime = genesis_time_from_eth1_timestamp(
       m.preset, candidateBlock.timestamp.uint64)
 
-    info "Probing possible genesis block",
+    notice "Probing possible genesis block",
       `block` = candidateBlock.number.uint64,
       candidateGenesisTime
 

@@ -596,7 +596,7 @@ proc checkForGenesisLoop(m: MainchainMonitor) {.async.} =
           genesisCandidateIdx = genesisCandidateIdx.get
           genesisCandidate = m.eth1Chain.blocks[genesisCandidateIdx]
 
-        info "Generating state for candidate block for genesis",
+        notice "Generating state for candidate block for genesis",
           blockNum = genesisCandidate.number,
           blockHash = genesisCandidate.voteData.block_hash,
           potentialDeposits = genesisCandidate.voteData.deposit_count

@@ -635,7 +635,7 @@ proc checkForGenesisLoop(m: MainchainMonitor) {.async.} =
          m.signalGenesis candidateState
          return
        else:
-          info "Eth2 genesis candidate block rejected",
+          notice "Eth2 genesis candidate block rejected",
            `block` = shortLog(genesisCandidate),
            validDeposits = genesisCandidate.knownGoodDepositsCount.get,
            needed = m.preset.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT

@@ -734,7 +734,7 @@ proc processDeposits(m: MainchainMonitor,
         voteData: latestEth1Data)
   else:
     template logBlockProcessed(blk) =
-      info "Eth1 block processed",
+      debug "Eth1 block processed",
         `block` = shortLog(blk), totalDeposits = blk.voteData.deposit_count
 
     await dataProvider.fetchBlockDetails(eth1Blocks[0])

@@ -828,4 +828,3 @@ proc getEth1BlockHash*(url: string, blockId: RtBlockIdentifier): Future[BlockHas
   await web3.close()
 
 {.pop.}
-
@@ -172,7 +172,7 @@ proc slash_validator*(state: var BeaconState, slashed_index: ValidatorIndex,
   initiate_validator_exit(state, slashed_index, cache)
   let validator = addr state.validators[slashed_index]
 
-  debug "slash_validator: ejecting validator via slashing (validator_leaving)",
+  notice "slash_validator: ejecting validator via slashing (validator_leaving)",
     index = slashed_index,
     num_validators = state.validators.len,
     current_epoch = get_current_epoch(state),

@@ -388,7 +388,7 @@ proc process_registry_updates*(state: var BeaconState,
 
     if is_active_validator(validator, get_current_epoch(state)) and
         validator.effective_balance <= EJECTION_BALANCE:
-      debug "Registry updating: ejecting validator due to low balance (validator_leaving)",
+      notice "Registry updating: ejecting validator due to low balance (validator_leaving)",
        index = index,
        num_validators = state.validators.len,
        current_epoch = get_current_epoch(state),
@@ -126,8 +126,8 @@ proc get_attestation_subnet_changes*(
     state: BeaconState, attachedValidators: openarray[ValidatorIndex],
     prevAttestationSubnets: AttestationSubnets, epoch: Epoch):
     tuple[a: AttestationSubnets, b: set[uint8], c: set[uint8]] =
-  static:
-    doAssert ATTESTATION_SUBNET_COUNT == 64
+  static: doAssert ATTESTATION_SUBNET_COUNT == 64 # Fits in a set[uint8]
+  doAssert attachedValidators.len > 0
 
   var attestationSubnets = prevAttestationSubnets
 
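A side note on the reshaped assert: Nim's `set[uint8]` is a fixed 256-bit bitset, so the 64 attestation subnet IDs fit with room to spare, and the compile-time `doAssert` pins that assumption. A minimal sketch:

```nim
static: doAssert 64 <= 256   # set[uint8] covers element values 0..255
var subnets: set[uint8]
subnets.incl 7'u8            # mark a subscription to subnet 7
doAssert 7'u8 in subnets
```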
@@ -144,8 +144,7 @@ proc get_attestation_subnet_changes*(
       state, state.slot.epoch + 1, attachedValidators.toHashSet):
     nextEpochSubnets.incl it.subnetIndex.uint8
 
-  doAssert nextEpochSubnets.len > 0 and
-    nextEpochSubnets.len <= attachedValidators.len
+  doAssert nextEpochSubnets.len <= attachedValidators.len
 
   let
     epochParity = epoch mod 2
@@ -59,4 +59,5 @@ type
     TARGET_COMMITTEE_SIZE
     VALIDATOR_REGISTRY_LIMIT
     WHISTLEBLOWER_REWARD_QUOTIENT
+    CONFIG_NAME
 

@@ -53,6 +53,7 @@ const
     DOMAIN_VOLUNTARY_EXIT,
     DOMAIN_SELECTION_PROOF,
     DOMAIN_AGGREGATE_AND_PROOF,
+    CONFIG_NAME
   }
 
   presetValueTypes* = {
@@ -102,7 +102,7 @@ proc process_randao(
   if not verify_epoch_signature(
       state.fork, state.genesis_validators_root, epoch, proposer_pubkey,
       body.randao_reveal):
-    notice "Randao mismatch", proposer_pubkey = shortLog(proposer_pubkey),
+    debug "Randao mismatch", proposer_pubkey = shortLog(proposer_pubkey),
       epoch,
       signature = shortLog(body.randao_reveal),
       slot = state.slot
@@ -7,7 +7,7 @@
 
 # This module contains the parts necessary to create a merkle hash from the core
 # SSZ types outlined in the spec:
-# https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/ssz/simple-serialize.md#merkleization
+# https://github.com/ethereum/eth2.0-specs/blob/v0.12.3/ssz/simple-serialize.md#merkleization
 
 {.push raises: [Defect].}
 

@@ -3,8 +3,8 @@
 # {.push raises: [Defect].}
 {.pragma: raisesssz, raises: [Defect, MalformedSszError, SszSizeMismatchError].}
 
-## SSZ serialiazation for core SSZ types, as specified in:
-# https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/ssz/simple-serialize.md#serialization
+## SSZ serialization for core SSZ types, as specified in:
+# https://github.com/ethereum/eth2.0-specs/blob/v0.12.3/ssz/simple-serialize.md#serialization
 
 import
   typetraits, options,
@@ -702,7 +702,7 @@ proc syncStep[A, B](man: SyncManager[A, B], index: int, peer: A) {.async.} =
   if peerAge >= man.maxStatusAge:
     # Peer's status information is very old, its time to update it
     man.workers[index].status = SyncWorkerStatus.UpdatingStatus
-    debug "Updating peer's status information", wall_clock_slot = wallSlot,
+    trace "Updating peer's status information", wall_clock_slot = wallSlot,
           remote_head_slot = peerSlot, local_head_slot = headSlot,
          peer = peer, peer_score = peer.getScore(), index = index,
          peer_speed = peer.netKbps(), topics = "syncman"

@@ -742,7 +742,7 @@ proc syncStep[A, B](man: SyncManager[A, B], index: int, peer: A) {.async.} =
     peerSlot = newPeerSlot
 
   if headAge <= man.maxHeadAge:
-    debug "We are in sync with network, exiting", wall_clock_slot = wallSlot,
+    info "We are in sync with network", wall_clock_slot = wallSlot,
          remote_head_slot = peerSlot, local_head_slot = headSlot,
          peer = peer, peer_score = peer.getScore(), index = index,
          peer_speed = peer.netKbps(), topics = "syncman"

@@ -752,7 +752,7 @@ proc syncStep[A, B](man: SyncManager[A, B], index: int, peer: A) {.async.} =
     return
 
   if headSlot >= peerSlot - man.maxHeadAge:
-    debug "We are in sync with peer, refreshing peer's status information",
+    debug "We are in sync with peer; refreshing peer's status information",
          wall_clock_slot = wallSlot, remote_head_slot = peerSlot,
          local_head_slot = headSlot, peer = peer, peer_score = peer.getScore(),
          index = index, peer_speed = peer.netKbps(), topics = "syncman"
@@ -942,7 +942,7 @@ proc syncLoop[A, B](man: SyncManager[A, B]) {.async.} =
         man.syncSpeed = 0.0
       else:
         if (lsm2.slot - lsm1.slot == 0'u64) and (pending > 1):
-          debug "Syncing process is not progressing, reset the queue",
+          info "Syncing process is not progressing, reset the queue",
                pending_workers_count = pending,
                to_slot = man.queue.outSlot,
                local_head_slot = lsm1.slot, topics = "syncman"

@@ -950,7 +950,7 @@ proc syncLoop[A, B](man: SyncManager[A, B]) {.async.} =
       else:
         man.syncSpeed = speed(lsm1, lsm2)
 
-    debug "Synchronization loop tick", wall_head_slot = wallSlot,
+    trace "Synchronization loop tick", wall_head_slot = wallSlot,
          local_head_slot = headSlot, queue_start_slot = man.queue.startSlot,
          queue_last_slot = man.queue.lastSlot,
          sync_speed = man.syncSpeed, pending_workers_count = pending,

@@ -964,7 +964,7 @@ proc syncLoop[A, B](man: SyncManager[A, B]) {.async.} =
 
     let (map, sleeping, waiting, pending) = man.getWorkersStats()
 
-    debug "Current syncing state", workers_map = map,
+    trace "Current syncing state", workers_map = map,
          sleeping_workers_count = sleeping,
          waiting_workers_count = waiting,
          pending_workers_count = pending,
@@ -110,7 +110,7 @@ p2pProtocol BeaconSync(version = 1,
       await peer.handleStatus(peer.networkState,
                               ourStatus, theirStatus.get())
     else:
-      warn "Status response not received in time",
+      debug "Status response not received in time",
            peer, error = theirStatus.error
 
   proc status(peer: Peer,

@@ -189,7 +189,7 @@ p2pProtocol BeaconSync(version = 1,
     debug "Received Goodbye message", reason = disconnectReasonName(reason), peer
 
 proc setStatusMsg(peer: Peer, statusMsg: StatusMsg) =
-  debug "Peer status", peer, statusMsg
+  trace "Peer status", peer, statusMsg
   peer.state(BeaconSync).statusMsg = statusMsg
   peer.state(BeaconSync).statusLastTime = Moment.now()
 

@@ -220,7 +220,7 @@ proc handleStatus(peer: Peer,
                   ourStatus: StatusMsg,
                   theirStatus: StatusMsg) {.async, gcsafe.} =
   if theirStatus.forkDigest != state.forkDigest:
-    notice "Irrelevant peer", peer, theirStatus, ourStatus
+    debug "Irrelevant peer", peer, theirStatus, ourStatus
     await peer.disconnect(IrrelevantNetwork)
   else:
     peer.setStatusMsg(theirStatus)
@@ -16,7 +16,7 @@ type
   ## which blocks are valid - in particular, blocks are not valid if they
   ## come from the future as seen from the local clock.
   ##
-  ## https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/fork-choice.md#fork-choice
+  ## https://github.com/ethereum/eth2.0-specs/blob/v0.12.3/specs/phase0/fork-choice.md#fork-choice
   ##
   # TODO replace time in chronos with a proper unit type, then this code can
   # follow:
@@ -348,7 +348,7 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
       payload: SignedAggregateAndProof) -> bool:
     debug "post_v1_validator_aggregate_and_proofs"
     node.network.broadcast(node.topicAggregateAndProofs, payload)
-    info "Aggregated attestation sent",
+    notice "Aggregated attestation sent",
       attestation = shortLog(payload.message.aggregate)
 
   rpcServer.rpc("get_v1_validator_duties_attester") do (
@@ -141,7 +141,7 @@ proc onSlotStart(vc: ValidatorClient, lastSlot, scheduledSlot: Slot) {.gcsafe, a
         .checkSlashableBlockProposal(public_key, slot)
       if notSlashable.isOk:
         let validator = vc.attachedValidators.validators[public_key]
-        info "Proposing block", slot = slot, public_key = public_key
+        notice "Proposing block", slot = slot, public_key = public_key
         let randao_reveal = await validator.genRandaoReveal(
           vc.fork, vc.beaconGenesis.genesis_validators_root, slot)
         var newBlock = SignedBeaconBlock(

@@ -179,7 +179,7 @@ proc onSlotStart(vc: ValidatorClient, lastSlot, scheduledSlot: Slot) {.gcsafe, a
         vc.attestationsForEpoch[epoch].contains slot:
       var validatorToAttestationDataRoot: Table[ValidatorPubKey, Eth2Digest]
       for a in vc.attestationsForEpoch[epoch][slot]:
-        info "Attesting", slot = slot, public_key = a.public_key
+        notice "Attesting", slot = slot, public_key = a.public_key
 
         let validator = vc.attachedValidators.validators[a.public_key]
         let ad = await vc.client.get_v1_validator_attestation(slot, a.committee_index)

@@ -232,13 +232,13 @@ proc onSlotStart(vc: ValidatorClient, lastSlot, scheduledSlot: Slot) {.gcsafe, a
 
         if is_aggregator(a.committee_length, slot_signature) and
             validatorToAttestationDataRoot.contains(a.public_key):
-          info "Aggregating", slot = slot, public_key = a.public_key
+          notice "Aggregating", slot = slot, public_key = a.public_key
 
           let aa = await vc.client.get_v1_validator_aggregate_attestation(
             slot, validatorToAttestationDataRoot[a.public_key])
           let aap = AggregateAndProof(aggregator_index: a.validator_index.uint64,
             aggregate: aa, selection_proof: slot_signature)
           let sig = await signAggregateAndProof(validator,
             aap, vc.fork, vc.beaconGenesis.genesis_validators_root)
           var signedAP = SignedAggregateAndProof(message: aap, signature: sig)
           discard await vc.client.post_v1_validator_aggregate_and_proofs(signedAP)
@@ -40,7 +40,7 @@ proc saveValidatorKey*(keyName, key: string, conf: BeaconNodeConf) =
   let outputFile = validatorsDir / keyName
   createDir validatorsDir
   writeFile(outputFile, key)
-  info "Imported validator key", file = outputFile
+  notice "Imported validator key", file = outputFile
 
 proc checkValidatorInRegistry(state: BeaconState,
                               pubKey: ValidatorPubKey) =

@@ -60,7 +60,7 @@ proc addLocalValidator*(node: BeaconNode,
 proc addLocalValidators*(node: BeaconNode) =
   for validatorKey in node.config.validatorKeys:
     node.addLocalValidator node.chainDag.headState.data.data, validatorKey
-  info "Local validators attached ", count = node.attachedValidators.count
+  notice "Local validators attached ", count = node.attachedValidators.count
 
 proc addRemoteValidators*(node: BeaconNode) =
   # load all the validators from the child process - loop until `end`

@@ -77,7 +77,7 @@ proc addRemoteValidators*(node: BeaconNode) =
                       outStream: node.vcProcess.outputStream,
                       pubKeyStr: $key))
     node.attachedValidators.addRemoteValidator(key, v)
-  info "Remote validators attached ", count = node.attachedValidators.count
+  notice "Remote validators attached ", count = node.attachedValidators.count
 
 proc getAttachedValidator*(node: BeaconNode,
                            pubkey: ValidatorPubKey): AttachedValidator =
@@ -340,7 +340,7 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
     # TODO the oldest attestations allowed are those that are older than the
     # finalized epoch.. also, it seems that posting very old attestations
     # is risky from a slashing perspective. More work is needed here.
-    notice "Skipping attestation, head is too recent",
+    warn "Skipping attestation, head is too recent",
       headSlot = shortLog(head.slot),
       slot = shortLog(slot)
     return

@@ -485,7 +485,7 @@ proc broadcastAggregatedAttestations(
         message: aggregateAndProof.get,
         signature: sig)
       node.network.broadcast(node.topicAggregateAndProofs, signedAP)
-      info "Aggregated attestation sent",
+      notice "Aggregated attestation sent",
         attestation = shortLog(signedAP.message.aggregate),
         validator = shortLog(curr[0].v)
 

@@ -514,7 +514,7 @@ proc handleValidatorDuties*(
     # TODO maybe even collect all work synchronously to avoid unnecessary
     # state rewinds while waiting for async operations like validator
     # signature..
-    notice "Catching up",
+    notice "Catching up on validator duties",
       curSlot = shortLog(curSlot),
       lastSlot = shortLog(lastSlot),
       slot = shortLog(slot)
@@ -26,13 +26,13 @@ proc addLocalValidator*(pool: var ValidatorPool,
     kind: inProcess,
     privKey: privKey)
   pool.validators[pubKey] = v
-  info "Local validator attached", pubKey, validator = shortLog(v)
+  notice "Local validator attached", pubKey, validator = shortLog(v)
 
 proc addRemoteValidator*(pool: var ValidatorPool,
                          pubKey: ValidatorPubKey,
                          v: AttachedValidator) =
   pool.validators[pubKey] = v
-  info "Remote validator attached", pubKey, validator = shortLog(v)
+  notice "Remote validator attached", pubKey, validator = shortLog(v)
 
 proc getValidator*(pool: ValidatorPool,
                    validatorKey: ValidatorPubKey): AttachedValidator =
@@ -1,9 +1,5 @@
 # Emergency fixes
 # ---------------------------------------------------
-if defined(arm64):
-  # https://twitter.com/EthereumOnARM/status/1309477357938499585?s=20
-  # https://github.com/supranational/blst/issues/31
-  switch("define", "BLS_FORCE_BACKEND=miracl")
 
 # ---------------------------------------------------
 
@@ -0,0 +1,46 @@
+# Logging strategy
+
+This document describes the overall logging strategy of NBC.
+This is a suggested guideline; rare events can have a higher logging level
+than suggested here, for example at beacon node start or stop.
+
+The main objectives are:
+- INFO log level or higher should be suitable for long-term use, i.e. running for months or weeks. Logs are the users' main interface to their beacon node and validators. In particular, logging should not be a denial-of-service vector, either through high CPU usage of the console or by filling disk space at an unsustainable rate.
+- INFO logs or higher should be targeted at users; logs only relevant to devs should be relegated to DEBUG or TRACE, or commented out.
+- DEBUG log level should still be readable by visual inspection during a slot time (6 seconds).
+
+Here is a suggested breakdown of content per log level:
+
+- Fatal: node will crash
+- Error: bugs or critical unexpected behaviors
+  - node cannot proceed with a task
+  - node consistency is compromised
+- Warning: errors that can be expected or handled
+  - networking issues
+  - node cannot proceed with a task but can recover or work in degraded mode (invalid bootstrap address, out of Infura requests)
+- Notice: important user and validator info and one-time events
+  - node start/quit
+  - logs about validator funds
+  - own PoS attestations
+  - own PoS blocks
+  - chain reached finality
+  - validators have been slashed (i.e. might indicate malicious activity or a network/chain split)
+- Info: standard user target
+  - networking or consensus information
+- Debug: devs and debugging users
+  - common networking activity (new peers, kicked peers, timeouts)
+  - various consensus/proof-of-stake processing
+- Trace: devs only
+  - keep-alives
+  - routine task schedules
+  - "spammy" tasks that clutter debugging (attestations received, status/control messages)
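As an illustration of the level guideline above, here is a minimal sketch using the chronicles library these call sites log with (the proc names and fields are invented for the example):

```nim
import chronicles

proc onPeerConnected(peerId: string, totalPeers: int) =
  # Routine networking activity: interesting to devs, not end users.
  debug "Peer connected", peerId, totalPeers

proc onFinalization(epoch: uint64) =
  # Rare, important consensus event: surfaced at notice level.
  notice "Chain reached finality", epoch
```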
+
+Logs emitted at high frequency should be summarized even at trace level to avoid drowning other subsystems.
+For example, they can use a uint8 counter:
+```nim
+proc myHighFreqProc() =
+  var counter {.threadvar.}: uint8
+  if counter == 255:
+    trace "Total of 255 myHighFreqProc calls"
+  counter += 1
+```
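One detail worth noting in the counter sketch: Nim's unsigned arithmetic wraps, so the counter rolls over from 255 back to 0 and the trace line fires once every 256 calls rather than once ever:

```nim
var c: uint8 = 255
c += 1            # unsigned arithmetic wraps around in Nim
doAssert c == 0
```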
@@ -5,7 +5,7 @@ The Nimbus beacon chain is a research implementation of the Beacon Chain – the
 
 - Open sourced at [github.com/status-im/nim-beacon-chain](https://github.com/status-im/nim-beacon-chain/tree/master)
 
-- Specification described at [ethereum/eth2.0-specs](https://github.com/ethereum/eth2.0-specs/tree/v0.12.2#phase-0)
+- Specification described at [ethereum/eth2.0-specs](https://github.com/ethereum/eth2.0-specs/tree/v0.12.3#phase-0)
 
 
 ### Helpful resources
(File diff suppressed because it is too large.)
@@ -218,7 +218,8 @@ proc copyPrunedDatabase(
     let sr = db.getStateRoot(signedBlock.root, slot)
     if sr.isErr:
       if stateRequired:
-        doAssert false, "state root and state required"
+        echo "skipping state root required for slot ",
+          slot, " with root ", signedBlock.root
       continue
 
     if not db.getState(sr.get, beaconState[], noRollback):
@@ -4,7 +4,7 @@ import testutils/fuzzing_engines
 
 const
   gitRoot = thisDir() / ".."
-  fixturesDir = gitRoot / "vendor" / "nim-eth2-scenarios" / "tests-v0.12.2" / "mainnet" / "phase0" / "ssz_static"
+  fixturesDir = gitRoot / "vendor" / "nim-eth2-scenarios" / "tests-v0.12.3" / "mainnet" / "phase0" / "ssz_static"
 
   fuzzingTestsDir = gitRoot / "tests" / "fuzzing"
   fuzzingCorpusesDir = fuzzingTestsDir / "corpus"
@@ -38,7 +38,7 @@ proc runTest(identifier: string) =
   else:
     prefix = "[Invalid] "
 
-  timedTest prefix & astToStr(identifier):
+  timedTest prefix & identifier:
     let proposerSlashing = parseTest(testDir/"proposer_slashing.ssz", SSZ, ProposerSlashing)
     var preState = newClone(parseTest(testDir/"pre.ssz", SSZ, BeaconState))
 
@@ -25,7 +25,7 @@ const
   RewardsDirLeak = RewardsDirBase/"leak"/"pyspec_tests"
   RewardsDirRandom = RewardsDirBase/"random"/"pyspec_tests"
 
-# https://github.com/ethereum/eth2.0-specs/tree/v0.12.2/tests/formats/rewards#rewards-tests
+# https://github.com/ethereum/eth2.0-specs/tree/v0.12.3/tests/formats/rewards#rewards-tests
 type Deltas = object
   rewards: List[uint64, Limit VALIDATOR_REGISTRY_LIMIT]
   penalties: List[uint64, Limit VALIDATOR_REGISTRY_LIMIT]
@@ -1 +1 @@
-Subproject commit 39456fa3d5b637053b616e50a8350b2b932a1d4c
+Subproject commit 2e8040ec5e6e5416846e008b4d455bd026394fb0