Mirror of https://github.com/status-im/nimbus-eth2.git (synced 2025-03-03 23:51:22 +00:00)
More tracing

parent ff3da4e152
commit 8de4d38e4e
@@ -1,6 +1,6 @@
 import
   deques, tables, options,
-  stew/[endians2],
+  stew/[endians2], chronicles,
   spec/[datatypes, crypto, digest],
   beacon_chain_db
 
@@ -172,6 +172,10 @@ type
     ## Unique identifier for a particular fork in the block chain - normally,
     ## there's a block for every slot, but in the case a block is not produced,
     ## the chain progresses anyway, producing a new state for every slot.
+    #
+    # TODO: Isn't this type unnecessary?
+    # The `BlockRef` stored here already includes the `slot` number as well.
+    # We should either remove it or write a comment clarifying why it exists.
     blck*: BlockRef
     slot*: Slot
 
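The BlockSlot doc comment above describes how a single block can stand in for several slots when proposals are missed. A minimal sketch of that relationship, using simplified stand-in types rather than the real definitions from spec/datatypes (where Slot is a distinct uint64 and BlockRef carries far more state):

# Simplified stand-ins; not the real definitions from spec/datatypes.
type
  Slot = uint64
  BlockRef = ref object
    slot: Slot
  BlockSlot = object
    blck: BlockRef
    slot: Slot

let b = BlockRef(slot: Slot(10))              # block proposed at slot 10
# Slots 11 and 12 are empty, yet a new state is produced for each of them:
let atSlot11 = BlockSlot(blck: b, slot: Slot(11))
let atSlot12 = BlockSlot(blck: b, slot: Slot(12))
assert atSlot11.blck == atSlot12.blck         # same most recent block...
assert atSlot11.slot != atSlot12.slot         # ...different positions in the chain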
@@ -208,3 +212,9 @@ type
 
 proc shortLog*(v: AttachedValidator): string = shortLog(v.pubKey)
 
+chronicles.formatIt BlockSlot:
+  ($it.blck.root)[0..7] & ":" & $it.slot
+
+chronicles.formatIt BlockRef:
+  ($it.root)[0..7] & ":" & $it.slot
+
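The two formatIt declarations added above teach chronicles how to render BlockSlot and BlockRef values in log records. A self-contained sketch of the same mechanism; DummyRoot and its value are invented for illustration and are not types from this codebase:

import chronicles

type DummyRoot = object
  data: string

# Values of this type are now rendered through the expression below
# whenever they appear in a log statement.
chronicles.formatIt DummyRoot:
  it.data[0..7]

let root = DummyRoot(data: "8de4d38e4e52af1c")
info "example message", root   # logged as root=8de4d38e instead of the full field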
@@ -120,6 +120,10 @@ proc init*(T: type BlockPool, db: BeaconChainDB): BlockPool =
   doAssert justifiedHead.slot >= finalizedHead.slot,
     "justified head comes before finalized head - database corrupt?"
 
+  debug "Block pool initialized",
+    head = head.blck, finalizedHead, tail = tailRef,
+    totalBlocks = blocks.len, totalKnownSlots = blocksBySlot.len
+
   BlockPool(
     pending: initTable[Eth2Digest, BeaconBlock](),
     missing: initTable[Eth2Digest, MissingBlock](),
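The new debug statement relies on chronicles structured logging: each argument becomes a named field on the log record, either as an explicit name = expr pair or as a bare identifier logged under its own name. A minimal sketch with placeholder values:

import chronicles

let
  totalBlocks = 128
  totalKnownSlots = 130

# Bare identifiers are logged under their own names; explicit pairs work too.
info "Block pool initialized (sketch)",
  totalBlocks, totalKnownSlots, tail = "7a91f3c2"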
@@ -348,7 +352,7 @@ func getBlockRange*(pool: BlockPool, headBlock: Eth2Digest,
 
   var b = pool.getRef(headBlock)
   if b == nil:
-    trace "head block not found"
+    trace "head block not found", headBlock
     return
 
   if b.slot < startSlot:
@@ -643,6 +647,7 @@ proc updateHead*(pool: BlockPool, state: var StateData, blck: BlockRef) =
   ## of operations naturally becomes important here - after updating the head,
   ## blocks that were once considered potential candidates for a tree will
   ## now fall from grace, or no longer be considered resolved.
   doAssert blck.parent != nil
+  logScope: pcs = "fork_choice"
 
   if pool.head.blck == blck:
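The added logScope line attaches pcs = "fork_choice" as an extra property to every log statement emitted from the surrounding scope, so it does not have to be repeated at each call site. A small sketch of the mechanism with made-up messages:

import chronicles

proc updateHeadSketch() =
  # Every log statement below now carries pcs=fork_choice as a property.
  logScope: pcs = "fork_choice"

  info "updated head"
  info "reached new finalization checkpoint"

updateHeadSketch()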
@@ -705,6 +710,8 @@ proc updateHead*(pool: BlockPool, state: var StateData, blck: BlockRef) =
     headBlockSlot = shortLog(blck.slot),
     cat = "fork_choice"
 
+  pool.finalizedHead = finalizedHead
+
   var cur = finalizedHead.blck
   while cur != pool.finalizedHead.blck:
     # Finalization means that we choose a single chain as the canonical one -
@@ -728,8 +735,6 @@ proc updateHead*(pool: BlockPool, state: var StateData, blck: BlockRef) =
     cur.parent.children = @[cur]
     cur = cur.parent
 
-  pool.finalizedHead = finalizedHead
-
   let hlen = pool.heads.len
   for i in 0..<hlen:
     let n = hlen - i - 1
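The loop above prunes competing branches so that only the canonical chain survives finalization (cur.parent.children = @[cur]). A simplified sketch of that pruning idea, with a toy Node type standing in for BlockRef:

# Toy stand-in for BlockRef with only the links the sketch needs.
type Node = ref object
  parent: Node
  children: seq[Node]

proc pruneToCanonical(newFinalized, oldFinalized: Node) =
  # Walk back from the newly finalized node towards the previously finalized
  # one, dropping sibling branches so a single canonical chain remains.
  var cur = newFinalized
  while cur != oldFinalized:
    cur.parent.children = @[cur]
    cur = cur.parent

let
  root = Node()
  canonical = Node(parent: root)
  sibling = Node(parent: root)   # competing branch, pruned below
root.children = @[canonical, sibling]

pruneToCanonical(canonical, root)
assert root.children == @[canonical]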
@@ -593,7 +593,7 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend =
       when chronicles.runtimeFilteringEnabled:
         setLogLevel(LogLevel.TRACE)
         defer: setLogLevel(LogLevel.DEBUG)
-      trace "incoming beaconBlocksByRange stream"
+      trace "incoming " & `msgNameLit` & " stream"
 
       defer:
         `await` safeClose(`streamVar`)
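The when chronicles.runtimeFilteringEnabled block temporarily raises the runtime log level to TRACE and restores DEBUG when the surrounding scope exits. A minimal sketch of the same pattern; it assumes the binary is built with -d:chronicles_runtime_filtering=on, which is what makes runtimeFilteringEnabled true and setLogLevel usable at run time:

import chronicles

# Assumption: compiled with -d:chronicles_runtime_filtering=on.
when chronicles.runtimeFilteringEnabled:
  proc verboseSection() =
    setLogLevel(LogLevel.TRACE)          # show trace output for this call only
    defer: setLogLevel(LogLevel.DEBUG)   # restored even on early return or raise
    trace "inside the verbose section"

  verboseSection()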
@@ -33,7 +33,7 @@ type
 
 chronicles.formatIt Eth2Digest:
   mixin toHex
-  it.data.toHex(true)
+  it.data.toHex(true)[0..7]
 
 func shortLog*(x: Eth2Digest): string =
   x.data.toHex(true)[0..7]
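Both the formatIt body and shortLog keep only the first 8 hex characters of a digest via the [0..7] slice. A tiny sketch of that truncation with a made-up value:

# Truncate a digest's hex form to its first 8 characters, as shortLog does.
func shortHex(hexDigest: string): string =
  hexDigest[0..7]

echo shortHex("8de4d38e4e52af1c90aa7bd13c55e0f2")   # -> 8de4d38e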
@@ -109,6 +109,7 @@ p2pProtocol BeaconSync(version = 1,
     let
       ourStatus = peer.networkState.getCurrentStatus()
 
+    trace "Sending status msg", ourStatus
     await response.send(ourStatus)
 
     if not peer.state.initialStatusReceived:
@@ -20,7 +20,7 @@ cd - &>/dev/null
 # When changing these, also update the readme section on running simulation
 # so that the run_node example is correct!
 NUM_VALIDATORS=${VALIDATORS:-192}
-TOTAL_NODES=${NODES:-4}
+TOTAL_NODES=${NODES:-2}
 TOTAL_USER_NODES=${USER_NODES:-0}
 TOTAL_SYSTEM_NODES=$(( TOTAL_NODES - TOTAL_USER_NODES ))
 MASTER_NODE=$(( TOTAL_NODES - 1 ))