Mirror of https://github.com/status-im/nimbus-eth2.git, synced 2025-01-22 20:42:13 +00:00
more debug rpcs (#2385)
* more debug rpcs
* cleanups
* oops fixing
* add wip change log notes about rpc renaming
* fix wrong breaking changes changelog location
commit 26c56c2800 (parent c519c6012e)
 CHANGELOG.md | 10 ++++++++++
@@ -1,3 +1,13 @@
+TBD
+==================
+
+**Breaking changes:**
+
+* Renamed some semi-internal debug rpc to be more explicit about their nature:
+  * `getGossipSubPeers` is now `debug_getGossipSubPeers`
+  * `getChronosFutures` is now `debug_getChronosFutures`
+
+
 2021-03-10 v1.0.10
 ==================
 
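The renamed endpoints keep their signatures, so callers only need the new method names. Below is a minimal sketch of a caller using Nim's standard library; the endpoint is an assumption (a local beacon node started with the JSON-RPC server enabled, on the default --rpc-port of 9190), not something this commit prescribes, and callDebugRpc is a hypothetical helper.

import std/[httpclient, json]

# Minimal JSON-RPC 2.0 helper; the endpoint below is an assumption
# (local node, default --rpc-port=9190), not part of this commit.
proc callDebugRpc(meth: string): JsonNode =
  let client = newHttpClient()
  defer: client.close()
  client.headers = newHttpHeaders({"Content-Type": "application/json"})
  let payload = %*{"jsonrpc": "2.0", "id": 1, "method": meth, "params": []}
  parseJson(client.postContent("http://127.0.0.1:9190", $payload))

# The old names (getGossipSubPeers, getChronosFutures) are gone after
# this change; only the debug_-prefixed forms are registered.
echo callDebugRpc("debug_getGossipSubPeers").pretty
echo callDebugRpc("debug_getChronosFutures").pretty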
@@ -20,7 +20,6 @@ import
-
   libp2p/protocols/pubsub/pubsubpeer
 
 
 logScope: topics = "nimbusapi"
 
 type
@@ -100,7 +99,7 @@ proc installNimbusApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
     node.chainDag.withState(node.chainDag.tmpState, head.atSlot(wallSlot)):
       return node.getBlockProposalEth1Data(state)
 
-  rpcServer.rpc("getChronosFutures") do () -> seq[FutureInfo]:
+  rpcServer.rpc("debug_getChronosFutures") do () -> seq[FutureInfo]:
     when defined(chronosFutureTracking):
       var res: seq[FutureInfo]
 
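Note that debug_getChronosFutures only returns data when the node binary was built with -d:chronosFutureTracking; without that define the handler raises a CatchableError asking the caller to rebuild with the flag, as the raise just below shows.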
@@ -119,11 +118,11 @@ proc installNimbusApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
       raise (ref CatchableError)(
         msg: "Compile with '-d:chronosFutureTracking' to enable this request")
 
-  rpcServer.rpc("getGossipSubPeers") do () -> JsonNode:
+  rpcServer.rpc("debug_getGossipSubPeers") do () -> JsonNode:
     var res = newJObject()
     var gossipsub = newJObject()
 
-    proc toNode(v: PubSubPeer): JsonNode =
+    proc toNode(v: PubSubPeer, backoff: Moment): JsonNode =
       %(
         peerId: $v.peerId,
         score: v.score,
@@ -131,13 +130,27 @@ proc installNimbusApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
         iHaveBudget: v.iHaveBudget,
         outbound: v.outbound,
         appScore: v.appScore,
-        behaviourPenalty: v.behaviourPenalty
+        behaviourPenalty: v.behaviourPenalty,
+        sendConnAvail: v.sendConn != nil,
+        closed: v.sendConn != nil and v.sendConn.closed,
+        atEof: v.sendConn != nil and v.sendConn.atEof,
+        address: if v.address.isSome():
+            $v.address.get()
+          else:
+            "<no address>",
+        backoff: $(backoff - Moment.now()),
+        agent: when defined(libp2p_agents_metrics):
+            v.shortAgent
+          else:
+            "unknown",
       )
 
     for topic, v in node.network.pubsub.gossipsub:
       var peers = newJArray()
+      let backoff = node.network.pubsub.backingOff.getOrDefault(topic)
       for peer in v:
-        peers.add(peer.toNode())
+        peers.add(peer.toNode(backOff.getOrDefault(peer.peerId)))
+
 
       gossipsub.add(topic, peers)
 
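The extended toNode now reports the send-connection state (sendConnAvail, closed, atEof), the peer's address with a "<no address>" fallback, the remaining per-topic gossipsub backoff (backoff - Moment.now(), rendered as a string), and, when built with -d:libp2p_agents_metrics, the peer's agent string.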
@@ -146,11 +159,52 @@ proc installNimbusApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
     var mesh = newJObject()
     for topic, v in node.network.pubsub.mesh:
       var peers = newJArray()
+      let backoff = node.network.pubsub.backingOff.getOrDefault(topic)
       for peer in v:
-        peers.add(peer.toNode())
+        peers.add(peer.toNode(backOff.getOrDefault(peer.peerId)))
 
       mesh.add(topic, peers)
 
     res.add("mesh", mesh)
 
+    var coloc = newJArray()
+    for k, v in node.network.pubsub.peersInIP:
+      var a = newJObject()
+      var peers = newJArray()
+      for p in v:
+        peers.add(%($p))
+      a.add($k, peers)
+      coloc.add(a)
+
+    res.add("colocationPeers", coloc)
+
+    var stats = newJArray()
+    for peerId, pstats in node.network.pubsub.peerStats:
+      let
+        peer = node.network.pubsub.peers.getOrDefault(peerId)
+        null = isNil(peer)
+        connected = if null:
+            false
+          else:
+            peer.connected()
+
+      stats.add(%(
+        peerId: $peerId,
+        null: null,
+        connected: connected,
+        expire: $(pstats.expire - Moment.now()),
+        score: pstats.score
+      ))
+
+    res.add("peerStats", stats)
+
+    var peers = newJArray()
+    for peerId, peer in node.network.pubsub.peers:
+      peers.add(%(
+        connected: peer.connected,
+        peerId: $peerId
+      ))
+
+    res.add("allPeers", peers)
+
     return res
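After this change the debug_getGossipSubPeers result carries the five top-level keys assembled above: gossipsub and mesh (topic mapped to per-peer objects), colocationPeers (peer ids grouped by IP), peerStats, and allPeers. A small sketch of walking that shape with std/json follows; the literal below is a hypothetical, trimmed response used only to illustrate the structure, not real node output.

import std/json

# Hypothetical, trimmed response mirroring the shape the handler builds;
# a real reply carries full per-peer objects in each array.
let raw = """{"result": {"gossipsub": {}, "mesh": {"topic-a": []},
  "colocationPeers": [], "peerStats": [], "allPeers": []}}"""
let res = parseJson(raw)["result"]
for topic, peers in res["mesh"]:
  echo topic, ": ", peers.len, " mesh peers"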