put expensive metrics under a Nim define (#310)

Ștefan Talpalaru 2020-08-05 01:27:59 +02:00 committed by GitHub
parent cf2b42b914
commit 843d32f8db
6 changed files with 67 additions and 47 deletions
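Every change below follows the same pattern: the metric declaration and each update site are wrapped in a compile-time `when defined(libp2p_expensive_metrics)` guard, so the collectors only exist in builds compiled with `-d:libp2p_expensive_metrics`. Here is a minimal sketch of that pattern, assuming the nim-metrics `declareCounter`/`inc` API already used in these files; the collector and proc names are illustrative only, not part of the commit:

```nim
import metrics

when defined(libp2p_expensive_metrics):
  # Hypothetical collector, named only for this sketch.
  declareCounter(example_expensive_messages,
    "per-topic message counter", labels = ["topic"])

proc recordMessage(topic: string) =
  echo "handled message on ", topic   # ordinary work, always compiled
  when defined(libp2p_expensive_metrics):
    # This update only exists in builds made with
    # `nim c -d:libp2p_expensive_metrics ...`; otherwise the whole
    # branch is discarded at compile time, so it has no runtime cost.
    example_expensive_messages.inc(labelValues = [topic])
```

Metrics declared outside such a guard (the cheap ones) remain available in every build.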

View File

@@ -68,6 +68,7 @@ Please read the [GETTING_STARTED.md](docs/GETTING_STARTED.md) guide.
### Tutorials and Examples
Example code can be found in the [examples folder](/examples).
#### Direct Chat Tutorial
- [Part I](https://our.status.im/nim-libp2p-tutorial-a-peer-to-peer-chat-example-1/): Set up the main function and use multithreading to process IO.
- [Part II](https://our.status.im/nim-libp2p-tutorial-a-peer-to-peer-chat-example-2/): Dial a remote peer and allow customized user input commands.
@@ -148,6 +149,13 @@ Packages that exist in the original libp2p specs and are under active development
** Note that the current stack reflects the minimal requirements for the upcoming Eth2 implementation.
### Tips and tricks
- enable expensive metrics:
```bash
nim c -d:libp2p_expensive_metrics some_file.nim
```
## Contribute
The libp2p implementation in Nim is a work in progress. We welcome contributors to help out! Specifically, you can:
@@ -168,4 +176,5 @@ or
* Apache License, Version 2.0, ([LICENSE-APACHEv2](LICENSE-APACHEv2) or http://www.apache.org/licenses/LICENSE-2.0)
at your option. This file may not be copied, modified, or distributed except according to those terms.
at your option. These files may not be copied, modified, or distributed except according to those terms.

View File

@@ -23,7 +23,8 @@ export muxer
logScope:
  topics = "mplex"
declareGauge(libp2p_mplex_channels, "mplex channels", labels = ["initiator", "peer"])
when defined(libp2p_expensive_metrics):
  declareGauge(libp2p_mplex_channels, "mplex channels", labels = ["initiator", "peer"])
type
  Mplex* = ref object of Muxer
@@ -76,6 +77,7 @@ proc newStreamInternal*(m: Mplex,
"channel slot already taken!")
m.getChannelList(initiator)[id] = result
when defined(libp2p_expensive_metrics):
  libp2p_mplex_channels.set(
    m.getChannelList(initiator).len.int64,
    labelValues = [$initiator,
@@ -89,6 +91,7 @@ proc cleanupChann(m: Mplex, chann: LPChannel) {.async, inline.} =
m.getChannelList(chann.initiator).del(chann.id)
trace "cleaned up channel", id = chann.id
when defined(libp2p_expensive_metrics):
  libp2p_mplex_channels.set(
    m.getChannelList(chann.initiator).len.int64,
    labelValues = [$chann.initiator,

View File

@@ -140,6 +140,7 @@ method publish*(f: FloodSub,
# start the future but do not wait yet
let published = await f.publishHelper(f.floodsub.getOrDefault(topic), @[msg], timeout)
when defined(libp2p_expensive_metrics):
  libp2p_pubsub_messages_published.inc(labelValues = [topic])
trace "published message to peers", peers = published,

View File

@@ -518,6 +518,7 @@ method publish*(g: GossipSub,
g.mcache.put(msgId, msg)
let published = await g.publishHelper(peers, @[msg], timeout)
when defined(libp2p_expensive_metrics):
  if published > 0:
    libp2p_pubsub_messages_published.inc(labelValues = [topic])

View File

@@ -27,7 +27,8 @@ declareGauge(libp2p_pubsub_peers, "pubsub peer instances")
declareGauge(libp2p_pubsub_topics, "pubsub subscribed topics")
declareCounter(libp2p_pubsub_validation_success, "pubsub successfully validated messages")
declareCounter(libp2p_pubsub_validation_failure, "pubsub failed validated messages")
declarePublicCounter(libp2p_pubsub_messages_published, "published messages", labels = ["topic"])
when defined(libp2p_expensive_metrics):
  declarePublicCounter(libp2p_pubsub_messages_published, "published messages", labels = ["topic"])
type
  TopicHandler* = proc(topic: string,

View File

@@ -21,10 +21,11 @@ import rpc/[messages, message, protobuf],
logScope:
  topics = "pubsubpeer"
declareCounter(libp2p_pubsub_sent_messages, "number of messages sent", labels = ["id", "topic"])
declareCounter(libp2p_pubsub_received_messages, "number of messages received", labels = ["id", "topic"])
declareCounter(libp2p_pubsub_skipped_received_messages, "number of received skipped messages", labels = ["id"])
declareCounter(libp2p_pubsub_skipped_sent_messages, "number of sent skipped messages", labels = ["id"])
when defined(libp2p_expensive_metrics):
  declareCounter(libp2p_pubsub_sent_messages, "number of messages sent", labels = ["id", "topic"])
  declareCounter(libp2p_pubsub_received_messages, "number of messages received", labels = ["id", "topic"])
  declareCounter(libp2p_pubsub_skipped_received_messages, "number of received skipped messages", labels = ["id"])
  declareCounter(libp2p_pubsub_skipped_sent_messages, "number of sent skipped messages", labels = ["id"])
const
  DefaultReadTimeout* = 1.minutes
@@ -91,6 +92,7 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
let digest = $(sha256.digest(data))
trace "read data from peer", data = data.shortLog
if digest in p.recvdRpcCache:
  when defined(libp2p_expensive_metrics):
    libp2p_pubsub_skipped_received_messages.inc(labelValues = [p.id])
  trace "message already received, skipping"
  continue
@@ -106,6 +108,7 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
# trigger hooks
p.recvObservers(msg)
when defined(libp2p_expensive_metrics):
  for m in msg.messages:
    for t in m.topicIDs:
      # metrics
@@ -148,6 +151,7 @@ proc send*(
let digest = $(sha256.digest(encoded))
if digest in p.sentRpcCache:
  trace "message already sent to peer, skipping"
  when defined(libp2p_expensive_metrics):
    libp2p_pubsub_skipped_sent_messages.inc(labelValues = [p.id])
  return
@@ -168,6 +172,7 @@ proc send*(
p.sentRpcCache.put(digest)
trace "sent pubsub message to remote"
when defined(libp2p_expensive_metrics):
  for x in mm.messages:
    for t in x.topicIDs:
      # metrics