Revert "feat: message prioritization with immediate peer-published dispatch and queuing for other msgs (#1015)"
This reverts commit fe4ff79885.

parent fe4ff79885
commit c5e4f8e12d
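In terms of the helpers touched below, the revert removes the `isHighPriority` argument that #1015 had added to `PubSub.send`, `PubSub.broadcast` and `PubSubPeer.sendEncoded`, so every caller returns to a single, unprioritized send path. A rough sketch of the call-site change, assembled from the hunks in this diff (surrounding code elided):

    # before the revert (#1015 in place): callers state a priority per message
    g.broadcast(peers, RPCMsg(messages: @[msg]), isHighPriority = true)         # own publish, dispatched immediately
    g.broadcast(toSendPeers, RPCMsg(messages: @[msg]), isHighPriority = false)  # relayed message, queued
    # after the revert: one code path, no queueing decision at the call site
    g.broadcast(peers, RPCMsg(messages: @[msg]))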
@@ -157,7 +157,7 @@ method rpcHandler*(f: FloodSub,

     # In theory, if topics are the same in all messages, we could batch - we'd
     # also have to be careful to only include validated messages
-    f.broadcast(toSendPeers, RPCMsg(messages: @[msg]), isHighPriority = false)
+    f.broadcast(toSendPeers, RPCMsg(messages: @[msg]))
     trace "Forwared message to peers", peers = toSendPeers.len

   f.updateMetrics(rpcMsg)
@@ -219,7 +219,7 @@ method publish*(f: FloodSub,
     return 0

   # Try to send to all peers that are known to be interested
-  f.broadcast(peers, RPCMsg(messages: @[msg]), isHighPriority = true)
+  f.broadcast(peers, RPCMsg(messages: @[msg]))

   when defined(libp2p_expensive_metrics):
     libp2p_pubsub_messages_published.inc(labelValues = [topic])
@@ -220,8 +220,6 @@ method unsubscribePeer*(g: GossipSub, peer: PeerId) =
     for topic, info in stats[].topicInfos.mpairs:
       info.firstMessageDeliveries = 0

-  pubSubPeer.stopSendNonPriorityTask()
-
   procCall FloodSub(g).unsubscribePeer(peer)

 proc handleSubscribe*(g: GossipSub,
@@ -281,28 +279,12 @@ proc handleControl(g: GossipSub, peer: PubSubPeer, control: ControlMessage) =
   respControl.prune.add(g.handleGraft(peer, control.graft))
   let messages = g.handleIWant(peer, control.iwant)

-  let
-    isPruneNotEmpty = respControl.prune.len > 0
-    isIWantNotEmpty = respControl.iwant.len > 0
-
-  if isPruneNotEmpty or isIWantNotEmpty:
-
-    if isIWantNotEmpty:
-      libp2p_pubsub_broadcast_iwant.inc(respControl.iwant.len.int64)
-
-    if isPruneNotEmpty:
-      for prune in respControl.prune:
-        if g.knownTopics.contains(prune.topicId):
-          libp2p_pubsub_broadcast_prune.inc(labelValues = [prune.topicId])
-        else:
-          libp2p_pubsub_broadcast_prune.inc(labelValues = ["generic"])
-
-    trace "sending control message", msg = shortLog(respControl), peer
-    g.send(
-      peer,
-      RPCMsg(control: some(respControl)), isHighPriority = true)
-
-  if messages.len > 0:
+  if
+    respControl.prune.len > 0 or
+    respControl.iwant.len > 0 or
+    messages.len > 0:
+    # iwant and prunes from here, also messages
+
     for smsg in messages:
       for topic in smsg.topicIds:
         if g.knownTopics.contains(topic):
@@ -310,11 +292,18 @@ proc handleControl(g: GossipSub, peer: PubSubPeer, control: ControlMessage) =
         else:
           libp2p_pubsub_broadcast_messages.inc(labelValues = ["generic"])

-    # iwant replies have lower priority
-    trace "sending iwant reply messages", peer
+    libp2p_pubsub_broadcast_iwant.inc(respControl.iwant.len.int64)
+
+    for prune in respControl.prune:
+      if g.knownTopics.contains(prune.topicId):
+        libp2p_pubsub_broadcast_prune.inc(labelValues = [prune.topicId])
+      else:
+        libp2p_pubsub_broadcast_prune.inc(labelValues = ["generic"])
+
+    trace "sending control message", msg = shortLog(respControl), peer
     g.send(
       peer,
-      RPCMsg(messages: messages), isHighPriority = false)
+      RPCMsg(control: some(respControl), messages: messages))

 proc validateAndRelay(g: GossipSub,
                       msg: Message,
@@ -367,7 +356,7 @@ proc validateAndRelay(g: GossipSub,
   if msg.data.len > msgId.len * 10:
     g.broadcast(toSendPeers, RPCMsg(control: some(ControlMessage(
       idontwant: @[ControlIWant(messageIds: @[msgId])]
-    ))), isHighPriority = true)
+    ))))

   for peer in toSendPeers:
     for heDontWant in peer.heDontWants:
@@ -381,7 +370,7 @@ proc validateAndRelay(g: GossipSub,

   # In theory, if topics are the same in all messages, we could batch - we'd
   # also have to be careful to only include validated messages
-  g.broadcast(toSendPeers, RPCMsg(messages: @[msg]), isHighPriority = false)
+  g.broadcast(toSendPeers, RPCMsg(messages: @[msg]))
   trace "forwarded message to peers", peers = toSendPeers.len, msgId, peer
   for topic in msg.topicIds:
     if topic notin g.topics: continue
@@ -452,7 +441,7 @@ method rpcHandler*(g: GossipSub,
   peer.recvObservers(rpcMsg)

   if rpcMsg.ping.len in 1..<64 and peer.pingBudget > 0:
-    g.send(peer, RPCMsg(pong: rpcMsg.ping), isHighPriority = true)
+    g.send(peer, RPCMsg(pong: rpcMsg.ping))
     peer.pingBudget.dec
   for i in 0..<min(g.topicsHigh, rpcMsg.subscriptions.len):
     template sub: untyped = rpcMsg.subscriptions[i]
@@ -562,7 +551,7 @@ method onTopicSubscription*(g: GossipSub, topic: string, subscribed: bool) =
         topicID: topic,
         peers: g.peerExchangeList(topic),
         backoff: g.parameters.unsubscribeBackoff.seconds.uint64)])))
-    g.broadcast(mpeers, msg, isHighPriority = true)
+    g.broadcast(mpeers, msg)

     for peer in mpeers:
       g.pruned(peer, topic, backoff = some(g.parameters.unsubscribeBackoff))
@@ -666,7 +655,7 @@ method publish*(g: GossipSub,

   g.mcache.put(msgId, msg)

-  g.broadcast(peers, RPCMsg(messages: @[msg]), isHighPriority = true)
+  g.broadcast(peers, RPCMsg(messages: @[msg]))

   if g.knownTopics.contains(topic):
     libp2p_pubsub_messages_published.inc(peers.len.int64, labelValues = [topic])
@@ -530,14 +530,14 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
   # Send changes to peers after table updates to avoid stale state
   if grafts.len > 0:
     let graft = RPCMsg(control: some(ControlMessage(graft: @[ControlGraft(topicID: topic)])))
-    g.broadcast(grafts, graft, isHighPriority = true)
+    g.broadcast(grafts, graft)
   if prunes.len > 0:
     let prune = RPCMsg(control: some(ControlMessage(
       prune: @[ControlPrune(
         topicID: topic,
         peers: g.peerExchangeList(topic),
         backoff: g.parameters.pruneBackoff.seconds.uint64)])))
-    g.broadcast(prunes, prune, isHighPriority = true)
+    g.broadcast(prunes, prune)

 proc dropFanoutPeers*(g: GossipSub) {.raises: [].} =
   # drop peers that we haven't published to in
@@ -669,7 +669,7 @@ proc onHeartbeat(g: GossipSub) {.raises: [].} =
           topicID: t,
           peers: g.peerExchangeList(t),
           backoff: g.parameters.pruneBackoff.seconds.uint64)])))
-      g.broadcast(prunes, prune, isHighPriority = true)
+      g.broadcast(prunes, prune)

   # pass by ptr in order to both signal we want to update metrics
   # and as well update the struct for each topic during this iteration
@@ -691,7 +691,7 @@ proc onHeartbeat(g: GossipSub) {.raises: [].} =
         libp2p_pubsub_broadcast_ihave.inc(labelValues = [ihave.topicId])
       else:
         libp2p_pubsub_broadcast_ihave.inc(labelValues = ["generic"])
-    g.send(peer, RPCMsg(control: some(control)), isHighPriority = true)
+    g.send(peer, RPCMsg(control: some(control)))

   g.mcache.shift() # shift the cache

@@ -138,34 +138,18 @@ method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =

   libp2p_pubsub_peers.set(p.peers.len.int64)

-proc send*(p: PubSub, peer: PubSubPeer, msg: RPCMsg, isHighPriority: bool) {.raises: [].} =
-  ## This procedure attempts to send a `msg` (of type `RPCMsg`) to the specified remote peer in the PubSub network.
+proc send*(p: PubSub, peer: PubSubPeer, msg: RPCMsg) {.raises: [].} =
+  ## Attempt to send `msg` to remote peer
   ##
-  ## Parameters:
-  ## - `p`: The `PubSub` instance.
-  ## - `peer`: An instance of `PubSubPeer` representing the peer to whom the message should be sent.
-  ## - `msg`: The `RPCMsg` instance that contains the message to be sent.
-  ## - `isHighPriority`: A boolean indicating whether the message should be treated as high priority.
-  ## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
-  ## priority messages have been sent.

   trace "sending pubsub message to peer", peer, msg = shortLog(msg)
-  asyncSpawn peer.send(msg, p.anonymize, isHighPriority)
+  peer.send(msg, p.anonymize)

 proc broadcast*(
   p: PubSub,
   sendPeers: auto, # Iteratble[PubSubPeer]
-  msg: RPCMsg,
-  isHighPriority: bool) {.raises: [].} =
-  ## This procedure attempts to send a `msg` (of type `RPCMsg`) to a specified group of peers in the PubSub network.
-  ##
-  ## Parameters:
-  ## - `p`: The `PubSub` instance.
-  ## - `sendPeers`: An iterable of `PubSubPeer` instances representing the peers to whom the message should be sent.
-  ## - `msg`: The `RPCMsg` instance that contains the message to be broadcast.
-  ## - `isHighPriority`: A boolean indicating whether the message should be treated as high priority.
-  ## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
-  ## priority messages have been sent.
+  msg: RPCMsg) {.raises: [].} =
+  ## Attempt to send `msg` to the given peers

   let npeers = sendPeers.len.int64
   for sub in msg.subscriptions:
@@ -211,19 +195,19 @@ proc broadcast*(

   if anyIt(sendPeers, it.hasObservers):
     for peer in sendPeers:
-      p.send(peer, msg, isHighPriority)
+      p.send(peer, msg)
   else:
     # Fast path that only encodes message once
     let encoded = encodeRpcMsg(msg, p.anonymize)
     for peer in sendPeers:
-      asyncSpawn peer.sendEncoded(encoded, isHighPriority)
+      asyncSpawn peer.sendEncoded(encoded)

 proc sendSubs*(p: PubSub,
                peer: PubSubPeer,
                topics: openArray[string],
                subscribe: bool) =
   ## send subscriptions to remote peer
-  p.send(peer, RPCMsg.withSubs(topics, subscribe), isHighPriority = true)
+  p.send(peer, RPCMsg.withSubs(topics, subscribe))

   for topic in topics:
     if subscribe:
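The doc comments removed above spell out the rule the reverted code applied: high-priority messages were dispatched immediately, while low-priority ones were queued and sent only after every pending high-priority send had finished. As a rough sketch of how the call sites in this diff chose the flag before the revert (all lines taken from hunks in this diff; not an exhaustive list):

    # treated as high priority before the revert
    g.broadcast(peers, RPCMsg(messages: @[msg]), isHighPriority = true)           # own publishes
    g.send(peer, RPCMsg(pong: rpcMsg.ping), isHighPriority = true)                # ping replies
    g.broadcast(grafts, graft, isHighPriority = true)                             # GRAFT/PRUNE control traffic
    p.send(peer, RPCMsg.withSubs(topics, subscribe), isHighPriority = true)       # subscriptions
    # treated as low priority before the revert
    g.broadcast(toSendPeers, RPCMsg(messages: @[msg]), isHighPriority = false)    # forwarded/relayed messages
    g.send(peer, RPCMsg(messages: messages), isHighPriority = false)              # IWANT replies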
@@ -31,9 +31,6 @@ when defined(libp2p_expensive_metrics):
   declareCounter(libp2p_pubsub_skipped_received_messages, "number of received skipped messages", labels = ["id"])
   declareCounter(libp2p_pubsub_skipped_sent_messages, "number of sent skipped messages", labels = ["id"])

-declareGauge(libp2p_gossipsub_priority_queue_size, "the number of messages in the priority queue", labels = ["id"])
-declareGauge(libp2p_gossipsub_non_priority_queue_size, "the number of messages in the non-priority queue", labels = ["id"])
-
 type
   PeerRateLimitError* = object of CatchableError

@@ -52,14 +49,6 @@ type
   DropConn* = proc(peer: PubSubPeer) {.gcsafe, raises: [].} # have to pass peer as it's unknown during init
   OnEvent* = proc(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe, raises: [].}

-  RpcMessageQueue* = ref object
-    # Tracks async tasks for sending high-priority peer-published messages.
-    sendPriorityQueue: Deque[Future[void]]
-    # Queue for lower-priority messages, like "IWANT" replies and relay messages.
-    nonPriorityQueue: AsyncQueue[seq[byte]]
-    # Task for processing non-priority message queue.
-    sendNonPriorityTask: Future[void]
-
   PubSubPeer* = ref object of RootObj
     getConn*: GetConn # callback to establish a new send connection
     onEvent*: OnEvent # Connectivity updates for peer
@@ -81,8 +70,6 @@ type
     behaviourPenalty*: float64 # the eventual penalty score
     overheadRateLimitOpt*: Opt[TokenBucket]

-    rpcmessagequeue: RpcMessageQueue
-
   RPCHandler* = proc(peer: PubSubPeer, data: seq[byte]): Future[void]
     {.gcsafe, raises: [].}

@@ -95,16 +82,6 @@ when defined(libp2p_agents_metrics):
       #so we have to read the parents short agent..
       p.sendConn.getWrapped().shortAgent

-proc getAgent*(peer: PubSubPeer): string =
-  return
-    when defined(libp2p_agents_metrics):
-      if peer.shortAgent.len > 0:
-        peer.shortAgent
-      else:
-        "unknown"
-    else:
-      "unknown"
-
 func hash*(p: PubSubPeer): Hash =
   p.peerId.hash

@@ -250,13 +227,17 @@ template sendMetrics(msg: RPCMsg): untyped =
         # metrics
         libp2p_pubsub_sent_messages.inc(labelValues = [$p.peerId, t])

-proc clearSendPriorityQueue(p: PubSubPeer) =
-  while p.rpcmessagequeue.sendPriorityQueue.len > 0 and p.rpcmessagequeue.sendPriorityQueue[0].finished:
-    when defined(libp2p_expensive_metrics):
-      libp2p_gossipsub_priority_queue_size.dec(labelValues = [$p.peerId])
-    discard p.rpcmessagequeue.sendPriorityQueue.popFirst()
+proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.async.} =
+  doAssert(not isNil(p), "pubsubpeer nil!")
+
+  if msg.len <= 0:
+    debug "empty message, skipping", p, msg = shortLog(msg)
+    return
+
+  if msg.len > p.maxMessageSize:
+    info "trying to send a msg too big for pubsub", maxSize=p.maxMessageSize, msgSize=msg.len
+    return

-proc sendMsg(p: PubSubPeer, msg: seq[byte]) {.async.} =
   if p.sendConn == nil:
     # Wait for a send conn to be setup. `connectOnce` will
     # complete this even if the sendConn setup failed
@@ -281,38 +262,6 @@ proc sendMsg(p: PubSubPeer, msg: seq[byte]) {.async.} =

     await conn.close() # This will clean up the send connection

-proc sendEncoded*(p: PubSubPeer, msg: seq[byte], isHighPriority: bool) {.async.} =
-  ## Asynchronously sends an encoded message to a specified `PubSubPeer`.
-  ##
-  ## Parameters:
-  ## - `p`: The `PubSubPeer` instance to which the message is to be sent.
-  ## - `msg`: The message to be sent, encoded as a sequence of bytes (`seq[byte]`).
-  ## - `isHighPriority`: A boolean indicating whether the message should be treated as high priority.
-  ## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
-  ## priority messages have been sent.
-  doAssert(not isNil(p), "pubsubpeer nil!")
-
-  if msg.len <= 0:
-    debug "empty message, skipping", p, msg = shortLog(msg)
-    return
-
-  if msg.len > p.maxMessageSize:
-    info "trying to send a msg too big for pubsub", maxSize=p.maxMessageSize, msgSize=msg.len
-    return
-
-  if isHighPriority:
-    p.clearSendPriorityQueue()
-    let f = p.sendMsg(msg)
-    if not f.finished:
-      p.rpcmessagequeue.sendPriorityQueue.addLast(f)
-      when defined(libp2p_expensive_metrics):
-        libp2p_gossipsub_priority_queue_size.inc(labelValues = [$p.peerId])
-  else:
-    await p.rpcmessagequeue.nonPriorityQueue.addLast(msg)
-    when defined(libp2p_expensive_metrics):
-      libp2p_gossipsub_non_priority_queue_size.inc(labelValues = [$p.peerId])
-  trace "message queued", p, msg = shortLog(msg)
-
 iterator splitRPCMsg(peer: PubSubPeer, rpcMsg: RPCMsg, maxSize: int, anonymize: bool): seq[byte] =
   ## This iterator takes an `RPCMsg` and sequentially repackages its Messages into new `RPCMsg` instances.
   ## Each new `RPCMsg` accumulates Messages until reaching the specified `maxSize`. If a single Message
@@ -348,16 +297,7 @@ iterator splitRPCMsg(peer: PubSubPeer, rpcMsg: RPCMsg, maxSize: int, anonymize:
     else:
       trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)

-proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool, isHighPriority: bool) {.async.} =
-  ## Asynchronously sends an `RPCMsg` to a specified `PubSubPeer` with an option for anonymization.
-  ##
-  ## Parameters:
-  ## - `p`: The `PubSubPeer` instance to which the message is to be sent.
-  ## - `msg`: The `RPCMsg` instance representing the message to be sent.
-  ## - `anonymize`: A boolean flag indicating whether the message should be sent with anonymization.
-  ## - `isHighPriority`: A boolean flag indicating whether the message should be treated as high priority.
-  ## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
-  ## priority messages have been sent.
+proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [].} =
   # When sending messages, we take care to re-encode them with the right
   # anonymization flag to ensure that we're not penalized for sending invalid
   # or malicious data on the wire - in particular, re-encoding protects against
@@ -377,11 +317,11 @@ proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool, isHighPriority: bool) {.

   if encoded.len > p.maxMessageSize and msg.messages.len > 1:
     for encodedSplitMsg in splitRPCMsg(p, msg, p.maxMessageSize, anonymize):
-      await p.sendEncoded(encodedSplitMsg, isHighPriority)
+      asyncSpawn p.sendEncoded(encodedSplitMsg)
   else:
     # If the message size is within limits, send it as is
     trace "sending msg to peer", peer = p, rpcMsg = shortLog(msg)
-    await p.sendEncoded(encoded, isHighPriority)
+    asyncSpawn p.sendEncoded(encoded)

 proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
   for sentIHave in p.sentIHaves.mitems():
@@ -390,43 +330,6 @@ proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
       return true
   return false

-proc sendNonPriorityTask(p: PubSubPeer) {.async.} =
-  while true:
-    # we send non-priority messages only if there are no pending priority messages
-    let msg = await p.rpcmessagequeue.nonPriorityQueue.popFirst()
-    while p.rpcmessagequeue.sendPriorityQueue.len > 0:
-      p.clearSendPriorityQueue()
-      # this minimizes the number of times we have to wait for something (each wait = performance cost)
-      # we will never wait for a finished future and by waiting for the last one, all that come before it are guaranteed
-      # to be finished already (since sends are processed in order).
-      if p.rpcmessagequeue.sendPriorityQueue.len > 0:
-        await p.rpcmessagequeue.sendPriorityQueue[^1]
-    when defined(libp2p_expensive_metrics):
-      libp2p_gossipsub_non_priority_queue_size.dec(labelValues = [$p.peerId])
-    await p.sendMsg(msg)
-
-proc startSendNonPriorityTask(p: PubSubPeer) =
-  debug "starting sendNonPriorityTask", p
-  if p.rpcmessagequeue.sendNonPriorityTask.isNil:
-    p.rpcmessagequeue.sendNonPriorityTask = p.sendNonPriorityTask()
-
-proc stopSendNonPriorityTask*(p: PubSubPeer) =
-  if not p.rpcmessagequeue.sendNonPriorityTask.isNil:
-    debug "stopping sendNonPriorityTask", p
-    p.rpcmessagequeue.sendNonPriorityTask.cancel()
-    p.rpcmessagequeue.sendNonPriorityTask = nil
-    p.rpcmessagequeue.sendPriorityQueue.clear()
-    p.rpcmessagequeue.nonPriorityQueue.clear()
-    when defined(libp2p_expensive_metrics):
-      libp2p_gossipsub_priority_queue_size.set(labelValues = [$p.peerId], value = 0)
-      libp2p_gossipsub_non_priority_queue_size.set(labelValues = [$p.peerId], value = 0)
-
-proc new(T: typedesc[RpcMessageQueue]): T =
-  return T(
-    sendPriorityQueue: initDeque[Future[void]](),
-    nonPriorityQueue: newAsyncQueue[seq[byte]](),
-  )
-
 proc new*(
   T: typedesc[PubSubPeer],
   peerId: PeerId,
@@ -443,9 +346,17 @@ proc new*(
     peerId: peerId,
     connectedFut: newFuture[void](),
     maxMessageSize: maxMessageSize,
-    overheadRateLimitOpt: overheadRateLimitOpt,
-    rpcmessagequeue: RpcMessageQueue.new(),
+    overheadRateLimitOpt: overheadRateLimitOpt
   )
   result.sentIHaves.addFirst(default(HashSet[MessageId]))
   result.heDontWants.addFirst(default(HashSet[MessageId]))
-  result.startSendNonPriorityTask()
+
+proc getAgent*(peer: PubSubPeer): string =
+  return
+    when defined(libp2p_agents_metrics):
+      if peer.shortAgent.len > 0:
+        peer.shortAgent
+      else:
+        "unknown"
+    else:
+      "unknown"
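The queueing machinery deleted in the hunks above can be summarised as a producer/consumer pair; the following is a condensed, hedged sketch assembled from the removed lines (field and proc names as they appear there), not a drop-in replacement:

    # producer side (removed sendEncoded): high-priority sends start immediately and their
    # futures are tracked; low-priority payloads are parked in an AsyncQueue
    if isHighPriority:
      p.clearSendPriorityQueue()                      # drop already-finished futures
      let f = p.sendMsg(msg)
      if not f.finished:
        p.rpcmessagequeue.sendPriorityQueue.addLast(f)
    else:
      await p.rpcmessagequeue.nonPriorityQueue.addLast(msg)

    # consumer side (removed sendNonPriorityTask): a background loop that only writes a queued
    # low-priority message once the priority deque has drained, waiting on the last pending
    # future so earlier ones are guaranteed to have completed as well
    while true:
      let msg = await p.rpcmessagequeue.nonPriorityQueue.popFirst()
      while p.rpcmessagequeue.sendPriorityQueue.len > 0:
        p.clearSendPriorityQueue()
        if p.rpcmessagequeue.sendPriorityQueue.len > 0:
          await p.rpcmessagequeue.sendPriorityQueue[^1]
      await p.sendMsg(msg)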
@@ -779,7 +779,7 @@ suite "GossipSub internal":

     gossip1.broadcast(gossip1.mesh["foobar"], RPCMsg(control: some(ControlMessage(
       ihave: @[ControlIHave(topicId: "foobar", messageIds: iwantMessageIds)]
-    ))), isHighPriority = false)
+    ))))

     checkUntilTimeout: receivedMessages[] == sentMessages
     check receivedMessages[].len == 2
@@ -796,7 +796,7 @@ suite "GossipSub internal":

     gossip1.broadcast(gossip1.mesh["foobar"], RPCMsg(control: some(ControlMessage(
       ihave: @[ControlIHave(topicId: "foobar", messageIds: bigIWantMessageIds)]
-    ))), isHighPriority = false)
+    ))))

     await sleepAsync(300.milliseconds)
     checkUntilTimeout: receivedMessages[].len == 0
@@ -813,7 +813,7 @@ suite "GossipSub internal":

     gossip1.broadcast(gossip1.mesh["foobar"], RPCMsg(control: some(ControlMessage(
       ihave: @[ControlIHave(topicId: "foobar", messageIds: bigIWantMessageIds)]
-    ))), isHighPriority = false)
+    ))))

     checkUntilTimeout: receivedMessages[] == sentMessages
     check receivedMessages[].len == 2
@@ -831,7 +831,7 @@ suite "GossipSub internal":

     gossip1.broadcast(gossip1.mesh["foobar"], RPCMsg(control: some(ControlMessage(
       ihave: @[ControlIHave(topicId: "foobar", messageIds: bigIWantMessageIds)]
-    ))), isHighPriority = false)
+    ))))

     var smallestSet: HashSet[seq[byte]]
     let seqs = toSeq(sentMessages)
@@ -912,7 +912,7 @@ suite "GossipSub":

     gossip3.broadcast(gossip3.mesh["foobar"], RPCMsg(control: some(ControlMessage(
       idontwant: @[ControlIWant(messageIds: @[newSeq[byte](10)])]
-    ))), isHighPriority = true)
+    ))))
     checkUntilTimeout: gossip2.mesh.getOrDefault("foobar").anyIt(it.heDontWants[^1].len == 1)

     tryPublish await nodes[0].publish("foobar", newSeq[byte](10000)), 1
@@ -968,10 +968,7 @@ suite "GossipSub":
     let rateLimitHits = currentRateLimitHits()
     let (nodes, gossip0, gossip1) = await initializeGossipTest()

-    gossip0.broadcast(
-      gossip0.mesh["foobar"],
-      RPCMsg(messages: @[Message(topicIDs: @["foobar"], data: newSeq[byte](10))]),
-      isHighPriority = true)
+    gossip0.broadcast(gossip0.mesh["foobar"], RPCMsg(messages: @[Message(topicIDs: @["foobar"], data: newSeq[byte](10))]))
     await sleepAsync(300.millis)

     check currentRateLimitHits() == rateLimitHits
@@ -979,10 +976,7 @@ suite "GossipSub":

     # Disconnect peer when rate limiting is enabled
     gossip1.parameters.disconnectPeerAboveRateLimit = true
-    gossip0.broadcast(
-      gossip0.mesh["foobar"],
-      RPCMsg(messages: @[Message(topicIDs: @["foobar"], data: newSeq[byte](12))]),
-      isHighPriority = true)
+    gossip0.broadcast(gossip0.mesh["foobar"], RPCMsg(messages: @[Message(topicIDs: @["foobar"], data: newSeq[byte](12))]))
     await sleepAsync(300.millis)

     check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
@@ -996,7 +990,7 @@ suite "GossipSub":
     let (nodes, gossip0, gossip1) = await initializeGossipTest()

     # Simulate sending an undecodable message
-    await gossip1.peers[gossip0.switch.peerInfo.peerId].sendEncoded(newSeqWith[byte](33, 1.byte), isHighPriority = true)
+    await gossip1.peers[gossip0.switch.peerInfo.peerId].sendEncoded(newSeqWith[byte](33, 1.byte))
     await sleepAsync(300.millis)

     check currentRateLimitHits() == rateLimitHits + 1
@@ -1004,7 +998,7 @@ suite "GossipSub":

     # Disconnect peer when rate limiting is enabled
     gossip1.parameters.disconnectPeerAboveRateLimit = true
-    await gossip0.peers[gossip1.switch.peerInfo.peerId].sendEncoded(newSeqWith[byte](35, 1.byte), isHighPriority = true)
+    await gossip0.peers[gossip1.switch.peerInfo.peerId].sendEncoded(newSeqWith[byte](35, 1.byte))

     checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
     check currentRateLimitHits() == rateLimitHits + 2
@@ -1020,7 +1014,7 @@ suite "GossipSub":
         PeerInfoMsg(peerId: PeerId(data: newSeq[byte](33)))
       ], backoff: 123'u64)
     ])))
-    gossip0.broadcast(gossip0.mesh["foobar"], msg, isHighPriority = true)
+    gossip0.broadcast(gossip0.mesh["foobar"], msg)
     await sleepAsync(300.millis)

     check currentRateLimitHits() == rateLimitHits + 1
@@ -1033,7 +1027,7 @@ suite "GossipSub":
         PeerInfoMsg(peerId: PeerId(data: newSeq[byte](35)))
       ], backoff: 123'u64)
     ])))
-    gossip0.broadcast(gossip0.mesh["foobar"], msg2, isHighPriority = true)
+    gossip0.broadcast(gossip0.mesh["foobar"], msg2)

     checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
     check currentRateLimitHits() == rateLimitHits + 2
@@ -1055,7 +1049,7 @@ suite "GossipSub":

     let msg = RPCMsg(messages: @[Message(topicIDs: @[topic], data: newSeq[byte](40))])

-    gossip0.broadcast(gossip0.mesh[topic], msg, isHighPriority = true)
+    gossip0.broadcast(gossip0.mesh[topic], msg)
     await sleepAsync(300.millis)

     check currentRateLimitHits() == rateLimitHits + 1
@@ -1063,10 +1057,7 @@ suite "GossipSub":

     # Disconnect peer when rate limiting is enabled
     gossip1.parameters.disconnectPeerAboveRateLimit = true
-    gossip0.broadcast(
-      gossip0.mesh[topic],
-      RPCMsg(messages: @[Message(topicIDs: @[topic], data: newSeq[byte](35))]),
-      isHighPriority = true)
+    gossip0.broadcast(gossip0.mesh[topic], RPCMsg(messages: @[Message(topicIDs: @[topic], data: newSeq[byte](35))]))

     checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
     check currentRateLimitHits() == rateLimitHits + 2