Mirror of https://github.com/status-im/nim-libp2p.git (synced 2025-02-21 09:08:17 +00:00)
Use semaphores to limit simultaneous transmissions; this can speed up message reception for large messages.
Commit 1e2d733c8d (parent 84659af45b)
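
In brief, the change gives the PubSub object a single AsyncSemaphore with a small number of permits; each large transmission acquires a permit before writing and releases it afterwards, so only a few large writes compete for the link at once, as the diff below shows. The following is a minimal, hedged sketch of that pattern, assuming chronos and nim-libp2p's utils/semaphore module are importable; sendLarge, writeData and the constants here are illustrative stand-ins, not library API.

import chronos
import libp2p/utils/semaphore

const MaxSimultaneousTx = 2            # mirrors DefaultMaxSimultaneousTx in the diff below

let semTxLimit = newAsyncSemaphore(MaxSimultaneousTx)

proc writeData(msg: seq[byte]): Future[void] {.async.} =
  await sleepAsync(100.milliseconds)   # stand-in for conn.writeLp(msg)

proc sendLarge(msg: seq[byte]) {.async.} =
  await semTxLimit.acquire()           # wait for a free transmission slot
  try:
    let fut = writeData(msg)
    # give the write a bounded time slice, then let it finish in the background
    await fut or sleepAsync(450.milliseconds)
    if not fut.completed:
      asyncSpawn fut
  finally:
    semTxLimit.release()               # free the slot for the next sender

when isMainModule:
  waitFor allFutures(sendLarge(@[1'u8]), sendLarge(@[2'u8]), sendLarge(@[3'u8]))
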
@@ -28,7 +28,8 @@ import ./errors as pubsub_errors,
        ../../peerid,
        ../../peerinfo,
        ../../errors,
-       ../../utility
+       ../../utility,
+       ../../utils/semaphore
 
 import metrics
 import stew/results
@@ -80,6 +81,11 @@ declarePublicCounter(libp2p_pubsub_received_ihave, "pubsub broadcast ihave", lab
 declarePublicCounter(libp2p_pubsub_received_graft, "pubsub broadcast graft", labels = ["topic"])
 declarePublicCounter(libp2p_pubsub_received_prune, "pubsub broadcast prune", labels = ["topic"])
 
+# The number of simultaneous transmissions. A smaller number can speed up message reception and relaying.
+# Ideally this should be an adaptive number, increasing for low-bandwidth peers and decreasing for high-bandwidth peers.
+const
+  DefaultMaxSimultaneousTx* = 2
+
 type
   InitializationError* = object of LPError
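
To make the constant concrete: with two permits, a third concurrent transmission blocks in acquire() until one of the two in-flight transmissions releases its slot. A small hedged demonstration, again assuming chronos and nim-libp2p's utils/semaphore (the demo proc is illustrative only):

import chronos
import libp2p/utils/semaphore

proc demo() {.async.} =
  let sem = newAsyncSemaphore(2)       # same capacity as DefaultMaxSimultaneousTx
  await sem.acquire()                  # transmission slot 1 taken
  await sem.acquire()                  # transmission slot 2 taken
  let third = sem.acquire()            # a third simultaneous transmission...
  await sleepAsync(10.milliseconds)
  doAssert not third.completed         # ...has to wait for a free slot
  sem.release()                        # one transmission finishes
  await third                          # now the third sender may transmit
  sem.release()
  sem.release()

when isMainModule:
  waitFor demo()
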
@@ -128,6 +134,7 @@ type
     rng*: ref HmacDrbgContext
 
     knownTopics*: HashSet[string]
+    semTxLimit: AsyncSemaphore
 
 method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =
   ## handle peer disconnects
@@ -311,7 +318,7 @@ method getOrCreatePeer*(
     p.onPubSubPeerEvent(peer, event)
 
   # create new pubsub peer
-  let pubSubPeer = PubSubPeer.new(peerId, getConn, onEvent, protos[0], p.maxMessageSize)
+  let pubSubPeer = PubSubPeer.new(peerId, getConn, onEvent, protos[0], p.maxMessageSize, addr p.semTxLimit)
   debug "created new pubsub peer", peerId
 
   p.peers[peerId] = pubSubPeer
@@ -510,6 +517,7 @@ method initPubSub*(p: PubSub)
   p.observers = new(seq[PubSubObserver])
   if p.msgIdProvider == nil:
     p.msgIdProvider = defaultMsgIdProvider
+  p.semTxLimit = newAsyncSemaphore(DefaultMaxSimultaneousTx)
 
 method addValidator*(p: PubSub,
                      topic: varargs[string],

@@ -19,7 +19,8 @@ import rpc/[messages, message, protobuf],
       ../../stream/connection,
       ../../crypto/crypto,
       ../../protobuf/minprotobuf,
-      ../../utility
+      ../../utility,
+      ../../utils/semaphore
 
 export peerid, connection, deques
@@ -91,6 +92,7 @@ type
     behaviourPenalty*: float64 # the eventual penalty score
     overheadRateLimitOpt*: Opt[TokenBucket]
 
+    semTxLimit: ptr AsyncSemaphore # Controls max simultaneous transmissions to speed up individual receptions
     rpcmessagequeue: RpcMessageQueue
     maxNumElementsInNonPriorityQueue*: int # The max number of elements allowed in the non-priority queue.
     disconnected: bool
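
Design note: the PubSub object owns the single AsyncSemaphore (created in initPubSub above), while each PubSubPeer only holds a ptr to it, handed over through addr p.semTxLimit in getOrCreatePeer. All peers of a node therefore share one pool of DefaultMaxSimultaneousTx transmission slots rather than each peer enforcing its own limit.
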
@@ -311,22 +313,34 @@ proc sendMsgSlow(p: PubSubPeer, msg: seq[byte]) {.async.} =
     debug "No send connection", p, msg = shortLog(msg)
     return
 
-  trace "sending encoded msg to peer", conn, encoded = shortLog(msg)
-  await sendMsgContinue(conn, conn.writeLp(msg))
-
-proc sendMsg(p: PubSubPeer, msg: seq[byte]): Future[void] =
+proc sendMsg(p: PubSubPeer, msg: seq[byte]): Future[void] {.async.} =
+  if p.sendConn == nil or p.sendConn.closed():
+    await sendMsgSlow(p, msg)
+
   if p.sendConn != nil and not p.sendConn.closed():
-    # Fast path that avoids copying msg (which happens for {.async.})
     let conn = p.sendConn
 
     trace "sending encoded msg to peer", conn, encoded = shortLog(msg)
-    let f = conn.writeLp(msg)
-    if not f.completed():
-      sendMsgContinue(conn, f)
+
+    if msg.len < 2000: # Ideally we should only forward idontwant messages through this fast path
+      let f = conn.writeLp(msg)
+      await f or sleepAsync(5.milliseconds)
+      if not f.completed:
+        asyncSpawn sendMsgContinue(conn, f)
     else:
-      f
-  else:
-    sendMsgSlow(p, msg)
+      await p.semTxLimit[].acquire()
+      try:
+        let f = conn.writeLp(msg)
+        # Ideally sleep time should be based on peer bandwidth and message size
+        await f or sleepAsync(450.milliseconds)
+
+        if not f.completed:
+          asyncSpawn sendMsgContinue(conn, f)
+        p.semTxLimit[].release()
+
+      except CatchableError as exc:
+        p.semTxLimit[].release()
+        await conn.close()
 
 proc sendEncoded*(p: PubSubPeer, msg: seq[byte], isHighPriority: bool): Future[void] =
   ## Asynchronously sends an encoded message to a specified `PubSubPeer`.
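
The reworked sendMsg holds a transmission slot only for a bounded time: a large write races conn.writeLp against a 450 ms sleep, and if it has not completed by then the write is handed off to sendMsgContinue via asyncSpawn and the semaphore is released, so one slow peer cannot pin a slot indefinitely. Messages under 2000 bytes (intended mainly for idontwant control traffic, per the comment) skip the semaphore entirely on the fast path, and the except branch releases the slot and closes the connection on failure.
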
@@ -499,6 +513,7 @@ proc new*(
     onEvent: OnEvent,
     codec: string,
     maxMessageSize: int,
+    sem: ptr AsyncSemaphore,
     maxNumElementsInNonPriorityQueue: int = DefaultMaxNumElementsInNonPriorityQueue,
     overheadRateLimitOpt: Opt[TokenBucket] = Opt.none(TokenBucket)): T =
@@ -516,3 +531,4 @@ proc new*(
   result.sentIHaves.addFirst(default(HashSet[MessageId]))
   result.heDontWants.addFirst(default(HashSet[SaltedId]))
   result.startSendNonPriorityTask()
+  result.semTxLimit = sem