mirror of https://github.com/vacp2p/nim-libp2p.git
No raise send (#339)
* don't raise in send
* check that the lock is acquired on release
This commit is contained in:
parent 840a76915e
commit d3182c4dba
@@ -157,13 +157,13 @@ method unsubscribe*(f: FloodSub,
   await procCall PubSub(f).unsubscribe(topics)
 
   for p in f.peers.values:
-    await f.sendSubs(p, topics.mapIt(it.topic).deduplicate(), false)
+    discard await f.sendSubs(p, topics.mapIt(it.topic).deduplicate(), false)
 
 method unsubscribeAll*(f: FloodSub, topic: string) {.async.} =
   await procCall PubSub(f).unsubscribeAll(topic)
 
   for p in f.peers.values:
-    await f.sendSubs(p, @[topic], false)
+    discard await f.sendSubs(p, @[topic], false)
 
 method initPubSub*(f: FloodSub) =
   procCall PubSub(f).initPubSub()
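Since `sendSubs` now returns `Future[bool]`, call sites that ignore the outcome must say so explicitly: Nim refuses to silently drop a non-void expression result, hence the `discard await` above. A toy sketch of the pattern (hypothetical names, chronos as in the project):

import chronos

proc sendSubs(topic: string): Future[bool] {.async.} =
  ## Stand-in for the real sendSubs; pretend it always succeeds.
  return true

proc unsubscribeDemo() {.async.} =
  # `await sendSubs(...)` alone would be a compile error:
  # "expression ... is of type 'bool' and has to be used (or discarded)"
  discard await sendSubs("some-topic")

waitFor unsubscribeDemo()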
@@ -229,7 +229,7 @@ proc heartbeat(g: GossipSub) {.async.} =
       g.replenishFanout(t)
 
     let peers = g.getGossipPeers()
-    var sent: seq[Future[void]]
+    var sent: seq[Future[bool]]
     for peer, control in peers:
       g.peers.withValue(peer.peerId, pubsubPeer) do:
         sent &= g.send(
@@ -450,10 +450,14 @@ method rpcHandler*(g: GossipSub,
         respControl.ihave.len > 0:
       try:
         info "sending control message", msg = respControl
-        await g.send(
+        let sent = await g.send(
           peer,
           RPCMsg(control: some(respControl), messages: messages),
           DefaultSendTimeout)
+
+        if not sent:
+          g.unsubscribePeer(peer.peerId)
+
       except CancelledError as exc:
         raise exc
       except CatchableError as exc:
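With `send` reporting failure in-band, the control-message path above reacts to a failed send by dropping the peer instead of relying on the `except` branch. A compressed sketch of that flow (hypothetical `sendControl`/`dropPeer` stand-ins):

import chronos
import std/sequtils

var peers = @["peer-1", "peer-2"]

proc sendControl(peer: string): Future[bool] {.async.} =
  return peer != "peer-2"   # pretend peer-2's connection is gone

proc dropPeer(peer: string) =
  peers.keepItIf(it != peer)

proc controlRound() {.async.} =
  for peer in @["peer-1", "peer-2"]:
    let sent = await sendControl(peer)
    if not sent:
      dropPeer(peer)        # mirrors g.unsubscribePeer(peer.peerId)

waitFor controlRound()
echo peers                  # @["peer-1"]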
@@ -8,7 +8,7 @@
 ## those terms.
 
 import std/[tables, sequtils, sets]
-import chronos, chronicles, chronicles/chronos_tools, metrics
+import chronos, chronicles, metrics
 import pubsubpeer,
        rpc/[message, messages],
        ../../switch,
@@ -75,20 +75,20 @@ proc send*(
   p: PubSub,
   peer: PubSubPeer,
   msg: RPCMsg,
-  timeout: Duration) {.async.} =
+  timeout: Duration): Future[bool] {.async.} =
   ## send to remote peer
   ##
 
   trace "sending pubsub message to peer", peer = $peer, msg = shortLog(msg)
   try:
     await peer.send(msg, timeout)
+    return true
   except CancelledError as exc:
     raise exc
   except CatchableError as exc:
     trace "exception sending pubsub message to peer",
       peer = $peer, msg = shortLog(msg)
     p.unsubscribePeer(peer.peerId)
-    raise exc
 
 proc broadcast*(
   p: PubSub,
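This hunk is the heart of the commit: `send` now swallows `CatchableError` and reports the outcome as a `bool`, re-raising only `CancelledError` so cancellation still propagates. A minimal runnable sketch of the same shape (hypothetical `deliver` stand-in for the peer write):

import chronos

proc deliver(msg: string) {.async.} =
  ## Hypothetical stand-in for peer.send; fails on empty messages.
  if msg.len == 0:
    raise newException(ValueError, "empty message")

proc trySend(msg: string): Future[bool] {.async.} =
  try:
    await deliver(msg)
    return true
  except CancelledError as exc:
    raise exc               # cancellation must still escape
  except CatchableError:
    return false            # everything else becomes `false`

echo waitFor trySend("hello")   # true
echo waitFor trySend("")        # false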
@@ -102,12 +102,12 @@ proc broadcast*(
     peers = sendPeers.len, message = shortLog(msg)
   let sent = await allFinished(
     sendPeers.mapIt( p.send(it, msg, timeout) ))
-  return sent.filterIt( it.finished and it.error.isNil ).len
+  return sent.filterIt( it.finished and it.read ).len
 
 proc sendSubs*(p: PubSub,
                peer: PubSubPeer,
                topics: seq[string],
-               subscribe: bool): Future[void] =
+               subscribe: bool): Future[bool] =
   ## send subscriptions to remote peer
   p.send(
     peer,
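In `broadcast`, success counting changes accordingly: the old code asked whether each future completed without an error object, the new code reads the boolean result. The order in the filter matters: `it.finished` guards the `it.read`, and `read` will not raise here only because the new `send` converts failures into `false`. A sketch under those assumptions:

import chronos
import std/sequtils

proc send(id: int): Future[bool] {.async.} =
  return id mod 2 == 0        # pretend odd-numbered peers fail

proc broadcastDemo(): Future[int] {.async.} =
  let sent = await allFinished(toSeq(0 ..< 5).mapIt(send(it)))
  # finished futures whose result reads as true count as delivered
  return sent.filterIt(it.finished and it.read).len

echo waitFor broadcastDemo()    # 3 (ids 0, 2 and 4 succeed)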
@@ -175,11 +175,11 @@ method handleConn*(p: PubSub,
       # call pubsub rpc handler
       await p.rpcHandler(peer, msgs)
 
-  let peer = p.getOrCreatePeer(conn.peerInfo.peerId, proto)
-  if p.topics.len > 0:
-    await p.sendSubs(peer, toSeq(p.topics.keys), true)
   try:
+    let peer = p.getOrCreatePeer(conn.peerInfo.peerId, proto)
+    if p.topics.len > 0:
+      discard await p.sendSubs(peer, toSeq(p.topics.keys), true)
 
     peer.handler = handler
     await peer.handle(conn) # spawn peer read loop
     trace "pubsub peer handler ended", peer = peer.id
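Note how `handleConn` now performs peer creation and the initial subscription push inside the `try`: a failure during setup takes the same exception/cleanup path as a failure in the read loop, instead of escaping the handler. Schematically (hypothetical stand-ins):

import chronos

proc setupPeer(): Future[string] {.async.} =
  return "peer-1"             # stand-in for getOrCreatePeer + sendSubs

proc readLoop(peer: string) {.async.} =
  discard                     # stand-in for peer.handle(conn)

proc handleConnDemo() {.async.} =
  try:
    let peer = await setupPeer()   # setup failures land in `except` too
    await readLoop(peer)
  except CancelledError as exc:
    raise exc
  except CatchableError:
    echo "handler failed; cleanup runs here"

waitFor handleConnDemo()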
@@ -201,7 +201,7 @@ method subscribePeer*(p: PubSub, peer: PeerID) {.base.} =
     # to escape to the poll loop.
     # With a bit of luck, it may be harmless to ignore exceptions here -
     # some cleanup is eventually done in PubSubPeer.send
-    traceAsyncErrors p.sendSubs(pubsubPeer, toSeq(p.topics.keys), true)
+    asyncCheck p.sendSubs(pubsubPeer, toSeq(p.topics.keys), true)
 
     pubsubPeer.subscribed = true
 
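`subscribePeer` is not async, so the subscription push is detached rather than awaited. `traceAsyncErrors` (presumably from the `chronicles/chronos_tools` import dropped above) logged a failed future; plain `asyncCheck` just detaches it, which is defensible now that `sendSubs` reports failure via its result instead of raising. A toy fire-and-forget sketch (newer chronos spells this `asyncSpawn` and wants a `Future[void]`, so the bool is discarded in a wrapper):

import chronos

var delivered = 0

proc sendSubsDemo(): Future[bool] {.async.} =
  inc delivered
  return true

proc detachedPush() {.async.} =
  discard await sendSubsDemo()   # drop the bool, as the diff's callers do

# Fire-and-forget from a non-async context.
asyncSpawn detachedPush()
waitFor sleepAsync(10.milliseconds)  # give the detached future time to run
echo delivered                       # 1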
@@ -249,7 +249,7 @@ method subscribe*(p: PubSub,
 
   p.topics[topic].handler.add(handler)
 
-  var sent: seq[Future[void]]
+  var sent: seq[Future[bool]]
   for peer in toSeq(p.peers.values):
     sent.add(p.sendSubs(peer, @[topic], true))
 
@@ -129,7 +129,8 @@ proc initBufferStream*(s: BufferStream,
   if not(isNil(handler)):
     s.writeHandler = proc (data: seq[byte]) {.async, gcsafe.} =
       defer:
-        s.writeLock.release()
+        if s.writeLock.locked:
+          s.writeLock.release()
 
       # Using a lock here to guarantee
       # proper write ordering. This is
@@ -137,6 +138,7 @@ proc initBufferStream*(s: BufferStream,
       # implementing half-closed in mplex
      # or other functionality that requires
       # strict message ordering
+
       await s.writeLock.acquire()
       await handler(data)
 
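The second bullet of the commit message lands here: `AsyncLock.release` raises if the lock is not held, and the `defer` runs even when an exception fires before `acquire` completes, so the release is now guarded by `locked`. A sketch of the guard (assuming chronos's `AsyncLock` exposes `newAsyncLock`, `acquire`, `release` and `locked` as the diff uses them):

import chronos

proc guardedWrite(lock: AsyncLock, failEarly: bool) {.async.} =
  defer:
    if lock.locked:           # only release a lock we actually hold
      lock.release()
  if failEarly:
    raise newException(ValueError, "failed before acquiring the lock")
  await lock.acquire()
  # the ordered write would happen here, with the lock held

let lock = newAsyncLock()
try:
  waitFor lock.guardedWrite(failEarly = true)   # defer skips the release
except ValueError:
  discard
waitFor lock.guardedWrite(failEarly = false)    # acquires, then releases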