remove pubsub peer on disconnect (#212)
* remove pubsub peer on disconnect
* make sure lock is acquired
* add $
* count upgrades/dials/disconnects
parent 92579435b6
commit 6b196ad7b4
```diff
@@ -64,6 +64,7 @@ template withWriteLock(lock: AsyncLock, body: untyped): untyped =
     await lock.acquire()
     body
   finally:
+    if not(isNil(lock)) and lock.locked:
       lock.release()
 
 template withEOFExceptions(body: untyped): untyped =
```
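The added guard means that an `acquire()` which fails or is cancelled before the lock is actually held can no longer cause a second error from `release()` in the `finally` branch. A minimal, self-contained sketch of the same template in use, assuming chronos is available; the `demo` proc is illustrative only:

```nim
import chronos

template withWriteLock(lock: AsyncLock, body: untyped): untyped =
  try:
    await lock.acquire()
    body
  finally:
    # only release when the lock exists and is actually held
    if not(isNil(lock)) and lock.locked:
      lock.release()

proc demo() {.async.} =
  let lock = newAsyncLock()
  withWriteLock(lock):
    echo "critical section"   # runs with the lock held

waitFor demo()
```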
```diff
@@ -40,6 +40,8 @@ type
 proc id*(p: PeerInfo): string =
   p.peerId.pretty()
 
+proc `$`*(p: PeerInfo): string = p.id
+
 proc shortLog*(p: PeerInfo): auto =
   (
     id: p.id(),
```
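With `$` defined, a `PeerInfo` can be passed straight to string concatenation and logging. A small usage sketch, assuming the repo's module layout; the peer id string is just an example value:

```nim
import libp2p/peerinfo

let info = PeerInfo.init("QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N")  # example peer id

echo $info                     # same as info.id: the pretty-printed peer id
echo "connected to " & $info   # convenient in log and debug messages
```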
```diff
@@ -58,30 +60,36 @@ template postInit(peerinfo: PeerInfo,
   peerinfo.protocols = @protocols
   peerinfo.lifefut = newFuture[void]("libp2p.peerinfo.lifetime")
 
-proc init*(p: typedesc[PeerInfo], key: PrivateKey,
+proc init*(p: typedesc[PeerInfo],
+           key: PrivateKey,
            addrs: openarray[MultiAddress] = [],
            protocols: openarray[string] = []): PeerInfo {.inline.} =
   result = PeerInfo(keyType: HasPrivate, peerId: PeerID.init(key),
                     privateKey: key)
   result.postInit(addrs, protocols)
 
-proc init*(p: typedesc[PeerInfo], peerId: PeerID,
+proc init*(p: typedesc[PeerInfo],
+           peerId: PeerID,
            addrs: openarray[MultiAddress] = [],
            protocols: openarray[string] = []): PeerInfo {.inline.} =
   result = PeerInfo(keyType: HasPublic, peerId: peerId)
   result.postInit(addrs, protocols)
 
-proc init*(p: typedesc[PeerInfo], peerId: string,
+proc init*(p: typedesc[PeerInfo],
+           peerId: string,
            addrs: openarray[MultiAddress] = [],
            protocols: openarray[string] = []): PeerInfo {.inline.} =
   result = PeerInfo(keyType: HasPublic, peerId: PeerID.init(peerId))
   result.postInit(addrs, protocols)
 
-proc init*(p: typedesc[PeerInfo], key: PublicKey,
+proc init*(p: typedesc[PeerInfo],
+           key: PublicKey,
            addrs: openarray[MultiAddress] = [],
            protocols: openarray[string] = []): PeerInfo {.inline.} =
-  result = PeerInfo(keyType: HasPublic, peerId: PeerID.init(key),
+  result = PeerInfo(keyType: HasPublic,
+                    peerId: PeerID.init(key),
                     key: some(key))
 
   result.postInit(addrs, protocols)
 
 proc close*(p: PeerInfo) {.inline.} =
```
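The signatures above are only reflowed onto multiple lines; the parameters and call sites stay the same. A brief sketch of one overload called with named arguments, assuming the repo's module layout; the peer id and protocol values are illustrative:

```nim
import libp2p/peerinfo

let info = PeerInfo.init(
  "QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N",  # example peer id
  protocols = ["/floodsub/1.0.0"])                   # addrs keeps its default

echo info.id
echo info.protocols
```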
```diff
@@ -49,6 +49,8 @@ method subscribeTopic*(f: FloodSub,
       f.floodsub[topic].excl(peerId)
 
 method handleDisconnect*(f: FloodSub, peer: PubSubPeer) {.async.} =
+  await procCall PubSub(f).handleDisconnect(peer)
+
   ## handle peer disconnects
   for t in f.floodsub.keys:
     f.floodsub[t].excl(peer.id)
```
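The added first line chains to the base implementation: `procCall` forces a static call to `PubSub.handleDisconnect`, so the shared cleanup (removing the peer from `p.peers`) runs in addition to the FloodSub-specific removal from each topic. A self-contained sketch of that Nim pattern, with placeholder `Base`/`Derived` types rather than the libp2p ones:

```nim
import chronos

type
  Base = ref object of RootObj
  Derived = ref object of Base

method cleanup(b: Base) {.async, base.} =
  echo "base cleanup"                # e.g. drop the peer from the shared table

method cleanup(d: Derived) {.async.} =
  await procCall Base(d).cleanup()   # run the base implementation first
  echo "derived cleanup"             # then the subtype-specific part

waitFor cleanup(Derived())
```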
```diff
@@ -96,15 +96,17 @@ method handleDisconnect*(p: PubSub, peer: PubSubPeer) {.async, base.} =
   ## handle peer disconnects
   if peer.id in p.peers:
     p.peers.del(peer.id)
 
   # metrics
   libp2p_pubsub_peers.dec()
 
 proc cleanUpHelper(p: PubSub, peer: PubSubPeer) {.async.} =
+  try:
     await p.cleanupLock.acquire()
+    peer.refs.dec() # decrement refcount
     if peer.refs == 0:
       await p.handleDisconnect(peer)
-  peer.refs.dec() # decrement refcount
+  finally:
     p.cleanupLock.release()
 
 proc getPeer(p: PubSub,
```
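Two things change here: the reference count is decremented before the zero check, so the peer is cleaned up on the call that drops the count to zero, and the release moves into a `finally` so the lock is returned even if `handleDisconnect` raises. A stripped-down sketch of the same shape; `RefCounted` and `dropRef` are placeholders, not libp2p code:

```nim
import chronos

type RefCounted = ref object
  refs: int

proc dropRef(p: RefCounted, lock: AsyncLock) {.async.} =
  try:
    await lock.acquire()
    p.refs.dec()                 # drop this holder's reference under the lock
    if p.refs == 0:
      echo "last reference gone, cleaning up"
  finally:
    lock.release()               # always hand the lock back

proc demo() {.async.} =
  let lock = newAsyncLock()
  await RefCounted(refs: 1).dropRef(lock)

waitFor demo()
```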
```diff
@@ -117,11 +119,12 @@ proc getPeer(p: PubSub,
   # create new pubsub peer
   let peer = newPubSubPeer(peerInfo, proto)
   trace "created new pubsub peer", peerId = peer.id
 
   # metrics
   libp2p_pubsub_peers.inc()
 
   p.peers[peer.id] = peer
-  peer.refs.inc # increment reference cound
+  peer.refs.inc # increment reference count
   peer.observers = p.observers
   result = peer
```
```diff
@@ -8,7 +8,7 @@
 ## those terms.
 
 import tables, sequtils, options, strformat, sets
-import chronos, chronicles
+import chronos, chronicles, metrics
 import connection,
        transports/transport,
        multistream,
```
```diff
@@ -31,6 +31,11 @@ logScope:
 # and only if the channel has been secured (i.e. if a secure manager has been
 # previously provided)
 
+declareGauge(libp2p_connected_peers, "total connected peers")
+declareCounter(libp2p_dialed_peers, "dialed peers")
+declareCounter(libp2p_failed_dials, "failed dials")
+declareCounter(libp2p_failed_upgrade, "peers failed upgrade")
+
 type
   NoPubSubException = object of CatchableError
```
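These collectors come from nim-metrics: `declareGauge`/`declareCounter` create module-level metrics that are later bumped with `inc()`/`dec()`, and they are compiled out unless metrics support is enabled. A minimal sketch, assuming the `metrics` package is installed; the metric names and helper procs below are examples, not the ones added by this commit:

```nim
import metrics

declareGauge(example_connected_peers, "total connected peers")
declareCounter(example_dialed_peers, "dialed peers")
declareCounter(example_failed_dials, "failed dials")

proc onDialAttempt(success: bool) =
  if success:
    example_dialed_peers.inc()       # successful dials are counted
    example_connected_peers.inc()    # gauge goes up on connect...
  else:
    example_failed_dials.inc()       # failures are counted separately

proc onDisconnect() =
  example_connected_peers.dec()      # ...and back down on disconnect

onDialAttempt(true)
onDisconnect()
```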
```diff
@@ -90,6 +95,7 @@ proc identify(s: Switch, conn: Connection): Future[PeerInfo] {.async, gcsafe.} =
 
     if info.protos.len > 0:
       result.protocols = info.protos
 
     debug "identify", info = shortLog(result)
   except IdentityInvalidMsgError as exc:
     error "identify: invalid message", msg = exc.msg
```
```diff
@@ -152,6 +158,7 @@ proc cleanupConn(s: Switch, conn: Connection) {.async, gcsafe.} =
 
     s.dialedPubSubPeers.excl(id)
 
+  libp2p_connected_peers.dec()
   # TODO: Investigate cleanupConn() always called twice for one peer.
   if not(conn.peerInfo.isClosed()):
     conn.peerInfo.close()
```
```diff
@@ -245,17 +252,27 @@ proc internalConnect(s: Switch,
       for a in peer.addrs: # for each address
         if t.handles(a):   # check if it can dial it
           trace "Dialing address", address = $a
+          try:
             conn = await t.dial(a)
+            libp2p_dialed_peers.inc()
+          except CatchableError as exc:
+            trace "dialing failed", exc = exc.msg
+            libp2p_failed_dials.inc()
+            continue
 
           # make sure to assign the peer to the connection
           conn.peerInfo = peer
 
           conn = await s.upgradeOutgoing(conn)
           if isNil(conn):
+            libp2p_failed_upgrade.inc()
             continue
 
           conn.closeEvent.wait()
             .addCallback do(udata: pointer):
               asyncCheck s.cleanupConn(conn)
 
+          libp2p_connected_peers.inc()
           break
     else:
       trace "Reusing existing connection"
```
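Wrapping the dial per address means one unreachable address no longer aborts the whole connect attempt: the failure is counted and the loop moves on to the next address. A schematic sketch of that control flow; `tryDial` and `connectAny` are placeholders, not the Switch API:

```nim
import chronos

proc tryDial(a: string): Future[void] {.async.} =
  # stand-in for `t.dial(a)`: pretend only the second address is reachable
  if a != "/ip4/127.0.0.1/tcp/4002":
    raise newException(CatchableError, "connection refused")

proc connectAny(addrs: seq[string]): Future[bool] {.async.} =
  for a in addrs:
    try:
      await tryDial(a)   # in the Switch this is `conn = await t.dial(a)`
      return true        # success: the real code bumps libp2p_dialed_peers here
    except CatchableError as exc:
      echo "dial failed: ", exc.msg
      continue           # failure: the real code bumps libp2p_failed_dials and tries the next address
  return false

echo waitFor(connectAny(@["/ip4/127.0.0.1/tcp/4001", "/ip4/127.0.0.1/tcp/4002"]))
```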
```diff
@@ -308,6 +325,7 @@ proc start*(s: Switch): Future[seq[Future[void]]] {.async, gcsafe.} =
   proc handle(conn: Connection): Future[void] {.async, closure, gcsafe.} =
     try:
       try:
+        libp2p_connected_peers.inc()
         await s.upgradeIncoming(conn) # perform upgrade on incoming connection
       finally:
         await s.cleanupConn(conn)
```
```diff
@@ -470,6 +488,7 @@ proc newSwitch*(peerInfo: PeerInfo,
       # try establishing a pubsub connection
       await s.subscribeToPeer(muxer.connection.peerInfo)
     except CatchableError as exc:
+      libp2p_failed_upgrade.inc()
      trace "exception in muxer handler", exc = exc.msg
     finally:
       if not(isNil(stream)):
```