nim-libp2p/libp2p/switch.nim

## Nim-LibP2P
## Copyright (c) 2019 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

import tables,
       sequtils,
       options,
       sets,
       oids

import chronos,
       chronicles,
       metrics

import stream/connection,
       transports/transport,
       multistream,
       multiaddress,
       protocols/protocol,
       protocols/secure/secure,
       peerinfo,
       protocols/identify,
       muxers/muxer,
       connmanager,
       peerid,
       errors

logScope:
  topics = "switch"

#TODO: General note - use a finite state machine to manage the different
# steps of establishing and upgrading connections. This makes everything
# more robust and less prone to ordering attacks - i.e. muxing can happen
# if and only if the channel has been secured (i.e. a secure manager has
# been previously provided)

declareCounter(libp2p_dialed_peers, "dialed peers")
declareCounter(libp2p_failed_dials, "failed dials")
declareCounter(libp2p_failed_upgrade, "peers failed upgrade")

type
  UpgradeFailedError* = object of CatchableError
  DialFailedError* = object of CatchableError

  ConnEventKind* {.pure.} = enum
    Connected,    # A connection was made and securely upgraded - there may be
                  # more than one concurrent connection thus more than one
                  # upgrade event per peer.
    Disconnected  # Peer disconnected - this event is fired once per upgrade
                  # when the associated connection is terminated.

  ConnEvent* = object
    case kind*: ConnEventKind
    of ConnEventKind.Connected:
      incoming*: bool
    else:
      discard

  ConnEventHandler* =
    proc(peerId: PeerID, event: ConnEvent): Future[void] {.gcsafe.}

  PeerEvent* {.pure.} = enum
    Left,
    Joined

  PeerEventHandler* =
    proc(peerId: PeerID, event: PeerEvent): Future[void] {.gcsafe.}

  Switch* = ref object of RootObj
    peerInfo*: PeerInfo
    connManager: ConnManager
    transports*: seq[Transport]
    protocols*: seq[LPProtocol]
    muxers*: Table[string, MuxerProvider]
    ms*: MultistreamSelect
    identity*: Identify
    streamHandler*: StreamHandler
    secureManagers*: seq[Secure]
    dialLock: Table[PeerID, AsyncLock]
    connEvents: Table[ConnEventKind, OrderedSet[ConnEventHandler]]
    peerEvents: Table[PeerEvent, OrderedSet[PeerEventHandler]]

proc addConnEventHandler*(s: Switch,
                          handler: ConnEventHandler, kind: ConnEventKind) =
  ## Add connection event handler - handlers must not raise exceptions!
  ##
  if isNil(handler): return
  s.connEvents.mgetOrPut(kind,
    initOrderedSet[ConnEventHandler]()).incl(handler)

proc removeConnEventHandler*(s: Switch,
                             handler: ConnEventHandler, kind: ConnEventKind) =
  s.connEvents.withValue(kind, handlers) do:
    handlers[].excl(handler)
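
# Usage sketch (illustrative, not part of the module): registering a
# connection event handler on an already constructed switch. `sw` and
# `onConn` are placeholder names; handlers must not raise.
#
#   proc onConn(peerId: PeerID, event: ConnEvent) {.async, gcsafe.} =
#     if event.kind == ConnEventKind.Connected:
#       debug "peer upgraded", peerId, incoming = event.incoming
#
#   sw.addConnEventHandler(onConn, ConnEventKind.Connected)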

proc triggerConnEvent(s: Switch, peerId: PeerID, event: ConnEvent) {.async, gcsafe.} =
  try:
    if event.kind in s.connEvents:
      var connEvents: seq[Future[void]]
      for h in s.connEvents[event.kind]:
        connEvents.add(h(peerId, event))

      checkFutures(await allFinished(connEvents))
  except CancelledError as exc:
    raise exc
  except CatchableError as exc: # handlers should not raise!
    warn "Exception in triggerConnEvents",
      msg = exc.msg, peerId, event = $event

proc addPeerEventHandler*(s: Switch,
                          handler: PeerEventHandler,
                          kind: PeerEvent) =
  ## Add peer event handler - handlers must not raise exceptions!
  ##
  if isNil(handler): return
  s.peerEvents.mgetOrPut(kind,
    initOrderedSet[PeerEventHandler]()).incl(handler)

proc removePeerEventHandler*(s: Switch,
                             handler: PeerEventHandler,
                             kind: PeerEvent) =
  s.peerEvents.withValue(kind, handlers) do:
    handlers[].excl(handler)

proc triggerPeerEvents(s: Switch,
                       peerId: PeerID,
                       event: PeerEvent) {.async, gcsafe.} =

  if event notin s.peerEvents:
    return

  try:
    let count = s.connManager.connCount(peerId)
    if event == PeerEvent.Joined and count != 1:
      trace "peer already joined", local = s.peerInfo.peerId,
                                   remote = peerId, event
      return
    elif event == PeerEvent.Left and count != 0:
      trace "peer still connected or already left", local = s.peerInfo.peerId,
                                                    remote = peerId, event
      return

    trace "triggering peer events", local = s.peerInfo.peerId,
                                    remote = peerId, event

    var peerEvents: seq[Future[void]]
    for h in s.peerEvents[event]:
      peerEvents.add(h(peerId, event))

    checkFutures(await allFinished(peerEvents))
  except CancelledError as exc:
    raise exc
  except CatchableError as exc: # handlers should not raise!
    warn "exception in triggerPeerEvents", exc = exc.msg, peerId

proc disconnect*(s: Switch, peerId: PeerID) {.async, gcsafe.}

proc isConnected*(s: Switch, peerId: PeerID): bool =
  ## returns true if the peer has one or more
  ## associated connections (sockets)
  ##

  peerId in s.connManager

proc secure(s: Switch, conn: Connection): Future[Connection] {.async, gcsafe.} =
  if s.secureManagers.len <= 0:
    raise newException(UpgradeFailedError, "No secure managers registered!")

  let codec = await s.ms.select(conn, s.secureManagers.mapIt(it.codec))
  if codec.len == 0:
    raise newException(UpgradeFailedError, "Unable to negotiate a secure channel!")

  trace "Securing connection", conn, codec
  let secureProtocol = s.secureManagers.filterIt(it.codec == codec)

  # ms.select should deal with the correctness of this
  # let's avoid duplicating checks but detect if it fails to do it properly
  doAssert(secureProtocol.len > 0)

  return await secureProtocol[0].secure(conn, true)

proc identify(s: Switch, conn: Connection) {.async, gcsafe.} =
  ## identify the connection

  if (await s.ms.select(conn, s.identity.codec)):
    let info = await s.identity.identify(conn, conn.peerInfo)

    if info.pubKey.isNone and isNil(conn.peerInfo):
      raise newException(UpgradeFailedError,
        "no public key provided and no existing peer identity found")

    if isNil(conn.peerInfo):
      conn.peerInfo = PeerInfo.init(info.pubKey.get())

    if info.addrs.len > 0:
      conn.peerInfo.addrs = info.addrs

    if info.agentVersion.isSome:
      conn.peerInfo.agentVersion = info.agentVersion.get()

    if info.protoVersion.isSome:
      conn.peerInfo.protoVersion = info.protoVersion.get()

    if info.protos.len > 0:
      conn.peerInfo.protocols = info.protos

    trace "identified remote peer", conn, peerInfo = shortLog(conn.peerInfo)

proc identify(s: Switch, muxer: Muxer) {.async, gcsafe.} =
  # new stream for identify
  var stream = await muxer.newStream()

  defer:
    if not(isNil(stream)):
      await stream.close() # close identify stream

  # do identify first, so that we have a
  # PeerInfo in case we didn't before
  await s.identify(stream)

proc mux(s: Switch, conn: Connection): Future[Muxer] {.async, gcsafe.} =
  ## mux incoming connection

  trace "Muxing connection", conn
  if s.muxers.len == 0:
    warn "no muxers registered, skipping upgrade flow", conn
    return

  let muxerName = await s.ms.select(conn, toSeq(s.muxers.keys()))
  if muxerName.len == 0 or muxerName == "na":
    debug "no muxer available, early exit", conn
    return

  trace "Found a muxer", conn, muxerName

  # create new muxer for connection
  let muxer = s.muxers[muxerName].newMuxer(conn)

  # install stream handler
  muxer.streamHandler = s.streamHandler

  s.connManager.storeOutgoing(conn)
  s.connManager.storeMuxer(muxer)

  # start muxer read loop - the future will complete when loop ends
  let handlerFut = muxer.handle()

  # store it in muxed connections if we have a peer for it
  s.connManager.storeMuxer(muxer, handlerFut) # update muxer with handler

  return muxer

proc disconnect*(s: Switch, peerId: PeerID): Future[void] {.gcsafe.} =
  s.connManager.dropPeer(peerId)

proc upgradeOutgoing(s: Switch, conn: Connection): Future[Connection] {.async, gcsafe.} =
  trace "Upgrading outgoing connection", conn

  let sconn = await s.secure(conn) # secure the connection
  if isNil(sconn):
    raise newException(UpgradeFailedError,
      "unable to secure connection, stopping upgrade")

  if sconn.peerInfo.isNil:
    raise newException(UpgradeFailedError,
      "current version of nim-libp2p requires that secure protocol negotiates peerid")

  let muxer = await s.mux(sconn) # mux it if possible
  if muxer == nil:
    # TODO this might be relaxed in the future
    raise newException(UpgradeFailedError,
      "a muxer is required for outgoing connections")

  try:
    await s.identify(muxer)
  except CatchableError as exc:
    # Identify is non-essential, though if it fails, it might indicate that
    # the connection was closed already - this will be picked up by the read
    # loop
    debug "Could not identify connection", conn, msg = exc.msg

  if isNil(sconn.peerInfo):
    await sconn.close()
    raise newException(UpgradeFailedError,
      "No peerInfo for connection, stopping upgrade")

  trace "Upgraded outgoing connection", conn, sconn

  return sconn

proc upgradeIncoming(s: Switch, conn: Connection) {.async, gcsafe.} =
  trace "Upgrading incoming connection", conn
  let ms = newMultistream()

  # secure incoming connections
  proc securedHandler(conn: Connection,
                      proto: string)
                      {.async, gcsafe, closure.} =
    trace "Starting secure handler", conn
    let secure = s.secureManagers.filterIt(it.codec == proto)[0]

    try:
      var sconn = await secure.secure(conn, false)
      if isNil(sconn):
        return

      defer:
        await sconn.close()

      # add the muxer
      for muxer in s.muxers.values:
        ms.addHandler(muxer.codec, muxer)

      # handle subsequent secure requests
      await ms.handle(sconn)
    except CancelledError as exc:
      raise exc
    except CatchableError as exc:
      debug "Exception in secure handler", msg = exc.msg, conn

    trace "Stopped secure handler", conn

  if (await ms.select(conn)): # just handshake
    # add the secure handlers
    for k in s.secureManagers:
      ms.addHandler(k.codec, securedHandler)

  # handle un-secured connections
  # we completed the handshake above, so set this ms handler as active
  await ms.handle(conn, active = true)

proc internalConnect(s: Switch,
                     peerId: PeerID,
                     addrs: seq[MultiAddress]): Future[Connection] {.async.} =
  if s.peerInfo.peerId == peerId:
    raise newException(CatchableError, "can't dial self!")

  var conn: Connection
  # Ensure there's only one in-flight attempt per peer
  let lock = s.dialLock.mgetOrPut(peerId, newAsyncLock())
  try:
    await lock.acquire()

    # Check if we have a connection already and try to reuse it
    conn = s.connManager.selectConn(peerId)
    if conn != nil:
      if conn.atEof or conn.closed:
        # This connection should already have been removed from the connection
        # manager - it's essentially a bug that we end up here - we'll fail
        # for now, hoping that these will clean themselves up later...
        warn "dead connection in connection manager", conn
        await conn.close()
        raise newException(DialFailedError, "Zombie connection encountered")

      trace "Reusing existing connection", conn, direction = $conn.dir
      return conn

    trace "Dialing peer", peerId
    for t in s.transports: # for each transport
      for a in addrs:      # for each address
        if t.handles(a):   # check if it can dial it
          trace "Dialing address", address = $a, peerId
          let dialed = try:
              await t.dial(a)
            except CancelledError as exc:
              trace "Dialing canceled", msg = exc.msg, peerId
              raise exc
            except CatchableError as exc:
              trace "Dialing failed", msg = exc.msg, peerId
              libp2p_failed_dials.inc()
              continue # Try the next address

          # make sure to assign the peer to the connection
          dialed.peerInfo = PeerInfo.init(peerId, addrs)
          libp2p_dialed_peers.inc()

          let upgraded = try:
              await s.upgradeOutgoing(dialed)
            except CatchableError as exc:
              # If we failed to establish the connection through one transport,
              # we won't succeed through another - no use in trying again
              await dialed.close()
              debug "Upgrade failed", msg = exc.msg, peerId
              if not(exc of CancelledError):
                libp2p_failed_upgrade.inc()
              raise exc

          doAssert not isNil(upgraded), "connection died after upgradeOutgoing"

          conn = upgraded
          trace "Dial successful", conn, peerInfo = conn.peerInfo
          break
  finally:
    if lock.locked():
      lock.release()

  if isNil(conn): # None of the addresses connected
    raise newException(CatchableError, "Unable to establish outgoing link")

  if conn.closed():
    # This can happen if one of the peer event handlers deems the peer
    # unworthy and disconnects it
    raise newLPStreamClosedError()

  await s.triggerPeerEvents(peerId, PeerEvent.Joined)
  await s.triggerConnEvent(
    peerId, ConnEvent(kind: ConnEventKind.Connected, incoming: false))

  proc peerCleanup() {.async.} =
    try:
      await conn.closeEvent.wait()
      await s.triggerConnEvent(peerId,
                               ConnEvent(kind: ConnEventKind.Disconnected))
      await s.triggerPeerEvents(peerId, PeerEvent.Left)
    except CatchableError as exc:
      # This is a top-level procedure which runs as a separate task, so it
      # does not need to propagate CancelledError and should handle other
      # errors itself
      warn "Unexpected exception in switch peer connect cleanup",
        conn, msg = exc.msg

  # All the errors are handled inside the `peerCleanup()` procedure.
  asyncSpawn peerCleanup()

  return conn

proc connect*(s: Switch, peerId: PeerID, addrs: seq[MultiAddress]) {.async.} =
  discard await s.internalConnect(peerId, addrs)

proc negotiateStream(s: Switch, conn: Connection, proto: string): Future[Connection] {.async.} =
  trace "Negotiating stream", conn, proto
  if not await s.ms.select(conn, proto):
    await conn.close()
    raise newException(DialFailedError, "Unable to select sub-protocol " & proto)

  return conn

proc dial*(s: Switch,
           peerId: PeerID,
           proto: string): Future[Connection] {.async.} =
  trace "Dialling (existing)", peerId, proto
  let stream = await s.connManager.getMuxedStream(peerId)
  if stream.isNil:
    raise newException(DialFailedError, "Couldn't get muxed stream")

  return await s.negotiateStream(stream, proto)

proc dial*(s: Switch,
           peerId: PeerID,
           addrs: seq[MultiAddress],
           proto: string):
           Future[Connection] {.async.} =
  trace "Dialling (new)", peerId, proto
  let conn = await s.internalConnect(peerId, addrs)
  trace "Opening stream", conn
  let stream = await s.connManager.getMuxedStream(conn)

  proc cleanup() {.async.} =
    if not(isNil(stream)):
      await stream.close()

    if not(isNil(conn)):
      await conn.close()

  try:
    if isNil(stream):
      await conn.close()
      raise newException(DialFailedError, "Couldn't get muxed stream")

    return await s.negotiateStream(stream, proto)
  except CancelledError as exc:
    trace "Dial canceled", conn
    await cleanup()
    raise exc
  except CatchableError as exc:
    trace "Error dialing", conn, msg = exc.msg
    await cleanup()
    raise exc
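
# Usage sketch (illustrative, not part of the module): dialing a peer and
# opening a stream for an application protocol. `sw`, `peerId`, `addrs` and
# the protocol id are placeholders supplied by the application, and the
# length-prefixed `writeLp` helper from stream/connection is assumed.
#
#   let conn = await sw.dial(peerId, addrs, "/my/protocol/1.0.0")
#   try:
#     await conn.writeLp("hello")
#   finally:
#     await conn.close()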

proc mount*[T: LPProtocol](s: Switch, proto: T) {.gcsafe.} =
  if isNil(proto.handler):
    raise newException(CatchableError,
      "Protocol has to define a handle method or proc")

  if proto.codec.len == 0:
    raise newException(CatchableError,
      "Protocol has to define a codec string")

  s.ms.addHandler(proto.codec, proto)
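
# Usage sketch (illustrative, not part of the module): defining and mounting
# a custom protocol. This assumes the usual LPProtocol handler shape
# `proc(conn: Connection, proto: string)` and the length-prefixed stream
# helpers `readLp`/`writeLp`; `sw` is a placeholder switch.
#
#   proc echoHandler(conn: Connection, proto: string) {.async, gcsafe.} =
#     try:
#       let msg = await conn.readLp(1024)
#       await conn.writeLp(msg)
#     finally:
#       await conn.close()
#
#   let echoProto = LPProtocol(codec: "/echo/1.0.0", handler: echoHandler)
#   sw.mount(echoProto)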

proc start*(s: Switch): Future[seq[Future[void]]] {.async, gcsafe.} =
  trace "starting switch for peer", peerInfo = s.peerInfo

  proc handle(conn: Connection): Future[void] {.async, closure, gcsafe.} =
    trace "Incoming connection", conn
    try:
      await s.upgradeIncoming(conn) # perform upgrade on incoming connection
    except CancelledError as exc:
      raise exc
    except CatchableError as exc:
      trace "Exception occurred in incoming handler", conn, msg = exc.msg
    finally:
      await conn.close()
    trace "Connection handler done", conn

  var startFuts: seq[Future[void]]
  for t in s.transports: # for each transport
    for i, a in s.peerInfo.addrs:
      if t.handles(a): # check if it handles the multiaddr
        var server = await t.listen(a, handle)
        s.peerInfo.addrs[i] = t.ma # update peer's address
        startFuts.add(server)

  debug "Started libp2p node", peer = s.peerInfo
  result = startFuts # listen for incoming connections

proc stop*(s: Switch) {.async.} =
  trace "Stopping switch"

  # close and cleanup all connections
  await s.connManager.close()

  for t in s.transports:
    try:
      await t.close()
    except CancelledError as exc:
      raise exc
    except CatchableError as exc:
      warn "error cleaning up transports", msg = exc.msg

  trace "Switch stopped"

proc muxerHandler(s: Switch, muxer: Muxer) {.async, gcsafe.} =
  let conn = muxer.connection

  if conn.peerInfo.isNil:
    warn "This version of nim-libp2p requires secure protocol to negotiate peerid"
    await muxer.close()
    return

  # store incoming connection
  s.connManager.storeIncoming(conn)

  # store muxer and muxed connection
  s.connManager.storeMuxer(muxer)

  try:
    await s.identify(muxer)
  except CatchableError as exc:
    # Identify is non-essential, though if it fails, it might indicate that
    # the connection was closed already - this will be picked up by the read
    # loop
    debug "Could not identify connection", conn, msg = exc.msg

  try:
    let peerId = conn.peerInfo.peerId

    proc peerCleanup() {.async.} =
      try:
        await muxer.connection.join()
        await s.triggerConnEvent(peerId,
                                 ConnEvent(kind: ConnEventKind.Disconnected))
        await s.triggerPeerEvents(peerId, PeerEvent.Left)
      except CatchableError as exc:
        # This is a top-level procedure which runs as a separate task, so it
        # does not need to propagate CancelledError and shouldn't leak other
        # errors
        debug "Unexpected exception in switch muxer cleanup",
          conn, msg = exc.msg

    proc peerStartup() {.async.} =
      try:
        await s.triggerPeerEvents(peerId, PeerEvent.Joined)
        await s.triggerConnEvent(peerId,
                                 ConnEvent(kind: ConnEventKind.Connected,
                                           incoming: true))
      except CatchableError as exc:
        # This is a top-level procedure which runs as a separate task, so it
        # does not need to propagate CancelledError and shouldn't leak other
        # errors
        debug "Unexpected exception in switch muxer startup",
          conn, msg = exc.msg

    # All the errors are handled inside the `peerStartup()` procedure.
    asyncSpawn peerStartup()

    # All the errors are handled inside the `peerCleanup()` procedure.
    asyncSpawn peerCleanup()
  except CancelledError as exc:
    await muxer.close()
    raise exc
  except CatchableError as exc:
    await muxer.close()
    libp2p_failed_upgrade.inc()
    trace "Exception in muxer handler", conn, msg = exc.msg

proc newSwitch*(peerInfo: PeerInfo,
                transports: seq[Transport],
                identity: Identify,
                muxers: Table[string, MuxerProvider],
                secureManagers: openArray[Secure] = []): Switch =
  if secureManagers.len == 0:
    raise (ref CatchableError)(msg: "Provide at least one secure manager")

  let switch = Switch(
    peerInfo: peerInfo,
    ms: newMultistream(),
    transports: transports,
    connManager: ConnManager.init(),
    identity: identity,
    muxers: muxers,
    secureManagers: @secureManagers,
  )

  switch.streamHandler = proc(conn: Connection) {.async, gcsafe.} = # noraises
    trace "Starting stream handler", conn
    try:
      await switch.ms.handle(conn) # handle incoming connection
    except CancelledError as exc:
      raise exc
    except CatchableError as exc:
      trace "exception in stream handler", conn, msg = exc.msg
    finally:
      await conn.close()
    trace "Stream handler done", conn

  switch.mount(identity)

  for key, val in muxers:
    val.streamHandler = switch.streamHandler
    val.muxerHandler = proc(muxer: Muxer): Future[void] =
      switch.muxerHandler(muxer)

  return switch
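
# Usage sketch (illustrative, not part of the module): typical lifecycle of a
# switch built with `newSwitch`. Constructing the transports, identify,
# muxer providers and secure managers relies on helpers from other nim-libp2p
# modules and is omitted here; `sw` is a placeholder.
#
#   let serverFuts = await sw.start()  # start listening on all handled addrs
#   # ... mount protocols, dial peers, handle streams ...
#   await sw.stop()                    # drop connections, close transports
#   await allFutures(serverFuts)       # wait for the listeners to finish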

proc isConnected*(s: Switch, peerInfo: PeerInfo): bool
  {.deprecated: "Use PeerID version".} =
  not isNil(peerInfo) and isConnected(s, peerInfo.peerId)

proc disconnect*(s: Switch, peerInfo: PeerInfo): Future[void]
  {.deprecated: "Use PeerID version", gcsafe.} =
  disconnect(s, peerInfo.peerId)

proc connect*(s: Switch, peerInfo: PeerInfo): Future[void]
  {.deprecated: "Use PeerID version".} =
  connect(s, peerInfo.peerId, peerInfo.addrs)

proc dial*(s: Switch,
           peerInfo: PeerInfo,
           proto: string):
           Future[Connection]
           {.deprecated: "Use PeerID version".} =
  dial(s, peerInfo.peerId, peerInfo.addrs, proto)