nim-libp2p/libp2p/upgrademngrs/muxedupgrade.nim

## Nim-LibP2P
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

{.push raises: [Defect].}

import std/[tables, sequtils]
import pkg/[chronos, chronicles, metrics]
import ../upgrademngrs/upgrade,
       ../muxers/muxer

export Upgrade

logScope:
  topics = "libp2p muxedupgrade"

type
  MuxedUpgrade* = ref object of Upgrade
    muxers*: Table[string, MuxerProvider]
    streamHandler*: StreamHandler
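
# `muxers` maps multistream codec strings to their providers; `streamHandler`
# is attached to every negotiated muxer in `init` below, so that remotely
# opened streams are dispatched through multistream-select.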

proc identify*(u: MuxedUpgrade, muxer: Muxer) {.async, gcsafe.} =
  # new stream for identify
  var stream = await muxer.newStream()
  if stream == nil:
    return

  try:
    await u.identify(stream)
  finally:
    await stream.closeWithEOF()
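
# Note: `closeWithEOF` closes our end of the stream and waits for the remote's
# EOF before returning, so the identify exchange is fully flushed before the
# stream is reclaimed.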

proc mux*(u: MuxedUpgrade, conn: Connection): Future[Muxer] {.async, gcsafe.} =
  ## negotiate a muxer over the connection and install the stream handler
  trace "Muxing connection", conn
  if u.muxers.len == 0:
    warn "no muxers registered, skipping upgrade flow", conn
    return

  let muxerName = await u.ms.select(conn, toSeq(u.muxers.keys()))
  if muxerName.len == 0 or muxerName == "na":
    debug "no muxer available, early exit", conn
    return

  trace "Found a muxer", conn, muxerName

  # create new muxer for connection
  let muxer = u.muxers[muxerName].newMuxer(conn)

  # install stream handler
  muxer.streamHandler = u.streamHandler

  u.connManager.storeConn(conn)

  # store it in muxed connections if we have a peer for it
  u.connManager.storeMuxer(muxer, muxer.handle()) # store muxer and start read loop

  try:
    await u.identify(muxer)
  except CatchableError as exc:
    # Identify is non-essential, though if it fails, it might indicate that
    # the connection was closed already - this will be picked up by the read
    # loop
    debug "Could not identify connection", conn, msg = exc.msg

  return muxer
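
# A nil result from `mux` means no muxer could be negotiated;
# `upgradeOutgoing` below treats that as a failed upgrade.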

method upgradeOutgoing*(u: MuxedUpgrade, conn: Connection): Future[Connection] {.async, gcsafe.} =
  trace "Upgrading outgoing connection", conn

  let sconn = await u.secure(conn) # secure the connection
  if isNil(sconn):
    raise newException(UpgradeFailedError,
      "unable to secure connection, stopping upgrade")

  if sconn.peerInfo.isNil:
    raise newException(UpgradeFailedError,
      "current version of nim-libp2p requires that secure protocol negotiates peerid")

  let muxer = await u.mux(sconn) # mux it if possible
  if muxer == nil:
    # TODO this might be relaxed in the future
    raise newException(UpgradeFailedError,
      "a muxer is required for outgoing connections")

  if sconn.closed() or isNil(sconn.peerInfo):
    await sconn.close()
    raise newException(UpgradeFailedError,
      "Connection closed or missing peer info, stopping upgrade")

  trace "Upgraded outgoing connection", conn, sconn

  return sconn
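
# Typical call site (a sketch; the dialed `conn` and the `upgrader` instance
# come from the caller, e.g. the switch's dial path, not from this module):
#
#   let upgradedConn = await upgrader.upgradeOutgoing(conn)
#   # upgradedConn is secured and muxed; the muxer itself was stored in
#   # connManager by `mux` and is used to open new streams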

method upgradeIncoming*(u: MuxedUpgrade, incomingConn: Connection) {.async, gcsafe.} = # noraises
  trace "Upgrading incoming connection", incomingConn
  let ms = newMultistream()

  # secure incoming connections
  proc securedHandler(conn: Connection,
                      proto: string)
                      {.async, gcsafe, closure.} =
    trace "Starting secure handler", conn
    let secure = u.secureManagers.filterIt(it.codec == proto)[0]

    var cconn = conn
    try:
      var sconn = await secure.secure(cconn, false)
      if isNil(sconn):
        return

      cconn = sconn

      # add the muxer
      for muxer in u.muxers.values:
        ms.addHandler(muxer.codecs, muxer)

      # handle subsequent secure requests
      await ms.handle(cconn)
    except CatchableError as exc:
      debug "Exception in secure handler during incoming upgrade", msg = exc.msg, conn
      if not cconn.isUpgraded:
        cconn.upgrade(exc)
    finally:
      if not isNil(cconn):
        await cconn.close()

    trace "Stopped secure handler", conn

  try:
    if (await ms.select(incomingConn)): # just handshake
      # add the secure handlers
      for k in u.secureManagers:
        ms.addHandler(k.codec, securedHandler)

    # handle un-secured connections
    # we handshaked above, set this ms handler as active
    await ms.handle(incomingConn, active = true)
  except CatchableError as exc:
    debug "Exception upgrading incoming", exc = exc.msg
    if not incomingConn.isUpgraded:
      incomingConn.upgrade(exc)
  finally:
    if not isNil(incomingConn):
      await incomingConn.close()
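
# Incoming negotiation thus runs in two multistream rounds: the first round,
# in plain text, selects a secure codec and enters `securedHandler`; the
# second round runs inside the secured channel and selects a muxer, after
# which `muxerHandler` takes over the connection.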

proc muxerHandler(u: MuxedUpgrade, muxer: Muxer) {.async, gcsafe.} =
  let
    conn = muxer.connection

  if conn.peerInfo.isNil:
    warn "This version of nim-libp2p requires secure protocol to negotiate peerid"
    await muxer.close()
    return

  # store incoming connection
  u.connManager.storeConn(conn)

  # store muxer and muxed connection
  u.connManager.storeMuxer(muxer)

  try:
    await u.identify(muxer)
  except IdentifyError as exc:
    # Identify is non-essential, though if it fails, it might indicate that
    # the connection was closed already - this will be picked up by the read
    # loop
    debug "Could not identify connection", conn, msg = exc.msg
  except LPStreamClosedError as exc:
    debug "Identify stream closed", conn, msg = exc.msg
  except LPStreamEOFError as exc:
    debug "Identify stream EOF", conn, msg = exc.msg
  except CancelledError as exc:
    await muxer.close()
    raise exc
  except CatchableError as exc:
    await muxer.close()
    trace "Exception in muxer handler", conn, msg = exc.msg

proc init*(
  T: type MuxedUpgrade,
  identity: Identify,
  muxers: Table[string, MuxerProvider],
  secureManagers: openarray[Secure] = [],
  connManager: ConnManager,
  ms: MultistreamSelect): T =

  let upgrader = T(
    identity: identity,
    muxers: muxers,
    secureManagers: @secureManagers,
    connManager: connManager,
    ms: ms)

  upgrader.streamHandler = proc(conn: Connection) {.async, gcsafe.} = # noraises
    trace "Starting stream handler", conn
    try:
      await upgrader.ms.handle(conn) # handle incoming connection
    except CancelledError as exc:
      raise exc
    except CatchableError as exc:
      trace "exception in stream handler", conn, msg = exc.msg
    finally:
      await conn.closeWithEOF()
    trace "Stream handler done", conn

  for _, val in muxers:
    val.streamHandler = upgrader.streamHandler
    val.muxerHandler = proc(muxer: Muxer): Future[void] =
      upgrader.muxerHandler(muxer)

  return upgrader
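
# Example wiring (a minimal sketch; `identifyProto`, `muxerProviders`,
# `secureList`, `connMngr` and `msSelect` are hypothetical names for values
# the caller - typically the switch builder - already has):
#
#   let upgrader = MuxedUpgrade.init(
#     identifyProto, muxerProviders, secureList, connMngr, msSelect)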