nim-libp2p/libp2p/protocols/secure/secure.nim

# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push gcsafe.}
{.push raises: [].}
import std/[strformat]
import stew/results
import chronos, chronicles
import
../protocol,
../../stream/streamseq,
../../stream/connection,
../../multiaddress,
../../peerinfo,
../../errors
export protocol, results
logScope:
  topics = "libp2p secure"

const SecureConnTrackerName* = "SecureConn"
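
# Concrete secure channels (e.g. Noise) subclass `Secure` and override
# `handshake` and `readMessage`; `SecureConn` wraps the raw stream and
# buffers decrypted plaintext until `readOnce` consumes it.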
type
  Secure* = ref object of LPProtocol # base type for secure managers

  SecureConn* = ref object of Connection
    stream*: Connection
    buf: StreamSeq
func shortLog*(conn: SecureConn): auto =
  try:
    if conn == nil:
      "SecureConn(nil)"
    else:
      &"{shortLog(conn.peerId)}:{conn.oid}"
  except ValueError as exc:
    raiseAssert(exc.msg)

chronicles.formatIt(SecureConn):
  shortLog(it)
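
# Creates a `SecureConn` wrapping `conn`, inheriting the underlying
# connection's close event and direction.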
proc new*(
T: type SecureConn,
conn: Connection,
peerId: PeerId,
observedAddr: Opt[MultiAddress],
timeout: Duration = DefaultConnectionTimeout,
): T =
result = T(
stream: conn,
peerId: peerId,
observedAddr: observedAddr,
closeEvent: conn.closeEvent,
timeout: timeout,
dir: conn.dir,
)
result.initStream()
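
# Registers the stream under the `SecureConn` tracker name before the
# generic `Connection` stream setup runs.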
method initStream*(s: SecureConn) =
if s.objName.len == 0:
s.objName = SecureConnTrackerName
procCall Connection(s).initStream()
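
# Closes the wrapped raw stream first, then the secure connection itself.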
method closeImpl*(s: SecureConn) {.async: (raises: []).} =
trace "Closing secure conn", s, dir = s.dir
if s.stream != nil:
await s.stream.close()
await procCall Connection(s).closeImpl()
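
# Reads and decrypts the next message from the wire. Each secure channel
# implementation overrides this; it must return at least one byte or raise.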
method readMessage*(
c: SecureConn
): Future[seq[byte]] {.
async: (raises: [CancelledError, LPStreamError], raw: true), base
.} =
raiseAssert("Not implemented!")
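
# Exposes the raw connection wrapped by this secure connection.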
method getWrapped*(s: SecureConn): Connection =
s.stream
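
# Performs the secure channel handshake over `conn` and returns the
# resulting encrypted connection; overridden by each implementation.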
method handshake*(
s: Secure, conn: Connection, initiator: bool, peerId: Opt[PeerId]
): Future[SecureConn] {.
async: (raises: [CancelledError, LPStreamError], raw: true), base
.} =
raiseAssert("Not implemented!")
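
# Runs the handshake, records the bottom-level transport direction and
# spawns a cleanup task that ties the raw and secure connection lifetimes
# together.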
proc handleConn(
s: Secure, conn: Connection, initiator: bool, peerId: Opt[PeerId]
): Future[Connection] {.async: (raises: [CancelledError, LPStreamError]).} =
var sconn = await s.handshake(conn, initiator, peerId)
  # Mark the connection's bottom-level transport direction.
  # This is the safest place to do it; the information is required,
  # for example, by gossipsub.
  sconn.transportDir = if initiator: Direction.Out else: Direction.In

proc cleanup() {.async: (raises: []).} =
try:
block:
let
fut1 = conn.join()
fut2 = sconn.join()
try: # https://github.com/status-im/nim-chronos/issues/516
discard await race(fut1, fut2)
except ValueError:
raiseAssert("Futures list is not empty")
# at least one join() completed, cancel pending one, if any
if not fut1.finished:
await fut1.cancelAndWait()
if not fut2.finished:
await fut2.cancelAndWait()
block:
let
fut1 = sconn.close()
fut2 = conn.close()
await allFutures(fut1, fut2)
        static:
          doAssert typeof(fut1).E is void # Cannot fail
        static:
          doAssert typeof(fut2).E is void # Cannot fail
except CancelledError:
      # This is a top-level procedure that runs as a separate task, so it
      # does not need to propagate CancelledError.
      discard

if sconn != nil:
# All the errors are handled inside `cleanup()` procedure.
asyncSpawn cleanup()
sconn
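
# Installs the protocol handler that secures inbound connections.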
method init*(s: Secure) =
procCall LPProtocol(s).init()
proc handle(conn: Connection, proto: string) {.async.} =
trace "handling connection upgrade", proto, conn
try:
# We don't need the result but we
# definitely need to await the handshake
discard await s.handleConn(conn, false, Opt.none(PeerId))
trace "connection secured", conn
except CancelledError as exc:
warn "securing connection canceled", conn
await conn.close()
raise exc
except LPStreamError as exc:
warn "securing connection failed", description = exc.msg, conn
      await conn.close()

  s.handler = handle
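
# Secures an established connection; the local peer acts as handshake
# initiator when it originally dialed (`Direction.Out`).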
method secure*(
s: Secure, conn: Connection, peerId: Opt[PeerId]
): Future[Connection] {.
async: (raises: [CancelledError, LPStreamError], raw: true), base
.} =
s.handleConn(conn, conn.dir == Direction.Out, peerId)
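
# Reads up to `nbytes` of decrypted data into `pbytes`, fetching and
# buffering a new message from the wire when the plaintext buffer is empty.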
method readOnce*(
s: SecureConn, pbytes: pointer, nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
doAssert(nbytes > 0, "nbytes must be positive integer")
if s.isEof:
raise newLPStreamEOFError()
if s.buf.data().len() == 0:
try:
let buf = await s.readMessage() # Always returns >0 bytes or raises
s.activity = true
s.buf.add(buf)
except LPStreamEOFError as err:
s.isEof = true
await s.close()
raise err
except CancelledError as exc:
raise exc
except LPStreamError as err:
debug "Error while reading message from secure connection, closing.",
error = err.name, message = err.msg, connection = s
await s.close()
raise err
var p = cast[ptr UncheckedArray[byte]](pbytes)
return s.buf.consumeTo(toOpenArray(p, 0, nbytes - 1))
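
# A minimal sketch of how a concrete secure channel plugs into this base
# (hypothetical `MySecure`/`MySecureConn` names; real implementations such
# as Noise live in their own modules and actually negotiate keys and
# encrypt traffic):
#
#   type
#     MySecureConn = ref object of SecureConn
#     MySecure = ref object of Secure
#
#   method readMessage(c: MySecureConn): Future[seq[byte]] {.
#       async: (raises: [CancelledError, LPStreamError])
#   .} =
#     # read one frame from c.stream and return the decrypted payload;
#     # must yield at least one byte or raise
#
#   method handshake(s: MySecure, conn: Connection, initiator: bool,
#       peerId: Opt[PeerId]): Future[SecureConn] {.
#       async: (raises: [CancelledError, LPStreamError])
#   .} =
#     # negotiate keys over `conn` (hypothetical), then wrap it:
#     MySecureConn.new(conn, remotePeerId, conn.observedAddr)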