Mirror of https://github.com/status-im/nim-libp2p.git (synced 2025-01-10 13:06:09 +00:00)
Commit b99d2039a8
* allow multiple codecs per protocol (without breaking things)
* add 1.1 protocol to gossip
* explicit peering part 1
* explicit peering part 2
* explicit peering part 3
* PeerInfo and ControlPrune protocols
* fix encodePrune
* validated always, even explicit peers
* prune by score (score is stub still)
* add a way to pass parameters to gossip
* standard setup fixes
* take into account explicit direct peers in publish
* add floodPublish logic
* small fixes, publish still half broken
* make sure to waitsub in sparse test
* use var semantics to optimize table access
* wip... lvalues don't work properly sadly...
* big publish refactor, replenish and balance
* fix internal tests
* use g.peers for fanout (todo: don't include flood peers)
* exclude non gossip from fanout
* internal test fixes
* fix flood tests
* fix test's trypublish
* test interop fixes
* make sure to not remove peers from gossip table
* restore old replenishFanout
* cleanups
* restore utility module import
* restore trace vs debug in gossip
* improve fanout replenish behavior further
* triage publish nil peers (issue is on master too but just hidden behind a if/in)
* getGossipPeers fixes
* remove topics from pubsubpeer (was unused)
* simplify rebalanceMesh (following spec) and make it finally reach D_high
* better diagnostics
* merge new pubsubpeer, copy 1.1 to new module
* fix up merge
* conditional enable gossip11 module
* add back topics in peers, re-enable flood publish
* add more heartbeat locking to prevent races
* actually lock the heartbeat
* minor fixes
* with sugar
* merge 1.0
* remove assertion in publish
* fix multistream 1.1 multi proto
* Fix merge oops
* wip
* fix gossip 11 upstream
* gossipsub11 -> gossipsub
* support interop testing
* tests fixing
* fix directchat build
* control prune updates (pb)
* wip parameters
* gossip internal tests fixes
* parameters wip
* finishup with params
* cleanups/wip
* small sugar
* grafted and pruned procs
* wip updateScores
* wip
* fix logging issue
* pubsubpeer, chronicles explicit override
* fix internal gossip tests
* wip
* tables troubleshooting
* score wip
* score wip
* fixes
* fix test utils generateNodes
* don't delete while iterating in score update
* fix grafted defect
* add a handleConnect in subscribeTopic
* pruning improvements
* wip
* score fixes
* post merge - builds gossip tests
* further merge fixes
* rebalance improvements and opportunistic grafting
* fix test for now
* restore explicit peering
* implement peer exchange graft message
* add an hard cap to PX
* backoff time management
* IWANT cap/budget
* Adaptive gossip dissemination
* outbound mesh quota, internal tests fixing
* oversub prune score based, finish outbound quota
* finishup with score and ihave budget
* use go daemon 0.3.0
* import fixes
* byScore cleanup score sorting
* remove pointless scaling in `/` Duration operator
* revert using libp2p org for daemon
* interop fixes
* fixes and cleanup
* remove heartbeat assertion, minor debug fixes
* logging improvements and cleaning up
* (to revert) add some traces
* add explicit topic to gossip rpcs
* pubsub merge fixes and type fix in switch
* Revert "(to revert) add some traces". This reverts commit 4663eaab6cc336c81cee50bc54025cf0b7bcbd99.
* cleanup some now irrelevant todo
* shuffle peers anyway as score might be disabled
* add missing shuffle
* old merge fix
* more merge fixes
* debug improvements
* re-enable gossip internal tests
* add gossip10 fallback (dormant but tested)
* split gossipsub internal tests into 1.0 and 1.1

Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
207 lines · 6.3 KiB · Nim
## Nim-LibP2P
## Copyright (c) 2019 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

import std/[strutils]
import chronos, chronicles, stew/byteutils
import stream/connection,
       vbuffer,
       protocols/protocol

logScope:
  topics = "multistream"

const
  MsgSize* = 64*1024
  Codec* = "/multistream/1.0.0"

  MSCodec* = "\x13" & Codec & "\n"
  Na* = "\x03na\n"
  Ls* = "\x03ls\n"
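
# Note: every multistream-select message is a varint length prefix followed
# by the payload plus a trailing "\n". The constants above are pre-encoded
# frames: "\x13" is 19, the length of "/multistream/1.0.0\n", while "\x03"
# is 3, the length of both "na\n" and "ls\n".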

type
  Matcher* = proc (proto: string): bool {.gcsafe.}

  HandlerHolder* = object
    protos*: seq[string]
    protocol*: LPProtocol
    match*: Matcher

  MultistreamSelect* = ref object of RootObj
    handlers*: seq[HandlerHolder]
    codec*: string

proc newMultistream*(): MultistreamSelect =
  new result
  result.codec = MSCodec

template validateSuffix(str: string): untyped =
  if str.endsWith("\n"):
    str.removeSuffix("\n")
  else:
    raise newException(CatchableError, "MultistreamSelect failed, malformed message")

proc select*(m: MultistreamSelect,
             conn: Connection,
             proto: seq[string]):
             Future[string] {.async.} =
  ## select a remote protocol
  trace "initiating handshake", conn, codec = m.codec
  await conn.write(m.codec) # write handshake
  if proto.len() > 0:
    trace "selecting proto", conn, proto = proto[0]
    await conn.writeLp((proto[0] & "\n")) # select proto

  var s = string.fromBytes((await conn.readLp(MsgSize))) # read ms header
  validateSuffix(s)

  if s != Codec:
    notice "handshake failed", conn, codec = s
    raise newException(CatchableError, "MultistreamSelect handshake failed")
  else:
    trace "multistream handshake success", conn

  if proto.len() == 0: # no protocols, must be a handshake call
    return Codec
  else:
    s = string.fromBytes(await conn.readLp(MsgSize)) # read the first proto
    validateSuffix(s)
    trace "reading first requested proto", conn
    if s == proto[0]:
      trace "successfully selected", conn, proto = proto[0]
      return proto[0]
    elif proto.len > 1:
      # Try to negotiate alternatives
      let protos = proto[1..<proto.len()]
      trace "selecting one of several protos", conn, protos = protos
      for p in protos:
        trace "selecting proto", conn, proto = p
        await conn.writeLp((p & "\n")) # select proto
        s = string.fromBytes(await conn.readLp(MsgSize)) # read the response
        validateSuffix(s)
        if s == p:
          trace "selected protocol", conn, protocol = s
          return s
      return ""
    else:
      # No alternatives, fail
      return ""
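
# Dial-side usage, as a minimal commented sketch (illustrative only; the
# protocol names are placeholders, `conn` is assumed to be an established
# Connection, and the calls run inside an async proc):
#
#   let ms = newMultistream()
#   let selected = await ms.select(conn, @["/echo/2.0.0", "/echo/1.0.0"])
#   if selected.len > 0:
#     echo "negotiated: ", selected # first mutually supported proto wins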

proc select*(m: MultistreamSelect,
             conn: Connection,
             proto: string): Future[bool] {.async.} =
  if proto.len > 0:
    return (await m.select(conn, @[proto])) == proto
  else:
    return (await m.select(conn, @[])) == Codec

proc select*(m: MultistreamSelect, conn: Connection): Future[bool] =
  m.select(conn, "")

proc list*(m: MultistreamSelect,
           conn: Connection): Future[seq[string]] {.async.} =
  ## request and return the protocols supported by the remote peer
  if not await m.select(conn):
    return

  await conn.write(Ls) # send ls

  var list = newSeq[string]()
  let ms = string.fromBytes(await conn.readLp(MsgSize))
  for s in ms.split("\n"):
    if s.len() > 0:
      list.add(s)

  result = list
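
# A short sketch of `list` (illustrative; `ms` and `conn` as in the dial-side
# sketch above):
#
#   let remoteProtos = await ms.list(conn)
#   # e.g. @["/echo/1.0.0", "/echo/2.0.0"], depending on the remote's handlers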

proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async, gcsafe.} =
  trace "Starting multistream handler", conn, handshaked = active
  var handshaked = active
  try:
    while not conn.atEof:
      var ms = string.fromBytes(await conn.readLp(MsgSize))
      validateSuffix(ms)

      if not handshaked and ms != Codec:
        notice "expected handshake message", conn, instead = ms
        raise newException(CatchableError,
                           "MultistreamSelect handling failed, invalid first message")

      trace "handle: got request", conn, ms
      if ms.len() <= 0:
        trace "handle: invalid proto", conn
        await conn.write(Na)
        continue # don't fall through and answer `na` a second time below

      if m.handlers.len() == 0:
        trace "handle: sending `na` for protocol", conn, protocol = ms
        await conn.write(Na)
        continue

      case ms:
      of "ls":
        trace "handle: listing protos", conn
        var protos = ""
        for h in m.handlers:
          for proto in h.protos:
            protos &= (proto & "\n")
        await conn.writeLp(protos)
      of Codec:
        if not handshaked:
          await conn.write(m.codec)
          handshaked = true
        else:
          trace "handle: sending `na` for duplicate handshake while handshaked",
            conn
          await conn.write(Na)
      else:
        for h in m.handlers:
          if (not isNil(h.match) and h.match(ms)) or h.protos.contains(ms):
            trace "found handler", conn, protocol = ms
            await conn.writeLp(ms & "\n")
            await h.protocol.handler(conn, ms)
            return
        debug "no handlers", conn, protocol = ms
        await conn.write(Na)
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    trace "Exception in multistream", conn, msg = exc.msg
  finally:
    await conn.close()

  trace "Stopped multistream handler", conn
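
# Listener-side sketch (illustrative only; assumes `proto: LPProtocol` was
# created elsewhere and `conn` is an accepted Connection):
#
#   let ms = newMultistream()
#   ms.addHandler("/echo/1.0.0", proto)
#   await ms.handle(conn) # handshakes, then dispatches matching requests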

proc addHandler*(m: MultistreamSelect,
                 codecs: seq[string],
                 protocol: LPProtocol,
                 matcher: Matcher = nil) =
  trace "registering protocols", protos = codecs
  m.handlers.add(HandlerHolder(protos: codecs,
                               protocol: protocol,
                               match: matcher))

proc addHandler*(m: MultistreamSelect,
                 codec: string,
                 protocol: LPProtocol,
                 matcher: Matcher = nil) =
  addHandler(m, @[codec], protocol, matcher)

proc addHandler*(m: MultistreamSelect,
                 codec: string,
                 handler: LPProtoHandler,
                 matcher: Matcher = nil) =
  ## helper to allow registering pure handlers
  trace "registering proto handler", proto = codec
  let protocol = new LPProtocol
  protocol.codec = codec
  protocol.handler = handler

  m.handlers.add(HandlerHolder(protos: @[codec],
                               protocol: protocol,
                               match: matcher))
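
# Matcher-based registration, as a hedged sketch: a Matcher lets one handler
# accept several wire names, e.g. any version under a prefix (names are
# illustrative; `ms` and `proto` as in the listener-side sketch above):
#
#   proc echoMatcher(protoName: string): bool {.gcsafe.} =
#     protoName.startsWith("/echo/")
#
#   ms.addHandler("/echo/1.0.0", proto, echoMatcher)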