Cleanup (#55)
* fix: don't allow replacing pubkey
* fix: several small improvements
* removing pubkey setter
* improve error handling
* remove the use of Option[T] if not needed
* don't use optional
* fix-ci: temporarily pin p2pd to a working tag
* fix example to comply with latest changes
* bumping p2pd again to a higher version
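The recurring change in this commit is dropping `Option[T]` around ref types such as `PeerInfo` and modeling absence with `nil` plus `isNil` checks instead. A minimal standalone sketch of the two styles (the toy `PeerInfo` object and the id value are invented for illustration):

```nim
import options

type PeerInfo = ref object
  id: string   # toy stand-in; the real PeerInfo carries keys and addresses

# Before: absence modeled with Option[PeerInfo]
proc findPeerOpt(known: bool): Option[PeerInfo] =
  if known: some(PeerInfo(id: "QmPeer")) else: none(PeerInfo)

# After: absence modeled with nil, checked via isNil
proc findPeer(known: bool): PeerInfo =
  if known: PeerInfo(id: "QmPeer") else: nil

let p = findPeer(true)
if not isNil(p):
  echo p.id            # no .get() unwrapping needed

echo findPeerOpt(false).isSome   # false
```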
This commit is contained in:
parent
77e866d29a
commit
293a219dbe
@@ -31,7 +31,7 @@ install:
   # install and build go-libp2p-daemon
   - curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_p2pd.sh
-  - bash build_p2pd.sh p2pdCache HEAD
+  - bash build_p2pd.sh p2pdCache v0.2.1

 script:
   - nimble install -y --depsOnly
README.md
@@ -73,8 +73,8 @@ This stack reflects the minimal requirements for the upcoming Eth2 implementation
 To run it, add nim-libp2p to your project's nimble file and spawn a node as follows:

 ```nim
-import tables, options
-import chronos, chronicles
+import tables
+import chronos
 import ../libp2p/[switch,
                   multistream,
                   protocols/identify,
@@ -100,8 +100,7 @@ type
 method init(p: TestProto) {.gcsafe.} =
   # handle incoming connections in closure
   proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
-    let msg = cast[string](await conn.readLp())
-    echo "Got from remote - ", cast[string](msg)
+    echo "Got from remote - ", cast[string](await conn.readLp())
     await conn.writeLp("Hello!")
     await conn.close()
@@ -155,9 +154,9 @@ proc main() {.async, gcsafe.} =
   var switch2Fut = await switch2.start() # start second node
   let conn = await switch2.dial(switch1.peerInfo, TestCodec) # dial the first node

-  await conn.writeLp("Hello!") # writeLp send a lenght prefixed buffer over the wire
-  let msg = cast[string](await conn.readLp()) # readLp reads lenght prefixed bytes and returns a buffer without the prefix
-  echo "Remote responded with - ", cast[string](msg)
+  await conn.writeLp("Hello!") # writeLp send a length prefixed buffer over the wire
+  # readLp reads length prefixed bytes and returns a buffer without the prefix
+  echo "Remote responded with - ", cast[string](await conn.readLp())

   await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
   await allFutures(switch1Fut & switch2Fut) # wait for all transports to shutdown
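The comments above describe `writeLp`/`readLp` as length-prefixed I/O. As a rough illustration of what that framing means, here is a toy in-memory version with a single length byte; the real implementation works over async streams and uses a varint prefix, so this is only a sketch of the idea:

```nim
# Toy length-prefixed framing: one length byte, then the payload.
proc writeLpToy(buf: var seq[byte], msg: string) =
  assert msg.len < 256
  buf.add(byte(msg.len))            # prefix: payload size
  for c in msg: buf.add(byte(c))    # payload bytes

proc readLpToy(buf: seq[byte], pos: var int): string =
  let size = int(buf[pos])          # read the prefix...
  inc pos
  for i in 0 ..< size:              # ...then exactly `size` payload bytes
    result.add(char(buf[pos + i]))
  pos += size                       # the prefix itself is not returned

var wire: seq[byte]
writeLpToy(wire, "Hello!")
var pos = 0
echo readLpToy(wire, pos)           # prints "Hello!"
```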
@@ -48,9 +48,10 @@ type
     started: bool

 proc id (p: ChatProto): string =
-  result = "unknown"
-  if p.conn.peerInfo.peerId.isSome:
-    result = $p.conn.peerInfo.peerId.get()
+  if not isNil(p.conn.peerInfo):
+    $p.conn.peerInfo.peerId
+  else:
+    "unknown"

 # forward declaration
 proc readWriteLoop(p: ChatProto) {.async, gcsafe.}
@@ -66,9 +67,8 @@ proc dialPeer(p: ChatProto, address: string) {.async, gcsafe.} =
   if parts.len == 11 and parts[^2] notin ["ipfs", "p2p"]:
     quit("invalid or incompelete peerId")

-  var remotePeer: PeerInfo
-  remotePeer.peerId = some(PeerID.init(parts[^1]))
-  remotePeer.addrs.add(MultiAddress.init(address))
+  var remotePeer = PeerInfo.init(parts[^1],
+                                 @[MultiAddress.init(address)])

   echo &"dialing peer: {address}"
   p.conn = await p.switch.dial(remotePeer, ChatCodec)
@@ -165,8 +165,7 @@ proc serveThread(customData: CustomData) {.async.} =
   var transp = fromPipe(customData.consoleFd)

   let seckey = PrivateKey.random(RSA)
-  var peerInfo: PeerInfo
-  peerInfo.peerId = some(PeerID.init(seckey))
+  var peerInfo = PeerInfo.init(seckey)
   var localAddress = DefaultAddr
   while true:
     echo &"Type an address to bind to or Enter to use the default {DefaultAddr}"
@@ -202,7 +201,7 @@ proc serveThread(customData: CustomData) {.async.} =
   var libp2pFuts = await switch.start()
   chatProto.started = true

-  let id = peerInfo.peerId.get().pretty
+  let id = peerInfo.peerId.pretty
   echo "PeerID: " & id
   echo "listening on: "
   for a in peerInfo.addrs:
@@ -7,7 +7,7 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.

-import chronos, chronicles, options
+import chronos, chronicles
 import peerinfo,
        multiaddress,
        stream/lpstream,
@@ -19,7 +19,7 @@ const DefaultReadSize*: uint = 64 * 1024

 type
   Connection* = ref object of LPStream
-    peerInfo*: Option[PeerInfo]
+    peerInfo*: PeerInfo
     stream*: LPStream
     observedAddrs*: Multiaddress
@@ -39,12 +39,10 @@ proc newConnection*(stream: LPStream): Connection =
   let this = result
   if not isNil(result.stream.closeEvent):
     result.stream.closeEvent.wait().
-      addCallback(
-        proc (udata: pointer) =
-          if not this.closed:
-            trace "closing this connection because wrapped stream closed"
-            asyncCheck this.close()
-      )
+      addCallback do (udata: pointer):
+        if not this.closed:
+          trace "closing this connection because wrapped stream closed"
+          asyncCheck this.close()

 method read*(s: Connection, n = -1): Future[seq[byte]] {.gcsafe.} =
   s.stream.read(n)
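This hunk swaps an explicit anonymous `proc` argument for Nim's `do` notation, which passes the same closure as a trailing block. A self-contained sketch of the two equivalent spellings (the `onDone` helper is invented, standing in for `closeEvent.wait().addCallback`):

```nim
proc onDone(cb: proc (udata: pointer)) =
  cb(nil)   # pretend the wrapped stream's close event fired

# Old style: explicit anonymous proc argument.
onDone(proc (udata: pointer) =
  echo "closing (proc syntax)")

# New style: `do` notation passes the same closure as a trailing block.
onDone do (udata: pointer):
  echo "closing (do syntax)"
```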
@@ -18,7 +18,7 @@ import types,
 logScope:
   topic = "MplexChannel"

-const DefaultChannelSize* = DefaultBufferSize * 64 # 64kb
+const DefaultChannelSize* = DefaultBufferSize * 64 # 64kb

 type
   LPChannel* = ref object of BufferStream
@@ -97,7 +97,7 @@ method closed*(s: LPChannel): bool =

 proc pushTo*(s: LPChannel, data: seq[byte]): Future[void] =
   if s.closedRemote or s.isReset:
-    raise newLPStreamClosedError()
+    raise newLPStreamEOFError()
   trace "pushing data to channel", data = data.toHex(),
                                    id = s.id,
                                    initiator = s.initiator
@@ -106,7 +106,7 @@ proc pushTo*(s: LPChannel, data: seq[byte]): Future[void] =

 method read*(s: LPChannel, n = -1): Future[seq[byte]] =
   if s.closed or s.isReset:
-    raise newLPStreamClosedError()
+    raise newLPStreamEOFError()

   result = procCall read(BufferStream(s), n)
@@ -115,7 +115,7 @@ method readExactly*(s: LPChannel,
                     nbytes: int):
                     Future[void] =
   if s.closed or s.isReset:
-    raise newLPStreamClosedError()
+    raise newLPStreamEOFError()
   result = procCall readExactly(BufferStream(s), pbytes, nbytes)

 method readLine*(s: LPChannel,
@@ -123,7 +123,7 @@ method readLine*(s: LPChannel,
                  sep = "\r\n"):
                  Future[string] =
   if s.closed or s.isReset:
-    raise newLPStreamClosedError()
+    raise newLPStreamEOFError()
   result = procCall readLine(BufferStream(s), limit, sep)

 method readOnce*(s: LPChannel,
@@ -131,7 +131,7 @@ method readOnce*(s: LPChannel,
                  nbytes: int):
                  Future[int] =
   if s.closed or s.isReset:
-    raise newLPStreamClosedError()
+    raise newLPStreamEOFError()
   result = procCall readOnce(BufferStream(s), pbytes, nbytes)

 method readUntil*(s: LPChannel,
@@ -139,22 +139,22 @@ method readUntil*(s: LPChannel,
                   sep: seq[byte]):
                   Future[int] =
   if s.closed or s.isReset:
-    raise newLPStreamClosedError()
+    raise newLPStreamEOFError()
   result = procCall readOnce(BufferStream(s), pbytes, nbytes)

 method write*(s: LPChannel,
               pbytes: pointer,
               nbytes: int): Future[void] =
   if s.closedLocal or s.isReset:
-    raise newLPStreamClosedError()
+    raise newLPStreamEOFError()
   result = procCall write(BufferStream(s), pbytes, nbytes)

 method write*(s: LPChannel, msg: string, msglen = -1) {.async.} =
   if s.closedLocal or s.isReset:
-    raise newLPStreamClosedError()
+    raise newLPStreamEOFError()
   result = procCall write(BufferStream(s), msg, msglen)

 method write*(s: LPChannel, msg: seq[byte], msglen = -1) {.async.} =
   if s.closedLocal or s.isReset:
-    raise newLPStreamClosedError()
+    raise newLPStreamEOFError()
   result = procCall write(BufferStream(s), msg, msglen)
@@ -100,11 +100,11 @@ method handle*(m: Mplex) {.async, gcsafe.} =
       continue
     of MessageType.MsgIn, MessageType.MsgOut:
-      trace "pushing data to channel", id = id,
-                                       initiator = initiator,
-                                       msgType = msgType
+      trace "pushing data to channel", id = id,
+                                       initiator = initiator,
+                                       msgType = msgType

-      await channel.pushTo(data)
+      await channel.pushTo(data)
     of MessageType.CloseIn, MessageType.CloseOut:
       trace "closing channel", id = id,
                                initiator = initiator,
@@ -135,11 +135,9 @@ proc newMplex*(conn: Connection,
   result.local = initTable[uint, LPChannel]()

   let m = result
-  conn.closeEvent.wait().addCallback(
-    proc(udata: pointer) =
-      trace "connection closed, cleaning up mplex"
-      asyncCheck m.close()
-  )
+  conn.closeEvent.wait().addCallback do (udata: pointer):
+    trace "connection closed, cleaning up mplex"
+    asyncCheck m.close()

 method newStream*(m: Mplex, name: string = ""): Future[Connection] {.async, gcsafe.} =
   let channel = await m.newStreamInternal()
@@ -8,7 +8,7 @@
 ## those terms.

 ## This module implementes API for libp2p peer.
-import hashes, options
+import hashes
 import nimcrypto/utils
 import crypto/crypto, multicodec, multihash, base58, vbuffer
 import protobuf/minprotobuf
@@ -23,8 +23,6 @@ type
     HasPrivate,
     HasPublic

-  InvalidPublicKeyException* = object of Exception
-
   PeerInfo* = ref object of RootObj
     peerId*: PeerID
     addrs*: seq[MultiAddress]
@@ -35,10 +33,6 @@ type
     of HasPublic:
       key: Option[PublicKey]

-proc newInvalidPublicKeyException(): ref Exception =
-  newException(InvalidPublicKeyException,
-    "attempting to assign an invalid public key")
-
 proc init*(p: typedesc[PeerInfo],
            key: PrivateKey,
            addrs: seq[MultiAddress] = @[],
@@ -60,6 +54,16 @@ proc init*(p: typedesc[PeerInfo],
            addrs: addrs,
            protocols: protocols)

+proc init*(p: typedesc[PeerInfo],
+           peerId: string,
+           addrs: seq[MultiAddress] = @[],
+           protocols: seq[string] = @[]): PeerInfo {.inline.} =
+  PeerInfo(keyType: HasPublic,
+           peerId: PeerID.init(peerId),
+           addrs: addrs,
+           protocols: protocols)
+
 proc init*(p: typedesc[PeerInfo],
            key: PublicKey,
            addrs: seq[MultiAddress] = @[],
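The new `init` overload above constructs a `PeerInfo` in its `HasPublic` branch from a textual peer id. A standalone sketch of the underlying case-object pattern with toy stand-ins for the key types (names ending in `Toy` are invented; the CIDv0 string is the one used in the tests below):

```nim
import options

type
  KeyType = enum HasPrivate, HasPublic
  PrivKeyToy = object
    data: string
  PubKeyToy = object
    data: string
  PeerInfoToy = ref object
    peerId: string
    case keyType: KeyType
    of HasPrivate:
      privateKey: PrivKeyToy
    of HasPublic:
      key: Option[PubKeyToy]

proc init(T: typedesc[PeerInfoToy], key: PrivKeyToy): PeerInfoToy =
  # private-key variant: the id would be derived from the key
  PeerInfoToy(keyType: HasPrivate, peerId: "id-of-" & key.data, privateKey: key)

proc init(T: typedesc[PeerInfoToy], peerId: string): PeerInfoToy =
  # the overload this hunk adds: a bare textual id, public key unknown for now
  PeerInfoToy(keyType: HasPublic, peerId: peerId, key: none(PubKeyToy))

echo PeerInfoToy.init("QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N").peerId
```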
@@ -82,12 +86,6 @@ proc publicKey*(p: PeerInfo): Option[PublicKey] {.inline.} =
   else:
     result = some(p.privateKey.getKey())

-proc `publicKey=`*(p: PeerInfo, key: PublicKey) =
-  if not (PeerID.init(key) == p.peerId):
-    raise newInvalidPublicKeyException()
-
-  p.key = some(key)
-
 proc id*(p: PeerInfo): string {.inline.} =
   p.peerId.pretty
@@ -121,7 +121,7 @@ method init*(p: Identify) =

 proc identify*(p: Identify,
                conn: Connection,
-               remotePeerInfo: Option[PeerInfo]): Future[IdentifyInfo] {.async, gcsafe.} =
+               remotePeerInfo: PeerInfo): Future[IdentifyInfo] {.async, gcsafe.} =
   var message = await conn.readLp()
   if len(message) == 0:
     trace "identify: Invalid or empty message received!"
@@ -130,13 +130,13 @@ proc identify*(p: Identify,

   result = decodeMsg(message)

-  if remotePeerInfo.isSome and result.pubKey.isSome:
+  if not isNil(remotePeerInfo) and result.pubKey.isSome:
     let peer = PeerID.init(result.pubKey.get())

     # do a string comaprison of the ids,
     # because that is the only thing we
     # have in most cases
-    if peer != remotePeerInfo.get().peerId:
+    if peer != remotePeerInfo.peerId:
       trace "Peer ids don't match",
             remote = peer.pretty(),
             local = remotePeerInfo.get().id
@@ -26,25 +26,25 @@ const FloodSubCodec* = "/floodsub/1.0.0"
 type
   FloodSub* = ref object of PubSub
     floodsub*: Table[string, HashSet[string]] # topic to remote peer map
-    seen*: TimedCache[string] # list of messages forwarded to peers
+    seen*: TimedCache[string] # list of messages forwarded to peers

 method subscribeTopic*(f: FloodSub,
                        topic: string,
                        subscribe: bool,
                        peerId: string) {.gcsafe.} =
-  procCall PubSub(f).subscribeTopic(topic, subscribe, peerId)
+  procCall PubSub(f).subscribeTopic(topic, subscribe, peerId)

-  if topic notin f.floodsub:
-    f.floodsub[topic] = initHashSet[string]()
+  if topic notin f.floodsub:
+    f.floodsub[topic] = initHashSet[string]()

-  if subscribe:
-    trace "adding subscription for topic", peer = peerId, name = topic
-    # subscribe the peer to the topic
-    f.floodsub[topic].incl(peerId)
-  else:
-    trace "removing subscription for topic", peer = peerId, name = topic
-    # unsubscribe the peer from the topic
-    f.floodsub[topic].excl(peerId)
+  if subscribe:
+    trace "adding subscription for topic", peer = peerId, name = topic
+    # subscribe the peer to the topic
+    f.floodsub[topic].incl(peerId)
+  else:
+    trace "removing subscription for topic", peer = peerId, name = topic
+    # unsubscribe the peer from the topic
+    f.floodsub[topic].excl(peerId)

 method handleDisconnect*(f: FloodSub, peer: PubSubPeer) {.async, gcsafe.} =
   ## handle peer disconnects
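`subscribeTopic` keeps a `Table[string, HashSet[string]]` from topic name to peer ids, creating the topic's entry lazily and then `incl`/`excl`-ing the peer. A runnable sketch of that bookkeeping using only stdlib `tables` and `sets` (the topic and peer names are invented):

```nim
import tables, sets

var floodsub = initTable[string, HashSet[string]]()
let topic = "mytopic"
let peerId = "peerA"

if topic notin floodsub:                  # create the topic entry lazily
  floodsub[topic] = initHashSet[string]()

floodsub[topic].incl(peerId)              # subscribe: add the peer to the set
echo floodsub[topic].len                  # 1
floodsub[topic].excl(peerId)              # unsubscribe: remove it again
echo floodsub[topic].len                  # 0
```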
@@ -27,7 +27,7 @@ logScope:
 const GossipSubCodec* = "/meshsub/1.0.0"

 # overlay parameters
-const GossipSubD* = 6
+const GossipSubD* = 6
 const GossipSubDlo* = 4
 const GossipSubDhi* = 12
@@ -37,25 +37,26 @@ const GossipSubHistoryGossip* = 3

 # heartbeat interval
 const GossipSubHeartbeatInitialDelay* = 100.millis
-const GossipSubHeartbeatInterval* = 1.seconds
+const GossipSubHeartbeatInterval* = 1.seconds

 # fanout ttl
 const GossipSubFanoutTTL* = 60.seconds

 type
   GossipSub* = ref object of FloodSub
-    mesh*: Table[string, HashSet[string]] # meshes - topic to peer
-    fanout*: Table[string, HashSet[string]] # fanout - topic to peer
+    mesh*: Table[string, HashSet[string]] # meshes - topic to peer
+    fanout*: Table[string, HashSet[string]] # fanout - topic to peer
     gossipsub*: Table[string, HashSet[string]] # topic to peer map of all gossipsub peers
-    lastFanoutPubSub*: Table[string, Moment] # last publish time for fanout topics
+    lastFanoutPubSub*: Table[string, Moment] # last publish time for fanout topics
     gossip*: Table[string, seq[ControlIHave]] # pending gossip
-    control*: Table[string, ControlMessage] # pending control messages
-    mcache*: MCache # messages cache
-    heartbeatCancel*: Future[void] # cancelation future for heartbeat interval
+    control*: Table[string, ControlMessage] # pending control messages
+    mcache*: MCache # messages cache
+    heartbeatCancel*: Future[void] # cancelation future for heartbeat interval
     heartbeatLock: AsyncLock

 # TODO: This belong in chronos, temporary left here until chronos is updated
-proc addInterval(every: Duration, cb: CallbackFunc, udata: pointer = nil): Future[void] =
+proc addInterval(every: Duration, cb: CallbackFunc,
+    udata: pointer = nil): Future[void] =
   ## Arrange the callback ``cb`` to be called on every ``Duration`` window

   var retFuture = newFuture[void]("chronos.addInterval(Duration)")
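`addInterval` arranges a callback to fire on every interval window. A simplified chronos-based stand-in that loops over `sleepAsync` instead of re-arming a timer on a `retFuture` as the diff's version does; this only sketches the intended behavior, not the library's actual mechanics:

```nim
import chronos

proc runEvery(every: Duration, cb: proc () {.gcsafe.}) {.async.} =
  while true:
    await sleepAsync(every)
    cb()

proc demo() {.async.} =
  var ticks = 0
  proc tick() = inc ticks
  asyncCheck runEvery(10.milliseconds, tick)   # fire-and-forget the interval
  await sleepAsync(55.milliseconds)
  echo "ticks so far: ", ticks                 # roughly 5

waitFor demo()
```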
@@ -99,19 +100,19 @@ method subscribeTopic*(g: GossipSub,
                        topic: string,
                        subscribe: bool,
                        peerId: string) {.gcsafe.} =
-  procCall PubSub(g).subscribeTopic(topic, subscribe, peerId)
+  procCall PubSub(g).subscribeTopic(topic, subscribe, peerId)

-  if topic notin g.gossipsub:
-    g.gossipsub[topic] = initHashSet[string]()
+  if topic notin g.gossipsub:
+    g.gossipsub[topic] = initHashSet[string]()

-  if subscribe:
-    trace "adding subscription for topic", peer = peerId, name = topic
-    # subscribe the peer to the topic
-    g.gossipsub[topic].incl(peerId)
-  else:
-    trace "removing subscription for topic", peer = peerId, name = topic
-    # unsubscribe the peer from the topic
-    g.gossipsub[topic].excl(peerId)
+  if subscribe:
+    trace "adding subscription for topic", peer = peerId, name = topic
+    # subscribe the peer to the topic
+    g.gossipsub[topic].incl(peerId)
+  else:
+    trace "removing subscription for topic", peer = peerId, name = topic
+    # unsubscribe the peer from the topic
+    g.gossipsub[topic].excl(peerId)

 proc handleGraft(g: GossipSub,
                  peer: PubSubPeer,
@@ -137,7 +138,8 @@ proc handlePrune(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) =
     if prune.topicID in g.mesh:
       g.mesh[prune.topicID].excl(peer.id)

-proc handleIHave(g: GossipSub, peer: PubSubPeer, ihaves: seq[ControlIHave]): ControlIWant =
+proc handleIHave(g: GossipSub, peer: PubSubPeer, ihaves: seq[
+    ControlIHave]): ControlIWant =
   for ihave in ihaves:
     trace "processing ihave message", peer = peer.id,
                                       topicID = ihave.topicID
@@ -147,7 +149,8 @@ proc handleIHave(g: GossipSub, peer: PubSubPeer, ihaves: seq[ControlIHave]): ControlIWant =
       if m notin g.seen:
         result.messageIDs.add(m)

-proc handleIWant(g: GossipSub, peer: PubSubPeer, iwants: seq[ControlIWant]): seq[Message] =
+proc handleIWant(g: GossipSub, peer: PubSubPeer, iwants: seq[
+    ControlIWant]): seq[Message] =
   for iwant in iwants:
     for mid in iwant.messageIDs:
       trace "processing iwant message", peer = peer.id,
@@ -203,13 +206,13 @@ method rpcHandler(g: GossipSub,
       for p in toSendPeers:
         if p in g.peers and
           g.peers[p].peerInfo.peerId != peer.peerInfo.peerId:
-          let id = g.peers[p].peerInfo.peerId
-          let msgs = m.messages.filterIt(
-            # don't forward to message originator
-            id != it.fromPeerId()
-          )
-          if msgs.len > 0:
-            await g.peers[p].send(@[RPCMsg(messages: msgs)])
+          let id = g.peers[p].peerInfo.peerId
+          let msgs = m.messages.filterIt(
+            # don't forward to message originator
+            id != it.fromPeerId()
+          )
+          if msgs.len > 0:
+            await g.peers[p].send(@[RPCMsg(messages: msgs)])

     var respControl: ControlMessage
     if m.control.isSome:
@@ -224,7 +227,8 @@ method rpcHandler(g: GossipSub,

       if respControl.graft.len > 0 or respControl.prune.len > 0 or
         respControl.ihave.len > 0 or respControl.iwant.len > 0:
-        await peer.send(@[RPCMsg(control: some(respControl), messages: messages)])
+        await peer.send(@[RPCMsg(control: some(respControl),
+                                 messages: messages)])

 proc replenishFanout(g: GossipSub, topic: string) {.async, gcsafe.} =
   ## get fanout peers for a topic
@@ -400,7 +404,7 @@ method start*(g: GossipSub) {.async.} =
   # setup the heartbeat interval
   g.heartbeatCancel = addInterval(GossipSubHeartbeatInterval,
                                   proc (arg: pointer = nil) {.gcsafe, locks: 0.} =
-                                    asyncCheck g.heartbeat)
+                                    asyncCheck g.heartbeat)

 method stop*(g: GossipSub) {.async.} =
   ## stopt pubsub
@@ -455,7 +459,7 @@ when isMainModule and not defined(release):
       for i in 0..<15:
         let conn = newConnection(newBufferStream(writeHandler))
         let peerInfo = PeerInfo.init(PrivateKey.random(RSA))
-        conn.peerInfo = some(peerInfo)
+        conn.peerInfo = peerInfo
         gossipSub.peers[peerInfo.id] = newPubSubPeer(peerInfo, GossipSubCodec)
         gossipSub.peers[peerInfo.id].conn = conn
         gossipSub.mesh[topic].incl(peerInfo.id)
@@ -482,7 +486,7 @@ when isMainModule and not defined(release):
       for i in 0..<15:
         let conn = newConnection(newBufferStream(writeHandler))
         let peerInfo = PeerInfo.init(PrivateKey.random(RSA))
-        conn.peerInfo = some(peerInfo)
+        conn.peerInfo = peerInfo
         gossipSub.peers[peerInfo.id] = newPubSubPeer(peerInfo, GossipSubCodec)
         gossipSub.peers[peerInfo.id].conn = conn
         gossipSub.gossipsub[topic].incl(peerInfo.id)
@@ -512,7 +516,7 @@ when isMainModule and not defined(release):
       for i in 0..<15:
         let conn = newConnection(newBufferStream(writeHandler))
         var peerInfo = PeerInfo.init(PrivateKey.random(RSA))
-        conn.peerInfo = some(peerInfo)
+        conn.peerInfo = peerInfo
         gossipSub.peers[peerInfo.id] = newPubSubPeer(peerInfo, GossipSubCodec)
         gossipSub.peers[peerInfo.id].handler = handler
         gossipSub.gossipsub[topic].incl(peerInfo.id)
@@ -543,7 +547,7 @@ when isMainModule and not defined(release):
       for i in 0..<6:
         let conn = newConnection(newBufferStream(writeHandler))
         let peerInfo = PeerInfo.init(PrivateKey.random(RSA))
-        conn.peerInfo = some(peerInfo)
+        conn.peerInfo = peerInfo
         gossipSub.peers[peerInfo.id] = newPubSubPeer(peerInfo, GossipSubCodec)
         gossipSub.peers[peerInfo.id].handler = handler
         gossipSub.fanout[topic].incl(peerInfo.id)
@@ -580,7 +584,7 @@ when isMainModule and not defined(release):
       for i in 0..<6:
         let conn = newConnection(newBufferStream(writeHandler))
         let peerInfo = PeerInfo.init(PrivateKey.random(RSA))
-        conn.peerInfo = some(peerInfo)
+        conn.peerInfo = peerInfo
         gossipSub.peers[peerInfo.id] = newPubSubPeer(peerInfo, GossipSubCodec)
         gossipSub.peers[peerInfo.id].handler = handler
         gossipSub.fanout[topic1].incl(peerInfo.id)
@@ -617,7 +621,7 @@ when isMainModule and not defined(release):
       for i in 0..<30:
         let conn = newConnection(newBufferStream(writeHandler))
         let peerInfo = PeerInfo.init(PrivateKey.random(RSA))
-        conn.peerInfo = some(peerInfo)
+        conn.peerInfo = peerInfo
         gossipSub.peers[peerInfo.id] = newPubSubPeer(peerInfo, GossipSubCodec)
         gossipSub.peers[peerInfo.id].handler = handler
         if i mod 2 == 0:
@@ -628,7 +632,7 @@ when isMainModule and not defined(release):
       for i in 0..<15:
         let conn = newConnection(newBufferStream(writeHandler))
         let peerInfo = PeerInfo.init(PrivateKey.random(RSA))
-        conn.peerInfo = some(peerInfo)
+        conn.peerInfo = peerInfo
         gossipSub.peers[peerInfo.id] = newPubSubPeer(peerInfo, GossipSubCodec)
         gossipSub.peers[peerInfo.id].handler = handler
         gossipSub.gossipsub[topic].incl(peerInfo.id)
@@ -665,7 +669,7 @@ when isMainModule and not defined(release):
       for i in 0..<30:
         let conn = newConnection(newBufferStream(writeHandler))
         let peerInfo = PeerInfo.init(PrivateKey.random(RSA))
-        conn.peerInfo = some(peerInfo)
+        conn.peerInfo = peerInfo
         gossipSub.peers[peerInfo.id] = newPubSubPeer(peerInfo, GossipSubCodec)
         gossipSub.peers[peerInfo.id].handler = handler
         if i mod 2 == 0:
@@ -697,7 +701,7 @@ when isMainModule and not defined(release):
       for i in 0..<30:
         let conn = newConnection(newBufferStream(writeHandler))
         let peerInfo = PeerInfo.init(PrivateKey.random(RSA))
-        conn.peerInfo = some(peerInfo)
+        conn.peerInfo = peerInfo
         gossipSub.peers[peerInfo.id] = newPubSubPeer(peerInfo, GossipSubCodec)
         gossipSub.peers[peerInfo.id].handler = handler
         if i mod 2 == 0:
@@ -729,7 +733,7 @@ when isMainModule and not defined(release):
       for i in 0..<30:
         let conn = newConnection(newBufferStream(writeHandler))
         let peerInfo = PeerInfo.init(PrivateKey.random(RSA))
-        conn.peerInfo = some(peerInfo)
+        conn.peerInfo = peerInfo
         gossipSub.peers[peerInfo.id] = newPubSubPeer(peerInfo, GossipSubCodec)
         gossipSub.peers[peerInfo.id].handler = handler
         if i mod 2 == 0:
@@ -32,10 +32,10 @@ type
     handler*: seq[TopicHandler]

   PubSub* = ref object of LPProtocol
-    peerInfo*: PeerInfo # this peer's info
-    topics*: Table[string, Topic] # local topics
+    peerInfo*: PeerInfo # this peer's info
+    topics*: Table[string, Topic] # local topics
     peers*: Table[string, PubSubPeer] # peerid to peer map
-    triggerSelf*: bool # trigger own local handler on publish
+    triggerSelf*: bool # trigger own local handler on publish
     cleanupLock: AsyncLock

 proc sendSubs*(p: PubSub,
@@ -102,16 +102,16 @@ method handleConn*(p: PubSub,
   ## that we're interested in
   ##

-  if conn.peerInfo.isNone:
+  if isNil(conn.peerInfo):
     trace "no valid PeerId for peer"
     await conn.close()
     return

   proc handler(peer: PubSubPeer, msgs: seq[RPCMsg]) {.async, gcsafe.} =
-    # call floodsub rpc handler
-    await p.rpcHandler(peer, msgs)
+    # call floodsub rpc handler
+    await p.rpcHandler(peer, msgs)

-  let peer = p.getPeer(conn.peerInfo.get(), proto)
+  let peer = p.getPeer(conn.peerInfo, proto)
   let topics = toSeq(p.topics.keys)
   if topics.len > 0:
     await p.sendSubs(peer, topics, true)
@@ -123,20 +123,18 @@ method handleConn*(p: PubSub,

 method subscribeToPeer*(p: PubSub,
                         conn: Connection) {.base, async, gcsafe.} =
-  var peer = p.getPeer(conn.peerInfo.get(), p.codec)
-  trace "setting connection for peer", peerId = conn.peerInfo.get().id
+  var peer = p.getPeer(conn.peerInfo, p.codec)
+  trace "setting connection for peer", peerId = conn.peerInfo.id
   if not peer.isConnected:
     peer.conn = conn

   # handle connection close
   conn.closeEvent.wait()
-    .addCallback(
-      proc(udata: pointer = nil) {.gcsafe.} =
-        trace "connection closed, cleaning up peer",
-          peer = conn.peerInfo.get().id
-
-        asyncCheck p.cleanUpHelper(peer)
-    )
+    .addCallback do (udata: pointer = nil):
+      trace "connection closed, cleaning up peer",
+        peer = conn.peerInfo.id
+
+      asyncCheck p.cleanUpHelper(peer)

 method unsubscribe*(p: PubSub,
                     topics: seq[TopicPair]) {.base, async, gcsafe.} =
@@ -92,10 +92,8 @@ proc send*(p: PubSubPeer, msgs: seq[RPCMsg]) {.async, gcsafe.} =
       await sendToRemote()
       return

-    p.onConnect.wait().addCallback(
-      proc(udata: pointer) =
+    p.onConnect.wait().addCallback do (udata: pointer):
       asyncCheck sendToRemote()
-    )
     trace "enqueued message to send at a later time"

   except CatchableError as exc:
@@ -249,7 +249,7 @@ proc newSecureConnection*(conn: Connection,
   result.readerCoder.init(cipher, secrets.keyOpenArray(i1),
                           secrets.ivOpenArray(i1))

-  result.peerInfo = some(PeerInfo.init(remotePubKey))
+  result.peerInfo = PeerInfo.init(remotePubKey)

 proc transactMessage(conn: Connection,
                      msg: seq[byte]): Future[seq[byte]] {.async.} =
@@ -296,8 +296,11 @@ proc handshake*(s: Secio, conn: Connection): Future[SecureConnection] {.async.} =
   if randomBytes(localNonce) != SecioNonceSize:
     raise newException(CatchableError, "Could not generate random data")

-  var request = createProposal(localNonce, localBytesPubkey, SecioExchanges,
-                               SecioCiphers, SecioHashes)
+  var request = createProposal(localNonce,
+                               localBytesPubkey,
+                               SecioExchanges,
+                               SecioCiphers,
+                               SecioHashes)

   localPeerId = PeerID.init(s.localPublicKey)
@@ -434,12 +437,12 @@ proc handleConn(s: Secio, conn: Connection): Future[Connection] {.async, gcsafe.} =
   asyncCheck readLoop(sconn, stream)
   var secured = newConnection(stream)
   secured.closeEvent.wait()
-    .addCallback(proc(udata: pointer) =
+    .addCallback do (udata: pointer):
       trace "wrapped connection closed, closing upstream"
-      if not sconn.closed:
+      if not isNil(sconn) and not sconn.closed:
         asyncCheck sconn.close()
-    )
-  secured.peerInfo = some(PeerInfo.init(sconn.peerInfo.get().publicKey.get()))
+
+  secured.peerInfo = PeerInfo.init(sconn.peerInfo.publicKey.get())
   result = secured

 method init(s: Secio) {.gcsafe.} =
@@ -30,7 +30,7 @@ proc newChronosStream*(server: StreamServer,

 method read*(s: ChronosStream, n = -1): Future[seq[byte]] {.async.} =
   if s.reader.atEof:
-    raise newLPStreamClosedError()
+    raise newLPStreamEOFError()

   try:
     result = await s.reader.read(n)
@@ -43,7 +43,7 @@ method readExactly*(s: ChronosStream,
                     pbytes: pointer,
                     nbytes: int): Future[void] {.async.} =
   if s.reader.atEof:
-    raise newLPStreamClosedError()
+    raise newLPStreamEOFError()

   try:
     await s.reader.readExactly(pbytes, nbytes)
@@ -56,7 +56,7 @@ method readExactly*(s: ChronosStream,

 method readLine*(s: ChronosStream, limit = 0, sep = "\r\n"): Future[string] {.async.} =
   if s.reader.atEof:
-    raise newLPStreamClosedError()
+    raise newLPStreamEOFError()

   try:
     result = await s.reader.readLine(limit, sep)
@@ -67,7 +67,7 @@ method readLine*(s: ChronosStream, limit = 0, sep = "\r\n"): Future[string] {.async.} =

 method readOnce*(s: ChronosStream, pbytes: pointer, nbytes: int): Future[int] {.async.} =
   if s.reader.atEof:
-    raise newLPStreamClosedError()
+    raise newLPStreamEOFError()

   try:
     result = await s.reader.readOnce(pbytes, nbytes)
@@ -81,7 +81,7 @@ method readUntil*(s: ChronosStream,
                   nbytes: int,
                   sep: seq[byte]): Future[int] {.async.} =
   if s.reader.atEof:
-    raise newLPStreamClosedError()
+    raise newLPStreamEOFError()

   try:
     result = await s.reader.readUntil(pbytes, nbytes, sep)
@@ -96,7 +96,7 @@ method readUntil*(s: ChronosStream,

 method write*(s: ChronosStream, pbytes: pointer, nbytes: int) {.async.} =
   if s.writer.atEof:
-    raise newLPStreamClosedError()
+    raise newLPStreamEOFError()

   try:
     await s.writer.write(pbytes, nbytes)
@@ -109,7 +109,7 @@ method write*(s: ChronosStream, pbytes: pointer, nbytes: int) {.async.} =

 method write*(s: ChronosStream, msg: string, msglen = -1) {.async.} =
   if s.writer.atEof:
-    raise newLPStreamClosedError()
+    raise newLPStreamEOFError()

   try:
     await s.writer.write(msg, msglen)
@@ -122,7 +122,7 @@ method write*(s: ChronosStream, msg: string, msglen = -1) {.async.} =

 method write*(s: ChronosStream, msg: seq[byte], msglen = -1) {.async.} =
   if s.writer.atEof:
-    raise newLPStreamClosedError()
+    raise newLPStreamEOFError()

   try:
     await s.writer.write(msg, msglen)
@@ -22,7 +22,7 @@ type
     par*: ref Exception
   LPStreamWriteError* = object of LPStreamError
     par*: ref Exception
-  LPStreamClosedError* = object of LPStreamError
+  LPStreamEOFError* = object of LPStreamError

 proc newLPStreamReadError*(p: ref Exception): ref Exception {.inline.} =
   var w = newException(LPStreamReadError, "Read stream failed")
@@ -45,8 +45,8 @@ proc newLPStreamLimitError*(): ref Exception {.inline.} =
 proc newLPStreamIncorrectError*(m: string): ref Exception {.inline.} =
   result = newException(LPStreamIncorrectError, m)

-proc newLPStreamClosedError*(): ref Exception {.inline.} =
-  result = newException(LPStreamClosedError, "Stream closed!")
+proc newLPStreamEOFError*(): ref Exception {.inline.} =
+  result = newException(LPStreamEOFError, "Stream EOF!")

 method closed*(s: LPStream): bool {.base, inline.} =
   s.isClosed
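The rename makes the exception describe the condition (end of stream) rather than the cause (a closed stream). A standalone sketch of the hierarchy-plus-constructor style used here, with stand-in names and `CatchableError` as the root (the library's code at the time derived from `Exception`):

```nim
type
  LPStreamErrorToy = object of CatchableError
  LPStreamEOFErrorToy = object of LPStreamErrorToy

proc newLPStreamEOFErrorToy(): ref Exception {.inline.} =
  result = newException(LPStreamEOFErrorToy, "Stream EOF!")

proc readChunk() =
  raise newLPStreamEOFErrorToy()   # EOF is a condition, not a failure cause

try:
  readChunk()
except LPStreamErrorToy as exc:    # still caught via the common base type
  echo "stream error: ", exc.msg
```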
@@ -65,13 +65,17 @@ proc secure(s: Switch, conn: Connection): Future[Connection] {.async, gcsafe.} =
 proc identify(s: Switch, conn: Connection): Future[PeerInfo] {.async, gcsafe.} =
   ## identify the connection

-  if conn.peerInfo.isSome:
-    result = conn.peerInfo.get()
+  if not isNil(conn.peerInfo):
+    result = conn.peerInfo

   try:
     if (await s.ms.select(conn, s.identity.codec)):
       let info = await s.identity.identify(conn, conn.peerInfo)

+      if info.pubKey.isNone and isNil(result):
+        raise newException(CatchableError,
+          "no public key provided and no existing peer identity found")
+
       if info.pubKey.isSome:
         result = PeerInfo.init(info.pubKey.get())
         trace "identify: identified remote peer", peer = result.id
@@ -112,58 +116,58 @@ proc mux(s: Switch, conn: Connection): Future[void] {.async, gcsafe.} =
   # add muxer handler cleanup proc
   handlerFut.addCallback do (udata: pointer = nil):
     trace "muxer handler completed for peer",
-      peer = conn.peerInfo.get().id
+      peer = conn.peerInfo.id

   # do identify first, so that we have a
   # PeerInfo in case we didn't before
-  conn.peerInfo = some((await s.identify(stream)))
+  conn.peerInfo = await s.identify(stream)

   await stream.close() # close identify stream

   trace "connection's peerInfo", peerInfo = conn.peerInfo

   # store it in muxed connections if we have a peer for it
-  if conn.peerInfo.isSome:
-    trace "adding muxer for peer", peer = conn.peerInfo.get().id
-    s.muxed[conn.peerInfo.get().id] = muxer
+  if not isNil(conn.peerInfo):
+    trace "adding muxer for peer", peer = conn.peerInfo.id
+    s.muxed[conn.peerInfo.id] = muxer

 proc cleanupConn(s: Switch, conn: Connection) {.async, gcsafe.} =
-  # if conn.peerInfo.peerId.isSome:
-  let id = conn.peerInfo.get().id
-  trace "cleaning up connection for peer", peerId = id
-  if id in s.muxed:
-    await s.muxed[id].close()
-    s.muxed.del(id)
+  if not isNil(conn.peerInfo):
+    let id = conn.peerInfo.id
+    trace "cleaning up connection for peer", peerId = id
+    if id in s.muxed:
+      await s.muxed[id].close()
+      s.muxed.del(id)

-  if id in s.connections:
-    await s.connections[id].close()
-    s.connections.del(id)
+    if id in s.connections:
+      await s.connections[id].close()
+      s.connections.del(id)

 proc disconnect*(s: Switch, peer: PeerInfo) {.async, gcsafe.} =
   let conn = s.connections.getOrDefault(peer.id)
-  if conn != nil:
+  if not isNil(conn):
     await s.cleanupConn(conn)

-proc getMuxedStream(s: Switch, peerInfo: PeerInfo): Future[Option[Connection]] {.async, gcsafe.} =
+proc getMuxedStream(s: Switch, peerInfo: PeerInfo): Future[Connection] {.async, gcsafe.} =
   # if there is a muxer for the connection
   # use it instead to create a muxed stream
   if peerInfo.id in s.muxed:
     trace "connection is muxed, setting up a stream"
     let muxer = s.muxed[peerInfo.id]
     let conn = await muxer.newStream()
-    result = some(conn)
+    result = conn

 proc upgradeOutgoing(s: Switch, conn: Connection): Future[Connection] {.async, gcsafe.} =
   trace "handling connection", conn = conn
   result = conn

   # don't mux/secure twise
-  if conn.peerInfo.get().id in s.muxed:
+  if conn.peerInfo.id in s.muxed:
     return

   result = await s.secure(result) # secure the connection
   await s.mux(result) # mux it if possible
-  s.connections[conn.peerInfo.get().id] = result
+  s.connections[conn.peerInfo.id] = result

 proc upgradeIncoming(s: Switch, conn: Connection) {.async, gcsafe.} =
   trace "upgrading incoming connection"
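With `Option` gone, absent entries surface as `nil` refs: `getOrDefault` on a table of ref values returns the type's default, which is `nil`, so callers guard with `isNil` just as `disconnect` does above. A minimal sketch (the `ConnToy` type is invented):

```nim
import tables

type ConnToy = ref object   # stand-in for a connection handle
  id: string

var connections = initTable[string, ConnToy]()

# For ref values, getOrDefault yields nil on a missing key:
let conn = connections.getOrDefault("peerA")
if not isNil(conn):
  echo "closing ", conn.id
else:
  echo "no connection to clean up"
```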
@@ -206,7 +210,7 @@ proc dial*(s: Switch,
       trace "Dialing address", address = $a
       result = await t.dial(a)
       # make sure to assign the peer to the connection
-      result.peerInfo = some peer
+      result.peerInfo = peer
       result = await s.upgradeOutgoing(result)
       result.closeEvent.wait().addCallback do (udata: pointer):
         asyncCheck s.cleanupConn(result)
@@ -214,11 +218,14 @@ proc dial*(s: Switch,
   else:
     trace "Reusing existing connection"

+  if isNil(result):
+    raise newException(CatchableError, "unable to establish outgoing link!")
+
   if proto.len > 0 and not result.closed:
     let stream = await s.getMuxedStream(peer)
-    if stream.isSome:
+    if not isNil(stream):
       trace "Connection is muxed, return muxed stream"
-      result = stream.get()
+      result = stream
     trace "Attempting to select remote", proto = proto

     if not await s.ms.select(result, proto):
@@ -324,7 +331,7 @@ proc newSwitch*(peerInfo: PeerInfo,
     val.muxerHandler = proc(muxer: Muxer) {.async, gcsafe.} =
       trace "got new muxer"
       let stream = await muxer.newStream()
-      muxer.connection.peerInfo = some((await s.identify(stream)))
+      muxer.connection.peerInfo = await s.identify(stream)
       await stream.close()

   for k in secureManagers.keys:
@@ -30,7 +30,7 @@ proc connHandler*(t: Transport,
   let conn: Connection = newConnection(newChronosStream(server, client))
   conn.observedAddrs = MultiAddress.init(client.remoteAddress)
   if not initiator:
-    let handlerFut = if t.handler == nil: nil else: t.handler(conn)
+    let handlerFut = if isNil(t.handler): nil else: t.handler(conn)
     let connHolder: ConnHolder = ConnHolder(connection: conn,
                                             connFuture: handlerFut)
     t.connections.add(connHolder)
@@ -33,11 +33,11 @@ suite "GossipSub":

       var buf1 = newBufferStream()
       var conn1 = newConnection(buf1)
-      conn1.peerInfo = some(gossip1.peerInfo)
+      conn1.peerInfo = gossip1.peerInfo

       var buf2 = newBufferStream()
       var conn2 = newConnection(buf2)
-      conn2.peerInfo = some(gossip2.peerInfo)
+      conn2.peerInfo = gossip2.peerInfo

       buf1 = buf1 | buf2 | buf1
@@ -100,11 +100,11 @@ suite "GossipSub":

       var buf1 = newBufferStream()
       var conn1 = newConnection(buf1)
-      conn1.peerInfo = some(gossip1.peerInfo)
+      conn1.peerInfo = gossip1.peerInfo

       var buf2 = newBufferStream()
       var conn2 = newConnection(buf2)
-      conn2.peerInfo = some(gossip2.peerInfo)
+      conn2.peerInfo = gossip2.peerInfo

       buf1 = buf1 | buf2 | buf1
@@ -40,7 +40,7 @@ suite "Identify":
       var peerInfo = PeerInfo.init(PrivateKey.random(RSA), @[ma])
       let identifyProto2 = newIdentify(peerInfo)
       discard await msDial.select(conn, IdentifyCodec)
-      let id = await identifyProto2.identify(conn, some(remotePeerInfo))
+      let id = await identifyProto2.identify(conn, remotePeerInfo)

       check id.pubKey.get() == remoteSecKey.getKey()
       check id.addrs[0] == ma
@@ -77,7 +77,7 @@ suite "Identify":
       var localPeerInfo = PeerInfo.init(PrivateKey.random(RSA), @[ma])
       let identifyProto2 = newIdentify(localPeerInfo)
       discard await msDial.select(conn, IdentifyCodec)
-      discard await identifyProto2.identify(conn, some(PeerInfo.init(PrivateKey.random(RSA))))
+      discard await identifyProto2.identify(conn, PeerInfo.init(PrivateKey.random(RSA)))
       await conn.close()

       expect IdentityNoMatchError:
@@ -59,8 +59,10 @@ proc readLp*(s: StreamTransport): Future[seq[byte]] {.async, gcsafe.} =
     result.setLen(size)
     if size > 0.uint:
       await s.readExactly(addr result[0], int(size))
-  except LPStreamIncompleteError, LPStreamReadError:
-    trace "remote connection ended unexpectedly", exc = getCurrentExceptionMsg()
+  except LPStreamIncompleteError as exc:
+    trace "remote connection ended unexpectedly", exc = exc.msg
+  except LPStreamReadError as exc:
+    trace "unable to read from remote connection", exc = exc.msg

 proc createNode*(privKey: Option[PrivateKey] = none(PrivateKey),
                  address: string = "/ip4/127.0.0.1/tcp/0",
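Splitting the combined `except A, B:` clause lets each branch bind its exception with `as exc` and log a message specific to the failure, instead of one generic message via `getCurrentExceptionMsg()`. A standalone sketch with stand-in exception types:

```nim
type
  IncompleteErrToy = object of CatchableError   # stand-ins for the LPStream errors
  ReadErrToy = object of CatchableError

proc readFrame() =
  raise newException(ReadErrToy, "socket went away")

try:
  readFrame()
except IncompleteErrToy as exc:
  echo "remote connection ended unexpectedly: ", exc.msg
except ReadErrToy as exc:
  echo "unable to read from remote connection: ", exc.msg
```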
@@ -268,7 +268,7 @@ suite "Mplex":
       await chann.close()
       await chann.write("Hello")

-    expect LPStreamClosedError:
+    expect LPStreamEOFError:
       waitFor(testClosedForWrite())

   test "half closed - channel should close for read by remote":
@@ -281,7 +281,7 @@ suite "Mplex":
       discard await chann.read() # this should work, since there is data in the buffer
       discard await chann.read() # this should throw

-    expect LPStreamClosedError:
+    expect LPStreamEOFError:
       waitFor(testClosedForRead())

   test "reset - channel should fail reading":
@@ -291,7 +291,7 @@ suite "Mplex":
       await chann.reset()
       asyncDiscard chann.read()

-    expect LPStreamClosedError:
+    expect LPStreamEOFError:
       waitFor(testResetRead())

   test "reset - channel should fail writing":
@@ -301,7 +301,7 @@ suite "Mplex":
       await chann.reset()
       await chann.write(cast[seq[byte]]("Hello!"))

-    expect LPStreamClosedError:
+    expect LPStreamEOFError:
       waitFor(testResetWrite())

   test "should not allow pushing data to channel when remote end closed":
@@ -311,5 +311,5 @@ suite "Mplex":
       await chann.closedByRemote()
       await chann.pushTo(@[byte(1)])

-    expect LPStreamClosedError:
+    expect LPStreamEOFError:
       waitFor(testResetWrite())
@@ -30,24 +30,24 @@ suite "PeerInfo":
     check peerId == peerInfo.peerId
     check seckey.getKey == peerInfo.publicKey.get()

-  test "Should return none on missing public key":
+  test "Should init from CIDv0 string":
+    var peerInfo = PeerInfo.init("QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N")
+
+    check:
+      PeerID.init("QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") == peerInfo.peerId
+
+  # TODO: CIDv1 is handling is missing from PeerID
+  # https://github.com/status-im/nim-libp2p/issues/53
+  # test "Should init from CIDv1 string":
+  #   var peerInfo = PeerInfo.init("bafzbeie5745rpv2m6tjyuugywy4d5ewrqgqqhfnf445he3omzpjbx5xqxe")
+
+  #   check:
+  #     PeerID.init("bafzbeie5745rpv2m6tjyuugywy4d5ewrqgqqhfnf445he3omzpjbx5xqxe") == peerInfo.peerId
+
+  test "Should return none if pubkey is missing from id":
     let peerInfo = PeerInfo.init(PeerID.init(PrivateKey.random(RSA)))
     check peerInfo.publicKey.isNone

-  test "Should allow assigning public key":
-    let key = PrivateKey.random(RSA)
-
-    let peerInfo = PeerInfo.init(PeerID.init(key))
-    peerInfo.publicKey = key.getKey()
-    check peerInfo.publicKey.get() == key.getKey()
-
-  test "Should throw on invalid public key assignement":
-    proc throwsOnInvalidPubKey() =
-      let validKey = PrivateKey.random(RSA)
-      let invalidKey = PrivateKey.random(RSA)
-
-      let peerInfo = PeerInfo.init(PeerID.init(validKey))
-      peerInfo.publicKey = invalidKey.getKey()
-
-    expect InvalidPublicKeyException:
-      throwsOnInvalidPubKey()
+  test "Should return some if pubkey is present in id":
+    let peerInfo = PeerInfo.init(PeerID.init(PrivateKey.random(Ed25519)))
+    check peerInfo.publicKey.isSome