## Nim-LibP2P
## Copyright (c) 2019 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

import std/[tables, sequtils, sets]
import chronos, chronicles, metrics
import pubsubpeer,
       rpc/[message, messages],
       ../protocol,
       ../../stream/connection,
       ../../peerid,
       ../../peerinfo,
       ../../errors

export PubSubPeer
export PubSubObserver

logScope:
  topics = "pubsub"

declareGauge(libp2p_pubsub_peers, "pubsub peer instances")
declareGauge(libp2p_pubsub_topics, "pubsub subscribed topics")
declareCounter(libp2p_pubsub_validation_success, "pubsub successfully validated messages")
declareCounter(libp2p_pubsub_validation_failure, "pubsub messages that failed validation")
declarePublicCounter(libp2p_pubsub_messages_published, "published messages", labels = ["topic"])

type
  SendRes = tuple[published: seq[string], failed: seq[string]] # keep private

  TopicHandler* = proc(topic: string,
                       data: seq[byte]): Future[void] {.gcsafe.}

  ValidatorHandler* = proc(topic: string,
                           message: Message): Future[bool] {.gcsafe, closure.}

  TopicPair* = tuple[topic: string, handler: TopicHandler]

  MsgIdProvider* =
    proc(m: Message): string {.noSideEffect, raises: [Defect], nimcall, gcsafe.}

  Topic* = object
    name*: string
    handler*: seq[TopicHandler]

  PubSub* = ref object of LPProtocol
    peerInfo*: PeerInfo                          # this peer's info
    topics*: Table[string, Topic]                # local topics
    peers*: Table[string, PubSubPeer]            # peerid to peer map
    conns*: Table[PeerInfo, HashSet[Connection]] # peers connections
    triggerSelf*: bool                           # trigger own local handler on publish
    verifySignature*: bool                       # enable signature verification
    sign*: bool                                  # enable message signing
    cleanupLock: AsyncLock
    validators*: Table[string, HashSet[ValidatorHandler]]
    observers: ref seq[PubSubObserver]           # ref as in smart_ptr
    msgIdProvider*: MsgIdProvider                # Turn message into message id (not nil)
    msgSeqno*: uint64
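
# Minimal sketch (not part of this module's API): a custom MsgIdProvider can
# derive the id from the message payload. `hashMsgId` is a hypothetical name
# and std/hashes is used only for illustration - it is not collision-resistant.
#
#   import std/hashes
#
#   proc hashMsgId(m: Message): string {.noSideEffect, raises: [Defect], nimcall, gcsafe.} =
#     $hash(m.data) # illustration only
#
# Such a proc can be passed as `msgIdProvider` to `newPubSub` below; otherwise
# `defaultMsgIdProvider` is used.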

method handleDisconnect*(p: PubSub, peer: PubSubPeer) {.base.} =
  ## handle peer disconnects
  ##
  if not(isNil(peer)) and peer.peerInfo notin p.conns:
    trace "deleting peer", peer = peer.id
    p.peers.del(peer.id)
    trace "peer disconnected", peer = peer.id

    # metrics
    libp2p_pubsub_peers.set(p.peers.len.int64)

proc onConnClose(p: PubSub, conn: Connection) {.async.} =
  try:
    let peer = conn.peerInfo
    await conn.closeEvent.wait()

    if peer in p.conns:
      p.conns[peer].excl(conn)
      if p.conns[peer].len <= 0:
        p.conns.del(peer)

    if peer.id in p.peers:
      p.handleDisconnect(p.peers[peer.id])
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    trace "exception in onConnClose handler", exc = exc.msg

proc sendSubs*(p: PubSub,
               peer: PubSubPeer,
               topics: seq[string],
               subscribe: bool) {.async.} =
  ## send subscriptions to remote peer

  try:
    # wait for a connection before sending subscriptions:
    # the peer may have been registered before its connection finished setting up
    if not peer.onConnect.isSet:
      trace "awaiting send connection"
      await peer.onConnect.wait()

    await peer.sendSubOpts(topics, subscribe)
  except CancelledError as exc:
    if not(isNil(peer)) and not(isNil(peer.conn)):
      await peer.conn.close()

    raise exc
  except CatchableError as exc:
    trace "unable to send subscriptions", exc = exc.msg
    if not(isNil(peer)) and not(isNil(peer.conn)):
      await peer.conn.close()

method subscribeTopic*(p: PubSub,
                       topic: string,
                       subscribe: bool,
                       peerId: string) {.base, async.} =
  # called when remote peer subscribes to a topic
  discard

method rpcHandler*(p: PubSub,
                   peer: PubSubPeer,
                   rpcMsgs: seq[RPCMsg]) {.async, base.} =
  ## handle rpc messages
  trace "processing RPC message", peer = peer.id, msgs = rpcMsgs.len

  for m in rpcMsgs:                      # for all RPC messages
    trace "processing messages", msg = m.shortLog
    if m.subscriptions.len > 0:          # if there are any subscriptions
      for s in m.subscriptions:          # subscribe/unsubscribe the peer for each topic
        trace "about to subscribe to topic", topicId = s.topic
        await p.subscribeTopic(s.topic, s.subscribe, peer.id)

proc getOrCreatePeer(p: PubSub,
                     peerInfo: PeerInfo,
                     proto: string): PubSubPeer =
  if peerInfo.id in p.peers:
    return p.peers[peerInfo.id]

  # create new pubsub peer
  let peer = newPubSubPeer(peerInfo, proto)
  trace "created new pubsub peer", peerId = peer.id

  p.peers[peer.id] = peer
  peer.observers = p.observers

  # metrics
  libp2p_pubsub_peers.set(p.peers.len.int64)

  return peer

method handleConn*(p: PubSub,
                   conn: Connection,
                   proto: string) {.base, async.} =
  ## handle incoming connections
  ##
  ## this proc will:
  ## 1) register a new PubSubPeer for the connection
  ## 2) register a handler with the peer;
  ##    this handler gets called on every rpc message
  ##    that the peer receives
  ## 3) ask the peer to subscribe us to every topic
  ##    that we're interested in
  ##

  if isNil(conn.peerInfo):
    trace "no valid PeerId for peer"
    await conn.close()
    return

  # track connection
  p.conns.mgetOrPut(conn.peerInfo,
    initHashSet[Connection]())
    .incl(conn)

  asyncCheck p.onConnClose(conn)

  proc handler(peer: PubSubPeer, msgs: seq[RPCMsg]) {.async.} =
    # call pubsub rpc handler
    await p.rpcHandler(peer, msgs)

  let peer = p.getOrCreatePeer(conn.peerInfo, proto)

  if p.topics.len > 0:
    await p.sendSubs(peer, toSeq(p.topics.keys), true)

  try:
    peer.handler = handler
    await peer.handle(conn) # spawn peer read loop
    trace "pubsub peer handler ended", peer = peer.id
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    trace "exception occurred in pubsub handle", exc = exc.msg
  finally:
    await conn.close()

method subscribePeer*(p: PubSub, conn: Connection) {.base.} =
  if not(isNil(conn)):
    trace "subscribing to peer", peerId = conn.peerInfo.id

    # track connection
    p.conns.mgetOrPut(conn.peerInfo,
      initHashSet[Connection]())
      .incl(conn)

    asyncCheck p.onConnClose(conn)

    let peer = p.getOrCreatePeer(conn.peerInfo, p.codec)
    if not peer.connected:
      peer.conn = conn

method unsubscribePeer*(p: PubSub, peerInfo: PeerInfo) {.base, async.} =
  if peerInfo.id in p.peers:
    let peer = p.peers[peerInfo.id]

    trace "unsubscribing from peer", peerId = $peerInfo
    if not(isNil(peer)) and not(isNil(peer.conn)):
      await peer.conn.close()

proc connected*(p: PubSub, peerInfo: PeerInfo): bool =
  if peerInfo.id in p.peers:
    let peer = p.peers[peerInfo.id]

    if not(isNil(peer)):
      return peer.connected

method unsubscribe*(p: PubSub,
                    topics: seq[TopicPair]) {.base, async.} =
  ## unsubscribe from a list of ``(topic, handler)`` pairs
  for t in topics:
    for i, h in p.topics[t.topic].handler:
      if h == t.handler:
        p.topics[t.topic].handler.del(i)
        break

    # make sure we delete the topic if
    # no more handlers are left
    if p.topics[t.topic].handler.len <= 0:
      p.topics.del(t.topic)

      # metrics
      libp2p_pubsub_topics.set(p.topics.len.int64)

proc unsubscribe*(p: PubSub,
                  topic: string,
                  handler: TopicHandler): Future[void] =
  ## unsubscribe from a ``topic`` string
  ##
  p.unsubscribe(@[(topic, handler)])

method unsubscribeAll*(p: PubSub, topic: string) {.base, async.} =
  p.topics.del(topic)
  libp2p_pubsub_topics.set(p.topics.len.int64)

method subscribe*(p: PubSub,
                  topic: string,
                  handler: TopicHandler) {.base, async.} =
  ## subscribe to a topic
  ##
  ## ``topic``   - a string topic to subscribe to
  ##
  ## ``handler`` - a user provided proc that will be
  ##               triggered on every received message
  ##
  if topic notin p.topics:
    trace "subscribing to topic", name = topic
    p.topics[topic] = Topic(name: topic)

  p.topics[topic].handler.add(handler)

  var sent: seq[Future[void]]
  for peer in toSeq(p.peers.values):
    sent.add(p.sendSubs(peer, @[topic], true))

  checkFutures(await allFinished(sent))

  # metrics
  libp2p_pubsub_topics.set(p.topics.len.int64)
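
# Typical usage (sketch, placeholder names): `pubsub` would be an instance of a
# concrete implementation such as FloodSub or GossipSub, and `onMessage` a
# hypothetical user handler matching TopicHandler.
#
#   proc onMessage(topic: string, data: seq[byte]) {.async.} =
#     echo "received ", data.len, " bytes on ", topic
#
#   await pubsub.subscribe("my-topic", onMessage)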

proc sendHelper*(p: PubSub,
                 sendPeers: HashSet[PubSubPeer],
                 msgs: seq[Message]): Future[SendRes] {.async.} =
  var sent: seq[tuple[id: string, fut: Future[void]]]
  for sendPeer in sendPeers:
    # avoid sending to self
    if sendPeer.peerInfo == p.peerInfo:
      continue

    trace "sending messages to peer", peer = sendPeer.id, msgs
    sent.add((id: sendPeer.id, fut: sendPeer.send(RPCMsg(messages: msgs))))

  var published: seq[string]
  var failed: seq[string]
  let futs = await allFinished(sent.mapIt(it.fut))
  for s in futs:
    let f = sent.filterIt(it.fut == s)
    if f.len > 0:
      if s.failed:
        trace "sending messages to peer failed", peer = f[0].id
        failed.add(f[0].id)
      else:
        trace "sending messages to peer succeeded", peer = f[0].id
        published.add(f[0].id)

  return (published, failed)

proc publishHelper*(p: PubSub,
                    sendPeers: HashSet[PubSubPeer],
                    msgs: seq[Message]): Future[int] {.async.} =
  # send messages and cleanup failed peers
  let (published, failed) = await p.sendHelper(sendPeers, msgs)
  for f in failed:
    let peer = p.peers.getOrDefault(f)
    if not(isNil(peer)) and not(isNil(peer.conn)):
      await peer.conn.close()

  return published.len

method publish*(p: PubSub,
                topic: string,
                data: seq[byte]): Future[int] {.base, async.} =
  ## publish to a ``topic``
  if p.triggerSelf and topic in p.topics:
    for h in p.topics[topic].handler:
      trace "triggering handler", topicID = topic
      try:
        await h(topic, data)
      except CancelledError as exc:
        raise exc
      except CatchableError as exc:
        # TODO these exceptions are ignored since it's likely that if writes
        # are failing, the underlying connection is already closed - this needs
        # more cleanup though
        debug "Could not write to pubsub connection", msg = exc.msg

  return 0
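
# Usage sketch (placeholder names; `toBytes` assumed from stew/byteutils):
# `publish` returns the number of peers the message was handed to, which
# callers may want to check.
#
#   let sent = await pubsub.publish("my-topic", "hello world".toBytes())
#   if sent == 0:
#     debug "message was not sent to any peer"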

method initPubSub*(p: PubSub) {.base.} =
  ## perform pubsub initialization
  p.observers = new(seq[PubSubObserver])
  if p.msgIdProvider == nil:
    p.msgIdProvider = defaultMsgIdProvider

method start*(p: PubSub) {.async, base.} =
  ## start pubsub
  discard

method stop*(p: PubSub) {.async, base.} =
  ## stop pubsub
  discard

method addValidator*(p: PubSub,
                     topic: varargs[string],
                     hook: ValidatorHandler) {.base.} =
  for t in topic:
    if t notin p.validators:
      p.validators[t] = initHashSet[ValidatorHandler]()

    trace "adding validator for topic", topicId = t
    p.validators[t].incl(hook)
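
# Validator sketch (placeholder names): a ValidatorHandler returns true to
# accept a message and false to drop it; the size limit below is arbitrary.
#
#   proc sizeValidator(topic: string, message: Message): Future[bool] {.async.} =
#     return message.data.len <= 1024
#
#   pubsub.addValidator("my-topic", sizeValidator)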

method removeValidator*(p: PubSub,
                        topic: varargs[string],
                        hook: ValidatorHandler) {.base.} =
  for t in topic:
    if t in p.validators:
      p.validators[t].excl(hook)

method validate*(p: PubSub, message: Message): Future[bool] {.async, base.} =
  var pending: seq[Future[bool]]
  trace "about to validate message"
  for topic in message.topicIDs:
    trace "looking for validators on topic", topicID = topic,
                                             registered = toSeq(p.validators.keys)
    if topic in p.validators:
      trace "running validators for topic", topicID = topic
      # TODO: add timeout to validator
      pending.add(p.validators[topic].mapIt(it(topic, message)))

  let futs = await allFinished(pending)
  result = futs.allIt(not it.failed and it.read())
  if result:
    libp2p_pubsub_validation_success.inc()
  else:
    libp2p_pubsub_validation_failure.inc()

proc newPubSub*(P: typedesc[PubSub],
                peerInfo: PeerInfo,
                triggerSelf: bool = false,
                verifySignature: bool = true,
                sign: bool = true,
                msgIdProvider: MsgIdProvider = defaultMsgIdProvider): P =
  result = P(peerInfo: peerInfo,
             triggerSelf: triggerSelf,
             verifySignature: verifySignature,
             sign: sign,
             cleanupLock: newAsyncLock(),
             msgIdProvider: msgIdProvider)
  result.initPubSub()
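
# Construction sketch (placeholder names): `newPubSub` is normally called with a
# concrete implementation such as FloodSub or GossipSub rather than the PubSub
# base type.
#
#   let pubsub = newPubSub(FloodSub,
#                          peerInfo = myPeerInfo,  # this node's PeerInfo
#                          triggerSelf = true)     # also run local handlers on publish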

proc addObserver*(p: PubSub; observer: PubSubObserver) =
  p.observers[] &= observer

proc removeObserver*(p: PubSub; observer: PubSubObserver) =
  let idx = p.observers[].find(observer)
  if idx != -1:
    p.observers[].del(idx)