# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.push raises: [].}

import std/[sets, hashes, tables]
import chronos, chronicles, metrics
import ./pubsub,
       ./pubsubpeer,
       ./timedcache,
       ./peertable,
       ./rpc/[message, messages],
       ../../crypto/crypto,
       ../../stream/connection,
       ../../peerid,
       ../../peerinfo,
       ../../utility

## Simple flood-based publishing.
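##
## Every accepted message is forwarded to all peers known to be subscribed to
## one of the message's topics; a salted, time-limited ``seen`` cache keeps
## the same message from being processed and forwarded twice.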

logScope:
  topics = "libp2p floodsub"

const FloodSubCodec* = "/floodsub/1.0.0"

type
  FloodSub* {.public.} = ref object of PubSub
    floodsub*: PeerTable          # topic to remote peer map
    seen*: TimedCache[MessageId]  # message IDs already seen on the network
    seenSalt*: seq[byte]

proc hasSeen*(f: FloodSub, msgId: MessageId): bool =
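  ## Check whether `msgId` (salted with `seenSalt`) is already in the seen cache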
  f.seenSalt & msgId in f.seen

proc addSeen*(f: FloodSub, msgId: MessageId): bool =
  # Salting the seen hash helps avoid attacks against the hash function used
  # in the Nim hash table
  # Returns true if the message has already been seen
  f.seen.put(f.seenSalt & msgId)

proc firstSeen*(f: FloodSub, msgId: MessageId): Moment =
  f.seen.addedAt(f.seenSalt & msgId)

proc handleSubscribe*(f: FloodSub,
                      peer: PubSubPeer,
                      topic: string,
                      subscribe: bool) =
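  ## Add `peer` to, or remove it from, the peer table entry for `topic`,
  ## depending on the value of `subscribe`.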
  logScope:
    peer
    topic

  # This is a workaround for a race condition that can happen if we
  # disconnect the peer very early; in the future we might use this as
  # a test case and eventually remove the workaround
  if subscribe and peer.peerId notin f.peers:
    trace "ignoring unknown peer"
    return

  if subscribe and not(isNil(f.subscriptionValidator)) and not(f.subscriptionValidator(topic)):
    # this is a violation, so a warning is in order
    warn "ignoring invalid topic subscription", topic, peer
    return

  if subscribe:
    trace "adding subscription for topic", peer, topic

    # subscribe the peer to the topic
    f.floodsub.mgetOrPut(topic, HashSet[PubSubPeer]()).incl(peer)
  else:
    f.floodsub.withValue(topic, peers):
      trace "removing subscription for topic", peer, topic

      # unsubscribe the peer from the topic
      peers[].excl(peer)

method unsubscribePeer*(f: FloodSub, peer: PeerId) =
  ## handle peer disconnects
  ##
  trace "unsubscribing floodsub peer", peer
  let pubSubPeer = f.peers.getOrDefault(peer)
  if pubSubPeer.isNil:
    return

  for _, v in f.floodsub.mpairs():
    v.excl(pubSubPeer)

  procCall PubSub(f).unsubscribePeer(peer)

method rpcHandler*(f: FloodSub,
                   peer: PubSubPeer,
                   rpcMsg: RPCMsg) {.async.} =
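  # Process subscription updates first, then run every message through the
  # same pipeline: message-id generation, duplicate filtering via the seen
  # cache, signature and seqno checks, application-level validation and,
  # finally, flooding to all peers subscribed to the message's topics.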
  for i in 0..<min(f.topicsHigh, rpcMsg.subscriptions.len):
    template sub: untyped = rpcMsg.subscriptions[i]
    f.handleSubscribe(peer, sub.topic, sub.subscribe)

  for msg in rpcMsg.messages:                       # for every message
    let msgIdResult = f.msgIdProvider(msg)
    if msgIdResult.isErr:
      debug "Dropping message due to failed message id generation",
        error = msgIdResult.error
      # TODO: descore peers due to error during message validation (malicious?)
      continue

    let msgId = msgIdResult.get

    if f.addSeen(msgId):
      trace "Dropping already-seen message", msgId, peer
      continue

    if (msg.signature.len > 0 or f.verifySignature) and not msg.verify():
      # always validate if signature is present or required
      debug "Dropping message due to failed signature verification", msgId, peer
      continue

    if msg.seqno.len > 0 and msg.seqno.len != 8:
      # if a seqno is present, it must be 8 bytes long
      debug "Dropping message due to invalid seqno length", msgId, peer
      continue

    # f.anonymize needs no evaluation when receiving messages
    # as we have a "lax" policy and allow signed messages

    let validation = await f.validate(msg)
    case validation
    of ValidationResult.Reject:
      debug "Dropping message after validation, reason: reject", msgId, peer
      continue
    of ValidationResult.Ignore:
      debug "Dropping message after validation, reason: ignore", msgId, peer
      continue
    of ValidationResult.Accept:
      discard

    var toSendPeers = initHashSet[PubSubPeer]()
    for t in msg.topicIds:                  # for every topic in the message
      if t notin f.topics:
        continue
      f.floodsub.withValue(t, peers): toSendPeers.incl(peers[])

      await handleData(f, t, msg.data)

    # In theory, if topics are the same in all messages, we could batch - we'd
    # also have to be careful to only include validated messages
    f.broadcast(toSendPeers, RPCMsg(messages: @[msg]))
    trace "Forwarded message to peers", peers = toSendPeers.len

  f.updateMetrics(rpcMsg)

method init*(f: FloodSub) =
  proc handler(conn: Connection, proto: string) {.async.} =
    ## main protocol handler that gets triggered on every
    ## connection for a protocol string
    ## e.g. ``/floodsub/1.0.0``, etc...
    ##
    try:
      await f.handleConn(conn, proto)
    except CancelledError:
      # This is a top-level procedure that runs as a separate task, so it
      # does not need to propagate CancelledError.
      trace "Unexpected cancellation in floodsub handler", conn
    except CatchableError as exc:
      trace "FloodSub handler leaks an error", exc = exc.msg, conn

  f.handler = handler
  f.codec = FloodSubCodec

method publish*(f: FloodSub,
                topic: string,
                data: seq[byte]): Future[int] {.async.} =
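  ## Build a message for `topic` from `data`, then flood it to every peer
  ## known to be subscribed to the topic; returns the number of peers the
  ## message was broadcast to.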
  # the base implementation always returns 0
  discard await procCall PubSub(f).publish(topic, data)

  trace "Publishing message on topic", data = data.shortLog, topic

  if topic.len <= 0: # data could be 0/empty
    debug "Empty topic, skipping publish", topic
    return 0

  let peers = f.floodsub.getOrDefault(topic)

  if peers.len == 0:
    debug "No peers for topic, skipping publish", topic
    return 0

  let
    msg =
      if f.anonymize:
        Message.init(none(PeerInfo), data, topic, none(uint64), false)
      else:
        inc f.msgSeqno
        Message.init(some(f.peerInfo), data, topic, some(f.msgSeqno), f.sign)
    msgId = f.msgIdProvider(msg).valueOr:
      trace "Error generating message id, skipping publish",
        error = error
      return 0

  trace "Created new message",
    msg = shortLog(msg), peers = peers.len, topic, msgId

  if f.addSeen(msgId):
    # custom msgid providers might cause this
    trace "Dropping already-seen message", msgId, topic
    return 0

  # Try to send to all peers that are known to be interested
  f.broadcast(peers, RPCMsg(messages: @[msg]))

  when defined(libp2p_expensive_metrics):
    libp2p_pubsub_messages_published.inc(labelValues = [topic])

  trace "Published message to peers", msgId, topic

  return peers.len

method initPubSub*(f: FloodSub)
  {.raises: [InitializationError].} =
  procCall PubSub(f).initPubSub()
  f.seen = TimedCache[MessageId].init(2.minutes)
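  # Randomise the salt so seen-cache keys are not predictable to remote peers
  # (see the note on salting in `addSeen`)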
  f.seenSalt = newSeqUninitialized[byte](sizeof(Hash))
  hmacDrbgGenerate(f.rng[], f.seenSalt)

  f.init()