mirror of https://github.com/vacp2p/nim-libp2p.git

commit 1e598a0239 (parent 4ca1c2d7ed)
topicIDs -> topicIds; PubsubPeerEvent -> PubSubPeerEvent; messageIDs -> messageIds (#748)
@@ -139,7 +139,7 @@ method rpcHandler*(f: FloodSub,
         discard

     var toSendPeers = initHashSet[PubSubPeer]()
-    for t in msg.topicIDs: # for every topic in the message
+    for t in msg.topicIds: # for every topic in the message
       if t notin f.topics:
         continue
       f.floodsub.withValue(t, peers): toSendPeers.incl(peers[])
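Worth noting about this whole commit: Nim identifiers are style-insensitive after their first character, so topicIDs and topicIds (likewise messageIDs/messageIds) resolve to the same symbol and the rename is purely cosmetic — no call site can break. A standalone sketch (not from the diff) showing the rule:

# Nim compares identifiers case- and underscore-insensitively past the
# first character, so all three spellings below name the same variable.
let topicIds = @["libp2p", "gossip"]
echo topicIDs.len   # same identifier as topicIds; prints 2
echo topic_ids[0]   # underscores are ignored too; prints "libp2p"

Only the leading-character case is significant, which is why PubsubPeerEvent -> PubSubPeerEvent (an uppercase change mid-word) is also accepted everywhere without a deprecation shim.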
@@ -158,7 +158,7 @@ method onNewPeer(g: GossipSub, peer: PubSubPeer) =
   peer.iWantBudget = IWantPeerBudget
   peer.iHaveBudget = IHavePeerBudget

-method onPubSubPeerEvent*(p: GossipSub, peer: PubSubPeer, event: PubsubPeerEvent) {.gcsafe.} =
+method onPubSubPeerEvent*(p: GossipSub, peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe.} =
   case event.kind
   of PubSubPeerEventKind.Connected:
     discard
@@ -261,7 +261,7 @@ proc handleControl(g: GossipSub, peer: PubSubPeer, control: ControlMessage) =

   var respControl: ControlMessage
   let iwant = g.handleIHave(peer, control.ihave)
-  if iwant.messageIDs.len > 0:
+  if iwant.messageIds.len > 0:
     respControl.iwant.add(iwant)
   respControl.prune.add(g.handleGraft(peer, control.graft))
   let messages = g.handleIWant(peer, control.iwant)
@@ -273,7 +273,7 @@ proc handleControl(g: GossipSub, peer: PubSubPeer, control: ControlMessage) =
     # iwant and prunes from here, also messages

     for smsg in messages:
-      for topic in smsg.topicIDs:
+      for topic in smsg.topicIds:
         if g.knownTopics.contains(topic):
           libp2p_pubsub_broadcast_messages.inc(labelValues = [topic])
         else:
@@ -307,7 +307,7 @@ proc validateAndRelay(g: GossipSub,
   of ValidationResult.Reject:
     debug "Dropping message after validation, reason: reject",
       msgId = shortLog(msgId), peer
-    g.punishInvalidMessage(peer, msg.topicIDs)
+    g.punishInvalidMessage(peer, msg.topicIds)
     return
   of ValidationResult.Ignore:
     debug "Dropping message after validation, reason: ignore",
@@ -319,10 +319,10 @@ proc validateAndRelay(g: GossipSub,
   # store in cache only after validation
   g.mcache.put(msgId, msg)

-  g.rewardDelivered(peer, msg.topicIDs, true)
+  g.rewardDelivered(peer, msg.topicIds, true)

   var toSendPeers = HashSet[PubSubPeer]()
-  for t in msg.topicIDs: # for every topic in the message
+  for t in msg.topicIds: # for every topic in the message
     if t notin g.topics:
       continue
@@ -338,7 +338,7 @@ proc validateAndRelay(g: GossipSub,
   # also have to be careful to only include validated messages
   g.broadcast(toSendPeers, RPCMsg(messages: @[msg]))
   trace "forwared message to peers", peers = toSendPeers.len, msgId, peer
-  for topic in msg.topicIDs:
+  for topic in msg.topicIds:
     if topic notin g.topics: continue

     if g.knownTopics.contains(topic):
@@ -392,7 +392,7 @@ method rpcHandler*(g: GossipSub,

       if not alreadyReceived:
         let delay = Moment.now() - g.firstSeen(msgId)
-        g.rewardDelivered(peer, msg.topicIDs, false, delay)
+        g.rewardDelivered(peer, msg.topicIds, false, delay)

       libp2p_gossipsub_duplicate.inc()

@@ -402,7 +402,7 @@ method rpcHandler*(g: GossipSub,
     libp2p_gossipsub_received.inc()

     # avoid processing messages we are not interested in
-    if msg.topicIDs.allIt(it notin g.topics):
+    if msg.topicIds.allIt(it notin g.topics):
       debug "Dropping message of topic without subscription", msgId = shortLog(msgId), peer
       continue

@@ -410,14 +410,14 @@ method rpcHandler*(g: GossipSub,
       # always validate if signature is present or required
       debug "Dropping message due to failed signature verification",
         msgId = shortLog(msgId), peer
-      g.punishInvalidMessage(peer, msg.topicIDs)
+      g.punishInvalidMessage(peer, msg.topicIds)
       continue

     if msg.seqno.len > 0 and msg.seqno.len != 8:
       # if we have seqno should be 8 bytes long
       debug "Dropping message due to invalid seqno length",
         msgId = shortLog(msgId), peer
-      g.punishInvalidMessage(peer, msg.topicIDs)
+      g.punishInvalidMessage(peer, msg.topicIds)
       continue

     # g.anonymize needs no evaluation when receiving messages
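The seqno length guard above follows the pubsub wire format: when a sequence number is present it is a 64-bit big-endian counter, hence exactly 8 bytes. A minimal sketch of such an encoder (encodeSeqno is illustrative, not a library proc):

import std/endians

proc encodeSeqno(n: uint64): seq[byte] =
  # produce the 8-byte big-endian form that the length check expects
  result = newSeq[byte](8)
  var x = n
  bigEndian64(addr result[0], addr x)

assert encodeSeqno(1).len == 8   # passes the `len != 8` guard above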
@@ -246,21 +246,21 @@ proc handleIHave*(g: GossipSub,
     let deIhaves = ihaves.deduplicate()
     for ihave in deIhaves:
       trace "peer sent ihave",
-        peer, topic = ihave.topicId, msgs = ihave.messageIDs
+        peer, topic = ihave.topicId, msgs = ihave.messageIds
       if ihave.topicId in g.mesh:
         # also avoid duplicates here!
-        let deIhavesMsgs = ihave.messageIDs.deduplicate()
+        let deIhavesMsgs = ihave.messageIds.deduplicate()
         for msgId in deIhavesMsgs:
           if not g.hasSeen(msgId):
             if peer.iHaveBudget > 0:
-              res.messageIDs.add(msgId)
+              res.messageIds.add(msgId)
               dec peer.iHaveBudget
               trace "requested message via ihave", messageID=msgId
             else:
               break
   # shuffling res.messageIDs before sending it out to increase the likelihood
   # of getting an answer if the peer truncates the list due to internal size restrictions.
-  g.rng.shuffle(res.messageIDs)
+  g.rng.shuffle(res.messageIds)
   return res

 proc handleIWant*(g: GossipSub,
@@ -274,7 +274,7 @@ proc handleIWant*(g: GossipSub,
   else:
     let deIwants = iwants.deduplicate()
     for iwant in deIwants:
-      let deIwantsMsgs = iwant.messageIDs.deduplicate()
+      let deIwantsMsgs = iwant.messageIds.deduplicate()
       for mid in deIwantsMsgs:
         trace "peer sent iwant", peer, messageID = mid
         let msg = g.mcache.get(mid)
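Both handleIHave and handleIWant lean on deduplicate so a peer repeating the same id inside one control message cannot amplify work; the proc comes from std/sequtils and preserves first-seen order:

import std/sequtils

# Repeated ihave/iwant ids collapse to a single entry.
let ids = @["m1", "m1", "m2", "m1"]
assert ids.deduplicate() == @["m1", "m2"]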
@@ -17,7 +17,7 @@ export sets, tables, messages, options
 type
   CacheEntry* = object
     mid*: MessageId
-    topicIDs*: seq[string]
+    topicIds*: seq[string]

   MCache* = object of RootObj
     msgs*: Table[MessageId, Message]
@@ -37,7 +37,7 @@ func contains*(c: MCache, mid: MessageId): bool =
 func put*(c: var MCache, msgId: MessageId, msg: Message) =
   if not c.msgs.hasKeyOrPut(msgId, msg):
     # Only add cache entry if the message was not already in the cache
-    c.history[0].add(CacheEntry(mid: msgId, topicIDs: msg.topicIDs))
+    c.history[0].add(CacheEntry(mid: msgId, topicIds: msg.topicIds))

 func window*(c: MCache, topic: string): HashSet[MessageId] =
   let
@@ -45,7 +45,7 @@ func window*(c: MCache, topic: string): HashSet[MessageId] =

   for i in 0..<len:
     for entry in c.history[i]:
-      for t in entry.topicIDs:
+      for t in entry.topicIds:
         if t == topic:
           result.incl(entry.mid)
           break
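For context, a rough usage sketch of the cache touched above; the MCache.init arity follows the tests later in this diff, and MessageId is assumed to be a byte sequence:

import libp2p/protocols/pubsub/mcache
import libp2p/protocols/pubsub/rpc/messages

var cache = MCache.init(3, 5)          # window and history sizes, as in the tests below
let msg = Message(topicIds: @["foo"])
cache.put(@[1'u8, 2, 3], msg)          # keyed by a MessageId (seq[byte])
assert cache.window("foo").len == 1    # the fresh entry is visible on topic "foo"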
@@ -163,7 +163,7 @@ proc broadcast*(
       libp2p_pubsub_broadcast_unsubscriptions.inc(npeers, labelValues = ["generic"])

   for smsg in msg.messages:
-    for topic in smsg.topicIDs:
+    for topic in smsg.topicIds:
       if p.knownTopics.contains(topic):
         libp2p_pubsub_broadcast_messages.inc(npeers, labelValues = [topic])
       else:
@@ -236,8 +236,8 @@ proc updateMetrics*(p: PubSub, rpcMsg: RPCMsg) =

   for i in 0..<rpcMsg.messages.len():
     template smsg: untyped = rpcMsg.messages[i]
-    for j in 0..<smsg.topicIDs.len():
-      template topic: untyped = smsg.topicIDs[j]
+    for j in 0..<smsg.topicIds.len():
+      template topic: untyped = smsg.topicIds[j]
       if p.knownTopics.contains(topic):
         libp2p_pubsub_received_messages.inc(labelValues = [topic])
       else:
@@ -270,7 +270,7 @@ method rpcHandler*(p: PubSub,

 method onNewPeer(p: PubSub, peer: PubSubPeer) {.base.} = discard

-method onPubSubPeerEvent*(p: PubSub, peer: PubSubPeer, event: PubsubPeerEvent) {.base, gcsafe.} =
+method onPubSubPeerEvent*(p: PubSub, peer: PubSubPeer, event: PubSubPeerEvent) {.base, gcsafe.} =
   # Peer event is raised for the send connection in particular
   case event.kind
   of PubSubPeerEventKind.Connected:
@@ -297,7 +297,7 @@ proc getOrCreatePeer*(
       trace "Failed to close connection", peer, error = exc.name, msg = exc.msg
     asyncSpawn dropConnAsync(peer)

-  proc onEvent(peer: PubSubPeer, event: PubsubPeerEvent) {.gcsafe.} =
+  proc onEvent(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe.} =
     p.onPubSubPeerEvent(peer, event)

   # create new pubsub peer
@@ -520,11 +520,11 @@ method removeValidator*(p: PubSub,
 method validate*(p: PubSub, message: Message): Future[ValidationResult] {.async, base.} =
   var pending: seq[Future[ValidationResult]]
   trace "about to validate message"
-  for topic in message.topicIDs:
-    trace "looking for validators on topic", topicID = topic,
+  for topic in message.topicIds:
+    trace "looking for validators on topic", topicId = topic,
       registered = toSeq(p.validators.keys)
     if topic in p.validators:
-      trace "running validators for topic", topicID = topic
+      trace "running validators for topic", topicId = topic
       for validator in p.validators[topic]:
         pending.add(validator(topic, message))

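The validate method above collects one future per validator registered for each of the message's topics and awaits their verdicts. A hedged sketch of registering one, assuming the usual addValidator entry point and p being a mounted PubSub instance:

import chronos
import libp2p/protocols/pubsub/pubsub
import libp2p/protocols/pubsub/rpc/messages

proc denyEmpty(topic: string, message: Message): Future[ValidationResult] {.async.} =
  # reject empty payloads, accept everything else
  if message.data.len == 0:
    return ValidationResult.Reject
  return ValidationResult.Accept

p.addValidator("my-topic", denyEmpty)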
@@ -39,12 +39,12 @@ type
     Connected
     Disconnected

-  PubsubPeerEvent* = object
+  PubSubPeerEvent* = object
     kind*: PubSubPeerEventKind

   GetConn* = proc(): Future[Connection] {.gcsafe, raises: [Defect].}
   DropConn* = proc(peer: PubSubPeer) {.gcsafe, raises: [Defect].} # have to pass peer as it's unknown during init
-  OnEvent* = proc(peer: PubSubPeer, event: PubsubPeerEvent) {.gcsafe, raises: [Defect].}
+  OnEvent* = proc(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe, raises: [Defect].}

   PubSubPeer* = ref object of RootObj
     getConn*: GetConn # callback to establish a new send connection
@@ -175,7 +175,7 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
     p.address = some(p.sendConn.observedAddr)

     if p.onEvent != nil:
-      p.onEvent(p, PubsubPeerEvent(kind: PubSubPeerEventKind.Connected))
+      p.onEvent(p, PubSubPeerEvent(kind: PubSubPeerEventKind.Connected))

     await handle(p, newConn)
   finally:
@@ -186,7 +186,7 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} =

   try:
     if p.onEvent != nil:
-      p.onEvent(p, PubsubPeerEvent(kind: PubSubPeerEventKind.Disconnected))
+      p.onEvent(p, PubSubPeerEvent(kind: PubSubPeerEventKind.Disconnected))
   except CancelledError as exc:
     raise exc
   except CatchableError as exc:
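connectOnce above is thus the single origin of Connected/Disconnected events. A minimal conforming OnEvent callback (countPeerEvent is hypothetical, written against the OnEvent signature from the type section):

import libp2p/protocols/pubsub/pubsubpeer

var connects, disconnects = 0   # plain ints keep the proc gcsafe

proc countPeerEvent(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe, raises: [Defect].} =
  case event.kind                # exhaustive over PubSubPeerEventKind
  of PubSubPeerEventKind.Connected: inc connects
  of PubSubPeerEventKind.Disconnected: inc disconnects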
@@ -33,7 +33,7 @@ type
     fromPeer*: PeerId
     data*: seq[byte]
     seqno*: seq[byte]
-    topicIDs*: seq[string]
+    topicIds*: seq[string]
     signature*: seq[byte]
     key*: seq[byte]

@@ -45,10 +45,10 @@ type

   ControlIHave* = object
     topicId*: string
-    messageIDs*: seq[MessageId]
+    messageIds*: seq[MessageId]

   ControlIWant* = object
-    messageIDs*: seq[MessageId]
+    messageIds*: seq[MessageId]

   ControlGraft* = object
     topicId*: string
@@ -71,12 +71,12 @@ func withSubs*(
 func shortLog*(s: ControlIHave): auto =
   (
     topicId: s.topicId.shortLog,
-    messageIDs: mapIt(s.messageIDs, it.shortLog)
+    messageIds: mapIt(s.messageIds, it.shortLog)
   )

 func shortLog*(s: ControlIWant): auto =
   (
-    messageIDs: mapIt(s.messageIDs, it.shortLog)
+    messageIds: mapIt(s.messageIds, it.shortLog)
   )

 func shortLog*(s: ControlGraft): auto =
@@ -102,7 +102,7 @@ func shortLog*(msg: Message): auto =
     fromPeer: msg.fromPeer.shortLog,
     data: msg.data.shortLog,
     seqno: msg.seqno.shortLog,
-    topicIDs: $msg.topicIDs,
+    topicIds: $msg.topicIds,
     signature: msg.signature.shortLog,
     key: msg.key.shortLog
   )
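These shortLog helpers return anonymous tuples that chronicles renders as compact structured log fields. A self-contained imitation of the ControlIWant variant, using stand-in types (the real shortLog for a MessageId abbreviates differently):

import std/sequtils

type FakeIWant = object
  messageIds: seq[string]          # stand-in for seq[MessageId]

func shortLog(s: string): string =
  s[0 ..< min(s.len, 6)]           # crude abbreviation, for the sketch only

func shortLog(s: FakeIWant): auto =
  (messageIds: mapIt(s.messageIds, it.shortLog))

assert shortLog(FakeIWant(messageIds: @["deadbeefcafe"])).messageIds == @["deadbe"]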
@@ -59,7 +59,7 @@ proc write*(pb: var ProtoBuffer, field: int, prune: ControlPrune) =
 proc write*(pb: var ProtoBuffer, field: int, ihave: ControlIHave) =
   var ipb = initProtoBuffer()
   ipb.write(1, ihave.topicId)
-  for mid in ihave.messageIDs:
+  for mid in ihave.messageIds:
     ipb.write(2, mid)
   ipb.finish()
   pb.write(field, ipb)
@@ -69,7 +69,7 @@ proc write*(pb: var ProtoBuffer, field: int, ihave: ControlIHave) =

 proc write*(pb: var ProtoBuffer, field: int, iwant: ControlIWant) =
   var ipb = initProtoBuffer()
-  for mid in iwant.messageIDs:
+  for mid in iwant.messageIds:
     ipb.write(1, mid)
   if len(ipb.buffer) > 0:
     ipb.finish()
@@ -109,7 +109,7 @@ proc encodeMessage*(msg: Message, anonymize: bool): seq[byte] =
   pb.write(2, msg.data)
   if len(msg.seqno) > 0 and not anonymize:
     pb.write(3, msg.seqno)
-  for topic in msg.topicIDs:
+  for topic in msg.topicIds:
     pb.write(4, topic)
   if len(msg.signature) > 0 and not anonymize:
     pb.write(5, msg.signature)
@@ -182,8 +182,8 @@ proc decodeIHave*(pb: ProtoBuffer): ProtoResult[ControlIHave] {.
     trace "decodeIHave: read topicId", topic_id = control.topicId
   else:
     trace "decodeIHave: topicId is missing"
-  if ? pb.getRepeatedField(2, control.messageIDs):
-    trace "decodeIHave: read messageIDs", message_ids = control.messageIDs
+  if ? pb.getRepeatedField(2, control.messageIds):
+    trace "decodeIHave: read messageIDs", message_ids = control.messageIds
   else:
     trace "decodeIHave: no messageIDs"
   ok(control)
@@ -194,8 +194,8 @@ proc decodeIWant*(pb: ProtoBuffer): ProtoResult[ControlIWant] {.inline.} =

   trace "decodeIWant: decoding message"
   var control = ControlIWant()
-  if ? pb.getRepeatedField(1, control.messageIDs):
-    trace "decodeIWant: read messageIDs", message_ids = control.messageIDs
+  if ? pb.getRepeatedField(1, control.messageIds):
+    trace "decodeIWant: read messageIDs", message_ids = control.messageIds
   else:
     trace "decodeIWant: no messageIDs"
   ok(control)
@@ -281,8 +281,8 @@ proc decodeMessage*(pb: ProtoBuffer): ProtoResult[Message] {.inline.} =
     trace "decodeMessage: read seqno", seqno = msg.seqno
   else:
     trace "decodeMessage: seqno is missing"
-  if ? pb.getRepeatedField(4, msg.topicIDs):
-    trace "decodeMessage: read topics", topic_ids = msg.topicIDs
+  if ? pb.getRepeatedField(4, msg.topicIds):
+    trace "decodeMessage: read topics", topic_ids = msg.topicIds
   else:
     trace "decodeMessage: topics are missing"
   if ? pb.getField(5, msg.signature):
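A hedged round-trip sketch for the IWANT codec above, assuming initProtoBuffer also accepts a raw byte buffer and that message ids live in field 1, as in the encoder:

import libp2p/protobuf/minprotobuf
import libp2p/protocols/pubsub/rpc/[messages, protobuf]

# Encode one message id into field 1, then decode it back.
var ipb = initProtoBuffer()
ipb.write(1, @[1'u8, 2, 3])     # a MessageId, written as bytes
ipb.finish()

let decoded = decodeIWant(initProtoBuffer(ipb.buffer))
assert decoded.isOk()
assert decoded.get().messageIds == @[@[1'u8, 2, 3]]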
@@ -690,7 +690,7 @@ suite "GossipSub internal":
       )
       peer.iHaveBudget = 0
       let iwants = gossipSub.handleIHave(peer, @[msg])
-      check: iwants.messageIDs.len == 0
+      check: iwants.messageIds.len == 0

     block:
       # given duplicate ihave should generate only one iwant
@@ -705,7 +705,7 @@ suite "GossipSub internal":
         messageIDs: @[id, id, id]
       )
       let iwants = gossipSub.handleIHave(peer, @[msg])
-      check: iwants.messageIDs.len == 1
+      check: iwants.messageIds.len == 1

     block:
       # given duplicate iwant should generate only one message
@@ -42,7 +42,7 @@ suite "MCache":
     check mids.len == 3

     var id = toSeq(mids)[0]
-    check mCache.get(id).get().topicIDs[0] == "foo"
+    check mCache.get(id).get().topicIds[0] == "foo"

   test "shift - shift 1 window at a time":
     var mCache = MCache.init(1, 5)
@@ -26,7 +26,7 @@ func defaultMsgIdProvider*(m: Message): Result[MessageId, ValidationResult] =
       # This part is irrelevant because it's not standard,
       # We use it exclusively for testing basically and users should
       # implement their own logic in the case they use anonymization
-      $m.data.hash & $m.topicIDs.hash
+      $m.data.hash & $m.topicIds.hash
   ok mid.toBytes()

 proc generateNodes*(