--styleCheck:error (#743)
* --styleCheck:error
* PeerID -> PeerId
* other libp2p styleCheck changes
* MessageID -> messageId; topicID -> topicId
This commit is contained in:
parent 83ad890535
commit 4ca1c2d7ed
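For context on the diff below: Nim compares identifiers case-insensitively after the first character, so `PeerID` and `PeerId` name the same symbol. `--styleCheck:usages` makes the compiler report any usage whose spelling differs from the declaration, and `--styleCheck:error` turns those reports into compile errors, which is why the spellings are unified in this commit. A minimal sketch of the behaviour, assuming a throwaway file `stylecheck_demo.nim` that is not part of the repository:

# stylecheck_demo.nim -- illustrative only
# nim c --styleCheck:usages --styleCheck:error stylecheck_demo.nim
type
  MessageId = seq[byte]            # declared spelling

proc firstByte(msgId: MessageId): byte =
  msgId[0]

let m: MessageId = @[1'u8, 2, 3]
echo firstByte(m)                  # OK: spellings match the declarations
# let n: MessageID = @[4'u8]       # same symbol to the compiler, but rejected
#                                  # under --styleCheck:error
# echo firstbyte(m)                # likewise rejected: usage differs from "firstByte"
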
@@ -18,12 +18,18 @@ requires "nim >= 1.2.0",
          "stew#head",
          "websock"

+const styleCheckStyle =
+  if (NimMajor, NimMinor) < (1, 6):
+    "hint"
+  else:
+    "error"
+
 const nimflags =
   "--verbosity:0 --hints:off " &
   "--warning[CaseTransition]:off --warning[ObservableStores]:off " &
   "--warning[LockLevel]:off " &
   "-d:chronosStrictException " &
-  "--styleCheck:usages --styleCheck:hint "
+  "--styleCheck:usages --styleCheck:" & styleCheckStyle & " "

 proc runTest(filename: string, verify: bool = true, sign: bool = true,
              moreoptions: string = "") =
@@ -55,9 +55,9 @@ type
     buffer*: seq[byte]

   EcCurveKind* = enum
-    Secp256r1 = EC_SECP256R1,
-    Secp384r1 = EC_SECP384R1,
-    Secp521r1 = EC_SECP521R1
+    Secp256r1 = EC_secp256r1,
+    Secp384r1 = EC_secp384r1,
+    Secp521r1 = EC_secp521r1

   EcPKI* = EcPrivateKey | EcPublicKey | EcSignature
@@ -373,11 +373,11 @@ proc toBytes*(seckey: EcPrivateKey, data: var openArray[byte]): EcResult[int] =
     var p = Asn1Composite.init(Asn1Tag.Sequence)
     var c0 = Asn1Composite.init(0)
     var c1 = Asn1Composite.init(1)
-    if seckey.key.curve == EC_SECP256R1:
+    if seckey.key.curve == EC_secp256r1:
       c0.write(Asn1Tag.Oid, Asn1OidSecp256r1)
-    elif seckey.key.curve == EC_SECP384R1:
+    elif seckey.key.curve == EC_secp384r1:
       c0.write(Asn1Tag.Oid, Asn1OidSecp384r1)
-    elif seckey.key.curve == EC_SECP521R1:
+    elif seckey.key.curve == EC_secp521r1:
       c0.write(Asn1Tag.Oid, Asn1OidSecp521r1)
     c0.finish()
     offset = pubkey.getOffset()
@@ -421,11 +421,11 @@ proc toBytes*(pubkey: EcPublicKey, data: var openArray[byte]): EcResult[int] =
     var p = Asn1Composite.init(Asn1Tag.Sequence)
     var c = Asn1Composite.init(Asn1Tag.Sequence)
     c.write(Asn1Tag.Oid, Asn1OidEcPublicKey)
-    if pubkey.key.curve == EC_SECP256R1:
+    if pubkey.key.curve == EC_secp256r1:
       c.write(Asn1Tag.Oid, Asn1OidSecp256r1)
-    elif pubkey.key.curve == EC_SECP384R1:
+    elif pubkey.key.curve == EC_secp384r1:
       c.write(Asn1Tag.Oid, Asn1OidSecp384r1)
-    elif pubkey.key.curve == EC_SECP521R1:
+    elif pubkey.key.curve == EC_secp521r1:
       c.write(Asn1Tag.Oid, Asn1OidSecp521r1)
     c.finish()
     p.write(c)
@@ -913,11 +913,11 @@ proc toSecret*(pubkey: EcPublicKey, seckey: EcPrivateKey,
   doAssert((not isNil(pubkey)) and (not isNil(seckey)))
   var mult = scalarMul(pubkey, seckey)
   if not isNil(mult):
-    if seckey.key.curve == EC_SECP256R1:
+    if seckey.key.curve == EC_secp256r1:
       result = Secret256Length
-    elif seckey.key.curve == EC_SECP384R1:
+    elif seckey.key.curve == EC_secp384r1:
       result = Secret384Length
-    elif seckey.key.curve == EC_SECP521R1:
+    elif seckey.key.curve == EC_secp521r1:
       result = Secret521Length
     if len(data) >= result:
       var qplus1 = cast[pointer](cast[uint](mult.key.q) + 1'u)
@@ -1022,7 +1022,7 @@ proc dhtGetSinglePeerId(pb: ProtoBuffer): PeerId
   if pb.getRequiredField(3, result).isErr():
     raise newException(DaemonLocalError, "Missing field `value`!")

-proc enterDhtMessage(pb: ProtoBuffer, rt: DHTResponseType): Protobuffer
+proc enterDhtMessage(pb: ProtoBuffer, rt: DHTResponseType): ProtoBuffer
     {.inline, raises: [Defect, DaemonLocalError].} =
   var dhtResponse: seq[byte]
   if pb.getRequiredField(ResponseType.DHT.int, dhtResponse).isOk():
@@ -66,7 +66,7 @@ proc new*(
     except CatchableError:
       raise newException(PeerInfoError, "invalid private key")

-  let peerId = PeerID.init(key).tryGet()
+  let peerId = PeerId.init(key).tryGet()

   let peerInfo = PeerInfo(
     peerId: peerId,
@@ -32,23 +32,23 @@ const FloodSubCodec* = "/floodsub/1.0.0"
 type
   FloodSub* {.public.} = ref object of PubSub
     floodsub*: PeerTable # topic to remote peer map
-    seen*: TimedCache[MessageID] # message id:s already seen on the network
+    seen*: TimedCache[MessageId] # message id:s already seen on the network
     seenSalt*: seq[byte]

-proc hasSeen*(f: FloodSub, msgId: MessageID): bool =
+proc hasSeen*(f: FloodSub, msgId: MessageId): bool =
   f.seenSalt & msgId in f.seen

-proc addSeen*(f: FloodSub, msgId: MessageID): bool =
+proc addSeen*(f: FloodSub, msgId: MessageId): bool =
   # Salting the seen hash helps avoid attacks against the hash function used
   # in the nim hash table
   # Return true if the message has already been seen
   f.seen.put(f.seenSalt & msgId)

-proc firstSeen*(f: FloodSub, msgId: MessageID): Moment =
+proc firstSeen*(f: FloodSub, msgId: MessageId): Moment =
   f.seen.addedAt(f.seenSalt & msgId)

 proc handleSubscribe*(f: FloodSub,
-                      peer: PubsubPeer,
+                      peer: PubSubPeer,
                       topic: string,
                       subscribe: bool) =
   logScope:
@@ -222,7 +222,7 @@ method publish*(f: FloodSub,
 method initPubSub*(f: FloodSub)
   {.raises: [Defect, InitializationError].} =
   procCall PubSub(f).initPubSub()
-  f.seen = TimedCache[MessageID].init(2.minutes)
+  f.seen = TimedCache[MessageId].init(2.minutes)
   f.seenSalt = newSeqUninitialized[byte](sizeof(Hash))
   hmacDrbgGenerate(f.rng[], f.seenSalt)
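The seen-cache salting in the two FloodSub hunks above works as follows: each node draws a random `seenSalt` once (filled via `hmacDrbgGenerate`) and prepends it to every message id before the id is stored in or looked up from the `seen` cache, so a remote sender cannot precompute message ids that collide in the local hash table. A standalone sketch of the same idea, using a plain `HashSet` instead of libp2p's `TimedCache`; all names below are illustrative, not library API:

import std/[random, sets]

type SeenCache = object
  salt: seq[byte]                # drawn once per node; libp2p uses a DRBG here
  seen: HashSet[seq[byte]]

proc initSeenCache(): SeenCache =
  randomize()                    # good enough for a sketch, not cryptographic
  result.salt = newSeq[byte](8)
  for b in result.salt.mitems:
    b = byte(rand(255))

proc addSeen(c: var SeenCache, msgId: seq[byte]): bool =
  ## Returns true if the message id was already seen.
  let key = c.salt & msgId       # salting defeats precomputed hash collisions
  result = key in c.seen
  c.seen.incl(key)

var cache = initSeenCache()
doAssert not cache.addSeen(@[1'u8, 2, 3])   # first time: not seen
doAssert cache.addSeen(@[1'u8, 2, 3])       # second time: already seen
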
@@ -158,7 +158,7 @@ method onNewPeer(g: GossipSub, peer: PubSubPeer) =
   peer.iWantBudget = IWantPeerBudget
   peer.iHaveBudget = IHavePeerBudget

-method onPubSubPeerEvent*(p: GossipSub, peer: PubsubPeer, event: PubSubPeerEvent) {.gcsafe.} =
+method onPubSubPeerEvent*(p: GossipSub, peer: PubSubPeer, event: PubsubPeerEvent) {.gcsafe.} =
   case event.kind
   of PubSubPeerEventKind.Connected:
     discard
@@ -282,8 +282,8 @@ proc handleControl(g: GossipSub, peer: PubSubPeer, control: ControlMessage) =
     libp2p_pubsub_broadcast_iwant.inc(respControl.iwant.len.int64)

     for prune in respControl.prune:
-      if g.knownTopics.contains(prune.topicID):
-        libp2p_pubsub_broadcast_prune.inc(labelValues = [prune.topicID])
+      if g.knownTopics.contains(prune.topicId):
+        libp2p_pubsub_broadcast_prune.inc(labelValues = [prune.topicId])
       else:
         libp2p_pubsub_broadcast_prune.inc(labelValues = ["generic"])
@@ -624,7 +624,7 @@ method initPubSub*(g: GossipSub)
     raise newException(InitializationError, $validationRes.error)

   # init the floodsub stuff here, we customize timedcache in gossip!
-  g.seen = TimedCache[MessageID].init(g.parameters.seenTTL)
+  g.seen = TimedCache[MessageId].init(g.parameters.seenTTL)

   # init gossip stuff
   g.mcache = MCache.init(g.parameters.historyGossip, g.parameters.historyLength)
@@ -102,7 +102,7 @@ proc handleGraft*(g: GossipSub,
                   grafts: seq[ControlGraft]): seq[ControlPrune] = # {.raises: [Defect].} TODO chronicles exception on windows
   var prunes: seq[ControlPrune]
   for graft in grafts:
-    let topic = graft.topicID
+    let topic = graft.topicId
     trace "peer grafted topic", peer, topic

     # It is an error to GRAFT on a explicit peer
@@ -188,7 +188,7 @@ proc getPeers(prune: ControlPrune, peer: PubSubPeer): seq[(PeerId, Option[PeerRe
         trace "peer sent invalid SPR", peer, error=signedRecord.error
         none(PeerRecord)
       else:
-        if record.peerID != signedRecord.get().data.peerId:
+        if record.peerId != signedRecord.get().data.peerId:
           trace "peer sent envelope with wrong public key", peer
           none(PeerRecord)
         else:
@@ -201,7 +201,7 @@ proc getPeers(prune: ControlPrune, peer: PubSubPeer): seq[(PeerId, Option[PeerRe

 proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) {.raises: [Defect].} =
   for prune in prunes:
-    let topic = prune.topicID
+    let topic = prune.topicId

     trace "peer pruned topic", peer, topic
@@ -246,8 +246,8 @@ proc handleIHave*(g: GossipSub,
     let deIhaves = ihaves.deduplicate()
     for ihave in deIhaves:
       trace "peer sent ihave",
-        peer, topic = ihave.topicID, msgs = ihave.messageIDs
-      if ihave.topicID in g.mesh:
+        peer, topic = ihave.topicId, msgs = ihave.messageIDs
+      if ihave.topicId in g.mesh:
         # also avoid duplicates here!
         let deIhavesMsgs = ihave.messageIDs.deduplicate()
         for msgId in deIhavesMsgs:
@@ -323,7 +323,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
         candidates: seq[PubSubPeer]
         currentMesh = addr defaultMesh
       g.mesh.withValue(topic, v): currentMesh = v
-      g.gossipSub.withValue(topic, peerList):
+      g.gossipsub.withValue(topic, peerList):
         for it in peerList[]:
           if
             it.connected and
@@ -361,7 +361,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
         candidates: seq[PubSubPeer]
         currentMesh = addr defaultMesh
       g.mesh.withValue(topic, v): currentMesh = v
-      g.gossipSub.withValue(topic, peerList):
+      g.gossipsub.withValue(topic, peerList):
         for it in peerList[]:
           if
             it.connected and
@@ -466,7 +466,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
         avail: seq[PubSubPeer]
         currentMesh = addr defaultMesh
       g.mesh.withValue(topic, v): currentMesh = v
-      g.gossipSub.withValue(topic, peerList):
+      g.gossipsub.withValue(topic, peerList):
         for it in peerList[]:
           if
             # avoid negative score peers
@@ -611,7 +611,7 @@ proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] {.raises:
       allPeers.setLen(target)

     for peer in allPeers:
-      control.mGetOrPut(peer, ControlMessage()).ihave.add(ihave)
+      control.mgetOrPut(peer, ControlMessage()).ihave.add(ihave)

   libp2p_gossipsub_cache_window_size.set(cacheWindowSize.int64)
@@ -667,8 +667,8 @@ proc onHeartbeat(g: GossipSub) {.raises: [Defect].} =
   for peer, control in peers:
     # only ihave from here
     for ihave in control.ihave:
-      if g.knownTopics.contains(ihave.topicID):
-        libp2p_pubsub_broadcast_ihave.inc(labelValues = [ihave.topicID])
+      if g.knownTopics.contains(ihave.topicId):
+        libp2p_pubsub_broadcast_ihave.inc(labelValues = [ihave.topicId])
       else:
         libp2p_pubsub_broadcast_ihave.inc(labelValues = ["generic"])
     g.send(peer, RPCMsg(control: some(control)))
@@ -141,7 +141,7 @@ type
     enablePX*: bool

   BackoffTable* = Table[string, Table[PeerId, Moment]]
-  ValidationSeenTable* = Table[MessageID, HashSet[PubSubPeer]]
+  ValidationSeenTable* = Table[MessageId, HashSet[PubSubPeer]]

   RoutingRecordsPair* = tuple[id: PeerId, record: Option[PeerRecord]]
   RoutingRecordsHandler* =
@@ -16,30 +16,30 @@ export sets, tables, messages, options

 type
   CacheEntry* = object
-    mid*: MessageID
+    mid*: MessageId
     topicIDs*: seq[string]

   MCache* = object of RootObj
-    msgs*: Table[MessageID, Message]
+    msgs*: Table[MessageId, Message]
     history*: seq[seq[CacheEntry]]
     windowSize*: Natural

-func get*(c: MCache, mid: MessageID): Option[Message] =
+func get*(c: MCache, mid: MessageId): Option[Message] =
   if mid in c.msgs:
     try: some(c.msgs[mid])
     except KeyError: raiseAssert "checked"
   else:
     none(Message)

-func contains*(c: MCache, mid: MessageID): bool =
+func contains*(c: MCache, mid: MessageId): bool =
   mid in c.msgs

-func put*(c: var MCache, msgId: MessageID, msg: Message) =
+func put*(c: var MCache, msgId: MessageId, msg: Message) =
   if not c.msgs.hasKeyOrPut(msgId, msg):
     # Only add cache entry if the message was not already in the cache
     c.history[0].add(CacheEntry(mid: msgId, topicIDs: msg.topicIDs))

-func window*(c: MCache, topic: string): HashSet[MessageID] =
+func window*(c: MCache, topic: string): HashSet[MessageId] =
   let
     len = min(c.windowSize, c.history.len)
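For orientation, the MCache touched above is GossipSub's message cache: `put` stores a message under its id and records the id together with its topics in the newest history slot, while `window` collects, from the most recent `windowSize` slots, the ids that belong to one topic; those ids are what later gets advertised in IHAVE (see the `getGossipPeers`/`onHeartbeat` hunks above). A cut-down, self-contained sketch of those two operations, with simplified types and history rotation left out:

import std/[sets, tables]

type
  MessageId = seq[byte]
  Message = object
    topicIDs: seq[string]
  CacheEntry = object
    mid: MessageId
    topicIDs: seq[string]
  MCache = object
    msgs: Table[MessageId, Message]
    history: seq[seq[CacheEntry]]   # history[0] is the newest slot
    windowSize: Natural

func put(c: var MCache, msgId: MessageId, msg: Message) =
  if not c.msgs.hasKeyOrPut(msgId, msg):
    # only record a history entry the first time an id is seen
    c.history[0].add(CacheEntry(mid: msgId, topicIDs: msg.topicIDs))

func window(c: MCache, topic: string): HashSet[MessageId] =
  # ids eligible for IHAVE gossip: the newest `windowSize` history slots
  let len = min(c.windowSize, c.history.len)
  for i in 0 ..< len:
    for entry in c.history[i]:
      if topic in entry.topicIDs:
        result.incl(entry.mid)

var c = MCache(history: newSeq[seq[CacheEntry]](5), windowSize: 3)
c.put(@[1'u8], Message(topicIDs: @["mytopic"]))
doAssert c.window("mytopic").len == 1
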
@@ -90,7 +90,7 @@ type
   TopicPair* = tuple[topic: string, handler: TopicHandler]

   MsgIdProvider* {.public.} =
-    proc(m: Message): Result[MessageID, ValidationResult] {.noSideEffect, raises: [Defect], gcsafe.}
+    proc(m: Message): Result[MessageId, ValidationResult] {.noSideEffect, raises: [Defect], gcsafe.}

   SubscriptionValidator* {.public.} =
     proc(topic: string): bool {.raises: [Defect], gcsafe.}
@@ -174,18 +174,18 @@ proc broadcast*(

     let control = msg.control.get()
     for ihave in control.ihave:
-      if p.knownTopics.contains(ihave.topicID):
-        libp2p_pubsub_broadcast_ihave.inc(npeers, labelValues = [ihave.topicID])
+      if p.knownTopics.contains(ihave.topicId):
+        libp2p_pubsub_broadcast_ihave.inc(npeers, labelValues = [ihave.topicId])
       else:
         libp2p_pubsub_broadcast_ihave.inc(npeers, labelValues = ["generic"])
     for graft in control.graft:
-      if p.knownTopics.contains(graft.topicID):
-        libp2p_pubsub_broadcast_graft.inc(npeers, labelValues = [graft.topicID])
+      if p.knownTopics.contains(graft.topicId):
+        libp2p_pubsub_broadcast_graft.inc(npeers, labelValues = [graft.topicId])
       else:
         libp2p_pubsub_broadcast_graft.inc(npeers, labelValues = ["generic"])
     for prune in control.prune:
-      if p.knownTopics.contains(prune.topicID):
-        libp2p_pubsub_broadcast_prune.inc(npeers, labelValues = [prune.topicID])
+      if p.knownTopics.contains(prune.topicId):
+        libp2p_pubsub_broadcast_prune.inc(npeers, labelValues = [prune.topicId])
       else:
         libp2p_pubsub_broadcast_prune.inc(npeers, labelValues = ["generic"])
@@ -247,18 +247,18 @@ proc updateMetrics*(p: PubSub, rpcMsg: RPCMsg) =
     libp2p_pubsub_received_iwant.inc(rpcMsg.control.get().iwant.len.int64)
     template control: untyped = rpcMsg.control.unsafeGet()
     for ihave in control.ihave:
-      if p.knownTopics.contains(ihave.topicID):
-        libp2p_pubsub_received_ihave.inc(labelValues = [ihave.topicID])
+      if p.knownTopics.contains(ihave.topicId):
+        libp2p_pubsub_received_ihave.inc(labelValues = [ihave.topicId])
       else:
         libp2p_pubsub_received_ihave.inc(labelValues = ["generic"])
     for graft in control.graft:
-      if p.knownTopics.contains(graft.topicID):
-        libp2p_pubsub_received_graft.inc(labelValues = [graft.topicID])
+      if p.knownTopics.contains(graft.topicId):
+        libp2p_pubsub_received_graft.inc(labelValues = [graft.topicId])
       else:
         libp2p_pubsub_received_graft.inc(labelValues = ["generic"])
     for prune in control.prune:
-      if p.knownTopics.contains(prune.topicID):
-        libp2p_pubsub_received_prune.inc(labelValues = [prune.topicID])
+      if p.knownTopics.contains(prune.topicId):
+        libp2p_pubsub_received_prune.inc(labelValues = [prune.topicId])
       else:
         libp2p_pubsub_received_prune.inc(labelValues = ["generic"])
@@ -270,7 +270,7 @@ method rpcHandler*(p: PubSub,

 method onNewPeer(p: PubSub, peer: PubSubPeer) {.base.} = discard

-method onPubSubPeerEvent*(p: PubSub, peer: PubsubPeer, event: PubsubPeerEvent) {.base, gcsafe.} =
+method onPubSubPeerEvent*(p: PubSub, peer: PubSubPeer, event: PubsubPeerEvent) {.base, gcsafe.} =
   # Peer event is raised for the send connection in particular
   case event.kind
   of PubSubPeerEventKind.Connected:
@@ -290,14 +290,14 @@ proc getOrCreatePeer*(
       p.switch.dial(peerId, protos)

   proc dropConn(peer: PubSubPeer) =
-    proc dropConnAsync(peer: PubsubPeer) {.async.} =
+    proc dropConnAsync(peer: PubSubPeer) {.async.} =
       try:
         await p.switch.disconnect(peer.peerId)
       except CatchableError as exc: # never cancelled
         trace "Failed to close connection", peer, error = exc.name, msg = exc.msg
     asyncSpawn dropConnAsync(peer)

-  proc onEvent(peer: PubsubPeer, event: PubsubPeerEvent) {.gcsafe.} =
+  proc onEvent(peer: PubSubPeer, event: PubsubPeerEvent) {.gcsafe.} =
     p.onPubSubPeerEvent(peer, event)

   # create new pubsub peer
@@ -312,7 +312,7 @@ proc getOrCreatePeer*(
   # metrics
   libp2p_pubsub_peers.set(p.peers.len.int64)

-  pubsubPeer.connect()
+  pubSubPeer.connect()

   return pubSubPeer
@@ -43,7 +43,7 @@ type
     kind*: PubSubPeerEventKind

   GetConn* = proc(): Future[Connection] {.gcsafe, raises: [Defect].}
-  DropConn* = proc(peer: PubsubPeer) {.gcsafe, raises: [Defect].} # have to pass peer as it's unknown during init
+  DropConn* = proc(peer: PubSubPeer) {.gcsafe, raises: [Defect].} # have to pass peer as it's unknown during init
   OnEvent* = proc(peer: PubSubPeer, event: PubsubPeerEvent) {.gcsafe, raises: [Defect].}

   PubSubPeer* = ref object of RootObj
@@ -29,7 +29,7 @@ const PubSubPrefix = toBytes("libp2p-pubsub:")
 declareCounter(libp2p_pubsub_sig_verify_success, "pubsub successfully validated messages")
 declareCounter(libp2p_pubsub_sig_verify_failure, "pubsub failed validated messages")

-func defaultMsgIdProvider*(m: Message): Result[MessageID, ValidationResult] =
+func defaultMsgIdProvider*(m: Message): Result[MessageId, ValidationResult] =
   if m.seqno.len > 0 and m.fromPeer.data.len > 0:
     let mid = byteutils.toHex(m.seqno) & $m.fromPeer
     ok mid.toBytes()
@@ -27,7 +27,7 @@ type
     subscribe*: bool
     topic*: string

-  MessageID* = seq[byte]
+  MessageId* = seq[byte]

   Message* = object
     fromPeer*: PeerId
@@ -44,17 +44,17 @@ type
     prune*: seq[ControlPrune]

   ControlIHave* = object
-    topicID*: string
-    messageIDs*: seq[MessageID]
+    topicId*: string
+    messageIDs*: seq[MessageId]

   ControlIWant* = object
-    messageIDs*: seq[MessageID]
+    messageIDs*: seq[MessageId]

   ControlGraft* = object
-    topicID*: string
+    topicId*: string

   ControlPrune* = object
-    topicID*: string
+    topicId*: string
     peers*: seq[PeerInfoMsg]
     backoff*: uint64
@@ -70,7 +70,7 @@ func withSubs*(

 func shortLog*(s: ControlIHave): auto =
   (
-    topicID: s.topicID.shortLog,
+    topicId: s.topicId.shortLog,
     messageIDs: mapIt(s.messageIDs, it.shortLog)
   )
@@ -81,12 +81,12 @@ func shortLog*(s: ControlIWant): auto =

 func shortLog*(s: ControlGraft): auto =
   (
-    topicID: s.topicID.shortLog
+    topicId: s.topicId.shortLog
   )

 func shortLog*(s: ControlPrune): auto =
   (
-    topicID: s.topicID.shortLog
+    topicId: s.topicId.shortLog
   )

 func shortLog*(c: ControlMessage): auto =
@@ -30,7 +30,7 @@ when defined(libp2p_protobuf_metrics):

 proc write*(pb: var ProtoBuffer, field: int, graft: ControlGraft) =
   var ipb = initProtoBuffer()
-  ipb.write(1, graft.topicID)
+  ipb.write(1, graft.topicId)
   ipb.finish()
   pb.write(field, ipb)
@@ -46,7 +46,7 @@ proc write*(pb: var ProtoBuffer, field: int, infoMsg: PeerInfoMsg) =

 proc write*(pb: var ProtoBuffer, field: int, prune: ControlPrune) =
   var ipb = initProtoBuffer()
-  ipb.write(1, prune.topicID)
+  ipb.write(1, prune.topicId)
   for peer in prune.peers:
     ipb.write(2, peer)
   ipb.write(3, prune.backoff)
@@ -58,7 +58,7 @@ proc write*(pb: var ProtoBuffer, field: int, prune: ControlPrune) =

 proc write*(pb: var ProtoBuffer, field: int, ihave: ControlIHave) =
   var ipb = initProtoBuffer()
-  ipb.write(1, ihave.topicID)
+  ipb.write(1, ihave.topicId)
   for mid in ihave.messageIDs:
     ipb.write(2, mid)
   ipb.finish()
@@ -58,7 +58,7 @@ type
   RelayError* = object of LPError

   RelayPeer* = object
-    peerId*: PeerID
+    peerId*: PeerId
     addrs*: seq[MultiAddress]

   AddConn* = proc(conn: Connection): Future[void] {.gcsafe, raises: [Defect].}
@@ -71,7 +71,7 @@ type

   Relay* = ref object of LPProtocol
     switch*: Switch
-    peerId: PeerID
+    peerId: PeerId
     dialer: Dial
     canHop: bool
     streamCount: int
@@ -531,7 +531,7 @@ suite "GossipSub internal":
     await gossipSub.rpcHandler(peer, lotOfSubs)

     check:
-      gossipSub.gossipSub.len == gossipSub.topicsHigh
+      gossipSub.gossipsub.len == gossipSub.topicsHigh
      peer.behaviourPenalty > 0.0

     await conn.close()
@@ -18,7 +18,7 @@ export builders

 randomize()

-func defaultMsgIdProvider*(m: Message): Result[MessageID, ValidationResult] =
+func defaultMsgIdProvider*(m: Message): Result[MessageId, ValidationResult] =
   let mid =
     if m.seqno.len > 0 and m.fromPeer.data.len > 0:
       byteutils.toHex(m.seqno) & $m.fromPeer
@@ -26,7 +26,7 @@ suite "PeerInfo":

     let
       seckey = PrivateKey.random(rng[]).tryGet()
-      peerId = PeerID.init(seckey).get()
+      peerId = PeerId.init(seckey).get()
       multiAddresses = @[MultiAddress.init("/ip4/0.0.0.0/tcp/24").tryGet(), MultiAddress.init("/ip4/0.0.0.0/tcp/25").tryGet()]
       peerInfo = PeerInfo.new(seckey, multiAddresses)
@@ -59,9 +59,9 @@ suite "Circuit Relay":
     relaySrc {.threadvar.}: Relay
     relayDst {.threadvar.}: Relay
     relayRel {.threadvar.}: Relay
-    conn {.threadVar.}: Connection
-    msg {.threadVar.}: ProtoBuffer
-    rcv {.threadVar.}: Option[RelayMessage]
+    conn {.threadvar.}: Connection
+    msg {.threadvar.}: ProtoBuffer
+    rcv {.threadvar.}: Option[RelayMessage]

   proc createMsg(
     msgType: Option[RelayType] = RelayType.none,
@@ -326,7 +326,7 @@ suite "Circuit Relay":

     await allFutures(srcWR.stop(), dstWR.stop(), relWR.stop())

-  asynctest "Bad MultiAddress":
+  asyncTest "Bad MultiAddress":
     await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
     await rel.connect(dst.peerInfo.peerId, dst.peerInfo.addrs)
     expect(CatchableError):