mirror of https://github.com/waku-org/nwaku.git
fix: fix regression + remove deprecated flag (#2556)
This commit is contained in:
parent 65530264d5
commit 47ad0fb001
@@ -143,11 +143,11 @@ proc parseStore(
   return true
 
 proc parseTopics(jsonNode: JsonNode, conf: var WakuNodeConf) =
-  if jsonNode.contains("topics"):
-    for topic in jsonNode["topics"].items:
-      conf.topics.add(topic.getStr())
+  if jsonNode.contains("pubsubTopics"):
+    for topic in jsonNode["pubsubTopics"].items:
+      conf.pubsubTopics.add(topic.getStr())
   else:
-    conf.topics = @["/waku/2/default-waku/proto"]
+    conf.pubsubTopics = @["/waku/2/default-waku/proto"]
 
 proc parseConfig*(
     configNodeJson: string,
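The updated parsing logic is easy to exercise on its own. A minimal standalone sketch using only std/json, with a FakeConf stand-in for nwaku's real WakuNodeConf type (illustration only):

import std/json

type FakeConf = object # stand-in for WakuNodeConf, illustration only
  pubsubTopics: seq[string]

proc parseTopics(jsonNode: JsonNode, conf: var FakeConf) =
  # Mirror of the updated logic: read "pubsubTopics" if present,
  # otherwise fall back to the default pubsub topic.
  if jsonNode.contains("pubsubTopics"):
    for topic in jsonNode["pubsubTopics"].items:
      conf.pubsubTopics.add(topic.getStr())
  else:
    conf.pubsubTopics = @["/waku/2/default-waku/proto"]

var conf: FakeConf
parseTopics(parseJson("""{"pubsubTopics": ["/waku/2/rs/1/0"]}"""), conf)
assert conf.pubsubTopics == @["/waku/2/rs/1/0"]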
@@ -35,9 +35,9 @@ suite "Peer Manager":
     # Given two nodes with the same shardId
     let
       server =
-        newTestWakuNode(serverKey, listenAddress, listenPort, topics = @[shardTopic0])
+        newTestWakuNode(serverKey, listenAddress, listenPort, pubsubTopics = @[shardTopic0])
       client =
-        newTestWakuNode(clientKey, listenAddress, listenPort, topics = @[shardTopic1])
+        newTestWakuNode(clientKey, listenAddress, listenPort, pubsubTopics = @[shardTopic1])
 
     # And both mount metadata and filter
     discard client.mountMetadata(0) # clusterId irrelevant, overridden by topic
@@ -68,9 +68,9 @@ suite "Peer Manager":
     # Given two nodes with the same shardId
     let
       server =
-        newTestWakuNode(serverKey, listenAddress, listenPort, topics = @[shardTopic0])
+        newTestWakuNode(serverKey, listenAddress, listenPort, pubsubTopics = @[shardTopic0])
       client =
-        newTestWakuNode(clientKey, listenAddress, listenPort, topics = @[shardTopic0])
+        newTestWakuNode(clientKey, listenAddress, listenPort, pubsubTopics = @[shardTopic0])
 
     # And both mount metadata and relay
     discard client.mountMetadata(0) # clusterId irrelevant, overridden by topic
@@ -99,9 +99,9 @@ suite "Peer Manager":
     # Given two nodes with different shardIds
     let
       server =
-        newTestWakuNode(serverKey, listenAddress, listenPort, topics = @[shardTopic0])
+        newTestWakuNode(serverKey, listenAddress, listenPort, pubsubTopics = @[shardTopic0])
       client =
-        newTestWakuNode(clientKey, listenAddress, listenPort, topics = @[shardTopic1])
+        newTestWakuNode(clientKey, listenAddress, listenPort, pubsubTopics = @[shardTopic1])
 
     # And both mount metadata and relay
     discard client.mountMetadata(0) # clusterId irrelevant, overridden by topic
@@ -420,7 +420,7 @@ procSuite "Peer Manager":
        ValidIpAddress.init("0.0.0.0"),
        Port(0),
        clusterId = clusterId3,
-        topics = @["/waku/2/rs/3/0"],
+        pubsubTopics = @["/waku/2/rs/3/0"],
      )
 
    # same network
@@ -429,14 +429,14 @@ procSuite "Peer Manager":
        ValidIpAddress.init("0.0.0.0"),
        Port(0),
        clusterId = clusterId4,
-        topics = @["/waku/2/rs/4/0"],
+        pubsubTopics = @["/waku/2/rs/4/0"],
      )
      node3 = newTestWakuNode(
        generateSecp256k1Key(),
        ValidIpAddress.init("0.0.0.0"),
        Port(0),
        clusterId = clusterId4,
-        topics = @["/waku/2/rs/4/0"],
+        pubsubTopics = @["/waku/2/rs/4/0"],
      )
 
    node1.mountMetadata(clusterId3).expect("Mounted Waku Metadata")
@@ -21,11 +21,11 @@ suite "Peer Storage":
       peerProto = "/waku/2/default-waku/codec"
       connectedness = Connectedness.CanConnect
       disconn = 999999
-      topics = @["/waku/2/rs/2/0", "/waku/2/rs/2/1"]
+      pubsubTopics = @["/waku/2/rs/2/0", "/waku/2/rs/2/1"]
 
     # Create ENR
     var enrBuilder = EnrBuilder.init(peerKey)
-    enrBuilder.withShardedTopics(topics).expect("Valid topics")
+    enrBuilder.withShardedTopics(pubsubTopics).expect("Valid topics")
     let record = enrBuilder.build().expect("Valid record")
 
     let stored = RemotePeerInfo(
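The ENR construction in this test can be reproduced in isolation. A sketch, assuming nwaku's waku_enr module and the generateSecp256k1Key test helper (used elsewhere in this diff) are in scope; the import path is an assumption, not verified against this tree:

# Sketch only: import path is an assumption.
import waku/waku_enr

let
  peerKey = generateSecp256k1Key()
  pubsubTopics = @["/waku/2/rs/2/0", "/waku/2/rs/2/1"]

var enrBuilder = EnrBuilder.init(peerKey)
# Encodes the relay shards (cluster 2, shards 0 and 1) into the record.
enrBuilder.withShardedTopics(pubsubTopics).expect("Valid topics")
let record = enrBuilder.build().expect("Valid record")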
@@ -33,7 +33,7 @@ proc defaultTestWakuNodeConf*(): WakuNodeConf =
     maxConnections: 50,
     maxMessageSize: "1024 KiB",
     clusterId: 1.uint32,
-    topics: @["/waku/2/rs/1/0"],
+    pubsubTopics: @["/waku/2/rs/1/0"],
     relay: true,
     storeMessageDbUrl: "sqlite://store.sqlite3",
   )
@@ -59,7 +59,7 @@ proc newTestWakuNode*(
     discv5UdpPort = none(Port),
     agentString = none(string),
     clusterId: uint32 = 1.uint32,
-    topics: seq[string] = @["/waku/2/rs/1/0"],
+    pubsubTopics: seq[string] = @["/waku/2/rs/1/0"],
     peerStoreCapacity = none(int),
 ): WakuNode =
   var resolvedExtIp = extIp
@@ -74,7 +74,7 @@ proc newTestWakuNode*(
   var conf = defaultTestWakuNodeConf()
 
   conf.clusterId = clusterId
-  conf.topics = topics
+  conf.pubsubTopics = pubsubTopics
 
   if dns4DomainName.isSome() and extIp.isNone():
     # If there's an error resolving the IP, an exception is thrown and test fails
@@ -101,7 +101,7 @@ proc newTestWakuNode*(
 
   var enrBuilder = EnrBuilder.init(nodeKey)
 
-  enrBuilder.withShardedTopics(topics).isOkOr:
+  enrBuilder.withShardedTopics(pubsubTopics).isOkOr:
     raise newException(Defect, "Invalid record: " & error)
 
   enrBuilder.withIpAddressAndPorts(
@@ -297,13 +297,6 @@ type WakuNodeConf* = object
     name: "keep-alive"
   .}: bool
 
-  topics* {.
-    desc:
-      "Default topic to subscribe to. Argument may be repeated. Deprecated! Please use pubsub-topic and/or content-topic instead.",
-    defaultValue: @["/waku/2/default-waku/proto"],
-    name: "topic"
-  .}: seq[string]
-
   pubsubTopics* {.
     desc: "Default pubsub topic to subscribe to. Argument may be repeated.",
     name: "pubsub-topic"
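With the deprecated field gone, invocations that still pass --topic will fail option parsing; only --pubsub-topic (and content-topic autosharding) remain. The surviving flag in isolation, as a minimal sketch assuming the nim-confutils package; ExampleConf is hypothetical, not nwaku's config type:

import confutils

type ExampleConf* = object
  pubsubTopics* {.
    desc: "Default pubsub topic to subscribe to. Argument may be repeated.",
    defaultValue: @["/waku/2/rs/1/0"],
    name: "pubsub-topic"
  .}: seq[string]

when isMainModule:
  # e.g. ./example --pubsub-topic=/waku/2/rs/1/0 --pubsub-topic=/waku/2/rs/1/1
  let conf = ExampleConf.load()
  echo conf.pubsubTopics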
@@ -32,7 +32,7 @@ proc enrConfiguration*(
   let shards: seq[uint16] =
     # no shards configured
     if conf.shards.len == 0:
-      toSeq(0 ..< conf.topics.len).mapIt(uint16(it))
+      toSeq(0 ..< conf.pubsubTopics.len).mapIt(uint16(it))
     # some shards configured
     else:
       toSeq(conf.shards.mapIt(uint16(it)))
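The fallback branch simply derives one shard index per configured pubsub topic. A standalone check using only the standard library:

import std/sequtils

let pubsubTopics = @["/waku/2/rs/1/0", "/waku/2/rs/1/1"]
# One shard index per configured pubsub topic: 0, 1, ...
let shards: seq[uint16] = toSeq(0 ..< pubsubTopics.len).mapIt(uint16(it))
assert shards == @[0'u16, 1'u16]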
@@ -140,15 +140,9 @@ proc setupProtocols(
     peerExchangeHandler = some(handlePeerExchange)
 
   if conf.relay:
-    let pubsubTopics =
-      if conf.pubsubTopics.len > 0 or conf.contentTopics.len > 0:
-        # TODO autoshard content topics only once.
-        # Already checked for errors in app.init
-        let shards =
-          conf.contentTopics.mapIt(node.wakuSharding.getShard(it).expect("Valid Shard"))
-        conf.pubsubTopics & shards
-      else:
-        conf.topics
+    let shards =
+      conf.contentTopics.mapIt(node.wakuSharding.getShard(it).expect("Valid Shard"))
+    let pubsubTopics = conf.pubsubTopics & shards
 
     let parsedMaxMsgSize = parseMsgSize(conf.maxMessageSize).valueOr:
       return err("failed to parse 'max-num-bytes-msg-size' param: " & $error)
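This hunk is the regression fix proper: the old else branch fell back to the deprecated conf.topics, while the new code always derives shards from the content topics and unions them with conf.pubsubTopics. A plain-Nim sketch of the resulting behavior, where getShard is a hypothetical stand-in for node.wakuSharding.getShard(...).expect(...):

import std/sequtils

proc getShard(contentTopic: string): string =
  # Placeholder: real autosharding hashes the content topic into
  # a relay shard such as "/waku/2/rs/1/3".
  "/waku/2/rs/1/3"

let
  pubsubTopics = @["/waku/2/rs/1/0"]
  contentTopics = @["/app/1/example/proto"]
  shards = contentTopics.mapIt(getShard(it))

# The node now always subscribes to the union of both lists.
assert pubsubTopics & shards == @["/waku/2/rs/1/0", "/waku/2/rs/1/3"]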