Merge branch 'waku-org:master' into fix-url-encoding

Commit f24c15755b by Vishwanath Martur, 2024-12-14 20:23:02 +05:30 (committed via GitHub).
26 changed files with 530 additions and 142 deletions


@@ -87,7 +87,7 @@ proc readValue*(
         )
       size = some(reader.readValue(uint64))
     else:
-      unrecognizedFieldWarning()
+      unrecognizedFieldWarning(value)

   if sender.isNone():
     reader.raiseUnexpectedValue("Field `sender` is missing")


@@ -1,9 +1,10 @@
-import system, results, std/json
+import system, results, std/json, std/strutils
 import stew/byteutils
 import
   ../../waku/common/base64,
   ../../waku/waku_core/message,
   ../../waku/waku_core/message/message,
+  ../utils,
   ./json_base_event

 type JsonMessage* = ref object # https://rfc.vac.dev/spec/36/#jsonmessage-type
@@ -15,16 +16,20 @@ type JsonMessage* = ref object # https://rfc.vac.dev/spec/36/#jsonmessage-type
   meta*: Base64String
   proof*: Base64String

-func fromJsonNode*(T: type JsonMessage, jsonContent: JsonNode): JsonMessage =
+func fromJsonNode*(
+    T: type JsonMessage, jsonContent: JsonNode
+): Result[JsonMessage, string] =
   # Visit https://rfc.vac.dev/spec/14/ for further details
-  JsonMessage(
-    payload: Base64String(jsonContent["payload"].getStr()),
-    contentTopic: jsonContent["contentTopic"].getStr(),
-    version: uint32(jsonContent{"version"}.getInt()),
-    timestamp: int64(jsonContent{"timestamp"}.getBiggestInt()),
-    ephemeral: jsonContent{"ephemeral"}.getBool(),
-    meta: Base64String(jsonContent{"meta"}.getStr()),
-    proof: Base64String(jsonContent{"proof"}.getStr()),
-  )
+  ok(
+    JsonMessage(
+      payload: Base64String(jsonContent["payload"].getStr()),
+      contentTopic: jsonContent["contentTopic"].getStr(),
+      version: uint32(jsonContent{"version"}.getInt()),
+      timestamp: (?jsonContent.getProtoInt64("timestamp")).get(0),
+      ephemeral: jsonContent{"ephemeral"}.getBool(),
+      meta: Base64String(jsonContent{"meta"}.getStr()),
+      proof: Base64String(jsonContent{"proof"}.getStr()),
+    )
+  )

 proc toWakuMessage*(self: JsonMessage): Result[WakuMessage, string] =
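Note: fromJsonNode now returns a Result, so callers unwrap it instead of using the object directly (libwaku does this with valueOr elsewhere in this diff). A minimal illustrative sketch, not part of the diff:

  let jsonContent = parseJson("""{"payload": "cGF5bG9hZA==", "contentTopic": "/waku/2/default-content/proto"}""")
  let msg = JsonMessage.fromJsonNode(jsonContent).valueOr:
    quit("invalid JsonMessage: " & error)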


@@ -27,7 +27,8 @@ import
   ./waku_thread/inter_thread_communication/requests/ping_request,
   ./waku_thread/inter_thread_communication/waku_thread_request,
   ./alloc,
-  ./ffi_types
+  ./ffi_types,
+  ../waku/factory/app_callbacks

 ################################################################################
 ### Wrapper around the waku node
@@ -138,10 +139,14 @@ proc waku_new(
   ctx.userData = userData

+  let appCallbacks = AppCallbacks(relayHandler: onReceivedMessage(ctx))
+
   let retCode = handleRequest(
     ctx,
     RequestType.LIFECYCLE,
-    NodeLifecycleRequest.createShared(NodeLifecycleMsgType.CREATE_NODE, configJson),
+    NodeLifecycleRequest.createShared(
+      NodeLifecycleMsgType.CREATE_NODE, configJson, appCallbacks
+    ),
     callback,
     userData,
   )
@@ -267,7 +272,8 @@ proc waku_relay_publish(
   var jsonMessage: JsonMessage
   try:
     let jsonContent = parseJson($jwm)
-    jsonMessage = JsonMessage.fromJsonNode(jsonContent)
+    jsonMessage = JsonMessage.fromJsonNode(jsonContent).valueOr:
+      raise newException(JsonParsingError, $error)
   except JsonParsingError:
     deallocShared(jwm)
     let msg = fmt"Error parsing json message: {getCurrentExceptionMsg()}"
@@ -371,7 +377,7 @@ proc waku_relay_unsubscribe(
     ctx,
     RequestType.RELAY,
     RelayRequest.createShared(
-      RelayMsgType.SUBSCRIBE,
+      RelayMsgType.UNSUBSCRIBE,
       PubsubTopic($pst),
      WakuRelayHandler(onReceivedMessage(ctx)),
     ),
@@ -495,7 +501,8 @@ proc waku_lightpush_publish(
   var jsonMessage: JsonMessage
   try:
     let jsonContent = parseJson($jwm)
-    jsonMessage = JsonMessage.fromJsonNode(jsonContent)
+    jsonMessage = JsonMessage.fromJsonNode(jsonContent).valueOr:
+      raise newException(JsonParsingError, $error)
   except JsonParsingError:
     let msg = fmt"Error parsing json message: {getCurrentExceptionMsg()}"
     callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)

library/utils.nim (new file, 20 lines)

@@ -0,0 +1,20 @@
import std/[json, options, strutils]
import results
proc getProtoInt64*(node: JsonNode, key: string): Result[Option[int64], string] =
try:
let (value, ok) =
if node.hasKey(key):
if node[key].kind == JString:
(parseBiggestInt(node[key].getStr()), true)
else:
(node[key].getBiggestInt(), true)
else:
(0, false)
if ok:
return ok(some(value))
return ok(none(int64))
except CatchableError:
return err("Invalid int64 value in `" & key & "`")


@@ -7,6 +7,7 @@ import
   ../../../../waku/factory/waku,
   ../../../../waku/factory/node_factory,
   ../../../../waku/factory/networks_config,
+  ../../../../waku/factory/app_callbacks,
   ../../../alloc

 type NodeLifecycleMsgType* = enum
@@ -17,12 +18,17 @@ type NodeLifecycleMsgType* = enum
 type NodeLifecycleRequest* = object
   operation: NodeLifecycleMsgType
   configJson: cstring ## Only used in 'CREATE_NODE' operation
+  appCallbacks: AppCallbacks

 proc createShared*(
-    T: type NodeLifecycleRequest, op: NodeLifecycleMsgType, configJson: cstring = ""
+    T: type NodeLifecycleRequest,
+    op: NodeLifecycleMsgType,
+    configJson: cstring = "",
+    appCallbacks: AppCallbacks = nil,
 ): ptr type T =
   var ret = createShared(T)
   ret[].operation = op
+  ret[].appCallbacks = appCallbacks
   ret[].configJson = configJson.alloc()
   return ret
@@ -30,7 +36,9 @@ proc destroyShared(self: ptr NodeLifecycleRequest) =
   deallocShared(self[].configJson)
   deallocShared(self)

-proc createWaku(configJson: cstring): Future[Result[Waku, string]] {.async.} =
+proc createWaku(
+    configJson: cstring, appCallbacks: AppCallbacks = nil
+): Future[Result[Waku, string]] {.async.} =
   var conf = defaultWakuNodeConf().valueOr:
     return err("Failed creating node: " & error)
@@ -59,7 +67,7 @@ proc createWaku(configJson: cstring): Future[Result[Waku, string]] {.async.} =
         formattedString & ". expected type: " & $typeof(confValue)
       )

-  let wakuRes = Waku.new(conf).valueOr:
+  let wakuRes = Waku.new(conf, appCallbacks).valueOr:
     error "waku initialization failed", error = error
     return err("Failed setting up Waku: " & $error)
@@ -73,7 +81,7 @@ proc process*(
   case self.operation
   of CREATE_NODE:
-    waku[] = (await createWaku(self.configJson)).valueOr:
+    waku[] = (await createWaku(self.configJson, self.appCallbacks)).valueOr:
       error "CREATE_NODE failed", error = error
       return err("error processing createWaku request: " & $error)
   of START_NODE:


@@ -3,6 +3,7 @@ import chronos, chronicles, results
 import
   ../../../../../waku/factory/waku,
   ../../../../alloc,
+  ../../../../utils,
   ../../../../../waku/waku_core/peers,
   ../../../../../waku/waku_core/time,
   ../../../../../waku/waku_core/message/digest,
@@ -24,7 +25,7 @@ type StoreRequest* = object
 func fromJsonNode(
     T: type JsonStoreQueryRequest, jsonContent: JsonNode
-): StoreQueryRequest =
+): Result[StoreQueryRequest, string] =
   let contentTopics = collect(newSeq):
     for cTopic in jsonContent["content_topics"].getElems():
       cTopic.getStr()
@@ -45,18 +46,6 @@ func fromJsonNode(
     else:
       none(string)

-  let startTime =
-    if jsonContent.contains("time_start"):
-      some(Timestamp(jsonContent["time_start"].getInt()))
-    else:
-      none(Timestamp)
-
-  let endTime =
-    if jsonContent.contains("time_end"):
-      some(Timestamp(jsonContent["time_end"].getInt()))
-    else:
-      none(Timestamp)
-
   let paginationCursor =
     if jsonContent.contains("pagination_cursor"):
       var hash: WakuMessageHash
@@ -79,17 +68,19 @@ func fromJsonNode(
     else:
       none(uint64)

-  return StoreQueryRequest(
-    requestId: jsonContent["request_id"].getStr(),
-    includeData: jsonContent["include_data"].getBool(),
-    pubsubTopic: pubsubTopic,
-    contentTopics: contentTopics,
-    startTime: startTime,
-    endTime: endTime,
-    messageHashes: msgHashes,
-    paginationCursor: paginationCursor,
-    paginationForward: paginationForward,
-    paginationLimit: paginationLimit,
-  )
+  return ok(
+    StoreQueryRequest(
+      requestId: jsonContent["request_id"].getStr(),
+      includeData: jsonContent["include_data"].getBool(),
+      pubsubTopic: pubsubTopic,
+      contentTopics: contentTopics,
+      startTime: ?jsonContent.getProtoInt64("time_start"),
+      endTime: ?jsonContent.getProtoInt64("time_end"),
+      messageHashes: msgHashes,
+      paginationCursor: paginationCursor,
+      paginationForward: paginationForward,
+      paginationLimit: paginationLimit,
+    )
+  )

 proc createShared*(
@@ -128,7 +119,7 @@ proc process(
   let peer = peers.parsePeerInfo(($self[].peerAddr).split(",")).valueOr:
     return err("JsonStoreQueryRequest failed to parse peer addr: " & $error)

-  let queryResponse = (await waku.node.wakuStoreClient.query(storeQueryRequest, peer)).valueOr:
+  let queryResponse = (await waku.node.wakuStoreClient.query(?storeQueryRequest, peer)).valueOr:
     return err("JsonStoreQueryRequest failed store query: " & $error)

   return ok($(%*queryResponse)) ## returning the response in json format


@@ -17,7 +17,7 @@ suite "Node Factory":
       node.wakuStore.isNil()
       node.wakuFilter.isNil()
       not node.wakuStoreClient.isNil()
-      not node.rendezvous.isNil()
+      not node.wakuRendezvous.isNil()

   test "Set up a node with Store enabled":
     var conf = defaultTestWakuNodeConf()


@@ -1,51 +1,63 @@
 {.used.}

-import chronos, testutils/unittests, libp2p/builders, libp2p/protocols/rendezvous
-import waku/node/waku_switch, ./testlib/common, ./testlib/wakucore
+import std/options, chronos, testutils/unittests, libp2p/builders
+import
+  waku/waku_core/peers,
+  waku/node/waku_node,
+  waku/node/peer_manager/peer_manager,
+  waku/waku_rendezvous/protocol,
+  ./testlib/[wakucore, wakunode]

-proc newRendezvousClientSwitch(rdv: RendezVous): Switch =
-  SwitchBuilder
-  .new()
-  .withRng(rng())
-  .withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
-  .withTcpTransport()
-  .withMplex()
-  .withNoise()
-  .withRendezVous(rdv)
-  .build()
-
 procSuite "Waku Rendezvous":
-  asyncTest "Waku Switch uses Rendezvous":
-    ## Setup
+  asyncTest "Simple remote test":
     let
-      wakuClient = RendezVous.new()
-      sourceClient = RendezVous.new()
-      destClient = RendezVous.new()
-      wakuSwitch = newRendezvousClientSwitch(wakuClient) #rendezvous point
-      sourceSwitch = newRendezvousClientSwitch(sourceClient) #client
-      destSwitch = newRendezvousClientSwitch(destClient) #client
+      clusterId = 10.uint16
+      node1 = newTestWakuNode(
+        generateSecp256k1Key(),
+        parseIpAddress("0.0.0.0"),
+        Port(0),
+        clusterId = clusterId,
+      )
+      node2 = newTestWakuNode(
+        generateSecp256k1Key(),
+        parseIpAddress("0.0.0.0"),
+        Port(0),
+        clusterId = clusterId,
+      )
+      node3 = newTestWakuNode(
+        generateSecp256k1Key(),
+        parseIpAddress("0.0.0.0"),
+        Port(0),
+        clusterId = clusterId,
+      )

-    # Setup client rendezvous
-    wakuClient.setup(wakuSwitch)
-    sourceClient.setup(sourceSwitch)
-    destClient.setup(destSwitch)
-
-    await allFutures(wakuSwitch.start(), sourceSwitch.start(), destSwitch.start())
-
-    # Connect clients to the rendezvous point
-    await sourceSwitch.connect(wakuSwitch.peerInfo.peerId, wakuSwitch.peerInfo.addrs)
-    await destSwitch.connect(wakuSwitch.peerInfo.peerId, wakuSwitch.peerInfo.addrs)
-
-    let res0 = await sourceClient.request("empty")
-    check res0.len == 0
+    await allFutures(
+      [node1.mountRendezvous(), node2.mountRendezvous(), node3.mountRendezvous()]
+    )
+    await allFutures([node1.start(), node2.start(), node3.start()])

-    # Check that source client gets peer info of dest client from rendezvous point
-    await sourceClient.advertise("foo")
-    let res1 = await destClient.request("foo")
+    let peerInfo1 = node1.switch.peerInfo.toRemotePeerInfo()
+    let peerInfo2 = node2.switch.peerInfo.toRemotePeerInfo()
+    let peerInfo3 = node3.switch.peerInfo.toRemotePeerInfo()
+
+    node1.peerManager.addPeer(peerInfo2)
+    node2.peerManager.addPeer(peerInfo1)
+    node2.peerManager.addPeer(peerInfo3)
+    node3.peerManager.addPeer(peerInfo2)
+
+    let namespace = "test/name/space"
+
+    let res = await node1.wakuRendezvous.batchAdvertise(
+      namespace, 60.seconds, @[peerInfo2.peerId]
+    )
+    assert res.isOk(), $res.error
+
+    let response =
+      await node3.wakuRendezvous.batchRequest(namespace, 1, @[peerInfo2.peerId])
+    assert response.isOk(), $response.error
+    let records = response.get()
+
     check:
-      res1.len == 1
-      res1[0] == sourceSwitch.peerInfo.signedPeerRecord.data
-
-    await allFutures(wakuSwitch.stop(), sourceSwitch.stop(), destSwitch.stop())
+      records.len == 1
+      records[0].peerId == peerInfo1.peerId


@@ -40,6 +40,7 @@ proc defaultTestWakuNodeConf*(): WakuNodeConf =
     clusterId: DefaultClusterId,
     shards: @[DefaultShardId],
     relay: true,
+    rendezvous: true,
     storeMessageDbUrl: "sqlite://store.sqlite3",
   )


@@ -62,7 +62,7 @@ suite "Wakunode2 - Waku initialization":
       node.wakuArchive.isNil()
       node.wakuStore.isNil()
       not node.wakuStoreClient.isNil()
-      not node.rendezvous.isNil()
+      not node.wakuRendezvous.isNil()

     ## Cleanup
     waitFor waku.stop()
@@ -92,7 +92,7 @@ suite "Wakunode2 - Waku initialization":
       node.wakuArchive.isNil()
       node.wakuStore.isNil()
      not node.wakuStoreClient.isNil()
-      not node.rendezvous.isNil()
+      not node.wakuRendezvous.isNil()

       # DS structures are updated with dynamic ports
       typedNodeEnr.get().tcp.get() != 0

@@ -1 +1 @@
-Subproject commit 741274439ce72162ab3c740e7c0ef624d32725f9
+Subproject commit 8fafcd0bac9f409091b7bcaee62ab6330f57441e


@@ -0,0 +1,4 @@
import ../waku_relay/protocol
type AppCallbacks* = ref object
relayHandler*: WakuRelayHandler
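Note: embedders fill this object and pass it to Waku.new (libwaku builds one in waku_new earlier in this diff). A hedged sketch, assuming WakuRelayHandler is the async (PubsubTopic, WakuMessage) handler declared in waku_relay/protocol; the handler name is illustrative:

  proc onRelayMessage(pubsubTopic: PubsubTopic, message: WakuMessage): Future[void] {.async.} =
    echo "relay message on ", pubsubTopic
  let callbacks = AppCallbacks(relayHandler: onRelayMessage)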


@@ -647,6 +647,13 @@ with the drawback of consuming some more bandwidth.""",
     name: "peer-exchange-node"
   .}: string

+  ## Rendez vous
+  rendezvous* {.
+    desc: "Enable waku rendezvous discovery server",
+    defaultValue: true,
+    name: "rendezvous"
+  .}: bool
+
   ## websocket config
   websocketSupport* {.
     desc: "Enable websocket: true|false",


@@ -124,6 +124,16 @@ proc getNumShardsInNetwork*(conf: WakuNodeConf): uint32 =
   # https://github.com/waku-org/specs/blob/master/standards/core/relay-sharding.md#static-sharding
   return uint32(MaxShardIndex + 1)

+proc getAutoshards*(
+    node: WakuNode, contentTopics: seq[string]
+): Result[seq[RelayShard], string] =
+  var autoShards: seq[RelayShard]
+  for contentTopic in contentTopics:
+    let shard = node.wakuSharding.getShard(contentTopic).valueOr:
+      return err("Could not parse content topic: " & error)
+    autoShards.add(shard)
+  return ok(autoshards)
+
 proc setupProtocols(
     node: WakuNode, conf: WakuNodeConf, nodeKey: crypto.PrivateKey
 ): Future[Result[void, string]] {.async.} =
@@ -169,11 +179,8 @@ proc setupProtocols(
       peerExchangeHandler = some(handlePeerExchange)

-    var autoShards: seq[RelayShard]
-    for contentTopic in conf.contentTopics:
-      let shard = node.wakuSharding.getShard(contentTopic).valueOr:
-        return err("Could not parse content topic: " & error)
-      autoShards.add(shard)
+    let autoShards = node.getAutoshards(conf.contentTopics).valueOr:
+      return err("Could not get autoshards: " & error)

     debug "Shards created from content topics",
       contentTopics = conf.contentTopics, shards = autoShards
@@ -207,12 +214,9 @@ proc setupProtocols(
           protectedShard = shardKey.shard, publicKey = shardKey.key
     node.wakuRelay.addSignedShardsValidator(subscribedProtectedShards, conf.clusterId)

-  # Enable Rendezvous Discovery protocol when Relay is enabled
-  try:
-    await mountRendezvous(node)
-  except CatchableError:
-    return
-      err("failed to mount waku rendezvous protocol: " & getCurrentExceptionMsg())
+  # Only relay nodes should be rendezvous points.
+  if conf.rendezvous:
+    await node.mountRendezvous()

   # Keepalive mounted on all nodes
   try:


@@ -42,6 +42,7 @@ import
   ../factory/node_factory,
   ../factory/internal_config,
   ../factory/external_config,
+  ../factory/app_callbacks,
   ../waku_enr/multiaddr

 logScope:
@@ -67,6 +68,7 @@ type Waku* = ref object
   restServer*: WakuRestServerRef
   metricsServer*: MetricsHttpServerRef
+  appCallbacks*: AppCallbacks

 proc logConfig(conf: WakuNodeConf) =
   info "Configuration: Enabled protocols",
@@ -146,7 +148,32 @@ proc newCircuitRelay(isRelayClient: bool): Relay =
     return RelayClient.new()
   return Relay.new()

-proc new*(T: type Waku, confCopy: var WakuNodeConf): Result[Waku, string] =
+proc setupAppCallbacks(
+    node: WakuNode, conf: WakuNodeConf, appCallbacks: AppCallbacks
+): Result[void, string] =
+  if appCallbacks.isNil():
+    info "No external callbacks to be set"
+    return ok()
+
+  if not appCallbacks.relayHandler.isNil():
+    if node.wakuRelay.isNil():
+      return err("Cannot configure relayHandler callback without Relay mounted")
+
+    let autoShards = node.getAutoshards(conf.contentTopics).valueOr:
+      return err("Could not get autoshards: " & error)
+
+    let confShards =
+      conf.shards.mapIt(RelayShard(clusterId: conf.clusterId, shardId: uint16(it)))
+    let shards = confShards & autoShards
+
+    for shard in shards:
+      discard node.wakuRelay.subscribe($shard, appCallbacks.relayHandler)
+
+  return ok()
+
+proc new*(
+    T: type Waku, confCopy: var WakuNodeConf, appCallbacks: AppCallbacks = nil
+): Result[Waku, string] =
   let rng = crypto.newRng()

   logging.setupLog(confCopy.logLevel, confCopy.logFormat)
@@ -225,6 +252,10 @@ proc new*(T: type Waku, confCopy: var WakuNodeConf): Result[Waku, string] =
   let node = nodeRes.get()

+  node.setupAppCallbacks(confCopy, appCallbacks).isOkOr:
+    error "Failed setting up app callbacks", error = error
+    return err("Failed setting up app callbacks: " & $error)
+
   ## Delivery Monitor
   var deliveryMonitor: DeliveryMonitor
   if confCopy.reliabilityEnabled:
@@ -246,6 +277,7 @@ proc new*(T: type Waku, confCopy: var WakuNodeConf): Result[Waku, string] =
       key: confCopy.nodekey.get(),
       node: node,
       deliveryMonitor: deliveryMonitor,
+      appCallbacks: appCallbacks,
     )

   waku.setupSwitchServices(confCopy, relay, rng)
@@ -413,16 +445,6 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
   if not waku[].deliveryMonitor.isNil():
     waku[].deliveryMonitor.startDeliveryMonitor()

-  ## libp2p DiscoveryManager
-  waku[].discoveryMngr = DiscoveryManager()
-  waku[].discoveryMngr.add(
-    RendezVousInterface.new(rdv = waku[].node.rendezvous, tta = 1.minutes)
-  )
-  if not isNil(waku[].node.wakuRelay):
-    for topic in waku[].node.wakuRelay.getSubscribedTopics():
-      debug "advertise rendezvous namespace", topic
-      waku[].discoveryMngr.advertise(RdvNamespace(topic))
-
   return ok()

 # Waku shutdown


@@ -17,7 +17,6 @@ import
   libp2p/protocols/pubsub/rpc/messages,
   libp2p/protocols/connectivity/autonat/client,
   libp2p/protocols/connectivity/autonat/service,
-  libp2p/protocols/rendezvous,
   libp2p/builders,
   libp2p/transports/transport,
   libp2p/transports/tcptransport,
@@ -39,6 +38,7 @@ import
   ../waku_filter_v2/client as filter_client,
   ../waku_filter_v2/subscriptions as filter_subscriptions,
   ../waku_metadata,
+  ../waku_rendezvous/protocol,
   ../waku_lightpush/client as lightpush_client,
   ../waku_lightpush/common,
   ../waku_lightpush/protocol,
@@ -110,7 +110,7 @@ type
     enr*: enr.Record
     libp2pPing*: Ping
     rng*: ref rand.HmacDrbgContext
-    rendezvous*: RendezVous
+    wakuRendezvous*: WakuRendezVous
     announcedAddresses*: seq[MultiAddress]
     started*: bool # Indicates that node has started listening
     topicSubscriptionQueue*: AsyncEventQueue[SubscriptionEvent]
@@ -1217,22 +1217,16 @@ proc startKeepalive*(node: WakuNode, keepalive = 2.minutes) =
 proc mountRendezvous*(node: WakuNode) {.async: (raises: []).} =
   info "mounting rendezvous discovery protocol"

-  try:
-    node.rendezvous = RendezVous.new(node.switch)
-  except Exception as e:
-    error "failed to create rendezvous", error = getCurrentExceptionMsg()
+  node.wakuRendezvous = WakuRendezVous.new(node.switch, node.peerManager, node.enr).valueOr:
+    error "initializing waku rendezvous failed", error = error
     return

-  if node.started:
-    try:
-      await node.rendezvous.start()
-    except CatchableError:
-      error "failed to start rendezvous", error = getCurrentExceptionMsg()
+  # Always start discovering peers at startup
+  (await node.wakuRendezvous.initialRequestAll()).isOkOr:
+    error "rendezvous failed initial requests", error = error

-  try:
-    node.switch.mount(node.rendezvous)
-  except LPError:
-    error "failed to mount rendezvous", error = getCurrentExceptionMsg()
+  if node.started:
+    await node.wakuRendezvous.start()

 proc isBindIpWithZeroPort(inputMultiAdd: MultiAddress): bool =
   let inputStr = $inputMultiAdd
@@ -1304,6 +1298,9 @@ proc start*(node: WakuNode) {.async.} =
   if not node.wakuStoreResume.isNil():
     await node.wakuStoreResume.start()

+  if not node.wakuRendezvous.isNil():
+    await node.wakuRendezvous.start()
+
   ## The switch uses this mapper to update peer info addrs
   ## with announced addrs after start
   let addressMapper = proc(
@@ -1346,6 +1343,9 @@ proc stop*(node: WakuNode) {.async.} =
   if not node.wakuPeerExchange.isNil() and not node.wakuPeerExchange.pxLoopHandle.isNil():
     await node.wakuPeerExchange.pxLoopHandle.cancelAndWait()

+  if not node.wakuRendezvous.isNil():
+    await node.wakuRendezvous.stopWait()
+
   node.started = false

 proc isReady*(node: WakuNode): Future[bool] {.async: (raises: [Exception]).} =


@@ -83,7 +83,7 @@ proc readValue*(
         )
       connected = some(reader.readValue(bool))
     else:
-      unrecognizedFieldWarning()
+      unrecognizedFieldWarning(value)

   if connected.isNone():
     reader.raiseUnexpectedValue("Field `connected` is missing")
@@ -116,7 +116,7 @@ proc readValue*(
         reader.raiseUnexpectedField("Multiple `origin` fields found", "WakuPeer")
       origin = some(reader.readValue(PeerOrigin))
     else:
-      unrecognizedFieldWarning()
+      unrecognizedFieldWarning(value)

   if multiaddr.isNone():
     reader.raiseUnexpectedValue("Field `multiaddr` is missing")
@@ -153,7 +153,7 @@ proc readValue*(
         )
       contentTopic = some(reader.readValue(string))
     else:
-      unrecognizedFieldWarning()
+      unrecognizedFieldWarning(value)

   if pubsubTopic.isNone():
     reader.raiseUnexpectedValue("Field `pubsubTopic` is missing")
@@ -185,7 +185,7 @@ proc readValue*(
         )
       filterCriteria = some(reader.readValue(seq[FilterTopic]))
     else:
-      unrecognizedFieldWarning()
+      unrecognizedFieldWarning(value)

   if peerId.isNone():
     reader.raiseUnexpectedValue("Field `peerId` is missing")


@@ -2,6 +2,7 @@
 import chronicles, json_serialization, json_serialization/std/options
 import ../../../waku_node, ../serdes
+import std/typetraits

 #### Types
@@ -47,7 +48,7 @@ proc readValue*(
         reader.raiseUnexpectedField("Multiple `enrUri` fields found", "DebugWakuInfo")
       enrUri = some(reader.readValue(string))
     else:
-      unrecognizedFieldWarning()
+      unrecognizedFieldWarning(value)

   if listenAddresses.isNone():
     reader.raiseUnexpectedValue("Field `listenAddresses` is missing")


@@ -187,7 +187,7 @@ proc readValue*(
     of "ephemeral":
       ephemeral = some(reader.readValue(bool))
     else:
-      unrecognizedFieldWarning()
+      unrecognizedFieldWarning(value)

   if payload.isNone():
     reader.raiseUnexpectedValue("Field `payload` is missing")
@@ -225,7 +225,7 @@ proc readValue*(
     of "contentFilters":
       contentFilters = some(reader.readValue(seq[ContentTopic]))
     else:
-      unrecognizedFieldWarning()
+      unrecognizedFieldWarning(value)

   if contentFilters.isNone():
     reader.raiseUnexpectedValue("Field `contentFilters` is missing")
@@ -262,7 +262,7 @@ proc readValue*(
     of "requestId":
       requestId = some(reader.readValue(string))
     else:
-      unrecognizedFieldWarning()
+      unrecognizedFieldWarning(value)

   if requestId.isNone():
     reader.raiseUnexpectedValue("Field `requestId` is missing")
@@ -296,7 +296,7 @@ proc readValue*(
     of "contentFilters":
       contentFilters = some(reader.readValue(seq[ContentTopic]))
     else:
-      unrecognizedFieldWarning()
+      unrecognizedFieldWarning(value)

   if requestId.isNone():
     reader.raiseUnexpectedValue("Field `requestId` is missing")
@@ -344,7 +344,7 @@ proc readValue*(
     of "contentFilters":
       contentFilters = some(reader.readValue(seq[ContentTopic]))
     else:
-      unrecognizedFieldWarning()
+      unrecognizedFieldWarning(value)

   if requestId.isNone():
     reader.raiseUnexpectedValue("Field `requestId` is missing")
@@ -385,7 +385,7 @@ proc readValue*(
     of "requestId":
       requestId = some(reader.readValue(string))
     else:
-      unrecognizedFieldWarning()
+      unrecognizedFieldWarning(value)

   if requestId.isNone():
     reader.raiseUnexpectedValue("Field `requestId` is missing")
@@ -416,7 +416,7 @@ proc readValue*(
     of "statusDesc":
       statusDesc = some(reader.readValue(string))
     else:
-      unrecognizedFieldWarning()
+      unrecognizedFieldWarning(value)

   if requestId.isNone():
     reader.raiseUnexpectedValue("Field `requestId` is missing")


@@ -65,7 +65,7 @@ proc readValue*(
       protocolsHealth = some(reader.readValue(seq[ProtocolHealth]))
     else:
-      unrecognizedFieldWarning()
+      unrecognizedFieldWarning(value)

   if nodeHealth.isNone():
     reader.raiseUnexpectedValue("Field `nodeHealth` is missing")


@@ -52,7 +52,7 @@ proc readValue*(
     of "message":
       message = some(reader.readValue(RelayWakuMessage))
     else:
-      unrecognizedFieldWarning()
+      unrecognizedFieldWarning(value)

   if message.isNone():
     reader.raiseUnexpectedValue("Field `message` is missing")


@@ -117,7 +117,7 @@ proc readValue*(
     of "ephemeral":
       ephemeral = some(reader.readValue(bool))
     else:
-      unrecognizedFieldWarning()
+      unrecognizedFieldWarning(value)

   if payload.isNone() or isEmptyOrWhitespace(string(payload.get())):
     reader.raiseUnexpectedValue("Field `payload` is missing or empty")


@@ -20,12 +20,12 @@ createJsonFlavor RestJson
 Json.setWriter JsonWriter, PreferredOutput = string

-template unrecognizedFieldWarning*() =
+template unrecognizedFieldWarning*(field: typed) =
   # TODO: There should be a different notification mechanism for informing the
   #       caller of a deserialization routine for unexpected fields.
   #       The chonicles import in this module should be removed.
   debug "JSON field not recognized by the current version of nwaku. Consider upgrading",
-    fieldName, typeName = typetraits.name(typeof value)
+    fieldName, typeName = typetraits.name(typeof field)

 type SerdesResult*[T] = Result[T, cstring]
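Note: the template now receives the object being deserialized so the warning can log its concrete type. A hedged sketch of a call site, with MyType and its field as stand-ins:

  proc readValue*(reader: var JsonReader[RestJson], value: var MyType) {.raises: [SerializationError, IOError].} =
    for fieldName in readObjectFields(reader):
      case fieldName
      of "known":
        value.known = reader.readValue(string)
      else:
        unrecognizedFieldWarning(value)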

waku/waku_rendezvous.nim (new file, 3 lines)

@@ -0,0 +1,3 @@
import ./waku_rendezvous/protocol
export protocol


@@ -0,0 +1,36 @@
{.push raises: [].}
import std/options, chronos
import ../common/enr, ../waku_enr/capabilities, ../waku_enr/sharding
const DiscoverLimit* = 1000
const DefaultRegistrationTTL* = 60.seconds
const DefaultRegistrationInterval* = 10.seconds
const PeersRequestedCount* = 12
proc computeNamespace*(clusterId: uint16, shard: uint16): string =
var namespace = "rs/"
namespace &= $clusterId
namespace &= '/'
namespace &= $shard
return namespace
proc computeNamespace*(clusterId: uint16, shard: uint16, cap: Capabilities): string =
var namespace = "rs/"
namespace &= $clusterId
namespace &= '/'
namespace &= $shard
namespace &= '/'
namespace &= $cap
return namespace
proc getRelayShards*(enr: enr.Record): Option[RelayShards] =
let typedRecord = enr.toTyped().valueOr:
return none(RelayShards)
return typedRecord.relaySharding()
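Note: for illustration, the namespace helpers simply join cluster and shard (and optionally a capability), e.g.:

  assert computeNamespace(1, 3) == "rs/1/3"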


@@ -0,0 +1,267 @@
{.push raises: [].}
import
std/[sugar, options],
results,
chronos,
chronicles,
metrics,
libp2p/protocols/rendezvous,
libp2p/switch,
libp2p/utility
import
../node/peer_manager,
../common/enr,
../waku_enr/capabilities,
../waku_enr/sharding,
../waku_core/peers,
../waku_core/topics,
./common
logScope:
topics = "waku rendezvous"
declarePublicCounter rendezvousPeerFoundTotal,
"total number of peers found via rendezvous"
type WakuRendezVous* = ref object
rendezvous: Rendezvous
peerManager: PeerManager
relayShard: RelayShards
capabilities: seq[Capabilities]
periodicRegistrationFut: Future[void]
proc batchAdvertise*(
self: WakuRendezVous,
namespace: string,
ttl: Duration = DefaultRegistrationTTL,
peers: seq[PeerId],
): Future[Result[void, string]] {.async: (raises: []).} =
## Register with all rendezvous peers under a namespace
# rendezvous.advertise except already opened connections
# must dial first
var futs = collect(newSeq):
for peerId in peers:
self.peerManager.dialPeer(peerId, RendezVousCodec)
let dialCatch = catch:
await allFinished(futs)
if dialCatch.isErr():
return err("batchAdvertise: " & dialCatch.error.msg)
futs = dialCatch.get()
let conns = collect(newSeq):
for fut in futs:
let catchable = catch:
fut.read()
if catchable.isErr():
error "rendezvous dial failed", error = catchable.error.msg
continue
let connOpt = catchable.get()
let conn = connOpt.valueOr:
continue
conn
let advertCatch = catch:
await self.rendezvous.advertise(namespace, ttl, peers)
for conn in conns:
await conn.close()
if advertCatch.isErr():
return err("batchAdvertise: " & advertCatch.error.msg)
return ok()
proc batchRequest*(
self: WakuRendezVous,
namespace: string,
count: int = DiscoverLimit,
peers: seq[PeerId],
): Future[Result[seq[PeerRecord], string]] {.async: (raises: []).} =
## Request all records from all rendezvous peers matching a namespace
# rendezvous.request except already opened connections
# must dial first
var futs = collect(newSeq):
for peerId in peers:
self.peerManager.dialPeer(peerId, RendezVousCodec)
let dialCatch = catch:
await allFinished(futs)
if dialCatch.isErr():
return err("batchRequest: " & dialCatch.error.msg)
futs = dialCatch.get()
let conns = collect(newSeq):
for fut in futs:
let catchable = catch:
fut.read()
if catchable.isErr():
error "rendezvous dial failed", error = catchable.error.msg
continue
let connOpt = catchable.get()
let conn = connOpt.valueOr:
continue
conn
let reqCatch = catch:
await self.rendezvous.request(namespace, count, peers)
for conn in conns:
await conn.close()
if reqCatch.isErr():
return err("batchRequest: " & reqCatch.error.msg)
return ok(reqCatch.get())
proc advertiseAll(
self: WakuRendezVous
): Future[Result[void, string]] {.async: (raises: []).} =
debug "waku rendezvous advertisements started"
let pubsubTopics = self.relayShard.topics()
let futs = collect(newSeq):
for pubsubTopic in pubsubTopics:
# Get a random RDV peer for that shard
let rpi = self.peerManager.selectPeer(RendezVousCodec, some($pubsubTopic)).valueOr:
error "could not get a peer supporting RendezVousCodec"
continue
let namespace = computeNamespace(pubsubTopic.clusterId, pubsubTopic.shardId)
# Advertise yourself on that peer
self.batchAdvertise(namespace, DefaultRegistrationTTL, @[rpi.peerId])
let catchable = catch:
await allFinished(futs)
if catchable.isErr():
return err(catchable.error.msg)
for fut in catchable.get():
if fut.failed():
error "rendezvous advertisement failed", error = fut.error.msg
debug "waku rendezvous advertisements finished"
return ok()
proc initialRequestAll*(
self: WakuRendezVous
): Future[Result[void, string]] {.async: (raises: []).} =
debug "waku rendezvous initial requests started"
let pubsubTopics = self.relayShard.topics()
let futs = collect(newSeq):
for pubsubTopic in pubsubTopics:
let namespace = computeNamespace(pubsubTopic.clusterId, pubsubTopic.shardId)
# Get a random RDV peer for that shard
let rpi = self.peerManager.selectPeer(RendezVousCodec, some($pubsubTopic)).valueOr:
error "could not get a peer supporting RendezVousCodec"
continue
# Ask for peer records for that shard
self.batchRequest(namespace, PeersRequestedCount, @[rpi.peerId])
let catchable = catch:
await allFinished(futs)
if catchable.isErr():
return err(catchable.error.msg)
for fut in catchable.get():
if fut.failed():
error "rendezvous request failed", error = fut.error.msg
elif fut.finished():
let res = fut.value()
let records = res.valueOr:
return err($res.error)
for record in records:
rendezvousPeerFoundTotal.inc()
self.peerManager.addPeer(record)
debug "waku rendezvous initial requests finished"
return ok()
proc periodicRegistration(self: WakuRendezVous) {.async.} =
debug "waku rendezvous periodic registration started",
interval = DefaultRegistrationInterval
# infinite loop
while true:
await sleepAsync(DefaultRegistrationInterval)
(await self.advertiseAll()).isOkOr:
debug "waku rendezvous advertisements failed", error = error
proc new*(
T: type WakuRendezVous, switch: Switch, peerManager: PeerManager, enr: Record
): Result[T, string] {.raises: [].} =
let relayshard = getRelayShards(enr).valueOr:
warn "Using default cluster id 0"
RelayShards(clusterID: 0, shardIds: @[])
let capabilities = enr.getCapabilities()
let rvCatchable = catch:
RendezVous.new(switch = switch, minDuration = DefaultRegistrationTTL)
if rvCatchable.isErr():
return err(rvCatchable.error.msg)
let rv = rvCatchable.get()
let mountCatchable = catch:
switch.mount(rv)
if mountCatchable.isErr():
return err(mountCatchable.error.msg)
var wrv = WakuRendezVous()
wrv.rendezvous = rv
wrv.peerManager = peerManager
wrv.relayshard = relayshard
wrv.capabilities = capabilities
debug "waku rendezvous initialized",
cluster = relayshard.clusterId,
shards = relayshard.shardIds,
capabilities = capabilities
return ok(wrv)
proc start*(self: WakuRendezVous) {.async: (raises: []).} =
# start registering forever
self.periodicRegistrationFut = self.periodicRegistration()
debug "waku rendezvous discovery started"
proc stopWait*(self: WakuRendezVous) {.async: (raises: []).} =
if not self.periodicRegistrationFut.isNil():
await self.periodicRegistrationFut.cancelAndWait()
debug "waku rendezvous discovery stopped"