From a117143ca1322cefd1508b7e022382b8d983415c Mon Sep 17 00:00:00 2001 From: gabrielmer <101006718+gabrielmer@users.noreply.github.com> Date: Wed, 5 Feb 2025 18:16:37 +0200 Subject: [PATCH 01/22] fix: avoid sending relay callbacks if relay is disabled (#3276) --- .../requests/node_lifecycle_request.nim | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim b/library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim index 087a78d3e..9bd0017ab 100644 --- a/library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim +++ b/library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim @@ -67,6 +67,11 @@ proc createWaku( formattedString & ". expected type: " & $typeof(confValue) ) + # Don't send relay app callbacks if relay is disabled + if not conf.relay and not appCallbacks.isNil(): + appCallbacks.relayHandler = nil + appCallbacks.topicHealthChangeHandler = nil + let wakuRes = Waku.new(conf, appCallbacks).valueOr: error "waku initialization failed", error = error return err("Failed setting up Waku: " & $error) From 32ba56d77c8b572622bf49b7d17d2294b0780864 Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Thu, 6 Feb 2025 17:21:23 +0100 Subject: [PATCH 02/22] chore: refactor filter to react when the remote peer closes the stream (#3281) Better control when the remote peer closes the WakuFilterPushCodec stream. For example, go-waku closes the stream for every received message. On the other hand, js-waku keeps the stream opened. Therefore, we support both scenarios. 
--- vendor/nim-libp2p | 2 +- waku/incentivization/eligibility_manager.nim | 3 +- waku/node/peer_manager/peer_manager.nim | 36 ++++++++++++++++++++ waku/waku_filter_v2/protocol.nim | 28 ++++----------- 4 files changed, 46 insertions(+), 23 deletions(-) diff --git a/vendor/nim-libp2p b/vendor/nim-libp2p index c5aa3736f..a4f0a638e 160000 --- a/vendor/nim-libp2p +++ b/vendor/nim-libp2p @@ -1 +1 @@ -Subproject commit c5aa3736f96e4d66f6aa653a2351ded74b7d21a9 +Subproject commit a4f0a638e718f05ecec01ae3a6ad2838714e7e40 diff --git a/waku/incentivization/eligibility_manager.nim b/waku/incentivization/eligibility_manager.nim index 3343f7186..da8280da3 100644 --- a/waku/incentivization/eligibility_manager.nim +++ b/waku/incentivization/eligibility_manager.nim @@ -13,7 +13,8 @@ type EligibilityManager* = ref object # FIXME: make web3 private? proc init*( T: type EligibilityManager, ethClient: string ): Future[EligibilityManager] {.async.} = - return EligibilityManager(web3: await newWeb3(ethClient), seenTxIds: initHashSet[TxHash]()) + return + EligibilityManager(web3: await newWeb3(ethClient), seenTxIds: initHashSet[TxHash]()) # TODO: handle error if web3 instance is not established # Clean up the web3 instance diff --git a/waku/node/peer_manager/peer_manager.nim b/waku/node/peer_manager/peer_manager.nim index 6894f5578..ba04b6b00 100644 --- a/waku/node/peer_manager/peer_manager.nim +++ b/waku/node/peer_manager/peer_manager.nim @@ -518,6 +518,42 @@ proc connectedPeers*( return (inPeers, outPeers) +proc getStreamByPeerIdAndProtocol*( + pm: PeerManager, peerId: PeerId, protocol: string +): Future[Result[Connection, string]] {.async.} = + ## Establishes a new stream to the given peer and protocol or returns the existing stream, if any. + ## Notice that the "Connection" type represents a stream within a transport connection + ## (we will need to adapt this term.) 
+ + let peerIdsMuxers: Table[PeerId, seq[Muxer]] = pm.switch.connManager.getConnections() + if not peerIdsMuxers.contains(peerId): + return err("peerId not found in connManager: " & $peerId) + + let muxers = peerIdsMuxers[peerId] + + var streams = newSeq[Connection](0) + for m in muxers: + for s in m.getStreams(): + ## getStreams is defined in nim-libp2p + streams.add(s) + + ## Try to get the opened streams for the given protocol + let streamsOfInterest = streams.filterIt( + it.protocol == protocol and not LPStream(it).isClosed and + not LPStream(it).isClosedRemotely + ) + + if streamsOfInterest.len > 0: + ## In theory there should be one stream per protocol. Then we just pick up the 1st + return ok(streamsOfInterest[0]) + + ## There isn't still a stream. Let's dial to create one + let streamRes = await pm.dialPeer(peerId, protocol) + if streamRes.isNone(): + return err("getStreamByPeerIdProto no connection to peer: " & $peerId) + + return ok(streamRes.get()) + proc connectToRelayPeers*(pm: PeerManager) {.async.} = var (inRelayPeers, outRelayPeers) = pm.connectedPeers(WakuRelayCodec) let totalRelayPeers = inRelayPeers.len + outRelayPeers.len diff --git a/waku/waku_filter_v2/protocol.nim b/waku/waku_filter_v2/protocol.nim index 22504488e..d8b79ab67 100644 --- a/waku/waku_filter_v2/protocol.nim +++ b/waku/waku_filter_v2/protocol.nim @@ -172,29 +172,15 @@ proc pushToPeer( ): Future[Result[void, string]] {.async.} = debug "pushing message to subscribed peer", peerId = shortLog(peerId) - if not wf.peerManager.wakuPeerStore.hasPeer(peerId, WakuFilterPushCodec): - # Check that peer has not been removed from peer store - error "no addresses for peer", peerId = shortLog(peerId) - return err("no addresses for peer: " & $peerId) + let stream = ( + await wf.peerManager.getStreamByPeerIdAndProtocol(peerId, WakuFilterPushCodec) + ).valueOr: + error "pushToPeer failed", error + return err("pushToPeer failed: " & $error) - let conn = - if wf.peerConnections.contains(peerId): - 
wf.peerConnections[peerId] - else: - ## we never pushed a message before, let's dial then - let connRes = await wf.peerManager.dialPeer(peerId, WakuFilterPushCodec) - if connRes.isNone(): - ## We do not remove this peer, but allow the underlying peer manager - ## to do so if it is deemed necessary - error "pushToPeer no connection to peer", peerId = shortLog(peerId) - return err("pushToPeer no connection to peer: " & shortLog(peerId)) + await stream.writeLp(buffer) - let newConn = connRes.get() - wf.peerConnections[peerId] = newConn - newConn - - await conn.writeLp(buffer) - debug "published successful", peerId = shortLog(peerId), conn + debug "published successful", peerId = shortLog(peerId), stream waku_service_network_bytes.inc( amount = buffer.len().int64, labelValues = [WakuFilterPushCodec, "out"] ) From f65bea0f8edb3c9dcacfc91615e796ad57ae6674 Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Thu, 6 Feb 2025 17:44:12 +0100 Subject: [PATCH 03/22] Revert "chore: waku_archive add protection against queries longer than 24h" (#3278) This reverts commit 401402368d9075f93692d180cb30156785eed5a8. 
--- tests/node/test_wakunode_legacy_store.nim | 65 ++++++------- tests/node/test_wakunode_store.nim | 91 ++++++++++--------- tests/waku_archive/test_waku_archive.nim | 43 +++------ .../test_rln_group_manager_onchain.nim | 1 + waku/waku_archive/archive.nim | 30 ------ waku/waku_store_sync.nim | 4 +- 6 files changed, 99 insertions(+), 135 deletions(-) diff --git a/tests/node/test_wakunode_legacy_store.nim b/tests/node/test_wakunode_legacy_store.nim index b52dc6e6e..5b0409d86 100644 --- a/tests/node/test_wakunode_legacy_store.nim +++ b/tests/node/test_wakunode_legacy_store.nim @@ -46,16 +46,16 @@ suite "Waku Store - End to End - Sorted Archive": let timeOrigin = now() archiveMessages = @[ - fakeWakuMessage(@[byte 00], ts = ts(-90, timeOrigin)), - fakeWakuMessage(@[byte 01], ts = ts(-80, timeOrigin)), - fakeWakuMessage(@[byte 02], ts = ts(-70, timeOrigin)), - fakeWakuMessage(@[byte 03], ts = ts(-60, timeOrigin)), - fakeWakuMessage(@[byte 04], ts = ts(-50, timeOrigin)), - fakeWakuMessage(@[byte 05], ts = ts(-40, timeOrigin)), - fakeWakuMessage(@[byte 06], ts = ts(-30, timeOrigin)), - fakeWakuMessage(@[byte 07], ts = ts(-20, timeOrigin)), - fakeWakuMessage(@[byte 08], ts = ts(-10, timeOrigin)), - fakeWakuMessage(@[byte 09], ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 02], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 03], ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 04], ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 05], ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)), ] historyQuery = HistoryQuery( @@ -657,23 +657,23 @@ suite "Waku Store - End to End - Archive with Multiple Topics": pageSize: 5, ) - let timeOrigin = now() - 90 + let 
timeOrigin = now() originTs = proc(offset = 0): Timestamp {.gcsafe, raises: [].} = ts(offset, timeOrigin) archiveMessages = @[ - fakeWakuMessage(@[byte 00], ts = originTs(-90), contentTopic = contentTopic), - fakeWakuMessage(@[byte 01], ts = originTs(-80), contentTopic = contentTopicB), - fakeWakuMessage(@[byte 02], ts = originTs(-70), contentTopic = contentTopicC), - fakeWakuMessage(@[byte 03], ts = originTs(-60), contentTopic = contentTopic), - fakeWakuMessage(@[byte 04], ts = originTs(-50), contentTopic = contentTopicB), - fakeWakuMessage(@[byte 05], ts = originTs(-40), contentTopic = contentTopicC), - fakeWakuMessage(@[byte 06], ts = originTs(-30), contentTopic = contentTopic), - fakeWakuMessage(@[byte 07], ts = originTs(-20), contentTopic = contentTopicB), - fakeWakuMessage(@[byte 08], ts = originTs(-10), contentTopic = contentTopicC), + fakeWakuMessage(@[byte 00], ts = originTs(00), contentTopic = contentTopic), + fakeWakuMessage(@[byte 01], ts = originTs(10), contentTopic = contentTopicB), + fakeWakuMessage(@[byte 02], ts = originTs(20), contentTopic = contentTopicC), + fakeWakuMessage(@[byte 03], ts = originTs(30), contentTopic = contentTopic), + fakeWakuMessage(@[byte 04], ts = originTs(40), contentTopic = contentTopicB), + fakeWakuMessage(@[byte 05], ts = originTs(50), contentTopic = contentTopicC), + fakeWakuMessage(@[byte 06], ts = originTs(60), contentTopic = contentTopic), + fakeWakuMessage(@[byte 07], ts = originTs(70), contentTopic = contentTopicB), + fakeWakuMessage(@[byte 08], ts = originTs(80), contentTopic = contentTopicC), fakeWakuMessage( - @[byte 09], ts = originTs(00), contentTopic = contentTopicSpecials + @[byte 09], ts = originTs(90), contentTopic = contentTopicSpecials ), ] @@ -827,9 +827,8 @@ suite "Waku Store - End to End - Archive with Multiple Topics": suite "Validation of Time-based Filtering": asyncTest "Basic Time Filtering": # Given a history query with start and end time - - historyQuery.startTime = some(originTs(-90)) - 
historyQuery.endTime = some(originTs(-70)) + historyQuery.startTime = some(originTs(20)) + historyQuery.endTime = some(originTs(40)) # When making a history query let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) @@ -837,13 +836,12 @@ suite "Waku Store - End to End - Archive with Multiple Topics": # Then the response contains the messages check: queryResponse.get().messages == - @[archiveMessages[0], archiveMessages[1], archiveMessages[2]] + @[archiveMessages[2], archiveMessages[3], archiveMessages[4]] asyncTest "Only Start Time Specified": # Given a history query with only start time - historyQuery.startTime = some(originTs(-20)) + historyQuery.startTime = some(originTs(20)) historyQuery.endTime = none(Timestamp) - historyQuery.pubsubTopic = none(string) # When making a history query let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) @@ -851,7 +849,12 @@ suite "Waku Store - End to End - Archive with Multiple Topics": # Then the response contains the messages check: queryResponse.get().messages == - @[archiveMessages[7], archiveMessages[8], archiveMessages[9]] + @[ + archiveMessages[2], + archiveMessages[3], + archiveMessages[4], + archiveMessages[5], + ] asyncTest "Only End Time Specified": # Given a history query with only end time @@ -886,8 +889,8 @@ suite "Waku Store - End to End - Archive with Multiple Topics": asyncTest "Time Filtering with Content Filtering": # Given a history query with time and content filtering - historyQuery.startTime = some(originTs(-90)) - historyQuery.endTime = some(originTs(-60)) + historyQuery.startTime = some(originTs(20)) + historyQuery.endTime = some(originTs(60)) historyQuery.contentTopics = @[contentTopicC] # When making a history query @@ -895,7 +898,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics": # Then the response contains the messages check: - queryResponse.get().messages == @[archiveMessages[2]] + queryResponse.get().messages == @[archiveMessages[2], 
archiveMessages[5]] asyncTest "Messages Outside of Time Range": # Given a history query with a valid time range which does not contain any messages diff --git a/tests/node/test_wakunode_store.nim b/tests/node/test_wakunode_store.nim index 4442fb8fe..49c24c6d8 100644 --- a/tests/node/test_wakunode_store.nim +++ b/tests/node/test_wakunode_store.nim @@ -47,16 +47,16 @@ suite "Waku Store - End to End - Sorted Archive": let timeOrigin = now() let messages = @[ - fakeWakuMessage(@[byte 00], ts = ts(-90, timeOrigin)), - fakeWakuMessage(@[byte 01], ts = ts(-80, timeOrigin)), - fakeWakuMessage(@[byte 02], ts = ts(-70, timeOrigin)), - fakeWakuMessage(@[byte 03], ts = ts(-60, timeOrigin)), - fakeWakuMessage(@[byte 04], ts = ts(-50, timeOrigin)), - fakeWakuMessage(@[byte 05], ts = ts(-40, timeOrigin)), - fakeWakuMessage(@[byte 06], ts = ts(-30, timeOrigin)), - fakeWakuMessage(@[byte 07], ts = ts(-20, timeOrigin)), - fakeWakuMessage(@[byte 08], ts = ts(-10, timeOrigin)), - fakeWakuMessage(@[byte 09], ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 02], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 03], ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 04], ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 05], ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)), ] archiveMessages = messages.mapIt( WakuMessageKeyValue( @@ -909,17 +909,17 @@ suite "Waku Store - End to End - Archive with Multiple Topics": let messages = @[ - fakeWakuMessage(@[byte 00], ts = originTs(-90), contentTopic = contentTopic), - fakeWakuMessage(@[byte 01], ts = originTs(-80), contentTopic = contentTopicB), - fakeWakuMessage(@[byte 02], ts = originTs(-70), contentTopic = contentTopicC), - 
fakeWakuMessage(@[byte 03], ts = originTs(-60), contentTopic = contentTopic), - fakeWakuMessage(@[byte 04], ts = originTs(-50), contentTopic = contentTopicB), - fakeWakuMessage(@[byte 05], ts = originTs(-40), contentTopic = contentTopicC), - fakeWakuMessage(@[byte 06], ts = originTs(-30), contentTopic = contentTopic), - fakeWakuMessage(@[byte 07], ts = originTs(-20), contentTopic = contentTopicB), - fakeWakuMessage(@[byte 08], ts = originTs(-10), contentTopic = contentTopicC), + fakeWakuMessage(@[byte 00], ts = originTs(00), contentTopic = contentTopic), + fakeWakuMessage(@[byte 01], ts = originTs(10), contentTopic = contentTopicB), + fakeWakuMessage(@[byte 02], ts = originTs(20), contentTopic = contentTopicC), + fakeWakuMessage(@[byte 03], ts = originTs(30), contentTopic = contentTopic), + fakeWakuMessage(@[byte 04], ts = originTs(40), contentTopic = contentTopicB), + fakeWakuMessage(@[byte 05], ts = originTs(50), contentTopic = contentTopicC), + fakeWakuMessage(@[byte 06], ts = originTs(60), contentTopic = contentTopic), + fakeWakuMessage(@[byte 07], ts = originTs(70), contentTopic = contentTopicB), + fakeWakuMessage(@[byte 08], ts = originTs(80), contentTopic = contentTopicC), fakeWakuMessage( - @[byte 09], ts = originTs(00), contentTopic = contentTopicSpecials + @[byte 09], ts = originTs(90), contentTopic = contentTopicSpecials ), ] @@ -1089,9 +1089,21 @@ suite "Waku Store - End to End - Archive with Multiple Topics": suite "Validation of Time-based Filtering": asyncTest "Basic Time Filtering": # Given a history query with start and end time - storeQuery.startTime = some(originTs(-90)) - storeQuery.endTime = some(originTs(-60)) - storeQuery.contentTopics = @[contentTopic, contentTopicB, contentTopicC] + storeQuery.startTime = some(originTs(20)) + storeQuery.endTime = some(originTs(40)) + + # When making a history query + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + 
queryResponse.get().messages == + @[archiveMessages[2], archiveMessages[3], archiveMessages[4]] + + asyncTest "Only Start Time Specified": + # Given a history query with only start time + storeQuery.startTime = some(originTs(20)) + storeQuery.endTime = none(Timestamp) # When making a history query let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) @@ -1100,35 +1112,30 @@ suite "Waku Store - End to End - Archive with Multiple Topics": check: queryResponse.get().messages == @[ - archiveMessages[0], - archiveMessages[1], archiveMessages[2], archiveMessages[3], + archiveMessages[4], + archiveMessages[5], ] - asyncTest "Only Start Time Specified": - # Given a history query with only start time - storeQuery.startTime = some(originTs(-40)) - storeQuery.endTime = none(Timestamp) - - # When making a history query - let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) - - # Then the response contains the messages - check: - queryResponse.get().messages == @[archiveMessages[5]] - asyncTest "Only End Time Specified": # Given a history query with only end time storeQuery.startTime = none(Timestamp) - storeQuery.endTime = some(originTs(-80)) + storeQuery.endTime = some(originTs(40)) # When making a history query let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains no messages check: - queryResponse.get().messages == @[archiveMessages[0], archiveMessages[1]] + queryResponse.get().messages == + @[ + archiveMessages[0], + archiveMessages[1], + archiveMessages[2], + archiveMessages[3], + archiveMessages[4], + ] asyncTest "Invalid Time Range": # Given a history query with invalid time range @@ -1144,8 +1151,8 @@ suite "Waku Store - End to End - Archive with Multiple Topics": asyncTest "Time Filtering with Content Filtering": # Given a history query with time and content filtering - storeQuery.startTime = some(originTs(-60)) - storeQuery.endTime = some(originTs(-20)) + storeQuery.startTime = 
some(originTs(20)) + storeQuery.endTime = some(originTs(60)) storeQuery.contentTopics = @[contentTopicC] # When making a history query @@ -1153,7 +1160,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics": # Then the response contains the messages check: - queryResponse.get().messages == @[archiveMessages[5]] + queryResponse.get().messages == @[archiveMessages[2], archiveMessages[5]] asyncTest "Messages Outside of Time Range": # Given a history query with a valid time range which does not contain any messages diff --git a/tests/waku_archive/test_waku_archive.nim b/tests/waku_archive/test_waku_archive.nim index 5162d310c..9e1b927e0 100644 --- a/tests/waku_archive/test_waku_archive.nim +++ b/tests/waku_archive/test_waku_archive.nim @@ -491,8 +491,7 @@ procSuite "Waku Archive - find messages": response.messages.mapIt(it.timestamp) == @[ts(30, timeOrigin), ts(50, timeOrigin)] test "handle temporal history query with a zero-size time window": - ## A zero-size window results in an error to the store client. That kind of queries - ## are pointless and we need to rapidly inform about that to the client. 
+ ## A zero-size window results in an empty list of history messages ## Given let req = ArchiveQuery( contentTopics: @[ContentTopic("1")], @@ -504,45 +503,27 @@ procSuite "Waku Archive - find messages": let res = waitFor archiveA.findMessages(req) ## Then - check not res.isOk() + check res.isOk() + + let response = res.tryGet() + check: + response.messages.len == 0 test "handle temporal history query with an invalid time window": - ## A query with an invalid time range should immediately return a query error to the client + ## A history query with an invalid time range results in an empty list of history messages ## Given let req = ArchiveQuery( contentTopics: @[ContentTopic("1")], startTime: some(Timestamp(5)), - endTime: some(Timestamp(4)), + endTime: some(Timestamp(2)), ) ## When let res = waitFor archiveA.findMessages(req) ## Then - check not res.isOk() - - test "time range should be smaller than 24h": - let oneDayRangeNanos = 86_400_000_000_000 - let now = getNowInNanosecondTime() - - var res = waitFor archiveA.findMessages( - ArchiveQuery( - contentTopics: @[ContentTopic("1")], - startTime: some(Timestamp(now - oneDayRangeNanos - 1)), - endTime: some(Timestamp(now)), - ) - ) - - ## It fails if range is a bit bigger than 24h - check not res.isOk() - - res = waitFor archiveA.findMessages( - ArchiveQuery( - contentTopics: @[ContentTopic("1")], - startTime: some(Timestamp(now - oneDayRangeNanos)), - endTime: some(Timestamp(now)), - ) - ) - - ## Ok if range is 24h check res.isOk() + + let response = res.tryGet() + check: + response.messages.len == 0 diff --git a/tests/waku_rln_relay/test_rln_group_manager_onchain.nim b/tests/waku_rln_relay/test_rln_group_manager_onchain.nim index 3d7be7220..0dd143931 100644 --- a/tests/waku_rln_relay/test_rln_group_manager_onchain.nim +++ b/tests/waku_rln_relay/test_rln_group_manager_onchain.nim @@ -128,6 +128,7 @@ suite "Onchain group manager": (await manager.startGroupSync()).isOkOr: raiseAssert $error + asyncTest 
"startGroupSync: should guard against uninitialized state": (await manager.startGroupSync()).isErrOr: raiseAssert "Expected error when not initialized" diff --git a/waku/waku_archive/archive.nim b/waku/waku_archive/archive.nim index 4c22521c2..4088b42a8 100644 --- a/waku/waku_archive/archive.nim +++ b/waku/waku_archive/archive.nim @@ -163,41 +163,11 @@ proc syncMessageIngress*( return ok() -proc validateTimeRange( - startTime: Option[Timestamp], endTime: Option[Timestamp] -): Result[void, ArchiveError] = - ## Returns ok if the given time range is shorter than one day, and error otherwise. - ## We restrict the maximum allowed time of 24h to prevent excessive big queries. - - let oneDayRangeNanos = 86_400_000_000_000 - let now = getNowInNanosecondTime() - - var startTimeToValidate = now - oneDayRangeNanos - if startTime.isSome(): - startTimeToValidate = startTime.get() - - var endTimeToValidate = now - if endTime.isSome(): - endTimeToValidate = endTime.get() - - if startTimeToValidate >= endTimeToValidate: - return err(ArchiveError.invalidQuery("startTime should be before endTime")) - - if (endTimeToValidate - startTimeToValidate) > oneDayRangeNanos: - return err( - ArchiveError.invalidQuery("time range should be smaller than one day in nanos") - ) - - return ok() - proc findMessages*( self: WakuArchive, query: ArchiveQuery ): Future[ArchiveResult] {.async, gcsafe.} = ## Search the archive to return a single page of messages matching the query criteria - validateTimeRange(query.startTime, query.endTime).isOkOr: - return err(error) - if query.cursor.isSome(): let cursor = query.cursor.get() diff --git a/waku/waku_store_sync.nim b/waku/waku_store_sync.nim index 03c1b33af..06699d9fd 100644 --- a/waku/waku_store_sync.nim +++ b/waku/waku_store_sync.nim @@ -1,6 +1,8 @@ {.push raises: [].} import - ./waku_store_sync/reconciliation, ./waku_store_sync/transfer, ./waku_store_sync/common + ./waku_store_sync/reconciliation, + ./waku_store_sync/transfer, + 
./waku_store_sync/common export reconciliation, transfer, common From 93dac1c2c44db24652c2841a125945e4f203a5db Mon Sep 17 00:00:00 2001 From: Prem Chaitanya Prathi Date: Fri, 7 Feb 2025 11:04:48 +0530 Subject: [PATCH 04/22] fix: make light client examples work with sandbox fleet (#3237) --- examples/filter_subscriber.nim | 133 +++++++++++++++++------------ examples/lightpush_publisher.nim | 138 +++++++++++++++++++++---------- 2 files changed, 174 insertions(+), 97 deletions(-) diff --git a/examples/filter_subscriber.nim b/examples/filter_subscriber.nim index 8fb52963a..2216e4a41 100644 --- a/examples/filter_subscriber.nim +++ b/examples/filter_subscriber.nim @@ -1,30 +1,39 @@ -## Example showing how a resource restricted client may -## subscribe to messages without relay +import + std/[tables, sequtils], + stew/byteutils, + stew/shims/net, + chronicles, + chronos, + confutils, + libp2p/crypto/crypto, + eth/keys, + eth/p2p/discoveryv5/enr -import chronicles, chronos, stew/byteutils, results -import waku/[common/logging, node/peer_manager, waku_core, waku_filter_v2/client] +import + waku/[ + common/logging, + node/peer_manager, + waku_core, + waku_node, + waku_enr, + discovery/waku_discv5, + factory/builder, + waku_relay, + waku_filter_v2/client, + ] + +# careful if running pub and sub in the same machine +const wakuPort = 50000 + +const clusterId = 1 +const shardId = @[0'u16] const FilterPeer = - "/ip4/34.16.1.67/tcp/30303/p2p/16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG" - # node-01.gc-us-central1-a.waku.test.status.im on waku.test - FilterPubsubTopic = PubsubTopic("/waku/2/rs/0/0") + "/ip4/64.225.80.192/tcp/30303/p2p/16Uiu2HAmNaeL4p3WEYzC9mgXBmBWSgWjPHRvatZTXnp8Jgv3iKsb" + FilterPubsubTopic = PubsubTopic("/waku/2/rs/1/0") FilterContentTopic = ContentTopic("/examples/1/light-pubsub-example/proto") -proc unsubscribe( - wfc: WakuFilterClient, - filterPeer: RemotePeerInfo, - filterPubsubTopic: PubsubTopic, - filterContentTopic: ContentTopic, -) {.async.} = - 
notice "unsubscribing from filter" - let unsubscribeRes = - await wfc.unsubscribe(filterPeer, filterPubsubTopic, @[filterContentTopic]) - if unsubscribeRes.isErr: - notice "unsubscribe request failed", err = unsubscribeRes.error - else: - notice "unsubscribe request successful" - proc messagePushHandler( pubsubTopic: PubsubTopic, message: WakuMessage ) {.async, gcsafe.} = @@ -35,22 +44,61 @@ proc messagePushHandler( contentTopic = message.contentTopic, timestamp = message.timestamp -proc maintainSubscription( - wfc: WakuFilterClient, - filterPeer: RemotePeerInfo, - filterPubsubTopic: PubsubTopic, - filterContentTopic: ContentTopic, -) {.async.} = +proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} = + # use notice to filter all waku messaging + setupLog(logging.LogLevel.NOTICE, logging.LogFormat.TEXT) + + notice "starting subscriber", wakuPort = wakuPort + let + nodeKey = crypto.PrivateKey.random(Secp256k1, rng[])[] + ip = parseIpAddress("0.0.0.0") + flags = CapabilitiesBitfield.init(relay = true) + + let relayShards = RelayShards.init(clusterId, shardId).valueOr: + error "Relay shards initialization failed", error = error + quit(QuitFailure) + + var enrBuilder = EnrBuilder.init(nodeKey) + enrBuilder.withWakuRelaySharding(relayShards).expect( + "Building ENR with relay sharding failed" + ) + + let recordRes = enrBuilder.build() + let record = + if recordRes.isErr(): + error "failed to create enr record", error = recordRes.error + quit(QuitFailure) + else: + recordRes.get() + + var builder = WakuNodeBuilder.init() + builder.withNodeKey(nodeKey) + builder.withRecord(record) + builder.withNetworkConfigurationDetails(ip, Port(wakuPort)).tryGet() + let node = builder.build().tryGet() + + node.mountMetadata(clusterId).expect("failed to mount waku metadata protocol") + waitFor node.mountFilterClient() + + await node.start() + + node.peerManager.start() + + node.wakuFilterClient.registerPushHandler(messagePushHandler) + + let filterPeer = 
parsePeerInfo(FilterPeer).get() + while true: notice "maintaining subscription" # First use filter-ping to check if we have an active subscription - let pingRes = await wfc.ping(filterPeer) + let pingRes = await node.wakuFilterClient.ping(filterPeer) if pingRes.isErr(): # No subscription found. Let's subscribe. notice "no subscription found. Sending subscribe request" - let subscribeRes = - await wfc.subscribe(filterPeer, filterPubsubTopic, @[filterContentTopic]) + let subscribeRes = await node.wakuFilterClient.subscribe( + filterPeer, FilterPubsubTopic, @[FilterContentTopic] + ) if subscribeRes.isErr(): notice "subscribe request failed. Quitting.", err = subscribeRes.error @@ -62,28 +110,7 @@ proc maintainSubscription( await sleepAsync(60.seconds) # Subscription maintenance interval -proc setupAndSubscribe(rng: ref HmacDrbgContext) = - let filterPeer = parsePeerInfo(FilterPeer).get() - - setupLog(logging.LogLevel.NOTICE, logging.LogFormat.TEXT) - notice "starting filter subscriber" - - var - switch = newStandardSwitch() - pm = PeerManager.new(switch) - wfc = WakuFilterClient.new(pm, rng) - - # Mount filter client protocol - switch.mount(wfc) - - wfc.registerPushHandler(messagePushHandler) - - # Start maintaining subscription - asyncSpawn maintainSubscription( - wfc, filterPeer, FilterPubsubTopic, FilterContentTopic - ) - when isMainModule: - let rng = newRng() - setupAndSubscribe(rng) + let rng = crypto.newRng() + asyncSpawn setupAndSubscribe(rng) runForever() diff --git a/examples/lightpush_publisher.nim b/examples/lightpush_publisher.nim index 8ef3f4446..0615c1f6b 100644 --- a/examples/lightpush_publisher.nim +++ b/examples/lightpush_publisher.nim @@ -1,57 +1,107 @@ -## Example showing how a resource restricted client may -## use lightpush to publish messages without relay +import + std/[tables, times, sequtils], + stew/byteutils, + stew/shims/net, + chronicles, + results, + chronos, + confutils, + libp2p/crypto/crypto, + eth/keys, + eth/p2p/discoveryv5/enr 
-import chronicles, chronos, stew/byteutils, results -import waku/[common/logging, node/peer_manager, waku_core, waku_lightpush/client] +import + waku/[ + common/logging, + node/peer_manager, + waku_core, + waku_node, + waku_enr, + discovery/waku_discv5, + factory/builder, + ] + +proc now*(): Timestamp = + getNanosecondTime(getTime().toUnixFloat()) + +# careful if running pub and sub in the same machine +const wakuPort = 60000 + +const clusterId = 1 +const shardId = @[0'u16] const LightpushPeer = - "/ip4/178.128.141.171/tcp/30303/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W" - # node-01.do-ams3.waku.test.status.im on waku.test - LightpushPubsubTopic = PubsubTopic("/waku/2/rs/0/0") + "/ip4/64.225.80.192/tcp/30303/p2p/16Uiu2HAmNaeL4p3WEYzC9mgXBmBWSgWjPHRvatZTXnp8Jgv3iKsb" + LightpushPubsubTopic = PubsubTopic("/waku/2/rs/1/0") LightpushContentTopic = ContentTopic("/examples/1/light-pubsub-example/proto") -proc publishMessages( - wlc: WakuLightpushClient, - lightpushPeer: RemotePeerInfo, - lightpushPubsubTopic: PubsubTopic, - lightpushContentTopic: ContentTopic, -) {.async.} = - while true: - let text = "hi there i'm a lightpush publisher" - let message = WakuMessage( - payload: toBytes(text), # content of the message - contentTopic: lightpushContentTopic, # content topic to publish to - ephemeral: true, # tell store nodes to not store it - timestamp: getNowInNanosecondTime(), - ) # current timestamp - - let wlpRes = await wlc.publish(lightpushPubsubTopic, message, lightpushPeer) - - if wlpRes.isOk(): - notice "published message using lightpush", message = message - else: - notice "failed to publish message using lightpush", err = wlpRes.error() - - await sleepAsync(5000) # Publish every 5 seconds - -proc setupAndPublish(rng: ref HmacDrbgContext) = - let lightpushPeer = parsePeerInfo(LightpushPeer).get() - +proc setupAndPublish(rng: ref HmacDrbgContext) {.async.} = + # use notice to filter all waku messaging setupLog(logging.LogLevel.NOTICE, 
logging.LogFormat.TEXT) - notice "starting lightpush publisher" - var - switch = newStandardSwitch() - pm = PeerManager.new(switch) - wlc = WakuLightpushClient.new(pm, rng) + notice "starting publisher", wakuPort = wakuPort + let + nodeKey = crypto.PrivateKey.random(Secp256k1, rng[]).get() + ip = parseIpAddress("0.0.0.0") + flags = CapabilitiesBitfield.init(relay = true) - # Start maintaining subscription - asyncSpawn publishMessages( - wlc, lightpushPeer, LightpushPubsubTopic, LightpushContentTopic + let relayShards = RelayShards.init(clusterId, shardId).valueOr: + error "Relay shards initialization failed", error = error + quit(QuitFailure) + + var enrBuilder = EnrBuilder.init(nodeKey) + enrBuilder.withWakuRelaySharding(relayShards).expect( + "Building ENR with relay sharding failed" ) + let recordRes = enrBuilder.build() + let record = + if recordRes.isErr(): + error "failed to create enr record", error = recordRes.error + quit(QuitFailure) + else: + recordRes.get() + + var builder = WakuNodeBuilder.init() + builder.withNodeKey(nodeKey) + builder.withRecord(record) + builder.withNetworkConfigurationDetails(ip, Port(wakuPort)).tryGet() + let node = builder.build().tryGet() + + node.mountMetadata(clusterId).expect("failed to mount waku metadata protocol") + node.mountLightPushClient() + + await node.start() + node.peerManager.start() + + notice "publisher service started" + while true: + let text = "hi there i'm a publisher" + let message = WakuMessage( + payload: toBytes(text), # content of the message + contentTopic: LightpushContentTopic, # content topic to publish to + ephemeral: true, # tell store nodes to not store it + timestamp: now(), + ) # current timestamp + + let lightpushPeer = parsePeerInfo(LightpushPeer).get() + + let res = + await node.lightpushPublish(some(LightpushPubsubTopic), message, lightpushPeer) + + if res.isOk: + notice "published message", + text = text, + timestamp = message.timestamp, + psTopic = LightpushPubsubTopic, + contentTopic = 
LightpushContentTopic + else: + error "failed to publish message", error = res.error + + await sleepAsync(5000) + when isMainModule: - let rng = newRng() - setupAndPublish(rng) + let rng = crypto.newRng() + asyncSpawn setupAndPublish(rng) runForever() From 34442390e9acc50e640b208402a21a49540a1c7a Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Fri, 7 Feb 2025 20:23:31 +0100 Subject: [PATCH 05/22] chore: dbconn truncate possible too long error messages (#3283) * also: dbconn restrict the max metric label value to 128 --- waku/common/databases/db_postgres/dbconn.nim | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/waku/common/databases/db_postgres/dbconn.nim b/waku/common/databases/db_postgres/dbconn.nim index 84584a10d..287ed4e8d 100644 --- a/waku/common/databases/db_postgres/dbconn.nim +++ b/waku/common/databases/db_postgres/dbconn.nim @@ -49,7 +49,9 @@ proc check(db: DbConn): Result[void, string] = return err("exception in check: " & getCurrentExceptionMsg()) if message.len > 0: - return err($message) + let truncatedErr = message[0 .. 80] + ## libpq sometimes gives extremely long error messages + return err(truncatedErr) return ok() @@ -249,7 +251,7 @@ proc dbConnQuery*( ## remove everything between ' or " all possible sequence of numbers. e.g. 
rm partition partition var querySummary = cleanedQuery.replace(re2("""(['"]).*?\\1"""), "") querySummary = querySummary.replace(re2"\d+", "") - querySummary = "query_tag_" & querySummary[0 ..< min(querySummary.len, 200)] + querySummary = "query_tag_" & querySummary[0 ..< min(querySummary.len, 128)] var queryStartTime = getTime().toUnixFloat() @@ -300,8 +302,9 @@ proc dbConnQueryPrepared*( error "error in dbConnQueryPrepared", error = $error return err("error in dbConnQueryPrepared calling sendQuery: " & $error) + let stmtNameSummary = stmtName[0 ..< min(stmtName.len, 128)] let sendDuration = getTime().toUnixFloat() - queryStartTime - query_time_secs.set(sendDuration, [stmtName, "sendToDBQuery"]) + query_time_secs.set(sendDuration, [stmtNameSummary, "sendToDBQuery"]) queryStartTime = getTime().toUnixFloat() @@ -309,9 +312,9 @@ proc dbConnQueryPrepared*( return err("error in dbConnQueryPrepared calling waitQueryToFinish: " & $error) let waitDuration = getTime().toUnixFloat() - queryStartTime - query_time_secs.set(waitDuration, [stmtName, "waitFinish"]) + query_time_secs.set(waitDuration, [stmtNameSummary, "waitFinish"]) - query_count.inc(labelValues = [stmtName]) + query_count.inc(labelValues = [stmtNameSummary]) if "insert" notin stmtName.toLower(): debug "dbConnQueryPrepared", From b3e1dc3f4965a17b7da5f73dee5d76fe6bdcfd63 Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Mon, 10 Feb 2025 23:30:56 +0100 Subject: [PATCH 06/22] add waku-rlnv2-contract as vendor dependency (#3289) The waku-rlnv2-contract commit (a576a89) that is being added is the one that generated the currently deployed waku network --- .gitmodules | 5 +++++ vendor/waku-rlnv2-contract | 1 + 2 files changed, 6 insertions(+) create mode 160000 vendor/waku-rlnv2-contract diff --git a/.gitmodules b/.gitmodules index ad8ba817f..bde56a76e 100644 --- a/.gitmodules +++ b/.gitmodules @@ -189,3 +189,8 @@ url = https://github.com/vacp2p/nim-ngtcp2.git ignore = 
untracked branch = master +[submodule "vendor/waku-rlnv2-contract"] + path = vendor/waku-rlnv2-contract + url = https://github.com/waku-org/waku-rlnv2-contract.git + ignore = untracked + branch = master diff --git a/vendor/waku-rlnv2-contract b/vendor/waku-rlnv2-contract new file mode 160000 index 000000000..a576a8949 --- /dev/null +++ b/vendor/waku-rlnv2-contract @@ -0,0 +1 @@ +Subproject commit a576a8949ca20e310f2fbb4ec0bd05a57ac3045f From 906360566944653e891914e0b41a6dbcb1661a10 Mon Sep 17 00:00:00 2001 From: gabrielmer <101006718+gabrielmer@users.noreply.github.com> Date: Wed, 12 Feb 2025 18:35:50 +0200 Subject: [PATCH 07/22] fix: libwaku store request parsing (#3294) --- .../requests/protocols/store_request.nim | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/library/waku_thread/inter_thread_communication/requests/protocols/store_request.nim b/library/waku_thread/inter_thread_communication/requests/protocols/store_request.nim index 59e27dfec..aa4071fcf 100644 --- a/library/waku_thread/inter_thread_communication/requests/protocols/store_request.nim +++ b/library/waku_thread/inter_thread_communication/requests/protocols/store_request.nim @@ -23,9 +23,11 @@ type StoreRequest* = object func fromJsonNode( T: type StoreRequest, jsonContent: JsonNode ): Result[StoreQueryRequest, string] = - let contentTopics = collect(newSeq): - for cTopic in jsonContent["content_topics"].getElems(): - cTopic.getStr() + var contentTopics: seq[string] + if jsonContent.contains("content_topics"): + contentTopics = collect(newSeq): + for cTopic in jsonContent["content_topics"].getElems(): + cTopic.getStr() var msgHashes: seq[WakuMessageHash] if jsonContent.contains("message_hashes"): From 9b55665f419bfd2df198ece9976919daade51897 Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Thu, 13 Feb 2025 00:48:36 +0100 Subject: [PATCH 08/22] lightpush enhance log when handling request (#3297) --- 
waku/waku_lightpush/protocol.nim | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/waku/waku_lightpush/protocol.nim b/waku/waku_lightpush/protocol.nim index b47f6e7ad..2967146db 100644 --- a/waku/waku_lightpush/protocol.nim +++ b/waku/waku_lightpush/protocol.nim @@ -46,10 +46,12 @@ proc handleRequest*( waku_lightpush_messages.inc(labelValues = ["PushRequest"]) notice "handling lightpush request", + my_peer_id = wl.peerManager.switch.peerInfo.peerId, peer_id = peerId, requestId = requestId, pubsubTopic = pubsubTopic, - msg_hash = pubsubTopic.computeMessageHash(message).to0xHex() + msg_hash = pubsubTopic.computeMessageHash(message).to0xHex(), + receivedTime = getNowInNanosecondTime() let handleRes = await wl.pushHandler(peerId, pubsubTopic, message) isSuccess = handleRes.isOk() From 6b00684ad1552c5dc608e871adef2ac7d9492611 Mon Sep 17 00:00:00 2001 From: gabrielmer <101006718+gabrielmer@users.noreply.github.com> Date: Thu, 13 Feb 2025 15:08:32 +0200 Subject: [PATCH 09/22] chore: supporting parallel libwaku requests (#3296) --- library/waku_thread/waku_thread.nim | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/library/waku_thread/waku_thread.nim b/library/waku_thread/waku_thread.nim index 4e8019b08..640389e32 100644 --- a/library/waku_thread/waku_thread.nim +++ b/library/waku_thread/waku_thread.nim @@ -2,12 +2,13 @@ {.pragma: callback, cdecl, raises: [], gcsafe.} {.passc: "-fPIC".} -import std/[options, atomics, os, net] +import std/[options, atomics, os, net, locks] import chronicles, chronos, chronos/threadsync, taskpools/channels_spsc_single, results import waku/factory/waku, ./inter_thread_communication/waku_thread_request, ../ffi_types type WakuContext* = object thread: Thread[(ptr WakuContext)] + lock: Lock reqChannel: ChannelSPSCSingle[ptr WakuThreadRequest] reqSignal: ThreadSignalPtr # to inform The Waku Thread (a.k.a TWT) that a new request is sent @@ -59,6 +60,7 @@ proc createWakuThread*(): Result[ptr 
WakuContext, string] = return err("couldn't create reqSignal ThreadSignalPtr") ctx.reqReceivedSignal = ThreadSignalPtr.new().valueOr: return err("couldn't create reqReceivedSignal ThreadSignalPtr") + ctx.lock.initLock() ctx.running.store(true) @@ -81,6 +83,7 @@ proc destroyWakuThread*(ctx: ptr WakuContext): Result[void, string] = return err("failed to signal reqSignal on time in destroyWakuThread") joinThread(ctx.thread) + ctx.lock.deinitLock() ?ctx.reqSignal.close() ?ctx.reqReceivedSignal.close() freeShared(ctx) @@ -95,6 +98,14 @@ proc sendRequestToWakuThread*( userData: pointer, ): Result[void, string] = let req = WakuThreadRequest.createShared(reqType, reqContent, callback, userData) + + # This lock is only necessary while we use a SP Channel and while the signalling + # between threads assumes that there aren't concurrent requests. + # Rearchitecting the signaling + migrating to a MP Channel will allow us to receive + # requests concurrently and spare us the need of locks + ctx.lock.acquire() + defer: + ctx.lock.release() ## Sending the request let sentOk = ctx.reqChannel.trySend(req) if not sentOk: From 9bb567eb0e3c6c811ea1f70f9fe87480e5172960 Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Fri, 14 Feb 2025 11:14:38 +0100 Subject: [PATCH 10/22] chore: better proof handling in REST (#3286) * better proof handling in REST --- tests/testlib/wakucore.nim | 2 ++ .../test_rln_group_manager_onchain.nim | 1 - tests/wakunode_rest/test_rest_lightpush.nim | 23 +++++++++++++++++++ waku/waku_api/rest/relay/types.nim | 10 ++++++++ waku/waku_api/rest/serdes.nim | 3 +-- waku/waku_store_sync.nim | 4 +--- 6 files changed, 37 insertions(+), 6 deletions(-) diff --git a/tests/testlib/wakucore.nim b/tests/testlib/wakucore.nim index b4daca105..d18a87e7d 100644 --- a/tests/testlib/wakucore.nim +++ b/tests/testlib/wakucore.nim @@ -50,6 +50,7 @@ proc fakeWakuMessage*( meta: string | seq[byte] = newSeq[byte](), ts = now(), ephemeral = 
false, + proof = newSeq[byte](), ): WakuMessage = var payloadBytes: seq[byte] var metaBytes: seq[byte] @@ -71,4 +72,5 @@ proc fakeWakuMessage*( version: 2, timestamp: ts, ephemeral: ephemeral, + proof: proof, ) diff --git a/tests/waku_rln_relay/test_rln_group_manager_onchain.nim b/tests/waku_rln_relay/test_rln_group_manager_onchain.nim index 0dd143931..3d7be7220 100644 --- a/tests/waku_rln_relay/test_rln_group_manager_onchain.nim +++ b/tests/waku_rln_relay/test_rln_group_manager_onchain.nim @@ -128,7 +128,6 @@ suite "Onchain group manager": (await manager.startGroupSync()).isOkOr: raiseAssert $error - asyncTest "startGroupSync: should guard against uninitialized state": (await manager.startGroupSync()).isErrOr: raiseAssert "Expected error when not initialized" diff --git a/tests/wakunode_rest/test_rest_lightpush.nim b/tests/wakunode_rest/test_rest_lightpush.nim index 1e6ef9e83..2ff0bf26a 100644 --- a/tests/wakunode_rest/test_rest_lightpush.nim +++ b/tests/wakunode_rest/test_rest_lightpush.nim @@ -96,6 +96,29 @@ proc shutdown(self: RestLightPushTest) {.async.} = await allFutures(self.serviceNode.stop(), self.pushNode.stop()) suite "Waku v2 Rest API - lightpush": + asyncTest "Push message with proof": + let restLightPushTest = await RestLightPushTest.init() + + let message: RelayWakuMessage = fakeWakuMessage( + contentTopic = DefaultContentTopic, + payload = toBytes("TEST-1"), + proof = toBytes("proof-test"), + ) + .toRelayWakuMessage() + + check message.proof.isSome() + + let requestBody = + PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: message) + + let response = await restLightPushTest.client.sendPushRequest(body = requestBody) + + ## Validate that the push request failed because the node is not + ## connected to other node but, doesn't fail because of not properly + ## handling the proof message attribute within the REST request. 
+ check: + response.data == "Failed to request a message push: not_published_to_any_peer" + asyncTest "Push message request": # Given let restLightPushTest = await RestLightPushTest.init() diff --git a/waku/waku_api/rest/relay/types.nim b/waku/waku_api/rest/relay/types.nim index 6da0f401e..ca7d1ff05 100644 --- a/waku/waku_api/rest/relay/types.nim +++ b/waku/waku_api/rest/relay/types.nim @@ -17,6 +17,7 @@ type RelayWakuMessage* = object timestamp*: Option[int64] meta*: Option[Base64String] ephemeral*: Option[bool] + proof*: Option[Base64String] type RelayGetMessagesResponse* = seq[RelayWakuMessage] @@ -36,6 +37,7 @@ proc toRelayWakuMessage*(msg: WakuMessage): RelayWakuMessage = else: none(Base64String), ephemeral: some(msg.ephemeral), + proof: some(base64.encode(msg.proof)), ) proc toWakuMessage*(msg: RelayWakuMessage, version = 0): Result[WakuMessage, string] = @@ -45,6 +47,7 @@ proc toWakuMessage*(msg: RelayWakuMessage, version = 0): Result[WakuMessage, str version = uint32(msg.version.get(version)) meta = ?msg.meta.get(Base64String("")).decode() ephemeral = msg.ephemeral.get(false) + proof = ?msg.proof.get(Base64String("")).decode() var timestamp = msg.timestamp.get(0) @@ -59,6 +62,7 @@ proc toWakuMessage*(msg: RelayWakuMessage, version = 0): Result[WakuMessage, str timestamp: timestamp, meta: meta, ephemeral: ephemeral, + proof: proof, ) ) @@ -79,6 +83,8 @@ proc writeValue*( writer.writeField("meta", value.meta.get()) if value.ephemeral.isSome(): writer.writeField("ephemeral", value.ephemeral.get()) + if value.proof.isSome(): + writer.writeField("proof", value.proof.get()) writer.endRecord() proc readValue*( @@ -91,6 +97,7 @@ proc readValue*( timestamp = none(int64) meta = none(Base64String) ephemeral = none(bool) + proof = none(Base64String) var keys = initHashSet[string]() for fieldName in readObjectFields(reader): @@ -116,6 +123,8 @@ proc readValue*( meta = some(reader.readValue(Base64String)) of "ephemeral": ephemeral = some(reader.readValue(bool)) + of 
"proof": + proof = some(reader.readValue(Base64String)) else: unrecognizedFieldWarning(value) @@ -132,4 +141,5 @@ proc readValue*( timestamp: timestamp, meta: meta, ephemeral: ephemeral, + proof: proof, ) diff --git a/waku/waku_api/rest/serdes.nim b/waku/waku_api/rest/serdes.nim index 2c8ebb4b7..eb6bc1545 100644 --- a/waku/waku_api/rest/serdes.nim +++ b/waku/waku_api/rest/serdes.nim @@ -65,8 +65,7 @@ proc decodeFromJsonBytes*[T]( ) ) except SerializationError: - # TODO: Do better error reporting here - err("Unable to deserialize data") + err("Unable to deserialize data: " & getCurrentExceptionMsg()) proc encodeIntoJsonString*(value: auto): SerdesResult[string] = var encoded: string diff --git a/waku/waku_store_sync.nim b/waku/waku_store_sync.nim index 06699d9fd..03c1b33af 100644 --- a/waku/waku_store_sync.nim +++ b/waku/waku_store_sync.nim @@ -1,8 +1,6 @@ {.push raises: [].} import - ./waku_store_sync/reconciliation, - ./waku_store_sync/transfer, - ./waku_store_sync/common + ./waku_store_sync/reconciliation, ./waku_store_sync/transfer, ./waku_store_sync/common export reconciliation, transfer, common From 8275d70f35688479cf5c24a3e6292ac6f737549e Mon Sep 17 00:00:00 2001 From: gabrielmer <101006718+gabrielmer@users.noreply.github.com> Date: Mon, 17 Feb 2025 18:37:43 +0200 Subject: [PATCH 11/22] fix: libwaku's invalid waku message error handling (#3301) --- library/events/json_message_event.nim | 7 +++++++ library/libwaku.nim | 5 ++--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/library/events/json_message_event.nim b/library/events/json_message_event.nim index 2fbfb0d92..6f9dafa9f 100644 --- a/library/events/json_message_event.nim +++ b/library/events/json_message_event.nim @@ -20,6 +20,13 @@ func fromJsonNode*( T: type JsonMessage, jsonContent: JsonNode ): Result[JsonMessage, string] = # Visit https://rfc.vac.dev/spec/14/ for further details + + # Check if required fields exist + if not jsonContent.hasKey("payload"): + return err("Missing required 
field in WakuMessage: payload") + if not jsonContent.hasKey("contentTopic"): + return err("Missing required field in WakuMessage: contentTopic") + ok( JsonMessage( payload: Base64String(jsonContent["payload"].getStr()), diff --git a/library/libwaku.nim b/library/libwaku.nim index 204b73d0d..a2290a60c 100644 --- a/library/libwaku.nim +++ b/library/libwaku.nim @@ -291,18 +291,17 @@ proc waku_relay_publish( checkLibwakuParams(ctx, callback, userData) let jwm = jsonWakuMessage.alloc() + defer: + deallocShared(jwm) var jsonMessage: JsonMessage try: let jsonContent = parseJson($jwm) jsonMessage = JsonMessage.fromJsonNode(jsonContent).valueOr: raise newException(JsonParsingError, $error) except JsonParsingError: - deallocShared(jwm) let msg = fmt"Error parsing json message: {getCurrentExceptionMsg()}" callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) return RET_ERR - finally: - deallocShared(jwm) let wakuMessage = jsonMessage.toWakuMessage().valueOr: let msg = "Problem building the WakuMessage: " & $error From fb55ed0b70c2b1ec77b1a1e4e3ac36abae263448 Mon Sep 17 00:00:00 2001 From: Sergei Tikhomirov Date: Thu, 20 Feb 2025 16:07:21 +0100 Subject: [PATCH 12/22] feat: incentivization PoC: client-side reputation system basics (#3293) * chore: rename test file for eligibility tests * add reputation manager * add simple boolean reputation with dummy response * set default reputation to true * use reputation indicator term; remove unnecessary updateReputation * use PushResponse in reputation manager * add custom type for reputation * add reputation update from response quality * encode reputation indicators as Option[bool] --- tests/incentivization/test_all.nim | 2 +- ...{test_poc.nim => test_poc_eligibility.nim} | 0 tests/incentivization/test_poc_reputation.nim | 54 +++++++++++++++++++ waku/incentivization/reputation_manager.nim | 48 +++++++++++++++++ 4 files changed, 103 insertions(+), 1 deletion(-) rename tests/incentivization/{test_poc.nim => 
test_poc_eligibility.nim} (100%) create mode 100644 tests/incentivization/test_poc_reputation.nim create mode 100644 waku/incentivization/reputation_manager.nim diff --git a/tests/incentivization/test_all.nim b/tests/incentivization/test_all.nim index 756db896d..4657ea0d3 100644 --- a/tests/incentivization/test_all.nim +++ b/tests/incentivization/test_all.nim @@ -1 +1 @@ -import ./test_rpc_codec, ./test_poc +import ./test_rpc_codec, ./test_poc_eligibility, ./test_poc_reputation diff --git a/tests/incentivization/test_poc.nim b/tests/incentivization/test_poc_eligibility.nim similarity index 100% rename from tests/incentivization/test_poc.nim rename to tests/incentivization/test_poc_eligibility.nim diff --git a/tests/incentivization/test_poc_reputation.nim b/tests/incentivization/test_poc_reputation.nim new file mode 100644 index 000000000..b35c4b92f --- /dev/null +++ b/tests/incentivization/test_poc_reputation.nim @@ -0,0 +1,54 @@ +import + std/options, + testutils/unittests, + chronos, + web3, + stew/byteutils, + stint, + strutils, + tests/testlib/testasync + +import + waku/[node/peer_manager, waku_core], + waku/incentivization/[rpc, reputation_manager], + waku/waku_lightpush/rpc + +suite "Waku Incentivization PoC Reputation": + var manager {.threadvar.}: ReputationManager + + setup: + manager = ReputationManager.init() + + test "incentivization PoC: reputation: reputation table is empty after initialization": + check manager.reputationOf.len == 0 + + test "incentivization PoC: reputation: set and get reputation": + manager.setReputation("peer1", some(true)) # Encodes GoodRep + check manager.getReputation("peer1") == some(true) + + test "incentivization PoC: reputation: evaluate PushResponse valid": + let validLightpushResponse = + PushResponse(isSuccess: true, info: some("Everything is OK")) + # We expect evaluateResponse to return GoodResponse if isSuccess is true + check evaluateResponse(validLightpushResponse) == GoodResponse + + test "incentivization PoC: 
reputation: evaluate PushResponse invalid": + let invalidLightpushResponse = PushResponse(isSuccess: false, info: none(string)) + check evaluateResponse(invalidLightpushResponse) == BadResponse + + test "incentivization PoC: reputation: updateReputationFromResponse valid": + let peerId = "peerWithValidResponse" + let validResp = PushResponse(isSuccess: true, info: some("All good")) + manager.updateReputationFromResponse(peerId, validResp) + check manager.getReputation(peerId) == some(true) + + test "incentivization PoC: reputation: updateReputationFromResponse invalid": + let peerId = "peerWithInvalidResponse" + let invalidResp = PushResponse(isSuccess: false, info: none(string)) + manager.updateReputationFromResponse(peerId, invalidResp) + check manager.getReputation(peerId) == some(false) + + test "incentivization PoC: reputation: default is None": + let unknownPeerId = "unknown_peer" + # The peer is not in the table yet + check manager.getReputation(unknownPeerId) == none(bool) diff --git a/waku/incentivization/reputation_manager.nim b/waku/incentivization/reputation_manager.nim new file mode 100644 index 000000000..d5097b711 --- /dev/null +++ b/waku/incentivization/reputation_manager.nim @@ -0,0 +1,48 @@ +import tables, std/options +import waku/waku_lightpush/rpc + +type + PeerId = string + + ResponseQuality* = enum + BadResponse + GoodResponse + + # Encode reputation indicator as Option[bool]: + # some(true) => GoodRep + # some(false) => BadRep + # none(bool) => unknown / not set + ReputationManager* = ref object + reputationOf*: Table[PeerId, Option[bool]] + +proc init*(T: type ReputationManager): ReputationManager = + return ReputationManager(reputationOf: initTable[PeerId, Option[bool]]()) + +proc setReputation*( + manager: var ReputationManager, peer: PeerId, repValue: Option[bool] +) = + manager.reputationOf[peer] = repValue + +proc getReputation*(manager: ReputationManager, peer: PeerId): Option[bool] = + if peer in manager.reputationOf: + result = 
manager.reputationOf[peer] + else: + result = none(bool) + +# Evaluate the quality of a PushResponse by checking its isSuccess field +proc evaluateResponse*(response: PushResponse): ResponseQuality = + if response.isSuccess: + return GoodResponse + else: + return BadResponse + +# Update reputation of the peer based on the quality of the response +proc updateReputationFromResponse*( + manager: var ReputationManager, peer: PeerId, response: PushResponse +) = + let respQuality = evaluateResponse(response) + case respQuality + of BadResponse: + manager.setReputation(peer, some(false)) # false => BadRep + of GoodResponse: + manager.setReputation(peer, some(true)) # true => GoodRep From a1901a044e019d1dbcb17c3a9bd4f4773796356d Mon Sep 17 00:00:00 2001 From: gabrielmer <101006718+gabrielmer@users.noreply.github.com> Date: Mon, 24 Feb 2025 22:06:48 +0200 Subject: [PATCH 13/22] chore: deprecating dnsDiscovery flag (#3305) --- apps/chat2/chat2.nim | 2 +- apps/chat2/config_chat2.nim | 3 ++- apps/networkmonitor/networkmonitor.nim | 6 +++--- examples/cbindings/waku_example.c | 1 - .../requests/discovery_request.nim | 2 +- waku/discovery/waku_dnsdisc.nim | 4 ++-- waku/factory/external_config.nim | 3 ++- waku/factory/waku.nim | 5 ++--- 8 files changed, 13 insertions(+), 13 deletions(-) diff --git a/apps/chat2/chat2.nim b/apps/chat2/chat2.nim index ae2dc141e..da57991c1 100644 --- a/apps/chat2/chat2.nim +++ b/apps/chat2/chat2.nim @@ -418,7 +418,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = dnsDiscoveryUrl = some( "enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im" ) - elif conf.dnsDiscovery and conf.dnsDiscoveryUrl != "": + elif conf.dnsDiscoveryUrl != "": # No pre-selected fleet. 
Discover nodes via DNS using user config debug "Discovering nodes using Waku DNS discovery", url = conf.dnsDiscoveryUrl dnsDiscoveryUrl = some(conf.dnsDiscoveryUrl) diff --git a/apps/chat2/config_chat2.nim b/apps/chat2/config_chat2.nim index 5086aef51..4bdc0d586 100644 --- a/apps/chat2/config_chat2.nim +++ b/apps/chat2/config_chat2.nim @@ -157,7 +157,8 @@ type ## DNS discovery config dnsDiscovery* {. - desc: "Enable discovering nodes via DNS", + desc: + "Deprecated, please set dns-discovery-url instead. Enable discovering nodes via DNS", defaultValue: false, name: "dns-discovery" .}: bool diff --git a/apps/networkmonitor/networkmonitor.nim b/apps/networkmonitor/networkmonitor.nim index 515bdae63..2861c85ae 100644 --- a/apps/networkmonitor/networkmonitor.nim +++ b/apps/networkmonitor/networkmonitor.nim @@ -354,11 +354,11 @@ proc crawlNetwork( await sleepAsync(crawlInterval.millis - elapsed.millis) proc retrieveDynamicBootstrapNodes( - dnsDiscovery: bool, dnsDiscoveryUrl: string, dnsDiscoveryNameServers: seq[IpAddress] + dnsDiscoveryUrl: string, dnsDiscoveryNameServers: seq[IpAddress] ): Future[Result[seq[RemotePeerInfo], string]] {.async.} = ## Retrieve dynamic bootstrap nodes (DNS discovery) - if dnsDiscovery and dnsDiscoveryUrl != "": + if dnsDiscoveryUrl != "": # DNS discovery debug "Discovering nodes using Waku DNS discovery", url = dnsDiscoveryUrl @@ -392,7 +392,7 @@ proc getBootstrapFromDiscDns( try: let dnsNameServers = @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")] let dynamicBootstrapNodesRes = - await retrieveDynamicBootstrapNodes(true, conf.dnsDiscoveryUrl, dnsNameServers) + await retrieveDynamicBootstrapNodes(conf.dnsDiscoveryUrl, dnsNameServers) if not dynamicBootstrapNodesRes.isOk(): error("failed discovering peers from DNS") let dynamicBootstrapNodes = dynamicBootstrapNodesRes.get() diff --git a/examples/cbindings/waku_example.c b/examples/cbindings/waku_example.c index d6a0cbc7d..bbb76c862 100644 --- a/examples/cbindings/waku_example.c +++ 
b/examples/cbindings/waku_example.c @@ -324,7 +324,6 @@ int main(int argc, char** argv) { \"discv5BootstrapNodes\": \ [\"enr:-QESuEB4Dchgjn7gfAvwB00CxTA-nGiyk-aALI-H4dYSZD3rUk7bZHmP8d2U6xDiQ2vZffpo45Jp7zKNdnwDUx6g4o6XAYJpZIJ2NIJpcIRA4VDAim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOvD3S3jUNICsrOILlmhENiWAMmMVlAl6-Q8wRB7hidY4N0Y3CCdl-DdWRwgiMohXdha3UyDw\", \"enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw\"], \ \"discv5UdpPort\": 9999, \ - \"dnsDiscovery\": true, \ \"dnsDiscoveryUrl\": \"enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im\", \ \"dnsDiscoveryNameServers\": [\"8.8.8.8\", \"1.0.0.1\"] \ }", cfgNode.host, diff --git a/library/waku_thread/inter_thread_communication/requests/discovery_request.nim b/library/waku_thread/inter_thread_communication/requests/discovery_request.nim index e1da7a68d..078a43030 100644 --- a/library/waku_thread/inter_thread_communication/requests/discovery_request.nim +++ b/library/waku_thread/inter_thread_communication/requests/discovery_request.nim @@ -83,7 +83,7 @@ proc retrieveBootstrapNodes( ): Future[Result[seq[string], string]] {.async.} = let dnsNameServers = @[parseIpAddress(ipDnsServer)] let discoveredPeers: seq[RemotePeerInfo] = ( - await retrieveDynamicBootstrapNodes(true, enrTreeUrl, dnsNameServers) + await retrieveDynamicBootstrapNodes(enrTreeUrl, dnsNameServers) ).valueOr: return err("failed discovering peers from DNS: " & $error) diff --git a/waku/discovery/waku_dnsdisc.nim 
b/waku/discovery/waku_dnsdisc.nim index b8078feed..10af99b4c 100644 --- a/waku/discovery/waku_dnsdisc.nim +++ b/waku/discovery/waku_dnsdisc.nim @@ -97,11 +97,11 @@ proc init*( return ok(wakuDnsDisc) proc retrieveDynamicBootstrapNodes*( - dnsDiscovery: bool, dnsDiscoveryUrl: string, dnsDiscoveryNameServers: seq[IpAddress] + dnsDiscoveryUrl: string, dnsDiscoveryNameServers: seq[IpAddress] ): Future[Result[seq[RemotePeerInfo], string]] {.async.} = ## Retrieve dynamic bootstrap nodes (DNS discovery) - if dnsDiscovery and dnsDiscoveryUrl != "": + if dnsDiscoveryUrl != "": # DNS discovery debug "Discovering nodes using Waku DNS discovery", url = dnsDiscoveryUrl diff --git a/waku/factory/external_config.nim b/waku/factory/external_config.nim index 3ba24d54d..9bc073426 100644 --- a/waku/factory/external_config.nim +++ b/waku/factory/external_config.nim @@ -570,7 +570,8 @@ with the drawback of consuming some more bandwidth.""", ## DNS discovery config dnsDiscovery* {. - desc: "Enable discovering nodes via DNS", + desc: + "Deprecated, please set dns-discovery-url instead. 
Enable discovering nodes via DNS", defaultValue: false, name: "dns-discovery" .}: bool diff --git a/waku/factory/waku.nim b/waku/factory/waku.nim index 37b5b3728..546253176 100644 --- a/waku/factory/waku.nim +++ b/waku/factory/waku.nim @@ -388,8 +388,7 @@ proc startDnsDiscoveryRetryLoop(waku: ptr Waku): Future[void] {.async.} = while true: await sleepAsync(30.seconds) let dynamicBootstrapNodesRes = await waku_dnsdisc.retrieveDynamicBootstrapNodes( - waku.conf.dnsDiscovery, waku.conf.dnsDiscoveryUrl, - waku.conf.dnsDiscoveryNameServers, + waku.conf.dnsDiscoveryUrl, waku.conf.dnsDiscoveryNameServers ) if dynamicBootstrapNodesRes.isErr(): error "Retrieving dynamic bootstrap nodes failed", @@ -424,7 +423,7 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} = debug "Retrieve dynamic bootstrap nodes" let dynamicBootstrapNodesRes = await waku_dnsdisc.retrieveDynamicBootstrapNodes( - waku.conf.dnsDiscovery, waku.conf.dnsDiscoveryUrl, waku.conf.dnsDiscoveryNameServers + waku.conf.dnsDiscoveryUrl, waku.conf.dnsDiscoveryNameServers ) if dynamicBootstrapNodesRes.isErr(): From 798b4bb57b355d66a94dc1ad2ab7af1520c5ed07 Mon Sep 17 00:00:00 2001 From: gabrielmer <101006718+gabrielmer@users.noreply.github.com> Date: Wed, 26 Feb 2025 18:04:13 +0200 Subject: [PATCH 14/22] fix: subscribing to RelaydefaultHandler in libwaku (#3308) --- .../requests/protocols/relay_request.nim | 2 ++ waku/node/waku_node.nim | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim b/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim index 6a940e27e..4f7c8ac5e 100644 --- a/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim +++ b/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim @@ -3,6 +3,7 @@ import chronicles, chronos, stew/byteutils, results import 
../../../../../waku/waku_core/message/message, ../../../../../waku/factory/[external_config, validator_signed, waku], + ../../../../../waku/waku_node, ../../../../../waku/waku_core/message, ../../../../../waku/waku_core/time, # Timestamp ../../../../../waku/waku_core/topics/pubsub_topic, @@ -105,6 +106,7 @@ proc process*( case self.operation of SUBSCRIBE: # TO DO: properly perform 'subscribe' + waku.node.registerRelayDefaultHandler($self.pubsubTopic) discard waku.node.wakuRelay.subscribe($self.pubsubTopic, self.relayEventCallback) of UNSUBSCRIBE: # TODO: properly perform 'unsubscribe' diff --git a/waku/node/waku_node.nim b/waku/node/waku_node.nim index 5b4f9900f..7ce23914d 100644 --- a/waku/node/waku_node.nim +++ b/waku/node/waku_node.nim @@ -245,7 +245,7 @@ proc mountStoreSync*( ## Waku relay -proc registerRelayDefaultHandler(node: WakuNode, topic: PubsubTopic) = +proc registerRelayDefaultHandler*(node: WakuNode, topic: PubsubTopic) = if node.wakuRelay.isSubscribed(topic): return From 92f893987fca53bb7009e903b76f53139a778ffd Mon Sep 17 00:00:00 2001 From: NagyZoltanPeter <113987313+NagyZoltanPeter@users.noreply.github.com> Date: Fri, 28 Feb 2025 15:36:50 +0100 Subject: [PATCH 15/22] chore: remove flaky test debug logs from rln and store tests (#3303) * chore: remove flaky test debug logs from rln tests * Remove flaky test logs from store and legacy store tests --- .../test_wakunode_rln_relay.nim | 57 +++---------------- tests/waku_store/test_waku_store.nim | 12 ---- tests/waku_store_legacy/test_waku_store.nim | 14 ----- 3 files changed, 9 insertions(+), 74 deletions(-) diff --git a/tests/waku_rln_relay/test_wakunode_rln_relay.nim b/tests/waku_rln_relay/test_wakunode_rln_relay.nim index 672fb85f1..186343727 100644 --- a/tests/waku_rln_relay/test_wakunode_rln_relay.nim +++ b/tests/waku_rln_relay/test_wakunode_rln_relay.nim @@ -487,7 +487,6 @@ procSuite "WakuNode - RLN relay": await node3.stop() asyncTest "clearNullifierLog: should clear epochs > MaxEpochGap": - 
debug "tmp debug log analyze flaky test" # Given two nodes let contentTopic = ContentTopic("/waku/2/default-content/proto") @@ -502,18 +501,15 @@ procSuite "WakuNode - RLN relay": await node1.mountRelay(shardSeq) let wakuRlnConfig1 = buildWakuRlnConfig(1, epochSizeSec, "wakunode_10") await node1.mountRlnRelay(wakuRlnConfig1) - debug "tmp debug log analyze flaky test" # Mount rlnrelay in node2 in off-chain mode await node2.mountRelay(@[DefaultRelayShard]) let wakuRlnConfig2 = buildWakuRlnConfig(2, epochSizeSec, "wakunode_11") await node2.mountRlnRelay(wakuRlnConfig2) - debug "tmp debug log analyze flaky test" # Given the two nodes are started and connected waitFor allFutures(node1.start(), node2.start()) await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) - debug "tmp debug log analyze flaky test" # Given some messages var @@ -550,9 +546,7 @@ procSuite "WakuNode - RLN relay": if msg == wm6: completionFut6.complete(true) - debug "tmp debug log analyze flaky test" node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)) - debug "tmp debug log analyze flaky test" # Given all messages have an rln proof and are published by the node 1 let publishSleepDuration: Duration = 5000.millis @@ -561,103 +555,70 @@ procSuite "WakuNode - RLN relay": # Epoch 1 node1.wakuRlnRelay.unsafeAppendRLNProof(wm1, startTime).isOkOr: raiseAssert $error - debug "tmp debug log analyze flaky test" + # Message wm2 is published in the same epoch as wm1, so it'll be considered spam node1.wakuRlnRelay.unsafeAppendRLNProof(wm2, startTime).isOkOr: raiseAssert $error - debug "tmp debug log analyze flaky test" + discard await node1.publish(some(DefaultPubsubTopic), wm1) - debug "tmp debug log analyze flaky test" discard await node1.publish(some(DefaultPubsubTopic), wm2) - debug "tmp debug log analyze flaky test" await sleepAsync(publishSleepDuration) - debug "tmp debug log analyze flaky test" check: await node1.waitForNullifierLog(0) - debug "tmp debug log 
analyze flaky test" - check: await node2.waitForNullifierLog(1) # Epoch 2 - debug "tmp debug log analyze flaky test" + node1.wakuRlnRelay.unsafeAppendRLNProof(wm3, startTime + float(1 * epochSizeSec)).isOkOr: raiseAssert $error - debug "tmp debug log analyze flaky test" + discard await node1.publish(some(DefaultPubsubTopic), wm3) - debug "tmp debug log analyze flaky test" + await sleepAsync(publishSleepDuration) - debug "tmp debug log analyze flaky test" + check: await node1.waitForNullifierLog(0) - debug "tmp debug log analyze flaky test" - check: await node2.waitForNullifierLog(2) - debug "tmp debug log analyze flaky test" # Epoch 3 node1.wakuRlnRelay.unsafeAppendRLNProof(wm4, startTime + float(2 * epochSizeSec)).isOkOr: raiseAssert $error - debug "tmp debug log analyze flaky test" + discard await node1.publish(some(DefaultPubsubTopic), wm4) - debug "tmp debug log analyze flaky test" await sleepAsync(publishSleepDuration) - debug "tmp debug log analyze flaky test" check: await node1.waitForNullifierLog(0) - debug "tmp debug log analyze flaky test" - check: await node2.waitForNullifierLog(3) - debug "tmp debug log analyze flaky test" # Epoch 4 node1.wakuRlnRelay.unsafeAppendRLNProof(wm5, startTime + float(3 * epochSizeSec)).isOkOr: raiseAssert $error - debug "tmp debug log analyze flaky test" + discard await node1.publish(some(DefaultPubsubTopic), wm5) - debug "tmp debug log analyze flaky test" await sleepAsync(publishSleepDuration) - debug "tmp debug log analyze flaky test" check: await node1.waitForNullifierLog(0) - debug "tmp debug log analyze flaky test" - check: await node2.waitForNullifierLog(4) - debug "tmp debug log analyze flaky test" # Epoch 5 node1.wakuRlnRelay.unsafeAppendRLNProof(wm6, startTime + float(4 * epochSizeSec)).isOkOr: raiseAssert $error - debug "tmp debug log analyze flaky test" + discard await node1.publish(some(DefaultPubsubTopic), wm6) - debug "tmp debug log analyze flaky test" await sleepAsync(publishSleepDuration) - debug "tmp debug 
log analyze flaky test" check: await node1.waitForNullifierLog(0) - debug "tmp debug log analyze flaky test" - check: await node2.waitForNullifierLog(4) # Then the node 2 should have cleared the nullifier log for epochs > MaxEpochGap # Therefore, with 4 max epochs, the first 4 messages will be published (except wm2, which shares epoch with wm1) check: (await completionFut1.waitForResult()).value() == true - debug "tmp debug log analyze flaky test" - check: (await completionFut2.waitForResult()).isErr() - debug "tmp debug log analyze flaky test" - check: (await completionFut3.waitForResult()).value() == true - debug "tmp debug log analyze flaky test" - check: (await completionFut4.waitForResult()).value() == true - debug "tmp debug log analyze flaky test" - check: (await completionFut5.waitForResult()).value() == true - debug "tmp debug log analyze flaky test" - check: (await completionFut6.waitForResult()).value() == true - debug "tmp debug log analyze flaky test" # Cleanup waitFor allFutures(node1.stop(), node2.stop()) diff --git a/tests/waku_store/test_waku_store.nim b/tests/waku_store/test_waku_store.nim index ab2c888aa..b21c66be0 100644 --- a/tests/waku_store/test_waku_store.nim +++ b/tests/waku_store/test_waku_store.nim @@ -17,15 +17,12 @@ import suite "Waku Store - query handler": asyncTest "history query handler should be called": - info "check point" # log added to track flaky test ## Setup let serverSwitch = newTestSwitch() clientSwitch = newTestSwitch() - info "check point" # log added to track flaky test await allFutures(serverSwitch.start(), clientSwitch.start()) - info "check point" # log added to track flaky test ## Given let serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo() @@ -55,9 +52,7 @@ suite "Waku Store - query handler": ) ## When - info "check point" # log added to track flaky test let queryRes = await client.query(req, peer = serverPeerInfo) - info "check point" # log added to track flaky test ## Then check: @@ -74,20 +69,15 @@ suite 
"Waku Store - query handler": response.messages == @[kv] ## Cleanup - info "check point" # log added to track flaky test await allFutures(serverSwitch.stop(), clientSwitch.stop()) - info "check point" # log added to track flaky test asyncTest "history query handler should be called and return an error": - info "check point" # log added to track flaky test ## Setup let serverSwitch = newTestSwitch() clientSwitch = newTestSwitch() - info "check point" # log added to track flaky test await allFutures(serverSwitch.start(), clientSwitch.start()) - info "check point" # log added to track flaky test ## Given let serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo() @@ -126,6 +116,4 @@ suite "Waku Store - query handler": error.kind == ErrorCode.BAD_REQUEST ## Cleanup - info "check point" # log added to track flaky test await allFutures(serverSwitch.stop(), clientSwitch.stop()) - info "check point" # log added to track flaky test diff --git a/tests/waku_store_legacy/test_waku_store.nim b/tests/waku_store_legacy/test_waku_store.nim index 8ff4eaf09..e5e38b208 100644 --- a/tests/waku_store_legacy/test_waku_store.nim +++ b/tests/waku_store_legacy/test_waku_store.nim @@ -15,19 +15,15 @@ import suite "Waku Store - query handler legacy": asyncTest "history query handler should be called": - info "check point" # log added to track flaky test ## Setup let serverSwitch = newTestSwitch() clientSwitch = newTestSwitch() - info "check point" # log added to track flaky test await allFutures(serverSwitch.start(), clientSwitch.start()) - info "check point" # log added to track flaky test ## Given let serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo() - info "check point" # log added to track flaky test let msg = fakeWakuMessage(contentTopic = DefaultContentTopic) @@ -50,9 +46,7 @@ suite "Waku Store - query handler legacy": ) ## When - info "check point" # log added to track flaky test let queryRes = await client.query(req, peer = serverPeerInfo) - info "check point" # log added 
to track flaky test ## Then check: @@ -69,19 +63,15 @@ suite "Waku Store - query handler legacy": response.messages == @[msg] ## Cleanup - info "check point" # log added to track flaky test await allFutures(serverSwitch.stop(), clientSwitch.stop()) - info "check point" # log added to track flaky test asyncTest "history query handler should be called and return an error": - info "check point" # log added to track flaky test ## Setup let serverSwitch = newTestSwitch() clientSwitch = newTestSwitch() await allFutures(serverSwitch.start(), clientSwitch.start()) - info "check point" # log added to track flaky test ## Given let serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo() @@ -103,10 +93,8 @@ suite "Waku Store - query handler legacy": requestId: "reqId", ) - info "check point" # log added to track flaky test ## When let queryRes = await client.query(req, peer = serverPeerInfo) - info "check point" # log added to track flaky test ## Then check: @@ -122,6 +110,4 @@ suite "Waku Store - query handler legacy": error.kind == HistoryErrorKind.BAD_REQUEST ## Cleanup - info "check point" # log added to track flaky test await allFutures(serverSwitch.stop(), clientSwitch.stop()) - info "check point" # log added to track flaky test From 57514f5c9e20bfca1a8f233b23c6dcb777db5baf Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Fri, 28 Feb 2025 20:28:45 +0100 Subject: [PATCH 16/22] chore: add simple Qt example that uses libwaku (#3310) --- .gitignore | 4 +++ examples/qt/Makefile | 26 ++++++++++++++++ examples/qt/main.qml | 64 ++++++++++++++++++++++++++++++++++++++ examples/qt/main_qt.cpp | 46 +++++++++++++++++++++++++++ examples/qt/qt.pro | 18 +++++++++++ examples/qt/waku_handler.h | 56 +++++++++++++++++++++++++++++++++ 6 files changed, 214 insertions(+) create mode 100644 examples/qt/Makefile create mode 100644 examples/qt/main.qml create mode 100644 examples/qt/main_qt.cpp create mode 100644 examples/qt/qt.pro create mode 
100644 examples/qt/waku_handler.h diff --git a/.gitignore b/.gitignore index b46870cc3..69106b9df 100644 --- a/.gitignore +++ b/.gitignore @@ -72,3 +72,7 @@ coverage_html_report/ **/rln_tree/ **/certs/ +# simple qt example +.qmake.stash +main-qt +waku_handler.moc.cpp diff --git a/examples/qt/Makefile b/examples/qt/Makefile new file mode 100644 index 000000000..aa2147edb --- /dev/null +++ b/examples/qt/Makefile @@ -0,0 +1,26 @@ + +## Has been compiled with Qt 5.15.2 + +## If change the main.qml, the qmake should be called +## This may be needed in Ubuntu: sudo apt install qtdeclarative5-dev qtquickcontrols2-5-dev + +CXX = g++ +CXXFLAGS = -g3 -fpermissive -fPIC `pkg-config --cflags Qt5Core Qt5Gui Qt5Qml Qt5Quick` +LDFLAGS = `pkg-config --libs Qt5Core Qt5Gui Qt5Qml Qt5Quick` -lwaku -L../../build/ +MOC = moc + +TARGET = main-qt +SRC = main_qt.cpp +MOC_SRC = waku_handler.moc.cpp +HEADERS = waku_handler.h + +all: $(TARGET) + +$(MOC_SRC): $(HEADERS) + $(MOC) $< -o $@ + +$(TARGET): $(SRC) $(MOC_SRC) + $(CXX) $(CXXFLAGS) -o $(TARGET) $(SRC) $(MOC_SRC) $(LDFLAGS) + +clean: + rm -f $(TARGET) $(MOC_SRC) diff --git a/examples/qt/main.qml b/examples/qt/main.qml new file mode 100644 index 000000000..7ef2dcc55 --- /dev/null +++ b/examples/qt/main.qml @@ -0,0 +1,64 @@ +import QtQuick 2.15 +import QtQuick.Controls 2.15 + +ApplicationWindow { + visible: true + width: 400 + height: 300 + title: "Hello, World!" + + Column { + anchors.centerIn: parent + spacing: 20 + + Label { + text: "Hello, World!" 
+ font.pixelSize: 24 + horizontalAlignment: Text.AlignHCenter + } + } + + Rectangle { + width: parent.width + height: 60 + anchors.bottom: parent.bottom + color: "transparent" + + Row { + anchors.centerIn: parent + spacing: 30 + + Button { + text: "Start Waku Node" + width: 150 + height: 40 + font.pixelSize: 16 + MouseArea { + anchors.fill: parent + cursorShape: Qt.PointingHandCursor + onClicked: wakuHandler.start() + } + background: Rectangle { + color: "#2196F3" + radius: 10 + } + } + + Button { + text: "Stop Waku Node" + width: 150 + height: 40 + font.pixelSize: 16 + MouseArea { + anchors.fill: parent + cursorShape: Qt.PointingHandCursor + onClicked: wakuHandler.stop() + } + background: Rectangle { + color: "#F44336" + radius: 10 + } + } + } + } +} diff --git a/examples/qt/main_qt.cpp b/examples/qt/main_qt.cpp new file mode 100644 index 000000000..f16660c17 --- /dev/null +++ b/examples/qt/main_qt.cpp @@ -0,0 +1,46 @@ +#include +#include +#include + +#include "waku_handler.h" + +void event_handler(int callerRet, const char* msg, size_t len, void* userData) { + printf("Receiving message %s\n", msg); +} + +int main(int argc, char *argv[]) { + QGuiApplication app(argc, argv); + QQmlApplicationEngine engine; + + WakuHandler wakuHandler; + void* userData = nullptr; + + QString jsonConfig = R"( + { + "tcpPort": 60000, + "relay": true, + "logLevel": "TRACE", + "discv5Discovery": true, + "discv5BootstrapNodes": [ + "enr:-QESuEB4Dchgjn7gfAvwB00CxTA-nGiyk-aALI-H4dYSZD3rUk7bZHmP8d2U6xDiQ2vZffpo45Jp7zKNdnwDUx6g4o6XAYJpZIJ2NIJpcIRA4VDAim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOvD3S3jUNICsrOILlmhENiWAMmMVlAl6-Q8wRB7hidY4N0Y3CCdl-DdWRwgiMohXdha3UyDw", + 
"enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw" + ], + "discv5UdpPort": 9999, + "dnsDiscovery": true, + "dnsDiscoveryUrl": "enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im", + "dnsDiscoveryNameServers": ["8.8.8.8", "1.0.0.1"] + } + )"; + + wakuHandler.initialize(jsonConfig, event_handler, userData); + + engine.rootContext()->setContextProperty("wakuHandler", &wakuHandler); + + engine.load(QUrl::fromLocalFile("main.qml")); + + if (engine.rootObjects().isEmpty()) + return -1; + + return app.exec(); +} + diff --git a/examples/qt/qt.pro b/examples/qt/qt.pro new file mode 100644 index 000000000..7e1770d8d --- /dev/null +++ b/examples/qt/qt.pro @@ -0,0 +1,18 @@ +###################################################################### +# Automatically generated by qmake (3.1) Thu Feb 27 21:42:11 2025 +###################################################################### + +TEMPLATE = app +TARGET = qt +INCLUDEPATH += . + +# You can make your code fail to compile if you use deprecated APIs. +# In order to do so, uncomment the following line. +# Please consult the documentation of the deprecated API in order to know +# how to port your code away from it. +# You can also select to disable deprecated APIs only up to a certain version of Qt. 
+#DEFINES += QT_DISABLE_DEPRECATED_BEFORE=0x060000 # disables all the APIs deprecated before Qt 6.0.0 + +# Input +HEADERS += waku_handler.h +SOURCES += main_qt.cpp waku_hand.moc.cpp waku_handler.moc.cpp diff --git a/examples/qt/waku_handler.h b/examples/qt/waku_handler.h new file mode 100644 index 000000000..161a17c82 --- /dev/null +++ b/examples/qt/waku_handler.h @@ -0,0 +1,56 @@ +#include +#include +#include + +#include "../../library/libwaku.h" + +class WakuHandler : public QObject { + Q_OBJECT +private: + static void event_handler(int callerRet, const char* msg, size_t len, void* userData) { + printf("Receiving message %s\n", msg); + } + + static void on_event_received(int callerRet, const char* msg, size_t len, void* userData) { + if (callerRet == RET_ERR) { + printf("Error: %s\n", msg); + exit(1); + } + else if (callerRet == RET_OK) { + printf("Receiving event: %s\n", msg); + } + } + +public: + WakuHandler() : QObject(), ctx(nullptr) {} + + void initialize(const QString& jsonConfig, WakuCallBack event_handler, void* userData) { + ctx = waku_new(jsonConfig.toUtf8().constData(), WakuCallBack(event_handler), userData); + + waku_set_event_callback(ctx, on_event_received, userData); + qDebug() << "Waku context initialized, ready to start."; + } + + Q_INVOKABLE void start() { + if (ctx) { + waku_start(ctx, event_handler, nullptr); + qDebug() << "Waku start called with event_handler and userData."; + } else { + qDebug() << "Context is not initialized in start."; + } + } + + Q_INVOKABLE void stop() { + if (ctx) { + waku_stop(ctx, event_handler, nullptr); + qDebug() << "Waku stop called with event_handler and userData."; + } else { + qDebug() << "Context is not initialized in stop."; + } + } + + virtual ~WakuHandler() {} + +private: + void* ctx; +}; From f90baa1d2f14a802e19cbdca10312c5c17a62190 Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Sun, 2 Mar 2025 22:19:07 +0100 Subject: [PATCH 17/22] chore: more efficient 
metrics usage (#3298) * Enhance metrics labels * Bound the metrics-label-values in arbitrary queries * The metrics-label-values for prepared statements are kept as they already represent a fixed set --- waku/common/databases/db_postgres/dbconn.nim | 28 ++++++++++--------- .../databases/db_postgres/query_metrics.nim | 24 ++++++++++++++++ waku/waku_store/client.nim | 10 +++---- waku/waku_store/protocol.nim | 2 +- waku/waku_store/protocol_metrics.nim | 11 ++++---- waku/waku_store_legacy/protocol_metrics.nim | 10 +++---- 6 files changed, 56 insertions(+), 29 deletions(-) diff --git a/waku/common/databases/db_postgres/dbconn.nim b/waku/common/databases/db_postgres/dbconn.nim index 287ed4e8d..e711a5ca2 100644 --- a/waku/common/databases/db_postgres/dbconn.nim +++ b/waku/common/databases/db_postgres/dbconn.nim @@ -235,6 +235,13 @@ proc isSecureString(input: string): bool = return true +proc convertQueryToMetricLabel(query: string): string = + ## Simple query categorization. The output label is the one that should be used in query metrics + for snippetQuery, metric in QueriesToMetricMap.pairs(): + if query.contains($snippetQuery): + return $metric + return "unknown_query_metric" + proc dbConnQuery*( dbConnWrapper: DbConnWrapper, query: SqlQuery, @@ -247,11 +254,7 @@ proc dbConnQuery*( dbConnWrapper.futBecomeFree = newFuture[void]("dbConnQuery") - let cleanedQuery = ($query).replace(" ", "").replace("\n", "") - ## remove everything between ' or " all possible sequence of numbers. e.g. 
rm partition partition - var querySummary = cleanedQuery.replace(re2("""(['"]).*?\\1"""), "") - querySummary = querySummary.replace(re2"\d+", "") - querySummary = "query_tag_" & querySummary[0 ..< min(querySummary.len, 128)] + let metricLabel = convertQueryToMetricLabel($query) var queryStartTime = getTime().toUnixFloat() @@ -262,7 +265,7 @@ proc dbConnQuery*( return err("error in dbConnQuery calling sendQuery: " & $error) let sendDuration = getTime().toUnixFloat() - queryStartTime - query_time_secs.set(sendDuration, [querySummary, "sendToDBQuery"]) + query_time_secs.set(sendDuration, [metricLabel, "sendToDBQuery"]) queryStartTime = getTime().toUnixFloat() @@ -270,16 +273,16 @@ proc dbConnQuery*( return err("error in dbConnQuery calling waitQueryToFinish: " & $error) let waitDuration = getTime().toUnixFloat() - queryStartTime - query_time_secs.set(waitDuration, [querySummary, "waitFinish"]) + query_time_secs.set(waitDuration, [metricLabel, "waitFinish"]) - query_count.inc(labelValues = [querySummary]) + query_count.inc(labelValues = [metricLabel]) if "insert" notin ($query).toLower(): debug "dbConnQuery", requestId, query = $query, args, - querySummary, + metricLabel, waitDbQueryDurationSecs = waitDuration, sendToDBDurationSecs = sendDuration @@ -302,9 +305,8 @@ proc dbConnQueryPrepared*( error "error in dbConnQueryPrepared", error = $error return err("error in dbConnQueryPrepared calling sendQuery: " & $error) - let stmtNameSummary = stmtName[0 ..< min(stmtName.len, 128)] let sendDuration = getTime().toUnixFloat() - queryStartTime - query_time_secs.set(sendDuration, [stmtNameSummary, "sendToDBQuery"]) + query_time_secs.set(sendDuration, [stmtName, "sendToDBQuery"]) queryStartTime = getTime().toUnixFloat() @@ -312,9 +314,9 @@ proc dbConnQueryPrepared*( return err("error in dbConnQueryPrepared calling waitQueryToFinish: " & $error) let waitDuration = getTime().toUnixFloat() - queryStartTime - query_time_secs.set(waitDuration, [stmtNameSummary, "waitFinish"]) + 
query_time_secs.set(waitDuration, [stmtName, "waitFinish"]) - query_count.inc(labelValues = [stmtNameSummary]) + query_count.inc(labelValues = [stmtName]) if "insert" notin stmtName.toLower(): debug "dbConnQueryPrepared", diff --git a/waku/common/databases/db_postgres/query_metrics.nim b/waku/common/databases/db_postgres/query_metrics.nim index 06209cac0..124d7cdf4 100644 --- a/waku/common/databases/db_postgres/query_metrics.nim +++ b/waku/common/databases/db_postgres/query_metrics.nim @@ -5,3 +5,27 @@ declarePublicGauge query_time_secs, declarePublicCounter query_count, "number of times a query is being performed", labels = ["query"] + +## Maps parts of the possible known queries with a fixed and shorter query label. +const QueriesToMetricMap* = { + "contentTopic IN": "content_topic", + "SELECT version()": "select_version", + "WITH min_timestamp": "messages_lookup", + "SELECT messageHash FROM messages WHERE pubsubTopic = ? AND timestamp >= ? AND timestamp <= ? ORDER BY timestamp DESC, messageHash DESC LIMIT ?": + "msg_hash_no_ctopic", + "AS partition_name": "get_partitions_list", + "SELECT COUNT(1) FROM messages": "count_msgs", + "SELECT messageHash FROM messages WHERE (timestamp, messageHash) < (?,?) AND pubsubTopic = ? AND timestamp >= ? AND timestamp <= ? 
ORDER BY timestamp DESC, messageHash DESC LIMIT ?": + "msg_hash_with_cursor", + "SELECT pg_database_size(current_database())": "get_database_size", + "DELETE FROM messages_lookup WHERE timestamp": "delete_from_msgs_lookup", + "DROP TABLE messages_": "drop_partition_table", + "ALTER TABLE messages DETACH PARTITION": "detach_partition", + "SELECT pg_size_pretty(pg_total_relation_size(C.oid))": "get_partition_size", + "pg_try_advisory_lock": "try_advisory_lock", + "SELECT messageHash FROM messages ORDER BY timestamp DESC, messageHash DESC LIMIT ?": + "get_all_msg_hash", + "SELECT pg_advisory_unlock": "advisory_unlock", + "ANALYZE messages": "analyze_messages", + "SELECT EXISTS": "check_version_table_exists", +} diff --git a/waku/waku_store/client.nim b/waku/waku_store/client.nim index 61229576a..082120823 100644 --- a/waku/waku_store/client.nim +++ b/waku/waku_store/client.nim @@ -39,11 +39,11 @@ proc sendStoreRequest( return err(StoreError(kind: ErrorCode.BAD_RESPONSE, cause: error.msg)) let res = StoreQueryResponse.decode(buf).valueOr: - waku_store_errors.inc(labelValues = [decodeRpcFailure]) - return err(StoreError(kind: ErrorCode.BAD_RESPONSE, cause: decodeRpcFailure)) + waku_store_errors.inc(labelValues = [DecodeRpcFailure]) + return err(StoreError(kind: ErrorCode.BAD_RESPONSE, cause: DecodeRpcFailure)) if res.statusCode != uint32(StatusCode.SUCCESS): - waku_store_errors.inc(labelValues = [res.statusDesc]) + waku_store_errors.inc(labelValues = [NoSuccessStatusCode]) return err(StoreError.new(res.statusCode, res.statusDesc)) return ok(res) @@ -55,7 +55,7 @@ proc query*( return err(StoreError(kind: ErrorCode.BAD_REQUEST, cause: "invalid cursor")) let connection = (await self.peerManager.dialPeer(peer, WakuStoreCodec)).valueOr: - waku_store_errors.inc(labelValues = [dialFailure]) + waku_store_errors.inc(labelValues = [DialFailure]) return err(StoreError(kind: ErrorCode.PEER_DIAL_FAILURE, address: $peer)) @@ -74,7 +74,7 @@ proc queryToAny*( return 
err(StoreError(kind: BAD_RESPONSE, cause: "no service store peer connected")) let connection = (await self.peerManager.dialPeer(peer, WakuStoreCodec)).valueOr: - waku_store_errors.inc(labelValues = [dialFailure]) + waku_store_errors.inc(labelValues = [DialFailure]) return err(StoreError(kind: ErrorCode.PEER_DIAL_FAILURE, address: $peer)) diff --git a/waku/waku_store/protocol.nim b/waku/waku_store/protocol.nim index 5f986983e..aa22fe5cd 100644 --- a/waku/waku_store/protocol.nim +++ b/waku/waku_store/protocol.nim @@ -45,7 +45,7 @@ proc handleQueryRequest( let req = StoreQueryRequest.decode(raw_request).valueOr: error "failed to decode rpc", peerId = requestor, error = $error - waku_store_errors.inc(labelValues = [decodeRpcFailure]) + waku_store_errors.inc(labelValues = [DecodeRpcFailure]) res.statusCode = uint32(ErrorCode.BAD_REQUEST) res.statusDesc = "decoding rpc failed: " & $error diff --git a/waku/waku_store/protocol_metrics.nim b/waku/waku_store/protocol_metrics.nim index b077147a6..5d9e69420 100644 --- a/waku/waku_store/protocol_metrics.nim +++ b/waku/waku_store/protocol_metrics.nim @@ -12,8 +12,9 @@ declarePublicGauge waku_store_time_seconds, # Error types (metric label values) const - dialFailure* = "dial_failure" - decodeRpcFailure* = "decode_rpc_failure" - peerNotFoundFailure* = "peer_not_found_failure" - emptyRpcQueryFailure* = "empty_rpc_query_failure" - emptyRpcResponseFailure* = "empty_rpc_response_failure" + DialFailure* = "dial_failure" + DecodeRpcFailure* = "decode_rpc_failure" + PeerNotFoundFailure* = "peer_not_found_failure" + EmptyRpcQueryFailure* = "empty_rpc_query_failure" + EmptyRpcResponseFailure* = "empty_rpc_response_failure" + NoSuccessStatusCode* = "status_code_no_success" diff --git a/waku/waku_store_legacy/protocol_metrics.nim b/waku/waku_store_legacy/protocol_metrics.nim index 53cc71427..c293f09ca 100644 --- a/waku/waku_store_legacy/protocol_metrics.nim +++ b/waku/waku_store_legacy/protocol_metrics.nim @@ -13,8 +13,8 @@ 
declarePublicGauge waku_legacy_store_time_seconds, # Error types (metric label values) const - dialFailure* = "dial_failure" - decodeRpcFailure* = "decode_rpc_failure" - peerNotFoundFailure* = "peer_not_found_failure" - emptyRpcQueryFailure* = "empty_rpc_query_failure" - emptyRpcResponseFailure* = "empty_rpc_response_failure" + dialFailure* = "dial_failure_legacy" + decodeRpcFailure* = "decode_rpc_failure_legacy" + peerNotFoundFailure* = "peer_not_found_failure_legacy" + emptyRpcQueryFailure* = "empty_rpc_query_failure_legacy" + emptyRpcResponseFailure* = "empty_rpc_response_failure_legacy" From 05b46239bac7457fc61c07b1ea9ecf293635e873 Mon Sep 17 00:00:00 2001 From: gabrielmer <101006718+gabrielmer@users.noreply.github.com> Date: Mon, 3 Mar 2025 11:22:48 +0200 Subject: [PATCH 18/22] fix: using nimMainPrefix in libwaku (#3311) --- library/libwaku.nim | 6 ++++-- waku.nimble | 4 ++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/library/libwaku.nim b/library/libwaku.nim index a2290a60c..258ac27b2 100644 --- a/library/libwaku.nim +++ b/library/libwaku.nim @@ -107,7 +107,7 @@ proc onTopicHealthChange(ctx: ptr WakuContext): TopicHealthChangeHandler = # Every Nim library must have this function called - the name is derived from # the `--nimMainPrefix` command line option -proc NimMain() {.importc.} +proc libwakuNimMain() {.importc.} # To control when the library has been initialized var initialized: Atomic[bool] @@ -122,7 +122,9 @@ if defined(android): proc initializeLibrary() {.exported.} = if not initialized.exchange(true): - NimMain() # Every Nim library needs to call `NimMain` once exactly + ## Every Nim library needs to call `NimMain` once exactly, to initialize the Nim runtime. 
+ ## Being `` the value given in the optional compilation flag --nimMainPrefix:yourprefix + libwakuNimMain() when declared(setupForeignThreadGc): setupForeignThreadGc() when declared(nimGC_setStackBottom): diff --git a/waku.nimble b/waku.nimble index 14c374982..6cf804098 100644 --- a/waku.nimble +++ b/waku.nimble @@ -65,11 +65,11 @@ proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "static") = extra_params &= " " & paramStr(i) if `type` == "static": exec "nim c" & " --out:build/" & name & - ".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header --undef:metrics --skipParentCfg:on " & + ".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header --undef:metrics --nimMainPrefix:libwaku --skipParentCfg:on " & extra_params & " " & srcDir & name & ".nim" else: exec "nim c" & " --out:build/" & name & - ".so --threads:on --app:lib --opt:size --noMain --mm:refc --header --undef:metrics --skipParentCfg:on " & + ".so --threads:on --app:lib --opt:size --noMain --mm:refc --header --undef:metrics --nimMainPrefix:libwaku --skipParentCfg:on " & extra_params & " " & srcDir & name & ".nim" proc buildMobileAndroid(srcDir = ".", params = "") = From 564b6466a8a717c539271530d6dd3596e9a53692 Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Tue, 4 Mar 2025 19:14:31 +0100 Subject: [PATCH 19/22] chore: better implementation to properly convert database query metrics (#3314) --- tests/waku_archive/test_waku_archive.nim | 16 +++++++++++++++- waku/common/databases/db_postgres/dbconn.nim | 4 ++-- .../databases/db_postgres/query_metrics.nim | 6 +++--- 3 files changed, 20 insertions(+), 6 deletions(-) diff --git a/tests/waku_archive/test_waku_archive.nim b/tests/waku_archive/test_waku_archive.nim index 9e1b927e0..fda1f54e6 100644 --- a/tests/waku_archive/test_waku_archive.nim +++ b/tests/waku_archive/test_waku_archive.nim @@ -5,11 +5,12 @@ import std/[options, sequtils], testutils/unittests, chronos, 
libp2p/crypto/cryp import waku/[ common/databases/db_sqlite, + common/databases/db_postgres/dbconn, common/paging, waku_core, waku_core/message/digest, waku_archive/driver/sqlite_driver, - waku_archive, + waku_archive ], ../waku_archive/archive_utils, ../testlib/wakucore @@ -109,6 +110,19 @@ suite "Waku Archive - message handling": check: (waitFor driver.getMessagesCount()).tryGet() == 0 + test "convert query to label": + check: + convertQueryToMetricLabel("SELECT version();") == "select_version" + convertQueryToMetricLabel("SELECT messageHash FROM messages WHERE pubsubTopic = ? AND timestamp >= ? AND timestamp <= ? ORDER BY timestamp DESC, messageHash DESC LIMIT ?") == "msg_hash_no_ctopic" + convertQueryToMetricLabel(""" SELECT child.relname AS partition_name + FROM pg_inherits + JOIN pg_class parent ON pg_inherits.inhparent = parent.oid + JOIN pg_class child ON pg_inherits.inhrelid = child.oid + JOIN pg_namespace nmsp_parent ON nmsp_parent.oid = parent.relnamespace + JOIN pg_namespace nmsp_child ON nmsp_child.oid = child.relnamespace + WHERE parent.relname='messages""") == "get_partitions_list" + + procSuite "Waku Archive - find messages": ## Fixtures let timeOrigin = now() diff --git a/waku/common/databases/db_postgres/dbconn.nim b/waku/common/databases/db_postgres/dbconn.nim index e711a5ca2..5aa852446 100644 --- a/waku/common/databases/db_postgres/dbconn.nim +++ b/waku/common/databases/db_postgres/dbconn.nim @@ -235,10 +235,10 @@ proc isSecureString(input: string): bool = return true -proc convertQueryToMetricLabel(query: string): string = +proc convertQueryToMetricLabel*(query: string): string = ## Simple query categorization. 
The output label is the one that should be used in query metrics for snippetQuery, metric in QueriesToMetricMap.pairs(): - if query.contains($snippetQuery): + if $snippetQuery in query: return $metric return "unknown_query_metric" diff --git a/waku/common/databases/db_postgres/query_metrics.nim b/waku/common/databases/db_postgres/query_metrics.nim index 124d7cdf4..8553763b5 100644 --- a/waku/common/databases/db_postgres/query_metrics.nim +++ b/waku/common/databases/db_postgres/query_metrics.nim @@ -1,4 +1,4 @@ -import metrics +import metrics, tables declarePublicGauge query_time_secs, "query time measured in nanoseconds", labels = ["query", "phase"] @@ -7,7 +7,7 @@ declarePublicCounter query_count, "number of times a query is being performed", labels = ["query"] ## Maps parts of the possible known queries with a fixed and shorter query label. -const QueriesToMetricMap* = { +const QueriesToMetricMap* = toTable({ "contentTopic IN": "content_topic", "SELECT version()": "select_version", "WITH min_timestamp": "messages_lookup", @@ -28,4 +28,4 @@ const QueriesToMetricMap* = { "SELECT pg_advisory_unlock": "advisory_unlock", "ANALYZE messages": "analyze_messages", "SELECT EXISTS": "check_version_table_exists", -} +}) From dcf09dd365151b8ced2fc4a8d7c5f8b71ef9455a Mon Sep 17 00:00:00 2001 From: NagyZoltanPeter <113987313+NagyZoltanPeter@users.noreply.github.com> Date: Wed, 5 Mar 2025 12:07:56 +0100 Subject: [PATCH 20/22] feat: lightpush v3 (#3279) * Separate new lightpush protocol New RPC defined Rename al occurence of old lightpush to legacy lightpush, fix rest tests of lightpush New lightpush protocol added back Setup new lightpush protocol, mounting and rest api for it modified: apps/chat2/chat2.nim modified: tests/node/test_wakunode_lightpush.nim modified: tests/node/test_wakunode_sharding.nim modified: tests/test_peer_manager.nim modified: tests/test_wakunode_lightpush.nim renamed: tests/waku_lightpush/lightpush_utils.nim -> 
tests/waku_lightpush_legacy/lightpush_utils.nim renamed: tests/waku_lightpush/test_all.nim -> tests/waku_lightpush_legacy/test_all.nim renamed: tests/waku_lightpush/test_client.nim -> tests/waku_lightpush_legacy/test_client.nim renamed: tests/waku_lightpush/test_ratelimit.nim -> tests/waku_lightpush_legacy/test_ratelimit.nim modified: tests/wakunode_rest/test_all.nim renamed: tests/wakunode_rest/test_rest_lightpush.nim -> tests/wakunode_rest/test_rest_lightpush_legacy.nim modified: waku/factory/node_factory.nim modified: waku/node/waku_node.nim modified: waku/waku_api/rest/admin/handlers.nim modified: waku/waku_api/rest/builder.nim new file: waku/waku_api/rest/legacy_lightpush/client.nim new file: waku/waku_api/rest/legacy_lightpush/handlers.nim new file: waku/waku_api/rest/legacy_lightpush/types.nim modified: waku/waku_api/rest/lightpush/client.nim modified: waku/waku_api/rest/lightpush/handlers.nim modified: waku/waku_api/rest/lightpush/types.nim modified: waku/waku_core/codecs.nim modified: waku/waku_lightpush.nim modified: waku/waku_lightpush/callbacks.nim modified: waku/waku_lightpush/client.nim modified: waku/waku_lightpush/common.nim modified: waku/waku_lightpush/protocol.nim modified: waku/waku_lightpush/rpc.nim modified: waku/waku_lightpush/rpc_codec.nim modified: waku/waku_lightpush/self_req_handler.nim new file: waku/waku_lightpush_legacy.nim renamed: waku/waku_lightpush/README.md -> waku/waku_lightpush_legacy/README.md new file: waku/waku_lightpush_legacy/callbacks.nim new file: waku/waku_lightpush_legacy/client.nim new file: waku/waku_lightpush_legacy/common.nim new file: waku/waku_lightpush_legacy/protocol.nim new file: waku/waku_lightpush_legacy/protocol_metrics.nim new file: waku/waku_lightpush_legacy/rpc.nim new file: waku/waku_lightpush_legacy/rpc_codec.nim new file: waku/waku_lightpush_legacy/self_req_handler.nim Adapt to non-invasive libp2p observers cherry pick latest lightpush (v1) changes into legacy lightpush code after rebase to latest 
master Fix vendor dependencies from origin/master after failed rebase of them Adjust examples, test to new lightpush - keep using of legacy Fixup error code mappings Fix REST admin interface with distinct legacy and new lightpush Fix lightpush v2 tests * Utilize new publishEx interface of pubsub libp2p * Adapt to latest libp2p pubslih design changes. publish returns an outcome as Result error. * Fix review findings * Fix tests, re-added lost one * Fix rebase * Apply suggestions from code review Co-authored-by: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> * Addressing review comments * Fix incentivization tests * Fix build failed on libwaku * Change new lightpush endpoint version to 3 instead of 2. Noticed that old and new lightpush metrics can cause trouble in monitoring dashboards so decided to give new name as v3 for the new lightpush metrics and change legacy ones back - temporarly till old lightpush will be decommissioned * Fixing flaky test with rate limit timing * Fixing logscope of lightpush and legacy lightpush --------- Co-authored-by: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> --- apps/chat2/chat2.nim | 12 +- .../lightpush_publisher.nim | 4 +- .../liteprotocoltester/liteprotocoltester.nim | 2 +- examples/lightpush_publisher.nim | 7 +- .../requests/protocols/lightpush_request.nim | 7 +- .../requests/protocols/relay_request.nim | 11 +- tests/all_tests_waku.nim | 4 +- tests/incentivization/test_poc_reputation.nim | 2 +- tests/node/test_all.nim | 1 + tests/node/test_wakunode_legacy_lightpush.nim | 233 ++++++++++++ tests/node/test_wakunode_lightpush.nim | 89 +++-- tests/node/test_wakunode_sharding.nim | 22 +- tests/test_peer_manager.nim | 10 +- tests/test_waku_enr.nim | 2 +- tests/test_wakunode_lightpush.nim | 58 --- tests/waku_core/test_peers.nim | 2 +- tests/waku_lightpush/lightpush_utils.nim | 4 +- tests/waku_lightpush/test_client.nim | 74 ++-- tests/waku_lightpush/test_ratelimit.nim | 25 +- 
.../waku_lightpush_legacy/lightpush_utils.nim | 29 ++ tests/waku_lightpush_legacy/test_all.nim | 1 + tests/waku_lightpush_legacy/test_client.nim | 339 ++++++++++++++++++ .../waku_lightpush_legacy/test_ratelimit.nim | 153 ++++++++ tests/wakunode_rest/test_all.nim | 1 + ...ush.nim => test_rest_lightpush_legacy.nim} | 19 +- vendor/nim-libp2p | 2 +- waku/factory/node_factory.nim | 6 +- waku/incentivization/reputation_manager.nim | 2 +- .../delivery_monitor/delivery_monitor.nim | 2 +- waku/node/delivery_monitor/send_monitor.nim | 6 +- waku/node/waku_node.nim | 185 +++++++--- waku/waku_api/rest/admin/handlers.nim | 14 +- waku/waku_api/rest/builder.nim | 8 +- .../waku_api/rest/legacy_lightpush/client.nim | 23 ++ .../rest/legacy_lightpush/handlers.nim | 91 +++++ waku/waku_api/rest/legacy_lightpush/types.nim | 67 ++++ waku/waku_api/rest/lightpush/client.nim | 7 +- waku/waku_api/rest/lightpush/handlers.nim | 56 +-- waku/waku_api/rest/lightpush/types.nim | 55 ++- waku/waku_core.nim | 5 +- waku/waku_core/codecs.nim | 3 +- waku/waku_core/peers.nim | 8 +- waku/waku_lightpush.nim | 4 +- waku/waku_lightpush/callbacks.nim | 30 +- waku/waku_lightpush/client.nim | 95 ++--- waku/waku_lightpush/common.nim | 77 +++- waku/waku_lightpush/protocol.nim | 107 ++++-- waku/waku_lightpush/protocol_metrics.nim | 4 +- waku/waku_lightpush/rpc.nim | 16 +- waku/waku_lightpush/rpc_codec.nim | 127 +++---- waku/waku_lightpush/self_req_handler.nim | 36 +- waku/waku_lightpush_legacy.nim | 5 + .../README.md | 0 waku/waku_lightpush_legacy/callbacks.nim | 62 ++++ waku/waku_lightpush_legacy/client.nim | 111 ++++++ waku/waku_lightpush_legacy/common.nim | 15 + waku/waku_lightpush_legacy/protocol.nim | 113 ++++++ .../protocol_metrics.nim | 19 + waku/waku_lightpush_legacy/rpc.nim | 18 + waku/waku_lightpush_legacy/rpc_codec.nim | 96 +++++ .../self_req_handler.nim | 59 +++ waku/waku_relay/protocol.nim | 25 +- 62 files changed, 2180 insertions(+), 490 deletions(-) create mode 100644 
tests/node/test_wakunode_legacy_lightpush.nim delete mode 100644 tests/test_wakunode_lightpush.nim create mode 100644 tests/waku_lightpush_legacy/lightpush_utils.nim create mode 100644 tests/waku_lightpush_legacy/test_all.nim create mode 100644 tests/waku_lightpush_legacy/test_client.nim create mode 100644 tests/waku_lightpush_legacy/test_ratelimit.nim rename tests/wakunode_rest/{test_rest_lightpush.nim => test_rest_lightpush_legacy.nim} (94%) create mode 100644 waku/waku_api/rest/legacy_lightpush/client.nim create mode 100644 waku/waku_api/rest/legacy_lightpush/handlers.nim create mode 100644 waku/waku_api/rest/legacy_lightpush/types.nim create mode 100644 waku/waku_lightpush_legacy.nim rename waku/{waku_lightpush => waku_lightpush_legacy}/README.md (100%) create mode 100644 waku/waku_lightpush_legacy/callbacks.nim create mode 100644 waku/waku_lightpush_legacy/client.nim create mode 100644 waku/waku_lightpush_legacy/common.nim create mode 100644 waku/waku_lightpush_legacy/protocol.nim create mode 100644 waku/waku_lightpush_legacy/protocol_metrics.nim create mode 100644 waku/waku_lightpush_legacy/rpc.nim create mode 100644 waku/waku_lightpush_legacy/rpc_codec.nim create mode 100644 waku/waku_lightpush_legacy/self_req_handler.nim diff --git a/apps/chat2/chat2.nim b/apps/chat2/chat2.nim index da57991c1..3723291e3 100644 --- a/apps/chat2/chat2.nim +++ b/apps/chat2/chat2.nim @@ -33,8 +33,8 @@ import import waku/[ waku_core, - waku_lightpush/common, - waku_lightpush/rpc, + waku_lightpush_legacy/common, + waku_lightpush_legacy/rpc, waku_enr, discovery/waku_dnsdisc, waku_store_legacy, @@ -227,9 +227,9 @@ proc publish(c: Chat, line: string) = c.node.wakuRlnRelay.lastEpoch = proof.epoch try: - if not c.node.wakuLightPush.isNil(): + if not c.node.wakuLegacyLightPush.isNil(): # Attempt lightpush - (waitFor c.node.lightpushPublish(some(DefaultPubsubTopic), message)).isOkOr: + (waitFor c.node.legacyLightpushPublish(some(DefaultPubsubTopic), message)).isOkOr: error "failed to 
publish lightpush message", error = error else: (waitFor c.node.publish(some(DefaultPubsubTopic), message)).isOkOr: @@ -502,8 +502,8 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = if conf.lightpushnode != "": let peerInfo = parsePeerInfo(conf.lightpushnode) if peerInfo.isOk(): - await mountLightPush(node) - node.mountLightPushClient() + await mountLegacyLightPush(node) + node.mountLegacyLightPushClient() node.peerManager.addServicePeer(peerInfo.value, WakuLightpushCodec) else: error "LightPush not mounted. Couldn't parse conf.lightpushnode", diff --git a/apps/liteprotocoltester/lightpush_publisher.nim b/apps/liteprotocoltester/lightpush_publisher.nim index 2d48348b2..32f802fe4 100644 --- a/apps/liteprotocoltester/lightpush_publisher.nim +++ b/apps/liteprotocoltester/lightpush_publisher.nim @@ -145,7 +145,7 @@ proc publishMessages( lightpushContentTopic, renderMsgSize, ) - let wlpRes = await wakuNode.lightpushPublish( + let wlpRes = await wakuNode.legacyLightpushPublish( some(lightpushPubsubTopic), message, actualServicePeer ) @@ -209,7 +209,7 @@ proc setupAndPublish*( if isNil(wakuNode.wakuLightpushClient): # if we have not yet initialized lightpush client, then do it as the only way we can get here is # by having a service peer discovered. 
- wakuNode.mountLightPushClient() + wakuNode.mountLegacyLightPushClient() # give some time to receiver side to set up let waitTillStartTesting = conf.startPublishingAfter.seconds diff --git a/apps/liteprotocoltester/liteprotocoltester.nim b/apps/liteprotocoltester/liteprotocoltester.nim index 5f6ec4ee0..c23b80e72 100644 --- a/apps/liteprotocoltester/liteprotocoltester.nim +++ b/apps/liteprotocoltester/liteprotocoltester.nim @@ -202,7 +202,7 @@ when isMainModule: var codec = WakuLightPushCodec # mounting relevant client, for PX filter client must be mounted ahead if conf.testFunc == TesterFunctionality.SENDER: - wakuApp.node.mountLightPushClient() + wakuApp.node.mountLegacyLightPushClient() codec = WakuLightPushCodec else: waitFor wakuApp.node.mountFilterClient() diff --git a/examples/lightpush_publisher.nim b/examples/lightpush_publisher.nim index 0615c1f6b..b0f919a89 100644 --- a/examples/lightpush_publisher.nim +++ b/examples/lightpush_publisher.nim @@ -70,7 +70,7 @@ proc setupAndPublish(rng: ref HmacDrbgContext) {.async.} = let node = builder.build().tryGet() node.mountMetadata(clusterId).expect("failed to mount waku metadata protocol") - node.mountLightPushClient() + node.mountLegacyLightPushClient() await node.start() node.peerManager.start() @@ -87,8 +87,9 @@ proc setupAndPublish(rng: ref HmacDrbgContext) {.async.} = let lightpushPeer = parsePeerInfo(LightpushPeer).get() - let res = - await node.lightpushPublish(some(LightpushPubsubTopic), message, lightpushPeer) + let res = await node.legacyLightpushPublish( + some(LightpushPubsubTopic), message, lightpushPeer + ) if res.isOk: notice "published message", diff --git a/library/waku_thread/inter_thread_communication/requests/protocols/lightpush_request.nim b/library/waku_thread/inter_thread_communication/requests/protocols/lightpush_request.nim index 70a6b6116..e7006ad06 100644 --- a/library/waku_thread/inter_thread_communication/requests/protocols/lightpush_request.nim +++ 
b/library/waku_thread/inter_thread_communication/requests/protocols/lightpush_request.nim @@ -2,12 +2,13 @@ import options import chronicles, chronos, results import ../../../../../waku/waku_core/message/message, + ../../../../../waku/waku_core/codecs, ../../../../../waku/factory/waku, ../../../../../waku/waku_core/message, ../../../../../waku/waku_core/time, # Timestamp ../../../../../waku/waku_core/topics/pubsub_topic, - ../../../../../waku/waku_lightpush/client, - ../../../../../waku/waku_lightpush/common, + ../../../../../waku/waku_lightpush_legacy/client, + ../../../../../waku/waku_lightpush_legacy/common, ../../../../../waku/node/peer_manager/peer_manager, ../../../../alloc @@ -98,7 +99,7 @@ proc process*( return err(errorMsg) let msgHashHex = ( - await waku.node.wakuLightpushClient.publish( + await waku.node.wakuLegacyLightpushClient.publish( pubsubTopic, msg, peer = peerOpt.get() ) ).valueOr: diff --git a/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim b/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim index 4f7c8ac5e..232630591 100644 --- a/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim +++ b/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim @@ -115,14 +115,13 @@ proc process*( let msg = self.message.toWakuMessage() let pubsubTopic = $self.pubsubTopic - let numPeers = await waku.node.wakuRelay.publish(pubsubTopic, msg) - if numPeers == 0: - let errorMsg = "Message not sent because no peers found." + (await waku.node.wakuRelay.publish(pubsubTopic, msg)).isOkOr: + let errorMsg = "Message not sent." 
& $error error "PUBLISH failed", error = errorMsg return err(errorMsg) - elif numPeers > 0: - let msgHash = computeMessageHash(pubSubTopic, msg).to0xHex - return ok(msgHash) + + let msgHash = computeMessageHash(pubSubTopic, msg).to0xHex + return ok(msgHash) of LIST_CONNECTED_PEERS: let numConnPeers = waku.node.wakuRelay.getNumConnectedPeers($self.pubsubTopic).valueOr: error "LIST_CONNECTED_PEERS failed", error = error diff --git a/tests/all_tests_waku.nim b/tests/all_tests_waku.nim index 46c235b51..39ac57caf 100644 --- a/tests/all_tests_waku.nim +++ b/tests/all_tests_waku.nim @@ -65,6 +65,7 @@ import ./node/test_all, ./waku_filter_v2/test_all, ./waku_peer_exchange/test_all, + ./waku_lightpush_legacy/test_all, ./waku_lightpush/test_all, ./waku_relay/test_all, ./incentivization/test_all @@ -72,7 +73,6 @@ import import # Waku v2 tests ./test_wakunode, - ./test_wakunode_lightpush, ./test_peer_store_extended, ./test_message_cache, ./test_peer_manager, @@ -98,7 +98,7 @@ import ./wakunode_rest/test_rest_relay_serdes, ./wakunode_rest/test_rest_serdes, ./wakunode_rest/test_rest_filter, - ./wakunode_rest/test_rest_lightpush, + ./wakunode_rest/test_rest_lightpush_legacy, ./wakunode_rest/test_rest_admin, ./wakunode_rest/test_rest_cors, ./wakunode_rest/test_rest_health diff --git a/tests/incentivization/test_poc_reputation.nim b/tests/incentivization/test_poc_reputation.nim index b35c4b92f..d601d1e24 100644 --- a/tests/incentivization/test_poc_reputation.nim +++ b/tests/incentivization/test_poc_reputation.nim @@ -11,7 +11,7 @@ import import waku/[node/peer_manager, waku_core], waku/incentivization/[rpc, reputation_manager], - waku/waku_lightpush/rpc + waku/waku_lightpush_legacy/rpc suite "Waku Incentivization PoC Reputation": var manager {.threadvar.}: ReputationManager diff --git a/tests/node/test_all.nim b/tests/node/test_all.nim index 6c3d76175..4840f49a2 100644 --- a/tests/node/test_all.nim +++ b/tests/node/test_all.nim @@ -1,5 +1,6 @@ import ./test_wakunode_filter, + 
./test_wakunode_legacy_lightpush, ./test_wakunode_lightpush, ./test_wakunode_peer_exchange, ./test_wakunode_store, diff --git a/tests/node/test_wakunode_legacy_lightpush.nim b/tests/node/test_wakunode_legacy_lightpush.nim new file mode 100644 index 000000000..ab23921a0 --- /dev/null +++ b/tests/node/test_wakunode_legacy_lightpush.nim @@ -0,0 +1,233 @@ +{.used.} + +import + std/[options, tables, sequtils, tempfiles, strutils], + stew/shims/net as stewNet, + testutils/unittests, + chronos, + chronicles, + std/strformat, + os, + libp2p/[peerstore, crypto/crypto] + +import + waku/[ + waku_core, + node/peer_manager, + node/waku_node, + waku_filter_v2, + waku_filter_v2/client, + waku_filter_v2/subscriptions, + waku_lightpush_legacy, + waku_lightpush_legacy/common, + waku_lightpush_legacy/client, + waku_lightpush_legacy/protocol_metrics, + waku_lightpush_legacy/rpc, + waku_rln_relay, + ], + ../testlib/[assertions, common, wakucore, wakunode, testasync, futures, testutils], + ../resources/payloads + +suite "Waku Legacy Lightpush - End To End": + var + handlerFuture {.threadvar.}: Future[(PubsubTopic, WakuMessage)] + handler {.threadvar.}: PushMessageHandler + + server {.threadvar.}: WakuNode + client {.threadvar.}: WakuNode + + serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + pubsubTopic {.threadvar.}: PubsubTopic + contentTopic {.threadvar.}: ContentTopic + message {.threadvar.}: WakuMessage + + asyncSetup: + handlerFuture = newPushHandlerFuture() + handler = proc( + peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[WakuLightPushResult[void]] {.async.} = + handlerFuture.complete((pubsubTopic, message)) + return ok() + + let + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + + server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + + await allFutures(server.start(), client.start()) + await server.start() + + await 
server.mountRelay() + await server.mountLegacyLightpush() # without rln-relay + client.mountLegacyLightpushClient() + + serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() + pubsubTopic = DefaultPubsubTopic + contentTopic = DefaultContentTopic + message = fakeWakuMessage() + + asyncTeardown: + await server.stop() + + suite "Assessment of Message Relaying Mechanisms": + asyncTest "Via 11/WAKU2-RELAY from Relay/Full Node": + # Given a light lightpush client + let lightpushClient = + newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)) + lightpushClient.mountLegacyLightpushClient() + + # When the client publishes a message + let publishResponse = await lightpushClient.legacyLightpushPublish( + some(pubsubTopic), message, serverRemotePeerInfo + ) + + if not publishResponse.isOk(): + echo "Publish failed: ", publishResponse.error() + + # Then the message is not relayed but not due to RLN + assert publishResponse.isErr(), "We expect an error response" + + assert (publishResponse.error == protocol_metrics.notPublishedAnyPeer), + "incorrect error response" + + suite "Waku LightPush Validation Tests": + asyncTest "Validate message size exceeds limit": + let msgOverLimit = fakeWakuMessage( + contentTopic = contentTopic, + payload = getByteSequence(DefaultMaxWakuMessageSize + 64 * 1024), + ) + + # When the client publishes an over-limit message + let publishResponse = await client.legacyLightpushPublish( + some(pubsubTopic), msgOverLimit, serverRemotePeerInfo + ) + + check: + publishResponse.isErr() + publishResponse.error == + fmt"Message size exceeded maximum of {DefaultMaxWakuMessageSize} bytes" + +suite "RLN Proofs as a Lightpush Service": + var + handlerFuture {.threadvar.}: Future[(PubsubTopic, WakuMessage)] + handler {.threadvar.}: PushMessageHandler + + server {.threadvar.}: WakuNode + client {.threadvar.}: WakuNode + + serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + pubsubTopic {.threadvar.}: PubsubTopic + contentTopic 
{.threadvar.}: ContentTopic + message {.threadvar.}: WakuMessage + + asyncSetup: + handlerFuture = newPushHandlerFuture() + handler = proc( + peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[WakuLightPushResult[void]] {.async.} = + handlerFuture.complete((pubsubTopic, message)) + return ok() + + let + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + + server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + + # mount rln-relay + let wakuRlnConfig = WakuRlnConfig( + rlnRelayDynamic: false, + rlnRelayCredIndex: some(1.uint), + rlnRelayUserMessageLimit: 1, + rlnEpochSizeSec: 1, + rlnRelayTreePath: genTempPath("rln_tree", "wakunode"), + ) + + await allFutures(server.start(), client.start()) + await server.start() + + await server.mountRelay() + await server.mountRlnRelay(wakuRlnConfig) + await server.mountLegacyLightPush() + client.mountLegacyLightPushClient() + + serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() + pubsubTopic = DefaultPubsubTopic + contentTopic = DefaultContentTopic + message = fakeWakuMessage() + + asyncTeardown: + await server.stop() + + suite "Lightpush attaching RLN proofs": + asyncTest "Message is published when RLN enabled": + # Given a light lightpush client + let lightpushClient = + newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)) + lightpushClient.mountLegacyLightPushClient() + + # When the client publishes a message + let publishResponse = await lightpushClient.legacyLightpushPublish( + some(pubsubTopic), message, serverRemotePeerInfo + ) + + if not publishResponse.isOk(): + echo "Publish failed: ", publishResponse.error() + + # Then the message is not relayed but not due to RLN + assert publishResponse.isErr(), "We expect an error response" + check publishResponse.error == protocol_metrics.notPublishedAnyPeer + +suite "Waku Legacy Lightpush message 
delivery": + asyncTest "Legacy lightpush message flow succeed": + ## Setup + let + lightNodeKey = generateSecp256k1Key() + lightNode = newTestWakuNode(lightNodeKey, parseIpAddress("0.0.0.0"), Port(0)) + bridgeNodeKey = generateSecp256k1Key() + bridgeNode = newTestWakuNode(bridgeNodeKey, parseIpAddress("0.0.0.0"), Port(0)) + destNodeKey = generateSecp256k1Key() + destNode = newTestWakuNode(destNodeKey, parseIpAddress("0.0.0.0"), Port(0)) + + await allFutures(destNode.start(), bridgeNode.start(), lightNode.start()) + + await destNode.mountRelay(@[DefaultRelayShard]) + await bridgeNode.mountRelay(@[DefaultRelayShard]) + await bridgeNode.mountLegacyLightPush() + lightNode.mountLegacyLightPushClient() + + discard await lightNode.peerManager.dialPeer( + bridgeNode.peerInfo.toRemotePeerInfo(), WakuLegacyLightPushCodec + ) + await sleepAsync(100.milliseconds) + await destNode.connectToNodes(@[bridgeNode.peerInfo.toRemotePeerInfo()]) + + ## Given + let message = fakeWakuMessage() + + var completionFutRelay = newFuture[bool]() + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + check: + topic == DefaultPubsubTopic + msg == message + completionFutRelay.complete(true) + + destNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)) + + # Wait for subscription to take effect + await sleepAsync(100.millis) + + ## When + let res = await lightNode.legacyLightpushPublish(some(DefaultPubsubTopic), message) + assert res.isOk(), $res.error + + ## Then + check await completionFutRelay.withTimeout(5.seconds) + + ## Cleanup + await allFutures(lightNode.stop(), bridgeNode.stop(), destNode.stop()) diff --git a/tests/node/test_wakunode_lightpush.nim b/tests/node/test_wakunode_lightpush.nim index 30158ebd1..eeef02a32 100644 --- a/tests/node/test_wakunode_lightpush.nim +++ b/tests/node/test_wakunode_lightpush.nim @@ -19,15 +19,13 @@ import waku_filter_v2/client, waku_filter_v2/subscriptions, waku_lightpush, - 
waku_lightpush/common, - waku_lightpush/client, - waku_lightpush/protocol_metrics, - waku_lightpush/rpc, waku_rln_relay, ], ../testlib/[assertions, common, wakucore, wakunode, testasync, futures, testutils], ../resources/payloads +const PublishedToOnePeer = 1 + suite "Waku Lightpush - End To End": var handlerFuture {.threadvar.}: Future[(PubsubTopic, WakuMessage)] @@ -45,9 +43,9 @@ suite "Waku Lightpush - End To End": handlerFuture = newPushHandlerFuture() handler = proc( peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage - ): Future[WakuLightPushResult[void]] {.async.} = + ): Future[WakuLightPushResult] {.async.} = handlerFuture.complete((pubsubTopic, message)) - return ok() + return ok(PublishedToOnePeer) let serverKey = generateSecp256k1Key() @@ -80,16 +78,16 @@ suite "Waku Lightpush - End To End": # When the client publishes a message let publishResponse = await lightpushClient.lightpushPublish( - some(pubsubTopic), message, serverRemotePeerInfo + some(pubsubTopic), message, some(serverRemotePeerInfo) ) if not publishResponse.isOk(): - echo "Publish failed: ", publishResponse.error() + echo "Publish failed: ", publishResponse.error.code # Then the message is not relayed but not due to RLN assert publishResponse.isErr(), "We expect an error response" - assert (publishResponse.error == protocol_metrics.notPublishedAnyPeer), + assert (publishResponse.error.code == NO_PEERS_TO_RELAY), "incorrect error response" suite "Waku LightPush Validation Tests": @@ -101,13 +99,14 @@ suite "Waku Lightpush - End To End": # When the client publishes an over-limit message let publishResponse = await client.lightpushPublish( - some(pubsubTopic), msgOverLimit, serverRemotePeerInfo + some(pubsubTopic), msgOverLimit, some(serverRemotePeerInfo) ) check: publishResponse.isErr() - publishResponse.error == - fmt"Message size exceeded maximum of {DefaultMaxWakuMessageSize} bytes" + publishResponse.error.code == INVALID_MESSAGE_ERROR + publishResponse.error.desc == + 
some(fmt"Message size exceeded maximum of {DefaultMaxWakuMessageSize} bytes") suite "RLN Proofs as a Lightpush Service": var @@ -126,9 +125,9 @@ suite "RLN Proofs as a Lightpush Service": handlerFuture = newPushHandlerFuture() handler = proc( peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage - ): Future[WakuLightPushResult[void]] {.async.} = + ): Future[WakuLightPushResult] {.async.} = handlerFuture.complete((pubsubTopic, message)) - return ok() + return ok(PublishedToOnePeer) let serverKey = generateSecp256k1Key() @@ -151,8 +150,8 @@ suite "RLN Proofs as a Lightpush Service": await server.mountRelay() await server.mountRlnRelay(wakuRlnConfig) - await server.mountLightpush() - client.mountLightpushClient() + await server.mountLightPush() + client.mountLightPushClient() serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() pubsubTopic = DefaultPubsubTopic @@ -167,11 +166,11 @@ suite "RLN Proofs as a Lightpush Service": # Given a light lightpush client let lightpushClient = newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)) - lightpushClient.mountLightpushClient() + lightpushClient.mountLightPushClient() # When the client publishes a message let publishResponse = await lightpushClient.lightpushPublish( - some(pubsubTopic), message, serverRemotePeerInfo + some(pubsubTopic), message, some(serverRemotePeerInfo) ) if not publishResponse.isOk(): @@ -179,5 +178,55 @@ suite "RLN Proofs as a Lightpush Service": # Then the message is not relayed but not due to RLN assert publishResponse.isErr(), "We expect an error response" - assert (publishResponse.error == protocol_metrics.notPublishedAnyPeer), - "incorrect error response" + check publishResponse.error.code == NO_PEERS_TO_RELAY + +suite "Waku Lightpush message delivery": + asyncTest "lightpush message flow succeed": + ## Setup + let + lightNodeKey = generateSecp256k1Key() + lightNode = newTestWakuNode(lightNodeKey, parseIpAddress("0.0.0.0"), Port(0)) + bridgeNodeKey = 
generateSecp256k1Key() + bridgeNode = newTestWakuNode(bridgeNodeKey, parseIpAddress("0.0.0.0"), Port(0)) + destNodeKey = generateSecp256k1Key() + destNode = newTestWakuNode(destNodeKey, parseIpAddress("0.0.0.0"), Port(0)) + + await allFutures(destNode.start(), bridgeNode.start(), lightNode.start()) + + await destNode.mountRelay(@[DefaultRelayShard]) + await bridgeNode.mountRelay(@[DefaultRelayShard]) + await bridgeNode.mountLightPush() + lightNode.mountLightPushClient() + + discard await lightNode.peerManager.dialPeer( + bridgeNode.peerInfo.toRemotePeerInfo(), WakuLightPushCodec + ) + await sleepAsync(100.milliseconds) + await destNode.connectToNodes(@[bridgeNode.peerInfo.toRemotePeerInfo()]) + + ## Given + let message = fakeWakuMessage() + + var completionFutRelay = newFuture[bool]() + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + check: + topic == DefaultPubsubTopic + msg == message + completionFutRelay.complete(true) + + destNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)) + + # Wait for subscription to take effect + await sleepAsync(100.millis) + + ## When + let res = await lightNode.lightpushPublish(some(DefaultPubsubTopic), message) + assert res.isOk(), $res.error + + ## Then + check await completionFutRelay.withTimeout(5.seconds) + + ## Cleanup + await allFutures(lightNode.stop(), bridgeNode.stop(), destNode.stop()) diff --git a/tests/node/test_wakunode_sharding.nim b/tests/node/test_wakunode_sharding.nim index 95b4043d9..bdd6859b9 100644 --- a/tests/node/test_wakunode_sharding.nim +++ b/tests/node/test_wakunode_sharding.nim @@ -286,7 +286,7 @@ suite "Sharding": asyncTest "lightpush": # Given a connected server and client subscribed to the same pubsub topic - client.mountLightPushClient() + client.mountLegacyLightPushClient() await server.mountLightpush() let @@ -299,7 +299,7 @@ suite "Sharding": let msg = WakuMessage(payload: "message".toBytes(), contentTopic: 
"myContentTopic") - lightpublishRespnse = await client.lightpushPublish( + lightpublishRespnse = await client.legacyLightpushPublish( some(topic), msg, server.switch.peerInfo.toRemotePeerInfo() ) @@ -409,7 +409,7 @@ suite "Sharding": asyncTest "lightpush (automatic sharding filtering)": # Given a connected server and client using the same content topic (with two different formats) - client.mountLightPushClient() + client.mountLegacyLightPushClient() await server.mountLightpush() let @@ -424,7 +424,7 @@ suite "Sharding": let msg = WakuMessage(payload: "message".toBytes(), contentTopic: contentTopicFull) - lightpublishRespnse = await client.lightpushPublish( + lightpublishRespnse = await client.legacyLightpushPublish( some(pubsubTopic), msg, server.switch.peerInfo.toRemotePeerInfo() ) @@ -567,7 +567,7 @@ suite "Sharding": asyncTest "lightpush - exclusion (automatic sharding filtering)": # Given a connected server and client using different content topics - client.mountLightPushClient() + client.mountLegacyLightPushClient() await server.mountLightpush() let @@ -584,7 +584,7 @@ suite "Sharding": # When a peer publishes a message in the server's subscribed topic (the client, for testing easeness) let msg = WakuMessage(payload: "message".toBytes(), contentTopic: contentTopic2) - lightpublishRespnse = await client.lightpushPublish( + lightpublishRespnse = await client.legacyLightpushPublish( some(pubsubTopic2), msg, server.switch.peerInfo.toRemotePeerInfo() ) @@ -854,12 +854,12 @@ suite "Sharding": (await clientHandler3.waitForResult(FUTURE_TIMEOUT)).isErr() asyncTest "Protocol with Unconfigured PubSub Topic Fails": - # Given a + # Given a let contentTopic = "myContentTopic" topic = "/waku/2/rs/0/1" # Using a different topic to simulate "unconfigured" pubsub topic - # but to have a handler (and be able to assert the test) + # but to have a handler (and be able to assert the test) serverHandler = server.subscribeCompletionHandler("/waku/2/rs/0/0") clientHandler = 
client.subscribeCompletionHandler("/waku/2/rs/0/0") @@ -878,7 +878,7 @@ suite "Sharding": asyncTest "Waku LightPush Sharding (Static Sharding)": # Given a connected server and client using two different pubsub topics - client.mountLightPushClient() + client.mountLegacyLightPushClient() await server.mountLightpush() # Given a connected server and client subscribed to multiple pubsub topics @@ -898,7 +898,7 @@ suite "Sharding": # When a peer publishes a message (the client, for testing easeness) in topic1 let msg1 = WakuMessage(payload: "message1".toBytes(), contentTopic: contentTopic) - lightpublishRespnse = await client.lightpushPublish( + lightpublishRespnse = await client.legacyLightpushPublish( some(topic1), msg1, server.switch.peerInfo.toRemotePeerInfo() ) @@ -916,7 +916,7 @@ suite "Sharding": clientHandler2.reset() let msg2 = WakuMessage(payload: "message2".toBytes(), contentTopic: contentTopic) - lightpublishResponse2 = await client.lightpushPublish( + lightpublishResponse2 = await client.legacyLightpushPublish( some(topic2), msg2, server.switch.peerInfo.toRemotePeerInfo() ) diff --git a/tests/test_peer_manager.nim b/tests/test_peer_manager.nim index 333433dc2..4fd148b81 100644 --- a/tests/test_peer_manager.nim +++ b/tests/test_peer_manager.nim @@ -770,7 +770,7 @@ procSuite "Peer Manager": # service peers node.peerManager.addServicePeer(peers[0], WakuStoreCodec) - node.peerManager.addServicePeer(peers[1], WakuLightPushCodec) + node.peerManager.addServicePeer(peers[1], WakuLegacyLightPushCodec) node.peerManager.addServicePeer(peers[2], WakuPeerExchangeCodec) # relay peers (should not be added) @@ -788,7 +788,7 @@ procSuite "Peer Manager": # all service peers are added to its service slot check: node.peerManager.serviceSlots[WakuStoreCodec].peerId == peers[0].peerId - node.peerManager.serviceSlots[WakuLightPushCodec].peerId == peers[1].peerId + node.peerManager.serviceSlots[WakuLegacyLightPushCodec].peerId == peers[1].peerId 
node.peerManager.serviceSlots[WakuPeerExchangeCodec].peerId == peers[2].peerId # but the relay peer is not @@ -917,13 +917,13 @@ procSuite "Peer Manager": selectedPeer2.get().peerId == peers[0].peerId # And return none if we dont have any peer for that protocol - let selectedPeer3 = pm.selectPeer(WakuLightPushCodec) + let selectedPeer3 = pm.selectPeer(WakuLegacyLightPushCodec) check: selectedPeer3.isSome() == false # Now we add service peers for different protocols peer[1..3] pm.addServicePeer(peers[1], WakuStoreCodec) - pm.addServicePeer(peers[2], WakuLightPushCodec) + pm.addServicePeer(peers[2], WakuLegacyLightPushCodec) # We no longer get one from the peerstore. Slots are being used instead. let selectedPeer4 = pm.selectPeer(WakuStoreCodec) @@ -931,7 +931,7 @@ procSuite "Peer Manager": selectedPeer4.isSome() == true selectedPeer4.get().peerId == peers[1].peerId - let selectedPeer5 = pm.selectPeer(WakuLightPushCodec) + let selectedPeer5 = pm.selectPeer(WakuLegacyLightPushCodec) check: selectedPeer5.isSome() == true selectedPeer5.get().peerId == peers[2].peerId diff --git a/tests/test_waku_enr.nim b/tests/test_waku_enr.nim index 2b91e6147..b6571b09f 100644 --- a/tests/test_waku_enr.nim +++ b/tests/test_waku_enr.nim @@ -1,7 +1,7 @@ {.used.} import std/[options, sequtils], stew/results, testutils/unittests -import waku/waku_core, waku/waku_enr, ./testlib/wakucore, waku/waku_core/codecs +import waku/waku_core, waku/waku_enr, ./testlib/wakucore suite "Waku ENR - Capabilities bitfield": test "check capabilities support": diff --git a/tests/test_wakunode_lightpush.nim b/tests/test_wakunode_lightpush.nim deleted file mode 100644 index c680fb468..000000000 --- a/tests/test_wakunode_lightpush.nim +++ /dev/null @@ -1,58 +0,0 @@ -{.used.} - -import std/options, stew/shims/net as stewNet, testutils/unittests, chronos -import - waku/[waku_core, waku_lightpush/common, node/peer_manager, waku_node], - ./testlib/wakucore, - ./testlib/wakunode - -suite "WakuNode - Lightpush": - 
asyncTest "Lightpush message return success": - ## Setup - let - lightNodeKey = generateSecp256k1Key() - lightNode = newTestWakuNode(lightNodeKey, parseIpAddress("0.0.0.0"), Port(0)) - bridgeNodeKey = generateSecp256k1Key() - bridgeNode = newTestWakuNode(bridgeNodeKey, parseIpAddress("0.0.0.0"), Port(0)) - destNodeKey = generateSecp256k1Key() - destNode = newTestWakuNode(destNodeKey, parseIpAddress("0.0.0.0"), Port(0)) - - await allFutures(destNode.start(), bridgeNode.start(), lightNode.start()) - - await destNode.mountRelay(@[DefaultRelayShard]) - await bridgeNode.mountRelay(@[DefaultRelayShard]) - await bridgeNode.mountLightPush() - lightNode.mountLightPushClient() - - discard await lightNode.peerManager.dialPeer( - bridgeNode.peerInfo.toRemotePeerInfo(), WakuLightPushCodec - ) - await sleepAsync(100.milliseconds) - await destNode.connectToNodes(@[bridgeNode.peerInfo.toRemotePeerInfo()]) - - ## Given - let message = fakeWakuMessage() - - var completionFutRelay = newFuture[bool]() - proc relayHandler( - topic: PubsubTopic, msg: WakuMessage - ): Future[void] {.async, gcsafe.} = - check: - topic == DefaultPubsubTopic - msg == message - completionFutRelay.complete(true) - - destNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)) - - # Wait for subscription to take effect - await sleepAsync(100.millis) - - ## When - let res = await lightNode.lightpushPublish(some(DefaultPubsubTopic), message) - assert res.isOk(), $res.error - - ## Then - check await completionFutRelay.withTimeout(5.seconds) - - ## Cleanup - await allFutures(lightNode.stop(), bridgeNode.stop(), destNode.stop()) diff --git a/tests/waku_core/test_peers.nim b/tests/waku_core/test_peers.nim index 366b1d25f..3dc68fa1a 100644 --- a/tests/waku_core/test_peers.nim +++ b/tests/waku_core/test_peers.nim @@ -7,7 +7,7 @@ import libp2p/peerid, libp2p/errors, confutils/toml/std/net -import waku/[waku_core, waku_core/codecs, waku_enr], ../testlib/wakucore +import waku/[waku_core, 
waku_enr], ../testlib/wakucore suite "Waku Core - Peers": test "Peer info parses correctly": diff --git a/tests/waku_lightpush/lightpush_utils.nim b/tests/waku_lightpush/lightpush_utils.nim index 45bbe125c..f3e94cb47 100644 --- a/tests/waku_lightpush/lightpush_utils.nim +++ b/tests/waku_lightpush/lightpush_utils.nim @@ -5,6 +5,7 @@ import std/options, chronicles, chronos, libp2p/crypto/crypto import waku/node/peer_manager, waku/waku_core, + waku/waku_core/topics/sharding, waku/waku_lightpush, waku/waku_lightpush/[client, common], waku/common/rate_limit/setting, @@ -17,7 +18,8 @@ proc newTestWakuLightpushNode*( ): Future[WakuLightPush] {.async.} = let peerManager = PeerManager.new(switch) - proto = WakuLightPush.new(peerManager, rng, handler, rateLimitSetting) + wakuSharding = Sharding(clusterId: 1, shardCountGenZero: 8) + proto = WakuLightPush.new(peerManager, rng, handler, wakuSharding, rateLimitSetting) await proto.start() switch.mount(proto) diff --git a/tests/waku_lightpush/test_client.nim b/tests/waku_lightpush/test_client.nim index 8b8e5529e..060a8c22b 100644 --- a/tests/waku_lightpush/test_client.nim +++ b/tests/waku_lightpush/test_client.nim @@ -13,10 +13,7 @@ import waku_core, waku_lightpush, waku_lightpush/client, - waku_lightpush/common, waku_lightpush/protocol_metrics, - waku_lightpush/rpc, - waku_lightpush/rpc_codec, ], ../testlib/[assertions, wakucore, testasync, futures, testutils], ./lightpush_utils, @@ -42,12 +39,14 @@ suite "Waku Lightpush Client": handlerFuture = newPushHandlerFuture() handler = proc( peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage - ): Future[WakuLightPushResult[void]] {.async.} = + ): Future[WakuLightPushResult] {.async.} = let msgLen = message.encode().buffer.len if msgLen > int(DefaultMaxWakuMessageSize) + 64 * 1024: - return err("length greater than maxMessageSize") + return + lighpushErrorResult(PAYLOAD_TOO_LARGE, "length greater than maxMessageSize") handlerFuture.complete((pubsubTopic, message)) - return ok() 
+ # return that we published the message to 1 peer. + return ok(1) serverSwitch = newTestSwitch() clientSwitch = newTestSwitch() @@ -80,7 +79,7 @@ suite "Waku Lightpush Client": # When publishing a valid payload let publishResponse = - await client.publish(pubsubTopic, message, serverRemotePeerInfo) + await client.publish(some(pubsubTopic), message, serverRemotePeerInfo) # Then the message is received by the server discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) @@ -92,8 +91,9 @@ suite "Waku Lightpush Client": # When publishing a valid payload handlerFuture = newPushHandlerFuture() - let publishResponse2 = - await client.publish(pubsub_topics.CURRENT, message2, serverRemotePeerInfo) + let publishResponse2 = await client.publish( + some(pubsub_topics.CURRENT), message2, serverRemotePeerInfo + ) # Then the message is received by the server discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) @@ -106,7 +106,7 @@ suite "Waku Lightpush Client": # When publishing a valid payload handlerFuture = newPushHandlerFuture() let publishResponse3 = await client.publish( - pubsub_topics.CURRENT_NESTED, message3, serverRemotePeerInfo + some(pubsub_topics.CURRENT_NESTED), message3, serverRemotePeerInfo ) # Then the message is received by the server @@ -119,8 +119,9 @@ suite "Waku Lightpush Client": # When publishing a valid payload handlerFuture = newPushHandlerFuture() - let publishResponse4 = - await client.publish(pubsub_topics.SHARDING, message4, serverRemotePeerInfo) + let publishResponse4 = await client.publish( + some(pubsub_topics.SHARDING), message4, serverRemotePeerInfo + ) # Then the message is received by the server discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) @@ -133,7 +134,7 @@ suite "Waku Lightpush Client": # When publishing a valid payload handlerFuture = newPushHandlerFuture() let publishResponse5 = - await client.publish(pubsub_topics.PLAIN, message5, serverRemotePeerInfo) + await client.publish(some(pubsub_topics.PLAIN), message5, 
serverRemotePeerInfo) # Then the message is received by the server discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) @@ -146,7 +147,7 @@ suite "Waku Lightpush Client": # When publishing a valid payload handlerFuture = newPushHandlerFuture() let publishResponse6 = - await client.publish(pubsub_topics.LEGACY, message6, serverRemotePeerInfo) + await client.publish(some(pubsub_topics.LEGACY), message6, serverRemotePeerInfo) # Then the message is received by the server discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) @@ -159,7 +160,7 @@ suite "Waku Lightpush Client": # When publishing a valid payload handlerFuture = newPushHandlerFuture() let publishResponse7 = await client.publish( - pubsub_topics.LEGACY_NESTED, message7, serverRemotePeerInfo + some(pubsub_topics.LEGACY_NESTED), message7, serverRemotePeerInfo ) # Then the message is received by the server @@ -173,7 +174,7 @@ suite "Waku Lightpush Client": # When publishing a valid payload handlerFuture = newPushHandlerFuture() let publishResponse8 = await client.publish( - pubsub_topics.LEGACY_ENCODING, message8, serverRemotePeerInfo + some(pubsub_topics.LEGACY_ENCODING), message8, serverRemotePeerInfo ) # Then the message is received by the server @@ -187,7 +188,7 @@ suite "Waku Lightpush Client": # When publishing a valid payload handlerFuture = newPushHandlerFuture() let publishResponse9 = - await client.publish(pubsubTopic, message9, serverRemotePeerInfo) + await client.publish(some(pubsubTopic), message9, serverRemotePeerInfo) # Then the message is received by the server discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) @@ -221,7 +222,7 @@ suite "Waku Lightpush Client": # When publishing the 1KiB payload let publishResponse1 = - await client.publish(pubsubTopic, message1, serverRemotePeerInfo) + await client.publish(some(pubsubTopic), message1, serverRemotePeerInfo) # Then the message is received by the server assertResultOk publishResponse1 @@ -230,7 +231,7 @@ suite "Waku Lightpush Client": # 
When publishing the 10KiB payload handlerFuture = newPushHandlerFuture() let publishResponse2 = - await client.publish(pubsubTopic, message2, serverRemotePeerInfo) + await client.publish(some(pubsubTopic), message2, serverRemotePeerInfo) # Then the message is received by the server assertResultOk publishResponse2 @@ -239,7 +240,7 @@ suite "Waku Lightpush Client": # When publishing the 100KiB payload handlerFuture = newPushHandlerFuture() let publishResponse3 = - await client.publish(pubsubTopic, message3, serverRemotePeerInfo) + await client.publish(some(pubsubTopic), message3, serverRemotePeerInfo) # Then the message is received by the server assertResultOk publishResponse3 @@ -248,7 +249,7 @@ suite "Waku Lightpush Client": # When publishing the 1MiB + 63KiB + 911B payload (1113999B) handlerFuture = newPushHandlerFuture() let publishResponse4 = - await client.publish(pubsubTopic, message4, serverRemotePeerInfo) + await client.publish(some(pubsubTopic), message4, serverRemotePeerInfo) # Then the message is received by the server assertResultOk publishResponse4 @@ -257,11 +258,12 @@ suite "Waku Lightpush Client": # When publishing the 1MiB + 63KiB + 912B payload (1114000B) handlerFuture = newPushHandlerFuture() let publishResponse5 = - await client.publish(pubsubTopic, message5, serverRemotePeerInfo) + await client.publish(some(pubsubTopic), message5, serverRemotePeerInfo) # Then the message is not received by the server check: - not publishResponse5.isOk() + publishResponse5.isErr() + publishResponse5.error.code == PAYLOAD_TOO_LARGE (await handlerFuture.waitForResult()).isErr() asyncTest "Invalid Encoding Payload": @@ -271,16 +273,12 @@ suite "Waku Lightpush Client": # When publishing the payload let publishResponse = await server.handleRequest(clientPeerId, fakeBuffer) - # Then the response is negative - check: - publishResponse.requestId == "" - # And the error is returned - let response = publishResponse.response.get() check: - response.isSuccess == false - 
response.info.isSome() - scanf(response.info.get(), decodeRpcFailure) + publishResponse.requestId == "N/A" + publishResponse.statusCode == LightpushStatusCode.BAD_REQUEST.uint32 + publishResponse.statusDesc.isSome() + scanf(publishResponse.statusDesc.get(), decodeRpcFailure) asyncTest "Handle Error": # Given a lightpush server that fails @@ -289,9 +287,9 @@ suite "Waku Lightpush Client": handlerFuture2 = newFuture[void]() handler2 = proc( peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage - ): Future[WakuLightPushResult[void]] {.async.} = + ): Future[WakuLightPushResult] {.async.} = handlerFuture2.complete() - return err(handlerError) + return lighpushErrorResult(PAYLOAD_TOO_LARGE, handlerError) let serverSwitch2 = newTestSwitch() @@ -303,11 +301,12 @@ suite "Waku Lightpush Client": # When publishing a payload let publishResponse = - await client.publish(pubsubTopic, message, serverRemotePeerInfo2) + await client.publish(some(pubsubTopic), message, serverRemotePeerInfo2) # Then the response is negative check: - publishResponse.error() == handlerError + publishResponse.error.code == PAYLOAD_TOO_LARGE + publishResponse.error.desc == some(handlerError) (await handlerFuture2.waitForResult()).isOk() # Cleanup @@ -317,7 +316,7 @@ suite "Waku Lightpush Client": asyncTest "Positive Responses": # When sending a valid PushRequest let publishResponse = - await client.publish(pubsubTopic, message, serverRemotePeerInfo) + await client.publish(some(pubsubTopic), message, serverRemotePeerInfo) # Then the response is positive assertResultOk publishResponse @@ -333,7 +332,8 @@ suite "Waku Lightpush Client": # When sending an invalid PushRequest let publishResponse = - await client.publish(pubsubTopic, message, serverRemotePeerInfo2) + await client.publish(some(pubsubTopic), message, serverRemotePeerInfo2) # Then the response is negative check not publishResponse.isOk() + check publishResponse.error.code == LightpushStatusCode.NO_PEERS_TO_RELAY diff --git 
a/tests/waku_lightpush/test_ratelimit.nim b/tests/waku_lightpush/test_ratelimit.nim index 148cca3c9..7148be37a 100644 --- a/tests/waku_lightpush/test_ratelimit.nim +++ b/tests/waku_lightpush/test_ratelimit.nim @@ -14,10 +14,7 @@ import waku_core, waku_lightpush, waku_lightpush/client, - waku_lightpush/common, waku_lightpush/protocol_metrics, - waku_lightpush/rpc, - waku_lightpush/rpc_codec, ], ../testlib/[assertions, wakucore, testasync, futures, testutils], ./lightpush_utils, @@ -36,9 +33,9 @@ suite "Rate limited push service": var handlerFuture = newFuture[(string, WakuMessage)]() let handler: PushMessageHandler = proc( peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage - ): Future[WakuLightPushResult[void]] {.async.} = + ): Future[WakuLightPushResult] {.async.} = handlerFuture.complete((pubsubTopic, message)) - return ok() + return lightpushSuccessResult(1) # succeed to publish to 1 peer. let tokenPeriod = 500.millis @@ -53,12 +50,13 @@ suite "Rate limited push service": handlerFuture = newFuture[(string, WakuMessage)]() let requestRes = - await client.publish(DefaultPubsubTopic, message, peer = serverPeerId) + await client.publish(some(DefaultPubsubTopic), message, peer = serverPeerId) check await handlerFuture.withTimeout(50.millis) - assert requestRes.isOk(), requestRes.error - check handlerFuture.finished() + check: + requestRes.isOk() + handlerFuture.finished() let (handledMessagePubsubTopic, handledMessage) = handlerFuture.read() @@ -98,9 +96,9 @@ suite "Rate limited push service": var handlerFuture = newFuture[(string, WakuMessage)]() let handler = proc( peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage - ): Future[WakuLightPushResult[void]] {.async.} = + ): Future[WakuLightPushResult] {.async.} = handlerFuture.complete((pubsubTopic, message)) - return ok() + return lightpushSuccessResult(1) let server = @@ -114,7 +112,7 @@ suite "Rate limited push service": let message = fakeWakuMessage() handlerFuture = newFuture[(string, 
WakuMessage)]() let requestRes = - await client.publish(DefaultPubsubTopic, message, peer = serverPeerId) + await client.publish(some(DefaultPubsubTopic), message, peer = serverPeerId) discard await handlerFuture.withTimeout(10.millis) check: @@ -129,12 +127,13 @@ suite "Rate limited push service": let message = fakeWakuMessage() handlerFuture = newFuture[(string, WakuMessage)]() let requestRes = - await client.publish(DefaultPubsubTopic, message, peer = serverPeerId) + await client.publish(some(DefaultPubsubTopic), message, peer = serverPeerId) discard await handlerFuture.withTimeout(10.millis) check: requestRes.isErr() - requestRes.error == "TOO_MANY_REQUESTS" + requestRes.error.code == TOO_MANY_REQUESTS + requestRes.error.desc == some(TooManyRequestsMessage) for testCnt in 0 .. 2: await successProc() diff --git a/tests/waku_lightpush_legacy/lightpush_utils.nim b/tests/waku_lightpush_legacy/lightpush_utils.nim new file mode 100644 index 000000000..733fbc8b1 --- /dev/null +++ b/tests/waku_lightpush_legacy/lightpush_utils.nim @@ -0,0 +1,29 @@ +{.used.} + +import std/options, chronicles, chronos, libp2p/crypto/crypto + +import + waku/node/peer_manager, + waku/waku_core, + waku/waku_lightpush_legacy, + waku/waku_lightpush_legacy/[client, common], + waku/common/rate_limit/setting, + ../testlib/[common, wakucore] + +proc newTestWakuLegacyLightpushNode*( + switch: Switch, + handler: PushMessageHandler, + rateLimitSetting: Option[RateLimitSetting] = none[RateLimitSetting](), +): Future[WakuLegacyLightPush] {.async.} = + let + peerManager = PeerManager.new(switch) + proto = WakuLegacyLightPush.new(peerManager, rng, handler, rateLimitSetting) + + await proto.start() + switch.mount(proto) + + return proto + +proc newTestWakuLegacyLightpushClient*(switch: Switch): WakuLegacyLightPushClient = + let peerManager = PeerManager.new(switch) + WakuLegacyLightPushClient.new(peerManager, rng) diff --git a/tests/waku_lightpush_legacy/test_all.nim 
b/tests/waku_lightpush_legacy/test_all.nim new file mode 100644 index 000000000..4e4980929 --- /dev/null +++ b/tests/waku_lightpush_legacy/test_all.nim @@ -0,0 +1 @@ +import ./test_client, ./test_ratelimit diff --git a/tests/waku_lightpush_legacy/test_client.nim b/tests/waku_lightpush_legacy/test_client.nim new file mode 100644 index 000000000..b71b7d5c3 --- /dev/null +++ b/tests/waku_lightpush_legacy/test_client.nim @@ -0,0 +1,339 @@ +{.used.} + +import + std/[options, strscans], + testutils/unittests, + chronicles, + chronos, + libp2p/crypto/crypto + +import + waku/[ + node/peer_manager, + waku_core, + waku_lightpush_legacy, + waku_lightpush_legacy/client, + waku_lightpush_legacy/common, + waku_lightpush_legacy/protocol_metrics, + waku_lightpush_legacy/rpc, + waku_lightpush_legacy/rpc_codec, + ], + ../testlib/[assertions, wakucore, testasync, futures, testutils], + ./lightpush_utils, + ../resources/[pubsub_topics, content_topics, payloads] + +suite "Waku Legacy Lightpush Client": + var + handlerFuture {.threadvar.}: Future[(PubsubTopic, WakuMessage)] + handler {.threadvar.}: PushMessageHandler + + serverSwitch {.threadvar.}: Switch + clientSwitch {.threadvar.}: Switch + server {.threadvar.}: WakuLegacyLightPush + client {.threadvar.}: WakuLegacyLightPushClient + + serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + clientPeerId {.threadvar.}: PeerId + pubsubTopic {.threadvar.}: PubsubTopic + contentTopic {.threadvar.}: ContentTopic + message {.threadvar.}: WakuMessage + + asyncSetup: + handlerFuture = newPushHandlerFuture() + handler = proc( + peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[WakuLightPushResult[void]] {.async.} = + let msgLen = message.encode().buffer.len + if msgLen > int(DefaultMaxWakuMessageSize) + 64 * 1024: + return err("length greater than maxMessageSize") + handlerFuture.complete((pubsubTopic, message)) + return ok() + + serverSwitch = newTestSwitch() + clientSwitch = newTestSwitch() + server = await 
newTestWakuLegacyLightpushNode(serverSwitch, handler) + client = newTestWakuLegacyLightpushClient(clientSwitch) + + await allFutures(serverSwitch.start(), clientSwitch.start()) + + serverRemotePeerInfo = serverSwitch.peerInfo.toRemotePeerInfo() + clientPeerId = clientSwitch.peerInfo.peerId + pubsubTopic = DefaultPubsubTopic + contentTopic = DefaultContentTopic + message = fakeWakuMessage() + + asyncTeardown: + await allFutures(clientSwitch.stop(), serverSwitch.stop()) + + suite "Verification of PushRequest Payload": + asyncTest "Valid Payload Types": + # Given the following payloads + let + message2 = fakeWakuMessage(payloads.ALPHABETIC, content_topics.CURRENT) + message3 = fakeWakuMessage(payloads.ALPHANUMERIC, content_topics.TESTNET) + message4 = fakeWakuMessage(payloads.ALPHANUMERIC_SPECIAL, content_topics.PLAIN) + message5 = fakeWakuMessage(payloads.EMOJI, content_topics.CURRENT) + message6 = fakeWakuMessage(payloads.CODE, content_topics.TESTNET) + message7 = fakeWakuMessage(payloads.QUERY, content_topics.PLAIN) + message8 = fakeWakuMessage(payloads.TEXT_SMALL, content_topics.CURRENT) + message9 = fakeWakuMessage(payloads.TEXT_LARGE, content_topics.TESTNET) + + # When publishing a valid payload + let publishResponse = + await client.publish(pubsubTopic, message, serverRemotePeerInfo) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsubTopic, message) == handlerFuture.read() + + # When publishing a valid payload + handlerFuture = newPushHandlerFuture() + let publishResponse2 = + await client.publish(pubsub_topics.CURRENT, message2, serverRemotePeerInfo) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse2 + check handlerFuture.finished() + + # And the message is received 
with the correct topic and payload + check (pubsub_topics.CURRENT, message2) == handlerFuture.read() + + # When publishing a valid payload + handlerFuture = newPushHandlerFuture() + let publishResponse3 = await client.publish( + pubsub_topics.CURRENT_NESTED, message3, serverRemotePeerInfo + ) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse3 + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsub_topics.CURRENT_NESTED, message3) == handlerFuture.read() + + # When publishing a valid payload + handlerFuture = newPushHandlerFuture() + let publishResponse4 = + await client.publish(pubsub_topics.SHARDING, message4, serverRemotePeerInfo) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse4 + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsub_topics.SHARDING, message4) == handlerFuture.read() + + # When publishing a valid payload + handlerFuture = newPushHandlerFuture() + let publishResponse5 = + await client.publish(pubsub_topics.PLAIN, message5, serverRemotePeerInfo) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse5 + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsub_topics.PLAIN, message5) == handlerFuture.read() + + # When publishing a valid payload + handlerFuture = newPushHandlerFuture() + let publishResponse6 = + await client.publish(pubsub_topics.LEGACY, message6, serverRemotePeerInfo) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse6 + check handlerFuture.finished() + + # And the message is received with the correct 
topic and payload + check (pubsub_topics.LEGACY, message6) == handlerFuture.read() + + # When publishing a valid payload + handlerFuture = newPushHandlerFuture() + let publishResponse7 = await client.publish( + pubsub_topics.LEGACY_NESTED, message7, serverRemotePeerInfo + ) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse7 + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsub_topics.LEGACY_NESTED, message7) == handlerFuture.read() + + # When publishing a valid payload + handlerFuture = newPushHandlerFuture() + let publishResponse8 = await client.publish( + pubsub_topics.LEGACY_ENCODING, message8, serverRemotePeerInfo + ) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse8 + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsub_topics.LEGACY_ENCODING, message8) == handlerFuture.read() + + # When publishing a valid payload + handlerFuture = newPushHandlerFuture() + let publishResponse9 = + await client.publish(pubsubTopic, message9, serverRemotePeerInfo) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse9 + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsubTopic, message9) == handlerFuture.read() + + asyncTest "Valid Payload Sizes": + # Given some valid payloads + let + overheadBytes: uint64 = 112 + message1 = + fakeWakuMessage(contentTopic = contentTopic, payload = getByteSequence(1024)) + # 1KiB + message2 = fakeWakuMessage( + contentTopic = contentTopic, payload = getByteSequence(10 * 1024) + ) # 10KiB + message3 = fakeWakuMessage( + contentTopic = contentTopic, payload = getByteSequence(100 * 1024) + ) # 100KiB 
+ message4 = fakeWakuMessage( + contentTopic = contentTopic, + payload = getByteSequence(DefaultMaxWakuMessageSize - overheadBytes - 1), + ) # Inclusive Limit + message5 = fakeWakuMessage( + contentTopic = contentTopic, + payload = getByteSequence(DefaultMaxWakuMessageSize + 64 * 1024), + ) # Exclusive Limit + + # When publishing the 1KiB payload + let publishResponse1 = + await client.publish(pubsubTopic, message1, serverRemotePeerInfo) + + # Then the message is received by the server + assertResultOk publishResponse1 + check (pubsubTopic, message1) == (await handlerFuture.waitForResult()).value() + + # When publishing the 10KiB payload + handlerFuture = newPushHandlerFuture() + let publishResponse2 = + await client.publish(pubsubTopic, message2, serverRemotePeerInfo) + + # Then the message is received by the server + assertResultOk publishResponse2 + check (pubsubTopic, message2) == (await handlerFuture.waitForResult()).value() + + # When publishing the 100KiB payload + handlerFuture = newPushHandlerFuture() + let publishResponse3 = + await client.publish(pubsubTopic, message3, serverRemotePeerInfo) + + # Then the message is received by the server + assertResultOk publishResponse3 + check (pubsubTopic, message3) == (await handlerFuture.waitForResult()).value() + + # When publishing the 1MiB + 63KiB + 911B payload (1113999B) + handlerFuture = newPushHandlerFuture() + let publishResponse4 = + await client.publish(pubsubTopic, message4, serverRemotePeerInfo) + + # Then the message is received by the server + assertResultOk publishResponse4 + check (pubsubTopic, message4) == (await handlerFuture.waitForResult()).value() + + # When publishing the 1MiB + 63KiB + 912B payload (1114000B) + handlerFuture = newPushHandlerFuture() + let publishResponse5 = + await client.publish(pubsubTopic, message5, serverRemotePeerInfo) + + # Then the message is not received by the server + check: + not publishResponse5.isOk() + (await handlerFuture.waitForResult()).isErr() + + asyncTest 
"Invalid Encoding Payload": + # Given a payload with an invalid encoding + let fakeBuffer = @[byte(42)] + + # When publishing the payload + let publishResponse = await server.handleRequest(clientPeerId, fakeBuffer) + + # Then the response is negative + check: + publishResponse.requestId == "" + + # And the error is returned + let response = publishResponse.response.get() + check: + response.isSuccess == false + response.info.isSome() + scanf(response.info.get(), decodeRpcFailure) + + asyncTest "Handle Error": + # Given a lightpush server that fails + let + handlerError = "handler-error" + handlerFuture2 = newFuture[void]() + handler2 = proc( + peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[WakuLightPushResult[void]] {.async.} = + handlerFuture2.complete() + return err(handlerError) + + let + serverSwitch2 = newTestSwitch() + server2 = await newTestWakuLegacyLightpushNode(serverSwitch2, handler2) + + await serverSwitch2.start() + + let serverRemotePeerInfo2 = serverSwitch2.peerInfo.toRemotePeerInfo() + + # When publishing a payload + let publishResponse = + await client.publish(pubsubTopic, message, serverRemotePeerInfo2) + + # Then the response is negative + check: + publishResponse.error() == handlerError + (await handlerFuture2.waitForResult()).isOk() + + # Cleanup + await serverSwitch2.stop() + + suite "Verification of PushResponse Payload": + asyncTest "Positive Responses": + # When sending a valid PushRequest + let publishResponse = + await client.publish(pubsubTopic, message, serverRemotePeerInfo) + + # Then the response is positive + assertResultOk publishResponse + + # TODO: Improve: Add more negative responses variations + asyncTest "Negative Responses": + # Given a server that does not support Waku Lightpush + let + serverSwitch2 = newTestSwitch() + serverRemotePeerInfo2 = serverSwitch2.peerInfo.toRemotePeerInfo() + + await serverSwitch2.start() + + # When sending an invalid PushRequest + let publishResponse = + await 
client.publish(pubsubTopic, message, serverRemotePeerInfo2) + + # Then the response is negative + check not publishResponse.isOk() diff --git a/tests/waku_lightpush_legacy/test_ratelimit.nim b/tests/waku_lightpush_legacy/test_ratelimit.nim new file mode 100644 index 000000000..1d033302f --- /dev/null +++ b/tests/waku_lightpush_legacy/test_ratelimit.nim @@ -0,0 +1,153 @@ +{.used.} + +import + std/[options, strscans], + testutils/unittests, + chronicles, + chronos, + libp2p/crypto/crypto + +import + waku/[ + node/peer_manager, + common/rate_limit/setting, + waku_core, + waku_lightpush_legacy, + waku_lightpush_legacy/client, + waku_lightpush_legacy/common, + waku_lightpush_legacy/protocol_metrics, + waku_lightpush_legacy/rpc, + waku_lightpush_legacy/rpc_codec, + ], + ../testlib/[assertions, wakucore, testasync, futures, testutils], + ./lightpush_utils, + ../resources/[pubsub_topics, content_topics, payloads] + +suite "Rate limited push service": + asyncTest "push message with rate limit not violated": + ## Setup + let + serverSwitch = newTestSwitch() + clientSwitch = newTestSwitch() + + await allFutures(serverSwitch.start(), clientSwitch.start()) + + ## Given + var handlerFuture = newFuture[(string, WakuMessage)]() + let handler: PushMessageHandler = proc( + peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[WakuLightPushResult[void]] {.async.} = + handlerFuture.complete((pubsubTopic, message)) + return ok() + + let + tokenPeriod = 500.millis + server = await newTestWakuLegacyLightpushNode( + serverSwitch, handler, some((3, tokenPeriod)) + ) + client = newTestWakuLegacyLightpushClient(clientSwitch) + + let serverPeerId = serverSwitch.peerInfo.toRemotePeerInfo() + + let sendMsgProc = proc(): Future[void] {.async.} = + let message = fakeWakuMessage() + + handlerFuture = newFuture[(string, WakuMessage)]() + let requestRes = + await client.publish(DefaultPubsubTopic, message, peer = serverPeerId) + + check await handlerFuture.withTimeout(50.millis) + 
+ assert requestRes.isOk(), requestRes.error + check handlerFuture.finished() + + let (handledMessagePubsubTopic, handledMessage) = handlerFuture.read() + + check: + handledMessagePubsubTopic == DefaultPubsubTopic + handledMessage == message + + let waitInBetweenFor = 20.millis + + # Test cannot be too explicit about the time when the TokenBucket resets + # the internal timer, although in normal use there is no use case to care about it. + var firstWaitExtend = 300.millis + + for runCnt in 0 ..< 3: + let startTime = Moment.now() + for testCnt in 0 ..< 3: + await sendMsgProc() + await sleepAsync(20.millis) + + var endTime = Moment.now() + var elapsed: Duration = (endTime - startTime) + await sleepAsync(tokenPeriod - elapsed + firstWaitExtend) + firstWaitEXtend = 100.millis + + ## Cleanup + await allFutures(clientSwitch.stop(), serverSwitch.stop()) + + asyncTest "push message with rate limit reject": + ## Setup + let + serverSwitch = newTestSwitch() + clientSwitch = newTestSwitch() + + await allFutures(serverSwitch.start(), clientSwitch.start()) + + ## Given + var handlerFuture = newFuture[(string, WakuMessage)]() + let handler = proc( + peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[WakuLightPushResult[void]] {.async.} = + handlerFuture.complete((pubsubTopic, message)) + return ok() + + let + server = await newTestWakuLegacyLightpushNode( + serverSwitch, handler, some((3, 500.millis)) + ) + client = newTestWakuLegacyLightpushClient(clientSwitch) + + let serverPeerId = serverSwitch.peerInfo.toRemotePeerInfo() + let topic = DefaultPubsubTopic + + let successProc = proc(): Future[void] {.async.} = + let message = fakeWakuMessage() + handlerFuture = newFuture[(string, WakuMessage)]() + let requestRes = + await client.publish(DefaultPubsubTopic, message, peer = serverPeerId) + discard await handlerFuture.withTimeout(10.millis) + + check: + requestRes.isOk() + handlerFuture.finished() + let (handledMessagePubsubTopic, handledMessage) = 
handlerFuture.read() + check: + handledMessagePubsubTopic == DefaultPubsubTopic + handledMessage == message + + let rejectProc = proc(): Future[void] {.async.} = + let message = fakeWakuMessage() + handlerFuture = newFuture[(string, WakuMessage)]() + let requestRes = + await client.publish(DefaultPubsubTopic, message, peer = serverPeerId) + discard await handlerFuture.withTimeout(10.millis) + + check: + requestRes.isErr() + requestRes.error == "TOO_MANY_REQUESTS" + + for testCnt in 0 .. 2: + await successProc() + await sleepAsync(20.millis) + + await rejectProc() + + await sleepAsync(500.millis) + + ## next one shall succeed due to the rate limit time window has passed + await successProc() + + ## Cleanup + await allFutures(clientSwitch.stop(), serverSwitch.stop()) diff --git a/tests/wakunode_rest/test_all.nim b/tests/wakunode_rest/test_all.nim index 9c3de0f13..6e34b6fdd 100644 --- a/tests/wakunode_rest/test_all.nim +++ b/tests/wakunode_rest/test_all.nim @@ -4,6 +4,7 @@ import ./test_rest_debug_serdes, ./test_rest_debug, ./test_rest_filter, + ./test_rest_lightpush_legacy, ./test_rest_health, ./test_rest_relay_serdes, ./test_rest_relay, diff --git a/tests/wakunode_rest/test_rest_lightpush.nim b/tests/wakunode_rest/test_rest_lightpush_legacy.nim similarity index 94% rename from tests/wakunode_rest/test_rest_lightpush.nim rename to tests/wakunode_rest/test_rest_lightpush_legacy.nim index 2ff0bf26a..3490a5f80 100644 --- a/tests/wakunode_rest/test_rest_lightpush.nim +++ b/tests/wakunode_rest/test_rest_lightpush_legacy.nim @@ -15,13 +15,13 @@ import waku_core, waku_node, node/peer_manager, - waku_lightpush/common, + waku_lightpush_legacy/common, waku_api/rest/server, waku_api/rest/client, waku_api/rest/responses, - waku_api/rest/lightpush/types, - waku_api/rest/lightpush/handlers as lightpush_api, - waku_api/rest/lightpush/client as lightpush_api_client, + waku_api/rest/legacy_lightpush/types, + waku_api/rest/legacy_lightpush/handlers as lightpush_api, + 
waku_api/rest/legacy_lightpush/client as lightpush_api_client, waku_relay, common/rate_limit/setting, ], @@ -61,8 +61,8 @@ proc init( await testSetup.consumerNode.mountRelay() await testSetup.serviceNode.mountRelay() - await testSetup.serviceNode.mountLightPush(rateLimit) - testSetup.pushNode.mountLightPushClient() + await testSetup.serviceNode.mountLegacyLightPush(rateLimit) + testSetup.pushNode.mountLegacyLightPushClient() testSetup.serviceNode.peerManager.addServicePeer( testSetup.consumerNode.peerInfo.toRemotePeerInfo(), WakuRelayCodec @@ -73,7 +73,7 @@ proc init( ) testSetup.pushNode.peerManager.addServicePeer( - testSetup.serviceNode.peerInfo.toRemotePeerInfo(), WakuLightPushCodec + testSetup.serviceNode.peerInfo.toRemotePeerInfo(), WakuLegacyLightPushCodec ) var restPort = Port(0) @@ -209,8 +209,7 @@ suite "Waku v2 Rest API - lightpush": await restLightPushTest.shutdown() - # disabled due to this bug in nim-chronos https://github.com/status-im/nim-chronos/issues/500 - xasyncTest "Request rate limit push message": + asyncTest "Request rate limit push message": # Given let budgetCap = 3 let tokenPeriod = 500.millis @@ -273,7 +272,7 @@ suite "Waku v2 Rest API - lightpush": let endTime = Moment.now() let elapsed: Duration = (endTime - startTime) - await sleepAsync(tokenPeriod - elapsed) + await sleepAsync(tokenPeriod - elapsed + 10.millis) await restLightPushTest.shutdown() diff --git a/vendor/nim-libp2p b/vendor/nim-libp2p index a4f0a638e..78a434405 160000 --- a/vendor/nim-libp2p +++ b/vendor/nim-libp2p @@ -1 +1 @@ -Subproject commit a4f0a638e718f05ecec01ae3a6ad2838714e7e40 +Subproject commit 78a434405435b69a24e8b263d48d622d57c4db5b diff --git a/waku/factory/node_factory.nim b/waku/factory/node_factory.nim index 625f1a77b..d2d6b1d99 100644 --- a/waku/factory/node_factory.nim +++ b/waku/factory/node_factory.nim @@ -16,6 +16,7 @@ import ../waku_enr/sharding, ../waku_node, ../waku_core, + ../waku_core/codecs, ../waku_rln_relay, ../discovery/waku_dnsdisc, 
../waku_archive/retention_policy as policy, @@ -33,7 +34,7 @@ import ../node/peer_manager, ../node/peer_manager/peer_store/waku_peer_storage, ../node/peer_manager/peer_store/migrations as peer_store_sqlite_migrations, - ../waku_lightpush/common, + ../waku_lightpush_legacy/common, ../common/utils/parse_size_units, ../common/rate_limit/setting, ../common/databases/dburl @@ -359,14 +360,17 @@ proc setupProtocols( if conf.lightpush: try: await mountLightPush(node, node.rateLimitSettings.getSetting(LIGHTPUSH)) + await mountLegacyLightPush(node, node.rateLimitSettings.getSetting(LIGHTPUSH)) except CatchableError: return err("failed to mount waku lightpush protocol: " & getCurrentExceptionMsg()) mountLightPushClient(node) + mountLegacyLightPushClient(node) if conf.lightpushnode != "": let lightPushNode = parsePeerInfo(conf.lightpushnode) if lightPushNode.isOk(): node.peerManager.addServicePeer(lightPushNode.value, WakuLightPushCodec) + node.peerManager.addServicePeer(lightPushNode.value, WakuLegacyLightPushCodec) else: return err("failed to set node waku lightpush peer: " & lightPushNode.error) diff --git a/waku/incentivization/reputation_manager.nim b/waku/incentivization/reputation_manager.nim index d5097b711..3177c0fdf 100644 --- a/waku/incentivization/reputation_manager.nim +++ b/waku/incentivization/reputation_manager.nim @@ -1,5 +1,5 @@ import tables, std/options -import waku/waku_lightpush/rpc +import ../waku_lightpush_legacy/rpc type PeerId = string diff --git a/waku/node/delivery_monitor/delivery_monitor.nim b/waku/node/delivery_monitor/delivery_monitor.nim index 28f9e2507..4dda542cc 100644 --- a/waku/node/delivery_monitor/delivery_monitor.nim +++ b/waku/node/delivery_monitor/delivery_monitor.nim @@ -20,7 +20,7 @@ proc new*( T: type DeliveryMonitor, storeClient: WakuStoreClient, wakuRelay: protocol.WakuRelay, - wakuLightpushClient: WakuLightPushClient, + wakuLightpushClient: WakuLightpushClient, wakuFilterClient: WakuFilterClient, ): Result[T, string] = ## 
storeClient is needed to give store visitility to DeliveryMonitor diff --git a/waku/node/delivery_monitor/send_monitor.nim b/waku/node/delivery_monitor/send_monitor.nim index 8a67e46b1..adc9f03bd 100644 --- a/waku/node/delivery_monitor/send_monitor.nim +++ b/waku/node/delivery_monitor/send_monitor.nim @@ -171,9 +171,9 @@ proc processMessages(self: SendMonitor) {.async.} = let msg = deliveryInfo.msg if not self.wakuRelay.isNil(): debug "trying to publish again with wakuRelay", msgHash, pubsubTopic - let ret = await self.wakuRelay.publish(pubsubTopic, msg) - if ret == 0: - error "could not publish with wakuRelay.publish", msgHash, pubsubTopic + (await self.wakuRelay.publish(pubsubTopic, msg)).isOkOr: + error "could not publish with wakuRelay.publish", + msgHash, pubsubTopic, error = $error continue if not self.wakuLightpushClient.isNil(): diff --git a/waku/node/waku_node.nim b/waku/node/waku_node.nim index 7ce23914d..e10d705ff 100644 --- a/waku/node/waku_node.nim +++ b/waku/node/waku_node.nim @@ -20,7 +20,8 @@ import libp2p/builders, libp2p/transports/transport, libp2p/transports/tcptransport, - libp2p/transports/wstransport + libp2p/transports/wstransport, + libp2p/utility import ../waku_core, ../waku_core/topics/sharding, @@ -40,11 +41,10 @@ import ../waku_filter_v2/subscriptions as filter_subscriptions, ../waku_metadata, ../waku_rendezvous/protocol, - ../waku_lightpush/client as lightpush_client, - ../waku_lightpush/common, - ../waku_lightpush/protocol, - ../waku_lightpush/self_req_handler, - ../waku_lightpush/callbacks, + ../waku_lightpush_legacy/client as legacy_ligntpuhs_client, + ../waku_lightpush_legacy as legacy_lightpush_protocol, + ../waku_lightpush/client as ligntpuhs_client, + ../waku_lightpush as lightpush_protocol, ../waku_enr, ../waku_peer_exchange, ../waku_rln_relay, @@ -105,6 +105,8 @@ type wakuFilter*: waku_filter_v2.WakuFilter wakuFilterClient*: filter_client.WakuFilterClient wakuRlnRelay*: WakuRLNRelay + wakuLegacyLightPush*: WakuLegacyLightPush 
+ wakuLegacyLightpushClient*: WakuLegacyLightPushClient wakuLightPush*: WakuLightPush wakuLightpushClient*: WakuLightPushClient wakuPeerExchange*: WakuPeerExchange @@ -250,7 +252,6 @@ proc registerRelayDefaultHandler*(node: WakuNode, topic: PubsubTopic) = return proc traceHandler(topic: PubsubTopic, msg: WakuMessage) {.async, gcsafe.} = - let msg_hash = topic.computeMessageHash(msg).to0xHex() let msgSizeKB = msg.payload.len / 1000 waku_node_messages.inc(labelValues = ["relay"]) @@ -979,53 +980,53 @@ proc setupStoreResume*(node: WakuNode) = return ## Waku lightpush - -proc mountLightPush*( +proc mountLegacyLightPush*( node: WakuNode, rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit ) {.async.} = - info "mounting light push" + info "mounting legacy light push" - var pushHandler = + let pushHandler = if node.wakuRelay.isNil: - debug "mounting lightpush without relay (nil)" - getNilPushHandler() + debug "mounting legacy lightpush without relay (nil)" + legacy_lightpush_protocol.getNilPushHandler() else: - debug "mounting lightpush with relay" + debug "mounting legacy lightpush with relay" let rlnPeer = if isNil(node.wakuRlnRelay): - debug "mounting lightpush without rln-relay" + debug "mounting legacy lightpush without rln-relay" none(WakuRLNRelay) else: - debug "mounting lightpush with rln-relay" + debug "mounting legacy lightpush with rln-relay" some(node.wakuRlnRelay) - getRelayPushHandler(node.wakuRelay, rlnPeer) + legacy_lightpush_protocol.getRelayPushHandler(node.wakuRelay, rlnPeer) - node.wakuLightPush = - WakuLightPush.new(node.peerManager, node.rng, pushHandler, some(rateLimit)) + node.wakuLegacyLightPush = + WakuLegacyLightPush.new(node.peerManager, node.rng, pushHandler, some(rateLimit)) if node.started: # Node has started already. Let's start lightpush too. 
- await node.wakuLightPush.start() + await node.wakuLegacyLightPush.start() - node.switch.mount(node.wakuLightPush, protocolMatcher(WakuLightPushCodec)) + node.switch.mount(node.wakuLegacyLightPush, protocolMatcher(WakuLegacyLightPushCodec)) -proc mountLightPushClient*(node: WakuNode) = - info "mounting light push client" +proc mountLegacyLightPushClient*(node: WakuNode) = + info "mounting legacy light push client" - node.wakuLightpushClient = WakuLightPushClient.new(node.peerManager, node.rng) + node.wakuLegacyLightpushClient = + WakuLegacyLightPushClient.new(node.peerManager, node.rng) -proc lightpushPublish*( +proc legacyLightpushPublish*( node: WakuNode, pubsubTopic: Option[PubsubTopic], message: WakuMessage, peer: RemotePeerInfo, -): Future[WakuLightPushResult[string]] {.async, gcsafe.} = +): Future[legacy_lightpush_protocol.WakuLightPushResult[string]] {.async, gcsafe.} = ## Pushes a `WakuMessage` to a node which relays it further on PubSub topic. ## Returns whether relaying was successful or not. ## `WakuMessage` should contain a `contentTopic` field for light node ## functionality. 
- if node.wakuLightpushClient.isNil() and node.wakuLightPush.isNil(): - error "failed to publish message as lightpush not available" + if node.wakuLegacyLightpushClient.isNil() and node.wakuLegacyLightPush.isNil(): + error "failed to publish message as legacy lightpush not available" return err("Waku lightpush not available") let internalPublish = proc( @@ -1033,23 +1034,24 @@ proc lightpushPublish*( pubsubTopic: PubsubTopic, message: WakuMessage, peer: RemotePeerInfo, - ): Future[WakuLightPushResult[string]] {.async, gcsafe.} = + ): Future[legacy_lightpush_protocol.WakuLightPushResult[string]] {.async, gcsafe.} = let msgHash = pubsubTopic.computeMessageHash(message).to0xHex() - if not node.wakuLightpushClient.isNil(): - notice "publishing message with lightpush", + if not node.wakuLegacyLightpushClient.isNil(): + notice "publishing message with legacy lightpush", pubsubTopic = pubsubTopic, contentTopic = message.contentTopic, target_peer_id = peer.peerId, msg_hash = msgHash - return await node.wakuLightpushClient.publish(pubsubTopic, message, peer) + return await node.wakuLegacyLightpushClient.publish(pubsubTopic, message, peer) - if not node.wakuLightPush.isNil(): - notice "publishing message with self hosted lightpush", + if not node.wakuLegacyLightPush.isNil(): + notice "publishing message with self hosted legacy lightpush", pubsubTopic = pubsubTopic, contentTopic = message.contentTopic, target_peer_id = peer.peerId, msg_hash = msgHash - return await node.wakuLightPush.handleSelfLightPushRequest(pubsubTopic, message) + return + await node.wakuLegacyLightPush.handleSelfLightPushRequest(pubsubTopic, message) try: if pubsubTopic.isSome(): return await internalPublish(node, pubsubTopic.get(), message, peer) @@ -1068,26 +1070,119 @@ proc lightpushPublish*( return err(getCurrentExceptionMsg()) # TODO: Move to application module (e.g., wakunode2.nim) -proc lightpushPublish*( +proc legacyLightpushPublish*( node: WakuNode, pubsubTopic: Option[PubsubTopic], message: 
WakuMessage -): Future[WakuLightPushResult[string]] {. - async, gcsafe, deprecated: "Use 'node.lightpushPublish()' instead" +): Future[legacy_lightpush_protocol.WakuLightPushResult[string]] {. + async, gcsafe, deprecated: "Use 'node.legacyLightpushPublish()' instead" .} = - if node.wakuLightpushClient.isNil() and node.wakuLightPush.isNil(): - error "failed to publish message as lightpush not available" - return err("waku lightpush not available") + if node.wakuLegacyLightpushClient.isNil() and node.wakuLegacyLightPush.isNil(): + error "failed to publish message as legacy lightpush not available" + return err("waku legacy lightpush not available") var peerOpt: Option[RemotePeerInfo] = none(RemotePeerInfo) - if not node.wakuLightpushClient.isNil(): - peerOpt = node.peerManager.selectPeer(WakuLightPushCodec) + if not node.wakuLegacyLightpushClient.isNil(): + peerOpt = node.peerManager.selectPeer(WakuLegacyLightPushCodec) if peerOpt.isNone(): let msg = "no suitable remote peers" error "failed to publish message", err = msg return err(msg) - elif not node.wakuLightPush.isNil(): + elif not node.wakuLegacyLightPush.isNil(): peerOpt = some(RemotePeerInfo.init($node.switch.peerInfo.peerId)) - return await node.lightpushPublish(pubsubTopic, message, peer = peerOpt.get()) + return await node.legacyLightpushPublish(pubsubTopic, message, peer = peerOpt.get()) + +proc mountLightPush*( + node: WakuNode, rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit +) {.async.} = + info "mounting light push" + + let pushHandler = + if node.wakuRelay.isNil(): + debug "mounting lightpush v2 without relay (nil)" + lightpush_protocol.getNilPushHandler() + else: + debug "mounting lightpush with relay" + let rlnPeer = + if isNil(node.wakuRlnRelay): + debug "mounting lightpush without rln-relay" + none(WakuRLNRelay) + else: + debug "mounting lightpush with rln-relay" + some(node.wakuRlnRelay) + lightpush_protocol.getRelayPushHandler(node.wakuRelay, rlnPeer) + + node.wakuLightPush = 
WakuLightPush.new( + node.peerManager, node.rng, pushHandler, node.wakuSharding, some(rateLimit) + ) + + if node.started: + # Node has started already. Let's start lightpush too. + await node.wakuLightPush.start() + + node.switch.mount(node.wakuLightPush, protocolMatcher(WakuLightPushCodec)) + +proc mountLightPushClient*(node: WakuNode) = + info "mounting light push client" + + node.wakuLightpushClient = WakuLightPushClient.new(node.peerManager, node.rng) + +proc lightpushPublishHandler( + node: WakuNode, + pubsubTopic: PubsubTopic, + message: WakuMessage, + peer: RemotePeerInfo | PeerInfo, +): Future[lightpush_protocol.WakuLightPushResult] {.async.} = + let msgHash = pubsubTopic.computeMessageHash(message).to0xHex() + if not node.wakuLightpushClient.isNil(): + notice "publishing message with lightpush", + pubsubTopic = pubsubTopic, + contentTopic = message.contentTopic, + target_peer_id = peer.peerId, + msg_hash = msgHash + return await node.wakuLightpushClient.publish(some(pubsubTopic), message, peer) + + if not node.wakuLightPush.isNil(): + notice "publishing message with self hosted lightpush", + pubsubTopic = pubsubTopic, + contentTopic = message.contentTopic, + target_peer_id = peer.peerId, + msg_hash = msgHash + return + await node.wakuLightPush.handleSelfLightPushRequest(some(pubsubTopic), message) + +proc lightpushPublish*( + node: WakuNode, + pubsubTopic: Option[PubsubTopic], + message: WakuMessage, + peerOpt: Option[RemotePeerInfo] = none(RemotePeerInfo), +): Future[lightpush_protocol.WakuLightPushResult] {.async.} = + if node.wakuLightpushClient.isNil() and node.wakuLightPush.isNil(): + error "failed to publish message as lightpush not available" + return lighpushErrorResult(SERVICE_NOT_AVAILABLE, "Waku lightpush not available") + + let toPeer: RemotePeerInfo = peerOpt.valueOr: + if not node.wakuLightPush.isNil(): + RemotePeerInfo.init(node.peerId()) + elif not node.wakuLightpushClient.isNil(): + 
node.peerManager.selectPeer(WakuLightPushCodec).valueOr: + let msg = "no suitable remote peers" + error "failed to publish message", msg = msg + return lighpushErrorResult(NO_PEERS_TO_RELAY, msg) + else: + return lighpushErrorResult(NO_PEERS_TO_RELAY, "no suitable remote peers") + + let pubsubForPublish = pubSubTopic.valueOr: + let parsedTopic = NsContentTopic.parse(message.contentTopic).valueOr: + let msg = "Invalid content-topic:" & $error + error "lightpush request handling error", error = msg + return lighpushErrorResult(INVALID_MESSAGE_ERROR, msg) + + node.wakuSharding.getShard(parsedTopic).valueOr: + let msg = "Autosharding error: " & error + error "lightpush publish error", error = msg + return lighpushErrorResult(INTERNAL_SERVER_ERROR, msg) + + return await lightpushPublishHandler(node, pubsubForPublish, message, toPeer) ## Waku RLN Relay proc mountRlnRelay*( diff --git a/waku/waku_api/rest/admin/handlers.nim b/waku/waku_api/rest/admin/handlers.nim index 1e24ad56d..c140c46d6 100644 --- a/waku/waku_api/rest/admin/handlers.nim +++ b/waku/waku_api/rest/admin/handlers.nim @@ -12,7 +12,7 @@ import ../../../waku_store_legacy/common, ../../../waku_store/common, ../../../waku_filter_v2, - ../../../waku_lightpush/common, + ../../../waku_lightpush_legacy/common, ../../../waku_relay, ../../../waku_peer_exchange, ../../../waku_node, @@ -85,6 +85,18 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = ) tuplesToWakuPeers(peers, legacyStorePeers) + let legacyLightpushPeers = node.peerManager.wakuPeerStore + .peers(WakuLegacyLightPushCodec) + .mapIt( + ( + multiaddr: constructMultiaddrStr(it), + protocol: WakuLegacyLightPushCodec, + connected: it.connectedness == Connectedness.Connected, + origin: it.origin, + ) + ) + tuplesToWakuPeers(peers, legacyLightpushPeers) + let lightpushPeers = node.peerManager.wakuPeerStore.peers(WakuLightPushCodec).mapIt( ( multiaddr: constructMultiaddrStr(it), diff --git a/waku/waku_api/rest/builder.nim 
b/waku/waku_api/rest/builder.nim index 325dcce06..6fe1b5e2b 100644 --- a/waku/waku_api/rest/builder.nim +++ b/waku/waku_api/rest/builder.nim @@ -12,6 +12,7 @@ import waku/waku_api/rest/debug/handlers as rest_debug_api, waku/waku_api/rest/relay/handlers as rest_relay_api, waku/waku_api/rest/filter/handlers as rest_filter_api, + waku/waku_api/rest/legacy_lightpush/handlers as rest_legacy_lightpush_api, waku/waku_api/rest/lightpush/handlers as rest_lightpush_api, waku/waku_api/rest/store/handlers as rest_store_api, waku/waku_api/rest/legacy_store/handlers as rest_store_legacy_api, @@ -176,14 +177,17 @@ proc startRestServerProtocolSupport*( ## Light push API ## Install it either if lightpushnode (lightpush service node) is configured and client is mounted) ## or install it to be used with self-hosted lightpush service - if (conf.lightpushnode != "" and node.wakuLightpushClient != nil) or - (conf.lightpush and node.wakuLightPush != nil and node.wakuRelay != nil): + if (conf.lightpushnode != "" and node.wakuLegacyLightpushClient != nil) or + (conf.lightpush and node.wakuLegacyLightPush != nil and node.wakuRelay != nil): let lightDiscoHandler = if not wakuDiscv5.isNil(): some(defaultDiscoveryHandler(wakuDiscv5, Lightpush)) else: none(DiscoveryHandler) + rest_legacy_lightpush_api.installLightPushRequestHandler( + router, node, lightDiscoHandler + ) rest_lightpush_api.installLightPushRequestHandler(router, node, lightDiscoHandler) else: restServerNotInstalledTab["lightpush"] = diff --git a/waku/waku_api/rest/legacy_lightpush/client.nim b/waku/waku_api/rest/legacy_lightpush/client.nim new file mode 100644 index 000000000..f0932e99f --- /dev/null +++ b/waku/waku_api/rest/legacy_lightpush/client.nim @@ -0,0 +1,23 @@ +{.push raises: [].} + +import + json, + std/sets, + stew/byteutils, + strformat, + chronicles, + json_serialization, + json_serialization/std/options, + presto/[route, client, common] +import ../../../waku_core, ../serdes, ../responses, ../rest_serdes, ./types + 
+export types + +proc encodeBytes*(value: PushRequest, contentType: string): RestResult[seq[byte]] = + return encodeBytesOf(value, contentType) + +proc sendPushRequest*( + body: PushRequest +): RestResponse[string] {. + rest, endpoint: "/lightpush/v1/message", meth: HttpMethod.MethodPost +.} diff --git a/waku/waku_api/rest/legacy_lightpush/handlers.nim b/waku/waku_api/rest/legacy_lightpush/handlers.nim new file mode 100644 index 000000000..5d7c66bb1 --- /dev/null +++ b/waku/waku_api/rest/legacy_lightpush/handlers.nim @@ -0,0 +1,91 @@ +{.push raises: [].} + +import + std/strformat, + stew/byteutils, + chronicles, + json_serialization, + json_serialization/std/options, + presto/route, + presto/common + +import + waku/node/peer_manager, + waku/waku_lightpush_legacy/common, + ../../../waku_node, + ../../handlers, + ../serdes, + ../responses, + ../rest_serdes, + ./types + +export types + +logScope: + topics = "waku node rest legacy lightpush api" + +const FutTimeoutForPushRequestProcessing* = 5.seconds + +const NoPeerNoDiscoError = + RestApiResponse.serviceUnavailable("No suitable service peer & no discovery method") + +const NoPeerNoneFoundError = + RestApiResponse.serviceUnavailable("No suitable service peer & none discovered") + +proc useSelfHostedLightPush(node: WakuNode): bool = + return node.wakuLegacyLightPush != nil and node.wakuLegacyLightPushClient == nil + +#### Request handlers + +const ROUTE_LIGHTPUSH = "/lightpush/v1/message" + +proc installLightPushRequestHandler*( + router: var RestRouter, + node: WakuNode, + discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), +) = + router.api(MethodPost, ROUTE_LIGHTPUSH) do( + contentBody: Option[ContentBody] + ) -> RestApiResponse: + ## Send a request to push a waku message + debug "post", ROUTE_LIGHTPUSH, contentBody + + let decodedBody = decodeRequestBody[PushRequest](contentBody) + + if decodedBody.isErr(): + return decodedBody.error() + + let req: PushRequest = decodedBody.value() + + let msg = 
req.message.toWakuMessage().valueOr: + return RestApiResponse.badRequest("Invalid message: " & $error) + + var peer = RemotePeerInfo.init($node.switch.peerInfo.peerId) + if useSelfHostedLightPush(node): + discard + else: + peer = node.peerManager.selectPeer(WakuLegacyLightPushCodec).valueOr: + let handler = discHandler.valueOr: + return NoPeerNoDiscoError + + let peerOp = (await handler()).valueOr: + return RestApiResponse.internalServerError("No value in peerOp: " & $error) + + peerOp.valueOr: + return NoPeerNoneFoundError + + let subFut = node.legacyLightpushPublish(req.pubsubTopic, msg, peer) + + if not await subFut.withTimeout(FutTimeoutForPushRequestProcessing): + error "Failed to request a message push due to timeout!" + return RestApiResponse.serviceUnavailable("Push request timed out") + + if subFut.value().isErr(): + if subFut.value().error == TooManyRequestsMessage: + return RestApiResponse.tooManyRequests("Request rate limmit reached") + + return RestApiResponse.serviceUnavailable( + fmt("Failed to request a message push: {subFut.value().error}") + ) + + return RestApiResponse.ok() diff --git a/waku/waku_api/rest/legacy_lightpush/types.nim b/waku/waku_api/rest/legacy_lightpush/types.nim new file mode 100644 index 000000000..60368403f --- /dev/null +++ b/waku/waku_api/rest/legacy_lightpush/types.nim @@ -0,0 +1,67 @@ +{.push raises: [].} + +import + std/[sets, strformat], + chronicles, + json_serialization, + json_serialization/std/options, + presto/[route, client] + +import ../../../waku_core, ../relay/types as relay_types, ../serdes + +export relay_types + +#### Types + +type PushRequest* = object + pubsubTopic*: Option[PubSubTopic] + message*: RelayWakuMessage + +#### Serialization and deserialization + +proc writeValue*( + writer: var JsonWriter[RestJson], value: PushRequest +) {.raises: [IOError].} = + writer.beginRecord() + if value.pubsubTopic.isSome(): + writer.writeField("pubsubTopic", value.pubsubTopic.get()) + writer.writeField("message", 
value.message) + writer.endRecord() + +proc readValue*( + reader: var JsonReader[RestJson], value: var PushRequest +) {.raises: [SerializationError, IOError].} = + var + pubsubTopic = none(PubsubTopic) + message = none(RelayWakuMessage) + + var keys = initHashSet[string]() + for fieldName in readObjectFields(reader): + # Check for reapeated keys + if keys.containsOrIncl(fieldName): + let err = + try: + fmt"Multiple `{fieldName}` fields found" + except CatchableError: + "Multiple fields with the same name found" + reader.raiseUnexpectedField(err, "PushRequest") + + case fieldName + of "pubsubTopic": + pubsubTopic = some(reader.readValue(PubsubTopic)) + of "message": + message = some(reader.readValue(RelayWakuMessage)) + else: + unrecognizedFieldWarning(value) + + if message.isNone(): + reader.raiseUnexpectedValue("Field `message` is missing") + + value = PushRequest( + pubsubTopic: + if pubsubTopic.isNone() or pubsubTopic.get() == "": + none(string) + else: + some(pubsubTopic.get()), + message: message.get(), + ) diff --git a/waku/waku_api/rest/lightpush/client.nim b/waku/waku_api/rest/lightpush/client.nim index 3e7f85524..abf832a3c 100644 --- a/waku/waku_api/rest/lightpush/client.nim +++ b/waku/waku_api/rest/lightpush/client.nim @@ -13,14 +13,11 @@ import ../../../waku_core, ../serdes, ../responses, ../rest_serdes, ./types export types -logScope: - topics = "waku node rest client v2" - proc encodeBytes*(value: PushRequest, contentType: string): RestResult[seq[byte]] = return encodeBytesOf(value, contentType) proc sendPushRequest*( body: PushRequest -): RestResponse[string] {. - rest, endpoint: "/lightpush/v1/message", meth: HttpMethod.MethodPost +): RestResponse[PushResponse] {. 
+ rest, endpoint: "/lightpush/v3/message", meth: HttpMethod.MethodPost .} diff --git a/waku/waku_api/rest/lightpush/handlers.nim b/waku/waku_api/rest/lightpush/handlers.nim index 6003c8a59..cbb94e16e 100644 --- a/waku/waku_api/rest/lightpush/handlers.nim +++ b/waku/waku_api/rest/lightpush/handlers.nim @@ -24,7 +24,7 @@ export types logScope: topics = "waku node rest lightpush api" -const futTimeoutForPushRequestProcessing* = 5.seconds +const FutTimeoutForPushRequestProcessing* = 5.seconds const NoPeerNoDiscoError = RestApiResponse.serviceUnavailable("No suitable service peer & no discovery method") @@ -33,11 +33,32 @@ const NoPeerNoneFoundError = RestApiResponse.serviceUnavailable("No suitable service peer & none discovered") proc useSelfHostedLightPush(node: WakuNode): bool = - return node.wakuLightPush != nil and node.wakuLightPushClient == nil + return node.wakuLegacyLightPush != nil and node.wakuLegacyLightPushClient == nil + +proc convertErrorKindToHttpStatus(statusCode: LightpushStatusCode): HttpCode = + ## Lightpush status codes are matching HTTP status codes by design + return HttpCode(statusCode.int32) + +proc makeRestResponse(response: WakuLightPushResult): RestApiResponse = + var httpStatus: HttpCode = Http200 + var apiResponse: PushResponse + + if response.isOk(): + apiResponse.relayPeerCount = some(response.get()) + else: + httpStatus = convertErrorKindToHttpStatus(response.error().code) + apiResponse.statusDesc = response.error().desc + + let restResp = RestApiResponse.jsonResponse(apiResponse, status = httpStatus).valueOr: + error "An error occurred while building the json response: ", error = error + return RestApiResponse.internalServerError( + fmt("An error occurred while building the json response: {error}") + ) + + return restResp #### Request handlers - -const ROUTE_LIGHTPUSH* = "/lightpush/v1/message" +const ROUTE_LIGHTPUSH = "/lightpush/v3/message" proc installLightPushRequestHandler*( router: var RestRouter, @@ -50,21 +71,17 @@ proc 
installLightPushRequestHandler*( ## Send a request to push a waku message debug "post", ROUTE_LIGHTPUSH, contentBody - let decodedBody = decodeRequestBody[PushRequest](contentBody) - - if decodedBody.isErr(): - return decodedBody.error() - - let req: PushRequest = decodedBody.value() + let req: PushRequest = decodeRequestBody[PushRequest](contentBody).valueOr: + return RestApiResponse.badRequest("Invalid push request: " & $error) let msg = req.message.toWakuMessage().valueOr: return RestApiResponse.badRequest("Invalid message: " & $error) - var peer = RemotePeerInfo.init($node.switch.peerInfo.peerId) + var toPeer = none(RemotePeerInfo) if useSelfHostedLightPush(node): discard else: - peer = node.peerManager.selectPeer(WakuLightPushCodec).valueOr: + let aPeer = node.peerManager.selectPeer(WakuLightPushCodec).valueOr: let handler = discHandler.valueOr: return NoPeerNoDiscoError @@ -73,19 +90,12 @@ proc installLightPushRequestHandler*( peerOp.valueOr: return NoPeerNoneFoundError + toPeer = some(aPeer) - let subFut = node.lightpushPublish(req.pubsubTopic, msg, peer) + let subFut = node.lightpushPublish(req.pubsubTopic, msg, toPeer) - if not await subFut.withTimeout(futTimeoutForPushRequestProcessing): + if not await subFut.withTimeout(FutTimeoutForPushRequestProcessing): error "Failed to request a message push due to timeout!" 
return RestApiResponse.serviceUnavailable("Push request timed out") - if subFut.value().isErr(): - if subFut.value().error == TooManyRequestsMessage: - return RestApiResponse.tooManyRequests("Request rate limmit reached") - - return RestApiResponse.serviceUnavailable( - fmt("Failed to request a message push: {subFut.value().error}") - ) - - return RestApiResponse.ok() + return makeRestResponse(subFut.value()) diff --git a/waku/waku_api/rest/lightpush/types.nim b/waku/waku_api/rest/lightpush/types.nim index 60368403f..1fb87ab45 100644 --- a/waku/waku_api/rest/lightpush/types.nim +++ b/waku/waku_api/rest/lightpush/types.nim @@ -13,12 +13,16 @@ export relay_types #### Types -type PushRequest* = object - pubsubTopic*: Option[PubSubTopic] - message*: RelayWakuMessage +type + PushRequest* = object + pubsubTopic*: Option[PubSubTopic] + message*: RelayWakuMessage + + PushResponse* = object + statusDesc*: Option[string] + relayPeerCount*: Option[uint32] #### Serialization and deserialization - proc writeValue*( writer: var JsonWriter[RestJson], value: PushRequest ) {.raises: [IOError].} = @@ -65,3 +69,46 @@ proc readValue*( some(pubsubTopic.get()), message: message.get(), ) + +proc writeValue*( + writer: var JsonWriter[RestJson], value: PushResponse +) {.raises: [IOError].} = + writer.beginRecord() + if value.statusDesc.isSome(): + writer.writeField("statusDesc", value.statusDesc.get()) + if value.relayPeerCount.isSome(): + writer.writeField("relayPeerCount", value.relayPeerCount.get()) + writer.endRecord() + +proc readValue*( + reader: var JsonReader[RestJson], value: var PushResponse +) {.raises: [SerializationError, IOError].} = + var + statusDesc = none(string) + relayPeerCount = none(uint32) + + var keys = initHashSet[string]() + for fieldName in readObjectFields(reader): + # Check for reapeated keys + if keys.containsOrIncl(fieldName): + let err = + try: + fmt"Multiple `{fieldName}` fields found" + except CatchableError: + "Multiple fields with the same name found" + 
reader.raiseUnexpectedField(err, "PushResponse") + + case fieldName + of "statusDesc": + statusDesc = some(reader.readValue(string)) + of "relayPeerCount": + relayPeerCount = some(reader.readValue(uint32)) + else: + unrecognizedFieldWarning(value) + + if relayPeerCount.isNone() and statusDesc.isNone(): + reader.raiseUnexpectedValue( + "Fields are missing, either `relayPeerCount` or `statusDesc` must be present" + ) + + value = PushResponse(statusDesc: statusDesc, relayPeerCount: relayPeerCount) diff --git a/waku/waku_core.nim b/waku/waku_core.nim index 33021d5a8..44dcce37d 100644 --- a/waku/waku_core.nim +++ b/waku/waku_core.nim @@ -4,6 +4,7 @@ import ./waku_core/message, ./waku_core/peers, ./waku_core/subscription, - ./waku_core/multiaddrstr + ./waku_core/multiaddrstr, + ./waku_core/codecs -export topics, time, message, peers, subscription, multiaddrstr +export topics, time, message, peers, subscription, multiaddrstr, codecs diff --git a/waku/waku_core/codecs.nim b/waku/waku_core/codecs.nim index 35a050b72..32a4af9d8 100644 --- a/waku/waku_core/codecs.nim +++ b/waku/waku_core/codecs.nim @@ -3,7 +3,8 @@ const WakuStoreCodec* = "/vac/waku/store-query/3.0.0" WakuFilterSubscribeCodec* = "/vac/waku/filter-subscribe/2.0.0-beta1" WakuFilterPushCodec* = "/vac/waku/filter-push/2.0.0-beta1" - WakuLightPushCodec* = "/vac/waku/lightpush/2.0.0-beta1" + WakuLightPushCodec* = "/vac/waku/lightpush/3.0.0" + WakuLegacyLightPushCodec* = "/vac/waku/lightpush/2.0.0-beta1" WakuSyncCodec* = "/vac/waku/sync/1.0.0" WakuReconciliationCodec* = "/vac/waku/reconciliation/1.0.0" WakuTransferCodec* = "/vac/waku/transfer/1.0.0" diff --git a/waku/waku_core/peers.nim b/waku/waku_core/peers.nim index a821a0474..fdd3d7948 100644 --- a/waku/waku_core/peers.nim +++ b/waku/waku_core/peers.nim @@ -257,7 +257,7 @@ proc parseUrlPeerAddr*( proc toRemotePeerInfo*(enr: enr.Record): Result[RemotePeerInfo, cstring] = ## Converts an ENR to dialable RemotePeerInfo - let typedR = ?enr.toTypedRecord() + let typedR 
= TypedRecord.fromRecord(enr) if not typedR.secp256k1.isSome(): return err("enr: no secp256k1 key in record") @@ -351,12 +351,8 @@ func hasUdpPort*(peer: RemotePeerInfo): bool = let enr = peer.enr.get() - typedEnrRes = enr.toTypedRecord() + typedEnr = TypedRecord.fromRecord(enr) - if typedEnrRes.isErr(): - return false - - let typedEnr = typedEnrRes.get() typedEnr.udp.isSome() or typedEnr.udp6.isSome() proc getAgent*(peer: RemotePeerInfo): string = diff --git a/waku/waku_lightpush.nim b/waku/waku_lightpush.nim index 373478fd9..a90557056 100644 --- a/waku/waku_lightpush.nim +++ b/waku/waku_lightpush.nim @@ -1,3 +1,3 @@ -import ./waku_lightpush/protocol +import ./waku_lightpush/[protocol, common, rpc, rpc_codec, callbacks, self_req_handler] -export protocol +export protocol, common, rpc, rpc_codec, callbacks, self_req_handler diff --git a/waku/waku_lightpush/callbacks.nim b/waku/waku_lightpush/callbacks.nim index 363ebfd3b..d6700412f 100644 --- a/waku/waku_lightpush/callbacks.nim +++ b/waku/waku_lightpush/callbacks.nim @@ -1,5 +1,7 @@ {.push raises: [].} +import stew/results + import ../waku_core, ../waku_relay, @@ -32,28 +34,28 @@ proc checkAndGenerateRLNProof*( proc getNilPushHandler*(): PushMessageHandler = return proc( peer: PeerId, pubsubTopic: string, message: WakuMessage - ): Future[WakuLightPushResult[void]] {.async.} = - return err("no waku relay found") + ): Future[WakuLightPushResult] {.async.} = + return lightpushResultInternalError("no waku relay found") proc getRelayPushHandler*( wakuRelay: WakuRelay, rlnPeer: Option[WakuRLNRelay] = none[WakuRLNRelay]() ): PushMessageHandler = return proc( peer: PeerId, pubsubTopic: string, message: WakuMessage - ): Future[WakuLightPushResult[void]] {.async.} = + ): Future[WakuLightPushResult] {.async.} = # append RLN proof - let msgWithProof = checkAndGenerateRLNProof(rlnPeer, message) - if msgWithProof.isErr(): - return err(msgWithProof.error) + let msgWithProof = checkAndGenerateRLNProof(rlnPeer, message).valueOr: + 
return lighpushErrorResult(OUT_OF_RLN_PROOF, error) - (await wakuRelay.validateMessage(pubSubTopic, msgWithProof.value)).isOkOr: - return err(error) + (await wakuRelay.validateMessage(pubSubTopic, msgWithProof)).isOkOr: + return lighpushErrorResult(INVALID_MESSAGE_ERROR, $error) - let publishedCount = await wakuRelay.publish(pubsubTopic, msgWithProof.value) - if publishedCount == 0: - ## Agreed change expected to the lightpush protocol to better handle such case. https://github.com/waku-org/pm/issues/93 + let publishedResult = await wakuRelay.publish(pubsubTopic, msgWithProof) + + if publishedResult.isErr(): let msgHash = computeMessageHash(pubsubTopic, message).to0xHex() - notice "Lightpush request has not been published to any peers", msg_hash = msgHash - return err(protocol_metrics.notPublishedAnyPeer) + notice "Lightpush request has not been published to any peers", + msg_hash = msgHash, reason = $publishedResult.error + return mapPubishingErrorToPushResult(publishedResult.error) - return ok() + return lightpushSuccessResult(publishedResult.get().uint32) diff --git a/waku/waku_lightpush/client.nim b/waku/waku_lightpush/client.nim index 3e20bf9e3..7aa2d9fa9 100644 --- a/waku/waku_lightpush/client.nim +++ b/waku/waku_lightpush/client.nim @@ -30,80 +30,83 @@ proc addPublishObserver*(wl: WakuLightPushClient, obs: PublishObserver) = wl.publishObservers.add(obs) proc sendPushRequest( - wl: WakuLightPushClient, req: PushRequest, peer: PeerId | RemotePeerInfo -): Future[WakuLightPushResult[void]] {.async, gcsafe.} = - let connOpt = await wl.peerManager.dialPeer(peer, WakuLightPushCodec) - if connOpt.isNone(): - waku_lightpush_errors.inc(labelValues = [dialFailure]) - return err(dialFailure) - let connection = connOpt.get() + wl: WakuLightPushClient, req: LightPushRequest, peer: PeerId | RemotePeerInfo +): Future[WakuLightPushResult] {.async.} = + let connection = (await wl.peerManager.dialPeer(peer, WakuLightPushCodec)).valueOr: + 
waku_lightpush_v3_errors.inc(labelValues = [dialFailure]) + return lighpushErrorResult( + NO_PEERS_TO_RELAY, dialFailure & ": " & $peer & " is not accessible" + ) - let rpc = PushRPC(requestId: generateRequestId(wl.rng), request: some(req)) - await connection.writeLP(rpc.encode().buffer) + await connection.writeLP(req.encode().buffer) var buffer: seq[byte] try: buffer = await connection.readLp(DefaultMaxRpcSize.int) except LPStreamRemoteClosedError: - return err("Exception reading: " & getCurrentExceptionMsg()) + error "Failed to read responose from peer", error = getCurrentExceptionMsg() + return lightpushResultInternalError( + "Failed to read response from peer: " & getCurrentExceptionMsg() + ) - let decodeRespRes = PushRPC.decode(buffer) - if decodeRespRes.isErr(): + let response = LightpushResponse.decode(buffer).valueOr: error "failed to decode response" - waku_lightpush_errors.inc(labelValues = [decodeRpcFailure]) - return err(decodeRpcFailure) + waku_lightpush_v3_errors.inc(labelValues = [decodeRpcFailure]) + return lightpushResultInternalError(decodeRpcFailure) - let pushResponseRes = decodeRespRes.get() - if pushResponseRes.response.isNone(): - waku_lightpush_errors.inc(labelValues = [emptyResponseBodyFailure]) - return err(emptyResponseBodyFailure) + if response.requestId != req.requestId and + response.statusCode != TOO_MANY_REQUESTS.uint32: + error "response failure, requestId mismatch", + requestId = req.requestId, responseRequestId = response.requestId + return lightpushResultInternalError("response failure, requestId mismatch") - let response = pushResponseRes.response.get() - if not response.isSuccess: - if response.info.isSome(): - return err(response.info.get()) - else: - return err("unknown failure") - - return ok() + return toPushResult(response) proc publish*( wl: WakuLightPushClient, - pubSubTopic: PubsubTopic, + pubSubTopic: Option[PubsubTopic] = none(PubsubTopic), message: WakuMessage, - peer: RemotePeerInfo, -): 
Future[WakuLightPushResult[string]] {.async, gcsafe.} = - ## On success, returns the msg_hash of the published message - let msg_hash_hex_str = computeMessageHash(pubsubTopic, message).to0xHex() - let pushRequest = PushRequest(pubSubTopic: pubSubTopic, message: message) - ?await wl.sendPushRequest(pushRequest, peer) + peer: PeerId | RemotePeerInfo, +): Future[WakuLightPushResult] {.async, gcsafe.} = + when peer is PeerId: + info "publish", + peerId = shortLog(peer), + msg_hash = computeMessageHash(pubsubTopic.get(""), message).to0xHex + else: + info "publish", + peerId = shortLog(peer.peerId), + msg_hash = computeMessageHash(pubsubTopic.get(""), message).to0xHex + + let pushRequest = LightpushRequest( + requestId: generateRequestId(wl.rng), pubSubTopic: pubSubTopic, message: message + ) + let publishedCount = ?await wl.sendPushRequest(pushRequest, peer) for obs in wl.publishObservers: - obs.onMessagePublished(pubSubTopic, message) + obs.onMessagePublished(pubSubTopic.get(""), message) - notice "publishing message with lightpush", - pubsubTopic = pubsubTopic, - contentTopic = message.contentTopic, - target_peer_id = peer.peerId, - msg_hash = msg_hash_hex_str - - return ok(msg_hash_hex_str) + return lightpushSuccessResult(publishedCount) proc publishToAny*( wl: WakuLightPushClient, pubSubTopic: PubsubTopic, message: WakuMessage -): Future[WakuLightPushResult[void]] {.async, gcsafe.} = +): Future[WakuLightPushResult] {.async, gcsafe.} = ## This proc is similar to the publish one but in this case ## we don't specify a particular peer and instead we get it from peer manager info "publishToAny", msg_hash = computeMessageHash(pubsubTopic, message).to0xHex let peer = wl.peerManager.selectPeer(WakuLightPushCodec).valueOr: - return err("could not retrieve a peer supporting WakuLightPushCodec") + # TODO: check if it is matches the situation - shall we distinguish client side missing peers from server side? 
+ return lighpushErrorResult(NO_PEERS_TO_RELAY, "no suitable remote peers") - let pushRequest = PushRequest(pubSubTopic: pubSubTopic, message: message) - ?await wl.sendPushRequest(pushRequest, peer) + let pushRequest = LightpushRequest( + requestId: generateRequestId(wl.rng), + pubSubTopic: some(pubSubTopic), + message: message, + ) + let publishedCount = ?await wl.sendPushRequest(pushRequest, peer) for obs in wl.publishObservers: obs.onMessagePublished(pubSubTopic, message) - return ok() + return lightpushSuccessResult(publishedCount) diff --git a/waku/waku_lightpush/common.nim b/waku/waku_lightpush/common.nim index cbdec411f..502e23883 100644 --- a/waku/waku_lightpush/common.nim +++ b/waku/waku_lightpush/common.nim @@ -1,15 +1,82 @@ {.push raises: [].} -import results, chronos, libp2p/peerid -import ../waku_core +import std/options, results, chronos, libp2p/peerid +import ../waku_core, ./rpc, ../waku_relay/protocol from ../waku_core/codecs import WakuLightPushCodec export WakuLightPushCodec -type WakuLightPushResult*[T] = Result[T, string] +type LightpushStatusCode* = enum + SUCCESS = uint32(200) + BAD_REQUEST = uint32(400) + PAYLOAD_TOO_LARGE = uint32(413) + INVALID_MESSAGE_ERROR = uint32(420) + UNSUPPORTED_PUBSUB_TOPIC = uint32(421) + TOO_MANY_REQUESTS = uint32(429) + INTERNAL_SERVER_ERROR = uint32(500) + NO_PEERS_TO_RELAY = uint32(503) + OUT_OF_RLN_PROOF = uint32(504) + SERVICE_NOT_AVAILABLE = uint32(505) + +type ErrorStatus* = tuple[code: LightpushStatusCode, desc: Option[string]] +type WakuLightPushResult* = Result[uint32, ErrorStatus] type PushMessageHandler* = proc( peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage -): Future[WakuLightPushResult[void]] {.async.} +): Future[WakuLightPushResult] {.async.} -const TooManyRequestsMessage* = "TOO_MANY_REQUESTS" +const TooManyRequestsMessage* = "Request rejected due to too many requests" + +func isSuccess*(response: LightPushResponse): bool = + return response.statusCode == 
LightpushStatusCode.SUCCESS.uint32 + +func toPushResult*(response: LightPushResponse): WakuLightPushResult = + if isSuccess(response): + return ok(response.relayPeerCount.get(0)) + else: + return err((response.statusCode.LightpushStatusCode, response.statusDesc)) + +func lightpushSuccessResult*(relayPeerCount: uint32): WakuLightPushResult = + return ok(relayPeerCount) + +func lightpushResultInternalError*(msg: string): WakuLightPushResult = + return err((LightpushStatusCode.INTERNAL_SERVER_ERROR, some(msg))) + +func lighpushErrorResult*( + statusCode: LightpushStatusCode, desc: Option[string] +): WakuLightPushResult = + return err((statusCode, desc)) + +func lighpushErrorResult*( + statusCode: LightpushStatusCode, desc: string +): WakuLightPushResult = + return err((statusCode, some(desc))) + +func mapPubishingErrorToPushResult*( + publishOutcome: PublishOutcome +): WakuLightPushResult = + case publishOutcome + of NoTopicSpecified: + return err( + (LightpushStatusCode.INVALID_MESSAGE_ERROR, some("Empty topic, skipping publish")) + ) + of DuplicateMessage: + return err( + (LightpushStatusCode.INVALID_MESSAGE_ERROR, some("Dropping already-seen message")) + ) + of NoPeersToPublish: + return err( + ( + LightpushStatusCode.NO_PEERS_TO_RELAY, + some("No peers for topic, skipping publish"), + ) + ) + of CannotGenerateMessageId: + return err( + ( + LightpushStatusCode.INTERNAL_SERVER_ERROR, + some("Error generating message id, skipping publish"), + ) + ) + else: + return err((LightpushStatusCode.INTERNAL_SERVER_ERROR, none[string]())) diff --git a/waku/waku_lightpush/protocol.nim b/waku/waku_lightpush/protocol.nim index 2967146db..76a37c6df 100644 --- a/waku/waku_lightpush/protocol.nim +++ b/waku/waku_lightpush/protocol.nim @@ -1,9 +1,17 @@ {.push raises: [].} -import std/options, results, stew/byteutils, chronicles, chronos, metrics, bearssl/rand +import + std/[options, strutils], + results, + stew/byteutils, + chronicles, + chronos, + metrics, + bearssl/rand import 
../node/peer_manager/peer_manager, ../waku_core, + ../waku_core/topics/sharding, ./common, ./rpc, ./rpc_codec, @@ -18,55 +26,90 @@ type WakuLightPush* = ref object of LPProtocol peerManager*: PeerManager pushHandler*: PushMessageHandler requestRateLimiter*: RequestRateLimiter + sharding: Sharding proc handleRequest*( wl: WakuLightPush, peerId: PeerId, buffer: seq[byte] -): Future[PushRPC] {.async.} = - let reqDecodeRes = PushRPC.decode(buffer) - var - isSuccess = false - pushResponseInfo = "" - requestId = "" +): Future[LightPushResponse] {.async.} = + let reqDecodeRes = LightpushRequest.decode(buffer) + var isSuccess = false + var pushResponse: LightpushResponse if reqDecodeRes.isErr(): - pushResponseInfo = decodeRpcFailure & ": " & $reqDecodeRes.error - elif reqDecodeRes.get().request.isNone(): - pushResponseInfo = emptyRequestBodyFailure + pushResponse = LightpushResponse( + requestId: "N/A", # due to decode failure we don't know requestId + statusCode: LightpushStatusCode.BAD_REQUEST.uint32, + statusDesc: some(decodeRpcFailure & ": " & $reqDecodeRes.error), + ) else: - let pushRpcRequest = reqDecodeRes.get() + let pushRequest = reqDecodeRes.get() - requestId = pushRpcRequest.requestId + let pubsubTopic = pushRequest.pubSubTopic.valueOr: + let parsedTopic = NsContentTopic.parse(pushRequest.message.contentTopic).valueOr: + let msg = "Invalid content-topic:" & $error + error "lightpush request handling error", error = msg + return LightpushResponse( + requestId: pushRequest.requestId, + statusCode: LightpushStatusCode.INVALID_MESSAGE_ERROR.uint32, + statusDesc: some(msg), + ) - let - request = pushRpcRequest.request + wl.sharding.getShard(parsedTopic).valueOr: + let msg = "Autosharding error: " & error + error "lightpush request handling error", error = msg + return LightpushResponse( + requestId: pushRequest.requestId, + statusCode: LightpushStatusCode.INTERNAL_SERVER_ERROR.uint32, + statusDesc: some(msg), + ) - pubSubTopic = request.get().pubSubTopic - message = 
request.get().message + # ensure checking topic will not cause error at gossipsub level + if pubsubTopic.isEmptyOrWhitespace(): + let msg = "topic must not be empty" + error "lightpush request handling error", error = msg + return LightPushResponse( + requestId: pushRequest.requestId, + statusCode: LightpushStatusCode.BAD_REQUEST.uint32, + statusDesc: some(msg), + ) - waku_lightpush_messages.inc(labelValues = ["PushRequest"]) + waku_lightpush_v3_messages.inc(labelValues = ["PushRequest"]) notice "handling lightpush request", my_peer_id = wl.peerManager.switch.peerInfo.peerId, peer_id = peerId, - requestId = requestId, - pubsubTopic = pubsubTopic, - msg_hash = pubsubTopic.computeMessageHash(message).to0xHex(), + requestId = pushRequest.requestId, + pubsubTopic = pushRequest.pubsubTopic, + msg_hash = pubsubTopic.computeMessageHash(pushRequest.message).to0xHex(), receivedTime = getNowInNanosecondTime() - let handleRes = await wl.pushHandler(peerId, pubsubTopic, message) + let handleRes = await wl.pushHandler(peerId, pubsubTopic, pushRequest.message) + isSuccess = handleRes.isOk() - pushResponseInfo = (if isSuccess: "OK" else: handleRes.error) + pushResponse = LightpushResponse( + requestId: pushRequest.requestId, + statusCode: + if isSuccess: + LightpushStatusCode.SUCCESS.uint32 + else: + handleRes.error.code.uint32, + statusDesc: + if isSuccess: + none[string]() + else: + handleRes.error.desc, + ) if not isSuccess: - waku_lightpush_errors.inc(labelValues = [pushResponseInfo]) - error "failed to push message", error = pushResponseInfo - let response = PushResponse(isSuccess: isSuccess, info: some(pushResponseInfo)) - let rpc = PushRPC(requestId: requestId, response: some(response)) - return rpc + waku_lightpush_v3_errors.inc( + labelValues = [pushResponse.statusDesc.valueOr("unknown")] + ) + error "failed to push message", error = pushResponse.statusDesc + return pushResponse proc initProtocolHandler(wl: WakuLightPush) = proc handle(conn: Connection, proto: string) 
{.async.} = - var rpc: PushRPC + var rpc: LightpushResponse wl.requestRateLimiter.checkUsageLimit(WakuLightPushCodec, conn): let buffer = await conn.readLp(DefaultMaxRpcSize) @@ -80,13 +123,13 @@ proc initProtocolHandler(wl: WakuLightPush) = peerId = conn.peerId, limit = $wl.requestRateLimiter.setting rpc = static( - PushRPC( + LightpushResponse( ## We will not copy and decode RPC buffer from stream only for requestId ## in reject case as it is comparably too expensive and opens possible ## attack surface requestId: "N/A", - response: - some(PushResponse(isSuccess: false, info: some(TooManyRequestsMessage))), + statusCode: LightpushStatusCode.TOO_MANY_REQUESTS.uint32, + statusDesc: some(TooManyRequestsMessage), ) ) @@ -103,6 +146,7 @@ proc new*( peerManager: PeerManager, rng: ref rand.HmacDrbgContext, pushHandler: PushMessageHandler, + sharding: Sharding, rateLimitSetting: Option[RateLimitSetting] = none[RateLimitSetting](), ): T = let wl = WakuLightPush( @@ -110,6 +154,7 @@ proc new*( peerManager: peerManager, pushHandler: pushHandler, requestRateLimiter: newRequestRateLimiter(rateLimitSetting), + sharding: sharding, ) wl.initProtocolHandler() setServiceLimitMetric(WakuLightpushCodec, rateLimitSetting) diff --git a/waku/waku_lightpush/protocol_metrics.nim b/waku/waku_lightpush/protocol_metrics.nim index ce48a7d3d..c906cd587 100644 --- a/waku/waku_lightpush/protocol_metrics.nim +++ b/waku/waku_lightpush/protocol_metrics.nim @@ -2,9 +2,9 @@ import metrics -declarePublicGauge waku_lightpush_errors, +declarePublicGauge waku_lightpush_v3_errors, "number of lightpush protocol errors", ["type"] -declarePublicGauge waku_lightpush_messages, +declarePublicGauge waku_lightpush_v3_messages, "number of lightpush messages received", ["type"] # Error types (metric label values) diff --git a/waku/waku_lightpush/rpc.nim b/waku/waku_lightpush/rpc.nim index 33ba3f5e3..5a1a6647d 100644 --- a/waku/waku_lightpush/rpc.nim +++ b/waku/waku_lightpush/rpc.nim @@ -4,15 +4,13 @@ import 
std/options import ../waku_core type - PushRequest* = object - pubSubTopic*: string + LightpushRequest* = object + requestId*: string + pubSubTopic*: Option[PubsubTopic] message*: WakuMessage - PushResponse* = object - isSuccess*: bool - info*: Option[string] - - PushRPC* = object + LightPushResponse* = object requestId*: string - request*: Option[PushRequest] - response*: Option[PushResponse] + statusCode*: uint32 + statusDesc*: Option[string] + relayPeerCount*: Option[uint32] diff --git a/waku/waku_lightpush/rpc_codec.nim b/waku/waku_lightpush/rpc_codec.nim index 25d2bd210..53bdda4c0 100644 --- a/waku/waku_lightpush/rpc_codec.nim +++ b/waku/waku_lightpush/rpc_codec.nim @@ -5,73 +5,19 @@ import ../common/protobuf, ../waku_core, ./rpc const DefaultMaxRpcSize* = -1 -proc encode*(rpc: PushRequest): ProtoBuffer = - var pb = initProtoBuffer() - - pb.write3(1, rpc.pubSubTopic) - pb.write3(2, rpc.message.encode()) - pb.finish3() - - pb - -proc decode*(T: type PushRequest, buffer: seq[byte]): ProtobufResult[T] = - let pb = initProtoBuffer(buffer) - var rpc = PushRequest() - - var pubSubTopic: PubsubTopic - if not ?pb.getField(1, pubSubTopic): - return err(ProtobufError.missingRequiredField("pubsub_topic")) - else: - rpc.pubSubTopic = pubSubTopic - - var messageBuf: seq[byte] - if not ?pb.getField(2, messageBuf): - return err(ProtobufError.missingRequiredField("message")) - else: - rpc.message = ?WakuMessage.decode(messageBuf) - - ok(rpc) - -proc encode*(rpc: PushResponse): ProtoBuffer = - var pb = initProtoBuffer() - - pb.write3(1, uint64(rpc.isSuccess)) - pb.write3(2, rpc.info) - pb.finish3() - - pb - -proc decode*(T: type PushResponse, buffer: seq[byte]): ProtobufResult[T] = - let pb = initProtoBuffer(buffer) - var rpc = PushResponse() - - var isSuccess: uint64 - if not ?pb.getField(1, isSuccess): - return err(ProtobufError.missingRequiredField("is_success")) - else: - rpc.isSuccess = bool(isSuccess) - - var info: string - if not ?pb.getField(2, info): - rpc.info = 
none(string) - else: - rpc.info = some(info) - - ok(rpc) - -proc encode*(rpc: PushRPC): ProtoBuffer = +proc encode*(rpc: LightpushRequest): ProtoBuffer = var pb = initProtoBuffer() pb.write3(1, rpc.requestId) - pb.write3(2, rpc.request.map(encode)) - pb.write3(3, rpc.response.map(encode)) + pb.write3(20, rpc.pubSubTopic) + pb.write3(21, rpc.message.encode()) pb.finish3() - pb + return pb -proc decode*(T: type PushRPC, buffer: seq[byte]): ProtobufResult[T] = +proc decode*(T: type LightpushRequest, buffer: seq[byte]): ProtobufResult[T] = let pb = initProtoBuffer(buffer) - var rpc = PushRPC() + var rpc = LightpushRequest() var requestId: string if not ?pb.getField(1, requestId): @@ -79,18 +25,57 @@ proc decode*(T: type PushRPC, buffer: seq[byte]): ProtobufResult[T] = else: rpc.requestId = requestId - var requestBuffer: seq[byte] - if not ?pb.getField(2, requestBuffer): - rpc.request = none(PushRequest) + var pubSubTopic: PubsubTopic + if not ?pb.getField(20, pubSubTopic): + rpc.pubSubTopic = none(PubsubTopic) else: - let request = ?PushRequest.decode(requestBuffer) - rpc.request = some(request) + rpc.pubSubTopic = some(pubSubTopic) - var responseBuffer: seq[byte] - if not ?pb.getField(3, responseBuffer): - rpc.response = none(PushResponse) + var messageBuf: seq[byte] + if not ?pb.getField(21, messageBuf): + return err(ProtobufError.missingRequiredField("message")) else: - let response = ?PushResponse.decode(responseBuffer) - rpc.response = some(response) + rpc.message = ?WakuMessage.decode(messageBuf) - ok(rpc) + return ok(rpc) + +proc encode*(rpc: LightPushResponse): ProtoBuffer = + var pb = initProtoBuffer() + + pb.write3(1, rpc.requestId) + pb.write3(10, rpc.statusCode) + pb.write3(11, rpc.statusDesc) + pb.write3(12, rpc.relayPeerCount) + pb.finish3() + + return pb + +proc decode*(T: type LightPushResponse, buffer: seq[byte]): ProtobufResult[T] = + let pb = initProtoBuffer(buffer) + var rpc = LightPushResponse() + + var requestId: string + if not ?pb.getField(1, 
requestId): + return err(ProtobufError.missingRequiredField("request_id")) + else: + rpc.requestId = requestId + + var statusCode: uint32 + if not ?pb.getField(10, statusCode): + return err(ProtobufError.missingRequiredField("status_code")) + else: + rpc.statusCode = statusCode + + var statusDesc: string + if not ?pb.getField(11, statusDesc): + rpc.statusDesc = none(string) + else: + rpc.statusDesc = some(statusDesc) + + var relayPeerCount: uint32 + if not ?pb.getField(12, relayPeerCount): + rpc.relayPeerCount = none(uint32) + else: + rpc.relayPeerCount = some(relayPeerCount) + + return ok(rpc) diff --git a/waku/waku_lightpush/self_req_handler.nim b/waku/waku_lightpush/self_req_handler.nim index 410d5808a..fffced40a 100644 --- a/waku/waku_lightpush/self_req_handler.nim +++ b/waku/waku_lightpush/self_req_handler.nim @@ -20,8 +20,8 @@ import ../utils/requests proc handleSelfLightPushRequest*( - self: WakuLightPush, pubSubTopic: PubsubTopic, message: WakuMessage -): Future[WakuLightPushResult[string]] {.async.} = + self: WakuLightPush, pubSubTopic: Option[PubsubTopic], message: WakuMessage +): Future[WakuLightPushResult] {.async.} = ## Handles the lightpush requests made by the node to itself. ## Normally used in REST-lightpush requests ## On success, returns the msg_hash of the published message. @@ -30,30 +30,14 @@ proc handleSelfLightPushRequest*( # provide self peerId as now this node is used directly, thus there is no light client sender peer. 
let selfPeerId = self.peerManager.switch.peerInfo.peerId - let req = PushRequest(pubSubTopic: pubSubTopic, message: message) - let rpc = PushRPC(requestId: generateRequestId(self.rng), request: some(req)) + let req = LightpushRequest( + requestId: generateRequestId(self.rng), pubSubTopic: pubSubTopic, message: message + ) - let respRpc = await self.handleRequest(selfPeerId, rpc.encode().buffer) + let response = await self.handleRequest(selfPeerId, req.encode().buffer) - if respRpc.response.isNone(): - waku_lightpush_errors.inc(labelValues = [emptyResponseBodyFailure]) - return err(emptyResponseBodyFailure) - - let response = respRpc.response.get() - if not response.isSuccess: - if response.info.isSome(): - return err(response.info.get()) - else: - return err("unknown failure") - - let msg_hash_hex_str = computeMessageHash(pubSubTopic, message).to0xHex() - - notice "publishing message with self hosted lightpush", - pubsubTopic = pubsubTopic, - contentTopic = message.contentTopic, - self_peer_id = selfPeerId, - msg_hash = msg_hash_hex_str - - return ok(msg_hash_hex_str) + return response.toPushResult() except Exception: - return err("exception in handleSelfLightPushRequest: " & getCurrentExceptionMsg()) + return lightPushResultInternalError( + "exception in handleSelfLightPushRequest: " & getCurrentExceptionMsg() + ) diff --git a/waku/waku_lightpush_legacy.nim b/waku/waku_lightpush_legacy.nim new file mode 100644 index 000000000..f1b25cbbe --- /dev/null +++ b/waku/waku_lightpush_legacy.nim @@ -0,0 +1,5 @@ +import + ./waku_lightpush_legacy/ + [protocol, common, rpc, rpc_codec, callbacks, self_req_handler] + +export protocol, common, rpc, rpc_codec, callbacks, self_req_handler diff --git a/waku/waku_lightpush/README.md b/waku/waku_lightpush_legacy/README.md similarity index 100% rename from waku/waku_lightpush/README.md rename to waku/waku_lightpush_legacy/README.md diff --git a/waku/waku_lightpush_legacy/callbacks.nim b/waku/waku_lightpush_legacy/callbacks.nim new 
file mode 100644 index 000000000..f5a79eadc --- /dev/null +++ b/waku/waku_lightpush_legacy/callbacks.nim @@ -0,0 +1,62 @@ +{.push raises: [].} + +import + ../waku_core, + ../waku_relay, + ./common, + ./protocol_metrics, + ../waku_rln_relay, + ../waku_rln_relay/protocol_types + +import std/times, libp2p/peerid, stew/byteutils + +proc checkAndGenerateRLNProof*( + rlnPeer: Option[WakuRLNRelay], message: WakuMessage +): Result[WakuMessage, string] = + # check if the message already has RLN proof + if message.proof.len > 0: + return ok(message) + + if rlnPeer.isNone(): + notice "Publishing message without RLN proof" + return ok(message) + # generate and append RLN proof + let + time = getTime().toUnix() + senderEpochTime = float64(time) + var msgWithProof = message + rlnPeer.get().appendRLNProof(msgWithProof, senderEpochTime).isOkOr: + return err(error) + return ok(msgWithProof) + +proc getNilPushHandler*(): PushMessageHandler = + return proc( + peer: PeerId, pubsubTopic: string, message: WakuMessage + ): Future[WakuLightPushResult[void]] {.async.} = + return err("no waku relay found") + +proc getRelayPushHandler*( + wakuRelay: WakuRelay, rlnPeer: Option[WakuRLNRelay] = none[WakuRLNRelay]() +): PushMessageHandler = + return proc( + peer: PeerId, pubsubTopic: string, message: WakuMessage + ): Future[WakuLightPushResult[void]] {.async.} = + # append RLN proof + let msgWithProof = checkAndGenerateRLNProof(rlnPeer, message) + if msgWithProof.isErr(): + return err(msgWithProof.error) + + (await wakuRelay.validateMessage(pubSubTopic, msgWithProof.value)).isOkOr: + return err(error) + + let publishResult = await wakuRelay.publish(pubsubTopic, msgWithProof.value) + if publishResult.isErr(): + ## Agreed change expected to the lightpush protocol to better handle such case. 
https://github.com/waku-org/pm/issues/93 + let msgHash = computeMessageHash(pubsubTopic, message).to0xHex() + notice "Lightpush request has not been published to any peers", + msg_hash = msgHash, reason = $publishResult.error + # for legacy lightpush we do not detail the reason towards clients. All error during publish result in not-published-to-any-peer + # this let client of the legacy protocol to react as they did so far. + return err(protocol_metrics.notPublishedAnyPeer) + + return ok() diff --git a/waku/waku_lightpush_legacy/client.nim b/waku/waku_lightpush_legacy/client.nim new file mode 100644 index 000000000..c3b4a158e --- /dev/null +++ b/waku/waku_lightpush_legacy/client.nim @@ -0,0 +1,111 @@ +{.push raises: [].} + +import std/options, results, chronicles, chronos, metrics, bearssl/rand, stew/byteutils +import libp2p/peerid +import + ../waku_core/peers, + ../node/peer_manager, + ../node/delivery_monitor/publish_observer, + ../utils/requests, + ../waku_core, + ./common, + ./protocol_metrics, + ./rpc, + ./rpc_codec + +logScope: + topics = "waku lightpush legacy client" + +type WakuLegacyLightPushClient* = ref object + peerManager*: PeerManager + rng*: ref rand.HmacDrbgContext + publishObservers: seq[PublishObserver] + +proc new*( + T: type WakuLegacyLightPushClient, + peerManager: PeerManager, + rng: ref rand.HmacDrbgContext, +): T = + WakuLegacyLightPushClient(peerManager: peerManager, rng: rng) + +proc addPublishObserver*(wl: WakuLegacyLightPushClient, obs: PublishObserver) = + wl.publishObservers.add(obs) + +proc sendPushRequest( + wl: WakuLegacyLightPushClient, req: PushRequest, peer: PeerId | RemotePeerInfo +): Future[WakuLightPushResult[void]] {.async, gcsafe.} = + let connOpt = await wl.peerManager.dialPeer(peer, WakuLegacyLightPushCodec) + if connOpt.isNone(): + waku_lightpush_errors.inc(labelValues = [dialFailure]) + return err(dialFailure) + let connection = connOpt.get() + + let rpc = PushRPC(requestId: generateRequestId(wl.rng), request: 
some(req)) + await connection.writeLP(rpc.encode().buffer) + + var buffer: seq[byte] + try: + buffer = await connection.readLp(DefaultMaxRpcSize.int) + except LPStreamRemoteClosedError: + return err("Exception reading: " & getCurrentExceptionMsg()) + + let decodeRespRes = PushRPC.decode(buffer) + if decodeRespRes.isErr(): + error "failed to decode response" + waku_lightpush_errors.inc(labelValues = [decodeRpcFailure]) + return err(decodeRpcFailure) + + let pushResponseRes = decodeRespRes.get() + if pushResponseRes.response.isNone(): + waku_lightpush_errors.inc(labelValues = [emptyResponseBodyFailure]) + return err(emptyResponseBodyFailure) + + let response = pushResponseRes.response.get() + if not response.isSuccess: + if response.info.isSome(): + return err(response.info.get()) + else: + return err("unknown failure") + + return ok() + +proc publish*( + wl: WakuLegacyLightPushClient, + pubSubTopic: PubsubTopic, + message: WakuMessage, + peer: RemotePeerInfo, +): Future[WakuLightPushResult[string]] {.async, gcsafe.} = + ## On success, returns the msg_hash of the published message + let msg_hash_hex_str = computeMessageHash(pubsubTopic, message).to0xHex() + let pushRequest = PushRequest(pubSubTopic: pubSubTopic, message: message) + ?await wl.sendPushRequest(pushRequest, peer) + + for obs in wl.publishObservers: + obs.onMessagePublished(pubSubTopic, message) + + notice "publishing message with lightpush", + pubsubTopic = pubsubTopic, + contentTopic = message.contentTopic, + target_peer_id = peer.peerId, + msg_hash = msg_hash_hex_str + + return ok(msg_hash_hex_str) + +proc publishToAny*( + wl: WakuLegacyLightPushClient, pubSubTopic: PubsubTopic, message: WakuMessage +): Future[WakuLightPushResult[void]] {.async, gcsafe.} = + ## This proc is similar to the publish one but in this case + ## we don't specify a particular peer and instead we get it from peer manager + + info "publishToAny", msg_hash = computeMessageHash(pubsubTopic, message).to0xHex + + let peer = 
wl.peerManager.selectPeer(WakuLegacyLightPushCodec).valueOr: + return err("could not retrieve a peer supporting WakuLegacyLightPushCodec") + + let pushRequest = PushRequest(pubSubTopic: pubSubTopic, message: message) + ?await wl.sendPushRequest(pushRequest, peer) + + for obs in wl.publishObservers: + obs.onMessagePublished(pubSubTopic, message) + + return ok() diff --git a/waku/waku_lightpush_legacy/common.nim b/waku/waku_lightpush_legacy/common.nim new file mode 100644 index 000000000..fcdf1814c --- /dev/null +++ b/waku/waku_lightpush_legacy/common.nim @@ -0,0 +1,15 @@ +{.push raises: [].} + +import results, chronos, libp2p/peerid +import ../waku_core + +from ../waku_core/codecs import WakuLegacyLightPushCodec +export WakuLegacyLightPushCodec + +type WakuLightPushResult*[T] = Result[T, string] + +type PushMessageHandler* = proc( + peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage +): Future[WakuLightPushResult[void]] {.async.} + +const TooManyRequestsMessage* = "TOO_MANY_REQUESTS" diff --git a/waku/waku_lightpush_legacy/protocol.nim b/waku/waku_lightpush_legacy/protocol.nim new file mode 100644 index 000000000..feb6a1320 --- /dev/null +++ b/waku/waku_lightpush_legacy/protocol.nim @@ -0,0 +1,113 @@ +{.push raises: [].} + +import std/options, results, stew/byteutils, chronicles, chronos, metrics, bearssl/rand +import + ../node/peer_manager/peer_manager, + ../waku_core, + ./common, + ./rpc, + ./rpc_codec, + ./protocol_metrics, + ../common/rate_limit/request_limiter + +logScope: + topics = "waku lightpush legacy" + +type WakuLegacyLightPush* = ref object of LPProtocol + rng*: ref rand.HmacDrbgContext + peerManager*: PeerManager + pushHandler*: PushMessageHandler + requestRateLimiter*: RequestRateLimiter + +proc handleRequest*( + wl: WakuLegacyLightPush, peerId: PeerId, buffer: seq[byte] +): Future[PushRPC] {.async.} = + let reqDecodeRes = PushRPC.decode(buffer) + var + isSuccess = false + pushResponseInfo = "" + requestId = "" + + if reqDecodeRes.isErr(): + 
pushResponseInfo = decodeRpcFailure & ": " & $reqDecodeRes.error + elif reqDecodeRes.get().request.isNone(): + pushResponseInfo = emptyRequestBodyFailure + else: + let pushRpcRequest = reqDecodeRes.get() + + requestId = pushRpcRequest.requestId + + let + request = pushRpcRequest.request + + pubSubTopic = request.get().pubSubTopic + message = request.get().message + waku_lightpush_messages.inc(labelValues = ["PushRequest"]) + notice "handling lightpush request", + peer_id = peerId, + requestId = requestId, + pubsubTopic = pubsubTopic, + msg_hash = pubsubTopic.computeMessageHash(message).to0xHex(), + receivedTime = getNowInNanosecondTime() + + let handleRes = await wl.pushHandler(peerId, pubsubTopic, message) + isSuccess = handleRes.isOk() + pushResponseInfo = (if isSuccess: "OK" else: handleRes.error) + + if not isSuccess: + waku_lightpush_errors.inc(labelValues = [pushResponseInfo]) + error "failed to push message", error = pushResponseInfo + let response = PushResponse(isSuccess: isSuccess, info: some(pushResponseInfo)) + let rpc = PushRPC(requestId: requestId, response: some(response)) + return rpc + +proc initProtocolHandler(wl: WakuLegacyLightPush) = + proc handle(conn: Connection, proto: string) {.async.} = + var rpc: PushRPC + wl.requestRateLimiter.checkUsageLimit(WakuLegacyLightPushCodec, conn): + let buffer = await conn.readLp(DefaultMaxRpcSize) + + waku_service_network_bytes.inc( + amount = buffer.len().int64, labelValues = [WakuLegacyLightPushCodec, "in"] + ) + + rpc = await handleRequest(wl, conn.peerId, buffer) + do: + debug "lightpush request rejected due rate limit exceeded", + peerId = conn.peerId, limit = $wl.requestRateLimiter.setting + + rpc = static( + PushRPC( + ## We will not copy and decode RPC buffer from stream only for requestId + ## in reject case as it is comparably too expensive and opens possible + ## attack surface + requestId: "N/A", + response: + some(PushResponse(isSuccess: false, info: some(TooManyRequestsMessage))), + ) + ) + + 
await conn.writeLp(rpc.encode().buffer) + + ## For lightpush might not worth to measure outgoing trafic as it is only + ## small respones about success/failure + + wl.handler = handle + wl.codec = WakuLegacyLightPushCodec + +proc new*( + T: type WakuLegacyLightPush, + peerManager: PeerManager, + rng: ref rand.HmacDrbgContext, + pushHandler: PushMessageHandler, + rateLimitSetting: Option[RateLimitSetting] = none[RateLimitSetting](), +): T = + let wl = WakuLegacyLightPush( + rng: rng, + peerManager: peerManager, + pushHandler: pushHandler, + requestRateLimiter: newRequestRateLimiter(rateLimitSetting), + ) + wl.initProtocolHandler() + setServiceLimitMetric(WakuLegacyLightPushCodec, rateLimitSetting) + return wl diff --git a/waku/waku_lightpush_legacy/protocol_metrics.nim b/waku/waku_lightpush_legacy/protocol_metrics.nim new file mode 100644 index 000000000..ce48a7d3d --- /dev/null +++ b/waku/waku_lightpush_legacy/protocol_metrics.nim @@ -0,0 +1,19 @@ +{.push raises: [].} + +import metrics + +declarePublicGauge waku_lightpush_errors, + "number of lightpush protocol errors", ["type"] +declarePublicGauge waku_lightpush_messages, + "number of lightpush messages received", ["type"] + +# Error types (metric label values) +const + dialFailure* = "dial_failure" + decodeRpcFailure* = "decode_rpc_failure" + peerNotFoundFailure* = "peer_not_found_failure" + emptyRequestBodyFailure* = "empty_request_body_failure" + emptyResponseBodyFailure* = "empty_response_body_failure" + messagePushFailure* = "message_push_failure" + requestLimitReachedFailure* = "request_limit_reached_failure" + notPublishedAnyPeer* = "not_published_to_any_peer" diff --git a/waku/waku_lightpush_legacy/rpc.nim b/waku/waku_lightpush_legacy/rpc.nim new file mode 100644 index 000000000..33ba3f5e3 --- /dev/null +++ b/waku/waku_lightpush_legacy/rpc.nim @@ -0,0 +1,18 @@ +{.push raises: [].} + +import std/options +import ../waku_core + +type + PushRequest* = object + pubSubTopic*: string + message*: WakuMessage + + 
PushResponse* = object + isSuccess*: bool + info*: Option[string] + + PushRPC* = object + requestId*: string + request*: Option[PushRequest] + response*: Option[PushResponse] diff --git a/waku/waku_lightpush_legacy/rpc_codec.nim b/waku/waku_lightpush_legacy/rpc_codec.nim new file mode 100644 index 000000000..25d2bd210 --- /dev/null +++ b/waku/waku_lightpush_legacy/rpc_codec.nim @@ -0,0 +1,96 @@ +{.push raises: [].} + +import std/options +import ../common/protobuf, ../waku_core, ./rpc + +const DefaultMaxRpcSize* = -1 + +proc encode*(rpc: PushRequest): ProtoBuffer = + var pb = initProtoBuffer() + + pb.write3(1, rpc.pubSubTopic) + pb.write3(2, rpc.message.encode()) + pb.finish3() + + pb + +proc decode*(T: type PushRequest, buffer: seq[byte]): ProtobufResult[T] = + let pb = initProtoBuffer(buffer) + var rpc = PushRequest() + + var pubSubTopic: PubsubTopic + if not ?pb.getField(1, pubSubTopic): + return err(ProtobufError.missingRequiredField("pubsub_topic")) + else: + rpc.pubSubTopic = pubSubTopic + + var messageBuf: seq[byte] + if not ?pb.getField(2, messageBuf): + return err(ProtobufError.missingRequiredField("message")) + else: + rpc.message = ?WakuMessage.decode(messageBuf) + + ok(rpc) + +proc encode*(rpc: PushResponse): ProtoBuffer = + var pb = initProtoBuffer() + + pb.write3(1, uint64(rpc.isSuccess)) + pb.write3(2, rpc.info) + pb.finish3() + + pb + +proc decode*(T: type PushResponse, buffer: seq[byte]): ProtobufResult[T] = + let pb = initProtoBuffer(buffer) + var rpc = PushResponse() + + var isSuccess: uint64 + if not ?pb.getField(1, isSuccess): + return err(ProtobufError.missingRequiredField("is_success")) + else: + rpc.isSuccess = bool(isSuccess) + + var info: string + if not ?pb.getField(2, info): + rpc.info = none(string) + else: + rpc.info = some(info) + + ok(rpc) + +proc encode*(rpc: PushRPC): ProtoBuffer = + var pb = initProtoBuffer() + + pb.write3(1, rpc.requestId) + pb.write3(2, rpc.request.map(encode)) + pb.write3(3, rpc.response.map(encode)) + 
pb.finish3() + + pb + +proc decode*(T: type PushRPC, buffer: seq[byte]): ProtobufResult[T] = + let pb = initProtoBuffer(buffer) + var rpc = PushRPC() + + var requestId: string + if not ?pb.getField(1, requestId): + return err(ProtobufError.missingRequiredField("request_id")) + else: + rpc.requestId = requestId + + var requestBuffer: seq[byte] + if not ?pb.getField(2, requestBuffer): + rpc.request = none(PushRequest) + else: + let request = ?PushRequest.decode(requestBuffer) + rpc.request = some(request) + + var responseBuffer: seq[byte] + if not ?pb.getField(3, responseBuffer): + rpc.response = none(PushResponse) + else: + let response = ?PushResponse.decode(responseBuffer) + rpc.response = some(response) + + ok(rpc) diff --git a/waku/waku_lightpush_legacy/self_req_handler.nim b/waku/waku_lightpush_legacy/self_req_handler.nim new file mode 100644 index 000000000..3c5d09a9c --- /dev/null +++ b/waku/waku_lightpush_legacy/self_req_handler.nim @@ -0,0 +1,59 @@ +{.push raises: [].} + +## Notice that the REST /lightpush requests normally assume that the node +## is acting as a lightpush-client that will trigger the service provider node +## to relay the message. +## In this module, we allow that a lightpush service node (full node) can be +## triggered directly through the REST /lightpush endpoint. +## The typical use case for that is when using `nwaku-compose`, +## which spawn a full service Waku node +## that could be used also as a lightpush client, helping testing and development. + +import results, chronos, chronicles, std/options, metrics, stew/byteutils +import + ../waku_core, + ./protocol, + ./common, + ./rpc, + ./rpc_codec, + ./protocol_metrics, + ../utils/requests + +proc handleSelfLightPushRequest*( + self: WakuLegacyLightPush, pubSubTopic: PubsubTopic, message: WakuMessage +): Future[WakuLightPushResult[string]] {.async.} = + ## Handles the lightpush requests made by the node to itself. 
+ ## Normally used in REST-lightpush requests + ## On success, returns the msg_hash of the published message. + + try: + # provide self peerId as now this node is used directly, thus there is no light client sender peer. + let selfPeerId = self.peerManager.switch.peerInfo.peerId + + let req = PushRequest(pubSubTopic: pubSubTopic, message: message) + let rpc = PushRPC(requestId: generateRequestId(self.rng), request: some(req)) + + let respRpc = await self.handleRequest(selfPeerId, rpc.encode().buffer) + + if respRpc.response.isNone(): + waku_lightpush_errors.inc(labelValues = [emptyResponseBodyFailure]) + return err(emptyResponseBodyFailure) + + let response = respRpc.response.get() + if not response.isSuccess: + if response.info.isSome(): + return err(response.info.get()) + else: + return err("unknown failure") + + let msg_hash_hex_str = computeMessageHash(pubSubTopic, message).to0xHex() + + notice "publishing message with self hosted lightpush", + pubsubTopic = pubsubTopic, + contentTopic = message.contentTopic, + self_peer_id = selfPeerId, + msg_hash = msg_hash_hex_str + + return ok(msg_hash_hex_str) + except Exception: + return err("exception in handleSelfLightPushRequest: " & getCurrentExceptionMsg()) diff --git a/waku/waku_relay/protocol.nim b/waku/waku_relay/protocol.nim index 080f12edf..0222db0d1 100644 --- a/waku/waku_relay/protocol.nim +++ b/waku/waku_relay/protocol.nim @@ -5,7 +5,7 @@ {.push raises: [].} import - std/strformat, + std/[strformat, strutils], stew/byteutils, results, sequtils, @@ -13,7 +13,6 @@ import chronicles, metrics, libp2p/multihash, - libp2p/protocols/pubsub/pubsub, libp2p/protocols/pubsub/gossipsub, libp2p/protocols/pubsub/rpc/messages, libp2p/stream/connection, @@ -136,6 +135,13 @@ type onTopicHealthChange*: TopicHealthChangeHandler topicHealthLoopHandle*: Future[void] +# predefinition for more detailed results from publishing new message +type PublishOutcome* {.pure.} = enum + NoTopicSpecified + DuplicateMessage + NoPeersToPublish 
+ CannotGenerateMessageId + proc initProtocolHandler(w: WakuRelay) = proc handler(conn: Connection, proto: string) {.async.} = ## main protocol handler that gets triggered on every @@ -514,7 +520,10 @@ proc unsubscribe*(w: WakuRelay, pubsubTopic: PubsubTopic, handler: TopicHandler) proc publish*( w: WakuRelay, pubsubTopic: PubsubTopic, message: WakuMessage -): Future[int] {.async.} = +): Future[Result[int, PublishOutcome]] {.async.} = + if pubsubTopic.isEmptyOrWhitespace(): + return err(NoTopicSpecified) + let data = message.encode().buffer let msgHash = computeMessageHash(pubsubTopic, message).to0xHex() @@ -522,11 +531,13 @@ proc publish*( let relayedPeerCount = await procCall GossipSub(w).publish(pubsubTopic, data) - if relayedPeerCount > 0: - for obs in w.publishObservers: - obs.onMessagePublished(pubSubTopic, message) + if relayedPeerCount <= 0: + return err(NoPeersToPublish) - return relayedPeerCount + for obs in w.publishObservers: + obs.onMessagePublished(pubSubTopic, message) + + return ok(relayedPeerCount) proc getNumConnectedPeers*( w: WakuRelay, pubsubTopic: PubsubTopic From 5f1a3406d1e273db3f6eb55dd319b5a8ac314f4f Mon Sep 17 00:00:00 2001 From: Darshan K <35736874+darshankabariya@users.noreply.github.com> Date: Wed, 5 Mar 2025 21:21:59 +0530 Subject: [PATCH 21/22] feat: remain windows support (#3162) Refine process so now it's look cleaner and simple --- Makefile | 16 ++--- README.md | 40 ++++++++++-- config.nims | 1 + scripts/build_rln.sh | 5 +- scripts/build_wakunode_windows.sh | 60 +++++++++++++++++ scripts/windows_setup.sh | 68 -------------------- waku/common/databases/db_postgres/dbconn.nim | 15 +++-- 7 files changed, 115 insertions(+), 90 deletions(-) create mode 100755 scripts/build_wakunode_windows.sh delete mode 100644 scripts/windows_setup.sh diff --git a/Makefile b/Makefile index 99ceadbb1..473bb7801 100644 --- a/Makefile +++ b/Makefile @@ -34,14 +34,14 @@ ifneq (,$(findstring MINGW,$(detected_OS))) endif ifeq ($(detected_OS),Windows) - # 
Define a new temporary directory for Windows - TMP_DIR := $(CURDIR)/tmp - $(shell mkdir -p $(TMP_DIR)) - export TMP := $(TMP_DIR) - export TEMP := $(TMP_DIR) - - # Add the necessary libraries to the linker flags - LIBS = -static -lws2_32 -lbcrypt -luserenv -lntdll -lminiupnpc + # Update MINGW_PATH to standard MinGW location + MINGW_PATH = /mingw64 + NIM_PARAMS += --passC:"-I$(MINGW_PATH)/include" + NIM_PARAMS += --passL:"-L$(MINGW_PATH)/lib" + NIM_PARAMS += --passL:"-Lvendor/nim-nat-traversal/vendor/miniupnp/miniupnpc" + NIM_PARAMS += --passL:"-Lvendor/nim-nat-traversal/vendor/libnatpmp-upstream" + + LIBS = -static -lws2_32 -lbcrypt -liphlpapi -luserenv -lntdll -lminiupnpc -lnatpmp -lpq NIM_PARAMS += $(foreach lib,$(LIBS),--passL:"$(lib)") endif diff --git a/README.md b/README.md index cb13d6cef..9d8b58110 100644 --- a/README.md +++ b/README.md @@ -52,14 +52,42 @@ If you encounter difficulties building the project on WSL, consider placing the ### How to Build & Run ( Windows ) -Note: This is a work in progress. The current setup procedure is as follows: -Goal: Get rid of windows specific procedures and make the build process the same as linux/macos. +### Windows Build Instructions -The current setup procedure is as follows: +#### 1. Install Required Tools +- **Git Bash Terminal**: Download and install from https://git-scm.com/download/win +- **MSYS2**: + a. Download installer from https://www.msys2.org + b. Install at "C:\" (default location). Remove/rename the msys folder in case of previous installation. + c. Use the mingw64 terminal from msys64 directory for package installation. -1. Clone the repository and checkout master branch -2. Ensure prerequisites are installed (Make, GCC, MSYS2/MinGW) -3. Run scripts/windows_setup.sh +#### 2. 
Install Dependencies +Open MSYS2 mingw64 terminal and run the following one-by-one : +```bash +pacman -Syu --noconfirm +pacman -S --noconfirm --needed mingw-w64-x86_64-toolchain +pacman -S --noconfirm --needed base-devel make cmake upx +pacman -S --noconfirm --needed mingw-w64-x86_64-rust +pacman -S --noconfirm --needed mingw-w64-x86_64-postgresql +pacman -S --noconfirm --needed mingw-w64-x86_64-gcc +pacman -S --noconfirm --needed mingw-w64-x86_64-gcc-libs +pacman -S --noconfirm --needed mingw-w64-x86_64-libwinpthread-git +pacman -S --noconfirm --needed mingw-w64-x86_64-zlib +pacman -S --noconfirm --needed mingw-w64-x86_64-openssl +pacman -S --noconfirm --needed mingw-w64-x86_64-python +``` + +#### 3. Build Wakunode +- Open Git Bash as administrator +- clone nwaku and cd nwaku +- Execute: `./scripts/build_wakunode_windows.sh` + +#### 4. Troubleshooting +If `wakunode2.exe` isn't generated: +- **Missing Dependencies**: Verify with: + `which make cmake gcc g++ rustc cargo python3 upx` + If missing, revisit Step 2 or ensure MSYS2 is at `C:\` +- **Installation Conflicts**: Remove existing MinGW/MSYS2/Git Bash installations and perform fresh install ### Developing diff --git a/config.nims b/config.nims index 25066dcb2..f74fe183f 100644 --- a/config.nims +++ b/config.nims @@ -7,6 +7,7 @@ else: if defined(windows): switch("passL", "rln.lib") + switch("define", "postgres=false") # Automatically add all vendor subdirectories for dir in walkDir("./vendor"): diff --git a/scripts/build_rln.sh b/scripts/build_rln.sh index 992c1f434..1cf9b9879 100755 --- a/scripts/build_rln.sh +++ b/scripts/build_rln.sh @@ -43,12 +43,13 @@ else # first, check if submodule version = version in Makefile cargo metadata --format-version=1 --no-deps --manifest-path "${build_dir}/rln/Cargo.toml" - if [[ "$OSTYPE" == "msys" || "$OSTYPE" == "win32" ]]; then + detected_OS=$(uname -s) + if [[ "$detected_OS" == MINGW* || "$detected_OS" == MSYS* ]]; then submodule_version=$(cargo metadata --format-version=1 
--no-deps --manifest-path "${build_dir}/rln/Cargo.toml" | sed -n 's/.*"name":"rln","version":"\([^"]*\)".*/\1/p') else submodule_version=$(cargo metadata --format-version=1 --no-deps --manifest-path "${build_dir}/rln/Cargo.toml" | jq -r '.packages[] | select(.name == "rln") | .version') fi - + if [[ "v${submodule_version}" != "${rln_version}" ]]; then echo "Submodule version (v${submodule_version}) does not match version in Makefile (${rln_version})" echo "Please update the submodule to ${rln_version}" diff --git a/scripts/build_wakunode_windows.sh b/scripts/build_wakunode_windows.sh new file mode 100755 index 000000000..ef0881836 --- /dev/null +++ b/scripts/build_wakunode_windows.sh @@ -0,0 +1,60 @@ +#!/bin/sh + +echo "- - - - - - - - - - Windows Setup Script - - - - - - - - - -" + +success_count=0 +failure_count=0 + +# Function to execute a command and check its status +execute_command() { + echo "Executing: $1" + if eval "$1"; then + echo -e "✓ Command succeeded \n" + ((success_count++)) + else + echo -e "✗ Command failed \n" + ((failure_count++)) + fi +} + +echo "1. -.-.-.-- Set PATH -.-.-.-" +export PATH="/c/msys64/usr/bin:/c/msys64/mingw64/bin:/c/msys64/usr/lib:/c/msys64/mingw64/lib:$PATH" + +echo "2. -.-.-.- Verify dependencies -.-.-.-" +execute_command "which gcc g++ make cmake cargo upx rustc python" + +echo "3. -.-.-.- Updating submodules -.-.-.-" +execute_command "git submodule update --init --recursive" + +echo "4. -.-.-.- Creating tmp directory -.-.-.-" +execute_command "mkdir -p tmp" + +echo "5. -.-.-.- Building Nim -.-.-.-" +cd vendor/nimbus-build-system/vendor/Nim +execute_command "./build_all.bat" +cd ../../../.. + +echo "6. -.-.-.- Building libunwind -.-.-.-" +cd vendor/nim-libbacktrace +execute_command "make all V=1" +execute_command "make install/usr/lib/libunwind.a V=1" +cp ./vendor/libunwind/build/lib/libunwind.a install/usr/lib +cd ../../ + +echo "7. 
-.-.-.- Building miniupnpc -.-.-.- " +cd vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc +execute_command "git checkout little_chore_windows_support" +execute_command "make -f Makefile.mingw CC=gcc CXX=g++ libminiupnpc.a V=1" +cd ../../../../.. + +echo "8. -.-.-.- Building libnatpmp -.-.-.- " +cd ./vendor/nim-nat-traversal/vendor/libnatpmp-upstream +make CC="gcc -fPIC -D_WIN32_WINNT=0x0600 -DNATPMP_STATICLIB" libnatpmp.a V=1 +cd ../../../../ + +echo "9. -.-.-.- Building wakunode2 -.-.-.- " +execute_command "make wakunode2 LOG_LEVEL=DEBUG V=1 -j8" + +echo "Windows setup completed successfully!" +echo "✓ Successful commands: $success_count" +echo "✗ Failed commands: $failure_count" diff --git a/scripts/windows_setup.sh b/scripts/windows_setup.sh deleted file mode 100644 index 02b9bdc4c..000000000 --- a/scripts/windows_setup.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/bin/bash - -set -e # Exit immediately if a command exits with a non-zero status - -echo "Windows Setup Script" -echo "====================" - -# Function to execute a command and check its status -execute_command() { - echo "Executing: $1" - if eval "$1"; then - echo "✓ Command succeeded" - else - echo "✗ Command failed" - exit 1 - fi -} - -# Function to change directory safely -change_directory() { - echo "Changing to directory: $1" - if cd "$1"; then - echo "✓ Changed directory successfully" - else - echo "✗ Failed to change directory" - exit 1 - fi -} - -# Function to build a component -build_component() { - local dir="$1" - local command="$2" - local name="$3" - - echo "Building $name" - if [ -d "$dir" ]; then - change_directory "$dir" - execute_command "$command" - change_directory - > /dev/null - else - echo "✗ $name directory not found: $dir" - exit 1 - fi -} - -echo "1. Updating submodules" -execute_command "git submodule update --init --recursive" - -echo "2. Creating tmp directory" -execute_command "mkdir -p tmp" - -echo "3. 
Building Nim" -build_component "vendor/nimbus-build-system/vendor/Nim" "./build_all.bat" "Nim" - -echo "4. Building miniupnpc" -build_component "vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc" "./mingw32make.bat" "miniupnpc" - -echo "5. Building libnatpmp" -build_component "vendor/nim-nat-traversal/vendor/libnatpmp-upstream" "./build.bat" "libnatpmp" - -echo "6. Building libunwind" -build_component "vendor/nim-libbacktrace" "make install/usr/lib/libunwind.a" "libunwind" - -echo "7. Building wakunode2" -execute_command "make wakunode2 V=1 NIMFLAGS="-d:disableMarchNative -d:postgres -d:chronicles_colors:none" " - -echo "Windows setup completed successfully!" diff --git a/waku/common/databases/db_postgres/dbconn.nim b/waku/common/databases/db_postgres/dbconn.nim index 5aa852446..0edb74ede 100644 --- a/waku/common/databases/db_postgres/dbconn.nim +++ b/waku/common/databases/db_postgres/dbconn.nim @@ -182,12 +182,15 @@ proc waitQueryToFinish( let asyncFd = cast[asyncengine.AsyncFD](pqsocket(dbConnWrapper.dbConn)) - asyncengine.addReader2(asyncFd, onDataAvailable).isOkOr: - dbConnWrapper.futBecomeFree.fail(newException(ValueError, $error)) - return err("failed to add event reader in waitQueryToFinish: " & $error) - defer: - asyncengine.removeReader2(asyncFd).isOkOr: - return err("failed to remove event reader in waitQueryToFinish: " & $error) + when not defined(windows): + asyncengine.addReader2(asyncFd, onDataAvailable).isOkOr: + dbConnWrapper.futBecomeFree.fail(newException(ValueError, $error)) + return err("failed to add event reader in waitQueryToFinish: " & $error) + defer: + asyncengine.removeReader2(asyncFd).isOkOr: + return err("failed to remove event reader in waitQueryToFinish: " & $error) + else: + return err("Postgres not supported on Windows") await futDataAvailable From 05995f7ef9e34c5d4abf1b85f324aaad5469f7da Mon Sep 17 00:00:00 2001 From: NagyZoltanPeter <113987313+NagyZoltanPeter@users.noreply.github.com> Date: Mon, 10 Mar 2025 09:08:05 +0100 
Subject: [PATCH 22/22] Make lightpush status codes align better with HTTP
 codes - this helps the REST API while doing no harm at the protocol level
 (#3315)

---
 waku/waku_lightpush/common.nim | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/waku/waku_lightpush/common.nim b/waku/waku_lightpush/common.nim
index 502e23883..c9f39cca2 100644
--- a/waku/waku_lightpush/common.nim
+++ b/waku/waku_lightpush/common.nim
@@ -14,9 +14,9 @@ type LightpushStatusCode* = enum
   UNSUPPORTED_PUBSUB_TOPIC = uint32(421)
   TOO_MANY_REQUESTS = uint32(429)
   INTERNAL_SERVER_ERROR = uint32(500)
-  NO_PEERS_TO_RELAY = uint32(503)
+  SERVICE_NOT_AVAILABLE = uint32(503)
   OUT_OF_RLN_PROOF = uint32(504)
-  SERVICE_NOT_AVAILABLE = uint32(505)
+  NO_PEERS_TO_RELAY = uint32(505)
 
 type ErrorStatus* = tuple[code: LightpushStatusCode, desc: Option[string]]
 type WakuLightPushResult* = Result[uint32, ErrorStatus]