From 221cbf65996294aa6428aa2b951dee0e749fc1ed Mon Sep 17 00:00:00 2001 From: Prem Chaitanya Prathi Date: Tue, 9 Jul 2024 18:50:44 +0530 Subject: [PATCH 01/27] fix: for light node do not check for matching shards but only clusterID (#1154) --- waku/v2/protocol/metadata/waku_metadata.go | 71 +++++++++++-------- .../protocol/metadata/waku_metadata_test.go | 32 +++++++-- 2 files changed, 70 insertions(+), 33 deletions(-) diff --git a/waku/v2/protocol/metadata/waku_metadata.go b/waku/v2/protocol/metadata/waku_metadata.go index 87590203..f516ecd5 100644 --- a/waku/v2/protocol/metadata/waku_metadata.go +++ b/waku/v2/protocol/metadata/waku_metadata.go @@ -18,6 +18,7 @@ import ( "github.com/waku-org/go-waku/waku/v2/protocol" "github.com/waku-org/go-waku/waku/v2/protocol/enr" "github.com/waku-org/go-waku/waku/v2/protocol/metadata/pb" + "github.com/waku-org/go-waku/waku/v2/protocol/relay" "go.uber.org/zap" ) @@ -83,6 +84,7 @@ func (wakuM *WakuMetadata) RelayShard() (*protocol.RelayShards, error) { } func (wakuM *WakuMetadata) ClusterAndShards() (*uint32, []uint32, error) { + shard, err := wakuM.RelayShard() if err != nil { return nil, nil, err @@ -100,7 +102,7 @@ func (wakuM *WakuMetadata) ClusterAndShards() (*uint32, []uint32, error) { return &u32ClusterID, shards, nil } -func (wakuM *WakuMetadata) Request(ctx context.Context, peerID peer.ID) (*protocol.RelayShards, error) { +func (wakuM *WakuMetadata) Request(ctx context.Context, peerID peer.ID) (*pb.WakuMetadataResponse, error) { logger := wakuM.log.With(logging.HostID("peer", peerID)) stream, err := wakuM.h.NewStream(ctx, peerID, MetadataID_v1) @@ -149,31 +151,7 @@ func (wakuM *WakuMetadata) Request(ctx context.Context, peerID peer.ID) (*protoc stream.Close() logger.Debug("received metadata response") - - if response.ClusterId == nil { - return nil, errors.New("node did not provide a waku clusterid") - } - - rClusterID := uint16(*response.ClusterId) - var rShardIDs []uint16 - if len(response.Shards) != 0 { - for _, i := 
range response.Shards { - rShardIDs = append(rShardIDs, uint16(i)) - } - } else { - // TODO: remove with nwaku 0.28 deployment - for _, i := range response.ShardsDeprecated { // nolint: staticcheck - rShardIDs = append(rShardIDs, uint16(i)) - } - } - logger.Debug("getting remote cluster and shards") - - rs, err := protocol.NewRelayShards(rClusterID, rShardIDs...) - if err != nil { - return nil, err - } - - return &rs, nil + return response, nil } func (wakuM *WakuMetadata) onRequest(ctx context.Context) func(network.Stream) { @@ -259,14 +237,49 @@ func (wakuM *WakuMetadata) Connected(n network.Network, cc network.Conn) { } peerID := cc.RemotePeer() - shard, err := wakuM.Request(wakuM.ctx, peerID) + response, err := wakuM.Request(wakuM.ctx, peerID) + if err != nil { + wakuM.disconnectPeer(peerID, err) + return + } + if response.ClusterId == nil { + wakuM.disconnectPeer(peerID, errors.New("node did not provide a waku clusterid")) + return + } + rClusterID := uint16(*response.ClusterId) + var rs protocol.RelayShards + + if _, err = wakuM.h.Peerstore().SupportsProtocols(peerID, relay.WakuRelayID_v200); err == nil { + wakuM.log.Debug("light peer only checking clusterID") + if rClusterID != wakuM.clusterID { + wakuM.disconnectPeer(peerID, errors.New("different clusterID reported")) + } + return + } + + wakuM.log.Debug("relay peer checking cluster and shards") + + var rShardIDs []uint16 + if len(response.Shards) != 0 { + for _, i := range response.Shards { + rShardIDs = append(rShardIDs, uint16(i)) + } + } else { + // TODO: remove with nwaku 0.28 deployment + for _, i := range response.ShardsDeprecated { // nolint: staticcheck + rShardIDs = append(rShardIDs, uint16(i)) + } + } + wakuM.log.Debug("getting remote cluster and shards") + //if peer supports relay, then check for both clusterID and shards. + rs, err = protocol.NewRelayShards(rClusterID, rShardIDs...) 
if err != nil { wakuM.disconnectPeer(peerID, err) return } - if shard.ClusterID != wakuM.clusterID { + if rs.ClusterID != wakuM.clusterID { wakuM.disconnectPeer(peerID, errors.New("different clusterID reported")) return } @@ -274,7 +287,7 @@ func (wakuM *WakuMetadata) Connected(n network.Network, cc network.Conn) { // Store shards so they're used to verify if a relay peer supports the same shards we do wakuM.peerShardsMutex.Lock() defer wakuM.peerShardsMutex.Unlock() - wakuM.peerShards[peerID] = shard.ShardIDs + wakuM.peerShards[peerID] = rs.ShardIDs }() } diff --git a/waku/v2/protocol/metadata/waku_metadata_test.go b/waku/v2/protocol/metadata/waku_metadata_test.go index 1b5f1b70..547d9d13 100644 --- a/waku/v2/protocol/metadata/waku_metadata_test.go +++ b/waku/v2/protocol/metadata/waku_metadata_test.go @@ -17,6 +17,7 @@ import ( "github.com/waku-org/go-waku/tests" "github.com/waku-org/go-waku/waku/v2/protocol" "github.com/waku-org/go-waku/waku/v2/protocol/enr" + "github.com/waku-org/go-waku/waku/v2/protocol/relay" "github.com/waku-org/go-waku/waku/v2/utils" ) @@ -68,13 +69,28 @@ func TestWakuMetadataRequest(t *testing.T) { m_noRS := createWakuMetadata(t, nil) m16_1.h.Peerstore().AddAddrs(m16_2.h.ID(), m16_2.h.Network().ListenAddresses(), peerstore.PermanentAddrTTL) + err = m16_1.h.Peerstore().AddProtocols(m16_2.h.ID(), relay.WakuRelayID_v200) + require.NoError(t, err) + + err = m16_2.h.Peerstore().AddProtocols(m16_1.h.ID(), relay.WakuRelayID_v200) + require.NoError(t, err) + m16_1.h.Peerstore().AddAddrs(m_noRS.h.ID(), m_noRS.h.Network().ListenAddresses(), peerstore.PermanentAddrTTL) // Query a peer that is subscribed to a shard result, err := m16_1.Request(context.Background(), m16_2.h.ID()) require.NoError(t, err) - require.Equal(t, testShard16, result.ClusterID) - require.Equal(t, rs16_2.ShardIDs, result.ShardIDs) + + var rShardIDs []uint16 + if len(result.Shards) != 0 { + for _, i := range result.Shards { + rShardIDs = append(rShardIDs, uint16(i)) + } + } + rs, 
err := protocol.NewRelayShards(uint16(*result.ClusterId), rShardIDs...) + require.NoError(t, err) + require.Equal(t, testShard16, rs.ClusterID) + require.Equal(t, rs16_2.ShardIDs, rs.ShardIDs) // Updating the peer shards rs16_2.ShardIDs = append(rs16_2.ShardIDs, 3, 4) @@ -84,8 +100,16 @@ func TestWakuMetadataRequest(t *testing.T) { // Query same peer, after that peer subscribes to more shards result, err = m16_1.Request(context.Background(), m16_2.h.ID()) require.NoError(t, err) - require.Equal(t, testShard16, result.ClusterID) - require.ElementsMatch(t, rs16_2.ShardIDs, result.ShardIDs) + rShardIDs = make([]uint16, 0) + if len(result.Shards) != 0 { + for _, i := range result.Shards { + rShardIDs = append(rShardIDs, uint16(i)) + } + } + rs, err = protocol.NewRelayShards(uint16(*result.ClusterId), rShardIDs...) + require.NoError(t, err) + require.Equal(t, testShard16, rs.ClusterID) + require.ElementsMatch(t, rs16_2.ShardIDs, rs.ShardIDs) // Query a peer not subscribed to any shard _, err = m16_1.Request(context.Background(), m_noRS.h.ID()) From 3b0c8e920796883198c05fb873385cd454534a5d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?rich=CE=9Brd?= Date: Thu, 11 Jul 2024 11:26:04 -0400 Subject: [PATCH 02/27] chore: bump go-libp2p (#1155) --- .github/docker-compose/nwaku.yml | 2 +- examples/basic-light-client/go.mod | 8 ++-- examples/basic-light-client/go.sum | 16 +++---- examples/basic-relay/go.mod | 8 ++-- examples/basic-relay/go.sum | 16 +++---- examples/chat2/go.mod | 8 ++-- examples/chat2/go.sum | 16 +++---- examples/filter2/go.mod | 8 ++-- examples/filter2/go.sum | 16 +++---- examples/noise/go.mod | 8 ++-- examples/noise/go.sum | 16 +++---- examples/rln/go.mod | 8 ++-- examples/rln/go.sum | 16 +++---- flake.nix | 2 +- go.mod | 8 ++-- go.sum | 16 +++---- waku/v2/protocol/metadata/waku_metadata.go | 5 --- .../protocol/metadata/waku_metadata_test.go | 18 -------- waku/v2/protocol/store/client_test.go | 45 +++++++++++++------ 19 files changed, 118 insertions(+), 122 
deletions(-) diff --git a/.github/docker-compose/nwaku.yml b/.github/docker-compose/nwaku.yml index b8371066..499aa15b 100644 --- a/.github/docker-compose/nwaku.yml +++ b/.github/docker-compose/nwaku.yml @@ -1,6 +1,6 @@ services: nwaku: image: "harbor.status.im/wakuorg/nwaku:latest" - command: ["--relay", "--store", "--nodekey=1122334455667788990011223344556677889900112233445566778899001122"] + command: ["--relay", "--store", "--nodekey=1122334455667788990011223344556677889900112233445566778899001122", "--cluster-id=99", "--pubsub-topic=/waku/2/rs/99/1"] ports: - "60000" diff --git a/examples/basic-light-client/go.mod b/examples/basic-light-client/go.mod index d24ecf1b..87361937 100644 --- a/examples/basic-light-client/go.mod +++ b/examples/basic-light-client/go.mod @@ -12,7 +12,7 @@ replace github.com/libp2p/go-libp2p-pubsub v0.11.0 => github.com/waku-org/go-lib require ( github.com/ethereum/go-ethereum v1.10.26 - github.com/libp2p/go-libp2p v0.35.0 + github.com/libp2p/go-libp2p v0.35.2 github.com/multiformats/go-multiaddr v0.12.4 github.com/urfave/cli/v2 v2.27.2 github.com/waku-org/go-waku v0.2.3-0.20221109195301-b2a5a68d28ba @@ -53,7 +53,7 @@ require ( github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect github.com/google/uuid v1.4.0 // indirect - github.com/gorilla/websocket v1.5.1 // indirect + github.com/gorilla/websocket v1.5.3 // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/huin/goupnp v1.3.0 // indirect @@ -94,7 +94,7 @@ require ( github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pion/datachannel v1.5.6 // indirect github.com/pion/dtls/v2 v2.2.11 // indirect - github.com/pion/ice/v2 v2.3.24 // indirect + github.com/pion/ice/v2 v2.3.25 // indirect github.com/pion/interceptor v0.1.29 // indirect github.com/pion/logging v0.2.2 // indirect 
github.com/pion/mdns v0.0.12 // indirect @@ -136,7 +136,7 @@ require ( github.com/wk8/go-ordered-map v1.0.0 // indirect github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect go.uber.org/dig v1.17.1 // indirect - go.uber.org/fx v1.21.1 // indirect + go.uber.org/fx v1.22.1 // indirect go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.23.0 // indirect diff --git a/examples/basic-light-client/go.sum b/examples/basic-light-client/go.sum index f1c7b7c1..2f3f8015 100644 --- a/examples/basic-light-client/go.sum +++ b/examples/basic-light-client/go.sum @@ -283,8 +283,8 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= @@ -382,8 +382,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= 
github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.35.0 h1:1xS1Bkr9X7GtdvV6ntLnDV9xB1kNjHK1lZ0eaO6gnhc= -github.com/libp2p/go-libp2p v0.35.0/go.mod h1:snyJQix4ET6Tj+LeI0VPjjxTtdWpeOhYt5lEY0KirkQ= +github.com/libp2p/go-libp2p v0.35.2 h1:287oHbuplkrLdAF+syB0n/qDgd50AUBtEODqS0e0HDs= +github.com/libp2p/go-libp2p v0.35.2/go.mod h1:RKCDNt30IkFipGL0tl8wQW/3zVWEGFUZo8g2gAKxwjU= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= @@ -523,8 +523,8 @@ github.com/pion/datachannel v1.5.6/go.mod h1:1eKT6Q85pRnr2mHiWHxJwO50SfZRtWHTsNI github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= github.com/pion/dtls/v2 v2.2.11 h1:9U/dpCYl1ySttROPWJgqWKEylUdT0fXp/xst6JwY5Ks= github.com/pion/dtls/v2 v2.2.11/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/ice/v2 v2.3.24 h1:RYgzhH/u5lH0XO+ABatVKCtRd+4U1GEaCXSMjNr13tI= -github.com/pion/ice/v2 v2.3.24/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw= +github.com/pion/ice/v2 v2.3.25 h1:M5rJA07dqhi3nobJIg+uPtcVjFECTrhcR3n0ns8kDZs= +github.com/pion/ice/v2 v2.3.25/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw= github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M= github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= @@ -732,8 +732,8 @@ go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= go.uber.org/dig v1.17.1/go.mod 
h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= -go.uber.org/fx v1.21.1 h1:RqBh3cYdzZS0uqwVeEjOX2p73dddLpym315myy/Bpb0= -go.uber.org/fx v1.21.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= +go.uber.org/fx v1.22.1 h1:nvvln7mwyT5s1q201YE29V/BFrGor6vMiDNpU/78Mys= +go.uber.org/fx v1.22.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= diff --git a/examples/basic-relay/go.mod b/examples/basic-relay/go.mod index 1e1adad1..9262ff91 100644 --- a/examples/basic-relay/go.mod +++ b/examples/basic-relay/go.mod @@ -52,7 +52,7 @@ require ( github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect github.com/google/uuid v1.4.0 // indirect - github.com/gorilla/websocket v1.5.1 // indirect + github.com/gorilla/websocket v1.5.3 // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c // indirect @@ -66,7 +66,7 @@ require ( github.com/koron/go-ssdp v0.0.4 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect - github.com/libp2p/go-libp2p v0.35.0 // indirect + github.com/libp2p/go-libp2p v0.35.2 // indirect github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect github.com/libp2p/go-libp2p-pubsub v0.11.0 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect @@ -95,7 +95,7 @@ require ( github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pion/datachannel v1.5.6 // indirect github.com/pion/dtls/v2 v2.2.11 // indirect - github.com/pion/ice/v2 v2.3.24 // indirect + github.com/pion/ice/v2 v2.3.25 // 
indirect github.com/pion/interceptor v0.1.29 // indirect github.com/pion/logging v0.2.2 // indirect github.com/pion/mdns v0.0.12 // indirect @@ -137,7 +137,7 @@ require ( github.com/wk8/go-ordered-map v1.0.0 // indirect github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect go.uber.org/dig v1.17.1 // indirect - go.uber.org/fx v1.21.1 // indirect + go.uber.org/fx v1.22.1 // indirect go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.23.0 // indirect diff --git a/examples/basic-relay/go.sum b/examples/basic-relay/go.sum index 08067bd5..7feb9818 100644 --- a/examples/basic-relay/go.sum +++ b/examples/basic-relay/go.sum @@ -284,8 +284,8 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= @@ -384,8 +384,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-flow-metrics v0.1.0 
h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.35.0 h1:1xS1Bkr9X7GtdvV6ntLnDV9xB1kNjHK1lZ0eaO6gnhc= -github.com/libp2p/go-libp2p v0.35.0/go.mod h1:snyJQix4ET6Tj+LeI0VPjjxTtdWpeOhYt5lEY0KirkQ= +github.com/libp2p/go-libp2p v0.35.2 h1:287oHbuplkrLdAF+syB0n/qDgd50AUBtEODqS0e0HDs= +github.com/libp2p/go-libp2p v0.35.2/go.mod h1:RKCDNt30IkFipGL0tl8wQW/3zVWEGFUZo8g2gAKxwjU= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= @@ -525,8 +525,8 @@ github.com/pion/datachannel v1.5.6/go.mod h1:1eKT6Q85pRnr2mHiWHxJwO50SfZRtWHTsNI github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= github.com/pion/dtls/v2 v2.2.11 h1:9U/dpCYl1ySttROPWJgqWKEylUdT0fXp/xst6JwY5Ks= github.com/pion/dtls/v2 v2.2.11/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/ice/v2 v2.3.24 h1:RYgzhH/u5lH0XO+ABatVKCtRd+4U1GEaCXSMjNr13tI= -github.com/pion/ice/v2 v2.3.24/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw= +github.com/pion/ice/v2 v2.3.25 h1:M5rJA07dqhi3nobJIg+uPtcVjFECTrhcR3n0ns8kDZs= +github.com/pion/ice/v2 v2.3.25/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw= github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M= github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= @@ -734,8 +734,8 @@ go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= go.uber.org/dig 
v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= -go.uber.org/fx v1.21.1 h1:RqBh3cYdzZS0uqwVeEjOX2p73dddLpym315myy/Bpb0= -go.uber.org/fx v1.21.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= +go.uber.org/fx v1.22.1 h1:nvvln7mwyT5s1q201YE29V/BFrGor6vMiDNpU/78Mys= +go.uber.org/fx v1.22.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= diff --git a/examples/chat2/go.mod b/examples/chat2/go.mod index 17291a0f..2407fb63 100644 --- a/examples/chat2/go.mod +++ b/examples/chat2/go.mod @@ -16,7 +16,7 @@ require ( github.com/charmbracelet/lipgloss v0.5.0 github.com/ethereum/go-ethereum v1.10.26 github.com/ipfs/go-log/v2 v2.5.1 - github.com/libp2p/go-libp2p v0.35.0 + github.com/libp2p/go-libp2p v0.35.2 github.com/muesli/reflow v0.3.0 github.com/multiformats/go-multiaddr v0.12.4 github.com/urfave/cli/v2 v2.27.2 @@ -60,7 +60,7 @@ require ( github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect github.com/google/uuid v1.4.0 // indirect - github.com/gorilla/websocket v1.5.1 // indirect + github.com/gorilla/websocket v1.5.3 // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c // indirect @@ -106,7 +106,7 @@ require ( github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pion/datachannel v1.5.6 // indirect github.com/pion/dtls/v2 v2.2.11 // indirect - github.com/pion/ice/v2 v2.3.24 // indirect + github.com/pion/ice/v2 v2.3.25 // indirect github.com/pion/interceptor v0.1.29 // indirect github.com/pion/logging v0.2.2 // indirect 
github.com/pion/mdns v0.0.12 // indirect @@ -149,7 +149,7 @@ require ( github.com/wk8/go-ordered-map v1.0.0 // indirect github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect go.uber.org/dig v1.17.1 // indirect - go.uber.org/fx v1.21.1 // indirect + go.uber.org/fx v1.22.1 // indirect go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect diff --git a/examples/chat2/go.sum b/examples/chat2/go.sum index 791347d2..810f4931 100644 --- a/examples/chat2/go.sum +++ b/examples/chat2/go.sum @@ -296,8 +296,8 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= @@ -396,8 +396,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod 
h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.35.0 h1:1xS1Bkr9X7GtdvV6ntLnDV9xB1kNjHK1lZ0eaO6gnhc= -github.com/libp2p/go-libp2p v0.35.0/go.mod h1:snyJQix4ET6Tj+LeI0VPjjxTtdWpeOhYt5lEY0KirkQ= +github.com/libp2p/go-libp2p v0.35.2 h1:287oHbuplkrLdAF+syB0n/qDgd50AUBtEODqS0e0HDs= +github.com/libp2p/go-libp2p v0.35.2/go.mod h1:RKCDNt30IkFipGL0tl8wQW/3zVWEGFUZo8g2gAKxwjU= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= @@ -553,8 +553,8 @@ github.com/pion/datachannel v1.5.6/go.mod h1:1eKT6Q85pRnr2mHiWHxJwO50SfZRtWHTsNI github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= github.com/pion/dtls/v2 v2.2.11 h1:9U/dpCYl1ySttROPWJgqWKEylUdT0fXp/xst6JwY5Ks= github.com/pion/dtls/v2 v2.2.11/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/ice/v2 v2.3.24 h1:RYgzhH/u5lH0XO+ABatVKCtRd+4U1GEaCXSMjNr13tI= -github.com/pion/ice/v2 v2.3.24/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw= +github.com/pion/ice/v2 v2.3.25 h1:M5rJA07dqhi3nobJIg+uPtcVjFECTrhcR3n0ns8kDZs= +github.com/pion/ice/v2 v2.3.25/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw= github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M= github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= @@ -766,8 +766,8 @@ go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= -go.uber.org/fx v1.21.1 
h1:RqBh3cYdzZS0uqwVeEjOX2p73dddLpym315myy/Bpb0= -go.uber.org/fx v1.21.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= +go.uber.org/fx v1.22.1 h1:nvvln7mwyT5s1q201YE29V/BFrGor6vMiDNpU/78Mys= +go.uber.org/fx v1.22.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= diff --git a/examples/filter2/go.mod b/examples/filter2/go.mod index 2bb37bde..6dda8f4a 100644 --- a/examples/filter2/go.mod +++ b/examples/filter2/go.mod @@ -48,7 +48,7 @@ require ( github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect github.com/google/uuid v1.4.0 // indirect - github.com/gorilla/websocket v1.5.1 // indirect + github.com/gorilla/websocket v1.5.3 // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c // indirect @@ -61,7 +61,7 @@ require ( github.com/koron/go-ssdp v0.0.4 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect - github.com/libp2p/go-libp2p v0.35.0 // indirect + github.com/libp2p/go-libp2p v0.35.2 // indirect github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect github.com/libp2p/go-libp2p-pubsub v0.11.0 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect @@ -91,7 +91,7 @@ require ( github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pion/datachannel v1.5.6 // indirect github.com/pion/dtls/v2 v2.2.11 // indirect - github.com/pion/ice/v2 v2.3.24 // indirect + github.com/pion/ice/v2 v2.3.25 // indirect github.com/pion/interceptor v0.1.29 // indirect github.com/pion/logging v0.2.2 // 
indirect github.com/pion/mdns v0.0.12 // indirect @@ -131,7 +131,7 @@ require ( github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230916171518-2a77c3734dd1 // indirect github.com/wk8/go-ordered-map v1.0.0 // indirect go.uber.org/dig v1.17.1 // indirect - go.uber.org/fx v1.21.1 // indirect + go.uber.org/fx v1.22.1 // indirect go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect diff --git a/examples/filter2/go.sum b/examples/filter2/go.sum index 1bedcb7a..a5203d1b 100644 --- a/examples/filter2/go.sum +++ b/examples/filter2/go.sum @@ -282,8 +282,8 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= @@ -382,8 +382,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod 
h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.35.0 h1:1xS1Bkr9X7GtdvV6ntLnDV9xB1kNjHK1lZ0eaO6gnhc= -github.com/libp2p/go-libp2p v0.35.0/go.mod h1:snyJQix4ET6Tj+LeI0VPjjxTtdWpeOhYt5lEY0KirkQ= +github.com/libp2p/go-libp2p v0.35.2 h1:287oHbuplkrLdAF+syB0n/qDgd50AUBtEODqS0e0HDs= +github.com/libp2p/go-libp2p v0.35.2/go.mod h1:RKCDNt30IkFipGL0tl8wQW/3zVWEGFUZo8g2gAKxwjU= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= @@ -523,8 +523,8 @@ github.com/pion/datachannel v1.5.6/go.mod h1:1eKT6Q85pRnr2mHiWHxJwO50SfZRtWHTsNI github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= github.com/pion/dtls/v2 v2.2.11 h1:9U/dpCYl1ySttROPWJgqWKEylUdT0fXp/xst6JwY5Ks= github.com/pion/dtls/v2 v2.2.11/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/ice/v2 v2.3.24 h1:RYgzhH/u5lH0XO+ABatVKCtRd+4U1GEaCXSMjNr13tI= -github.com/pion/ice/v2 v2.3.24/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw= +github.com/pion/ice/v2 v2.3.25 h1:M5rJA07dqhi3nobJIg+uPtcVjFECTrhcR3n0ns8kDZs= +github.com/pion/ice/v2 v2.3.25/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw= github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M= github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= @@ -734,8 +734,8 @@ go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= -go.uber.org/fx v1.21.1 
h1:RqBh3cYdzZS0uqwVeEjOX2p73dddLpym315myy/Bpb0= -go.uber.org/fx v1.21.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= +go.uber.org/fx v1.22.1 h1:nvvln7mwyT5s1q201YE29V/BFrGor6vMiDNpU/78Mys= +go.uber.org/fx v1.22.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= diff --git a/examples/noise/go.mod b/examples/noise/go.mod index 6b7b3951..70bc5787 100644 --- a/examples/noise/go.mod +++ b/examples/noise/go.mod @@ -12,7 +12,7 @@ replace github.com/libp2p/go-libp2p-pubsub v0.11.0 => github.com/waku-org/go-lib require ( github.com/ipfs/go-log/v2 v2.5.1 - github.com/libp2p/go-libp2p v0.35.0 + github.com/libp2p/go-libp2p v0.35.2 github.com/waku-org/go-noise v0.0.4 github.com/waku-org/go-waku v0.2.3-0.20221109195301-b2a5a68d28ba go.uber.org/zap v1.27.0 @@ -51,7 +51,7 @@ require ( github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect github.com/google/uuid v1.4.0 // indirect - github.com/gorilla/websocket v1.5.1 // indirect + github.com/gorilla/websocket v1.5.3 // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c // indirect @@ -93,7 +93,7 @@ require ( github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pion/datachannel v1.5.6 // indirect github.com/pion/dtls/v2 v2.2.11 // indirect - github.com/pion/ice/v2 v2.3.24 // indirect + github.com/pion/ice/v2 v2.3.25 // indirect github.com/pion/interceptor v0.1.29 // indirect github.com/pion/logging v0.2.2 // indirect github.com/pion/mdns v0.0.12 // indirect @@ -133,7 +133,7 @@ require ( 
github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230916171518-2a77c3734dd1 // indirect github.com/wk8/go-ordered-map v1.0.0 // indirect go.uber.org/dig v1.17.1 // indirect - go.uber.org/fx v1.21.1 // indirect + go.uber.org/fx v1.22.1 // indirect go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.23.0 // indirect diff --git a/examples/noise/go.sum b/examples/noise/go.sum index c0a5f3d7..4cc14671 100644 --- a/examples/noise/go.sum +++ b/examples/noise/go.sum @@ -282,8 +282,8 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= @@ -382,8 +382,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.35.0 
h1:1xS1Bkr9X7GtdvV6ntLnDV9xB1kNjHK1lZ0eaO6gnhc= -github.com/libp2p/go-libp2p v0.35.0/go.mod h1:snyJQix4ET6Tj+LeI0VPjjxTtdWpeOhYt5lEY0KirkQ= +github.com/libp2p/go-libp2p v0.35.2 h1:287oHbuplkrLdAF+syB0n/qDgd50AUBtEODqS0e0HDs= +github.com/libp2p/go-libp2p v0.35.2/go.mod h1:RKCDNt30IkFipGL0tl8wQW/3zVWEGFUZo8g2gAKxwjU= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= @@ -523,8 +523,8 @@ github.com/pion/datachannel v1.5.6/go.mod h1:1eKT6Q85pRnr2mHiWHxJwO50SfZRtWHTsNI github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= github.com/pion/dtls/v2 v2.2.11 h1:9U/dpCYl1ySttROPWJgqWKEylUdT0fXp/xst6JwY5Ks= github.com/pion/dtls/v2 v2.2.11/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/ice/v2 v2.3.24 h1:RYgzhH/u5lH0XO+ABatVKCtRd+4U1GEaCXSMjNr13tI= -github.com/pion/ice/v2 v2.3.24/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw= +github.com/pion/ice/v2 v2.3.25 h1:M5rJA07dqhi3nobJIg+uPtcVjFECTrhcR3n0ns8kDZs= +github.com/pion/ice/v2 v2.3.25/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw= github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M= github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= @@ -736,8 +736,8 @@ go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= -go.uber.org/fx v1.21.1 h1:RqBh3cYdzZS0uqwVeEjOX2p73dddLpym315myy/Bpb0= -go.uber.org/fx v1.21.1/go.mod 
h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= +go.uber.org/fx v1.22.1 h1:nvvln7mwyT5s1q201YE29V/BFrGor6vMiDNpU/78Mys= +go.uber.org/fx v1.22.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= diff --git a/examples/rln/go.mod b/examples/rln/go.mod index 89858217..5c9995ef 100644 --- a/examples/rln/go.mod +++ b/examples/rln/go.mod @@ -48,7 +48,7 @@ require ( github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect github.com/google/uuid v1.4.0 // indirect - github.com/gorilla/websocket v1.5.1 // indirect + github.com/gorilla/websocket v1.5.3 // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c // indirect @@ -62,7 +62,7 @@ require ( github.com/koron/go-ssdp v0.0.4 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect - github.com/libp2p/go-libp2p v0.35.0 // indirect + github.com/libp2p/go-libp2p v0.35.2 // indirect github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect github.com/libp2p/go-libp2p-pubsub v0.11.0 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect @@ -92,7 +92,7 @@ require ( github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pion/datachannel v1.5.6 // indirect github.com/pion/dtls/v2 v2.2.11 // indirect - github.com/pion/ice/v2 v2.3.24 // indirect + github.com/pion/ice/v2 v2.3.25 // indirect github.com/pion/interceptor v0.1.29 // indirect github.com/pion/logging v0.2.2 // indirect github.com/pion/mdns v0.0.12 // indirect @@ -132,7 +132,7 @@ require ( 
github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230916171518-2a77c3734dd1 // indirect github.com/wk8/go-ordered-map v1.0.0 // indirect go.uber.org/dig v1.17.1 // indirect - go.uber.org/fx v1.21.1 // indirect + go.uber.org/fx v1.22.1 // indirect go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.23.0 // indirect diff --git a/examples/rln/go.sum b/examples/rln/go.sum index 1bedcb7a..a5203d1b 100644 --- a/examples/rln/go.sum +++ b/examples/rln/go.sum @@ -282,8 +282,8 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= @@ -382,8 +382,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.35.0 
h1:1xS1Bkr9X7GtdvV6ntLnDV9xB1kNjHK1lZ0eaO6gnhc= -github.com/libp2p/go-libp2p v0.35.0/go.mod h1:snyJQix4ET6Tj+LeI0VPjjxTtdWpeOhYt5lEY0KirkQ= +github.com/libp2p/go-libp2p v0.35.2 h1:287oHbuplkrLdAF+syB0n/qDgd50AUBtEODqS0e0HDs= +github.com/libp2p/go-libp2p v0.35.2/go.mod h1:RKCDNt30IkFipGL0tl8wQW/3zVWEGFUZo8g2gAKxwjU= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= @@ -523,8 +523,8 @@ github.com/pion/datachannel v1.5.6/go.mod h1:1eKT6Q85pRnr2mHiWHxJwO50SfZRtWHTsNI github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= github.com/pion/dtls/v2 v2.2.11 h1:9U/dpCYl1ySttROPWJgqWKEylUdT0fXp/xst6JwY5Ks= github.com/pion/dtls/v2 v2.2.11/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/ice/v2 v2.3.24 h1:RYgzhH/u5lH0XO+ABatVKCtRd+4U1GEaCXSMjNr13tI= -github.com/pion/ice/v2 v2.3.24/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw= +github.com/pion/ice/v2 v2.3.25 h1:M5rJA07dqhi3nobJIg+uPtcVjFECTrhcR3n0ns8kDZs= +github.com/pion/ice/v2 v2.3.25/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw= github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M= github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= @@ -734,8 +734,8 @@ go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= -go.uber.org/fx v1.21.1 h1:RqBh3cYdzZS0uqwVeEjOX2p73dddLpym315myy/Bpb0= -go.uber.org/fx v1.21.1/go.mod 
h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= +go.uber.org/fx v1.22.1 h1:nvvln7mwyT5s1q201YE29V/BFrGor6vMiDNpU/78Mys= +go.uber.org/fx v1.22.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= diff --git a/flake.nix b/flake.nix index 42ac2717..fff9716b 100644 --- a/flake.nix +++ b/flake.nix @@ -29,7 +29,7 @@ ]; doCheck = false; # FIXME: This needs to be manually changed when updating modules. - vendorHash = "sha256-9AnVgIcsQyB8xfxJqj17TrdWqQYeAHrUaUDQe10gAzE="; + vendorHash = "sha256-zwvZVTiwv7cc4vAM2Fil+qAG1v1J8q4BqX5lCgCStIc="; # Fix for 'nix run' trying to execute 'go-waku'. meta = { mainProgram = "waku"; }; }; diff --git a/go.mod b/go.mod index 587e48e5..7e948001 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/golang-migrate/migrate/v4 v4.15.2 github.com/ipfs/go-ds-sql v0.3.0 github.com/ipfs/go-log/v2 v2.5.1 - github.com/libp2p/go-libp2p v0.35.0 + github.com/libp2p/go-libp2p v0.35.2 github.com/libp2p/go-libp2p-pubsub v0.11.0 github.com/libp2p/go-msgio v0.3.0 github.com/mattn/go-sqlite3 v1.14.17 @@ -68,7 +68,7 @@ require ( github.com/onsi/ginkgo/v2 v2.15.0 // indirect github.com/pion/datachannel v1.5.6 // indirect github.com/pion/dtls/v2 v2.2.11 // indirect - github.com/pion/ice/v2 v2.3.24 // indirect + github.com/pion/ice/v2 v2.3.25 // indirect github.com/pion/interceptor v0.1.29 // indirect github.com/pion/logging v0.2.2 // indirect github.com/pion/mdns v0.0.12 // indirect @@ -91,7 +91,7 @@ require ( github.com/waku-org/go-zerokit-rln-arm v0.0.0-20230916171929-1dd9494ff065 // indirect github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230916171518-2a77c3734dd1 // indirect go.uber.org/dig v1.17.1 // indirect - go.uber.org/fx v1.21.1 // indirect + go.uber.org/fx v1.22.1 
// indirect go.uber.org/mock v0.4.0 // indirect golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect ) @@ -120,7 +120,7 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/uuid v1.4.0 - github.com/gorilla/websocket v1.5.1 // indirect + github.com/gorilla/websocket v1.5.3 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d diff --git a/go.sum b/go.sum index 6feeb667..21d942b5 100644 --- a/go.sum +++ b/go.sum @@ -785,8 +785,8 @@ github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB7 github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -1040,8 +1040,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= 
github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.35.0 h1:1xS1Bkr9X7GtdvV6ntLnDV9xB1kNjHK1lZ0eaO6gnhc= -github.com/libp2p/go-libp2p v0.35.0/go.mod h1:snyJQix4ET6Tj+LeI0VPjjxTtdWpeOhYt5lEY0KirkQ= +github.com/libp2p/go-libp2p v0.35.2 h1:287oHbuplkrLdAF+syB0n/qDgd50AUBtEODqS0e0HDs= +github.com/libp2p/go-libp2p v0.35.2/go.mod h1:RKCDNt30IkFipGL0tl8wQW/3zVWEGFUZo8g2gAKxwjU= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= @@ -1302,8 +1302,8 @@ github.com/pion/datachannel v1.5.6/go.mod h1:1eKT6Q85pRnr2mHiWHxJwO50SfZRtWHTsNI github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= github.com/pion/dtls/v2 v2.2.11 h1:9U/dpCYl1ySttROPWJgqWKEylUdT0fXp/xst6JwY5Ks= github.com/pion/dtls/v2 v2.2.11/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/ice/v2 v2.3.24 h1:RYgzhH/u5lH0XO+ABatVKCtRd+4U1GEaCXSMjNr13tI= -github.com/pion/ice/v2 v2.3.24/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw= +github.com/pion/ice/v2 v2.3.25 h1:M5rJA07dqhi3nobJIg+uPtcVjFECTrhcR3n0ns8kDZs= +github.com/pion/ice/v2 v2.3.25/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw= github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M= github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= @@ -1680,8 +1680,8 @@ go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= go.uber.org/dig v1.17.1/go.mod 
h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= -go.uber.org/fx v1.21.1 h1:RqBh3cYdzZS0uqwVeEjOX2p73dddLpym315myy/Bpb0= -go.uber.org/fx v1.21.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= +go.uber.org/fx v1.22.1 h1:nvvln7mwyT5s1q201YE29V/BFrGor6vMiDNpU/78Mys= +go.uber.org/fx v1.22.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= diff --git a/waku/v2/protocol/metadata/waku_metadata.go b/waku/v2/protocol/metadata/waku_metadata.go index f516ecd5..6129d6c0 100644 --- a/waku/v2/protocol/metadata/waku_metadata.go +++ b/waku/v2/protocol/metadata/waku_metadata.go @@ -60,11 +60,6 @@ func (wakuM *WakuMetadata) SetHost(h host.Host) { // Start inits the metadata protocol func (wakuM *WakuMetadata) Start(ctx context.Context) error { - if wakuM.clusterID == 0 { - wakuM.log.Warn("no clusterID is specified. 
Protocol will not be initialized") - return nil - } - ctx, cancel := context.WithCancel(ctx) wakuM.ctx = ctx diff --git a/waku/v2/protocol/metadata/waku_metadata_test.go b/waku/v2/protocol/metadata/waku_metadata_test.go index 547d9d13..c2355a55 100644 --- a/waku/v2/protocol/metadata/waku_metadata_test.go +++ b/waku/v2/protocol/metadata/waku_metadata_test.go @@ -3,16 +3,12 @@ package metadata import ( "context" "crypto/rand" - "errors" - "strings" "testing" "time" gcrypto "github.com/ethereum/go-ethereum/crypto" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" - libp2pProtocol "github.com/libp2p/go-libp2p/core/protocol" - "github.com/multiformats/go-multistream" "github.com/stretchr/testify/require" "github.com/waku-org/go-waku/tests" "github.com/waku-org/go-waku/waku/v2/protocol" @@ -47,15 +43,6 @@ func createWakuMetadata(t *testing.T, rs *protocol.RelayShards) *WakuMetadata { return m1 } -func isProtocolNotSupported(err error) bool { - notSupportedErr := multistream.ErrNotSupported[libp2pProtocol.ID]{} - return errors.Is(err, notSupportedErr) -} - -func isStreamReset(err error) bool { - return strings.Contains(err.Error(), "stream reset") -} - func TestWakuMetadataRequest(t *testing.T) { testShard16 := uint16(16) @@ -110,10 +97,6 @@ func TestWakuMetadataRequest(t *testing.T) { require.NoError(t, err) require.Equal(t, testShard16, rs.ClusterID) require.ElementsMatch(t, rs16_2.ShardIDs, rs.ShardIDs) - - // Query a peer not subscribed to any shard - _, err = m16_1.Request(context.Background(), m_noRS.h.ID()) - require.True(t, isProtocolNotSupported(err) || isStreamReset(err)) } func TestNoNetwork(t *testing.T) { @@ -183,5 +166,4 @@ func TestDropConnectionOnDiffNetworks(t *testing.T) { require.Len(t, m3.h.Network().Peers(), 1) require.Equal(t, []peer.ID{m3.h.ID()}, m2.h.Network().Peers()) require.Equal(t, []peer.ID{m2.h.ID()}, m3.h.Network().Peers()) - } diff --git a/waku/v2/protocol/store/client_test.go 
b/waku/v2/protocol/store/client_test.go index d98d14c9..357dc270 100644 --- a/waku/v2/protocol/store/client_test.go +++ b/waku/v2/protocol/store/client_test.go @@ -10,6 +10,8 @@ import ( "testing" "time" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/p2p/enode" "github.com/libp2p/go-libp2p/core/peer" "github.com/multiformats/go-multiaddr" "github.com/prometheus/client_golang/prometheus" @@ -17,6 +19,8 @@ import ( "github.com/waku-org/go-waku/tests" "github.com/waku-org/go-waku/waku/v2/peermanager" "github.com/waku-org/go-waku/waku/v2/protocol" + "github.com/waku-org/go-waku/waku/v2/protocol/enr" + "github.com/waku-org/go-waku/waku/v2/protocol/metadata" "github.com/waku-org/go-waku/waku/v2/protocol/pb" "github.com/waku-org/go-waku/waku/v2/protocol/relay" "github.com/waku-org/go-waku/waku/v2/timesource" @@ -34,6 +38,21 @@ func TestStoreClient(t *testing.T) { host, err := tests.MakeHost(context.Background(), port, rand.Reader) require.NoError(t, err) + db, err := enode.OpenDB("") + require.NoError(t, err) + priv, err := crypto.GenerateKey() + require.NoError(t, err) + localnode := enode.NewLocalNode(db, priv) + + pubsubTopic := "/waku/2/rs/99/1" + clusterID := uint16(99) + rs, _ := protocol.NewRelayShards(clusterID, 1) + enr.Update(utils.Logger(), localnode, enr.WithWakuRelaySharding(rs)) + + metadata := metadata.NewWakuMetadata(clusterID, localnode, utils.Logger()) + metadata.SetHost(host) + metadata.Start(context.Background()) + // Creating a relay instance for pushing messages to the store node b := relay.NewBroadcaster(10) require.NoError(t, b.Start(context.Background())) @@ -53,7 +72,7 @@ func TestStoreClient(t *testing.T) { wakuStore := NewWakuStore(pm, timesource.NewDefaultClock(), utils.Logger()) wakuStore.SetHost(host) - _, err = wakuRelay.Subscribe(context.Background(), protocol.NewContentFilter(protocol.DefaultPubsubTopic{}.String()), relay.WithoutConsumer()) + _, err = wakuRelay.Subscribe(context.Background(), 
protocol.NewContentFilter(pubsubTopic), relay.WithoutConsumer()) require.NoError(t, err) // Obtain multiaddr from env @@ -83,7 +102,7 @@ func TestStoreClient(t *testing.T) { Version: proto.Uint32(0), Timestamp: utils.GetUnixEpoch(timesource.NewDefaultClock()), } - _, err := wakuRelay.Publish(ctx, msg, relay.WithDefaultPubsubTopic()) + _, err := wakuRelay.Publish(ctx, msg, relay.WithPubSubTopic(pubsubTopic)) require.NoError(t, err) messages = append(messages, msg) @@ -94,7 +113,7 @@ func TestStoreClient(t *testing.T) { time.Sleep(1 * time.Second) // Check for message existence - exists, err := wakuStore.Exists(ctx, messages[0].Hash(relay.DefaultWakuTopic), WithPeer(storenode.ID)) + exists, err := wakuStore.Exists(ctx, messages[0].Hash(pubsubTopic), WithPeer(storenode.ID)) require.NoError(t, err) require.True(t, exists) @@ -104,7 +123,7 @@ func TestStoreClient(t *testing.T) { require.False(t, exists) // Query messages with forward pagination - response, err := wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(relay.DefaultWakuTopic, "test"), TimeStart: startTime, TimeEnd: endTime}, WithPaging(true, 2)) + response, err := wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(pubsubTopic, "test"), TimeStart: startTime, TimeEnd: endTime}, WithPaging(true, 2)) require.NoError(t, err) // -- First page: @@ -141,7 +160,7 @@ func TestStoreClient(t *testing.T) { require.NoError(t, err) // Query messages with backward pagination - response, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(relay.DefaultWakuTopic, "test"), TimeStart: startTime, TimeEnd: endTime}, WithPaging(false, 2)) + response, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(pubsubTopic, "test"), TimeStart: startTime, TimeEnd: endTime}, WithPaging(false, 2)) require.NoError(t, err) // -- First page: @@ -176,46 +195,46 @@ func TestStoreClient(t *testing.T) { require.True(t, 
response.IsComplete()) // No cursor should be returned if there are no messages that match the criteria - response, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(relay.DefaultWakuTopic, "no-messages"), TimeStart: startTime, TimeEnd: endTime}, WithPaging(true, 2)) + response, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(pubsubTopic, "no-messages"), TimeStart: startTime, TimeEnd: endTime}, WithPaging(true, 2)) require.NoError(t, err) require.Len(t, response.messages, 0) require.Empty(t, response.Cursor()) // If the page size is larger than the number of existing messages, it should not return a cursor - response, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(relay.DefaultWakuTopic, "test"), TimeStart: startTime, TimeEnd: endTime}, WithPaging(true, 100)) + response, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(pubsubTopic, "test"), TimeStart: startTime, TimeEnd: endTime}, WithPaging(true, 100)) require.NoError(t, err) require.Len(t, response.messages, 5) require.Empty(t, response.Cursor()) // Invalid cursors should fail // TODO: nwaku does not support this feature yet - //_, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(relay.DefaultWakuTopic, "test"), TimeStart: startTime, TimeEnd: endTime}, WithCursor([]byte{1, 2, 3, 4, 5, 6})) + //_, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(pubsubTopic, "test"), TimeStart: startTime, TimeEnd: endTime}, WithCursor([]byte{1, 2, 3, 4, 5, 6})) //require.Error(t, err) // Inexistent cursors should return an empty response // TODO: nwaku does not support this feature yet - //response, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(relay.DefaultWakuTopic, "test"), TimeStart: startTime, TimeEnd: endTime}, WithCursor(make([]byte, 32))) // Requesting cursor 0x00...00 + //response, 
err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(pubsubTopic, "test"), TimeStart: startTime, TimeEnd: endTime}, WithCursor(make([]byte, 32))) // Requesting cursor 0x00...00 //require.NoError(t, err) //require.Len(t, response.messages, 0) //require.Empty(t, response.Cursor()) // Handle temporal history query with an invalid time window - _, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(relay.DefaultWakuTopic, "test"), TimeStart: endTime, TimeEnd: startTime}) + _, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(pubsubTopic, "test"), TimeStart: endTime, TimeEnd: startTime}) require.NotNil(t, err) // Handle temporal history query with a zero-size time window - response, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(relay.DefaultWakuTopic, "test"), TimeStart: startTime, TimeEnd: startTime}) + response, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(pubsubTopic, "test"), TimeStart: startTime, TimeEnd: startTime}) require.NoError(t, err) require.Len(t, response.messages, 0) require.Empty(t, response.Cursor()) // Should not include data - response, err = wakuStore.Request(ctx, MessageHashCriteria{MessageHashes: []pb.MessageHash{messages[0].Hash(relay.DefaultWakuTopic)}}, IncludeData(false), WithPeer(storenode.ID)) + response, err = wakuStore.Request(ctx, MessageHashCriteria{MessageHashes: []pb.MessageHash{messages[0].Hash(pubsubTopic)}}, IncludeData(false), WithPeer(storenode.ID)) require.NoError(t, err) require.Len(t, response.messages, 1) require.Nil(t, response.messages[0].Message) - response, err = wakuStore.Request(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(relay.DefaultWakuTopic, "test")}, IncludeData(false)) + response, err = wakuStore.Request(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(pubsubTopic, "test")}, IncludeData(false)) require.NoError(t, err) 
require.GreaterOrEqual(t, len(response.messages), 1) require.Nil(t, response.messages[0].Message) From 9412af28dd8102926b7e1fb56daba55ac5276f9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?rich=CE=9Brd?= Date: Thu, 11 Jul 2024 12:02:52 -0400 Subject: [PATCH 03/27] refactor: ping a subset of connected peers (#1148) --- cmd/waku/node.go | 2 +- library/node.go | 2 +- waku/v2/node/keepalive.go | 156 ++++++++++++++++++++++--------- waku/v2/node/keepalive_test.go | 13 +-- waku/v2/node/wakunode2.go | 8 +- waku/v2/node/wakuoptions.go | 13 ++- waku/v2/node/wakuoptions_test.go | 6 +- 7 files changed, 135 insertions(+), 65 deletions(-) diff --git a/cmd/waku/node.go b/cmd/waku/node.go index 695f08b0..2327bbbe 100644 --- a/cmd/waku/node.go +++ b/cmd/waku/node.go @@ -134,7 +134,7 @@ func Execute(options NodeOptions) error { node.WithLogLevel(lvl), node.WithPrivateKey(prvKey), node.WithHostAddress(hostAddr), - node.WithKeepAlive(options.KeepAlive), + node.WithKeepAlive(10*time.Second, options.KeepAlive), node.WithMaxPeerConnections(options.MaxPeerConnections), node.WithPrometheusRegisterer(prometheus.DefaultRegisterer), node.WithPeerStoreCapacity(options.PeerStoreCapacity), diff --git a/library/node.go b/library/node.go index f1e49b2c..f084b85a 100644 --- a/library/node.go +++ b/library/node.go @@ -163,7 +163,7 @@ func NewNode(instance *WakuInstance, configJSON string) error { opts := []node.WakuNodeOption{ node.WithPrivateKey(prvKey), node.WithHostAddress(hostAddr), - node.WithKeepAlive(time.Duration(*config.KeepAliveInterval) * time.Second), + node.WithKeepAlive(10*time.Second, time.Duration(*config.KeepAliveInterval)*time.Second), } if *config.EnableRelay { diff --git a/waku/v2/node/keepalive.go b/waku/v2/node/keepalive.go index a2a8256e..2cb03317 100644 --- a/waku/v2/node/keepalive.go +++ b/waku/v2/node/keepalive.go @@ -2,6 +2,8 @@ package node import ( "context" + "errors" + "math/rand" "sync" "time" @@ -10,6 +12,7 @@ import ( "github.com/libp2p/go-libp2p/p2p/protocol/ping" 
"github.com/waku-org/go-waku/logging" "go.uber.org/zap" + "golang.org/x/exp/maps" ) const maxAllowedPingFailures = 2 @@ -19,86 +22,155 @@ const maxAllowedPingFailures = 2 // the peers if they don't reply back const sleepDetectionIntervalFactor = 3 +const maxPeersToPing = 10 + // startKeepAlive creates a go routine that periodically pings connected peers. // This is necessary because TCP connections are automatically closed due to inactivity, // and doing a ping will avoid this (with a small bandwidth cost) -func (w *WakuNode) startKeepAlive(ctx context.Context, t time.Duration) { +func (w *WakuNode) startKeepAlive(ctx context.Context, randomPeersPingDuration time.Duration, allPeersPingDuration time.Duration) { defer w.wg.Done() - w.log.Info("setting up ping protocol", zap.Duration("duration", t)) - ticker := time.NewTicker(t) - defer ticker.Stop() + + if !w.opts.enableRelay { + return + } + + w.log.Info("setting up ping protocol", zap.Duration("randomPeersPingDuration", randomPeersPingDuration), zap.Duration("allPeersPingDuration", allPeersPingDuration)) + + randomPeersTickerC := make(<-chan time.Time) + if randomPeersPingDuration != 0 { + randomPeersTicker := time.NewTicker(randomPeersPingDuration) + defer randomPeersTicker.Stop() + randomPeersTickerC = randomPeersTicker.C + } + + allPeersTickerC := make(<-chan time.Time) + if randomPeersPingDuration != 0 { + allPeersTicker := time.NewTicker(randomPeersPingDuration) + defer allPeersTicker.Stop() + randomPeersTickerC = allPeersTicker.C + } lastTimeExecuted := w.timesource.Now() - sleepDetectionInterval := int64(t) * sleepDetectionIntervalFactor + sleepDetectionInterval := int64(randomPeersPingDuration) * sleepDetectionIntervalFactor for { + peersToPing := []peer.ID{} + select { - case <-ticker.C: + case <-allPeersTickerC: + relayPeersSet := make(map[peer.ID]struct{}) + for _, t := range w.Relay().Topics() { + for _, p := range w.Relay().PubSub().ListPeers(t) { + relayPeersSet[p] = struct{}{} + } + } + peersToPing = 
maps.Keys(relayPeersSet) + + case <-randomPeersTickerC: difference := w.timesource.Now().UnixNano() - lastTimeExecuted.UnixNano() - forceDisconnectOnPingFailure := false if difference > sleepDetectionInterval { - forceDisconnectOnPingFailure = true lastTimeExecuted = w.timesource.Now() - w.log.Warn("keep alive hasnt been executed recently. Killing connections to peers if ping fails") + w.log.Warn("keep alive hasnt been executed recently. Killing all connections") + for _, p := range w.host.Network().Peers() { + err := w.host.Network().ClosePeer(p) + if err != nil { + w.log.Debug("closing conn to peer", zap.Error(err)) + } + } continue } - // Network's peers collection, - // contains only currently active peers - pingWg := sync.WaitGroup{} - peersToPing := w.host.Network().Peers() - pingWg.Add(len(peersToPing)) - for _, p := range peersToPing { - if p != w.host.ID() { - go w.pingPeer(ctx, &pingWg, p, forceDisconnectOnPingFailure) + // Priorize mesh peers + meshPeersSet := make(map[peer.ID]struct{}) + for _, t := range w.Relay().Topics() { + for _, p := range w.Relay().PubSub().MeshPeers(t) { + meshPeersSet[p] = struct{}{} } } - pingWg.Wait() + peersToPing = append(peersToPing, maps.Keys(meshPeersSet)...) + + // Ping also some random relay peers + if maxPeersToPing-len(peersToPing) > 0 { + relayPeersSet := make(map[peer.ID]struct{}) + for _, t := range w.Relay().Topics() { + for _, p := range w.Relay().PubSub().ListPeers(t) { + if _, ok := meshPeersSet[p]; !ok { + relayPeersSet[p] = struct{}{} + } + } + } + + relayPeers := maps.Keys(relayPeersSet) + rand.Shuffle(len(relayPeers), func(i, j int) { relayPeers[i], relayPeers[j] = relayPeers[j], relayPeers[i] }) + + peerLen := maxPeersToPing - len(peersToPing) + if peerLen > len(relayPeers) { + peerLen = len(relayPeers) + } + peersToPing = append(peersToPing, relayPeers[0:peerLen]...) 
+ } - lastTimeExecuted = w.timesource.Now() case <-ctx.Done(): w.log.Info("stopping ping protocol") return } + + pingWg := sync.WaitGroup{} + pingWg.Add(len(peersToPing)) + for _, p := range peersToPing { + go w.pingPeer(ctx, &pingWg, p) + } + pingWg.Wait() + + lastTimeExecuted = w.timesource.Now() } } -func (w *WakuNode) pingPeer(ctx context.Context, wg *sync.WaitGroup, peerID peer.ID, forceDisconnectOnFail bool) { +func (w *WakuNode) pingPeer(ctx context.Context, wg *sync.WaitGroup, peerID peer.ID) { defer wg.Done() + logger := w.log.With(logging.HostID("peer", peerID)) + + for i := 0; i < maxAllowedPingFailures; i++ { + if w.host.Network().Connectedness(peerID) != network.Connected { + // Peer is no longer connected. No need to ping + return + } + + logger.Debug("pinging") + + if w.tryPing(ctx, peerID, logger) { + return + } + } + + if w.host.Network().Connectedness(peerID) != network.Connected { + return + } + + logger.Info("disconnecting dead peer") + if err := w.host.Network().ClosePeer(peerID); err != nil { + logger.Debug("closing conn to peer", zap.Error(err)) + } +} + +func (w *WakuNode) tryPing(ctx context.Context, peerID peer.ID, logger *zap.Logger) bool { ctx, cancel := context.WithTimeout(ctx, 7*time.Second) defer cancel() - logger := w.log.With(logging.HostID("peer", peerID)) - logger.Debug("pinging") pr := ping.Ping(ctx, w.host, peerID) select { case res := <-pr: if res.Error != nil { - w.keepAliveMutex.Lock() - w.keepAliveFails[peerID]++ - w.keepAliveMutex.Unlock() logger.Debug("could not ping", zap.Error(res.Error)) - } else { - w.keepAliveMutex.Lock() - delete(w.keepAliveFails, peerID) - w.keepAliveMutex.Unlock() + return false } case <-ctx.Done(): - w.keepAliveMutex.Lock() - w.keepAliveFails[peerID]++ - w.keepAliveMutex.Unlock() - logger.Debug("could not ping (context done)", zap.Error(ctx.Err())) - } - - w.keepAliveMutex.Lock() - if (forceDisconnectOnFail || w.keepAliveFails[peerID] > maxAllowedPingFailures) && 
w.host.Network().Connectedness(peerID) == network.Connected { - logger.Info("disconnecting peer") - if err := w.host.Network().ClosePeer(peerID); err != nil { - logger.Debug("closing conn to peer", zap.Error(err)) + if !errors.Is(ctx.Err(), context.Canceled) { + logger.Debug("could not ping (context)", zap.Error(ctx.Err())) } - w.keepAliveFails[peerID] = 0 + return false } - w.keepAliveMutex.Unlock() + return true } diff --git a/waku/v2/node/keepalive_test.go b/waku/v2/node/keepalive_test.go index a778f5c9..0508fd79 100644 --- a/waku/v2/node/keepalive_test.go +++ b/waku/v2/node/keepalive_test.go @@ -9,7 +9,6 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/libp2p/go-libp2p" - "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/require" @@ -40,15 +39,13 @@ func TestKeepAlive(t *testing.T) { wg := &sync.WaitGroup{} w := &WakuNode{ - host: host1, - wg: wg, - log: utils.Logger(), - keepAliveMutex: sync.Mutex{}, - keepAliveFails: make(map[peer.ID]int), + host: host1, + wg: wg, + log: utils.Logger(), } w.wg.Add(1) - w.pingPeer(ctx2, w.wg, peerID2, false) + w.pingPeer(ctx2, w.wg, peerID2) require.NoError(t, ctx.Err()) } @@ -70,7 +67,7 @@ func TestPeriodicKeepAlive(t *testing.T) { WithPrivateKey(prvKey), WithHostAddress(hostAddr), WithWakuRelay(), - WithKeepAlive(time.Second), + WithKeepAlive(time.Minute, time.Second), ) require.NoError(t, err) diff --git a/waku/v2/node/wakunode2.go b/waku/v2/node/wakunode2.go index cf7a51ad..5032c823 100644 --- a/waku/v2/node/wakunode2.go +++ b/waku/v2/node/wakunode2.go @@ -116,9 +116,6 @@ type WakuNode struct { addressChangesSub event.Subscription enrChangeCh chan struct{} - keepAliveMutex sync.Mutex - keepAliveFails map[peer.ID]int - cancel context.CancelFunc wg *sync.WaitGroup @@ -193,7 +190,6 @@ func New(opts ...WakuNodeOption) (*WakuNode, error) { w.opts = params w.log = params.logger.Named("node2") w.wg = 
&sync.WaitGroup{} - w.keepAliveFails = make(map[peer.ID]int) w.wakuFlag = enr.NewWakuEnrBitfield(w.opts.enableLightPush, w.opts.enableFilterFullNode, w.opts.enableStore, w.opts.enableRelay) w.circuitRelayNodes = make(chan peer.AddrInfo) w.metrics = newMetrics(params.prometheusReg) @@ -382,9 +378,9 @@ func (w *WakuNode) Start(ctx context.Context) error { return err } - if w.opts.keepAliveInterval > time.Duration(0) { + if w.opts.keepAliveRandomPeersInterval > time.Duration(0) || w.opts.keepAliveAllPeersInterval > time.Duration(0) { w.wg.Add(1) - go w.startKeepAlive(ctx, w.opts.keepAliveInterval) + go w.startKeepAlive(ctx, w.opts.keepAliveRandomPeersInterval, w.opts.keepAliveAllPeersInterval) } w.metadata.SetHost(host) diff --git a/waku/v2/node/wakuoptions.go b/waku/v2/node/wakuoptions.go index 26a82d0d..82d96461 100644 --- a/waku/v2/node/wakuoptions.go +++ b/waku/v2/node/wakuoptions.go @@ -114,7 +114,8 @@ type WakuNodeParameters struct { rlnTreePath string rlnMembershipContractAddress common.Address - keepAliveInterval time.Duration + keepAliveRandomPeersInterval time.Duration + keepAliveAllPeersInterval time.Duration enableLightPush bool @@ -476,10 +477,14 @@ func WithLightPush(lightpushOpts ...lightpush.Option) WakuNodeOption { } // WithKeepAlive is a WakuNodeOption used to set the interval of time when -// each peer will be ping to keep the TCP connection alive -func WithKeepAlive(t time.Duration) WakuNodeOption { +// each peer will be ping to keep the TCP connection alive. 
Option accepts two +// intervals, the `randomPeersInterval`, which will be used to ping full mesh +// peers (if using relay) and random connected peers, and `allPeersInterval` +// which is used to ping all connected peers +func WithKeepAlive(randomPeersInterval time.Duration, allPeersInterval time.Duration) WakuNodeOption { return func(params *WakuNodeParameters) error { - params.keepAliveInterval = t + params.keepAliveRandomPeersInterval = randomPeersInterval + params.keepAliveAllPeersInterval = allPeersInterval return nil } } diff --git a/waku/v2/node/wakuoptions_test.go b/waku/v2/node/wakuoptions_test.go index 751c7158..9d4ed4f9 100644 --- a/waku/v2/node/wakuoptions_test.go +++ b/waku/v2/node/wakuoptions_test.go @@ -58,7 +58,7 @@ func TestWakuOptions(t *testing.T) { WithWakuStore(), WithMessageProvider(&persistence.DBStore{}), WithLightPush(), - WithKeepAlive(time.Hour), + WithKeepAlive(time.Minute, time.Hour), WithTopicHealthStatusChannel(topicHealthStatusChan), WithWakuStoreFactory(storeFactory), } @@ -107,7 +107,7 @@ func TestWakuRLNOptions(t *testing.T) { WithWakuStore(), WithMessageProvider(&persistence.DBStore{}), WithLightPush(), - WithKeepAlive(time.Hour), + WithKeepAlive(time.Minute, time.Hour), WithTopicHealthStatusChannel(topicHealthStatusChan), WithWakuStoreFactory(storeFactory), WithStaticRLNRelay(&index, handleSpam), @@ -147,7 +147,7 @@ func TestWakuRLNOptions(t *testing.T) { WithWakuStore(), WithMessageProvider(&persistence.DBStore{}), WithLightPush(), - WithKeepAlive(time.Hour), + WithKeepAlive(time.Minute, time.Hour), WithTopicHealthStatusChannel(topicHealthStatusChan), WithWakuStoreFactory(storeFactory), WithDynamicRLNRelay(keystorePath, keystorePassword, rlnTreePath, common.HexToAddress(contractAddress), &index, handleSpam, ethClientAddress), From bb74e39ed9ec0443a103776d9808408a16d77d8b Mon Sep 17 00:00:00 2001 From: Prem Chaitanya Prathi Date: Fri, 12 Jul 2024 09:28:23 +0530 Subject: [PATCH 04/27] feat: support for lightpush to use more than 
1 peer (#1158) --- waku/v2/protocol/lightpush/waku_lightpush.go | 67 ++++++++++++------- .../lightpush/waku_lightpush_option.go | 15 ++++- .../lightpush/waku_lightpush_option_test.go | 2 +- .../protocol/lightpush/waku_lightpush_test.go | 32 ++++++++- 4 files changed, 84 insertions(+), 32 deletions(-) diff --git a/waku/v2/protocol/lightpush/waku_lightpush.go b/waku/v2/protocol/lightpush/waku_lightpush.go index 19708d11..b8f3c0f3 100644 --- a/waku/v2/protocol/lightpush/waku_lightpush.go +++ b/waku/v2/protocol/lightpush/waku_lightpush.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "math" + "sync" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" @@ -187,7 +188,7 @@ func (wakuLP *WakuLightPush) reply(stream network.Stream, responsePushRPC *pb.Pu } // request sends a message via lightPush protocol to either a specified peer or peer that is selected. -func (wakuLP *WakuLightPush) request(ctx context.Context, req *pb.PushRequest, params *lightPushRequestParameters) (*pb.PushResponse, error) { +func (wakuLP *WakuLightPush) request(ctx context.Context, req *pb.PushRequest, params *lightPushRequestParameters, peer peer.ID) (*pb.PushResponse, error) { if params == nil { return nil, errors.New("lightpush params are mandatory") } @@ -196,9 +197,9 @@ func (wakuLP *WakuLightPush) request(ctx context.Context, req *pb.PushRequest, p return nil, ErrInvalidID } - logger := wakuLP.log.With(logging.HostID("peer", params.selectedPeer)) + logger := wakuLP.log.With(logging.HostID("peer", peer)) - stream, err := wakuLP.h.NewStream(ctx, params.selectedPeer, LightPushID_v20beta1) + stream, err := wakuLP.h.NewStream(ctx, peer, LightPushID_v20beta1) if err != nil { logger.Error("creating stream to peer", zap.Error(err)) wakuLP.metrics.RecordError(dialFailure) @@ -281,10 +282,10 @@ func (wakuLP *WakuLightPush) handleOpts(ctx context.Context, message *wpb.WakuMe return nil, err } wakuLP.pm.Connect(pData) - params.selectedPeer = pData.AddrInfo.ID + 
params.selectedPeers = append(params.selectedPeers, pData.AddrInfo.ID) } - - if params.pm != nil && params.selectedPeer == "" { + reqPeerCount := params.maxPeers - len(params.selectedPeers) + if params.pm != nil && reqPeerCount > 0 { var selectedPeers peer.IDSlice //TODO: update this to work with multiple peer selection selectedPeers, err = wakuLP.pm.SelectPeers( @@ -293,17 +294,17 @@ func (wakuLP *WakuLightPush) handleOpts(ctx context.Context, message *wpb.WakuMe Proto: LightPushID_v20beta1, PubsubTopics: []string{params.pubsubTopic}, SpecificPeers: params.preferredPeers, + MaxPeers: reqPeerCount, Ctx: ctx, }, ) if err == nil { - params.selectedPeer = selectedPeers[0] + params.selectedPeers = append(params.selectedPeers, selectedPeers...) } - } - if params.selectedPeer == "" { + if len(params.selectedPeers) == 0 { if err != nil { - params.log.Error("selecting peer", zap.Error(err)) + params.log.Error("selecting peers", zap.Error(err)) wakuLP.metrics.RecordError(peerNotFoundFailure) return nil, ErrNoPeersAvailable } @@ -327,25 +328,41 @@ func (wakuLP *WakuLightPush) Publish(ctx context.Context, message *wpb.WakuMessa req.Message = message req.PubsubTopic = params.pubsubTopic - logger := message.Logger(wakuLP.log, params.pubsubTopic).With(logging.HostID("peerID", params.selectedPeer)) + logger := message.Logger(wakuLP.log, params.pubsubTopic).With(zap.Stringers("peerIDs", params.selectedPeers)) logger.Debug("publishing message") - - response, err := wakuLP.request(ctx, req, params) - if err != nil { - logger.Error("could not publish message", zap.Error(err)) - return wpb.MessageHash{}, err + var wg sync.WaitGroup + var responses []*pb.PushResponse + for _, peerID := range params.selectedPeers { + wg.Add(1) + go func(id peer.ID) { + defer wg.Done() + response, err := wakuLP.request(ctx, req, params, id) + if err != nil { + logger.Error("could not publish message", zap.Error(err), zap.Stringer("peer", id)) + } + responses = append(responses, response) + }(peerID) } - 
- if response.IsSuccess { - hash := message.Hash(params.pubsubTopic) - utils.MessagesLogger("lightpush").Debug("waku.lightpush published", logging.HexBytes("hash", hash[:])) - return hash, nil - } - + wg.Wait() + var successCount int errMsg := "lightpush error" - if response.Info != nil { - errMsg = *response.Info + + for _, response := range responses { + if response.GetIsSuccess() { + successCount++ + } else { + if response.GetInfo() != "" { + errMsg += *response.Info + } + } + } + + //in case of partial failure, should we retry here or build a layer above that takes care of these things? + if successCount > 0 { + hash := message.Hash(params.pubsubTopic) + utils.MessagesLogger("lightpush").Debug("waku.lightpush published", logging.HexBytes("hash", hash[:]), zap.Int("num-peers", len(responses))) + return hash, nil } return wpb.MessageHash{}, errors.New(errMsg) diff --git a/waku/v2/protocol/lightpush/waku_lightpush_option.go b/waku/v2/protocol/lightpush/waku_lightpush_option.go index 6ec25899..b192fd17 100644 --- a/waku/v2/protocol/lightpush/waku_lightpush_option.go +++ b/waku/v2/protocol/lightpush/waku_lightpush_option.go @@ -29,7 +29,8 @@ func WithRateLimiter(r rate.Limit, b int) Option { type lightPushRequestParameters struct { host host.Host peerAddr multiaddr.Multiaddr - selectedPeer peer.ID + selectedPeers peer.IDSlice + maxPeers int peerSelectionType peermanager.PeerSelection preferredPeers peer.IDSlice requestID []byte @@ -41,10 +42,17 @@ type lightPushRequestParameters struct { // RequestOption is the type of options accepted when performing LightPush protocol requests type RequestOption func(*lightPushRequestParameters) error +func WithMaxPeers(num int) RequestOption { + return func(params *lightPushRequestParameters) error { + params.maxPeers = num + return nil + } +} + // WithPeer is an option used to specify the peerID to push a waku message to func WithPeer(p peer.ID) RequestOption { return func(params *lightPushRequestParameters) error { - 
params.selectedPeer = p + params.selectedPeers = append(params.selectedPeers, p) if params.peerAddr != nil { return errors.New("peerAddr and peerId options are mutually exclusive") } @@ -58,7 +66,7 @@ func WithPeer(p peer.ID) RequestOption { func WithPeerAddr(pAddr multiaddr.Multiaddr) RequestOption { return func(params *lightPushRequestParameters) error { params.peerAddr = pAddr - if params.selectedPeer != "" { + if len(params.selectedPeers) != 0 { return errors.New("peerAddr and peerId options are mutually exclusive") } return nil @@ -127,5 +135,6 @@ func DefaultOptions(host host.Host) []RequestOption { return []RequestOption{ WithAutomaticRequestID(), WithAutomaticPeerSelection(), + WithMaxPeers(1), //keeping default as 2 for status use-case } } diff --git a/waku/v2/protocol/lightpush/waku_lightpush_option_test.go b/waku/v2/protocol/lightpush/waku_lightpush_option_test.go index 6998b1fa..94d2bea9 100644 --- a/waku/v2/protocol/lightpush/waku_lightpush_option_test.go +++ b/waku/v2/protocol/lightpush/waku_lightpush_option_test.go @@ -36,7 +36,7 @@ func TestLightPushOption(t *testing.T) { } require.Equal(t, host, params.host) - require.NotNil(t, params.selectedPeer) + require.NotEqual(t, 0, len(params.selectedPeers)) require.NotNil(t, params.requestID) maddr, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/12345/p2p/16Uiu2HAm8KUwGRruseAaEGD6xGg6XKrDo8Py5dwDoL9wUpCxawGy") diff --git a/waku/v2/protocol/lightpush/waku_lightpush_test.go b/waku/v2/protocol/lightpush/waku_lightpush_test.go index 004afe9b..0dc1ea9e 100644 --- a/waku/v2/protocol/lightpush/waku_lightpush_test.go +++ b/waku/v2/protocol/lightpush/waku_lightpush_test.go @@ -43,15 +43,19 @@ func makeWakuRelay(t *testing.T, pusubTopic string) (*relay.WakuRelay, *relay.Su // Node1: Relay // Node2: Relay+Lightpush +// Node3: Relay+Lightpush + // Client that will lightpush a message // // Node1 and Node 2 are peers +// Node1 and Node 3 are peers // Client and Node 2 are peers -// Client will use lightpush request, 
sending the message to Node2 +// Client and Node 3 are peers +// Client will use lightpush request, sending the message to Node2 and Node3 // // Client send a successful message using lightpush -// Node2 receive the message and broadcast it -// Node1 receive the message +// Node2, Node3 receive the message and broadcast it +// Node1 receive the messages func TestWakuLightPush(t *testing.T) { testTopic := "/waku/2/go/lightpush/test" node1, sub1, host1 := makeWakuRelay(t, testTopic) @@ -69,6 +73,16 @@ func TestWakuLightPush(t *testing.T) { require.NoError(t, err) defer lightPushNode2.Stop() + node3, sub3, host3 := makeWakuRelay(t, testTopic) + defer node3.Stop() + defer sub3.Unsubscribe() + + lightPushNode3 := NewWakuLightPush(node3, nil, prometheus.DefaultRegisterer, utils.Logger()) + lightPushNode3.SetHost(host3) + err = lightPushNode3.Start(ctx) + require.NoError(t, err) + defer lightPushNode3.Stop() + port, err := tests.FindFreePort(t, "", 5) require.NoError(t, err) @@ -84,10 +98,21 @@ func TestWakuLightPush(t *testing.T) { err = host2.Connect(ctx, host2.Peerstore().PeerInfo(host1.ID())) require.NoError(t, err) + host3.Peerstore().AddAddr(host1.ID(), tests.GetHostAddress(host1), peerstore.PermanentAddrTTL) + err = host3.Peerstore().AddProtocols(host1.ID(), relay.WakuRelayID_v200) + require.NoError(t, err) + + err = host3.Connect(ctx, host3.Peerstore().PeerInfo(host1.ID())) + require.NoError(t, err) + clientHost.Peerstore().AddAddr(host2.ID(), tests.GetHostAddress(host2), peerstore.PermanentAddrTTL) err = clientHost.Peerstore().AddProtocols(host2.ID(), LightPushID_v20beta1) require.NoError(t, err) + clientHost.Peerstore().AddAddr(host3.ID(), tests.GetHostAddress(host3), peerstore.PermanentAddrTTL) + err = clientHost.Peerstore().AddProtocols(host3.ID(), LightPushID_v20beta1) + require.NoError(t, err) + msg2 := tests.CreateWakuMessage("test2", utils.GetUnixEpoch()) // Wait for the mesh connection to happen between node1 and node2 @@ -109,6 +134,7 @@ func 
TestWakuLightPush(t *testing.T) { var lpOptions []RequestOption lpOptions = append(lpOptions, WithPubSubTopic(testTopic)) lpOptions = append(lpOptions, WithPeer(host2.ID())) + lpOptions = append(lpOptions, WithMaxPeers(2)) // Checking that msg hash is correct hash, err := client.Publish(ctx, msg2, lpOptions...) From 2f333c1e1c1382490e8544801bf44fb756ef22ff Mon Sep 17 00:00:00 2001 From: Vaclav Pavlin Date: Fri, 12 Jul 2024 06:39:04 +0200 Subject: [PATCH 05/27] chore(wakunode2): add ability to specify PX options in wakunode2 (#1157) Co-authored-by: Prem Chaitanya Prathi --- waku/v2/node/wakunode2.go | 2 +- waku/v2/node/wakunode2_test.go | 60 ++++++++++++++++++++++ waku/v2/node/wakuoptions.go | 7 ++- waku/v2/protocol/peer_exchange/protocol.go | 3 ++ 4 files changed, 69 insertions(+), 3 deletions(-) diff --git a/waku/v2/node/wakunode2.go b/waku/v2/node/wakunode2.go index 5032c823..4eefa7e2 100644 --- a/waku/v2/node/wakunode2.go +++ b/waku/v2/node/wakunode2.go @@ -272,7 +272,7 @@ func New(opts ...WakuNodeOption) (*WakuNode, error) { } } - w.peerExchange, err = peer_exchange.NewWakuPeerExchange(w.DiscV5(), w.opts.clusterID, w.peerConnector, w.peermanager, w.opts.prometheusReg, w.log) + w.peerExchange, err = peer_exchange.NewWakuPeerExchange(w.DiscV5(), w.opts.clusterID, w.peerConnector, w.peermanager, w.opts.prometheusReg, w.log, w.opts.peerExchangeOptions...) 
if err != nil { return nil, err } diff --git a/waku/v2/node/wakunode2_test.go b/waku/v2/node/wakunode2_test.go index d4ca453c..144ce681 100644 --- a/waku/v2/node/wakunode2_test.go +++ b/waku/v2/node/wakunode2_test.go @@ -13,6 +13,7 @@ import ( "time" wenr "github.com/waku-org/go-waku/waku/v2/protocol/enr" + "github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/p2p/enode" @@ -540,3 +541,62 @@ func TestStaticShardingLimits(t *testing.T) { tests.WaitForMsg(t, 2*time.Second, &wg, s2.Ch) } + +func TestPeerExchangeRatelimit(t *testing.T) { + log := utils.Logger() + + if os.Getenv("RUN_FLAKY_TESTS") != "true" { + + log.Info("Skipping", zap.String("test", t.Name()), + zap.String("reason", "RUN_FLAKY_TESTS environment variable is not set to true")) + t.SkipNow() + } + + ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second) + defer cancel() + + testClusterID := uint16(21) + + // Node1 with Relay + hostAddr1, err := net.ResolveTCPAddr("tcp", "0.0.0.0:0") + require.NoError(t, err) + wakuNode1, err := New( + WithHostAddress(hostAddr1), + WithWakuRelay(), + WithClusterID(testClusterID), + WithPeerExchange(peer_exchange.WithRateLimiter(1, 1)), + ) + require.NoError(t, err) + err = wakuNode1.Start(ctx) + require.NoError(t, err) + defer wakuNode1.Stop() + + // Node2 with Relay + hostAddr2, err := net.ResolveTCPAddr("tcp", "0.0.0.0:0") + require.NoError(t, err) + wakuNode2, err := New( + WithHostAddress(hostAddr2), + WithWakuRelay(), + WithClusterID(testClusterID), + WithPeerExchange(peer_exchange.WithRateLimiter(1, 1)), + ) + require.NoError(t, err) + err = wakuNode2.Start(ctx) + require.NoError(t, err) + defer wakuNode2.Stop() + + err = wakuNode2.DialPeer(ctx, wakuNode1.ListenAddresses()[0].String()) + require.NoError(t, err) + + //time.Sleep(1 * time.Second) + + err = wakuNode1.PeerExchange().Request(ctx, 1) + require.NoError(t, err) + + err = wakuNode1.PeerExchange().Request(ctx, 
1) + require.Error(t, err) + + time.Sleep(1 * time.Second) + err = wakuNode1.PeerExchange().Request(ctx, 1) + require.NoError(t, err) +} diff --git a/waku/v2/node/wakuoptions.go b/waku/v2/node/wakuoptions.go index 82d96461..2e34ace7 100644 --- a/waku/v2/node/wakuoptions.go +++ b/waku/v2/node/wakuoptions.go @@ -31,6 +31,7 @@ import ( "github.com/waku-org/go-waku/waku/v2/protocol/legacy_store" "github.com/waku-org/go-waku/waku/v2/protocol/lightpush" "github.com/waku-org/go-waku/waku/v2/protocol/pb" + "github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange" "github.com/waku-org/go-waku/waku/v2/rendezvous" "github.com/waku-org/go-waku/waku/v2/timesource" "github.com/waku-org/go-waku/waku/v2/utils" @@ -102,7 +103,8 @@ type WakuNodeParameters struct { discV5bootnodes []*enode.Node discV5autoUpdate bool - enablePeerExchange bool + enablePeerExchange bool + peerExchangeOptions []peer_exchange.Option enableRLN bool rlnRelayMemIndex *uint @@ -411,9 +413,10 @@ func WithDiscoveryV5(udpPort uint, bootnodes []*enode.Node, autoUpdate bool) Wak } // WithPeerExchange is a WakuOption used to enable Peer Exchange -func WithPeerExchange() WakuNodeOption { +func WithPeerExchange(options ...peer_exchange.Option) WakuNodeOption { return func(params *WakuNodeParameters) error { params.enablePeerExchange = true + params.peerExchangeOptions = options return nil } } diff --git a/waku/v2/protocol/peer_exchange/protocol.go b/waku/v2/protocol/peer_exchange/protocol.go index c02cdca6..5f103e12 100644 --- a/waku/v2/protocol/peer_exchange/protocol.go +++ b/waku/v2/protocol/peer_exchange/protocol.go @@ -100,6 +100,9 @@ func (wakuPX *WakuPeerExchange) onRequest() func(network.Stream) { wakuPX.metrics.RecordError(rateLimitFailure) wakuPX.log.Error("exceeds the rate limit") // TODO: peer exchange protocol should contain an err field + if err := stream.Reset(); err != nil { + wakuPX.log.Error("resetting connection", zap.Error(err)) + } return } From 9fbb955b16f612d8ec1b8a1f19ad5a76cc503099 Mon Sep 
17 00:00:00 2001 From: Prem Chaitanya Prathi Date: Mon, 15 Jul 2024 19:29:31 +0530 Subject: [PATCH 06/27] chore: allow setting enr shards for lightclient (#1159) --- waku/v2/node/localnode.go | 8 ++++++++ waku/v2/node/wakunode2.go | 19 +++++++++++++----- waku/v2/node/wakuoptions.go | 19 ++++++++++++++++++ waku/v2/protocol/lightpush/waku_lightpush.go | 4 ++-- waku/v2/protocol/metadata/waku_metadata.go | 21 ++++++-------------- 5 files changed, 49 insertions(+), 22 deletions(-) diff --git a/waku/v2/node/localnode.go b/waku/v2/node/localnode.go index 5f0f47d7..74ba35ce 100644 --- a/waku/v2/node/localnode.go +++ b/waku/v2/node/localnode.go @@ -338,6 +338,14 @@ func (w *WakuNode) setupENR(ctx context.Context, addrs []ma.Multiaddr) error { } +func (w *WakuNode) SetRelayShards(rs protocol.RelayShards) error { + err := wenr.Update(w.log, w.localNode, wenr.WithWakuRelaySharding(rs)) + if err != nil { + return err + } + return nil +} + func (w *WakuNode) watchTopicShards(ctx context.Context) error { evtRelaySubscribed, err := w.Relay().Events().Subscribe(new(relay.EvtRelaySubscribed)) if err != nil { diff --git a/waku/v2/node/wakunode2.go b/waku/v2/node/wakunode2.go index 4eefa7e2..c29a2b93 100644 --- a/waku/v2/node/wakunode2.go +++ b/waku/v2/node/wakunode2.go @@ -457,16 +457,25 @@ func (w *WakuNode) Start(ctx context.Context) error { } w.filterLightNode.SetHost(host) + + err = w.setupENR(ctx, w.ListenAddresses()) + if err != nil { + return err + } + if w.opts.enableFilterLightNode { err := w.filterLightNode.Start(ctx) if err != nil { return err } - } - - err = w.setupENR(ctx, w.ListenAddresses()) - if err != nil { - return err + //TODO: setting this up temporarily to improve connectivity success for lightNode in status. + //This will have to be removed or changed with community sharding will be implemented. 
+ if w.opts.shards != nil { + err = w.SetRelayShards(*w.opts.shards) + if err != nil { + return err + } + } } w.peerExchange.SetHost(host) diff --git a/waku/v2/node/wakuoptions.go b/waku/v2/node/wakuoptions.go index 2e34ace7..445065de 100644 --- a/waku/v2/node/wakuoptions.go +++ b/waku/v2/node/wakuoptions.go @@ -27,6 +27,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/waku-org/go-waku/waku/v2/onlinechecker" "github.com/waku-org/go-waku/waku/v2/peermanager" + "github.com/waku-org/go-waku/waku/v2/protocol" "github.com/waku-org/go-waku/waku/v2/protocol/filter" "github.com/waku-org/go-waku/waku/v2/protocol/legacy_store" "github.com/waku-org/go-waku/waku/v2/protocol/lightpush" @@ -53,6 +54,7 @@ type WakuNodeParameters struct { hostAddr *net.TCPAddr maxConnectionsPerIP int clusterID uint16 + shards *protocol.RelayShards dns4Domain string advertiseAddrs []multiaddr.Multiaddr multiAddr []multiaddr.Multiaddr @@ -317,6 +319,23 @@ func WithClusterID(clusterID uint16) WakuNodeOption { } } +func WithPubSubTopics(topics []string) WakuNodeOption { + return func(params *WakuNodeParameters) error { + rs, err := protocol.TopicsToRelayShards(topics...) 
+ if err != nil { + return err + } + if len(rs) == 0 { + return nil + } + if rs[0].ClusterID != params.clusterID { + return errors.New("pubsubtopics have different clusterID than configured clusterID") + } + params.shards = &rs[0] //Only consider 0 as a node can only support 1 cluster as of now + return nil + } +} + // WithMaxConnectionsPerIP sets the max number of allowed peers from the same IP func WithMaxConnectionsPerIP(limit int) WakuNodeOption { return func(params *WakuNodeParameters) error { diff --git a/waku/v2/protocol/lightpush/waku_lightpush.go b/waku/v2/protocol/lightpush/waku_lightpush.go index b8f3c0f3..7775b557 100644 --- a/waku/v2/protocol/lightpush/waku_lightpush.go +++ b/waku/v2/protocol/lightpush/waku_lightpush.go @@ -328,9 +328,9 @@ func (wakuLP *WakuLightPush) Publish(ctx context.Context, message *wpb.WakuMessa req.Message = message req.PubsubTopic = params.pubsubTopic - logger := message.Logger(wakuLP.log, params.pubsubTopic).With(zap.Stringers("peerIDs", params.selectedPeers)) + logger := message.Logger(wakuLP.log, params.pubsubTopic) - logger.Debug("publishing message") + logger.Debug("publishing message", zap.Stringers("peers", params.selectedPeers)) var wg sync.WaitGroup var responses []*pb.PushResponse for _, peerID := range params.selectedPeers { diff --git a/waku/v2/protocol/metadata/waku_metadata.go b/waku/v2/protocol/metadata/waku_metadata.go index 6129d6c0..23a7e455 100644 --- a/waku/v2/protocol/metadata/waku_metadata.go +++ b/waku/v2/protocol/metadata/waku_metadata.go @@ -117,8 +117,6 @@ func (wakuM *WakuMetadata) Request(ctx context.Context, peerID peer.ID) (*pb.Wak request := &pb.WakuMetadataRequest{} request.ClusterId = clusterID request.Shards = shards - // TODO: remove with nwaku 0.28 deployment - request.ShardsDeprecated = shards // nolint: staticcheck writer := pbio.NewDelimitedWriter(stream) reader := pbio.NewDelimitedReader(stream, math.MaxInt32) @@ -173,8 +171,6 @@ func (wakuM *WakuMetadata) onRequest(ctx context.Context) 
func(network.Stream) { } else { response.ClusterId = clusterID response.Shards = shards - // TODO: remove with nwaku 0.28 deployment - response.ShardsDeprecated = shards // nolint: staticcheck } err = writer.WriteMsg(response) @@ -245,14 +241,6 @@ func (wakuM *WakuMetadata) Connected(n network.Network, cc network.Conn) { rClusterID := uint16(*response.ClusterId) var rs protocol.RelayShards - if _, err = wakuM.h.Peerstore().SupportsProtocols(peerID, relay.WakuRelayID_v200); err == nil { - wakuM.log.Debug("light peer only checking clusterID") - if rClusterID != wakuM.clusterID { - wakuM.disconnectPeer(peerID, errors.New("different clusterID reported")) - } - return - } - wakuM.log.Debug("relay peer checking cluster and shards") var rShardIDs []uint16 @@ -261,9 +249,12 @@ func (wakuM *WakuMetadata) Connected(n network.Network, cc network.Conn) { rShardIDs = append(rShardIDs, uint16(i)) } } else { - // TODO: remove with nwaku 0.28 deployment - for _, i := range response.ShardsDeprecated { // nolint: staticcheck - rShardIDs = append(rShardIDs, uint16(i)) + if proto, err := wakuM.h.Peerstore().FirstSupportedProtocol(peerID, relay.WakuRelayID_v200); err == nil && proto == "" { + wakuM.log.Debug("light peer only checking clusterID") + if rClusterID != wakuM.clusterID { + wakuM.disconnectPeer(peerID, errors.New("different clusterID reported")) + } + return } } wakuM.log.Debug("getting remote cluster and shards") From dacff8a6ae5ddb0796cc7107d7c5947017596308 Mon Sep 17 00:00:00 2001 From: Prem Chaitanya Prathi Date: Mon, 15 Jul 2024 19:47:27 +0530 Subject: [PATCH 07/27] feat: lightclient err handling (#1160) --- waku/v2/protocol/filter/client.go | 26 +++++++++++++++---- waku/v2/protocol/lightpush/waku_lightpush.go | 11 +++----- .../lightpush/waku_lightpush_option.go | 1 - .../subscription/subscriptions_map.go | 9 ++++--- .../subscription/subscriptions_map_test.go | 12 ++++----- 5 files changed, 36 insertions(+), 23 deletions(-) diff --git a/waku/v2/protocol/filter/client.go 
b/waku/v2/protocol/filter/client.go index 5909bbbd..3342dc29 100644 --- a/waku/v2/protocol/filter/client.go +++ b/waku/v2/protocol/filter/client.go @@ -147,6 +147,17 @@ func (wf *WakuFilterLightNode) Stop() { }) } +func (wf *WakuFilterLightNode) unsubscribeWithoutSubscription(cf protocol.ContentFilter, peerID peer.ID) { + err := wf.request( + wf.Context(), + protocol.GenerateRequestID(), + pb.FilterSubscribeRequest_UNSUBSCRIBE_ALL, + cf, peerID) + if err != nil { + wf.log.Warn("could not unsubscribe from peer", logging.HostID("peerID", peerID), zap.Error(err)) + } +} + func (wf *WakuFilterLightNode) onRequest(ctx context.Context) func(network.Stream) { return func(stream network.Stream) { peerID := stream.Conn().RemotePeer() @@ -156,6 +167,9 @@ func (wf *WakuFilterLightNode) onRequest(ctx context.Context) func(network.Strea if !wf.subscriptions.IsSubscribedTo(peerID) { logger.Warn("received message push from unknown peer", logging.HostID("peerID", peerID)) wf.metrics.RecordError(unknownPeerMessagePush) + //Send a wildcard unsubscribe to this peer so that further requests are not forwarded to us + //This could be happening due to https://github.com/waku-org/go-waku/issues/1124 + go wf.unsubscribeWithoutSubscription(protocol.ContentFilter{}, peerID) if err := stream.Reset(); err != nil { wf.log.Error("resetting connection", zap.Error(err)) } @@ -199,22 +213,24 @@ func (wf *WakuFilterLightNode) onRequest(ctx context.Context) func(network.Strea } logger = messagePush.WakuMessage.Logger(logger, pubSubTopic) - - if !wf.subscriptions.Has(peerID, protocol.NewContentFilter(pubSubTopic, messagePush.WakuMessage.ContentTopic)) { + cf := protocol.NewContentFilter(pubSubTopic, messagePush.WakuMessage.ContentTopic) + if !wf.subscriptions.Has(peerID, cf) { logger.Warn("received messagepush with invalid subscription parameters") + //Unsubscribe from that peer for the contentTopic, possibly due to https://github.com/waku-org/go-waku/issues/1124 + go 
wf.unsubscribeWithoutSubscription(cf, peerID) wf.metrics.RecordError(invalidSubscriptionMessage) return } wf.metrics.RecordMessage() - wf.notify(peerID, pubSubTopic, messagePush.WakuMessage) + wf.notify(ctx, peerID, pubSubTopic, messagePush.WakuMessage) logger.Info("received message push") } } -func (wf *WakuFilterLightNode) notify(remotePeerID peer.ID, pubsubTopic string, msg *wpb.WakuMessage) { +func (wf *WakuFilterLightNode) notify(ctx context.Context, remotePeerID peer.ID, pubsubTopic string, msg *wpb.WakuMessage) { envelope := protocol.NewEnvelope(msg, wf.timesource.Now().UnixNano(), pubsubTopic) if wf.broadcaster != nil { @@ -222,7 +238,7 @@ func (wf *WakuFilterLightNode) notify(remotePeerID peer.ID, pubsubTopic string, wf.broadcaster.Submit(envelope) } // Notify filter subscribers - wf.subscriptions.Notify(remotePeerID, envelope) + wf.subscriptions.Notify(ctx, remotePeerID, envelope) } func (wf *WakuFilterLightNode) request(ctx context.Context, requestID []byte, diff --git a/waku/v2/protocol/lightpush/waku_lightpush.go b/waku/v2/protocol/lightpush/waku_lightpush.go index 7775b557..de7bce8b 100644 --- a/waku/v2/protocol/lightpush/waku_lightpush.go +++ b/waku/v2/protocol/lightpush/waku_lightpush.go @@ -189,13 +189,6 @@ func (wakuLP *WakuLightPush) reply(stream network.Stream, responsePushRPC *pb.Pu // request sends a message via lightPush protocol to either a specified peer or peer that is selected. 
func (wakuLP *WakuLightPush) request(ctx context.Context, req *pb.PushRequest, params *lightPushRequestParameters, peer peer.ID) (*pb.PushResponse, error) { - if params == nil { - return nil, errors.New("lightpush params are mandatory") - } - - if len(params.requestID) == 0 { - return nil, ErrInvalidID - } logger := wakuLP.log.With(logging.HostID("peer", peer)) @@ -336,8 +329,10 @@ func (wakuLP *WakuLightPush) Publish(ctx context.Context, message *wpb.WakuMessa for _, peerID := range params.selectedPeers { wg.Add(1) go func(id peer.ID) { + paramsValue := *params + paramsValue.requestID = protocol.GenerateRequestID() defer wg.Done() - response, err := wakuLP.request(ctx, req, params, id) + response, err := wakuLP.request(ctx, req, ¶msValue, id) if err != nil { logger.Error("could not publish message", zap.Error(err), zap.Stringer("peer", id)) } diff --git a/waku/v2/protocol/lightpush/waku_lightpush_option.go b/waku/v2/protocol/lightpush/waku_lightpush_option.go index b192fd17..7ed04370 100644 --- a/waku/v2/protocol/lightpush/waku_lightpush_option.go +++ b/waku/v2/protocol/lightpush/waku_lightpush_option.go @@ -133,7 +133,6 @@ func WithAutomaticRequestID() RequestOption { // DefaultOptions are the default options to be used when using the lightpush protocol func DefaultOptions(host host.Host) []RequestOption { return []RequestOption{ - WithAutomaticRequestID(), WithAutomaticPeerSelection(), WithMaxPeers(1), //keeping default as 2 for status use-case } diff --git a/waku/v2/protocol/subscription/subscriptions_map.go b/waku/v2/protocol/subscription/subscriptions_map.go index 4692d953..a6621f13 100644 --- a/waku/v2/protocol/subscription/subscriptions_map.go +++ b/waku/v2/protocol/subscription/subscriptions_map.go @@ -1,6 +1,7 @@ package subscription import ( + "context" "errors" "sync" @@ -178,17 +179,17 @@ func (sub *SubscriptionsMap) Clear() { sub.clear() } -func (sub *SubscriptionsMap) Notify(peerID peer.ID, envelope *protocol.Envelope) { +func (sub *SubscriptionsMap) 
Notify(ctx context.Context, peerID peer.ID, envelope *protocol.Envelope) { sub.RLock() defer sub.RUnlock() subscriptions, ok := sub.items[peerID].SubsPerPubsubTopic[envelope.PubsubTopic()] if ok { - iterateSubscriptionSet(sub.logger, subscriptions, envelope) + iterateSubscriptionSet(ctx, sub.logger, subscriptions, envelope) } } -func iterateSubscriptionSet(logger *zap.Logger, subscriptions SubscriptionSet, envelope *protocol.Envelope) { +func iterateSubscriptionSet(ctx context.Context, logger *zap.Logger, subscriptions SubscriptionSet, envelope *protocol.Envelope) { for _, subscription := range subscriptions { func(subscription *SubscriptionDetails) { subscription.RLock() @@ -201,6 +202,8 @@ func iterateSubscriptionSet(logger *zap.Logger, subscriptions SubscriptionSet, e if !subscription.Closed { select { + case <-ctx.Done(): + return case subscription.C <- envelope: default: logger.Warn("can't deliver message to subscription. subscriber too slow") diff --git a/waku/v2/protocol/subscription/subscriptions_map_test.go b/waku/v2/protocol/subscription/subscriptions_map_test.go index f5c6d21e..01a3b788 100644 --- a/waku/v2/protocol/subscription/subscriptions_map_test.go +++ b/waku/v2/protocol/subscription/subscriptions_map_test.go @@ -153,8 +153,8 @@ func TestSubscriptionsNotify(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - fmap.Notify(p1, envTopic1Ct1) - fmap.Notify(p2, envTopic1Ct1) + fmap.Notify(ctx, p1, envTopic1Ct1) + fmap.Notify(ctx, p2, envTopic1Ct1) }() <-successChan @@ -177,8 +177,8 @@ func TestSubscriptionsNotify(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - fmap.Notify(p1, envTopic1Ct2) - fmap.Notify(p2, envTopic1Ct2) + fmap.Notify(ctx, p1, envTopic1Ct2) + fmap.Notify(ctx, p2, envTopic1Ct2) }() <-successChan @@ -207,8 +207,8 @@ func TestSubscriptionsNotify(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - fmap.Notify(p1, envTopic1Ct1_2) - fmap.Notify(p2, envTopic1Ct1_2) + fmap.Notify(ctx, p1, envTopic1Ct1_2) + fmap.Notify(ctx, p2, 
envTopic1Ct1_2) }() <-successChan // One of these successes is for closing the subscription From 8afeb529df4b9af3312945945bb8bf2c4ae7b033 Mon Sep 17 00:00:00 2001 From: Prem Chaitanya Prathi Date: Wed, 17 Jul 2024 15:32:32 +0530 Subject: [PATCH 08/27] chore: change log levels (#1165) --- waku/v2/peermanager/peer_discovery.go | 2 +- waku/v2/protocol/peer_exchange/protocol.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/waku/v2/peermanager/peer_discovery.go b/waku/v2/peermanager/peer_discovery.go index ae18907c..8ab1c8be 100644 --- a/waku/v2/peermanager/peer_discovery.go +++ b/waku/v2/peermanager/peer_discovery.go @@ -112,7 +112,7 @@ func (pm *PeerManager) discoverPeersByPubsubTopics(pubsubTopics []string, proto for _, shardInfo := range shardsInfo { err = pm.DiscoverAndConnectToPeers(ctx, shardInfo.ClusterID, shardInfo.ShardIDs[0], proto, maxCount) if err != nil { - pm.logger.Error("failed to discover and connect to peers", zap.Error(err)) + pm.logger.Warn("failed to discover and connect to peers", zap.Error(err)) } } } else { diff --git a/waku/v2/protocol/peer_exchange/protocol.go b/waku/v2/protocol/peer_exchange/protocol.go index 5f103e12..08e5051c 100644 --- a/waku/v2/protocol/peer_exchange/protocol.go +++ b/waku/v2/protocol/peer_exchange/protocol.go @@ -98,7 +98,7 @@ func (wakuPX *WakuPeerExchange) onRequest() func(network.Stream) { if wakuPX.limiter != nil && !wakuPX.limiter.Allow() { wakuPX.metrics.RecordError(rateLimitFailure) - wakuPX.log.Error("exceeds the rate limit") + wakuPX.log.Info("exceeds the rate limit") // TODO: peer exchange protocol should contain an err field if err := stream.Reset(); err != nil { wakuPX.log.Error("resetting connection", zap.Error(err)) From f3da812b33e4107fd9e5e19863d04da47f84c9e3 Mon Sep 17 00:00:00 2001 From: Prem Chaitanya Prathi Date: Fri, 19 Jul 2024 10:22:33 +0530 Subject: [PATCH 09/27] fix: record connection failures when stream opening fails for any protocol (#1163) --- 
waku/v2/protocol/filter/client.go | 9 ++++++--- waku/v2/protocol/filter/server.go | 4 ++++ waku/v2/protocol/legacy_store/waku_store_client.go | 3 +++ waku/v2/protocol/lightpush/waku_lightpush.go | 9 ++++++--- waku/v2/protocol/metadata/waku_metadata.go | 4 ++++ waku/v2/protocol/peer_exchange/client.go | 3 +++ waku/v2/protocol/store/client.go | 3 +++ 7 files changed, 29 insertions(+), 6 deletions(-) diff --git a/waku/v2/protocol/filter/client.go b/waku/v2/protocol/filter/client.go index 3342dc29..5649a3c9 100644 --- a/waku/v2/protocol/filter/client.go +++ b/waku/v2/protocol/filter/client.go @@ -242,7 +242,7 @@ func (wf *WakuFilterLightNode) notify(ctx context.Context, remotePeerID peer.ID, } func (wf *WakuFilterLightNode) request(ctx context.Context, requestID []byte, - reqType pb.FilterSubscribeRequest_FilterSubscribeType, contentFilter protocol.ContentFilter, peer peer.ID) error { + reqType pb.FilterSubscribeRequest_FilterSubscribeType, contentFilter protocol.ContentFilter, peerID peer.ID) error { request := &pb.FilterSubscribeRequest{ RequestId: hex.EncodeToString(requestID), FilterSubscribeType: reqType, @@ -255,11 +255,14 @@ func (wf *WakuFilterLightNode) request(ctx context.Context, requestID []byte, return err } - logger := wf.log.With(logging.HostID("peerID", peer)) + logger := wf.log.With(logging.HostID("peerID", peerID)) - stream, err := wf.h.NewStream(ctx, peer, FilterSubscribeID_v20beta1) + stream, err := wf.h.NewStream(ctx, peerID, FilterSubscribeID_v20beta1) if err != nil { wf.metrics.RecordError(dialFailure) + if ps, ok := wf.h.Peerstore().(peerstore.WakuPeerstore); ok { + ps.AddConnFailure(peer.AddrInfo{ID: peerID}) + } return err } diff --git a/waku/v2/protocol/filter/server.go b/waku/v2/protocol/filter/server.go index 2bf63bb5..3d898f89 100644 --- a/waku/v2/protocol/filter/server.go +++ b/waku/v2/protocol/filter/server.go @@ -14,6 +14,7 @@ import ( "github.com/libp2p/go-msgio/pbio" "github.com/prometheus/client_golang/prometheus" 
"github.com/waku-org/go-waku/logging" + "github.com/waku-org/go-waku/waku/v2/peerstore" "github.com/waku-org/go-waku/waku/v2/protocol" "github.com/waku-org/go-waku/waku/v2/protocol/filter/pb" "github.com/waku-org/go-waku/waku/v2/protocol/relay" @@ -273,6 +274,9 @@ func (wf *WakuFilterFullNode) pushMessage(ctx context.Context, logger *zap.Logge wf.metrics.RecordError(pushTimeoutFailure) } else { wf.metrics.RecordError(dialFailure) + if ps, ok := wf.h.Peerstore().(peerstore.WakuPeerstore); ok { + ps.AddConnFailure(peer.AddrInfo{ID: peerID}) + } } logger.Error("opening peer stream", zap.Error(err)) return err diff --git a/waku/v2/protocol/legacy_store/waku_store_client.go b/waku/v2/protocol/legacy_store/waku_store_client.go index b02cd92e..456dada5 100644 --- a/waku/v2/protocol/legacy_store/waku_store_client.go +++ b/waku/v2/protocol/legacy_store/waku_store_client.go @@ -207,6 +207,9 @@ func (store *WakuStore) queryFrom(ctx context.Context, historyRequest *pb.Histor if err != nil { logger.Error("creating stream to peer", zap.Error(err)) store.metrics.RecordError(dialFailure) + if ps, ok := store.h.Peerstore().(peerstore.WakuPeerstore); ok { + ps.AddConnFailure(peer.AddrInfo{ID: selectedPeer}) + } return nil, err } diff --git a/waku/v2/protocol/lightpush/waku_lightpush.go b/waku/v2/protocol/lightpush/waku_lightpush.go index de7bce8b..10028fdd 100644 --- a/waku/v2/protocol/lightpush/waku_lightpush.go +++ b/waku/v2/protocol/lightpush/waku_lightpush.go @@ -188,14 +188,17 @@ func (wakuLP *WakuLightPush) reply(stream network.Stream, responsePushRPC *pb.Pu } // request sends a message via lightPush protocol to either a specified peer or peer that is selected. 
-func (wakuLP *WakuLightPush) request(ctx context.Context, req *pb.PushRequest, params *lightPushRequestParameters, peer peer.ID) (*pb.PushResponse, error) { +func (wakuLP *WakuLightPush) request(ctx context.Context, req *pb.PushRequest, params *lightPushRequestParameters, peerID peer.ID) (*pb.PushResponse, error) { - logger := wakuLP.log.With(logging.HostID("peer", peer)) + logger := wakuLP.log.With(logging.HostID("peer", peerID)) - stream, err := wakuLP.h.NewStream(ctx, peer, LightPushID_v20beta1) + stream, err := wakuLP.h.NewStream(ctx, peerID, LightPushID_v20beta1) if err != nil { logger.Error("creating stream to peer", zap.Error(err)) wakuLP.metrics.RecordError(dialFailure) + if ps, ok := wakuLP.h.Peerstore().(peerstore.WakuPeerstore); ok { + ps.AddConnFailure(peer.AddrInfo{ID: peerID}) + } return nil, err } pushRequestRPC := &pb.PushRpc{RequestId: hex.EncodeToString(params.requestID), Request: req} diff --git a/waku/v2/protocol/metadata/waku_metadata.go b/waku/v2/protocol/metadata/waku_metadata.go index 23a7e455..dc7c44e5 100644 --- a/waku/v2/protocol/metadata/waku_metadata.go +++ b/waku/v2/protocol/metadata/waku_metadata.go @@ -15,6 +15,7 @@ import ( "github.com/libp2p/go-msgio/pbio" "github.com/multiformats/go-multiaddr" "github.com/waku-org/go-waku/logging" + "github.com/waku-org/go-waku/waku/v2/peerstore" "github.com/waku-org/go-waku/waku/v2/protocol" "github.com/waku-org/go-waku/waku/v2/protocol/enr" "github.com/waku-org/go-waku/waku/v2/protocol/metadata/pb" @@ -103,6 +104,9 @@ func (wakuM *WakuMetadata) Request(ctx context.Context, peerID peer.ID) (*pb.Wak stream, err := wakuM.h.NewStream(ctx, peerID, MetadataID_v1) if err != nil { logger.Error("creating stream to peer", zap.Error(err)) + if ps, ok := wakuM.h.Peerstore().(peerstore.WakuPeerstore); ok { + ps.AddConnFailure(peer.AddrInfo{ID: peerID}) + } return nil, err } diff --git a/waku/v2/protocol/peer_exchange/client.go b/waku/v2/protocol/peer_exchange/client.go index 915ce75f..6baf3095 100644 --- 
a/waku/v2/protocol/peer_exchange/client.go +++ b/waku/v2/protocol/peer_exchange/client.go @@ -76,6 +76,9 @@ func (wakuPX *WakuPeerExchange) Request(ctx context.Context, numPeers int, opts stream, err := wakuPX.h.NewStream(ctx, params.selectedPeer, PeerExchangeID_v20alpha1) if err != nil { + if ps, ok := wakuPX.h.Peerstore().(peerstore.WakuPeerstore); ok { + ps.AddConnFailure(peer.AddrInfo{ID: params.selectedPeer}) + } return err } diff --git a/waku/v2/protocol/store/client.go b/waku/v2/protocol/store/client.go index 34a08bcf..5cda4eef 100644 --- a/waku/v2/protocol/store/client.go +++ b/waku/v2/protocol/store/client.go @@ -253,6 +253,9 @@ func (s *WakuStore) queryFrom(ctx context.Context, storeRequest *pb.StoreQueryRe stream, err := s.h.NewStream(ctx, selectedPeer, StoreQueryID_v300) if err != nil { logger.Error("creating stream to peer", zap.Error(err)) + if ps, ok := s.h.Peerstore().(peerstore.WakuPeerstore); ok { + ps.AddConnFailure(peer.AddrInfo{ID: selectedPeer}) + } return nil, err } From 75047cc9da32cabe97e0d8afb43f555e670905aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?rich=CE=9Brd?= Date: Sun, 21 Jul 2024 20:43:22 -0400 Subject: [PATCH 10/27] chore: disconnect on subsequent ping failures (#1164) --- waku/v2/node/keepalive.go | 49 ++++++++++++++++++++++++++++------ waku/v2/node/keepalive_test.go | 4 ++- 2 files changed, 44 insertions(+), 9 deletions(-) diff --git a/waku/v2/node/keepalive.go b/waku/v2/node/keepalive.go index 2cb03317..92e0ab1b 100644 --- a/waku/v2/node/keepalive.go +++ b/waku/v2/node/keepalive.go @@ -7,6 +7,7 @@ import ( "sync" "time" + "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/p2p/protocol/ping" @@ -24,6 +25,17 @@ const sleepDetectionIntervalFactor = 3 const maxPeersToPing = 10 +const maxAllowedSubsequentPingFailures = 2 + +func disconnectAllPeers(host host.Host, logger *zap.Logger) { + for _, p := range host.Network().Peers() { + err 
:= host.Network().ClosePeer(p) + if err != nil { + logger.Debug("closing conn to peer", zap.Error(err)) + } + } +} + // startKeepAlive creates a go routine that periodically pings connected peers. // This is necessary because TCP connections are automatically closed due to inactivity, // and doing a ping will avoid this (with a small bandwidth cost) @@ -54,6 +66,7 @@ func (w *WakuNode) startKeepAlive(ctx context.Context, randomPeersPingDuration t sleepDetectionInterval := int64(randomPeersPingDuration) * sleepDetectionIntervalFactor + var iterationFailure int for { peersToPing := []peer.ID{} @@ -72,12 +85,12 @@ func (w *WakuNode) startKeepAlive(ctx context.Context, randomPeersPingDuration t if difference > sleepDetectionInterval { lastTimeExecuted = w.timesource.Now() w.log.Warn("keep alive hasnt been executed recently. Killing all connections") - for _, p := range w.host.Network().Peers() { - err := w.host.Network().ClosePeer(p) - if err != nil { - w.log.Debug("closing conn to peer", zap.Error(err)) - } - } + disconnectAllPeers(w.host, w.log) + continue + } else if iterationFailure >= maxAllowedSubsequentPingFailures { + iterationFailure = 0 + w.log.Warn("Pinging random peers failed, node is likely disconnected. 
Killing all connections") + disconnectAllPeers(w.host, w.log) continue } @@ -118,16 +131,31 @@ func (w *WakuNode) startKeepAlive(ctx context.Context, randomPeersPingDuration t pingWg := sync.WaitGroup{} pingWg.Add(len(peersToPing)) + pingResultChan := make(chan bool, len(peersToPing)) for _, p := range peersToPing { - go w.pingPeer(ctx, &pingWg, p) + go w.pingPeer(ctx, &pingWg, p, pingResultChan) } pingWg.Wait() + close(pingResultChan) + + failureCounter := 0 + for couldPing := range pingResultChan { + if !couldPing { + failureCounter++ + } + } + + if len(peersToPing) > 0 && failureCounter == len(peersToPing) { + iterationFailure++ + } else { + iterationFailure = 0 + } lastTimeExecuted = w.timesource.Now() } } -func (w *WakuNode) pingPeer(ctx context.Context, wg *sync.WaitGroup, peerID peer.ID) { +func (w *WakuNode) pingPeer(ctx context.Context, wg *sync.WaitGroup, peerID peer.ID, resultChan chan bool) { defer wg.Done() logger := w.log.With(logging.HostID("peer", peerID)) @@ -135,17 +163,20 @@ func (w *WakuNode) pingPeer(ctx context.Context, wg *sync.WaitGroup, peerID peer for i := 0; i < maxAllowedPingFailures; i++ { if w.host.Network().Connectedness(peerID) != network.Connected { // Peer is no longer connected. 
No need to ping + resultChan <- false return } logger.Debug("pinging") if w.tryPing(ctx, peerID, logger) { + resultChan <- true return } } if w.host.Network().Connectedness(peerID) != network.Connected { + resultChan <- false return } @@ -153,6 +184,8 @@ func (w *WakuNode) pingPeer(ctx context.Context, wg *sync.WaitGroup, peerID peer if err := w.host.Network().ClosePeer(peerID); err != nil { logger.Debug("closing conn to peer", zap.Error(err)) } + + resultChan <- false } func (w *WakuNode) tryPing(ctx context.Context, peerID peer.ID, logger *zap.Logger) bool { diff --git a/waku/v2/node/keepalive_test.go b/waku/v2/node/keepalive_test.go index 0508fd79..63c48abb 100644 --- a/waku/v2/node/keepalive_test.go +++ b/waku/v2/node/keepalive_test.go @@ -45,9 +45,11 @@ func TestKeepAlive(t *testing.T) { } w.wg.Add(1) - w.pingPeer(ctx2, w.wg, peerID2) + peerFailureSignalChan := make(chan bool, 1) + w.pingPeer(ctx2, w.wg, peerID2, peerFailureSignalChan) require.NoError(t, ctx.Err()) + close(peerFailureSignalChan) } func TestPeriodicKeepAlive(t *testing.T) { From 58d9721026db2f38cb59ac016526b4c021d2046c Mon Sep 17 00:00:00 2001 From: Prem Chaitanya Prathi Date: Wed, 24 Jul 2024 07:59:17 +0530 Subject: [PATCH 11/27] fix: filter ping timeout and retry in case of failure (#1166) --- waku/v2/api/filter.go | 1 - .../v2/protocol/filter/filter_health_check.go | 26 +++++++++++++------ 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/waku/v2/api/filter.go b/waku/v2/api/filter.go index b9d64310..1f9ea6be 100644 --- a/waku/v2/api/filter.go +++ b/waku/v2/api/filter.go @@ -14,7 +14,6 @@ import ( "go.uber.org/zap" ) -const FilterPingTimeout = 5 * time.Second const MultiplexChannelBuffer = 100 type FilterConfig struct { diff --git a/waku/v2/protocol/filter/filter_health_check.go b/waku/v2/protocol/filter/filter_health_check.go index 836175b5..a6b76a34 100644 --- a/waku/v2/protocol/filter/filter_health_check.go +++ b/waku/v2/protocol/filter/filter_health_check.go @@ -8,6 +8,8 @@ 
import ( "go.uber.org/zap" ) +const PingTimeout = 5 * time.Second + func (wf *WakuFilterLightNode) PingPeers() { //Send a ping to all the peers and report their status to corresponding subscriptions // Alive or not or set state of subcription?? @@ -17,17 +19,23 @@ func (wf *WakuFilterLightNode) PingPeers() { } func (wf *WakuFilterLightNode) PingPeer(peer peer.ID) { - ctxWithTimeout, cancel := context.WithTimeout(wf.CommonService.Context(), wf.peerPingInterval) + ctxWithTimeout, cancel := context.WithTimeout(wf.CommonService.Context(), PingTimeout) defer cancel() err := wf.Ping(ctxWithTimeout, peer) if err != nil { wf.log.Warn("Filter ping failed towards peer", zap.Stringer("peer", peer), zap.Error(err)) - - subscriptions := wf.subscriptions.GetAllSubscriptionsForPeer(peer) - for _, subscription := range subscriptions { - wf.log.Debug("Notifying sub closing", zap.String("subID", subscription.ID)) - //Indicating that subscription is closing, - subscription.SetClosing() + //quickly retry ping again before marking subscription as failure + //Note that PingTimeout is a fraction of PingInterval so this shouldn't cause parallel pings being sent. 
+ ctxWithTimeout, cancel := context.WithTimeout(wf.CommonService.Context(), PingTimeout) + defer cancel() + err = wf.Ping(ctxWithTimeout, peer) + if err != nil { + subscriptions := wf.subscriptions.GetAllSubscriptionsForPeer(peer) + for _, subscription := range subscriptions { + wf.log.Debug("Notifying sub closing", zap.String("subID", subscription.ID)) + //Indicating that subscription is closing, + subscription.SetClosing() + } } } } @@ -39,7 +47,9 @@ func (wf *WakuFilterLightNode) FilterHealthCheckLoop() { for { select { case <-ticker.C: - wf.PingPeers() + if wf.onlineChecker.IsOnline() { + wf.PingPeers() + } case <-wf.CommonService.Context().Done(): return } From a9be17fd4848508a5a1d55a9113b33457d4859e4 Mon Sep 17 00:00:00 2001 From: Prem Chaitanya Prathi Date: Wed, 24 Jul 2024 18:17:31 +0530 Subject: [PATCH 12/27] chore: method to disconnect all peers and not notify (#1168) --- waku/v2/node/wakunode2.go | 11 +++++++++++ waku/v2/protocol/filter/client.go | 15 --------------- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/waku/v2/node/wakunode2.go b/waku/v2/node/wakunode2.go index c29a2b93..dd7fbae9 100644 --- a/waku/v2/node/wakunode2.go +++ b/waku/v2/node/wakunode2.go @@ -799,6 +799,17 @@ func (w *WakuNode) ClosePeerByAddress(address string) error { return w.ClosePeerById(info.ID) } +func (w *WakuNode) DisconnectAllPeers() { + w.host.Network().StopNotify(w.connectionNotif) + for _, peerID := range w.host.Network().Peers() { + err := w.ClosePeerById(peerID) + if err != nil { + w.log.Info("failed to close peer", zap.Stringer("peer", peerID), zap.Error(err)) + } + } + w.host.Network().Notify(w.connectionNotif) +} + // ClosePeerById is used to close a connection to a peer func (w *WakuNode) ClosePeerById(id peer.ID) error { err := w.host.Network().ClosePeer(id) diff --git a/waku/v2/protocol/filter/client.go b/waku/v2/protocol/filter/client.go index 5649a3c9..52b4efa6 100644 --- a/waku/v2/protocol/filter/client.go +++ 
b/waku/v2/protocol/filter/client.go @@ -147,17 +147,6 @@ func (wf *WakuFilterLightNode) Stop() { }) } -func (wf *WakuFilterLightNode) unsubscribeWithoutSubscription(cf protocol.ContentFilter, peerID peer.ID) { - err := wf.request( - wf.Context(), - protocol.GenerateRequestID(), - pb.FilterSubscribeRequest_UNSUBSCRIBE_ALL, - cf, peerID) - if err != nil { - wf.log.Warn("could not unsubscribe from peer", logging.HostID("peerID", peerID), zap.Error(err)) - } -} - func (wf *WakuFilterLightNode) onRequest(ctx context.Context) func(network.Stream) { return func(stream network.Stream) { peerID := stream.Conn().RemotePeer() @@ -168,8 +157,6 @@ func (wf *WakuFilterLightNode) onRequest(ctx context.Context) func(network.Strea logger.Warn("received message push from unknown peer", logging.HostID("peerID", peerID)) wf.metrics.RecordError(unknownPeerMessagePush) //Send a wildcard unsubscribe to this peer so that further requests are not forwarded to us - //This could be happening due to https://github.com/waku-org/go-waku/issues/1124 - go wf.unsubscribeWithoutSubscription(protocol.ContentFilter{}, peerID) if err := stream.Reset(); err != nil { wf.log.Error("resetting connection", zap.Error(err)) } @@ -216,8 +203,6 @@ func (wf *WakuFilterLightNode) onRequest(ctx context.Context) func(network.Strea cf := protocol.NewContentFilter(pubSubTopic, messagePush.WakuMessage.ContentTopic) if !wf.subscriptions.Has(peerID, cf) { logger.Warn("received messagepush with invalid subscription parameters") - //Unsubscribe from that peer for the contentTopic, possibly due to https://github.com/waku-org/go-waku/issues/1124 - go wf.unsubscribeWithoutSubscription(cf, peerID) wf.metrics.RecordError(invalidSubscriptionMessage) return } From 76d8fd687d10920c111c775b18b021a73bfda2ca Mon Sep 17 00:00:00 2001 From: Prem Chaitanya Prathi Date: Tue, 30 Jul 2024 11:02:59 +0530 Subject: [PATCH 13/27] fix: use total peers for pubsubTopic as out peers target (#1170) --- waku/v2/peermanager/peer_manager.go | 6 
++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/waku/v2/peermanager/peer_manager.go b/waku/v2/peermanager/peer_manager.go index 3126f914..1cfc5484 100644 --- a/waku/v2/peermanager/peer_manager.go +++ b/waku/v2/peermanager/peer_manager.go @@ -313,8 +313,10 @@ func (pm *PeerManager) ensureMinRelayConnsPerTopic() { // peers. This will ensure that the peers returned by this function // match those peers that are currently connected - curPeerLen := pm.checkAndUpdateTopicHealth(topicInst) - if curPeerLen < pm.OutPeersTarget { + meshPeerLen := pm.checkAndUpdateTopicHealth(topicInst) + topicPeers := pm.host.Peerstore().(wps.WakuPeerstore).PeersByPubSubTopic(topicStr) + curPeerLen := topicPeers.Len() + if meshPeerLen < waku_proto.GossipSubDMin || curPeerLen < pm.OutPeersTarget { pm.logger.Debug("subscribed topic has not reached target peers, initiating more connections to maintain healthy mesh", zap.String("pubSubTopic", topicStr), zap.Int("connectedPeerCount", curPeerLen), zap.Int("targetPeers", pm.OutPeersTarget)) From e1e136cc68a9f6aa3202b6206d4a3c99bd35831a Mon Sep 17 00:00:00 2001 From: Prem Chaitanya Prathi Date: Tue, 30 Jul 2024 18:06:41 +0530 Subject: [PATCH 14/27] fix: parallelize filter subs to different peers (#1169) --- waku/v2/protocol/filter/client.go | 42 +++++++++++++------- waku/v2/protocol/lightpush/waku_lightpush.go | 15 ++++--- 2 files changed, 37 insertions(+), 20 deletions(-) diff --git a/waku/v2/protocol/filter/client.go b/waku/v2/protocol/filter/client.go index 52b4efa6..c52c9098 100644 --- a/waku/v2/protocol/filter/client.go +++ b/waku/v2/protocol/filter/client.go @@ -407,21 +407,35 @@ func (wf *WakuFilterLightNode) Subscribe(ctx context.Context, contentFilter prot paramsCopy := params.Copy() paramsCopy.selectedPeers = selectedPeers - for _, peer := range selectedPeers { - err := wf.request( - ctx, - params.requestID, - pb.FilterSubscribeRequest_SUBSCRIBE, - cFilter, - peer) - if err != nil { - wf.log.Error("Failed to subscribe", 
zap.String("pubSubTopic", pubSubTopic), zap.Strings("contentTopics", cTopics), - zap.Error(err)) - failedContentTopics = append(failedContentTopics, cTopics...) - continue + var wg sync.WaitGroup + reqCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + tmpSubs := make([]*subscription.SubscriptionDetails, len(selectedPeers)) + for i, peerID := range selectedPeers { + wg.Add(1) + go func(index int, ID peer.ID) { + defer wg.Done() + err := wf.request( + reqCtx, + params.requestID, + pb.FilterSubscribeRequest_SUBSCRIBE, + cFilter, + ID) + if err != nil { + wf.log.Error("Failed to subscribe", zap.String("pubSubTopic", pubSubTopic), zap.Strings("contentTopics", cTopics), + zap.Error(err)) + failedContentTopics = append(failedContentTopics, cTopics...) + } else { + wf.log.Debug("subscription successful", zap.String("pubSubTopic", pubSubTopic), zap.Strings("contentTopics", cTopics), zap.Stringer("peer", ID)) + tmpSubs[index] = wf.subscriptions.NewSubscription(ID, cFilter) + } + }(i, peerID) + } + wg.Wait() + for _, sub := range tmpSubs { + if sub != nil { + subscriptions = append(subscriptions, sub) } - wf.log.Debug("subscription successful", zap.String("pubSubTopic", pubSubTopic), zap.Strings("contentTopics", cTopics), zap.Stringer("peer", peer)) - subscriptions = append(subscriptions, wf.subscriptions.NewSubscription(peer, cFilter)) } } diff --git a/waku/v2/protocol/lightpush/waku_lightpush.go b/waku/v2/protocol/lightpush/waku_lightpush.go index 10028fdd..c0a72c2e 100644 --- a/waku/v2/protocol/lightpush/waku_lightpush.go +++ b/waku/v2/protocol/lightpush/waku_lightpush.go @@ -7,6 +7,7 @@ import ( "fmt" "math" "sync" + "time" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" @@ -328,19 +329,21 @@ func (wakuLP *WakuLightPush) Publish(ctx context.Context, message *wpb.WakuMessa logger.Debug("publishing message", zap.Stringers("peers", params.selectedPeers)) var wg sync.WaitGroup - var responses []*pb.PushResponse - for 
_, peerID := range params.selectedPeers { + responses := make([]*pb.PushResponse, params.selectedPeers.Len()) + reqCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + for i, peerID := range params.selectedPeers { wg.Add(1) - go func(id peer.ID) { + go func(index int, id peer.ID) { paramsValue := *params paramsValue.requestID = protocol.GenerateRequestID() defer wg.Done() - response, err := wakuLP.request(ctx, req, ¶msValue, id) + response, err := wakuLP.request(reqCtx, req, ¶msValue, id) if err != nil { logger.Error("could not publish message", zap.Error(err), zap.Stringer("peer", id)) } - responses = append(responses, response) - }(peerID) + responses[index] = response + }(i, peerID) } wg.Wait() var successCount int From cd70fbc912509f756959a7eaf11ee92534256df5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Soko=C5=82owski?= Date: Sat, 6 Apr 2024 14:20:57 +0300 Subject: [PATCH 15/27] chore(nix): refactor, fix library packages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bit of a cleanup to make it more readable and also fix building of libraries. Moving the actual build to `default.nix` makes `flake.nix` more readable. 
Signed-off-by: Jakub SokoÅ‚owski --- ci/Jenkinsfile.nix-flake | 14 ++++---------- default.nix | 33 +++++++++++++++++++++++++++++++++ flake.nix | 40 ++++++++++++++++++++++++++++++---------- 3 files changed, 67 insertions(+), 20 deletions(-) create mode 100644 default.nix diff --git a/ci/Jenkinsfile.nix-flake b/ci/Jenkinsfile.nix-flake index cb5f8fa9..625db096 100644 --- a/ci/Jenkinsfile.nix-flake +++ b/ci/Jenkinsfile.nix-flake @@ -1,4 +1,4 @@ -library 'status-jenkins-lib@v1.7.0' +library 'status-jenkins-lib@v1.9.3' pipeline { agent { @@ -27,10 +27,7 @@ pipeline { stages { stage('Build') { steps { script { - sh("""#!/usr/bin/env bash - ${nix._sourceProfileInline()} - nix build --print-out-paths .#node - """) + nix.flake('node') } } } stage('Check') { @@ -45,15 +42,12 @@ pipeline { stages { stage('Build') { steps { script { - sh("""#!/usr/bin/env bash - ${nix._sourceProfileInline()} - nix build --print-out-paths .#library - """) + nix.flake('static-library') } } } stage('Check') { steps { - sh 'ldd ./result/bin/c' + sh 'readelf -h ./result/bin/libgowaku.a' } } } diff --git a/default.nix b/default.nix new file mode 100644 index 00000000..be1c1928 --- /dev/null +++ b/default.nix @@ -0,0 +1,33 @@ +{ + pkgs ? import { }, + self ? ./., + subPkgs ? "cmd/waku", + ldflags ? [], + output ? null, + commit ? builtins.substring 0 7 (self.rev or "dirty"), + version ? builtins.readFile ./VERSION, +}: + +pkgs.buildGo121Module { + name = "go-waku"; + src = self; + + subPackages = subPkgs; + tags = ["gowaku_no_rln"]; + ldflags = [ + "-X github.com/waku-org/go-waku/waku/v2/node.GitCommit=${commit}" + "-X github.com/waku-org/go-waku/waku/v2/node.Version=${version}" + ] ++ ldflags; + doCheck = false; + + # Otherwise library would be just called bin/c. + postInstall = if builtins.isString output then '' + mv $out/bin/* $out/bin/${output} + '' else ""; + + # FIXME: This needs to be manually changed when updating modules. 
+ vendorHash = "sha256-zwvZVTiwv7cc4vAM2Fil+qAG1v1J8q4BqX5lCgCStIc="; + + # Fix for 'nix run' trying to execute 'go-waku'. + meta = { mainProgram = "waku"; }; +} diff --git a/flake.nix b/flake.nix index fff9716b..e81473e2 100644 --- a/flake.nix +++ b/flake.nix @@ -11,11 +11,11 @@ ]; forAllSystems = f: nixpkgs.lib.genAttrs supportedSystems (system: f system); - nixpkgsFor = forAllSystems (system: import nixpkgs { inherit system; }); + pkgsFor = forAllSystems (system: import nixpkgs { inherit system; }); buildPackage = system: subPackages: let - pkgs = nixpkgsFor.${system}; + pkgs = pkgsFor.${system}; commit = builtins.substring 0 7 (self.rev or "dirty"); version = builtins.readFile ./VERSION; in pkgs.buildGo121Module { @@ -34,17 +34,37 @@ meta = { mainProgram = "waku"; }; }; in rec { - packages = forAllSystems (system: { - node = buildPackage system ["cmd/waku"]; - library = buildPackage system ["library/c"]; + packages = forAllSystems (system: let + pkgs = pkgsFor.${system}; + os = pkgs.stdenv.hostPlatform.uname.system; + sttLibExtMap = { Windows = "lib"; Darwin = "a"; Linux = "a"; }; + dynLibExtMap = { Windows = "dll"; Darwin = "dylib"; Linux = "so"; }; + buildPackage = pkgs.callPackage ./default.nix; + in rec { + default = node; + node = buildPackage { + inherit self; + subPkgs = ["cmd/waku"]; + }; + static-library = buildPackage { + inherit self; + subPkgs = ["library/c"]; + ldflags = ["-buildmode=c-archive"]; + output = "libgowaku.${sttLibExtMap.${os}}"; + }; + # FIXME: Compilation fails with: + # relocation R_X86_64_TPOFF32 against runtime.tlsg can not be + # used when making a shared object; recompile with -fPIC + dynamic-library = buildPackage { + inherit self; + subPkgs = ["library/c"]; + ldflags = ["-buildmode=c-shared"]; + output = "libgowaku.${dynLibExtMap.${os}}"; + }; }); - defaultPackage = forAllSystems (system: - buildPackage system ["cmd/waku"] - ); - devShells = forAllSystems (system: let - pkgs = nixpkgsFor.${system}; + pkgs = pkgsFor.${system}; 
inherit (pkgs) lib stdenv mkShell; in { default = mkShell { From a4009b70d1cbdc26226b48d6b44f42c4ab57c333 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Soko=C5=82owski?= Date: Tue, 30 Jul 2024 18:37:34 +0200 Subject: [PATCH 16/27] fix: replace references to old statusim.net domain MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use of `statusim.net` domain been deprecated since March: https://github.com/status-im/infra-shards/commit/7df38c14 Signed-off-by: Jakub SokoÅ‚owski --- docs/api/filter.md | 2 +- docs/operators/how-to/configure-store.md | 4 ++-- .../app/src/main/java/com/example/waku/MainActivity.kt | 2 +- examples/c-bindings/main.c | 2 +- examples/chat2/README.md | 2 +- examples/waku-csharp/waku-csharp/Program.cs | 2 +- library/c/README.md | 4 ++-- waku/v2/node/address_test.go | 8 ++++---- waku/v2/protocol/enr/enr_test.go | 4 ++-- 9 files changed, 15 insertions(+), 15 deletions(-) diff --git a/docs/api/filter.md b/docs/api/filter.md index d944699e..01d5ba8b 100644 --- a/docs/api/filter.md +++ b/docs/api/filter.md @@ -38,7 +38,7 @@ One of these options must be specified when instantiating a node supporting the ```go ... -peerAddr, err := multiaddr.NewMultiaddr("/dns4/node-01.do-ams3.waku.test.statusim.net/tcp/30303/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W") +peerAddr, err := multiaddr.NewMultiaddr("/dns4/node-01.do-ams3.waku.test.status.im/tcp/30303/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W") if err != nil { panic(err) } diff --git a/docs/operators/how-to/configure-store.md b/docs/operators/how-to/configure-store.md index ee2ecb94..5b91f472 100644 --- a/docs/operators/how-to/configure-store.md +++ b/docs/operators/how-to/configure-store.md @@ -17,12 +17,12 @@ or store and serve historical messages itself. Ensure that `store` is enabled (this is `true` by default) and provide at least one store service node address with the `--storenode` CLI option. 
-See the following example, using the peer at `/dns4/node-01.ac-cn-hongkong-c.waku.test.statusim.net/tcp/30303/p2p/16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp` as store service node. +See the following example, using the peer at `/dns4/node-01.ac-cn-hongkong-c.waku.test.status.im/tcp/30303/p2p/16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp` as store service node. ```sh ./build/waku \ --store=true \ - --storenode=/dns4/node-01.ac-cn-hongkong-c.waku.test.statusim.net/tcp/30303/p2p/16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp + --storenode=/dns4/node-01.ac-cn-hongkong-c.waku.test.status.im/tcp/30303/p2p/16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp ``` Your node can now send queries to retrieve historical messages diff --git a/examples/android-kotlin/app/src/main/java/com/example/waku/MainActivity.kt b/examples/android-kotlin/app/src/main/java/com/example/waku/MainActivity.kt index 094864fe..5bbd8e25 100644 --- a/examples/android-kotlin/app/src/main/java/com/example/waku/MainActivity.kt +++ b/examples/android-kotlin/app/src/main/java/com/example/waku/MainActivity.kt @@ -59,7 +59,7 @@ class MainActivity : AppCompatActivity() { lbl.text = (lbl.text.toString() + ">>> Default pubsub topic: " + defaultPubsubTopic() + "\n"); try { - node.connect("/dns4/node-01.gc-us-central1-a.waku.test.statusim.net/tcp/30303/p2p/16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG") + node.connect("/dns4/node-01.gc-us-central1-a.waku.test.status.im/tcp/30303/p2p/16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG") lbl.text = (lbl.text.toString() + ">>> Connected to Peer" + "\n") node.peers().forEach { diff --git a/examples/c-bindings/main.c b/examples/c-bindings/main.c index d3b9015d..fcd49ffa 100644 --- a/examples/c-bindings/main.c +++ b/examples/c-bindings/main.c @@ -159,7 +159,7 @@ int main(int argc, char *argv[]) printf("Discovered nodes: %s\n", discoveredNodes); // Connect to a node - waku_connect(ctx, 
"/dns4/node-01.do-ams3.waku.test.statusim.net/tcp/30303/" + waku_connect(ctx, "/dns4/node-01.do-ams3.waku.test.status.im/tcp/30303/" "p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W", 0, on_response, NULL); diff --git a/examples/chat2/README.md b/examples/chat2/README.md index 382d1101..0ef66f8a 100644 --- a/examples/chat2/README.md +++ b/examples/chat2/README.md @@ -28,7 +28,7 @@ You may need to set DNS server if behind a VPN, In order to connect to a *specific* node as [`relay`](https://specs.vac.dev/specs/waku/v2/waku-relay) peer, define that node's `multiaddr` as a `staticnode` when starting the app: ``` -./build/chat2 -staticnode=/dns4/node-01.do-ams3.waku.test.statusim.net/tcp/30303/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W +./build/chat2 -staticnode=/dns4/node-01.do-ams3.waku.test.status.im/tcp/30303/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W ``` This will bypass the random peer selection process and connect to the specified node. diff --git a/examples/waku-csharp/waku-csharp/Program.cs b/examples/waku-csharp/waku-csharp/Program.cs index 927766e6..461f5c77 100644 --- a/examples/waku-csharp/waku-csharp/Program.cs +++ b/examples/waku-csharp/waku-csharp/Program.cs @@ -45,7 +45,7 @@ Console.WriteLine(">>> Default pubsub topic: " + Waku.Utils.DefaultPubsubTopic() try { - node.Connect("/dns4/node-01.gc-us-central1-a.waku.test.statusim.net/tcp/30303/p2p/16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG"); + node.Connect("/dns4/node-01.gc-us-central1-a.waku.test.status.im/tcp/30303/p2p/16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG"); Console.WriteLine(">>> Connected to Peer"); foreach (Waku.Peer peer in node.Peers()) diff --git a/library/c/README.md b/library/c/README.md index e77c5c40..30c2e060 100644 --- a/library/c/README.md +++ b/library/c/README.md @@ -1340,8 +1340,8 @@ If the function is executed succesfully, `onOkCb` will receive an array objects { 
"peerID":"16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W", "multiaddrs":[ - "/dns4/node-01.do-ams3.waku.test.statusim.net/tcp/30303/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W", - "/dns4/node-01.do-ams3.waku.test.statusim.net/tcp/8000/wss/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W" + "/dns4/node-01.do-ams3.waku.test.status.im/tcp/30303/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W", + "/dns4/node-01.do-ams3.waku.test.status.im/tcp/8000/wss/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W" ], "enr":"enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Ugl_r25UHQJ3f1rIRrpzxJXSMaJe4yk1XFSAYJpZIJ2NIJpcISygI2rim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQJATXRSRSUyTw_QLB6H_U3oziVQgNRgrXpK7wp2AMyNxYN0Y3CCdl-DdWRwgiMohXdha3UyDw" }, diff --git a/waku/v2/node/address_test.go b/waku/v2/node/address_test.go index 6f270e16..dd7e482e 100644 --- a/waku/v2/node/address_test.go +++ b/waku/v2/node/address_test.go @@ -17,10 +17,10 @@ func TestExternalAddressSelection(t *testing.T) { a5, _ := ma.NewMultiaddr("/dns4/www.status.im/tcp/443/wss/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Valid a6, _ := ma.NewMultiaddr("/ip4/192.168.1.20/tcp/19710/wss/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Invalid (local + wss) a7, _ := ma.NewMultiaddr("/ip4/192.168.1.20/tcp/19710/ws/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Invalid (it's useless) - a8, _ := ma.NewMultiaddr("/dns4/node-02.gc-us-central1-a.status.prod.statusim.net/tcp/30303/p2p/16Uiu2HAmDQugwDHM3YeUp86iGjrUvbdw3JPRgikC7YoGBsT2ymMg/p2p-circuit/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID - a9, _ := 
ma.NewMultiaddr("/dns4/node-02.gc-us-central1-a.status.prod.statusim.net/tcp/443/wss/p2p/16Uiu2HAmDQugwDHM3YeUp86iGjrUvbdw3JPRgikC7YoGBsT2ymMg/p2p-circuit/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID - a10, _ := ma.NewMultiaddr("/dns4/node-01.gc-us-central1-a.waku.test.statusim.net/tcp/8000/wss/p2p/16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG/p2p-circuit/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID - a11, _ := ma.NewMultiaddr("/dns4/node-01.gc-us-central1-a.waku.test.statusim.net/tcp/30303/p2p/16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG/p2p-circuit/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID + a8, _ := ma.NewMultiaddr("/dns4/node-02.gc-us-central1-a.status.prod.status.im/tcp/30303/p2p/16Uiu2HAmDQugwDHM3YeUp86iGjrUvbdw3JPRgikC7YoGBsT2ymMg/p2p-circuit/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID + a9, _ := ma.NewMultiaddr("/dns4/node-02.gc-us-central1-a.status.prod.status.im/tcp/443/wss/p2p/16Uiu2HAmDQugwDHM3YeUp86iGjrUvbdw3JPRgikC7YoGBsT2ymMg/p2p-circuit/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID + a10, _ := ma.NewMultiaddr("/dns4/node-01.gc-us-central1-a.waku.test.status.im/tcp/8000/wss/p2p/16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG/p2p-circuit/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID + a11, _ := ma.NewMultiaddr("/dns4/node-01.gc-us-central1-a.waku.test.status.im/tcp/30303/p2p/16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG/p2p-circuit/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID a12, _ := ma.NewMultiaddr("/ip4/188.23.1.8/tcp/30303/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID addrs := []ma.Multiaddr{a1, a2, a3, a4, a5, a6, a7} diff --git a/waku/v2/protocol/enr/enr_test.go b/waku/v2/protocol/enr/enr_test.go index 83ec0f99..5a3f712a 100644 --- a/waku/v2/protocol/enr/enr_test.go +++ b/waku/v2/protocol/enr/enr_test.go @@ -75,8 +75,8 @@ 
func TestMultiaddr(t *testing.T) { wakuFlag := NewWakuEnrBitfield(true, true, true, true) //wss, _ := ma.NewMultiaddr("/dns4/www.somedomainname.com/tcp/443/wss") - circuit1, _ := ma.NewMultiaddr("/dns4/node-02.gc-us-central1-a.status.prod.statusim.net/tcp/30303/p2p/16Uiu2HAmDQugwDHM3YeUp86iGjrUvbdw3JPRgikC7YoGBsT2ymMg/p2p-circuit") - circuit2, _ := ma.NewMultiaddr("/dns4/node-01.gc-us-central1-a.status.prod.statusim.net/tcp/30303/p2p/16Uiu2HAmDQugwDHM3YeUp86iGjrUvbdw3JPRgikC7YoGBsT2ymMg/p2p-circuit") + circuit1, _ := ma.NewMultiaddr("/dns4/node-02.gc-us-central1-a.status.prod.status.im/tcp/30303/p2p/16Uiu2HAmDQugwDHM3YeUp86iGjrUvbdw3JPRgikC7YoGBsT2ymMg/p2p-circuit") + circuit2, _ := ma.NewMultiaddr("/dns4/node-01.gc-us-central1-a.status.prod.status.im/tcp/30303/p2p/16Uiu2HAmDQugwDHM3YeUp86iGjrUvbdw3JPRgikC7YoGBsT2ymMg/p2p-circuit") multiaddrValues := []ma.Multiaddr{ //wss, From 04a9af931f26bca448c74ec21710f74fdbfd9034 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?rich=CE=9Brd?= Date: Wed, 31 Jul 2024 14:58:21 -0400 Subject: [PATCH 17/27] fix: handle scenario where the node's ENR has no shard (due to shard update) (#1176) --- waku/v2/node/address_test.go | 24 +++++++++++----------- waku/v2/node/localnode.go | 2 +- waku/v2/protocol/metadata/waku_metadata.go | 2 +- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/waku/v2/node/address_test.go b/waku/v2/node/address_test.go index dd7e482e..e2227a94 100644 --- a/waku/v2/node/address_test.go +++ b/waku/v2/node/address_test.go @@ -10,18 +10,18 @@ import ( ) func TestExternalAddressSelection(t *testing.T) { - a1, _ := ma.NewMultiaddr("/ip4/192.168.0.106/tcp/60000/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Valid - a2, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/60000/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Valid but should not be prefered - a3, _ := ma.NewMultiaddr("/ip4/192.168.1.20/tcp/19710/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Valid - a4, _ := 
ma.NewMultiaddr("/dns4/www.status.im/tcp/2012/ws/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Invalid (it's useless) - a5, _ := ma.NewMultiaddr("/dns4/www.status.im/tcp/443/wss/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Valid - a6, _ := ma.NewMultiaddr("/ip4/192.168.1.20/tcp/19710/wss/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Invalid (local + wss) - a7, _ := ma.NewMultiaddr("/ip4/192.168.1.20/tcp/19710/ws/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Invalid (it's useless) - a8, _ := ma.NewMultiaddr("/dns4/node-02.gc-us-central1-a.status.prod.status.im/tcp/30303/p2p/16Uiu2HAmDQugwDHM3YeUp86iGjrUvbdw3JPRgikC7YoGBsT2ymMg/p2p-circuit/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID - a9, _ := ma.NewMultiaddr("/dns4/node-02.gc-us-central1-a.status.prod.status.im/tcp/443/wss/p2p/16Uiu2HAmDQugwDHM3YeUp86iGjrUvbdw3JPRgikC7YoGBsT2ymMg/p2p-circuit/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID - a10, _ := ma.NewMultiaddr("/dns4/node-01.gc-us-central1-a.waku.test.status.im/tcp/8000/wss/p2p/16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG/p2p-circuit/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID - a11, _ := ma.NewMultiaddr("/dns4/node-01.gc-us-central1-a.waku.test.status.im/tcp/30303/p2p/16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG/p2p-circuit/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID - a12, _ := ma.NewMultiaddr("/ip4/188.23.1.8/tcp/30303/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID + a1, _ := ma.NewMultiaddr("/ip4/192.168.0.106/tcp/60000/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Valid + a2, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/60000/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Valid but should not be prefered + a3, _ := ma.NewMultiaddr("/ip4/192.168.1.20/tcp/19710/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // 
Valid + a4, _ := ma.NewMultiaddr("/dns4/www.status.im/tcp/2012/ws/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Invalid (it's useless) + a5, _ := ma.NewMultiaddr("/dns4/www.status.im/tcp/443/wss/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Valid + a6, _ := ma.NewMultiaddr("/ip4/192.168.1.20/tcp/19710/wss/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Invalid (local + wss) + a7, _ := ma.NewMultiaddr("/ip4/192.168.1.20/tcp/19710/ws/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Invalid (it's useless) + a8, _ := ma.NewMultiaddr("/dns4/store-01.gc-us-central1-a.status.prod.status.im/tcp/30303/p2p/16Uiu2HAmDQugwDHM3YeUp86iGjrUvbdw3JPRgikC7YoGBsT2ymMg/p2p-circuit/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID + a9, _ := ma.NewMultiaddr("/dns4/store-01.gc-us-central1-a.status.prod.status.im/tcp/443/wss/p2p/16Uiu2HAmDQugwDHM3YeUp86iGjrUvbdw3JPRgikC7YoGBsT2ymMg/p2p-circuit/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID + a10, _ := ma.NewMultiaddr("/dns4/node-01.gc-us-central1-a.waku.test.status.im/tcp/8000/wss/p2p/16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG/p2p-circuit/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID + a11, _ := ma.NewMultiaddr("/dns4/node-01.gc-us-central1-a.waku.test.status.im/tcp/30303/p2p/16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG/p2p-circuit/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID + a12, _ := ma.NewMultiaddr("/ip4/188.23.1.8/tcp/30303/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID addrs := []ma.Multiaddr{a1, a2, a3, a4, a5, a6, a7} diff --git a/waku/v2/node/localnode.go b/waku/v2/node/localnode.go index 74ba35ce..9de6c59f 100644 --- a/waku/v2/node/localnode.go +++ b/waku/v2/node/localnode.go @@ -382,7 +382,7 @@ func (w *WakuNode) watchTopicShards(ctx context.Context) error { } if len(rs) == 1 { - w.log.Info("updating advertised relay shards in ENR") + 
w.log.Info("updating advertised relay shards in ENR", zap.Any("newShardInfo", rs[0])) if len(rs[0].ShardIDs) != len(topics) { w.log.Warn("A mix of named and static shards found. ENR shard will contain only the following shards", zap.Any("shards", rs[0])) } diff --git a/waku/v2/protocol/metadata/waku_metadata.go b/waku/v2/protocol/metadata/waku_metadata.go index dc7c44e5..93a70a2b 100644 --- a/waku/v2/protocol/metadata/waku_metadata.go +++ b/waku/v2/protocol/metadata/waku_metadata.go @@ -339,7 +339,7 @@ func (wakuM *WakuMetadata) DisconnectPeerOnShardMismatch(ctx context.Context, pe return err } - if !rs.ContainsAnyShard(rs.ClusterID, peerShards) { + if rs != nil && !rs.ContainsAnyShard(rs.ClusterID, peerShards) { wakuM.log.Info("shard mismatch", logging.HostID("peerID", peerID), zap.Uint16("clusterID", rs.ClusterID), zap.Uint16s("ourShardIDs", rs.ShardIDs), zap.Uint16s("theirShardIDs", peerShards)) wakuM.disconnect(peerID) return errors.New("shard mismatch") From 0fc5bcc9537ec9ab0485c1ca8f196d1311667931 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?rich=CE=9Brd?= Date: Thu, 1 Aug 2024 09:15:05 -0400 Subject: [PATCH 18/27] refactor: move rate limiter and priority queue from status-go to api package (#1171) --- go.mod | 2 +- waku/v2/api/{ => filter}/filter.go | 2 +- waku/v2/api/{ => filter}/filter_test.go | 2 +- waku/v2/api/publish/common.go | 9 ++ waku/v2/api/publish/message_queue.go | 156 ++++++++++++++++++++++ waku/v2/api/publish/message_queue_test.go | 91 +++++++++++++ waku/v2/api/publish/rate_limiting.go | 37 +++++ waku/v2/api/publish/rate_limiting_test.go | 36 +++++ 8 files changed, 332 insertions(+), 3 deletions(-) rename waku/v2/api/{ => filter}/filter.go (99%) rename waku/v2/api/{ => filter}/filter_test.go (99%) create mode 100644 waku/v2/api/publish/common.go create mode 100644 waku/v2/api/publish/message_queue.go create mode 100644 waku/v2/api/publish/message_queue_test.go create mode 100644 waku/v2/api/publish/rate_limiting.go create mode 100644 
waku/v2/api/publish/rate_limiting_test.go diff --git a/go.mod b/go.mod index 7e948001..3a181efc 100644 --- a/go.mod +++ b/go.mod @@ -154,7 +154,7 @@ require ( github.com/multiformats/go-multibase v0.2.0 // indirect github.com/multiformats/go-multicodec v0.9.0 // indirect github.com/multiformats/go-multihash v0.2.3 // indirect - github.com/multiformats/go-multistream v0.5.0 + github.com/multiformats/go-multistream v0.5.0 // indirect github.com/multiformats/go-varint v0.0.7 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 diff --git a/waku/v2/api/filter.go b/waku/v2/api/filter/filter.go similarity index 99% rename from waku/v2/api/filter.go rename to waku/v2/api/filter/filter.go index 1f9ea6be..6bd041e6 100644 --- a/waku/v2/api/filter.go +++ b/waku/v2/api/filter/filter.go @@ -1,4 +1,4 @@ -package api +package filter import ( "context" diff --git a/waku/v2/api/filter_test.go b/waku/v2/api/filter/filter_test.go similarity index 99% rename from waku/v2/api/filter_test.go rename to waku/v2/api/filter/filter_test.go index ff22fb75..af976a69 100644 --- a/waku/v2/api/filter_test.go +++ b/waku/v2/api/filter/filter_test.go @@ -1,4 +1,4 @@ -package api +package filter import ( "context" diff --git a/waku/v2/api/publish/common.go b/waku/v2/api/publish/common.go new file mode 100644 index 00000000..be72f4c1 --- /dev/null +++ b/waku/v2/api/publish/common.go @@ -0,0 +1,9 @@ +package publish + +import ( + "github.com/waku-org/go-waku/waku/v2/protocol" + "go.uber.org/zap" +) + +// PublishFn represents a function that will publish a message. 
+type PublishFn = func(envelope *protocol.Envelope, logger *zap.Logger) error diff --git a/waku/v2/api/publish/message_queue.go b/waku/v2/api/publish/message_queue.go new file mode 100644 index 00000000..fbd79df8 --- /dev/null +++ b/waku/v2/api/publish/message_queue.go @@ -0,0 +1,156 @@ +package publish + +import ( + "container/heap" + "context" + + "github.com/waku-org/go-waku/waku/v2/protocol" +) + +// MessagePriority determines the ordering for the message priority queue +type MessagePriority = int + +const ( + LowPriority MessagePriority = 1 + NormalPriority MessagePriority = 2 + HighPriority MessagePriority = 3 +) + +type envelopePriority struct { + envelope *protocol.Envelope + priority int + index int +} + +type envelopePriorityQueue []*envelopePriority + +func (pq envelopePriorityQueue) Len() int { return len(pq) } + +func (pq envelopePriorityQueue) Less(i, j int) bool { + if pq[i].priority > pq[j].priority { + return true + } else if pq[i].priority == pq[j].priority { + return pq[i].envelope.Message().GetTimestamp() < pq[j].envelope.Message().GetTimestamp() + } + + return false +} + +func (pq envelopePriorityQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] + pq[i].index = i + pq[j].index = j +} + +func (pq *envelopePriorityQueue) Push(x any) { + n := len(*pq) + item := x.(*envelopePriority) + item.index = n + *pq = append(*pq, item) +} + +func (pq *envelopePriorityQueue) Pop() any { + old := *pq + n := len(old) + item := old[n-1] + old[n-1] = nil // avoid memory leak + item.index = -1 // for safety + *pq = old[0 : n-1] + return item +} + +// MessageQueue is a structure used to handle the ordering of the messages to publish +type MessageQueue struct { + usePriorityQueue bool + + toSendChan chan *protocol.Envelope + throttledPrioritySendQueue chan *envelopePriority + envelopeAvailableOnPriorityQueueSignal chan struct{} + envelopePriorityQueue envelopePriorityQueue +} + +// NewMessageQueue returns a new instance of MessageQueue. 
The MessageQueue can internally use a +// priority queue to handle the ordering of the messages, or use a simple FIFO queue. +func NewMessageQueue(bufferSize int, usePriorityQueue bool) *MessageQueue { + m := &MessageQueue{ + usePriorityQueue: usePriorityQueue, + } + + if m.usePriorityQueue { + m.envelopePriorityQueue = make(envelopePriorityQueue, 0) + m.throttledPrioritySendQueue = make(chan *envelopePriority, bufferSize) + m.envelopeAvailableOnPriorityQueueSignal = make(chan struct{}, bufferSize) + heap.Init(&m.envelopePriorityQueue) + } else { + m.toSendChan = make(chan *protocol.Envelope, bufferSize) + } + + return m +} + +// Start must be called to handle the lifetime of the internals of the message queue +func (m *MessageQueue) Start(ctx context.Context) { + + for { + select { + case envelopePriority, ok := <-m.throttledPrioritySendQueue: + if !ok { + continue + } + + heap.Push(&m.envelopePriorityQueue, envelopePriority) + + m.envelopeAvailableOnPriorityQueueSignal <- struct{}{} + + case <-ctx.Done(): + if m.usePriorityQueue { + close(m.throttledPrioritySendQueue) + close(m.envelopeAvailableOnPriorityQueueSignal) + } else { + close(m.toSendChan) + } + return + } + } +} + +// Push an envelope into the message queue. 
The priority is optional, and will be ignored +// if the message queue does not use a priority queue +func (m *MessageQueue) Push(envelope *protocol.Envelope, priority ...MessagePriority) { + if m.usePriorityQueue { + msgPriority := NormalPriority + if len(priority) != 0 { + msgPriority = priority[0] + } + + m.throttledPrioritySendQueue <- &envelopePriority{ + envelope: envelope, + priority: msgPriority, + } + } else { + m.toSendChan <- envelope + } +} + +// Pop will return a channel on which a message can be retrieved from the message queue +func (m *MessageQueue) Pop() <-chan *protocol.Envelope { + ch := make(chan *protocol.Envelope) + + go func() { + select { + case _, ok := <-m.envelopeAvailableOnPriorityQueueSignal: + if ok { + ch <- heap.Pop(&m.envelopePriorityQueue).(*envelopePriority).envelope + } + + case envelope, ok := <-m.toSendChan: + if ok { + ch <- envelope + } + } + + close(ch) + }() + + return ch +} diff --git a/waku/v2/api/publish/message_queue_test.go b/waku/v2/api/publish/message_queue_test.go new file mode 100644 index 00000000..15761c57 --- /dev/null +++ b/waku/v2/api/publish/message_queue_test.go @@ -0,0 +1,91 @@ +package publish + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/waku-org/go-waku/waku/v2/protocol" + "github.com/waku-org/go-waku/waku/v2/protocol/pb" + "google.golang.org/protobuf/proto" +) + +func TestFifoQueue(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + queue := NewMessageQueue(10, false) + go queue.Start(ctx) + + queue.Push(protocol.NewEnvelope(&pb.WakuMessage{}, 0, "A")) + queue.Push(protocol.NewEnvelope(&pb.WakuMessage{}, 0, "B")) + queue.Push(protocol.NewEnvelope(&pb.WakuMessage{}, 0, "C")) + + envelope, ok := <-queue.Pop() + require.True(t, ok) + require.Equal(t, "A", envelope.PubsubTopic()) + + envelope, ok = <-queue.Pop() + require.True(t, ok) + require.Equal(t, "B", envelope.PubsubTopic()) + + envelope, ok = <-queue.Pop() + 
require.True(t, ok) + require.Equal(t, "C", envelope.PubsubTopic()) + + cancel() + + _, ok = <-queue.Pop() + require.False(t, ok) +} + +func TestPriorityQueue(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + queue := NewMessageQueue(10, true) + go queue.Start(ctx) + + queue.Push(protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(0)}, 0, "A"), LowPriority) + queue.Push(protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(1)}, 0, "B"), LowPriority) + queue.Push(protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(2)}, 0, "C"), HighPriority) + queue.Push(protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(3)}, 0, "D"), NormalPriority) + queue.Push(protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(4)}, 0, "E"), HighPriority) + queue.Push(protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(5)}, 0, "F"), LowPriority) + queue.Push(protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(6)}, 0, "G"), NormalPriority) + + time.Sleep(2 * time.Second) + + envelope, ok := <-queue.Pop() + require.True(t, ok) + require.Equal(t, "C", envelope.PubsubTopic()) + + envelope, ok = <-queue.Pop() + require.True(t, ok) + require.Equal(t, "E", envelope.PubsubTopic()) + + envelope, ok = <-queue.Pop() + require.True(t, ok) + require.Equal(t, "D", envelope.PubsubTopic()) + + envelope, ok = <-queue.Pop() + require.True(t, ok) + require.Equal(t, "G", envelope.PubsubTopic()) + + envelope, ok = <-queue.Pop() + require.True(t, ok) + require.Equal(t, "A", envelope.PubsubTopic()) + + envelope, ok = <-queue.Pop() + require.True(t, ok) + require.Equal(t, "B", envelope.PubsubTopic()) + + envelope, ok = <-queue.Pop() + require.True(t, ok) + require.Equal(t, "F", envelope.PubsubTopic()) + + cancel() + + _, ok = <-queue.Pop() + require.False(t, ok) + +} diff --git a/waku/v2/api/publish/rate_limiting.go b/waku/v2/api/publish/rate_limiting.go new file mode 100644 index 00000000..390bed95 --- /dev/null +++ 
b/waku/v2/api/publish/rate_limiting.go @@ -0,0 +1,37 @@ +package publish + +import ( + "context" + "errors" + + "github.com/waku-org/go-waku/waku/v2/protocol" + "go.uber.org/zap" + "golang.org/x/time/rate" +) + +// PublishRateLimiter is used to decorate publish functions to limit the +// number of messages per second that can be published +type PublishRateLimiter struct { + limiter *rate.Limiter +} + +// NewPublishRateLimiter will create a new instance of PublishRateLimiter. +// You can specify an rate.Inf value to in practice ignore the rate limiting +func NewPublishRateLimiter(r rate.Limit) *PublishRateLimiter { + return &PublishRateLimiter{ + limiter: rate.NewLimiter(r, 1), + } +} + +// ThrottlePublishFn is used to decorate a PublishFn so rate limiting is applied +func (p *PublishRateLimiter) ThrottlePublishFn(ctx context.Context, publishFn PublishFn) PublishFn { + return func(envelope *protocol.Envelope, logger *zap.Logger) error { + if err := p.limiter.Wait(ctx); err != nil { + if !errors.Is(err, context.Canceled) { + logger.Error("could not send message (limiter)", zap.Error(err)) + } + return err + } + return publishFn(envelope, logger) + } +} diff --git a/waku/v2/api/publish/rate_limiting_test.go b/waku/v2/api/publish/rate_limiting_test.go new file mode 100644 index 00000000..e516cbc9 --- /dev/null +++ b/waku/v2/api/publish/rate_limiting_test.go @@ -0,0 +1,36 @@ +package publish + +import ( + "context" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/waku-org/go-waku/waku/v2/protocol" + "github.com/waku-org/go-waku/waku/v2/utils" + "go.uber.org/zap" + "golang.org/x/time/rate" +) + +func TestRateLimit(t *testing.T) { + r := NewPublishRateLimiter(rate.Limit(1)) + l := utils.Logger() + + var counter atomic.Int32 + fn := r.ThrottlePublishFn(context.Background(), func(envelope *protocol.Envelope, logger *zap.Logger) error { + counter.Add(1) + return nil + }) + + go func() { + for i := 0; i <= 10; i++ { + err := fn(nil, 
l) + require.NoError(t, err) + } + }() + + <-time.After(2 * time.Second) + + require.LessOrEqual(t, counter.Load(), int32(3)) +} From d047df3859e23aa2d142c0e75b86c5ea5c43acfa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?rich=CE=9Brd?= Date: Thu, 1 Aug 2024 10:39:04 -0400 Subject: [PATCH 19/27] refactor: move missing messages logic from status-go to go-waku (#1174) --- logging/logging.go | 4 + waku/v2/api/missing/criteria_interest.go | 47 ++++ waku/v2/api/missing/missing_messages.go | 284 ++++++++++++++++++++++ waku/v2/api/missing/options.go | 39 +++ waku/v2/api/publish/rate_limiting.go | 4 +- waku/v2/api/publish/rate_limiting_test.go | 2 +- 6 files changed, 377 insertions(+), 3 deletions(-) create mode 100644 waku/v2/api/missing/criteria_interest.go create mode 100644 waku/v2/api/missing/missing_messages.go create mode 100644 waku/v2/api/missing/options.go diff --git a/logging/logging.go b/logging/logging.go index 19732d55..d577a1c5 100644 --- a/logging/logging.go +++ b/logging/logging.go @@ -74,6 +74,10 @@ func (t timestamp) String() string { return time.Unix(0, int64(t)).Format(time.RFC3339) } +func Epoch(key string, time time.Time) zap.Field { + return zap.String(key, fmt.Sprintf("%d", time.UnixNano())) +} + // History Query Filters type historyFilters []*pb.ContentFilter diff --git a/waku/v2/api/missing/criteria_interest.go b/waku/v2/api/missing/criteria_interest.go new file mode 100644 index 00000000..919b2fc9 --- /dev/null +++ b/waku/v2/api/missing/criteria_interest.go @@ -0,0 +1,47 @@ +package missing + +import ( + "context" + "slices" + "time" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/waku-org/go-waku/waku/v2/protocol" +) + +type criteriaInterest struct { + peerID peer.ID + contentFilter protocol.ContentFilter + lastChecked time.Time + + ctx context.Context + cancel context.CancelFunc +} + +func (c criteriaInterest) equals(other criteriaInterest) bool { + if c.peerID != other.peerID { + return false + } + + if c.contentFilter.PubsubTopic != 
other.contentFilter.PubsubTopic { + return false + } + + contentTopics := c.contentFilter.ContentTopics.ToList() + otherContentTopics := other.contentFilter.ContentTopics.ToList() + + slices.Sort(contentTopics) + slices.Sort(otherContentTopics) + + if len(contentTopics) != len(otherContentTopics) { + return false + } + + for i, contentTopic := range contentTopics { + if contentTopic != otherContentTopics[i] { + return false + } + } + + return true +} diff --git a/waku/v2/api/missing/missing_messages.go b/waku/v2/api/missing/missing_messages.go new file mode 100644 index 00000000..a50e6071 --- /dev/null +++ b/waku/v2/api/missing/missing_messages.go @@ -0,0 +1,284 @@ +package missing + +// test + +import ( + "context" + "encoding/hex" + "errors" + "sync" + "time" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/waku-org/go-waku/logging" + "github.com/waku-org/go-waku/waku/v2/protocol" + "github.com/waku-org/go-waku/waku/v2/protocol/pb" + "github.com/waku-org/go-waku/waku/v2/protocol/store" + "github.com/waku-org/go-waku/waku/v2/timesource" + "go.uber.org/zap" + "google.golang.org/protobuf/proto" +) + +const maxContentTopicsPerRequest = 10 + +// MessageTracker should keep track of messages it has seen before and +// provide a way to determine whether a message exists or not. 
This +// is application specific +type MessageTracker interface { + MessageExists(pb.MessageHash) (bool, error) +} + +// MissingMessageVerifier is used to periodically retrieve missing messages from store nodes that have some specific criteria +type MissingMessageVerifier struct { + ctx context.Context + params missingMessageVerifierParams + + messageTracker MessageTracker + + criteriaInterest map[string]criteriaInterest // Track message verification requests and when was the last time a pubsub topic was verified for missing messages + criteriaInterestMu sync.Mutex + + C <-chan *protocol.Envelope + + store *store.WakuStore + timesource timesource.Timesource + logger *zap.Logger +} + +// NewMissingMessageVerifier creates an instance of a MissingMessageVerifier +func NewMissingMessageVerifier(store *store.WakuStore, messageTracker MessageTracker, timesource timesource.Timesource, logger *zap.Logger, options ...MissingMessageVerifierOption) *MissingMessageVerifier { + options = append(defaultMissingMessagesVerifierOptions, options...) 
+ params := missingMessageVerifierParams{} + for _, opt := range options { + opt(¶ms) + } + + return &MissingMessageVerifier{ + store: store, + timesource: timesource, + messageTracker: messageTracker, + logger: logger.Named("missing-msg-verifier"), + params: params, + } +} + +func (m *MissingMessageVerifier) SetCriteriaInterest(peerID peer.ID, contentFilter protocol.ContentFilter) { + m.criteriaInterestMu.Lock() + defer m.criteriaInterestMu.Unlock() + + ctx, cancel := context.WithCancel(m.ctx) + criteriaInterest := criteriaInterest{ + peerID: peerID, + contentFilter: contentFilter, + lastChecked: m.timesource.Now().Add(-m.params.delay), + ctx: ctx, + cancel: cancel, + } + + currMessageVerificationRequest, ok := m.criteriaInterest[contentFilter.PubsubTopic] + + if ok && currMessageVerificationRequest.equals(criteriaInterest) { + return + } + + if ok { + // If there is an ongoing request, we cancel it before replacing it + // by the new list. This can be probably optimized further by tracking + // the last time a content topic was synced, but might not be necessary + // since cancelling an ongoing request would mean cancelling just a single + // page of results + currMessageVerificationRequest.cancel() + } + + m.criteriaInterest[contentFilter.PubsubTopic] = criteriaInterest +} + +func (m *MissingMessageVerifier) Start(ctx context.Context) { + m.ctx = ctx + m.criteriaInterest = make(map[string]criteriaInterest) + + c := make(chan *protocol.Envelope, 1000) + m.C = c + + go func() { + t := time.NewTicker(m.params.interval) + defer t.Stop() + + var semaphore = make(chan struct{}, 5) + for { + select { + case <-t.C: + m.logger.Debug("checking for missing messages...") + m.criteriaInterestMu.Lock() + for _, interest := range m.criteriaInterest { + select { + case <-ctx.Done(): + return + default: + semaphore <- struct{}{} + go func(interest criteriaInterest) { + m.fetchHistory(c, interest) + <-semaphore + }(interest) + } + } + m.criteriaInterestMu.Unlock() + + case 
<-ctx.Done(): + return + } + } + }() +} + +func (m *MissingMessageVerifier) fetchHistory(c chan<- *protocol.Envelope, interest criteriaInterest) { + contentTopics := interest.contentFilter.ContentTopics.ToList() + for i := 0; i < len(contentTopics); i += maxContentTopicsPerRequest { + j := i + maxContentTopicsPerRequest + if j > len(contentTopics) { + j = len(contentTopics) + } + + now := m.timesource.Now() + err := m.fetchMessagesBatch(c, interest, i, j, now) + if err != nil { + if errors.Is(err, context.Canceled) { + return + } + + m.logger.Error("could not fetch history", + zap.Stringer("peerID", interest.peerID), + zap.String("pubsubTopic", interest.contentFilter.PubsubTopic), + zap.Strings("contentTopics", contentTopics)) + continue + } + + m.criteriaInterestMu.Lock() + c := m.criteriaInterest[interest.contentFilter.PubsubTopic] + if c.equals(interest) { + c.lastChecked = now + m.criteriaInterest[interest.contentFilter.PubsubTopic] = c + } + m.criteriaInterestMu.Unlock() + } +} + +func (m *MissingMessageVerifier) storeQueryWithRetry(ctx context.Context, queryFunc func(ctx context.Context) (*store.Result, error), logger *zap.Logger, logMsg string) (*store.Result, error) { + retry := true + count := 1 + for retry && count <= m.params.maxAttemptsToRetrieveHistory { + logger.Debug(logMsg, zap.Int("attempt", count)) + tCtx, cancel := context.WithTimeout(ctx, 20*time.Second) + result, err := queryFunc(tCtx) + cancel() + if err != nil { + logger.Error("could not query storenode", zap.Error(err), zap.Int("attempt", count)) + select { + case <-m.ctx.Done(): + return nil, m.ctx.Err() + case <-time.After(2 * time.Second): + } + } else { + return result, nil + } + } + + return nil, errors.New("storenode not available") +} + +func (m *MissingMessageVerifier) fetchMessagesBatch(c chan<- *protocol.Envelope, interest criteriaInterest, batchFrom int, batchTo int, now time.Time) error { + contentTopics := interest.contentFilter.ContentTopics.ToList() + + logger := 
m.logger.With( + zap.Stringer("peerID", interest.peerID), + zap.Strings("contentTopics", contentTopics[batchFrom:batchTo]), + zap.String("pubsubTopic", interest.contentFilter.PubsubTopic), + logging.Epoch("from", interest.lastChecked), + logging.Epoch("to", now), + ) + + result, err := m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (*store.Result, error) { + return m.store.Query(ctx, store.FilterCriteria{ + ContentFilter: protocol.NewContentFilter(interest.contentFilter.PubsubTopic, contentTopics[batchFrom:batchTo]...), + TimeStart: proto.Int64(interest.lastChecked.Add(-m.params.delay).UnixNano()), + TimeEnd: proto.Int64(now.Add(-m.params.delay).UnixNano()), + }, store.WithPeer(interest.peerID), store.WithPaging(false, 100), store.IncludeData(false)) + }, logger, "retrieving history to check for missing messages") + if err != nil { + if !errors.Is(err, context.Canceled) { + logger.Error("storenode not available", zap.Error(err)) + } + return err + } + + var missingHashes []pb.MessageHash + + for !result.IsComplete() { + for _, mkv := range result.Messages() { + hash := pb.ToMessageHash(mkv.MessageHash) + exists, err := m.messageTracker.MessageExists(hash) + if err != nil { + return err + } + + if exists { + continue + } + + missingHashes = append(missingHashes, hash) + } + + result, err = m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (*store.Result, error) { + if err = result.Next(ctx); err != nil { + return nil, err + } + return result, nil + }, logger.With(zap.String("cursor", hex.EncodeToString(result.Cursor()))), "retrieving next page") + if err != nil { + if !errors.Is(err, context.Canceled) { + logger.Error("storenode not available", zap.Error(err)) + } + return err + } + } + + if len(missingHashes) == 0 { + // Nothing to do here + return nil + } + + result, err = m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (*store.Result, error) { + return m.store.QueryByHash(ctx, missingHashes, 
store.WithPeer(interest.peerID), store.WithPaging(false, 100)) + }, logger, "retrieving missing messages") + if err != nil { + if !errors.Is(err, context.Canceled) { + logger.Error("storenode not available", zap.Error(err)) + } + return err + } + + for !result.IsComplete() { + for _, mkv := range result.Messages() { + select { + case c <- protocol.NewEnvelope(mkv.Message, mkv.Message.GetTimestamp(), mkv.GetPubsubTopic()): + default: + m.logger.Warn("subscriber is too slow!") + } + } + + result, err = m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (*store.Result, error) { + if err = result.Next(ctx); err != nil { + return nil, err + } + return result, nil + }, logger.With(zap.String("cursor", hex.EncodeToString(result.Cursor()))), "retrieving next page") + if err != nil { + if !errors.Is(err, context.Canceled) { + logger.Error("storenode not available", zap.Error(err)) + } + return err + } + } + + return nil +} diff --git a/waku/v2/api/missing/options.go b/waku/v2/api/missing/options.go new file mode 100644 index 00000000..b16abbc7 --- /dev/null +++ b/waku/v2/api/missing/options.go @@ -0,0 +1,39 @@ +package missing + +import "time" + +type missingMessageVerifierParams struct { + delay time.Duration + interval time.Duration + maxAttemptsToRetrieveHistory int +} + +// MissingMessageVerifierOption is an option that can be used to customize the MissingMessageVerifier behavior +type MissingMessageVerifierOption func(*missingMessageVerifierParams) + +// WithVerificationInterval is an option used to setup the verification interval +func WithVerificationInterval(t time.Duration) MissingMessageVerifierOption { + return func(params *missingMessageVerifierParams) { + params.interval = t + } +} + +// WithDelay is an option used to indicate the delay to apply for verifying messages +func WithDelay(t time.Duration) MissingMessageVerifierOption { + return func(params *missingMessageVerifierParams) { + params.delay = t + } +} + +// WithMaxAttempts indicates how many 
times will the message verifier retry a failed storenode request +func WithMaxRetryAttempts(max int) MissingMessageVerifierOption { + return func(params *missingMessageVerifierParams) { + params.maxAttemptsToRetrieveHistory = max + } +} + +var defaultMissingMessagesVerifierOptions = []MissingMessageVerifierOption{ + WithVerificationInterval(time.Minute), + WithDelay(20 * time.Second), + WithMaxRetryAttempts(3), +} diff --git a/waku/v2/api/publish/rate_limiting.go b/waku/v2/api/publish/rate_limiting.go index 390bed95..4322413b 100644 --- a/waku/v2/api/publish/rate_limiting.go +++ b/waku/v2/api/publish/rate_limiting.go @@ -17,9 +17,9 @@ type PublishRateLimiter struct { // NewPublishRateLimiter will create a new instance of PublishRateLimiter. // You can specify an rate.Inf value to in practice ignore the rate limiting -func NewPublishRateLimiter(r rate.Limit) *PublishRateLimiter { +func NewPublishRateLimiter(r rate.Limit, b int) *PublishRateLimiter { return &PublishRateLimiter{ - limiter: rate.NewLimiter(r, 1), + limiter: rate.NewLimiter(r, b), } } diff --git a/waku/v2/api/publish/rate_limiting_test.go b/waku/v2/api/publish/rate_limiting_test.go index e516cbc9..bde68bf3 100644 --- a/waku/v2/api/publish/rate_limiting_test.go +++ b/waku/v2/api/publish/rate_limiting_test.go @@ -14,7 +14,7 @@ import ( ) func TestRateLimit(t *testing.T) { - r := NewPublishRateLimiter(rate.Limit(1)) + r := NewPublishRateLimiter(rate.Limit(1), 1) l := utils.Logger() var counter atomic.Int32 From f3560ced3b6689d2ccba28ac8ddf6ccd4980a1c9 Mon Sep 17 00:00:00 2001 From: Prem Chaitanya Prathi Date: Tue, 6 Aug 2024 13:10:56 +0530 Subject: [PATCH 20/27] chore: move filter manager from status-go to go-waku (#1177) --- default.nix | 2 +- examples/basic-light-client/go.mod | 1 + examples/basic-light-client/go.sum | 2 + examples/basic-relay/go.mod | 1 + examples/basic-relay/go.sum | 2 + examples/chat2/go.mod | 1 + examples/chat2/go.sum | 2 + examples/filter2/go.mod | 1 + examples/filter2/go.sum | 2 + 
examples/noise/go.mod | 1 + examples/noise/go.sum | 2 + examples/rln/go.mod | 1 + examples/rln/go.sum | 2 + flake.nix | 2 +- go.mod | 1 + go.sum | 2 + tests/utils.go | 19 ++ waku/v2/api/filter/filter_manager.go | 248 ++++++++++++++++++ waku/v2/api/filter/filter_test.go | 95 +++++++ waku/v2/protocol/filter/filter_ping_test.go | 2 +- .../filter/filter_proto_ident_test.go | 8 +- .../protocol/filter/filter_subscribe_test.go | 18 +- waku/v2/protocol/filter/filter_test.go | 2 +- waku/v2/protocol/filter/test_utils.go | 14 +- 24 files changed, 407 insertions(+), 24 deletions(-) create mode 100644 waku/v2/api/filter/filter_manager.go diff --git a/default.nix b/default.nix index be1c1928..c2c37add 100644 --- a/default.nix +++ b/default.nix @@ -26,7 +26,7 @@ pkgs.buildGo121Module { '' else ""; # FIXME: This needs to be manually changed when updating modules. - vendorHash = "sha256-zwvZVTiwv7cc4vAM2Fil+qAG1v1J8q4BqX5lCgCStIc="; + vendorHash = "sha256-cOh9LNmcaBnBeMFM1HS2pdH5TTraHfo8PXL37t/A3gQ="; # Fix for 'nix run' trying to execute 'go-waku'. 
meta = { mainProgram = "waku"; }; diff --git a/examples/basic-light-client/go.mod b/examples/basic-light-client/go.mod index 87361937..a374667e 100644 --- a/examples/basic-light-client/go.mod +++ b/examples/basic-light-client/go.mod @@ -30,6 +30,7 @@ require ( github.com/btcsuite/btcd v0.20.1-beta // indirect github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d // indirect + github.com/cenkalti/backoff/v3 v3.2.2 // indirect github.com/cenkalti/backoff/v4 v4.1.2 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect diff --git a/examples/basic-light-client/go.sum b/examples/basic-light-client/go.sum index 2f3f8015..1a8f34b5 100644 --- a/examples/basic-light-client/go.sum +++ b/examples/basic-light-client/go.sum @@ -94,6 +94,8 @@ github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtE github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= +github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= +github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= diff --git a/examples/basic-relay/go.mod b/examples/basic-relay/go.mod index 9262ff91..b672c665 100644 --- a/examples/basic-relay/go.mod +++ b/examples/basic-relay/go.mod @@ -29,6 +29,7 @@ require ( github.com/btcsuite/btcd v0.20.1-beta // indirect github.com/btcsuite/btcd/btcec/v2 v2.2.1 // 
indirect github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d // indirect + github.com/cenkalti/backoff/v3 v3.2.2 // indirect github.com/cenkalti/backoff/v4 v4.1.2 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect diff --git a/examples/basic-relay/go.sum b/examples/basic-relay/go.sum index 7feb9818..cf29a44f 100644 --- a/examples/basic-relay/go.sum +++ b/examples/basic-relay/go.sum @@ -94,6 +94,8 @@ github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtE github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= +github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= +github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= diff --git a/examples/chat2/go.mod b/examples/chat2/go.mod index 2407fb63..d7588312 100644 --- a/examples/chat2/go.mod +++ b/examples/chat2/go.mod @@ -36,6 +36,7 @@ require ( github.com/btcsuite/btcd v0.20.1-beta // indirect github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d // indirect + github.com/cenkalti/backoff/v3 v3.2.2 // indirect github.com/cenkalti/backoff/v4 v4.1.2 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect diff --git a/examples/chat2/go.sum b/examples/chat2/go.sum index 810f4931..0b06be1d 100644 --- a/examples/chat2/go.sum +++ 
b/examples/chat2/go.sum @@ -96,6 +96,8 @@ github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtE github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= +github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= +github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= diff --git a/examples/filter2/go.mod b/examples/filter2/go.mod index 6dda8f4a..263a2e06 100644 --- a/examples/filter2/go.mod +++ b/examples/filter2/go.mod @@ -26,6 +26,7 @@ require ( github.com/btcsuite/btcd v0.20.1-beta // indirect github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d // indirect + github.com/cenkalti/backoff/v3 v3.2.2 // indirect github.com/cenkalti/backoff/v4 v4.1.2 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect diff --git a/examples/filter2/go.sum b/examples/filter2/go.sum index a5203d1b..343d42b1 100644 --- a/examples/filter2/go.sum +++ b/examples/filter2/go.sum @@ -92,6 +92,8 @@ github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtE github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/c-bata/go-prompt v0.2.2/go.mod 
h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= +github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= +github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= diff --git a/examples/noise/go.mod b/examples/noise/go.mod index 70bc5787..762ed67d 100644 --- a/examples/noise/go.mod +++ b/examples/noise/go.mod @@ -28,6 +28,7 @@ require ( github.com/btcsuite/btcd v0.20.1-beta // indirect github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d // indirect + github.com/cenkalti/backoff/v3 v3.2.2 // indirect github.com/cenkalti/backoff/v4 v4.1.2 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect diff --git a/examples/noise/go.sum b/examples/noise/go.sum index 4cc14671..a2ad5e27 100644 --- a/examples/noise/go.sum +++ b/examples/noise/go.sum @@ -92,6 +92,8 @@ github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtE github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= +github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= +github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= 
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= diff --git a/examples/rln/go.mod b/examples/rln/go.mod index 5c9995ef..2500810b 100644 --- a/examples/rln/go.mod +++ b/examples/rln/go.mod @@ -26,6 +26,7 @@ require ( github.com/btcsuite/btcd v0.20.1-beta // indirect github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d // indirect + github.com/cenkalti/backoff/v3 v3.2.2 // indirect github.com/cenkalti/backoff/v4 v4.1.2 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect diff --git a/examples/rln/go.sum b/examples/rln/go.sum index a5203d1b..343d42b1 100644 --- a/examples/rln/go.sum +++ b/examples/rln/go.sum @@ -92,6 +92,8 @@ github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtE github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= +github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= +github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= diff --git a/flake.nix b/flake.nix index e81473e2..37010eca 100644 --- a/flake.nix +++ b/flake.nix @@ -29,7 +29,7 @@ ]; doCheck = false; # FIXME: This needs to be manually changed when updating modules. 
- vendorHash = "sha256-zwvZVTiwv7cc4vAM2Fil+qAG1v1J8q4BqX5lCgCStIc="; + vendorHash = "sha256-cOh9LNmcaBnBeMFM1HS2pdH5TTraHfo8PXL37t/A3gQ="; # Fix for 'nix run' trying to execute 'go-waku'. meta = { mainProgram = "waku"; }; }; diff --git a/go.mod b/go.mod index 3a181efc..95089d26 100644 --- a/go.mod +++ b/go.mod @@ -35,6 +35,7 @@ require ( require ( github.com/avast/retry-go/v4 v4.5.1 + github.com/cenkalti/backoff/v3 v3.2.2 github.com/cenkalti/backoff/v4 v4.1.2 github.com/dustin/go-humanize v1.0.1 github.com/go-chi/chi/v5 v5.0.0 diff --git a/go.sum b/go.sum index 21d942b5..08b33d97 100644 --- a/go.sum +++ b/go.sum @@ -245,6 +245,8 @@ github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8n github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= +github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= +github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= diff --git a/tests/utils.go b/tests/utils.go index d5a57912..82f086ae 100644 --- a/tests/utils.go +++ b/tests/utils.go @@ -21,6 +21,7 @@ import ( "time" "unicode/utf8" + "github.com/cenkalti/backoff/v3" "github.com/waku-org/go-waku/waku/v2/protocol" gcrypto "github.com/ethereum/go-ethereum/crypto" @@ -437,3 +438,21 @@ func WaitForTimeout(t *testing.T, ctx context.Context, timeout time.Duration, wg wg.Wait() } + +type BackOffOption func(*backoff.ExponentialBackOff) + +func RetryWithBackOff(o 
func() error, options ...BackOffOption) error { + b := backoff.ExponentialBackOff{ + InitialInterval: time.Millisecond * 100, + RandomizationFactor: 0.1, + Multiplier: 1, + MaxInterval: time.Second, + MaxElapsedTime: time.Second * 10, + Clock: backoff.SystemClock, + } + for _, option := range options { + option(&b) + } + b.Reset() + return backoff.Retry(o, &b) +} diff --git a/waku/v2/api/filter/filter_manager.go b/waku/v2/api/filter/filter_manager.go new file mode 100644 index 00000000..4dc92c3d --- /dev/null +++ b/waku/v2/api/filter/filter_manager.go @@ -0,0 +1,248 @@ +package filter + +import ( + "context" + "sync" + "time" + + "github.com/google/uuid" + + "go.uber.org/zap" + "golang.org/x/exp/maps" + + "github.com/waku-org/go-waku/waku/v2/onlinechecker" + "github.com/waku-org/go-waku/waku/v2/protocol" + "github.com/waku-org/go-waku/waku/v2/protocol/filter" +) + +// Methods on FilterManager just aggregate filters from application and subscribe to them +// +// startFilterSubLoop runs a loop where-in it waits for an interval to batch subscriptions +// +// runFilterSubscriptionLoop runs a loop for receiving messages from underlying subscriptions and invokes onNewEnvelopes +// +// filterConfigs is the map of filer IDs to filter configs +// filterSubscriptions is the map of filter subscription IDs to subscriptions + +const filterSubBatchSize = 90 + +type appFilterMap map[string]filterConfig + +type FilterManager struct { + sync.Mutex + ctx context.Context + minPeersPerFilter int + onlineChecker *onlinechecker.DefaultOnlineChecker + filterSubscriptions map[string]SubDetails // map of aggregated filters to apiSub details + logger *zap.Logger + node *filter.WakuFilterLightNode + filterSubBatchDuration time.Duration + incompleteFilterBatch map[string]filterConfig + filterConfigs appFilterMap // map of application filterID to {aggregatedFilterID, application ContentFilter} + waitingToSubQueue chan filterConfig + envProcessor EnevelopeProcessor +} + +type SubDetails struct 
{ + cancel func() + sub *Sub +} + +type filterConfig struct { + ID string + contentFilter protocol.ContentFilter +} + +// EnevelopeProcessor is responsible for processing of received messages +// This is application specific +type EnevelopeProcessor interface { + OnNewEnvelope(env *protocol.Envelope) error +} + +func NewFilterManager(ctx context.Context, logger *zap.Logger, minPeersPerFilter int, envProcessor EnevelopeProcessor, node *filter.WakuFilterLightNode) *FilterManager { + // This fn is being mocked in test + mgr := new(FilterManager) + mgr.ctx = ctx + mgr.logger = logger + mgr.minPeersPerFilter = minPeersPerFilter + mgr.envProcessor = envProcessor + mgr.filterSubscriptions = make(map[string]SubDetails) + mgr.node = node + mgr.onlineChecker = onlinechecker.NewDefaultOnlineChecker(false).(*onlinechecker.DefaultOnlineChecker) + mgr.node.SetOnlineChecker(mgr.onlineChecker) + mgr.filterSubBatchDuration = 5 * time.Second + mgr.incompleteFilterBatch = make(map[string]filterConfig) + mgr.filterConfigs = make(appFilterMap) + mgr.waitingToSubQueue = make(chan filterConfig, 100) + go mgr.startFilterSubLoop() + return mgr +} + +func (mgr *FilterManager) startFilterSubLoop() { + ticker := time.NewTicker(mgr.filterSubBatchDuration) + defer ticker.Stop() + for { + select { + case <-mgr.ctx.Done(): + return + case <-ticker.C: + // TODO: Optimization, handle case where 1st addFilter happens just before ticker expires. 
+ if mgr.onlineChecker.IsOnline() { + mgr.Lock() + for _, af := range mgr.incompleteFilterBatch { + mgr.logger.Debug("ticker hit, hence subscribing", zap.String("agg-filter-id", af.ID), zap.Int("batch-size", len(af.contentFilter.ContentTopics)), + zap.Stringer("agg-content-filter", af.contentFilter)) + go mgr.subscribeAndRunLoop(af) + } + mgr.incompleteFilterBatch = make(map[string]filterConfig) + mgr.Unlock() + } + subs := mgr.node.Subscriptions() + mgr.logger.Debug("filter stats", zap.Int("agg filters count", len(mgr.filterSubscriptions)), zap.Int("filter subs count", len(subs))) + } + } +} + +// addFilter method checks if there are existing waiting filters for the pubsubTopic to be subscribed and adds the new filter to the same batch +// once batchlimit is hit, all filters are subscribed to and new batch is created. +// if node is not online, then batch is pushed to a queue to be picked up later for subscription and new batch is created + +func (mgr *FilterManager) SubscribeFilter(filterID string, cf protocol.ContentFilter) { + mgr.logger.Debug("adding filter", zap.String("filter-id", filterID)) + + mgr.Lock() + defer mgr.Unlock() + + afilter, ok := mgr.incompleteFilterBatch[cf.PubsubTopic] + if !ok { + // no existing batch for pubsubTopic + mgr.logger.Debug("new pubsubTopic batch", zap.String("topic", cf.PubsubTopic)) + afilter = filterConfig{uuid.NewString(), cf} + mgr.incompleteFilterBatch[cf.PubsubTopic] = afilter + mgr.filterConfigs[filterID] = filterConfig{afilter.ID, cf} + } else { + mgr.logger.Debug("existing pubsubTopic batch", zap.String("agg-filter-id", afilter.ID), zap.String("topic", cf.PubsubTopic)) + if len(afilter.contentFilter.ContentTopics)+len(cf.ContentTopics) > filterSubBatchSize { + // filter batch limit is hit + if mgr.onlineChecker.IsOnline() { + // node is online, go ahead and subscribe the batch + mgr.logger.Debug("crossed pubsubTopic batchsize and online, subscribing to filters", zap.String("agg-filter-id", afilter.ID), 
zap.String("topic", cf.PubsubTopic), zap.Int("batch-size", len(afilter.contentFilter.ContentTopics)+len(cf.ContentTopics))) + go mgr.subscribeAndRunLoop(afilter) + } else { + mgr.logger.Debug("crossed pubsubTopic batchsize and offline, queuing filters", zap.String("agg-filter-id", afilter.ID), zap.String("topic", cf.PubsubTopic), zap.Int("batch-size", len(afilter.contentFilter.ContentTopics)+len(cf.ContentTopics))) + // queue existing batch as node is not online + mgr.waitingToSubQueue <- afilter + } + afilter = filterConfig{uuid.NewString(), cf} + mgr.logger.Debug("creating a new pubsubTopic batch", zap.String("agg-filter-id", afilter.ID), zap.String("topic", cf.PubsubTopic), zap.Stringer("content-filter", cf)) + mgr.incompleteFilterBatch[cf.PubsubTopic] = afilter + mgr.filterConfigs[filterID] = filterConfig{afilter.ID, cf} + } else { + // add to existing batch as batch limit not reached + for _, ct := range maps.Keys(cf.ContentTopics) { + afilter.contentFilter.ContentTopics[ct] = struct{}{} + } + mgr.logger.Debug("adding to existing pubsubTopic batch", zap.String("agg-filter-id", afilter.ID), zap.Stringer("content-filter", cf), zap.Int("batch-size", len(afilter.contentFilter.ContentTopics))) + mgr.filterConfigs[filterID] = filterConfig{afilter.ID, cf} + } + } +} + +func (mgr *FilterManager) subscribeAndRunLoop(f filterConfig) { + ctx, cancel := context.WithCancel(mgr.ctx) + config := FilterConfig{MaxPeers: mgr.minPeersPerFilter} + sub, err := Subscribe(ctx, mgr.node, f.contentFilter, config, mgr.logger) + mgr.Lock() + mgr.filterSubscriptions[f.ID] = SubDetails{cancel, sub} + mgr.Unlock() + if err == nil { + mgr.logger.Debug("subscription successful, running loop", zap.String("agg-filter-id", f.ID), zap.Stringer("content-filter", f.contentFilter)) + mgr.runFilterSubscriptionLoop(sub) + } else { + mgr.logger.Error("subscription fail, need to debug issue", zap.String("agg-filter-id", f.ID), zap.Stringer("content-filter", f.contentFilter), zap.Error(err)) + } +} + 
+// NetworkChange is to be invoked when there is a change in network detected by application +// This should retrigger a ping to verify if subscriptions are fine. +func (mgr *FilterManager) NetworkChange() { + mgr.node.PingPeers() // ping all peers to check if subscriptions are alive +} + +// OnConnectionStatusChange to be triggered when connection status change is detected either from offline to online or vice-versa +// Note that pubsubTopic specific change can be triggered by specifying pubsubTopic, +// if pubsubTopic is empty it indicates complete connection status change such as node went offline or came back online. +func (mgr *FilterManager) OnConnectionStatusChange(pubsubTopic string, newStatus bool) { + subs := mgr.node.Subscriptions() + mgr.logger.Debug("inside on connection status change", zap.Bool("new-status", newStatus), + zap.Int("agg filters count", len(mgr.filterSubscriptions)), zap.Int("filter subs count", len(subs))) + if newStatus && !mgr.onlineChecker.IsOnline() { // switched from offline to Online + mgr.NetworkChange() + mgr.logger.Debug("switching from offline to online") + mgr.Lock() + if len(mgr.waitingToSubQueue) > 0 { + for af := range mgr.waitingToSubQueue { + // TODO: change the below logic once topic specific health is implemented for lightClients + if pubsubTopic == "" || pubsubTopic == af.contentFilter.PubsubTopic { + // check if any filter subs are pending and subscribe them + mgr.logger.Debug("subscribing from filter queue", zap.String("filter-id", af.ID), zap.Stringer("content-filter", af.contentFilter)) + go mgr.subscribeAndRunLoop(af) + } else { + mgr.waitingToSubQueue <- af + } + if len(mgr.waitingToSubQueue) == 0 { + mgr.logger.Debug("no pending subscriptions") + break + } + } + } + mgr.Unlock() + } + + mgr.onlineChecker.SetOnline(newStatus) +} + +func (mgr *FilterManager) UnsubscribeFilter(filterID string) { + mgr.Lock() + defer mgr.Unlock() + mgr.logger.Debug("removing filter", zap.String("filter-id", filterID)) + 
filterConfig, ok := mgr.filterConfigs[filterID] + if !ok { + mgr.logger.Debug("filter removal: filter not found", zap.String("filter-id", filterID)) + return + } + af, ok := mgr.filterSubscriptions[filterConfig.ID] + if ok { + delete(mgr.filterConfigs, filterID) + for ct := range filterConfig.contentFilter.ContentTopics { + delete(af.sub.ContentFilter.ContentTopics, ct) + } + if len(af.sub.ContentFilter.ContentTopics) == 0 { + af.cancel() + } else { + go af.sub.Unsubscribe(filterConfig.contentFilter) + } + } else { + mgr.logger.Debug("filter removal: aggregated filter not found", zap.String("filter-id", filterID), zap.String("agg-filter-id", filterConfig.ID)) + } +} + +func (mgr *FilterManager) runFilterSubscriptionLoop(sub *Sub) { + for { + select { + case <-mgr.ctx.Done(): + mgr.logger.Debug("subscription loop ended", zap.Stringer("content-filter", sub.ContentFilter)) + return + case env, ok := <-sub.DataCh: + if ok { + err := mgr.envProcessor.OnNewEnvelope(env) + if err != nil { + mgr.logger.Error("invoking onNewEnvelopes error", zap.Error(err)) + } + } else { + mgr.logger.Debug("filter sub is closed", zap.Any("content-filter", sub.ContentFilter)) + return + } + } + } +} diff --git a/waku/v2/api/filter/filter_test.go b/waku/v2/api/filter/filter_test.go index af976a69..140dedc6 100644 --- a/waku/v2/api/filter/filter_test.go +++ b/waku/v2/api/filter/filter_test.go @@ -1,10 +1,15 @@ +//go:build !race + package filter import ( "context" + "crypto/rand" + "encoding/hex" "testing" "time" + "github.com/google/uuid" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/suite" "github.com/waku-org/go-waku/waku/v2/protocol" @@ -19,6 +24,7 @@ func TestFilterApiSuite(t *testing.T) { type FilterApiTestSuite struct { filter.FilterTestSuite + msgRcvd chan bool } func (s *FilterApiTestSuite) SetupTest() { @@ -96,3 +102,92 @@ func (s *FilterApiTestSuite) TestSubscribe() { s.Log.Info("DataCh is closed") } + +func (s *FilterApiTestSuite) OnNewEnvelope(env 
*protocol.Envelope) error { + if env.Message().ContentTopic == s.ContentFilter.ContentTopicsList()[0] { + s.Log.Info("received message via filter") + s.msgRcvd <- true + } else { + s.Log.Info("received message via filter but doesn't match contentTopic") + } + return nil +} + +func (s *FilterApiTestSuite) TestFilterManager() { + ctx, cancel := context.WithCancel(context.Background()) + + testPubsubTopic := s.TestTopic + contentTopicBytes := make([]byte, 4) + _, err := rand.Read(contentTopicBytes) + + s.Require().NoError(err) + + s.ContentFilter = protocol.ContentFilter{ + PubsubTopic: testPubsubTopic, + ContentTopics: protocol.NewContentTopicSet("/test/filtermgr" + hex.EncodeToString(contentTopicBytes) + "/topic/proto"), + } + + s.msgRcvd = make(chan bool, 1) + + s.Log.Info("creating filterManager") + fm := NewFilterManager(ctx, s.Log, 2, s, s.LightNode) + fm.filterSubBatchDuration = 1 * time.Second + fm.onlineChecker.SetOnline(true) + fID := uuid.NewString() + fm.SubscribeFilter(fID, s.ContentFilter) + time.Sleep(2 * time.Second) + + // Ensure there is at least 1 active filter subscription + subscriptions := s.LightNode.Subscriptions() + s.Require().Greater(len(subscriptions), 0) + + s.Log.Info("publishing msg") + + s.PublishMsg(&filter.WakuMsg{ + Payload: "filtermgr testMsg", + ContentTopic: s.ContentFilter.ContentTopicsList()[0], + PubSubTopic: testPubsubTopic, + }) + t := time.NewTicker(2 * time.Second) + select { + case received := <-s.msgRcvd: + s.Require().True(received) + s.Log.Info("unsubscribe 1") + case <-t.C: + s.Log.Error("timed out waiting for message") + s.Fail("timed out waiting for message") + } + // Mock peers going down + s.LightNodeHost.Peerstore().RemovePeer(s.FullNodeHost.ID()) + + fm.OnConnectionStatusChange("", false) + time.Sleep(2 * time.Second) + fm.OnConnectionStatusChange("", true) + s.ConnectToFullNode(s.LightNode, s.FullNode) + time.Sleep(3 * time.Second) + + // Ensure there is at least 1 active filter subscription + subscriptions = 
s.LightNode.Subscriptions() + s.Require().Greater(len(subscriptions), 0) + s.Log.Info("publish message 2") + + // Ensure that messages are retrieved with a fresh sub + s.PublishMsg(&filter.WakuMsg{ + Payload: "filtermgr testMsg2", + ContentTopic: s.ContentFilter.ContentTopicsList()[0], + PubSubTopic: testPubsubTopic, + }) + t = time.NewTicker(2 * time.Second) + + select { + case received := <-s.msgRcvd: + s.Require().True(received) + s.Log.Info("received message 2") + case <-t.C: + s.Log.Error("timed out waiting for message 2") + s.Fail("timed out waiting for message 2") + } + + fm.UnsubscribeFilter(fID) + cancel() +} diff --git a/waku/v2/protocol/filter/filter_ping_test.go b/waku/v2/protocol/filter/filter_ping_test.go index cc6dfb2a..619b9e93 100644 --- a/waku/v2/protocol/filter/filter_ping_test.go +++ b/waku/v2/protocol/filter/filter_ping_test.go @@ -26,7 +26,7 @@ func (s *FilterTestSuite) TestUnSubscriptionPing() { err := s.LightNode.Ping(context.Background(), s.FullNodeHost.ID()) s.Require().NoError(err) - _, err = s.LightNode.Unsubscribe(s.ctx, s.contentFilter, WithPeer(s.FullNodeHost.ID())) + _, err = s.LightNode.Unsubscribe(s.ctx, s.ContentFilter, WithPeer(s.FullNodeHost.ID())) s.Require().NoError(err) err = s.LightNode.Ping(context.Background(), s.FullNodeHost.ID()) diff --git a/waku/v2/protocol/filter/filter_proto_ident_test.go b/waku/v2/protocol/filter/filter_proto_ident_test.go index 549071a1..6614bfdc 100644 --- a/waku/v2/protocol/filter/filter_proto_ident_test.go +++ b/waku/v2/protocol/filter/filter_proto_ident_test.go @@ -220,8 +220,8 @@ func (s *FilterTestSuite) TestIncorrectSubscribeIdentifier() { s.LightNodeHost.Peerstore().AddAddr(s.FullNodeHost.ID(), tests.GetHostAddress(s.FullNodeHost), peerstore.PermanentAddrTTL) // Subscribe with incorrect SubscribeID - s.contentFilter = protocol.ContentFilter{PubsubTopic: s.TestTopic, ContentTopics: protocol.NewContentTopicSet(s.TestContentTopic)} - _, err := s.LightNode.IncorrectSubscribe(s.ctx, 
s.contentFilter, WithPeer(s.FullNodeHost.ID())) + s.ContentFilter = protocol.ContentFilter{PubsubTopic: s.TestTopic, ContentTopics: protocol.NewContentTopicSet(s.TestContentTopic)} + _, err := s.LightNode.IncorrectSubscribe(s.ctx, s.ContentFilter, WithPeer(s.FullNodeHost.ID())) s.Require().Error(err) _, err = s.LightNode.UnsubscribeAll(s.ctx) @@ -266,8 +266,8 @@ func (s *FilterTestSuite) TestIncorrectPushIdentifier() { s.Require().NoError(err) // Subscribe - s.contentFilter = protocol.ContentFilter{PubsubTopic: s.TestTopic, ContentTopics: protocol.NewContentTopicSet(s.TestContentTopic)} - s.subDetails, err = s.LightNode.Subscribe(s.ctx, s.contentFilter, WithPeer(s.FullNodeHost.ID())) + s.ContentFilter = protocol.ContentFilter{PubsubTopic: s.TestTopic, ContentTopics: protocol.NewContentTopicSet(s.TestContentTopic)} + s.subDetails, err = s.LightNode.Subscribe(s.ctx, s.ContentFilter, WithPeer(s.FullNodeHost.ID())) s.Require().NoError(err) time.Sleep(1 * time.Second) diff --git a/waku/v2/protocol/filter/filter_subscribe_test.go b/waku/v2/protocol/filter/filter_subscribe_test.go index 112718ee..c8ec33c9 100644 --- a/waku/v2/protocol/filter/filter_subscribe_test.go +++ b/waku/v2/protocol/filter/filter_subscribe_test.go @@ -26,7 +26,7 @@ func (s *FilterTestSuite) TestWakuFilter() { // Wrong content topic s.waitForTimeout(&WakuMsg{s.TestTopic, "TopicB", "second"}) - _, err := s.LightNode.Unsubscribe(s.ctx, s.contentFilter, WithPeer(s.FullNodeHost.ID())) + _, err := s.LightNode.Unsubscribe(s.ctx, s.ContentFilter, WithPeer(s.FullNodeHost.ID())) s.Require().NoError(err) // Should not receive after unsubscribe @@ -180,8 +180,8 @@ func (s *FilterTestSuite) TestContentTopicsLimit() { s.ctx, s.ctxCancel = context.WithTimeout(context.Background(), 20*time.Second) // Test can't exceed 10 seconds // Detect existing content topics from previous test - if len(s.contentFilter.PubsubTopic) > 0 { - existingTopics := len(s.contentFilter.ContentTopicsList()) + if 
len(s.ContentFilter.PubsubTopic) > 0 { + existingTopics := len(s.ContentFilter.ContentTopicsList()) if existingTopics > 0 { maxContentTopics = maxContentTopics - existingTopics } @@ -233,13 +233,13 @@ func (s *FilterTestSuite) TestSubscribeErrorHandling() { }) // Subscribe with empty pubsub - s.contentFilter = protocol.ContentFilter{PubsubTopic: messages[0].PubSubTopic, ContentTopics: protocol.NewContentTopicSet(messages[0].ContentTopic)} - _, err := s.LightNode.Subscribe(s.ctx, s.contentFilter, WithPeer(s.FullNodeHost.ID())) + s.ContentFilter = protocol.ContentFilter{PubsubTopic: messages[0].PubSubTopic, ContentTopics: protocol.NewContentTopicSet(messages[0].ContentTopic)} + _, err := s.LightNode.Subscribe(s.ctx, s.ContentFilter, WithPeer(s.FullNodeHost.ID())) s.Require().Error(err) // Subscribe with empty content topic - s.contentFilter = protocol.ContentFilter{PubsubTopic: messages[1].PubSubTopic, ContentTopics: protocol.NewContentTopicSet(messages[1].ContentTopic)} - _, err = s.LightNode.Subscribe(s.ctx, s.contentFilter, WithPeer(s.FullNodeHost.ID())) + s.ContentFilter = protocol.ContentFilter{PubsubTopic: messages[1].PubSubTopic, ContentTopics: protocol.NewContentTopicSet(messages[1].ContentTopic)} + _, err = s.LightNode.Subscribe(s.ctx, s.ContentFilter, WithPeer(s.FullNodeHost.ID())) s.Require().Error(err) } @@ -271,8 +271,8 @@ func (s *FilterTestSuite) TestMultipleFullNodeSubscriptions() { s.Log.Info("Subscribing to second", zap.String("fullNode", string(fullNodeIDHex))) // Subscribe to the second full node - s.contentFilter = protocol.ContentFilter{PubsubTopic: s.TestTopic, ContentTopics: protocol.NewContentTopicSet(s.TestContentTopic)} - _, err = s.LightNode.Subscribe(s.ctx, s.contentFilter, WithPeer(s.FullNodeHost.ID())) + s.ContentFilter = protocol.ContentFilter{PubsubTopic: s.TestTopic, ContentTopics: protocol.NewContentTopicSet(s.TestContentTopic)} + _, err = s.LightNode.Subscribe(s.ctx, s.ContentFilter, WithPeer(s.FullNodeHost.ID())) 
s.Require().NoError(err) _, err = s.LightNode.UnsubscribeAll(s.ctx) diff --git a/waku/v2/protocol/filter/filter_test.go b/waku/v2/protocol/filter/filter_test.go index 5fa0c413..ad590901 100644 --- a/waku/v2/protocol/filter/filter_test.go +++ b/waku/v2/protocol/filter/filter_test.go @@ -117,7 +117,7 @@ func (s *FilterTestSuite) TestAutoShard() { // Wrong content topic s.waitForTimeout(&WakuMsg{s.TestTopic, "TopicB", "second"}) - _, err = s.LightNode.Unsubscribe(s.ctx, s.contentFilter, WithPeer(s.FullNodeHost.ID())) + _, err = s.LightNode.Unsubscribe(s.ctx, s.ContentFilter, WithPeer(s.FullNodeHost.ID())) s.Require().NoError(err) time.Sleep(1 * time.Second) diff --git a/waku/v2/protocol/filter/test_utils.go b/waku/v2/protocol/filter/test_utils.go index 9a2b651e..015cb352 100644 --- a/waku/v2/protocol/filter/test_utils.go +++ b/waku/v2/protocol/filter/test_utils.go @@ -47,7 +47,7 @@ type FilterTestSuite struct { ctx context.Context ctxCancel context.CancelFunc wg *sync.WaitGroup - contentFilter protocol.ContentFilter + ContentFilter protocol.ContentFilter subDetails []*subscription.SubscriptionDetails Log *zap.Logger @@ -63,7 +63,7 @@ type WakuMsg struct { } func (s *FilterTestSuite) SetupTest() { - log := utils.Logger() //.Named("filterv2-test") + log := utils.Logger() s.Log = log s.Log.Info("SetupTest()") @@ -192,7 +192,7 @@ func (s *FilterTestSuite) waitForMsgFromChan(msg *WakuMsg, ch chan *protocol.Env defer s.wg.Done() select { case env := <-ch: - for _, topic := range s.contentFilter.ContentTopicsList() { + for _, topic := range s.ContentFilter.ContentTopicsList() { if topic == env.Message().GetContentTopic() { msgFound = true } @@ -308,8 +308,8 @@ func (s *FilterTestSuite) subscribe(pubsubTopic string, contentTopic string, pee for _, sub := range s.subDetails { if sub.ContentFilter.PubsubTopic == pubsubTopic { sub.Add(contentTopic) - s.contentFilter = sub.ContentFilter - subDetails, err := s.LightNode.Subscribe(s.ctx, s.contentFilter, WithPeer(peer)) + 
s.ContentFilter = sub.ContentFilter + subDetails, err := s.LightNode.Subscribe(s.ctx, s.ContentFilter, WithPeer(peer)) s.subDetails = subDetails s.Require().NoError(err) return @@ -317,7 +317,7 @@ func (s *FilterTestSuite) subscribe(pubsubTopic string, contentTopic string, pee } s.subDetails = s.getSub(pubsubTopic, contentTopic, peer) - s.contentFilter = s.subDetails[0].ContentFilter + s.ContentFilter = s.subDetails[0].ContentFilter } func (s *FilterTestSuite) unsubscribe(pubsubTopic string, contentTopic string, peer peer.ID) []*subscription.SubscriptionDetails { @@ -331,7 +331,7 @@ func (s *FilterTestSuite) unsubscribe(pubsubTopic string, contentTopic string, p } else { sub.Remove(contentTopic) } - s.contentFilter = sub.ContentFilter + s.ContentFilter = sub.ContentFilter } } From 5aa11311f83305847cabd13927251fbcb6add56f Mon Sep 17 00:00:00 2001 From: Prem Chaitanya Prathi Date: Tue, 6 Aug 2024 17:51:11 +0530 Subject: [PATCH 21/27] fix: use corrected connected peer count and add check to avoid crash (#1182) --- .github/workflows/ci.yml | 2 +- waku/v2/peermanager/peer_manager.go | 33 +++++++++++++---------------- 2 files changed, 16 insertions(+), 19 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 690b3da9..aec81a70 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -163,7 +163,7 @@ jobs: - name: "Run storev3 tests" run: | docker compose -f .github/docker-compose/nwaku.yml up -d - NWAKU_HOST=$(docker-compose -f .github/docker-compose/nwaku.yml port nwaku 60000) + NWAKU_HOST=$(docker compose -f .github/docker-compose/nwaku.yml port nwaku 60000) NWAKU_PORT=$(echo $NWAKU_HOST | cut -d ":" -f 2) sleep 5 make test-storev3 TEST_STOREV3_NODE="/ip4/127.0.0.1/tcp/${NWAKU_PORT}/p2p/16Uiu2HAmMGhfSTUzKbsjMWxc6T1X4wiTWSF1bEWSLjAukCm7KiHV" diff --git a/waku/v2/peermanager/peer_manager.go b/waku/v2/peermanager/peer_manager.go index 1cfc5484..1441f7f4 100644 --- a/waku/v2/peermanager/peer_manager.go +++ 
b/waku/v2/peermanager/peer_manager.go @@ -309,19 +309,15 @@ func (pm *PeerManager) ensureMinRelayConnsPerTopic() { defer pm.topicMutex.RUnlock() for topicStr, topicInst := range pm.subRelayTopics { - // @cammellos reported that ListPeers returned an invalid number of - // peers. This will ensure that the peers returned by this function - // match those peers that are currently connected - meshPeerLen := pm.checkAndUpdateTopicHealth(topicInst) - topicPeers := pm.host.Peerstore().(wps.WakuPeerstore).PeersByPubSubTopic(topicStr) - curPeerLen := topicPeers.Len() - if meshPeerLen < waku_proto.GossipSubDMin || curPeerLen < pm.OutPeersTarget { + curConnectedPeerLen := pm.getPeersBasedOnconnectionStatus(topicStr, network.Connected).Len() + + if meshPeerLen < waku_proto.GossipSubDMin || curConnectedPeerLen < pm.OutPeersTarget { pm.logger.Debug("subscribed topic has not reached target peers, initiating more connections to maintain healthy mesh", - zap.String("pubSubTopic", topicStr), zap.Int("connectedPeerCount", curPeerLen), + zap.String("pubSubTopic", topicStr), zap.Int("connectedPeerCount", curConnectedPeerLen), zap.Int("targetPeers", pm.OutPeersTarget)) //Find not connected peers. - notConnectedPeers := pm.getNotConnectedPers(topicStr) + notConnectedPeers := pm.getPeersBasedOnconnectionStatus(topicStr, network.NotConnected) if notConnectedPeers.Len() == 0 { pm.logger.Debug("could not find any peers in peerstore to connect to, discovering more", zap.String("pubSubTopic", topicStr)) go pm.discoverPeersByPubsubTopics([]string{topicStr}, relay.WakuRelayID_v200, pm.ctx, 2) @@ -329,12 +325,13 @@ func (pm *PeerManager) ensureMinRelayConnsPerTopic() { } pm.logger.Debug("connecting to eligible peers in peerstore", zap.String("pubSubTopic", topicStr)) //Connect to eligible peers. 
- numPeersToConnect := pm.OutPeersTarget - curPeerLen - - if numPeersToConnect > notConnectedPeers.Len() { - numPeersToConnect = notConnectedPeers.Len() + numPeersToConnect := pm.OutPeersTarget - curConnectedPeerLen + if numPeersToConnect > 0 { + if numPeersToConnect > notConnectedPeers.Len() { + numPeersToConnect = notConnectedPeers.Len() + } + pm.connectToSpecifiedPeers(notConnectedPeers[0:numPeersToConnect]) } - pm.connectToSpecifiedPeers(notConnectedPeers[0:numPeersToConnect]) } } } @@ -374,8 +371,8 @@ func (pm *PeerManager) connectToSpecifiedPeers(peers peer.IDSlice) { } } -// getNotConnectedPers returns peers for a pubSubTopic that are not connected. -func (pm *PeerManager) getNotConnectedPers(pubsubTopic string) (notConnectedPeers peer.IDSlice) { +// getPeersBasedOnconnectionStatus returns peers for a pubSubTopic that are either connected/not-connected based on status passed. +func (pm *PeerManager) getPeersBasedOnconnectionStatus(pubsubTopic string, connected network.Connectedness) (filteredPeers peer.IDSlice) { var peerList peer.IDSlice if pubsubTopic == "" { peerList = pm.host.Peerstore().Peers() @@ -383,8 +380,8 @@ func (pm *PeerManager) getNotConnectedPers(pubsubTopic string) (notConnectedPeer peerList = pm.host.Peerstore().(*wps.WakuPeerstoreImpl).PeersByPubSubTopic(pubsubTopic) } for _, peerID := range peerList { - if pm.host.Network().Connectedness(peerID) != network.Connected { - notConnectedPeers = append(notConnectedPeers, peerID) + if pm.host.Network().Connectedness(peerID) == connected { + filteredPeers = append(filteredPeers, peerID) } } return From 240051b8b8aeb40d96a868a35634f6ede1aa8b59 Mon Sep 17 00:00:00 2001 From: kaichao Date: Tue, 6 Aug 2024 21:05:47 +0800 Subject: [PATCH 22/27] chore: move outgoing message check from status-go to go-waku (#1180) --- waku/v2/api/publish/message_check.go | 248 ++++++++++++++++++++++ waku/v2/api/publish/message_check_test.go | 33 +++ 2 files changed, 281 insertions(+) create mode 100644 
waku/v2/api/publish/message_check.go create mode 100644 waku/v2/api/publish/message_check_test.go diff --git a/waku/v2/api/publish/message_check.go b/waku/v2/api/publish/message_check.go new file mode 100644 index 00000000..ef5148bd --- /dev/null +++ b/waku/v2/api/publish/message_check.go @@ -0,0 +1,248 @@ +package publish + +import ( + "bytes" + "context" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/waku-org/go-waku/waku/v2/protocol" + "github.com/waku-org/go-waku/waku/v2/protocol/pb" + "github.com/waku-org/go-waku/waku/v2/protocol/store" + "github.com/waku-org/go-waku/waku/v2/timesource" + "go.uber.org/zap" +) + +const DefaultMaxHashQueryLength = 100 +const DefaultHashQueryInterval = 3 * time.Second +const DefaultMessageSentPeriod = 3 // in seconds +const DefaultMessageExpiredPerid = 10 // in seconds + +type MessageSentCheckOption func(*MessageSentCheck) error + +// MessageSentCheck tracks the outgoing messages and check against store node +// if the message sent time has passed the `messageSentPeriod`, the message id will be includes for the next query +// if the message keeps missing after `messageExpiredPerid`, the message id will be expired +type MessageSentCheck struct { + messageIDs map[string]map[common.Hash]uint32 + messageIDsMu sync.RWMutex + storePeerID peer.ID + MessageStoredChan chan common.Hash + MessageExpiredChan chan common.Hash + ctx context.Context + store *store.WakuStore + timesource timesource.Timesource + logger *zap.Logger + maxHashQueryLength uint64 + hashQueryInterval time.Duration + messageSentPeriod uint32 + messageExpiredPerid uint32 +} + +// NewMessageSentCheck creates a new instance of MessageSentCheck with default parameters +func NewMessageSentCheck(ctx context.Context, store *store.WakuStore, timesource timesource.Timesource, logger *zap.Logger) *MessageSentCheck { + return &MessageSentCheck{ + messageIDs: 
make(map[string]map[common.Hash]uint32), + messageIDsMu: sync.RWMutex{}, + MessageStoredChan: make(chan common.Hash, 1000), + MessageExpiredChan: make(chan common.Hash, 1000), + ctx: ctx, + store: store, + timesource: timesource, + logger: logger, + maxHashQueryLength: DefaultMaxHashQueryLength, + hashQueryInterval: DefaultHashQueryInterval, + messageSentPeriod: DefaultMessageSentPeriod, + messageExpiredPerid: DefaultMessageExpiredPerid, + } +} + +// WithMaxHashQueryLength sets the maximum number of message hashes to query in one request +func WithMaxHashQueryLength(count uint64) MessageSentCheckOption { + return func(params *MessageSentCheck) error { + params.maxHashQueryLength = count + return nil + } +} + +// WithHashQueryInterval sets the interval to query the store node +func WithHashQueryInterval(interval time.Duration) MessageSentCheckOption { + return func(params *MessageSentCheck) error { + params.hashQueryInterval = interval + return nil + } +} + +// WithMessageSentPeriod sets the delay period to query the store node after message is published +func WithMessageSentPeriod(period uint32) MessageSentCheckOption { + return func(params *MessageSentCheck) error { + params.messageSentPeriod = period + return nil + } +} + +// WithMessageExpiredPerid sets the period that a message is considered expired +func WithMessageExpiredPerid(period uint32) MessageSentCheckOption { + return func(params *MessageSentCheck) error { + params.messageExpiredPerid = period + return nil + } +} + +// Add adds a message for message sent check +func (m *MessageSentCheck) Add(topic string, messageID common.Hash, sentTime uint32) { + m.messageIDsMu.Lock() + defer m.messageIDsMu.Unlock() + + if _, ok := m.messageIDs[topic]; !ok { + m.messageIDs[topic] = make(map[common.Hash]uint32) + } + m.messageIDs[topic][messageID] = sentTime +} + +// DeleteByMessageIDs deletes the message ids from the message sent check, used by scenarios like message acked with MVDS +func (m *MessageSentCheck) 
DeleteByMessageIDs(messageIDs []common.Hash) { + m.messageIDsMu.Lock() + defer m.messageIDsMu.Unlock() + + for pubsubTopic, subMsgs := range m.messageIDs { + for _, hash := range messageIDs { + delete(subMsgs, hash) + if len(subMsgs) == 0 { + delete(m.messageIDs, pubsubTopic) + } else { + m.messageIDs[pubsubTopic] = subMsgs + } + } + } +} + +// SetStorePeerID sets the peer id of store node +func (m *MessageSentCheck) SetStorePeerID(peerID peer.ID) { + m.storePeerID = peerID +} + +// CheckIfMessagesStored checks if the tracked outgoing messages are stored periodically +func (m *MessageSentCheck) CheckIfMessagesStored() { + ticker := time.NewTicker(m.hashQueryInterval) + defer ticker.Stop() + for { + select { + case <-m.ctx.Done(): + m.logger.Debug("stop the look for message stored check") + return + case <-ticker.C: + m.messageIDsMu.Lock() + m.logger.Debug("running loop for messages stored check", zap.Any("messageIds", m.messageIDs)) + pubsubTopics := make([]string, 0, len(m.messageIDs)) + pubsubMessageIds := make([][]common.Hash, 0, len(m.messageIDs)) + pubsubMessageTime := make([][]uint32, 0, len(m.messageIDs)) + for pubsubTopic, subMsgs := range m.messageIDs { + var queryMsgIds []common.Hash + var queryMsgTime []uint32 + for msgID, sendTime := range subMsgs { + if uint64(len(queryMsgIds)) >= m.maxHashQueryLength { + break + } + // message is sent 5 seconds ago, check if it's stored + if uint32(m.timesource.Now().Unix()) > sendTime+m.messageSentPeriod { + queryMsgIds = append(queryMsgIds, msgID) + queryMsgTime = append(queryMsgTime, sendTime) + } + } + m.logger.Debug("store query for message hashes", zap.Any("queryMsgIds", queryMsgIds), zap.String("pubsubTopic", pubsubTopic)) + if len(queryMsgIds) > 0 { + pubsubTopics = append(pubsubTopics, pubsubTopic) + pubsubMessageIds = append(pubsubMessageIds, queryMsgIds) + pubsubMessageTime = append(pubsubMessageTime, queryMsgTime) + } + } + m.messageIDsMu.Unlock() + + pubsubProcessedMessages := make([][]common.Hash, 
len(pubsubTopics)) + for i, pubsubTopic := range pubsubTopics { + processedMessages := m.messageHashBasedQuery(m.ctx, pubsubMessageIds[i], pubsubMessageTime[i], pubsubTopic) + pubsubProcessedMessages[i] = processedMessages + } + + m.messageIDsMu.Lock() + for i, pubsubTopic := range pubsubTopics { + subMsgs, ok := m.messageIDs[pubsubTopic] + if !ok { + continue + } + for _, hash := range pubsubProcessedMessages[i] { + delete(subMsgs, hash) + if len(subMsgs) == 0 { + delete(m.messageIDs, pubsubTopic) + } else { + m.messageIDs[pubsubTopic] = subMsgs + } + } + } + m.logger.Debug("messages for next store hash query", zap.Any("messageIds", m.messageIDs)) + m.messageIDsMu.Unlock() + + } + } +} + +func (m *MessageSentCheck) messageHashBasedQuery(ctx context.Context, hashes []common.Hash, relayTime []uint32, pubsubTopic string) []common.Hash { + selectedPeer := m.storePeerID + if selectedPeer == "" { + m.logger.Error("no store peer id available", zap.String("pubsubTopic", pubsubTopic)) + return []common.Hash{} + } + + var opts []store.RequestOption + requestID := protocol.GenerateRequestID() + opts = append(opts, store.WithRequestID(requestID)) + opts = append(opts, store.WithPeer(selectedPeer)) + opts = append(opts, store.WithPaging(false, m.maxHashQueryLength)) + opts = append(opts, store.IncludeData(false)) + + messageHashes := make([]pb.MessageHash, len(hashes)) + for i, hash := range hashes { + messageHashes[i] = pb.ToMessageHash(hash.Bytes()) + } + + m.logger.Debug("store.queryByHash request", zap.String("requestID", hexutil.Encode(requestID)), zap.Stringer("peerID", selectedPeer), zap.Any("messageHashes", messageHashes)) + + result, err := m.store.QueryByHash(ctx, messageHashes, opts...) 
+ if err != nil { + m.logger.Error("store.queryByHash failed", zap.String("requestID", hexutil.Encode(requestID)), zap.Stringer("peerID", selectedPeer), zap.Error(err)) + return []common.Hash{} + } + + m.logger.Debug("store.queryByHash result", zap.String("requestID", hexutil.Encode(requestID)), zap.Int("messages", len(result.Messages()))) + + var ackHashes []common.Hash + var missedHashes []common.Hash + for i, hash := range hashes { + found := false + for _, msg := range result.Messages() { + if bytes.Equal(msg.GetMessageHash(), hash.Bytes()) { + found = true + break + } + } + + if found { + ackHashes = append(ackHashes, hash) + m.MessageStoredChan <- hash + } + + if !found && uint32(m.timesource.Now().Unix()) > relayTime[i]+m.messageExpiredPerid { + missedHashes = append(missedHashes, hash) + m.MessageExpiredChan <- hash + } + } + + m.logger.Debug("ack message hashes", zap.Any("ackHashes", ackHashes)) + m.logger.Debug("missed message hashes", zap.Any("missedHashes", missedHashes)) + + return append(ackHashes, missedHashes...) 
+} diff --git a/waku/v2/api/publish/message_check_test.go b/waku/v2/api/publish/message_check_test.go new file mode 100644 index 00000000..12947258 --- /dev/null +++ b/waku/v2/api/publish/message_check_test.go @@ -0,0 +1,33 @@ +package publish + +import ( + "context" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestAddAndDelete(t *testing.T) { + ctx := context.TODO() + messageSentCheck := NewMessageSentCheck(ctx, nil, nil, nil) + + messageSentCheck.Add("topic", [32]byte{1}, 1) + messageSentCheck.Add("topic", [32]byte{2}, 2) + messageSentCheck.Add("topic", [32]byte{3}, 3) + messageSentCheck.Add("another-topic", [32]byte{4}, 4) + + require.Equal(t, uint32(1), messageSentCheck.messageIDs["topic"][[32]byte{1}]) + require.Equal(t, uint32(2), messageSentCheck.messageIDs["topic"][[32]byte{2}]) + require.Equal(t, uint32(3), messageSentCheck.messageIDs["topic"][[32]byte{3}]) + require.Equal(t, uint32(4), messageSentCheck.messageIDs["another-topic"][[32]byte{4}]) + + messageSentCheck.DeleteByMessageIDs([]common.Hash{[32]byte{1}, [32]byte{2}}) + require.NotNil(t, messageSentCheck.messageIDs["topic"]) + require.Equal(t, uint32(3), messageSentCheck.messageIDs["topic"][[32]byte{3}]) + + messageSentCheck.DeleteByMessageIDs([]common.Hash{[32]byte{3}}) + require.Nil(t, messageSentCheck.messageIDs["topic"]) + + require.Equal(t, uint32(4), messageSentCheck.messageIDs["another-topic"][[32]byte{4}]) +} From 4f1d692413e9bc0bff52d9b9827db984e02accd7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?rich=CE=9Brd?= Date: Tue, 6 Aug 2024 16:06:53 -0400 Subject: [PATCH 23/27] fix: keep channels open (#1183) --- waku/v2/api/publish/message_queue.go | 34 +++++++++---- waku/v2/api/publish/message_queue_test.go | 62 +++++++++++++++-------- 2 files changed, 63 insertions(+), 33 deletions(-) diff --git a/waku/v2/api/publish/message_queue.go b/waku/v2/api/publish/message_queue.go index fbd79df8..ad153ff9 100644 --- 
a/waku/v2/api/publish/message_queue.go +++ b/waku/v2/api/publish/message_queue.go @@ -103,12 +103,6 @@ func (m *MessageQueue) Start(ctx context.Context) { m.envelopeAvailableOnPriorityQueueSignal <- struct{}{} case <-ctx.Done(): - if m.usePriorityQueue { - close(m.throttledPrioritySendQueue) - close(m.envelopeAvailableOnPriorityQueueSignal) - } else { - close(m.toSendChan) - } return } } @@ -116,27 +110,43 @@ func (m *MessageQueue) Start(ctx context.Context) { // Push an envelope into the message queue. The priority is optional, and will be ignored // if the message queue does not use a priority queue -func (m *MessageQueue) Push(envelope *protocol.Envelope, priority ...MessagePriority) { +func (m *MessageQueue) Push(ctx context.Context, envelope *protocol.Envelope, priority ...MessagePriority) error { if m.usePriorityQueue { msgPriority := NormalPriority if len(priority) != 0 { msgPriority = priority[0] } - m.throttledPrioritySendQueue <- &envelopePriority{ + pEnvelope := &envelopePriority{ envelope: envelope, priority: msgPriority, } + + select { + case m.throttledPrioritySendQueue <- pEnvelope: + // Do nothing + case <-ctx.Done(): + return ctx.Err() + } } else { - m.toSendChan <- envelope + select { + case m.toSendChan <- envelope: + // Do nothing + case <-ctx.Done(): + return ctx.Err() + } } + + return nil } // Pop will return a channel on which a message can be retrieved from the message queue -func (m *MessageQueue) Pop() <-chan *protocol.Envelope { +func (m *MessageQueue) Pop(ctx context.Context) <-chan *protocol.Envelope { ch := make(chan *protocol.Envelope) go func() { + defer close(ch) + select { case _, ok := <-m.envelopeAvailableOnPriorityQueueSignal: if ok { @@ -147,9 +157,11 @@ func (m *MessageQueue) Pop() <-chan *protocol.Envelope { if ok { ch <- envelope } + + case <-ctx.Done(): + return } - close(ch) }() return ch diff --git a/waku/v2/api/publish/message_queue_test.go b/waku/v2/api/publish/message_queue_test.go index 15761c57..e7b8e21a 100644 --- 
a/waku/v2/api/publish/message_queue_test.go +++ b/waku/v2/api/publish/message_queue_test.go @@ -17,25 +17,30 @@ func TestFifoQueue(t *testing.T) { queue := NewMessageQueue(10, false) go queue.Start(ctx) - queue.Push(protocol.NewEnvelope(&pb.WakuMessage{}, 0, "A")) - queue.Push(protocol.NewEnvelope(&pb.WakuMessage{}, 0, "B")) - queue.Push(protocol.NewEnvelope(&pb.WakuMessage{}, 0, "C")) + err := queue.Push(ctx, protocol.NewEnvelope(&pb.WakuMessage{}, 0, "A")) + require.NoError(t, err) - envelope, ok := <-queue.Pop() + err = queue.Push(ctx, protocol.NewEnvelope(&pb.WakuMessage{}, 0, "B")) + require.NoError(t, err) + + err = queue.Push(ctx, protocol.NewEnvelope(&pb.WakuMessage{}, 0, "C")) + require.NoError(t, err) + + envelope, ok := <-queue.Pop(ctx) require.True(t, ok) require.Equal(t, "A", envelope.PubsubTopic()) - envelope, ok = <-queue.Pop() + envelope, ok = <-queue.Pop(ctx) require.True(t, ok) require.Equal(t, "B", envelope.PubsubTopic()) - envelope, ok = <-queue.Pop() + envelope, ok = <-queue.Pop(ctx) require.True(t, ok) require.Equal(t, "C", envelope.PubsubTopic()) cancel() - _, ok = <-queue.Pop() + _, ok = <-queue.Pop(ctx) require.False(t, ok) } @@ -45,47 +50,60 @@ func TestPriorityQueue(t *testing.T) { queue := NewMessageQueue(10, true) go queue.Start(ctx) - queue.Push(protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(0)}, 0, "A"), LowPriority) - queue.Push(protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(1)}, 0, "B"), LowPriority) - queue.Push(protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(2)}, 0, "C"), HighPriority) - queue.Push(protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(3)}, 0, "D"), NormalPriority) - queue.Push(protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(4)}, 0, "E"), HighPriority) - queue.Push(protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(5)}, 0, "F"), LowPriority) - queue.Push(protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(6)}, 0, "G"), NormalPriority) + err := 
queue.Push(ctx, protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(0)}, 0, "A"), LowPriority) + require.NoError(t, err) + + err = queue.Push(ctx, protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(1)}, 0, "B"), LowPriority) + require.NoError(t, err) + + err = queue.Push(ctx, protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(2)}, 0, "C"), HighPriority) + require.NoError(t, err) + + err = queue.Push(ctx, protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(3)}, 0, "D"), NormalPriority) + require.NoError(t, err) + + err = queue.Push(ctx, protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(4)}, 0, "E"), HighPriority) + require.NoError(t, err) + + err = queue.Push(ctx, protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(5)}, 0, "F"), LowPriority) + require.NoError(t, err) + + err = queue.Push(ctx, protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(6)}, 0, "G"), NormalPriority) + require.NoError(t, err) time.Sleep(2 * time.Second) - envelope, ok := <-queue.Pop() + envelope, ok := <-queue.Pop(ctx) require.True(t, ok) require.Equal(t, "C", envelope.PubsubTopic()) - envelope, ok = <-queue.Pop() + envelope, ok = <-queue.Pop(ctx) require.True(t, ok) require.Equal(t, "E", envelope.PubsubTopic()) - envelope, ok = <-queue.Pop() + envelope, ok = <-queue.Pop(ctx) require.True(t, ok) require.Equal(t, "D", envelope.PubsubTopic()) - envelope, ok = <-queue.Pop() + envelope, ok = <-queue.Pop(ctx) require.True(t, ok) require.Equal(t, "G", envelope.PubsubTopic()) - envelope, ok = <-queue.Pop() + envelope, ok = <-queue.Pop(ctx) require.True(t, ok) require.Equal(t, "A", envelope.PubsubTopic()) - envelope, ok = <-queue.Pop() + envelope, ok = <-queue.Pop(ctx) require.True(t, ok) require.Equal(t, "B", envelope.PubsubTopic()) - envelope, ok = <-queue.Pop() + envelope, ok = <-queue.Pop(ctx) require.True(t, ok) require.Equal(t, "F", envelope.PubsubTopic()) cancel() - _, ok = <-queue.Pop() + _, ok = <-queue.Pop(ctx) require.False(t, ok) 
} From c2e6320953b195b1757b0e8ae3b43f945388be98 Mon Sep 17 00:00:00 2001 From: kaichao Date: Wed, 7 Aug 2024 10:24:08 +0800 Subject: [PATCH 24/27] chore: refactor message sent check method (#1184) --- waku/v2/api/publish/message_check.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/waku/v2/api/publish/message_check.go b/waku/v2/api/publish/message_check.go index ef5148bd..a7b16a57 100644 --- a/waku/v2/api/publish/message_check.go +++ b/waku/v2/api/publish/message_check.go @@ -125,8 +125,8 @@ func (m *MessageSentCheck) SetStorePeerID(peerID peer.ID) { m.storePeerID = peerID } -// CheckIfMessagesStored checks if the tracked outgoing messages are stored periodically -func (m *MessageSentCheck) CheckIfMessagesStored() { +// Start checks if the tracked outgoing messages are stored periodically +func (m *MessageSentCheck) Start() { ticker := time.NewTicker(m.hashQueryInterval) defer ticker.Stop() for { From 3eab289abb4cbe712facb3644a6f47427a47afe1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?rich=CE=9Brd?= Date: Fri, 9 Aug 2024 11:51:14 -0400 Subject: [PATCH 25/27] feat: ping lightpush peers (#1167) Co-authored-by: Prem Chaitanya Prathi --- waku/v2/node/keepalive.go | 72 +++++++++++++++++++++++---------------- waku/v2/node/wakunode2.go | 10 +++--- 2 files changed, 47 insertions(+), 35 deletions(-) diff --git a/waku/v2/node/keepalive.go b/waku/v2/node/keepalive.go index 92e0ab1b..eb28d517 100644 --- a/waku/v2/node/keepalive.go +++ b/waku/v2/node/keepalive.go @@ -23,7 +23,7 @@ const maxAllowedPingFailures = 2 // the peers if they don't reply back const sleepDetectionIntervalFactor = 3 -const maxPeersToPing = 10 +const maxPeersToPingPerProtocol = 10 const maxAllowedSubsequentPingFailures = 2 @@ -56,8 +56,8 @@ func (w *WakuNode) startKeepAlive(ctx context.Context, randomPeersPingDuration t } allPeersTickerC := make(<-chan time.Time) - if randomPeersPingDuration != 0 { - allPeersTicker := time.NewTicker(randomPeersPingDuration) + if allPeersPingDuration 
!= 0 { + allPeersTicker := time.NewTicker(allPeersPingDuration) defer allPeersTicker.Stop() randomPeersTickerC = allPeersTicker.C } @@ -72,13 +72,15 @@ func (w *WakuNode) startKeepAlive(ctx context.Context, randomPeersPingDuration t select { case <-allPeersTickerC: - relayPeersSet := make(map[peer.ID]struct{}) - for _, t := range w.Relay().Topics() { - for _, p := range w.Relay().PubSub().ListPeers(t) { - relayPeersSet[p] = struct{}{} + if w.opts.enableRelay { + relayPeersSet := make(map[peer.ID]struct{}) + for _, t := range w.Relay().Topics() { + for _, p := range w.Relay().PubSub().ListPeers(t) { + relayPeersSet[p] = struct{}{} + } } + peersToPing = append(peersToPing, maps.Keys(relayPeersSet)...) } - peersToPing = maps.Keys(relayPeersSet) case <-randomPeersTickerC: difference := w.timesource.Now().UnixNano() - lastTimeExecuted.UnixNano() @@ -94,36 +96,46 @@ func (w *WakuNode) startKeepAlive(ctx context.Context, randomPeersPingDuration t continue } - // Priorize mesh peers - meshPeersSet := make(map[peer.ID]struct{}) - for _, t := range w.Relay().Topics() { - for _, p := range w.Relay().PubSub().MeshPeers(t) { - meshPeersSet[p] = struct{}{} - } - } - peersToPing = append(peersToPing, maps.Keys(meshPeersSet)...) - - // Ping also some random relay peers - if maxPeersToPing-len(peersToPing) > 0 { - relayPeersSet := make(map[peer.ID]struct{}) + if w.opts.enableRelay { + // Priorize mesh peers + meshPeersSet := make(map[peer.ID]struct{}) for _, t := range w.Relay().Topics() { - for _, p := range w.Relay().PubSub().ListPeers(t) { - if _, ok := meshPeersSet[p]; !ok { - relayPeersSet[p] = struct{}{} - } + for _, p := range w.Relay().PubSub().MeshPeers(t) { + meshPeersSet[p] = struct{}{} } } + peersToPing = append(peersToPing, maps.Keys(meshPeersSet)...) 
- relayPeers := maps.Keys(relayPeersSet) - rand.Shuffle(len(relayPeers), func(i, j int) { relayPeers[i], relayPeers[j] = relayPeers[j], relayPeers[i] }) + // Ping also some random relay peers + if maxPeersToPingPerProtocol-len(peersToPing) > 0 { + relayPeersSet := make(map[peer.ID]struct{}) + for _, t := range w.Relay().Topics() { + for _, p := range w.Relay().PubSub().ListPeers(t) { + if _, ok := meshPeersSet[p]; !ok { + relayPeersSet[p] = struct{}{} + } + } + } - peerLen := maxPeersToPing - len(peersToPing) - if peerLen > len(relayPeers) { - peerLen = len(relayPeers) + relayPeers := maps.Keys(relayPeersSet) + rand.Shuffle(len(relayPeers), func(i, j int) { relayPeers[i], relayPeers[j] = relayPeers[j], relayPeers[i] }) + + peerLen := maxPeersToPingPerProtocol - len(peersToPing) + if peerLen > len(relayPeers) { + peerLen = len(relayPeers) + } + peersToPing = append(peersToPing, relayPeers[0:peerLen]...) } - peersToPing = append(peersToPing, relayPeers[0:peerLen]...) } + if w.opts.enableFilterLightNode { + // We also ping all filter nodes + filterPeersSet := make(map[peer.ID]struct{}) + for _, s := range w.FilterLightnode().Subscriptions() { + filterPeersSet[s.PeerID] = struct{}{} + } + peersToPing = append(peersToPing, maps.Keys(filterPeersSet)...) 
+ } case <-ctx.Done(): w.log.Info("stopping ping protocol") return diff --git a/waku/v2/node/wakunode2.go b/waku/v2/node/wakunode2.go index dd7fbae9..747109ff 100644 --- a/waku/v2/node/wakunode2.go +++ b/waku/v2/node/wakunode2.go @@ -378,11 +378,6 @@ func (w *WakuNode) Start(ctx context.Context) error { return err } - if w.opts.keepAliveRandomPeersInterval > time.Duration(0) || w.opts.keepAliveAllPeersInterval > time.Duration(0) { - w.wg.Add(1) - go w.startKeepAlive(ctx, w.opts.keepAliveRandomPeersInterval, w.opts.keepAliveAllPeersInterval) - } - w.metadata.SetHost(host) err = w.metadata.Start(ctx) if err != nil { @@ -478,6 +473,11 @@ func (w *WakuNode) Start(ctx context.Context) error { } } + if w.opts.keepAliveRandomPeersInterval > time.Duration(0) || w.opts.keepAliveAllPeersInterval > time.Duration(0) { + w.wg.Add(1) + go w.startKeepAlive(ctx, w.opts.keepAliveRandomPeersInterval, w.opts.keepAliveAllPeersInterval) + } + w.peerExchange.SetHost(host) if w.opts.enablePeerExchange { err := w.peerExchange.Start(ctx) From 92d62a7c381648da7fd3ce67ea1fabc2231c9725 Mon Sep 17 00:00:00 2001 From: kaichao Date: Sat, 10 Aug 2024 20:05:51 +0800 Subject: [PATCH 26/27] chore: refactor sender api (#1187) --- waku/v2/api/publish/message_check.go | 21 ++- waku/v2/api/publish/message_check_test.go | 2 +- waku/v2/api/publish/message_sender.go | 170 +++++++++++++++++++++ waku/v2/api/publish/message_sender_test.go | 123 +++++++++++++++ waku/v2/api/publish/rate_limiting.go | 15 +- 5 files changed, 319 insertions(+), 12 deletions(-) create mode 100644 waku/v2/api/publish/message_sender.go create mode 100644 waku/v2/api/publish/message_sender_test.go diff --git a/waku/v2/api/publish/message_check.go b/waku/v2/api/publish/message_check.go index a7b16a57..a60a8d91 100644 --- a/waku/v2/api/publish/message_check.go +++ b/waku/v2/api/publish/message_check.go @@ -23,6 +23,13 @@ const DefaultMessageExpiredPerid = 10 // in seconds type MessageSentCheckOption func(*MessageSentCheck) error +type 
ISentCheck interface { + Start() + Add(topic string, messageID common.Hash, sentTime uint32) + DeleteByMessageIDs(messageIDs []common.Hash) + SetStorePeerID(peerID peer.ID) +} + // MessageSentCheck tracks the outgoing messages and check against store node // if the message sent time has passed the `messageSentPeriod`, the message id will be includes for the next query // if the message keeps missing after `messageExpiredPerid`, the message id will be expired @@ -30,8 +37,8 @@ type MessageSentCheck struct { messageIDs map[string]map[common.Hash]uint32 messageIDsMu sync.RWMutex storePeerID peer.ID - MessageStoredChan chan common.Hash - MessageExpiredChan chan common.Hash + messageStoredChan chan common.Hash + messageExpiredChan chan common.Hash ctx context.Context store *store.WakuStore timesource timesource.Timesource @@ -43,12 +50,12 @@ type MessageSentCheck struct { } // NewMessageSentCheck creates a new instance of MessageSentCheck with default parameters -func NewMessageSentCheck(ctx context.Context, store *store.WakuStore, timesource timesource.Timesource, logger *zap.Logger) *MessageSentCheck { +func NewMessageSentCheck(ctx context.Context, store *store.WakuStore, timesource timesource.Timesource, msgStoredChan chan common.Hash, msgExpiredChan chan common.Hash, logger *zap.Logger) *MessageSentCheck { return &MessageSentCheck{ messageIDs: make(map[string]map[common.Hash]uint32), messageIDsMu: sync.RWMutex{}, - MessageStoredChan: make(chan common.Hash, 1000), - MessageExpiredChan: make(chan common.Hash, 1000), + messageStoredChan: msgStoredChan, + messageExpiredChan: msgExpiredChan, ctx: ctx, store: store, timesource: timesource, @@ -232,12 +239,12 @@ func (m *MessageSentCheck) messageHashBasedQuery(ctx context.Context, hashes []c if found { ackHashes = append(ackHashes, hash) - m.MessageStoredChan <- hash + m.messageStoredChan <- hash } if !found && uint32(m.timesource.Now().Unix()) > relayTime[i]+m.messageExpiredPerid { missedHashes = append(missedHashes, 
hash) - m.MessageExpiredChan <- hash + m.messageExpiredChan <- hash } } diff --git a/waku/v2/api/publish/message_check_test.go b/waku/v2/api/publish/message_check_test.go index 12947258..ef53f4d3 100644 --- a/waku/v2/api/publish/message_check_test.go +++ b/waku/v2/api/publish/message_check_test.go @@ -10,7 +10,7 @@ import ( func TestAddAndDelete(t *testing.T) { ctx := context.TODO() - messageSentCheck := NewMessageSentCheck(ctx, nil, nil, nil) + messageSentCheck := NewMessageSentCheck(ctx, nil, nil, nil, nil, nil) messageSentCheck.Add("topic", [32]byte{1}, 1) messageSentCheck.Add("topic", [32]byte{2}, 2) diff --git a/waku/v2/api/publish/message_sender.go b/waku/v2/api/publish/message_sender.go new file mode 100644 index 00000000..479d894a --- /dev/null +++ b/waku/v2/api/publish/message_sender.go @@ -0,0 +1,170 @@ +package publish + +import ( + "context" + "errors" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/waku-org/go-waku/waku/v2/protocol" + "github.com/waku-org/go-waku/waku/v2/protocol/lightpush" + "github.com/waku-org/go-waku/waku/v2/protocol/relay" + "go.uber.org/zap" + "golang.org/x/time/rate" +) + +const DefaultPeersToPublishForLightpush = 2 +const DefaultPublishingLimiterRate = rate.Limit(2) +const DefaultPublishingLimitBurst = 4 + +type PublishMethod int + +const ( + LightPush PublishMethod = iota + Relay + UnknownMethod +) + +func (pm PublishMethod) String() string { + switch pm { + case LightPush: + return "LightPush" + case Relay: + return "Relay" + default: + return "Unknown" + } +} + +type MessageSender struct { + publishMethod PublishMethod + lightPush *lightpush.WakuLightPush + relay *relay.WakuRelay + messageSentCheck ISentCheck + rateLimiter *PublishRateLimiter + logger *zap.Logger +} + +type Request struct { + ctx context.Context + envelope *protocol.Envelope + publishMethod PublishMethod +} + +func NewRequest(ctx context.Context, envelope *protocol.Envelope) *Request { + return 
&Request{ + ctx: ctx, + envelope: envelope, + publishMethod: UnknownMethod, + } +} + +func (r *Request) WithPublishMethod(publishMethod PublishMethod) *Request { + r.publishMethod = publishMethod + return r +} + +func NewMessageSender(publishMethod PublishMethod, lightPush *lightpush.WakuLightPush, relay *relay.WakuRelay, logger *zap.Logger) (*MessageSender, error) { + if publishMethod == UnknownMethod { + return nil, errors.New("publish method is required") + } + return &MessageSender{ + publishMethod: publishMethod, + lightPush: lightPush, + relay: relay, + rateLimiter: NewPublishRateLimiter(DefaultPublishingLimiterRate, DefaultPublishingLimitBurst), + logger: logger, + }, nil +} + +func (ms *MessageSender) WithMessageSentCheck(messageSentCheck ISentCheck) *MessageSender { + ms.messageSentCheck = messageSentCheck + return ms +} + +func (ms *MessageSender) WithRateLimiting(rateLimiter *PublishRateLimiter) *MessageSender { + ms.rateLimiter = rateLimiter + return ms +} + +func (ms *MessageSender) Send(req *Request) error { + logger := ms.logger.With( + zap.Stringer("envelopeHash", req.envelope.Hash()), + zap.String("pubsubTopic", req.envelope.PubsubTopic()), + zap.String("contentTopic", req.envelope.Message().ContentTopic), + zap.Int64("timestamp", req.envelope.Message().GetTimestamp()), + ) + + if ms.rateLimiter != nil { + if err := ms.rateLimiter.Check(req.ctx, logger); err != nil { + return err + } + } + + publishMethod := req.publishMethod + if publishMethod == UnknownMethod { + publishMethod = ms.publishMethod + } + + switch publishMethod { + case LightPush: + if ms.lightPush == nil { + return errors.New("lightpush is not available") + } + logger.Info("publishing message via lightpush") + _, err := ms.lightPush.Publish( + req.ctx, + req.envelope.Message(), + lightpush.WithPubSubTopic(req.envelope.PubsubTopic()), + lightpush.WithMaxPeers(DefaultPeersToPublishForLightpush), + ) + if err != nil { + return err + } + case Relay: + if ms.relay == nil { + return 
errors.New("relay is not available") + } + peerCnt := len(ms.relay.PubSub().ListPeers(req.envelope.PubsubTopic())) + logger.Info("publishing message via relay", zap.Int("peerCnt", peerCnt)) + _, err := ms.relay.Publish(req.ctx, req.envelope.Message(), relay.WithPubSubTopic(req.envelope.PubsubTopic())) + if err != nil { + return err + } + default: + return errors.New("unknown publish method") + } + + if ms.messageSentCheck != nil && !req.envelope.Message().GetEphemeral() { + ms.messageSentCheck.Add( + req.envelope.PubsubTopic(), + common.BytesToHash(req.envelope.Hash().Bytes()), + uint32(req.envelope.Message().GetTimestamp()/int64(time.Second)), + ) + } + + return nil +} + +func (ms *MessageSender) Start() { + if ms.messageSentCheck != nil { + go ms.messageSentCheck.Start() + } +} + +func (ms *MessageSender) PublishMethod() PublishMethod { + return ms.publishMethod +} + +func (ms *MessageSender) MessagesDelivered(messageIDs []common.Hash) { + if ms.messageSentCheck != nil { + ms.messageSentCheck.DeleteByMessageIDs(messageIDs) + } +} + +func (ms *MessageSender) SetStorePeerID(peerID peer.ID) { + if ms.messageSentCheck != nil { + ms.messageSentCheck.SetStorePeerID(peerID) + } +} diff --git a/waku/v2/api/publish/message_sender_test.go b/waku/v2/api/publish/message_sender_test.go new file mode 100644 index 00000000..d6945c8c --- /dev/null +++ b/waku/v2/api/publish/message_sender_test.go @@ -0,0 +1,123 @@ +package publish + +import ( + "context" + "crypto/rand" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + "github.com/waku-org/go-waku/tests" + "github.com/waku-org/go-waku/waku/v2/protocol" + "github.com/waku-org/go-waku/waku/v2/protocol/pb" + "github.com/waku-org/go-waku/waku/v2/protocol/relay" + "github.com/waku-org/go-waku/waku/v2/timesource" + 
"github.com/waku-org/go-waku/waku/v2/utils" +) + +type MockMessageSentCheck struct { + Messages map[string]map[common.Hash]uint32 +} + +func (m *MockMessageSentCheck) Add(topic string, messageID common.Hash, time uint32) { + if m.Messages[topic] == nil { + m.Messages[topic] = make(map[common.Hash]uint32) + } + m.Messages[topic][messageID] = time +} + +func (m *MockMessageSentCheck) DeleteByMessageIDs(messageIDs []common.Hash) { +} + +func (m *MockMessageSentCheck) SetStorePeerID(peerID peer.ID) { +} + +func (m *MockMessageSentCheck) Start() { +} + +func TestNewSenderWithUnknownMethod(t *testing.T) { + sender, err := NewMessageSender(UnknownMethod, nil, nil, nil) + require.NotNil(t, err) + require.Nil(t, sender) +} + +func TestNewSenderWithRelay(t *testing.T) { + _, relayNode := createRelayNode(t) + err := relayNode.Start(context.Background()) + require.Nil(t, err) + defer relayNode.Stop() + sender, err := NewMessageSender(Relay, nil, relayNode, utils.Logger()) + require.Nil(t, err) + require.NotNil(t, sender) + require.Nil(t, sender.messageSentCheck) + require.Equal(t, Relay, sender.publishMethod) + + msg := &pb.WakuMessage{ + Payload: []byte{1, 2, 3}, + Timestamp: utils.GetUnixEpoch(), + ContentTopic: "test-content-topic", + } + envelope := protocol.NewEnvelope(msg, *utils.GetUnixEpoch(), "test-pubsub-topic") + req := NewRequest(context.TODO(), envelope) + err = sender.Send(req) + require.Nil(t, err) +} + +func TestNewSenderWithRelayAndMessageSentCheck(t *testing.T) { + _, relayNode := createRelayNode(t) + err := relayNode.Start(context.Background()) + require.Nil(t, err) + defer relayNode.Stop() + sender, err := NewMessageSender(Relay, nil, relayNode, utils.Logger()) + + check := &MockMessageSentCheck{Messages: make(map[string]map[common.Hash]uint32)} + sender.WithMessageSentCheck(check) + require.Nil(t, err) + require.NotNil(t, sender) + require.NotNil(t, sender.messageSentCheck) + require.Equal(t, Relay, sender.publishMethod) + + msg := &pb.WakuMessage{ + 
Payload: []byte{1, 2, 3}, + Timestamp: utils.GetUnixEpoch(), + ContentTopic: "test-content-topic", + } + envelope := protocol.NewEnvelope(msg, *utils.GetUnixEpoch(), "test-pubsub-topic") + req := NewRequest(context.TODO(), envelope) + + require.Equal(t, 0, len(check.Messages)) + + err = sender.Send(req) + require.Nil(t, err) + require.Equal(t, 1, len(check.Messages)) + require.Equal( + t, + uint32(msg.GetTimestamp()/int64(time.Second)), + check.Messages["test-pubsub-topic"][common.BytesToHash(envelope.Hash().Bytes())], + ) +} + +func TestNewSenderWithLightPush(t *testing.T) { + sender, err := NewMessageSender(LightPush, nil, nil, nil) + require.Nil(t, err) + require.NotNil(t, sender) + require.Equal(t, LightPush, sender.publishMethod) +} + +func createRelayNode(t *testing.T) (host.Host, *relay.WakuRelay) { + port, err := tests.FindFreePort(t, "", 5) + require.NoError(t, err) + host, err := tests.MakeHost(context.Background(), port, rand.Reader) + require.NoError(t, err) + bcaster := relay.NewBroadcaster(10) + relay := relay.NewWakuRelay(bcaster, 0, timesource.NewDefaultClock(), prometheus.DefaultRegisterer, utils.Logger()) + relay.SetHost(host) + err = bcaster.Start(context.Background()) + require.NoError(t, err) + + return host, relay +} diff --git a/waku/v2/api/publish/rate_limiting.go b/waku/v2/api/publish/rate_limiting.go index 4322413b..a0bddcbd 100644 --- a/waku/v2/api/publish/rate_limiting.go +++ b/waku/v2/api/publish/rate_limiting.go @@ -26,12 +26,19 @@ func NewPublishRateLimiter(r rate.Limit, b int) *PublishRateLimiter { // ThrottlePublishFn is used to decorate a PublishFn so rate limiting is applied func (p *PublishRateLimiter) ThrottlePublishFn(ctx context.Context, publishFn PublishFn) PublishFn { return func(envelope *protocol.Envelope, logger *zap.Logger) error { - if err := p.limiter.Wait(ctx); err != nil { - if !errors.Is(err, context.Canceled) { - logger.Error("could not send message (limiter)", zap.Error(err)) - } + if err := p.Check(ctx, logger); 
err != nil { return err } return publishFn(envelope, logger) } } + +func (p *PublishRateLimiter) Check(ctx context.Context, logger *zap.Logger) error { + if err := p.limiter.Wait(ctx); err != nil { + if !errors.Is(err, context.Canceled) { + logger.Error("could not send message (limiter)", zap.Error(err)) + } + return err + } + return nil +} From 159635e21bc5db050040d5b76815a1359fe69d1d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?rich=CE=9Brd?= Date: Sat, 10 Aug 2024 11:13:59 -0400 Subject: [PATCH 27/27] chore: limit the maximum number of message hashes to request per query (#1190) --- waku/v2/api/missing/missing_messages.go | 72 +++++++++++++++---------- waku/v2/api/publish/message_check.go | 8 +-- 2 files changed, 49 insertions(+), 31 deletions(-) diff --git a/waku/v2/api/missing/missing_messages.go b/waku/v2/api/missing/missing_messages.go index a50e6071..8490c966 100644 --- a/waku/v2/api/missing/missing_messages.go +++ b/waku/v2/api/missing/missing_messages.go @@ -20,6 +20,7 @@ import ( ) const maxContentTopicsPerRequest = 10 +const maxMsgHashesPerRequest = 50 // MessageTracker should keep track of messages it has seen before and // provide a way to determine whether a message exists or not. 
This @@ -247,38 +248,55 @@ func (m *MissingMessageVerifier) fetchMessagesBatch(c chan<- *protocol.Envelope, return nil } - result, err = m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (*store.Result, error) { - return m.store.QueryByHash(ctx, missingHashes, store.WithPeer(interest.peerID), store.WithPaging(false, 100)) - }, logger, "retrieving missing messages") - if err != nil { - if !errors.Is(err, context.Canceled) { - logger.Error("storenode not available", zap.Error(err)) - } - return err - } - - for !result.IsComplete() { - for _, mkv := range result.Messages() { - select { - case c <- protocol.NewEnvelope(mkv.Message, mkv.Message.GetTimestamp(), mkv.GetPubsubTopic()): - default: - m.logger.Warn("subscriber is too slow!") - } + wg := sync.WaitGroup{} + // Split into batches + for i := 0; i < len(missingHashes); i += maxMsgHashesPerRequest { + j := i + maxMsgHashesPerRequest + if j > len(missingHashes) { + j = len(missingHashes) } - result, err = m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (*store.Result, error) { - if err = result.Next(ctx); err != nil { - return nil, err + wg.Add(1) + go func(messageHashes []pb.MessageHash) { + defer wg.Done() + + result, err := m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (*store.Result, error) { + return m.store.QueryByHash(ctx, messageHashes, store.WithPeer(interest.peerID), store.WithPaging(false, maxMsgHashesPerRequest)) + }, logger, "retrieving missing messages") + if err != nil { + if !errors.Is(err, context.Canceled) { + logger.Error("storenode not available", zap.Error(err)) + } + return + } + + for !result.IsComplete() { + for _, mkv := range result.Messages() { + select { + case c <- protocol.NewEnvelope(mkv.Message,
mkv.Message.GetTimestamp(), mkv.GetPubsubTopic()): + default: + m.logger.Warn("subscriber is too slow!") + } + } + + result, err = m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (*store.Result, error) { + if err = result.Next(ctx); err != nil { + return nil, err + } + return result, nil + }, logger.With(zap.String("cursor", hex.EncodeToString(result.Cursor()))), "retrieving next page") + if err != nil { + if !errors.Is(err, context.Canceled) { + logger.Error("storenode not available", zap.Error(err)) + } + return + } } - return err - } + + }(missingHashes[i:j]) } + wg.Wait() + return nil } diff --git a/waku/v2/api/publish/message_check.go b/waku/v2/api/publish/message_check.go index a60a8d91..be22abaa 100644 --- a/waku/v2/api/publish/message_check.go +++ b/waku/v2/api/publish/message_check.go @@ -16,7 +16,7 @@ import ( "go.uber.org/zap" ) -const DefaultMaxHashQueryLength = 100 +const DefaultMaxHashQueryLength = 50 const DefaultHashQueryInterval = 3 * time.Second const DefaultMessageSentPeriod = 3 // in seconds const DefaultMessageExpiredPerid = 10 // in seconds @@ -216,7 +216,7 @@ func (m *MessageSentCheck) messageHashBasedQuery(ctx context.Context, hashes []c messageHashes[i] = pb.ToMessageHash(hash.Bytes()) } - m.logger.Debug("store.queryByHash request", zap.String("requestID", hexutil.Encode(requestID)), zap.Stringer("peerID", selectedPeer), zap.Any("messageHashes", messageHashes)) + m.logger.Debug("store.queryByHash request", zap.String("requestID", hexutil.Encode(requestID)), zap.Stringer("peerID", selectedPeer), zap.Stringers("messageHashes", messageHashes)) result, err := m.store.QueryByHash(ctx, messageHashes, opts...) 
if err != nil { @@ -248,8 +248,8 @@ func (m *MessageSentCheck) messageHashBasedQuery(ctx context.Context, hashes []c } } - m.logger.Debug("ack message hashes", zap.Any("ackHashes", ackHashes)) - m.logger.Debug("missed message hashes", zap.Any("missedHashes", missedHashes)) + m.logger.Debug("ack message hashes", zap.Stringers("ackHashes", ackHashes)) + m.logger.Debug("missed message hashes", zap.Stringers("missedHashes", missedHashes)) return append(ackHashes, missedHashes...) }