diff --git a/.gitignore b/.gitignore
index 5c7b40b26..b59f218d8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -64,7 +64,6 @@ coverage.html
 Session.vim
 .undodir/*
 /.idea/
-/.vscode/
 /cmd/*/.ethereum/
 *.iml
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 000000000..13c88e400
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "third_party/nwaku"]
+	path = third_party/nwaku
+	url = https://github.com/waku-org/nwaku
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 16fdfe542..d83958300 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -8,4 +8,7 @@
   "cSpell.words": [
     "unmarshalling"
   ],
+  "gopls":{
+    "buildFlags": ["-tags=use_nwaku,gowaku_skip_migrations,gowaku_no_rln"]
+  }
 }
diff --git a/Makefile b/Makefile
index d4be1f241..9f3cca02b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,6 @@
 .PHONY: statusgo all test clean help
 .PHONY: statusgo-android statusgo-ios
+.PHONY: build-libwaku test-libwaku clean-libwaku rebuild-libwaku

 # Clear any GOROOT set outside of the Nix shell
 export GOROOT=
@@ -60,6 +61,10 @@ GIT_AUTHOR ?= $(shell git config user.email || echo $$USER)
 ENABLE_METRICS ?= true
 BUILD_TAGS ?= gowaku_no_rln

+ifeq ($(USE_NWAKU), true)
+BUILD_TAGS += use_nwaku
+endif
+
 BUILD_FLAGS ?= -ldflags="-X github.com/status-im/status-go/vendor/github.com/ethereum/go-ethereum/metrics.EnabledStr=$(ENABLE_METRICS)"

 BUILD_FLAGS_MOBILE ?=
@@ -207,8 +212,19 @@ statusgo-library: ##@cross-compile Build status-go as static library for current
	@echo "Static library built:"
	@ls -la build/bin/libstatus.*

-statusgo-shared-library: generate
-statusgo-shared-library: ##@cross-compile Build status-go as shared library for current platform
+
+LIBWAKU := third_party/nwaku/build/libwaku.$(GOBIN_SHARED_LIB_EXT)
+$(LIBWAKU):
+	@echo "Building libwaku"
+	$(MAKE) -C third_party/nwaku update || { echo "nwaku make update failed"; exit 1; }
+	$(MAKE) -C ./third_party/nwaku libwaku
+
+build-libwaku: $(LIBWAKU)
+
+statusgo-shared-library: generate ##@cross-compile Build status-go as shared library for current platform
+ifeq ($(USE_NWAKU),true)
+	$(MAKE) $(LIBWAKU)
+endif
	## cmd/library/README.md explains the magic incantation behind this
	mkdir -p build/bin/statusgo-lib
	go run cmd/library/*.go > build/bin/statusgo-lib/main.go
@@ -291,9 +307,38 @@ lint-fix:
		-w {} \;
	$(MAKE) vendor

+mock: ##@other Regenerate mocks
+	mockgen -package=fake -destination=transactions/fake/mock.go -source=transactions/fake/txservice.go
+	mockgen -package=status -destination=services/status/account_mock.go -source=services/status/service.go
+	mockgen -package=peer -destination=services/peer/discoverer_mock.go -source=services/peer/service.go
+	mockgen -package=mock_transactor -destination=transactions/mock_transactor/transactor.go -source=transactions/transactor.go
+	mockgen -package=mock_pathprocessor -destination=services/wallet/router/pathprocessor/mock_pathprocessor/processor.go -source=services/wallet/router/pathprocessor/processor.go
+	mockgen -package=mock_bridge -destination=services/wallet/bridge/mock_bridge/bridge.go -source=services/wallet/bridge/bridge.go
+	mockgen -package=mock_client -destination=rpc/chain/mock/client/client.go -source=rpc/chain/client.go
+	mockgen -package=mock_token -destination=services/wallet/token/mock/token/tokenmanager.go -source=services/wallet/token/token.go
+	mockgen -package=mock_thirdparty -destination=services/wallet/thirdparty/mock/types.go -source=services/wallet/thirdparty/types.go
+	mockgen -package=mock_balance_persistence -destination=services/wallet/token/mock/balance_persistence/balance_persistence.go -source=services/wallet/token/balance_persistence.go
+	mockgen -package=mock_network -destination=rpc/network/mock/network.go -source=rpc/network/network.go
+	mockgen -package=mock_rpcclient -destination=rpc/mock/client/client.go -source=rpc/client.go
+	mockgen -package=mock_collectibles -destination=services/wallet/collectibles/mock/collection_data_db.go -source=services/wallet/collectibles/collection_data_db.go
+	mockgen -package=mock_collectibles -destination=services/wallet/collectibles/mock/collectible_data_db.go -source=services/wallet/collectibles/collectible_data_db.go
+	mockgen -package=mock_thirdparty -destination=services/wallet/thirdparty/mock/collectible_types.go -source=services/wallet/thirdparty/collectible_types.go
+	mockgen -package=mock_paraswap -destination=services/wallet/thirdparty/paraswap/mock/types.go -source=services/wallet/thirdparty/paraswap/types.go
+	mockgen -package=mock_onramp -destination=services/wallet/onramp/mock/types.go -source=services/wallet/onramp/types.go
+
+
 docker-test: ##@tests Run tests in a docker container with golang.
	docker run --privileged --rm -it -v "$(PWD):$(DOCKER_TEST_WORKDIR)" -w "$(DOCKER_TEST_WORKDIR)" $(DOCKER_TEST_IMAGE) go test ${ARGS}

+test-libwaku: | $(LIBWAKU)
+	go test -tags '$(BUILD_TAGS) use_nwaku' -run TestBasicWakuV2 ./wakuv2/... -count 1 -v -json | jq -r '.Output'
+
+clean-libwaku:
+	@echo "Removing libwaku"
+	rm $(LIBWAKU)
+
+rebuild-libwaku: | clean-libwaku $(LIBWAKU)
+
 test: test-unit ##@tests Run basic, short tests during development

 test-unit-prep: generate
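
The new targets tie the Go build to the nwaku submodule: `make statusgo-shared-library USE_NWAKU=true` first builds `third_party/nwaku/build/libwaku.$(GOBIN_SHARED_LIB_EXT)` and appends `use_nwaku` to BUILD_TAGS, which the gopls `buildFlags` added to .vscode/settings.json mirror for editor tooling. A minimal sketch of the compile-time switch such a tag enables (file and function names are illustrative, not part of this diff):

    //go:build use_nwaku

    package wakuv2

    // This file only compiles when BUILD_TAGS contains use_nwaku, i.e. after
    // `make statusgo-shared-library USE_NWAKU=true` has produced libwaku from
    // the third_party/nwaku submodule. A sibling file guarded by
    // `//go:build !use_nwaku` would keep the go-waku code path.

    // wakuBackendName reports which backend this binary was compiled against.
    func wakuBackendName() string {
    	return "nwaku (libwaku from third_party/nwaku)"
    }
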
diff --git a/cmd/ping-community/main.go b/cmd/ping-community/main.go
index dc49d6b04..6f9f6c8ce 100644
--- a/cmd/ping-community/main.go
+++ b/cmd/ping-community/main.go
@@ -24,13 +24,13 @@ import (
 	"github.com/status-im/status-go/multiaccounts"
 	"github.com/status-im/status-go/multiaccounts/accounts"
 	"github.com/status-im/status-go/multiaccounts/settings"
+	"github.com/status-im/status-go/wakuv2"

 	"github.com/status-im/status-go/cmd/utils"
 	"github.com/status-im/status-go/logutils"
 	"github.com/status-im/status-go/params"
 	"github.com/status-im/status-go/protocol"
 	"github.com/status-im/status-go/protocol/common"
-	"github.com/status-im/status-go/protocol/common/shard"
 	"github.com/status-im/status-go/protocol/identity/alias"
 	"github.com/status-im/status-go/protocol/protobuf"
 	wakuextn "github.com/status-im/status-go/services/wakuext"
@@ -49,8 +49,8 @@ var (
 	seedPhrase   = flag.String("seed-phrase", "", "Seed phrase")
 	version      = flag.Bool("version", false, "Print version and dump configuration")
 	communityID  = flag.String("community-id", "", "The id of the community")
-	shardCluster = flag.Int("shard-cluster", shard.MainStatusShardCluster, "The shard cluster in which the of the community is published")
-	shardIndex   = flag.Int("shard-index", shard.DefaultShardIndex, "The shard index in which the community is published")
+	shardCluster = flag.Int("shard-cluster", wakuv2.MainStatusShardCluster, "The shard cluster in which the community is published")
+	shardIndex   = flag.Int("shard-index", wakuv2.DefaultShardIndex, "The shard index in which the community is published")
 	chatID       = flag.String("chat-id", "", "The id of the chat")

 	dataDir = flag.String("dir", getDefaultDataDir(), "Directory used by node to store data")
@@ -152,9 +152,9 @@ func main() {

 	messenger := wakuextservice.Messenger()

-	var s *shard.Shard = nil
+	var s *wakuv2.Shard = nil
 	if shardCluster != nil && shardIndex != nil {
-		s = &shard.Shard{
+		s = &wakuv2.Shard{
 			Cluster: uint16(*shardCluster),
 			Index:   uint16(*shardIndex),
 		}
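
`wakuv2.Shard` replaces the old `protocol/common/shard.Shard` throughout this change. A small sketch of the expected shape, assuming the type keeps the `Cluster`/`Index` pair and the `PubsubTopic()` helper of the type it replaces, and that topics follow Waku's static-sharding convention `/waku/2/rs/<cluster>/<index>`:

    package main

    import (
    	"fmt"

    	"github.com/status-im/status-go/wakuv2"
    )

    func main() {
    	// The defaults referenced by the flags above: the main Status shard
    	// cluster and the default community shard index.
    	s := &wakuv2.Shard{
    		Cluster: wakuv2.MainStatusShardCluster,
    		Index:   wakuv2.DefaultShardIndex,
    	}
    	// Assumed static-sharding layout: /waku/2/rs/<cluster>/<index>.
    	fmt.Println(s.PubsubTopic())
    }
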
diff --git a/eth-node/bridge/geth/waku.go b/eth-node/bridge/geth/waku.go
index 214e8e3e6..cac2e06f5 100644
--- a/eth-node/bridge/geth/waku.go
+++ b/eth-node/bridge/geth/waku.go
@@ -63,11 +63,6 @@ func (w *GethWakuWrapper) StopDiscV5() error {
 	return errors.New("not available in WakuV1")
 }

-// PeerCount function only added for compatibility with waku V2
-func (w *GethWakuWrapper) AddStorePeer(address multiaddr.Multiaddr) (peer.ID, error) {
-	return "", errors.New("not available in WakuV1")
-}
-
 // SubscribeToPubsubTopic function only added for compatibility with waku V2
 func (w *GethWakuWrapper) SubscribeToPubsubTopic(topic string, optPublicKey *ecdsa.PublicKey) error {
 	// not available in WakuV1
diff --git a/eth-node/bridge/geth/wakuv2.go b/eth-node/bridge/geth/wakuv2.go
index 39243e601..8b6891519 100644
--- a/eth-node/bridge/geth/wakuv2.go
+++ b/eth-node/bridge/geth/wakuv2.go
@@ -206,10 +206,6 @@ func (w *gethWakuV2Wrapper) RemovePubsubTopicKey(topic string) error {
 	return w.waku.RemovePubsubTopicKey(topic)
 }

-func (w *gethWakuV2Wrapper) AddStorePeer(address multiaddr.Multiaddr) (peer.ID, error) {
-	return w.waku.AddStorePeer(address)
-}
-
 func (w *gethWakuV2Wrapper) AddRelayPeer(address multiaddr.Multiaddr) (peer.ID, error) {
 	return w.waku.AddRelayPeer(address)
 }
@@ -227,7 +223,7 @@ func (w *gethWakuV2Wrapper) DialPeerByID(peerID peer.ID) error {
 }

 func (w *gethWakuV2Wrapper) ListenAddresses() ([]multiaddr.Multiaddr, error) {
-	return w.waku.ListenAddresses(), nil
+	return w.waku.ListenAddresses()
 }

 func (w *gethWakuV2Wrapper) RelayPeersByTopic(topic string) (*types.PeerList, error) {
diff --git a/eth-node/types/waku.go b/eth-node/types/waku.go
index 9e409ded9..1d1603572 100644
--- a/eth-node/types/waku.go
+++ b/eth-node/types/waku.go
@@ -134,8 +134,6 @@ type Waku interface {

 	RemovePubsubTopicKey(topic string) error

-	AddStorePeer(address multiaddr.Multiaddr) (peer.ID, error)
-
 	AddRelayPeer(address multiaddr.Multiaddr) (peer.ID, error)

 	DialPeer(address multiaddr.Multiaddr) error
diff --git a/nix/shell.nix b/nix/shell.nix
index c7e750a4b..0ebd3e3c9 100644
--- a/nix/shell.nix
+++ b/nix/shell.nix
@@ -26,7 +26,7 @@ in mkShell {
   buildInputs = with pkgs; [
     git jq which
-    go golangci-lint go-junit-report gopls go-bindata gomobileMod codecov-cli go-generate-fast
+    go golangci-lint go-junit-report gopls go-bindata gomobileMod codecov-cli go-generate-fast openssl
     mockgen protobuf3_20 protoc-gen-go gotestsum go-modvendor openjdk
   ] ++ lib.optionals (stdenv.isDarwin) [ xcodeWrapper ];
diff --git a/node/status_node_services.go b/node/status_node_services.go
index cbd527ad0..2b2f5350d 100644
--- a/node/status_node_services.go
+++ b/node/status_node_services.go
@@ -12,7 +12,6 @@ import (

 	"go.uber.org/zap"

-	"github.com/status-im/status-go/protocol/common/shard"
 	"github.com/status-im/status-go/server"
 	"github.com/status-im/status-go/signal"
 	"github.com/status-im/status-go/transactions"
@@ -338,7 +337,7 @@ func (b *StatusNode) wakuV2Service(nodeConfig *params.NodeConfig) (*wakuv2.Waku,
 		Nameserver:                       nodeConfig.WakuV2Config.Nameserver,
 		UDPPort:                          nodeConfig.WakuV2Config.UDPPort,
 		AutoUpdate:                       nodeConfig.WakuV2Config.AutoUpdate,
-		DefaultShardPubsubTopic:          shard.DefaultShardPubsubTopic(),
+		DefaultShardPubsubTopic:          wakuv2.DefaultShardPubsubTopic(),
 		TelemetryServerURL:               nodeConfig.WakuV2Config.TelemetryServerURL,
 		ClusterID:                        nodeConfig.ClusterConfig.ClusterID,
 		EnableMissingMessageVerification: nodeConfig.WakuV2Config.EnableMissingMessageVerification,
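
Alongside dropping `AddStorePeer` from the `types.Waku` interface, `ListenAddresses` now propagates an error instead of the wrapper appending a hard-coded `nil` (address retrieval can fail once the node may be backed by libwaku). A sketch of the updated caller contract; the interface and helper below are illustrative stand-ins for the subset of `types.Waku` involved:

    package main

    import (
    	"errors"
    	"fmt"

    	"github.com/multiformats/go-multiaddr"
    )

    // listener is a stand-in for the slice of the Waku interface used here.
    type listener interface {
    	ListenAddresses() ([]multiaddr.Multiaddr, error)
    }

    func firstListenAddress(w listener) (multiaddr.Multiaddr, error) {
    	addrs, err := w.ListenAddresses() // may now fail, e.g. against the nwaku backend
    	if err != nil {
    		return nil, fmt.Errorf("failed to obtain listen addresses: %w", err)
    	}
    	if len(addrs) == 0 {
    		return nil, errors.New("node has no listen addresses")
    	}
    	return addrs[0], nil
    }
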
diff --git a/protocol/communities/community.go b/protocol/communities/community.go
index 4e6a865f7..cdc7d9d86 100644
--- a/protocol/communities/community.go
+++ b/protocol/communities/community.go
@@ -23,12 +23,12 @@ import (
 	"github.com/status-im/status-go/eth-node/types"
 	"github.com/status-im/status-go/images"
 	"github.com/status-im/status-go/protocol/common"
-	"github.com/status-im/status-go/protocol/common/shard"
 	community_token "github.com/status-im/status-go/protocol/communities/token"
 	"github.com/status-im/status-go/protocol/protobuf"
 	"github.com/status-im/status-go/protocol/requests"
 	"github.com/status-im/status-go/protocol/v1"
 	"github.com/status-im/status-go/server"
+	"github.com/status-im/status-go/wakuv2"
 )

 const signatureLength = 65
@@ -55,7 +55,7 @@ type Config struct {
 	RequestsToJoin                []*RequestToJoin
 	MemberIdentity                *ecdsa.PrivateKey
 	EventsData                    *EventsData
-	Shard                         *shard.Shard
+	Shard                         *wakuv2.Shard
 	PubsubTopicPrivateKey         *ecdsa.PrivateKey
 	LastOpenedAt                  int64
 }
@@ -172,7 +172,7 @@ func (o *Community) MarshalPublicAPIJSON() ([]byte, error) {
 		ActiveMembersCount uint64        `json:"activeMembersCount"`
 		PubsubTopic        string        `json:"pubsubTopic"`
 		PubsubTopicKey     string        `json:"pubsubTopicKey"`
-		Shard              *shard.Shard  `json:"shard"`
+		Shard              *wakuv2.Shard `json:"shard"`
 	}{
 		ID:       o.ID(),
 		Verified: o.config.Verified,
@@ -308,7 +308,7 @@ func (o *Community) MarshalJSON() ([]byte, error) {
 		ActiveMembersCount uint64        `json:"activeMembersCount"`
 		PubsubTopic        string        `json:"pubsubTopic"`
 		PubsubTopicKey     string        `json:"pubsubTopicKey"`
-		Shard              *shard.Shard  `json:"shard"`
+		Shard              *wakuv2.Shard `json:"shard"`
 		LastOpenedAt       int64         `json:"lastOpenedAt"`
 		Clock              uint64        `json:"clock"`
 	}{
@@ -461,7 +461,7 @@ func (o *Community) DescriptionText() string {
 	return ""
 }

-func (o *Community) Shard() *shard.Shard {
+func (o *Community) Shard() *wakuv2.Shard {
 	if o != nil && o.config != nil {
 		return o.config.Shard
 	}
diff --git a/protocol/communities/manager.go b/protocol/communities/manager.go
index bce788afb..c5c8f3fb8 100644
--- a/protocol/communities/manager.go
+++ b/protocol/communities/manager.go
@@ -31,7 +31,6 @@ import (
 	multiaccountscommon "github.com/status-im/status-go/multiaccounts/common"
 	"github.com/status-im/status-go/params"
 	"github.com/status-im/status-go/protocol/common"
-	"github.com/status-im/status-go/protocol/common/shard"
 	community_token "github.com/status-im/status-go/protocol/communities/token"
 	"github.com/status-im/status-go/protocol/encryption"
 	"github.com/status-im/status-go/protocol/ens"
@@ -46,6 +45,7 @@ import (
 	"github.com/status-im/status-go/services/wallet/token"
 	"github.com/status-im/status-go/services/wallet/wallettypes"
 	"github.com/status-im/status-go/signal"
+	"github.com/status-im/status-go/wakuv2"
 )

 type Publisher interface {
@@ -768,8 +768,8 @@ func (m *Manager) All() ([]*Community, error) {
 }

 type CommunityShard struct {
-	CommunityID string       `json:"communityID"`
-	Shard       *shard.Shard `json:"shard"`
+	CommunityID string        `json:"communityID"`
+	Shard       *wakuv2.Shard `json:"shard"`
 }

 type CuratedCommunities struct {
@@ -1577,7 +1577,7 @@ func (m *Manager) DeleteCommunity(id types.HexBytes) error {
 	return m.persistence.DeleteCommunitySettings(id)
 }

-func (m *Manager) updateShard(community *Community, shard *shard.Shard, clock uint64) error {
+func (m *Manager) updateShard(community *Community, shard *wakuv2.Shard, clock uint64) error {
 	community.config.Shard = shard
 	if shard == nil {
 		return m.persistence.DeleteCommunityShard(community.ID())
@@ -1586,7 +1586,7 @@ func (m *Manager) updateShard(community *Community, shard *shard.Shard, clock ui
 	return m.persistence.SaveCommunityShard(community.ID(), shard, clock)
 }

-func (m *Manager) UpdateShard(community *Community, shard *shard.Shard, clock uint64) error {
+func (m *Manager) UpdateShard(community *Community, shard *wakuv2.Shard, clock uint64) error {
 	m.communityLock.Lock(community.ID())
 	defer m.communityLock.Unlock(community.ID())

@@ -1594,7 +1594,7 @@ func (m *Manager) UpdateShard(community *Community, shard *shard.Shard, clock ui
 }

 // SetShard assigns a shard to a community
-func (m *Manager) SetShard(communityID types.HexBytes, shard *shard.Shard) (*Community, error) {
+func (m *Manager) SetShard(communityID types.HexBytes, shard *wakuv2.Shard) (*Community, error) {
 	m.communityLock.Lock(communityID)
 	defer m.communityLock.Unlock(communityID)

@@ -2207,11 +2207,11 @@ func (m *Manager) HandleCommunityDescriptionMessage(signer *ecdsa.PublicKey, des
 	if err != nil {
 		return nil, err
 	}
-	var cShard *shard.Shard
+	var cShard *wakuv2.Shard
 	if communityShard == nil {
-		cShard = &shard.Shard{Cluster: shard.MainStatusShardCluster, Index: shard.DefaultShardIndex}
+		cShard = &wakuv2.Shard{Cluster: wakuv2.MainStatusShardCluster, Index: wakuv2.DefaultShardIndex}
 	} else {
-		cShard = shard.FromProtobuff(communityShard)
+		cShard = wakuv2.FromProtobuff(communityShard)
 	}
 	config := Config{
 		CommunityDescription: processedDescription,
@@ -3996,11 +3996,11 @@ func (m *Manager) GetByIDString(idString string) (*Community, error) {
 	return m.GetByID(id)
 }

-func (m *Manager) GetCommunityShard(communityID types.HexBytes) (*shard.Shard, error) {
+func (m *Manager) GetCommunityShard(communityID types.HexBytes) (*wakuv2.Shard, error) {
 	return m.persistence.GetCommunityShard(communityID)
 }

-func (m *Manager) SaveCommunityShard(communityID types.HexBytes, shard *shard.Shard, clock uint64) error {
+func (m *Manager) SaveCommunityShard(communityID types.HexBytes, shard *wakuv2.Shard, clock uint64) error {
 	m.communityLock.Lock(communityID)
 	defer m.communityLock.Unlock(communityID)
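
`HandleCommunityDescriptionMessage` falls back to the main cluster and default index when no shard info accompanies a description, and otherwise converts the protobuf message. A plausible shape for `wakuv2.FromProtobuff`, assuming it mirrors the nil-safe `shard.FromProtobuff` it replaces (the protobuf field types are an assumption):

    package wakuv2

    import "github.com/status-im/status-go/protocol/protobuf"

    // FromProtobuff converts the wire representation into a *Shard. Sketch
    // under the assumption that protobuf.Shard carries int32 Cluster/Index
    // fields; a missing message maps to a nil shard, letting callers such as
    // HandleCommunityDescriptionMessage apply their own defaults.
    func FromProtobuff(p *protobuf.Shard) *Shard {
    	if p == nil {
    		return nil
    	}
    	return &Shard{
    		Cluster: uint16(p.Cluster),
    		Index:   uint16(p.Index),
    	}
    }
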
diff --git a/protocol/communities/persistence.go b/protocol/communities/persistence.go
index 84e20645e..a417b81a2 100644
--- a/protocol/communities/persistence.go
+++ b/protocol/communities/persistence.go
@@ -16,11 +16,11 @@ import (
 	"github.com/status-im/status-go/eth-node/crypto"
 	"github.com/status-im/status-go/eth-node/types"
 	"github.com/status-im/status-go/protocol/common"
-	"github.com/status-im/status-go/protocol/common/shard"
 	"github.com/status-im/status-go/protocol/communities/token"
 	"github.com/status-im/status-go/protocol/encryption"
 	"github.com/status-im/status-go/protocol/protobuf"
 	"github.com/status-im/status-go/services/wallet/bigint"
+	"github.com/status-im/status-go/wakuv2"
 )

 type Persistence struct {
@@ -1766,7 +1766,7 @@ func (p *Persistence) AllNonApprovedCommunitiesRequestsToJoin() ([]*RequestToJoi
 	return nonApprovedRequestsToJoin, nil
 }

-func (p *Persistence) SaveCommunityShard(communityID types.HexBytes, shard *shard.Shard, clock uint64) error {
+func (p *Persistence) SaveCommunityShard(communityID types.HexBytes, shard *wakuv2.Shard, clock uint64) error {
 	var cluster, index *uint16

 	if shard != nil {
@@ -1801,7 +1801,7 @@ func (p *Persistence) SaveCommunityShard(communityID types.HexBytes, shard *shar
 }

 // if data will not be found, will return sql.ErrNoRows. Must be handled on the caller side
-func (p *Persistence) GetCommunityShard(communityID types.HexBytes) (*shard.Shard, error) {
+func (p *Persistence) GetCommunityShard(communityID types.HexBytes) (*wakuv2.Shard, error) {
 	var cluster sql.NullInt64
 	var index sql.NullInt64
 	err := p.db.QueryRow(`SELECT shard_cluster, shard_index FROM communities_shards WHERE community_id = ?`,
@@ -1815,7 +1815,7 @@ func (p *Persistence) GetCommunityShard(communityID types.HexBytes) (*shard.Shar
 		return nil, nil
 	}

-	return &shard.Shard{
+	return &wakuv2.Shard{
 		Cluster: uint16(cluster.Int64),
 		Index:   uint16(index.Int64),
 	}, nil
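
The persistence layer stores the shard as nullable `shard_cluster`/`shard_index` columns: saving a nil shard writes NULLs, a missing row surfaces as `sql.ErrNoRows` (per the comment above), and a NULL row comes back as a nil shard with a nil error. A caller-side sketch; `communityShardOrDefault` is a hypothetical helper, not part of the codebase:

    package communities_example

    import (
    	"database/sql"

    	"github.com/status-im/status-go/eth-node/types"
    	"github.com/status-im/status-go/protocol/communities"
    	"github.com/status-im/status-go/wakuv2"
    )

    func communityShardOrDefault(p *communities.Persistence, id types.HexBytes) (*wakuv2.Shard, error) {
    	s, err := p.GetCommunityShard(id)
    	if err == sql.ErrNoRows {
    		// No row at all: shard info was never stored for this community.
    		return &wakuv2.Shard{
    			Cluster: wakuv2.MainStatusShardCluster,
    			Index:   wakuv2.DefaultShardIndex,
    		}, nil
    	}
    	if err != nil {
    		return nil, err
    	}
    	// s may be nil here: NULL cluster/index means the community was
    	// explicitly saved without a shard.
    	return s, nil
    }
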
diff --git a/protocol/communities/persistence_mapping.go b/protocol/communities/persistence_mapping.go
index a8964187e..8e743c571 100644
--- a/protocol/communities/persistence_mapping.go
+++ b/protocol/communities/persistence_mapping.go
@@ -7,8 +7,8 @@ import (
 	"github.com/status-im/status-go/eth-node/crypto"
 	"github.com/status-im/status-go/protocol/common"
-	"github.com/status-im/status-go/protocol/common/shard"
 	"github.com/status-im/status-go/server"
+	"github.com/status-im/status-go/wakuv2"
 )

 func communityToRecord(community *Community) (*CommunityRecord, error) {
@@ -118,9 +118,9 @@ func recordBundleToCommunity(
 		}
 	}

-	var s *shard.Shard = nil
+	var s *wakuv2.Shard = nil
 	if r.community.shardCluster != nil && r.community.shardIndex != nil {
-		s = &shard.Shard{
+		s = &wakuv2.Shard{
 			Cluster: uint16(*r.community.shardCluster),
 			Index:   uint16(*r.community.shardIndex),
 		}
diff --git a/protocol/communities/persistence_test.go b/protocol/communities/persistence_test.go
index 42053ecb9..19947b181 100644
--- a/protocol/communities/persistence_test.go
+++ b/protocol/communities/persistence_test.go
@@ -15,13 +15,13 @@ import (
 	"github.com/status-im/status-go/eth-node/crypto"
 	"github.com/status-im/status-go/eth-node/types"
 	"github.com/status-im/status-go/protocol/common"
-	"github.com/status-im/status-go/protocol/common/shard"
 	"github.com/status-im/status-go/protocol/communities/token"
 	"github.com/status-im/status-go/protocol/encryption"
 	"github.com/status-im/status-go/protocol/protobuf"
 	"github.com/status-im/status-go/protocol/sqlite"
 	"github.com/status-im/status-go/services/wallet/bigint"
 	"github.com/status-im/status-go/t/helpers"
+	"github.com/status-im/status-go/wakuv2"
 )

 func TestPersistenceSuite(t *testing.T) {
@@ -787,7 +787,7 @@ func (s *PersistenceSuite) TestSaveShardInfo() {
 	s.Require().Nil(resultShard)

 	// not nil shard
-	expectedShard := &shard.Shard{
+	expectedShard := &wakuv2.Shard{
 		Cluster: 1,
 		Index:   2,
 	}
diff --git a/protocol/communities_messenger_token_permissions_test.go b/protocol/communities_messenger_token_permissions_test.go
index bbf05984c..8cfe2b74a 100644
--- a/protocol/communities_messenger_token_permissions_test.go
+++ b/protocol/communities_messenger_token_permissions_test.go
@@ -25,13 +25,13 @@ import (
 	"github.com/status-im/status-go/eth-node/types"
 	"github.com/status-im/status-go/params"
 	"github.com/status-im/status-go/protocol/common"
-	"github.com/status-im/status-go/protocol/common/shard"
 	"github.com/status-im/status-go/protocol/communities"
 	"github.com/status-im/status-go/protocol/protobuf"
 	"github.com/status-im/status-go/protocol/requests"
 	"github.com/status-im/status-go/protocol/transport"
 	"github.com/status-im/status-go/protocol/tt"
 	"github.com/status-im/status-go/services/wallet/thirdparty"
+	"github.com/status-im/status-go/wakuv2"
 )

 const testChainID1 = 1
@@ -488,11 +488,12 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestBecomeMemberPermissions(
 	cfg := testWakuV2Config{
 		logger:      s.logger.Named("store-node-waku"),
 		enableStore: false,
-		clusterID:   shard.MainStatusShardCluster,
+		clusterID:   wakuv2.MainStatusShardCluster,
 	}
 	wakuStoreNode := NewTestWakuV2(&s.Suite, cfg)

-	storeNodeListenAddresses := wakuStoreNode.ListenAddresses()
+	storeNodeListenAddresses, err := wakuStoreNode.ListenAddresses()
+	s.Require().NoError(err)
 	s.Require().LessOrEqual(1, len(storeNodeListenAddresses))

 	storeNodeAddress := storeNodeListenAddresses[0]
diff --git a/protocol/linkpreview_unfurler_status.go b/protocol/linkpreview_unfurler_status.go
index d4e6e8e4d..6da0194ee 100644
--- a/protocol/linkpreview_unfurler_status.go
+++ b/protocol/linkpreview_unfurler_status.go
@@ -8,8 +8,8 @@ import (
 	"github.com/status-im/status-go/api/multiformat"
 	"github.com/status-im/status-go/images"
 	"github.com/status-im/status-go/protocol/common"
-	"github.com/status-im/status-go/protocol/common/shard"
 	"github.com/status-im/status-go/protocol/communities"
+	"github.com/status-im/status-go/wakuv2"
 )

 type StatusUnfurler struct {
@@ -83,7 +83,7 @@ func (u *StatusUnfurler) buildContactData(publicKey string) (*common.StatusConta
 	return c, nil
 }

-func (u *StatusUnfurler) buildCommunityData(communityID string, shard *shard.Shard) (*communities.Community, *common.StatusCommunityLinkPreview, error) {
+func (u *StatusUnfurler) buildCommunityData(communityID string, shard *wakuv2.Shard) (*communities.Community, *common.StatusCommunityLinkPreview, error) {
 	// This automatically checks the database
 	community, err := u.m.FetchCommunity(&FetchCommunityRequest{
 		CommunityKey:    communityID,
@@ -108,7 +108,7 @@ func (u *StatusUnfurler) buildCommunityData(communityID string, shard *shard.Sha
 	return community, statusCommunityLinkPreviews, nil
 }

-func (u *StatusUnfurler) buildChannelData(channelUUID string, communityID string, communityShard *shard.Shard) (*common.StatusCommunityChannelLinkPreview, error) {
+func (u *StatusUnfurler) buildChannelData(channelUUID string, communityID string, communityShard *wakuv2.Shard) (*common.StatusCommunityChannelLinkPreview, error) {
 	community, communityData, err := u.buildCommunityData(communityID, communityShard)
 	if err != nil {
 		return nil, fmt.Errorf("failed to build channel community data: %w", err)
diff --git a/protocol/messenger.go b/protocol/messenger.go
index 77f5f2c02..43c2169fe 100644
--- a/protocol/messenger.go
+++ b/protocol/messenger.go
@@ -830,18 +830,11 @@ func (m *Messenger) Start() (*MessengerResponse, error) {
 	}

 	response := &MessengerResponse{}
-	storenodes, err := m.AllMailservers()
+	response.Mailservers, err = m.AllMailservers()
 	if err != nil {
 		return nil, err
 	}

-	err = m.setupStorenodes(storenodes)
-	if err != nil {
-		return nil, err
-	}
-
-	response.Mailservers = storenodes
-
 	m.transport.SetStorenodeConfigProvider(m)

 	if err := m.communityStorenodes.ReloadFromDB(); err != nil {
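
`Start()` no longer dials every known storenode up front: the messenger registers itself as the storenode-config provider and go-waku's storenode cycle pulls peers on demand (see the `GetActiveStorenodePeerInfo` addition to the vendored history/cycle.go below). The exact provider interface is not part of this diff, so the following is a hypothetical sketch of the contract implied by `SetStorenodeConfigProvider(m)`:

    package protocol

    import "github.com/libp2p/go-libp2p/core/peer"

    // Assumed shape of the go-waku provider contract; names and signatures
    // are illustrative, not confirmed by this diff.
    type storenodeConfigProviderSketch interface {
    	UseStorenodes() (bool, error)         // is storenode usage enabled?
    	Storenodes() ([]peer.AddrInfo, error) // currently configured storenodes
    }

    // The Messenger already knows its mailservers, so the provider reduces to
    // a thin adapter instead of the removed setupStorenodes/AddStorePeer dialing.
    func storenodesFromMailservers(m *Messenger) ([]peer.AddrInfo, error) {
    	mailservers, err := m.AllMailservers()
    	if err != nil {
    		return nil, err
    	}
    	result := make([]peer.AddrInfo, 0, len(mailservers))
    	for _, ms := range mailservers {
    		peerInfo, err := ms.PeerInfo() // same accessor the removed code used
    		if err != nil {
    			return nil, err
    		}
    		result = append(result, peerInfo)
    	}
    	return result, nil
    }
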
"github.com/status-im/status-go/protocol/common/shard" "github.com/status-im/status-go/protocol/communities" "github.com/status-im/status-go/protocol/communities/token" "github.com/status-im/status-go/protocol/discord" @@ -89,10 +89,10 @@ const ( type FetchCommunityRequest struct { // CommunityKey should be either a public or a private community key - CommunityKey string `json:"communityKey"` - Shard *shard.Shard `json:"shard"` - TryDatabase bool `json:"tryDatabase"` - WaitForResponse bool `json:"waitForResponse"` + CommunityKey string `json:"communityKey"` + Shard *wakuv2.Shard `json:"shard"` + TryDatabase bool `json:"tryDatabase"` + WaitForResponse bool `json:"waitForResponse"` } func (r *FetchCommunityRequest) Validate() error { @@ -346,7 +346,7 @@ func (m *Messenger) handleCommunitiesSubscription(c chan *communities.Subscripti Sender: community.PrivateKey(), SkipEncryptionLayer: true, MessageType: protobuf.ApplicationMetadataMessage_COMMUNITY_USER_KICKED, - PubsubTopic: shard.DefaultNonProtectedPubsubTopic(), + PubsubTopic: wakuv2.DefaultNonProtectedPubsubTopic(), } _, err = m.sender.SendPrivate(context.Background(), pk, rawMessage) @@ -681,7 +681,7 @@ func (m *Messenger) handleCommunitySharedAddressesRequest(state *ReceivedMessage CommunityID: community.ID(), SkipEncryptionLayer: true, MessageType: protobuf.ApplicationMetadataMessage_COMMUNITY_SHARED_ADDRESSES_RESPONSE, - PubsubTopic: shard.DefaultNonProtectedPubsubTopic(), + PubsubTopic: wakuv2.DefaultNonProtectedPubsubTopic(), ResendType: common.ResendTypeRawMessage, ResendMethod: common.ResendMethodSendPrivate, Recipients: []*ecdsa.PublicKey{signer}, @@ -1044,7 +1044,7 @@ func (m *Messenger) JoinCommunity(ctx context.Context, communityID types.HexByte return mr, nil } -func (m *Messenger) subscribeToCommunityShard(communityID []byte, shard *shard.Shard) error { +func (m *Messenger) subscribeToCommunityShard(communityID []byte, shard *wakuv2.Shard) error { if m.transport.WakuVersion() != 2 { return nil } @@ -1065,7 +1065,7 @@ func (m *Messenger) subscribeToCommunityShard(communityID []byte, shard *shard.S return m.transport.SubscribeToPubsubTopic(pubsubTopic, pubK) } -func (m *Messenger) unsubscribeFromShard(shard *shard.Shard) error { +func (m *Messenger) unsubscribeFromShard(shard *wakuv2.Shard) error { if m.transport.WakuVersion() != 2 { return nil } @@ -1494,7 +1494,7 @@ func (m *Messenger) RequestToJoinCommunity(request *requests.RequestToJoinCommun ResendType: common.ResendTypeRawMessage, SkipEncryptionLayer: true, MessageType: protobuf.ApplicationMetadataMessage_COMMUNITY_REQUEST_TO_JOIN, - PubsubTopic: shard.DefaultNonProtectedPubsubTopic(), + PubsubTopic: wakuv2.DefaultNonProtectedPubsubTopic(), Priority: &common.HighPriority, } @@ -1872,7 +1872,7 @@ func (m *Messenger) CancelRequestToJoinCommunity(ctx context.Context, request *r CommunityID: community.ID(), SkipEncryptionLayer: true, MessageType: protobuf.ApplicationMetadataMessage_COMMUNITY_CANCEL_REQUEST_TO_JOIN, - PubsubTopic: shard.DefaultNonProtectedPubsubTopic(), + PubsubTopic: wakuv2.DefaultNonProtectedPubsubTopic(), ResendType: common.ResendTypeRawMessage, Priority: &common.HighPriority, } @@ -2028,7 +2028,7 @@ func (m *Messenger) acceptRequestToJoinCommunity(requestToJoin *communities.Requ CommunityID: community.ID(), SkipEncryptionLayer: true, MessageType: protobuf.ApplicationMetadataMessage_COMMUNITY_REQUEST_TO_JOIN_RESPONSE, - PubsubTopic: shard.DefaultNonProtectedPubsubTopic(), + PubsubTopic: wakuv2.DefaultNonProtectedPubsubTopic(), ResendType: 
diff --git a/protocol/messenger_communities_sharding_test.go b/protocol/messenger_communities_sharding_test.go
index 307df61ad..962cd62b1 100644
--- a/protocol/messenger_communities_sharding_test.go
+++ b/protocol/messenger_communities_sharding_test.go
@@ -12,11 +12,11 @@ import (
 	gethbridge "github.com/status-im/status-go/eth-node/bridge/geth"
 	"github.com/status-im/status-go/eth-node/types"
 	"github.com/status-im/status-go/protocol/common"
-	"github.com/status-im/status-go/protocol/common/shard"
 	"github.com/status-im/status-go/protocol/communities"
 	"github.com/status-im/status-go/protocol/protobuf"
 	"github.com/status-im/status-go/protocol/requests"
 	"github.com/status-im/status-go/protocol/tt"
+	"github.com/status-im/status-go/wakuv2"
 )

 func TestMessengerCommunitiesShardingSuite(t *testing.T) {
@@ -108,7 +108,7 @@ func (s *MessengerCommunitiesShardingSuite) TearDownTest() {
 	_ = s.logger.Sync()
 }

-func (s *MessengerCommunitiesShardingSuite) testPostToCommunityChat(shard *shard.Shard, community *communities.Community, chat *Chat) {
+func (s *MessengerCommunitiesShardingSuite) testPostToCommunityChat(shard *wakuv2.Shard, community *communities.Community, chat *Chat) {
 	_, err := s.owner.SetCommunityShard(&requests.SetCommunityShard{
 		CommunityID: community.ID(),
 		Shard:       shard,
@@ -144,8 +144,8 @@ func (s *MessengerCommunitiesShardingSuite) TestPostToCommunityChat() {

 	// Members should be able to receive messages in a community with sharding enabled.
 	{
-		shard := &shard.Shard{
-			Cluster: shard.MainStatusShardCluster,
+		shard := &wakuv2.Shard{
+			Cluster: wakuv2.MainStatusShardCluster,
 			Index:   128,
 		}
 		s.testPostToCommunityChat(shard, community, chat)
@@ -153,8 +153,8 @@ func (s *MessengerCommunitiesShardingSuite) TestPostToCommunityChat() {

 	// Members should be able to receive messages in a community where the sharding configuration has been edited.
 	{
-		shard := &shard.Shard{
-			Cluster: shard.MainStatusShardCluster,
+		shard := &wakuv2.Shard{
+			Cluster: wakuv2.MainStatusShardCluster,
 			Index:   256,
 		}
 		s.testPostToCommunityChat(shard, community, chat)
@@ -162,8 +162,8 @@ func (s *MessengerCommunitiesShardingSuite) TestPostToCommunityChat() {

 	// Members should continue to receive messages in a community if it is moved back to default shard.
 	{
-		shard := &shard.Shard{
-			Cluster: shard.MainStatusShardCluster,
+		shard := &wakuv2.Shard{
+			Cluster: wakuv2.MainStatusShardCluster,
 			Index:   32,
 		}
 		s.testPostToCommunityChat(shard, community, chat)
@@ -176,8 +176,8 @@ func (s *MessengerCommunitiesShardingSuite) TestIgnoreOutdatedShardKey() {
 	advertiseCommunityToUserOldWay(&s.Suite, community, s.owner, s.alice)
 	joinCommunity(&s.Suite, community.ID(), s.owner, s.alice, alicePassword, []string{aliceAddress1})

-	shard := &shard.Shard{
-		Cluster: shard.MainStatusShardCluster,
+	shard := &wakuv2.Shard{
+		Cluster: wakuv2.MainStatusShardCluster,
 		Index:   128,
 	}
diff --git a/protocol/messenger_community_shard.go b/protocol/messenger_community_shard.go
index 8fdda061d..ca8f6abda 100644
--- a/protocol/messenger_community_shard.go
+++ b/protocol/messenger_community_shard.go
@@ -12,11 +12,11 @@ import (
 	"github.com/status-im/status-go/eth-node/crypto"
 	"github.com/status-im/status-go/eth-node/types"
 	"github.com/status-im/status-go/protocol/common"
-	"github.com/status-im/status-go/protocol/common/shard"
 	"github.com/status-im/status-go/protocol/communities"
 	"github.com/status-im/status-go/protocol/protobuf"
 	"github.com/status-im/status-go/protocol/transport"
 	v1protocol "github.com/status-im/status-go/protocol/v1"
+	"github.com/status-im/status-go/wakuv2"
 )

 func (m *Messenger) sendPublicCommunityShardInfo(community *communities.Community) error {
@@ -57,7 +57,7 @@ func (m *Messenger) sendPublicCommunityShardInfo(community *communities.Communit
 		// we don't want to wrap in an encryption layer message
 		SkipEncryptionLayer: true,
 		MessageType:         protobuf.ApplicationMetadataMessage_COMMUNITY_PUBLIC_SHARD_INFO,
-		PubsubTopic:         shard.DefaultNonProtectedPubsubTopic(), // it must be sent always to default shard pubsub topic
+		PubsubTopic:         wakuv2.DefaultNonProtectedPubsubTopic(), // it must always be sent to the default shard pubsub topic
 		Priority:            &common.HighPriority,
 	}
@@ -89,7 +89,7 @@ func (m *Messenger) HandleCommunityPublicShardInfo(state *ReceivedMessageState,
 		return err
 	}

-	err = m.communitiesManager.SaveCommunityShard(publicShardInfo.CommunityId, shard.FromProtobuff(publicShardInfo.Shard), publicShardInfo.Clock)
+	err = m.communitiesManager.SaveCommunityShard(publicShardInfo.CommunityId, wakuv2.FromProtobuff(publicShardInfo.Shard), publicShardInfo.Clock)
 	if err != nil && err != communities.ErrOldShardInfo {
 		logError(err)
 		return err
"github.com/status-im/status-go/protocol/common/shard" "github.com/status-im/status-go/protocol/communities" "github.com/status-im/status-go/protocol/transport" + "github.com/status-im/status-go/wakuv2" ) // InitFilters analyzes chats and contacts in order to setup filters @@ -24,7 +24,7 @@ func (m *Messenger) InitFilters() error { rand.Seed(time.Now().Unix()) // Community requests will arrive in this pubsub topic - if err := m.SubscribeToPubsubTopic(shard.DefaultNonProtectedPubsubTopic(), nil); err != nil { + if err := m.SubscribeToPubsubTopic(wakuv2.DefaultNonProtectedPubsubTopic(), nil); err != nil { return err } diff --git a/protocol/messenger_mailserver_cycle.go b/protocol/messenger_mailserver_cycle.go index 87f0ccbf3..f355c6e4a 100644 --- a/protocol/messenger_mailserver_cycle.go +++ b/protocol/messenger_mailserver_cycle.go @@ -4,8 +4,6 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "go.uber.org/zap" - "github.com/waku-org/go-waku/waku/v2/utils" - gocommon "github.com/status-im/status-go/common" "github.com/status-im/status-go/params" "github.com/status-im/status-go/services/mailservers" @@ -39,28 +37,6 @@ func (m *Messenger) AllMailservers() ([]mailservers.Mailserver, error) { return allMailservers, nil } -func (m *Messenger) setupStorenodes(storenodes []mailservers.Mailserver) error { - if m.transport.WakuVersion() != 2 { - return nil - } - - for _, storenode := range storenodes { - - peerInfo, err := storenode.PeerInfo() - if err != nil { - return err - } - - for _, addr := range utils.EncapsulatePeerID(peerInfo.ID, peerInfo.Addrs...) { - _, err := m.transport.AddStorePeer(addr) - if err != nil { - return err - } - } - } - return nil -} - func (m *Messenger) getFleet() (string, error) { var fleet string dbFleet, err := m.settings.GetFleet() diff --git a/protocol/messenger_peers.go b/protocol/messenger_peers.go index b73e543a1..3f1c5fa6a 100644 --- a/protocol/messenger_peers.go +++ b/protocol/messenger_peers.go @@ -11,10 +11,6 @@ import ( "github.com/status-im/status-go/eth-node/types" ) -func (m *Messenger) AddStorePeer(address multiaddr.Multiaddr) (peer.ID, error) { - return m.transport.AddStorePeer(address) -} - func (m *Messenger) AddRelayPeer(address multiaddr.Multiaddr) (peer.ID, error) { return m.transport.AddRelayPeer(address) } diff --git a/protocol/messenger_share_urls.go b/protocol/messenger_share_urls.go index 238f06dbd..812cd00e4 100644 --- a/protocol/messenger_share_urls.go +++ b/protocol/messenger_share_urls.go @@ -15,11 +15,11 @@ import ( "github.com/status-im/status-go/eth-node/crypto" "github.com/status-im/status-go/eth-node/types" "github.com/status-im/status-go/protocol/common" - "github.com/status-im/status-go/protocol/common/shard" "github.com/status-im/status-go/protocol/communities" "github.com/status-im/status-go/protocol/protobuf" "github.com/status-im/status-go/protocol/requests" "github.com/status-im/status-go/services/utils" + "github.com/status-im/status-go/wakuv2" ) type CommunityURLData struct { @@ -49,7 +49,7 @@ type URLDataResponse struct { Community *CommunityURLData `json:"community"` Channel *CommunityChannelURLData `json:"channel"` Contact *ContactURLData `json:"contact"` - Shard *shard.Shard `json:"shard,omitempty"` + Shard *wakuv2.Shard `json:"shard,omitempty"` } const baseShareURL = "https://status.app" @@ -204,7 +204,7 @@ func parseCommunityURLWithData(data string, chatKey string) (*URLDataResponse, e TagIndices: tagIndices, CommunityID: types.EncodeHex(communityID), }, - Shard: shard.FromProtobuff(urlDataProto.Shard), + Shard: 
diff --git a/protocol/messenger_share_urls.go b/protocol/messenger_share_urls.go
index 238f06dbd..812cd00e4 100644
--- a/protocol/messenger_share_urls.go
+++ b/protocol/messenger_share_urls.go
@@ -15,11 +15,11 @@ import (
 	"github.com/status-im/status-go/eth-node/crypto"
 	"github.com/status-im/status-go/eth-node/types"
 	"github.com/status-im/status-go/protocol/common"
-	"github.com/status-im/status-go/protocol/common/shard"
 	"github.com/status-im/status-go/protocol/communities"
 	"github.com/status-im/status-go/protocol/protobuf"
 	"github.com/status-im/status-go/protocol/requests"
 	"github.com/status-im/status-go/services/utils"
+	"github.com/status-im/status-go/wakuv2"
 )

 type CommunityURLData struct {
@@ -49,7 +49,7 @@ type URLDataResponse struct {
 	Community *CommunityURLData        `json:"community"`
 	Channel   *CommunityChannelURLData `json:"channel"`
 	Contact   *ContactURLData          `json:"contact"`
-	Shard     *shard.Shard             `json:"shard,omitempty"`
+	Shard     *wakuv2.Shard            `json:"shard,omitempty"`
 }

 const baseShareURL = "https://status.app"
@@ -204,7 +204,7 @@ func parseCommunityURLWithData(data string, chatKey string) (*URLDataResponse, e
 			TagIndices:   tagIndices,
 			CommunityID:  types.EncodeHex(communityID),
 		},
-		Shard: shard.FromProtobuff(urlDataProto.Shard),
+		Shard: wakuv2.FromProtobuff(urlDataProto.Shard),
 	}, nil
 }
@@ -380,7 +380,7 @@ func parseCommunityChannelURLWithData(data string, chatKey string) (*URLDataResp
 			Color:       channelProto.Color,
 			ChannelUUID: channelProto.Uuid,
 		},
-		Shard: shard.FromProtobuff(urlDataProto.Shard),
+		Shard: wakuv2.FromProtobuff(urlDataProto.Shard),
 	}, nil
 }
diff --git a/protocol/messenger_store_node_request_manager.go b/protocol/messenger_store_node_request_manager.go
index 362e1767a..bd20cf092 100644
--- a/protocol/messenger_store_node_request_manager.go
+++ b/protocol/messenger_store_node_request_manager.go
@@ -8,17 +8,16 @@ import (
 	"sync"
 	"time"

-	"github.com/waku-org/go-waku/waku/v2/api/history"
-
 	gocommon "github.com/status-im/status-go/common"
 	"github.com/status-im/status-go/eth-node/crypto"
-	"github.com/status-im/status-go/protocol/common/shard"
+	"github.com/waku-org/go-waku/waku/v2/api/history"

 	"go.uber.org/zap"

 	"github.com/status-im/status-go/eth-node/types"
 	"github.com/status-im/status-go/protocol/communities"
 	"github.com/status-im/status-go/protocol/transport"
+	"github.com/status-im/status-go/wakuv2"
 )

 const (
@@ -84,7 +83,7 @@ func (m *StoreNodeRequestManager) FetchCommunity(ctx context.Context, community
 		zap.Any("community", community),
 		zap.Any("config", cfg))

-	requestCommunity := func(communityID string, shard *shard.Shard) (*communities.Community, StoreNodeRequestStats, error) {
+	requestCommunity := func(communityID string, shard *wakuv2.Shard) (*communities.Community, StoreNodeRequestStats, error) {
 		channel, err := m.subscribeToRequest(ctx, storeNodeCommunityRequest, communityID, shard, cfg)
 		if err != nil {
 			return nil, StoreNodeRequestStats{}, fmt.Errorf("failed to create a request for community: %w", err)
@@ -102,7 +101,7 @@ func (m *StoreNodeRequestManager) FetchCommunity(ctx context.Context, community
 	communityShard := community.Shard
 	if communityShard == nil {
 		id := transport.CommunityShardInfoTopic(community.CommunityID)
-		fetchedShard, err := m.subscribeToRequest(ctx, storeNodeShardRequest, id, shard.DefaultNonProtectedShard(), cfg)
+		fetchedShard, err := m.subscribeToRequest(ctx, storeNodeShardRequest, id, wakuv2.DefaultNonProtectedShard(), cfg)
 		if err != nil {
 			return nil, StoreNodeRequestStats{}, fmt.Errorf("failed to create a shard info request: %w", err)
 		}
@@ -180,7 +179,7 @@ func (m *StoreNodeRequestManager) FetchContact(ctx context.Context, contactID st

 // subscribeToRequest checks if a request for given community/contact is already in progress, creates and installs
 // a new one if not found, and returns a subscription to the result of the found/started request.
 // The subscription can then be used to get the result of the request, this could be either a community/contact or an error.
-func (m *StoreNodeRequestManager) subscribeToRequest(ctx context.Context, requestType storeNodeRequestType, dataID string, shard *shard.Shard, cfg StoreNodeRequestConfig) (storeNodeResponseSubscription, error) {
+func (m *StoreNodeRequestManager) subscribeToRequest(ctx context.Context, requestType storeNodeRequestType, dataID string, shard *wakuv2.Shard, cfg StoreNodeRequestConfig) (storeNodeResponseSubscription, error) {
 	// It's important to unlock only after getting the subscription channel.
 	// We also lock `activeRequestsLock` during finalizing the requests. This ensures that the subscription
 	// created in this function will get the result even if the requests proceeds faster than this function ends.
@@ -235,7 +234,7 @@ func (m *StoreNodeRequestManager) newStoreNodeRequest(ctx context.Context) *stor

 // getFilter checks if a filter for a given community is already created and creates one of not found.
 // Returns the found/created filter, a flag if the filter was created by the function and an error.
-func (m *StoreNodeRequestManager) getFilter(requestType storeNodeRequestType, dataID string, shard *shard.Shard) (*transport.Filter, bool, error) {
+func (m *StoreNodeRequestManager) getFilter(requestType storeNodeRequestType, dataID string, shard *wakuv2.Shard) (*transport.Filter, bool, error) {
 	// First check if such filter already exists.
 	filter := m.messenger.transport.FilterByChatID(dataID)
 	if filter != nil {
@@ -338,7 +337,7 @@ type storeNodeRequestResult struct {
 	// One of data fields (community or contact) will be present depending on request type
 	community *communities.Community
 	contact   *Contact
-	shard     *shard.Shard
+	shard     *wakuv2.Shard
 }

 type storeNodeResponseSubscription = chan storeNodeRequestResult
diff --git a/protocol/messenger_storenode_comunity_test.go b/protocol/messenger_storenode_comunity_test.go
index c5f444585..11208be8f 100644
--- a/protocol/messenger_storenode_comunity_test.go
+++ b/protocol/messenger_storenode_comunity_test.go
@@ -10,9 +10,9 @@ import (
 	"github.com/multiformats/go-multiaddr"

 	"github.com/status-im/status-go/protocol/storenodes"
+	"github.com/status-im/status-go/wakuv2"

 	gethbridge "github.com/status-im/status-go/eth-node/bridge/geth"
-	"github.com/status-im/status-go/protocol/common/shard"
 	"github.com/status-im/status-go/protocol/communities"
 	"github.com/status-im/status-go/protocol/tt"

@@ -92,11 +92,12 @@ func (s *MessengerStoreNodeCommunitySuite) createStore(name string) (*waku2.Waku
 	cfg := testWakuV2Config{
 		logger:      s.logger.Named(name),
 		enableStore: true,
-		clusterID:   shard.MainStatusShardCluster,
+		clusterID:   wakuv2.MainStatusShardCluster,
 	}

 	storeNode := NewTestWakuV2(&s.Suite, cfg)

-	addresses := storeNode.ListenAddresses()
+	addresses, err := storeNode.ListenAddresses()
+	s.Require().NoError(err)
 	s.Require().GreaterOrEqual(len(addresses), 1, "no storenode listen address")
 	return storeNode, addresses[0]
 }
@@ -109,7 +110,7 @@ func (s *MessengerStoreNodeCommunitySuite) newMessenger(name string, storenodeAd
 	cfg := testWakuV2Config{
 		logger:      logger,
 		enableStore: false,
-		clusterID:   shard.MainStatusShardCluster,
+		clusterID:   wakuv2.MainStatusShardCluster,
 	}
 	wakuV2 := NewTestWakuV2(&s.Suite, cfg)
 	wakuV2Wrapper := gethbridge.NewGethWakuV2Wrapper(wakuV2)
diff --git a/protocol/messenger_storenode_request_test.go b/protocol/messenger_storenode_request_test.go
index c6fdd0502..b3f7daf93 100644
--- a/protocol/messenger_storenode_request_test.go
+++ b/protocol/messenger_storenode_request_test.go
@@ -24,7 +24,6 @@ import (
 	"github.com/status-im/status-go/multiaccounts/accounts"
 	"github.com/status-im/status-go/params"
 	"github.com/status-im/status-go/protocol/common"
-	"github.com/status-im/status-go/protocol/common/shard"
 	"github.com/status-im/status-go/protocol/communities"
 	"github.com/status-im/status-go/protocol/communities/token"
 	"github.com/status-im/status-go/protocol/protobuf"
@@ -34,6 +33,7 @@ import (
 	mailserversDB "github.com/status-im/status-go/services/mailservers"
 	"github.com/status-im/status-go/services/wallet/bigint"
 	"github.com/status-im/status-go/t/helpers"
+	"github.com/status-im/status-go/wakuv2"
 	waku2 "github.com/status-im/status-go/wakuv2"
 	wakuV2common "github.com/status-im/status-go/wakuv2/common"
 )
@@ -160,7 +160,7 @@ func (s *MessengerStoreNodeRequestSuite) createStore() {
 	cfg := testWakuV2Config{
 		logger:      s.logger.Named("store-waku"),
 		enableStore: true,
-		clusterID:   shard.MainStatusShardCluster,
+		clusterID:   wakuv2.MainStatusShardCluster,
 	}

 	s.wakuStoreNode = NewTestWakuV2(&s.Suite, cfg)
@@ -178,7 +178,7 @@ func (s *MessengerStoreNodeRequestSuite) createOwner() {
 	cfg := testWakuV2Config{
 		logger:      s.logger.Named("owner-waku"),
 		enableStore: false,
-		clusterID:   shard.MainStatusShardCluster,
+		clusterID:   wakuv2.MainStatusShardCluster,
 	}

 	wakuV2 := NewTestWakuV2(&s.Suite, cfg)
@@ -199,7 +199,7 @@ func (s *MessengerStoreNodeRequestSuite) createBob() {
 	cfg := testWakuV2Config{
 		logger:      s.logger.Named("bob-waku"),
 		enableStore: false,
-		clusterID:   shard.MainStatusShardCluster,
+		clusterID:   wakuv2.MainStatusShardCluster,
 	}
 	wakuV2 := NewTestWakuV2(&s.Suite, cfg)
 	s.bobWaku = gethbridge.NewGethWakuV2Wrapper(wakuV2)
@@ -368,7 +368,8 @@ func (s *MessengerStoreNodeRequestSuite) waitForEnvelopes(subscription <-chan st
 }

 func (s *MessengerStoreNodeRequestSuite) wakuListenAddress(waku *waku2.Waku) multiaddr.Multiaddr {
-	addresses := waku.ListenAddresses()
+	addresses, err := waku.ListenAddresses()
+	s.Require().NoError(err)
 	s.Require().LessOrEqual(1, len(addresses))
 	return addresses[0]
 }
@@ -698,8 +699,8 @@ func (s *MessengerStoreNodeRequestSuite) TestRequestShardAndCommunityInfo() {
 	topicPrivKey, err := crypto.GenerateKey()
 	s.Require().NoError(err)

-	expectedShard := &shard.Shard{
-		Cluster: shard.MainStatusShardCluster,
+	expectedShard := &wakuv2.Shard{
+		Cluster: wakuv2.MainStatusShardCluster,
 		Index:   23,
 	}
@@ -843,8 +844,8 @@ type testFetchRealCommunityExampleTokenInfo struct {

 var testFetchRealCommunityExample = []struct {
 	CommunityID            string
-	CommunityURL           string       // If set, takes precedence over CommunityID
-	CommunityShard         *shard.Shard // WARNING: I didn't test a sharded community
+	CommunityURL           string        // If set, takes precedence over CommunityID
+	CommunityShard         *wakuv2.Shard // WARNING: I didn't test a sharded community
 	Fleet                  string
 	ClusterID              uint16
 	UserPrivateKeyString   string // When empty a new user will be created
@@ -865,14 +866,14 @@ var testFetchRealCommunityExample = []struct {
 		CommunityID:    "0x03073514d4c14a7d10ae9fc9b0f05abc904d84166a6ac80add58bf6a3542a4e50a",
 		CommunityShard: nil,
 		Fleet:          params.FleetStatusProd,
-		ClusterID:      shard.MainStatusShardCluster,
+		ClusterID:      wakuv2.MainStatusShardCluster,
 	},
 	{
 		// Example 3,
 		// https://status.app/c/CxiACi8KFGFwIHJlcSAxIHN0dCBiZWMgbWVtEgdkc2Fkc2FkGAMiByM0MzYwREYqAxkrHAM=#zQ3shwDYZHtrLE7NqoTGjTWzWUu6hom5D4qxfskLZfgfyGRyL
 		CommunityID: "0x03f64be95ed5c925022265f9250f538f65ed3dcf6e4ef6c139803dc02a3487ae7b",
 		Fleet:       params.FleetStatusProd,
-		ClusterID:   shard.MainStatusShardCluster,
+		ClusterID:   wakuv2.MainStatusShardCluster,

 		CheckExpectedEnvelopes: true,
 		ExpectedShardEnvelopes: []string{
@@ -975,7 +976,7 @@ var testFetchRealCommunityExample = []struct {
 		//Example 1,
 		CommunityID: "0x02471dd922756a3a50b623e59cf3b99355d6587e43d5c517eb55f9aea9d3fe9fe9",
 		Fleet:       params.FleetStatusProd,
-		ClusterID:   shard.MainStatusShardCluster,
+		ClusterID:   wakuv2.MainStatusShardCluster,

 		CheckExpectedEnvelopes: true,
 		ExpectedShardEnvelopes: []string{
 			"0xc3e68e838d09e0117b3f3fd27aabe5f5a509d13e9045263c78e6890953d43547",
@@ -1015,7 +1016,7 @@ var testFetchRealCommunityExample = []struct {
 				ContractAddress: "0x21F6F5Cb75E81e5104D890D750270eD6538C50cb",
 			},
 		},
-		ClusterID: shard.MainStatusShardCluster,
+		ClusterID: wakuv2.MainStatusShardCluster,

 		CheckExpectedEnvelopes: false,
 		CustomOptions: []StoreNodeRequestOption{
 			WithInitialPageSize(1),
diff --git a/protocol/messenger_testing_utils.go b/protocol/messenger_testing_utils.go
index 54aee55af..a6c93ec70 100644
--- a/protocol/messenger_testing_utils.go
+++ b/protocol/messenger_testing_utils.go
@@ -14,11 +14,11 @@ import (
 	gocommon "github.com/status-im/status-go/common"
 	"github.com/status-im/status-go/protocol/wakusync"
+	"github.com/status-im/status-go/wakuv2"

 	"github.com/status-im/status-go/protocol/identity"

 	"github.com/status-im/status-go/eth-node/types"
-	waku2 "github.com/status-im/status-go/wakuv2"

 	"github.com/stretchr/testify/suite"

@@ -206,7 +206,7 @@ func WaitOnSignaledCommunityFound(m *Messenger, action func(), condition func(co
 	}
 }

-func WaitForConnectionStatus(s *suite.Suite, waku *waku2.Waku, action func() bool) {
+func WaitForConnectionStatus(s *suite.Suite, waku *wakuv2.Waku, action func() bool) {
 	subscription := waku.SubscribeToConnStatusChanges()
 	defer subscription.Unsubscribe()

@@ -238,7 +238,7 @@ func hasAllPeers(m map[peer.ID]types.WakuV2Peer, checkSlice peer.IDSlice) bool {
 	return true
 }

-func WaitForPeersConnected(s *suite.Suite, waku *waku2.Waku, action func() peer.IDSlice) {
+func WaitForPeersConnected(s *suite.Suite, waku *wakuv2.Waku, action func() peer.IDSlice) {
 	subscription := waku.SubscribeToConnStatusChanges()
 	defer subscription.Unsubscribe()
diff --git a/protocol/requests/set_community_shard.go b/protocol/requests/set_community_shard.go
index 0b1240b56..6ae52d1bd 100644
--- a/protocol/requests/set_community_shard.go
+++ b/protocol/requests/set_community_shard.go
@@ -4,12 +4,12 @@ import (
 	"errors"

 	"github.com/status-im/status-go/eth-node/types"
-	"github.com/status-im/status-go/protocol/common/shard"
+	"github.com/status-im/status-go/wakuv2"
 )

 type SetCommunityShard struct {
 	CommunityID types.HexBytes  `json:"communityId"`
-	Shard       *shard.Shard    `json:"shard,omitempty"`
+	Shard       *wakuv2.Shard   `json:"shard,omitempty"`
 	PrivateKey  *types.HexBytes `json:"privateKey,omitempty"`
 }

@@ -19,7 +19,7 @@ func (s *SetCommunityShard) Validate() error {
 	}
 	if s.Shard != nil {
 		// TODO: for now only MainStatusShard(16) is accepted
-		if s.Shard.Cluster != shard.MainStatusShardCluster {
+		if s.Shard.Cluster != wakuv2.MainStatusShardCluster {
 			return errors.New("invalid shard cluster")
 		}
 		if s.Shard.Index > 1023 {
diff --git a/protocol/transport/filters_manager.go b/protocol/transport/filters_manager.go
index 5393d63bf..acf3002d7 100644
--- a/protocol/transport/filters_manager.go
+++ b/protocol/transport/filters_manager.go
@@ -11,7 +11,7 @@ import (
 	"go.uber.org/zap"

 	"github.com/status-im/status-go/eth-node/types"
-	"github.com/status-im/status-go/protocol/common/shard"
+	"github.com/status-im/status-go/wakuv2"
 )

 const (
@@ -141,7 +141,7 @@ func (f *FiltersManager) InitPublicFilters(publicFiltersToInit []FiltersToInitia
 }

 type CommunityFilterToInitialize struct {
-	Shard   *shard.Shard
+	Shard   *wakuv2.Shard
 	PrivKey *ecdsa.PrivateKey
 }

@@ -158,7 +158,7 @@ func (f *FiltersManager) InitCommunityFilters(communityFiltersToInitialize []Com
 		}

 		topics := make([]string, 0)
-		topics = append(topics, shard.DefaultNonProtectedPubsubTopic())
+		topics = append(topics, wakuv2.DefaultNonProtectedPubsubTopic())
 		topics = append(topics, communityFilter.Shard.PubsubTopic())

 		for _, pubsubTopic := range topics {
diff --git a/protocol/transport/transport.go b/protocol/transport/transport.go
index 0f8df437c..a908669c5 100644
--- a/protocol/transport/transport.go
+++ b/protocol/transport/transport.go
@@ -516,10 +516,6 @@ func (t *Transport) ENR() (*enode.Node, error) {
 	return t.waku.ENR()
 }

-func (t *Transport) AddStorePeer(address multiaddr.Multiaddr) (peer.ID, error) {
-	return t.waku.AddStorePeer(address)
-}
-
 func (t *Transport) AddRelayPeer(address multiaddr.Multiaddr) (peer.ID, error) {
 	return t.waku.AddRelayPeer(address)
 }
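
`SetCommunityShard.Validate` keeps the earlier constraints: only `wakuv2.MainStatusShardCluster` is accepted for now, and indices must fit the 0–1023 static-sharding range. A usage sketch mirroring the sharding tests above; the helper name is illustrative:

    package protocol_example

    import (
    	"github.com/status-im/status-go/eth-node/types"
    	"github.com/status-im/status-go/protocol"
    	"github.com/status-im/status-go/protocol/requests"
    	"github.com/status-im/status-go/wakuv2"
    )

    func moveCommunityToShard(m *protocol.Messenger, communityID types.HexBytes) error {
    	req := &requests.SetCommunityShard{
    		CommunityID: communityID,
    		Shard: &wakuv2.Shard{
    			Cluster: wakuv2.MainStatusShardCluster, // anything else fails with "invalid shard cluster"
    			Index:   128,                           // must be <= 1023
    		},
    	}
    	if err := req.Validate(); err != nil {
    		return err
    	}
    	_, err := m.SetCommunityShard(req)
    	return err
    }
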
diff --git a/protocol/waku_builder_test.go b/protocol/waku_builder_test.go
index 7c8adab32..1134d5eec 100644
--- a/protocol/waku_builder_test.go
+++ b/protocol/waku_builder_test.go
@@ -12,7 +12,6 @@ import (
 	"github.com/status-im/status-go/appdatabase"
 	gethbridge "github.com/status-im/status-go/eth-node/bridge/geth"
 	"github.com/status-im/status-go/eth-node/types"
-	"github.com/status-im/status-go/protocol/common/shard"
 	"github.com/status-im/status-go/t/helpers"
 	waku2 "github.com/status-im/status-go/wakuv2"
 )
@@ -62,7 +61,7 @@ func NewTestWakuV2(s *suite.Suite, cfg testWakuV2Config) *waku2.Waku {
 	err = wakuNode.Start()
 	if cfg.enableStore {
-		err := wakuNode.SubscribeToPubsubTopic(shard.DefaultNonProtectedPubsubTopic(), nil)
+		err := wakuNode.SubscribeToPubsubTopic(waku2.DefaultNonProtectedPubsubTopic(), nil)
 		s.Require().NoError(err)
 	}
 	s.Require().NoError(err)
@@ -78,7 +77,7 @@ func CreateWakuV2Network(s *suite.Suite, parentLogger *zap.Logger, nodeNames []s
 		nodes[i] = NewTestWakuV2(s, testWakuV2Config{
 			logger:      parentLogger.Named("waku-" + name),
 			enableStore: false,
-			clusterID:   shard.MainStatusShardCluster,
+			clusterID:   waku2.MainStatusShardCluster,
 		})
 	}

@@ -89,9 +88,10 @@ func CreateWakuV2Network(s *suite.Suite, parentLogger *zap.Logger, nodeNames []s
 			continue
 		}

-		addrs := nodes[j].ListenAddresses()
+		addrs, err := nodes[j].ListenAddresses()
+		s.Require().NoError(err)
 		s.Require().Greater(len(addrs), 0)
-		_, err := nodes[i].AddRelayPeer(addrs[0])
+		_, err = nodes[i].AddRelayPeer(addrs[0])
 		s.Require().NoError(err)
 		err = nodes[i].DialPeer(addrs[0])
 		s.Require().NoError(err)
diff --git a/services/ext/api.go b/services/ext/api.go
index 7f7614084..0270f758b 100644
--- a/services/ext/api.go
+++ b/services/ext/api.go
@@ -18,6 +18,7 @@ import (
 	"github.com/status-im/status-go/services/browsers"
 	"github.com/status-im/status-go/services/wallet"
 	"github.com/status-im/status-go/services/wallet/bigint"
+	"github.com/status-im/status-go/wakuv2"

 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/p2p/enode"
@@ -33,7 +34,6 @@ import (
 	"github.com/status-im/status-go/multiaccounts/settings"
 	"github.com/status-im/status-go/protocol"
 	"github.com/status-im/status-go/protocol/common"
-	"github.com/status-im/status-go/protocol/common/shard"
 	"github.com/status-im/status-go/protocol/communities"
 	"github.com/status-im/status-go/protocol/communities/token"
 	"github.com/status-im/status-go/protocol/discord"
@@ -1279,7 +1279,7 @@ func (api *PublicAPI) RequestCommunityInfoFromMailserver(communityID string) (*c

 // Deprecated: RequestCommunityInfoFromMailserverWithShard is deprecated in favor of
 // configurable FetchCommunity.
-func (api *PublicAPI) RequestCommunityInfoFromMailserverWithShard(communityID string, shard *shard.Shard) (*communities.Community, error) {
+func (api *PublicAPI) RequestCommunityInfoFromMailserverWithShard(communityID string, shard *wakuv2.Shard) (*communities.Community, error) {
 	request := &protocol.FetchCommunityRequest{
 		CommunityKey:    communityID,
 		Shard:           shard,
@@ -1304,7 +1304,7 @@ func (api *PublicAPI) RequestCommunityInfoFromMailserverAsync(communityID string

 // Deprecated: RequestCommunityInfoFromMailserverAsyncWithShard is deprecated in favor of
 // configurable FetchCommunity.
-func (api *PublicAPI) RequestCommunityInfoFromMailserverAsyncWithShard(communityID string, shard *shard.Shard) error {
+func (api *PublicAPI) RequestCommunityInfoFromMailserverAsyncWithShard(communityID string, shard *wakuv2.Shard) error {
 	request := &protocol.FetchCommunityRequest{
 		CommunityKey:    communityID,
 		Shard:           shard,
@@ -1448,14 +1448,6 @@ func (api *PublicAPI) StorePubsubTopicKey(topic string, privKey string) error {
 	return api.service.messenger.StorePubsubTopicKey(topic, p)
 }

-func (api *PublicAPI) AddStorePeer(address string) (peer.ID, error) {
-	maddr, err := multiaddr.NewMultiaddr(address)
-	if err != nil {
-		return "", err
-	}
-	return api.service.messenger.AddStorePeer(maddr)
-}
-
 func (api *PublicAPI) AddRelayPeer(address string) (peer.ID, error) {
 	maddr, err := multiaddr.NewMultiaddr(address)
 	if err != nil {
diff --git a/services/mailservers/api_test.go b/services/mailservers/api_test.go
index c0d847b98..c830a15e3 100644
--- a/services/mailservers/api_test.go
+++ b/services/mailservers/api_test.go
@@ -8,10 +8,10 @@ import (

 	"github.com/status-im/status-go/appdatabase"
 	"github.com/status-im/status-go/eth-node/types"
-	"github.com/status-im/status-go/protocol/common/shard"
 	"github.com/status-im/status-go/protocol/sqlite"
 	"github.com/status-im/status-go/protocol/transport"
 	"github.com/status-im/status-go/t/helpers"
+	"github.com/status-im/status-go/wakuv2"
 )

 func setupTestDB(t *testing.T) (*Database, func()) {
@@ -62,9 +62,9 @@ func TestTopic(t *testing.T) {
 	defer close()
 	topicA := "0x61000000"
 	topicD := "0x64000000"
-	topic1 := MailserverTopic{PubsubTopic: shard.DefaultShardPubsubTopic(), ContentTopic: topicA, LastRequest: 1}
-	topic2 := MailserverTopic{PubsubTopic: shard.DefaultShardPubsubTopic(), ContentTopic: "0x6200000", LastRequest: 2}
-	topic3 := MailserverTopic{PubsubTopic: shard.DefaultShardPubsubTopic(), ContentTopic: "0x6300000", LastRequest: 3}
+	topic1 := MailserverTopic{PubsubTopic: wakuv2.DefaultShardPubsubTopic(), ContentTopic: topicA, LastRequest: 1}
+	topic2 := MailserverTopic{PubsubTopic: wakuv2.DefaultShardPubsubTopic(), ContentTopic: "0x6200000", LastRequest: 2}
+	topic3 := MailserverTopic{PubsubTopic: wakuv2.DefaultShardPubsubTopic(), ContentTopic: "0x6300000", LastRequest: 3}

 	require.NoError(t, db.AddTopic(topic1))
 	require.NoError(t, db.AddTopic(topic2))
@@ -77,14 +77,14 @@ func TestTopic(t *testing.T) {
 	filters := []*transport.Filter{
 		// Existing topic, is not updated
 		{
-			PubsubTopic:  shard.DefaultShardPubsubTopic(),
+			PubsubTopic:  wakuv2.DefaultShardPubsubTopic(),
 			ContentTopic: types.BytesToTopic([]byte{0x61}),
 		},
 		// Non existing topic is not inserted
 		{
 			Discovery:    true,
 			Negotiated:   true,
-			PubsubTopic:  shard.DefaultShardPubsubTopic(),
+			PubsubTopic:  wakuv2.DefaultShardPubsubTopic(),
 			ContentTopic: types.BytesToTopic([]byte{0x64}),
 		},
 	}
@@ -160,7 +160,7 @@ func TestAddGetDeleteMailserverTopics(t *testing.T) {
 	defer close()
 	api := &API{db: db}
 	testTopic := MailserverTopic{
-		PubsubTopic:  shard.DefaultShardPubsubTopic(),
+		PubsubTopic:  wakuv2.DefaultShardPubsubTopic(),
 		ContentTopic: "topic-001",
 		ChatIDs:      []string{"chatID01", "chatID02"},
 		LastRequest:  10,
@@ -173,14 +173,14 @@ func TestAddGetDeleteMailserverTopics(t *testing.T) {
 	require.NoError(t, err)
 	require.EqualValues(t, []MailserverTopic{testTopic}, topics)

-	err = api.DeleteMailserverTopic(context.Background(), shard.DefaultShardPubsubTopic(), testTopic.ContentTopic)
+	err = api.DeleteMailserverTopic(context.Background(), wakuv2.DefaultShardPubsubTopic(), testTopic.ContentTopic)
 	require.NoError(t, err)
err) topics, err = api.GetMailserverTopics(context.Background()) require.NoError(t, err) require.EqualValues(t, ([]MailserverTopic)(nil), topics) // Delete non-existing topic. - err = api.DeleteMailserverTopic(context.Background(), shard.DefaultShardPubsubTopic(), "non-existing-topic") + err = api.DeleteMailserverTopic(context.Background(), wakuv2.DefaultShardPubsubTopic(), "non-existing-topic") require.NoError(t, err) } diff --git a/services/status/service.go b/services/status/service.go index abfa1531e..d8fac8bd4 100644 --- a/services/status/service.go +++ b/services/status/service.go @@ -10,7 +10,7 @@ import ( "github.com/status-im/status-go/eth-node/types" "github.com/status-im/status-go/protocol" - "github.com/status-im/status-go/protocol/common/shard" + "github.com/status-im/status-go/wakuv2" ) // Make sure that Service implements node.Lifecycle interface. @@ -70,7 +70,7 @@ type PublicAPI struct { service *Service } -func (p *PublicAPI) CommunityInfo(communityID types.HexBytes, shard *shard.Shard) (json.RawMessage, error) { +func (p *PublicAPI) CommunityInfo(communityID types.HexBytes, shard *wakuv2.Shard) (json.RawMessage, error) { if p.service.messenger == nil { return nil, ErrNotInitialized } diff --git a/third_party/nwaku b/third_party/nwaku new file mode 160000 index 000000000..c6f47f8ae --- /dev/null +++ b/third_party/nwaku @@ -0,0 +1 @@ +Subproject commit c6f47f8aeb1f730108dd0daa7b88beedb5931895 diff --git a/timesource/timesource.go b/timesource/timesource.go index e21e8ea68..5e1dc387c 100644 --- a/timesource/timesource.go +++ b/timesource/timesource.go @@ -2,6 +2,7 @@ package timesource import ( "bytes" + "context" "errors" "sort" "sync" @@ -144,8 +145,8 @@ type NTPTimeSource struct { timeQuery ntpQuery // for ease of testing now func() time.Time - quit chan struct{} started bool + cancel context.CancelFunc mu sync.RWMutex latestOffset time.Duration @@ -175,7 +176,7 @@ func (s *NTPTimeSource) updateOffset() error { // runPeriodically runs periodically the given function based on NTPTimeSource // synchronization limits (fastNTPSyncPeriod / slowNTPSyncPeriod) -func (s *NTPTimeSource) runPeriodically(fn func() error, starWithSlowSyncPeriod bool) { +func (s *NTPTimeSource) runPeriodically(ctx context.Context, fn func() error, starWithSlowSyncPeriod bool) { if s.started { return } @@ -184,7 +185,7 @@ func (s *NTPTimeSource) runPeriodically(fn func() error, starWithSlowSyncPeriod if starWithSlowSyncPeriod { period = s.slowNTPSyncPeriod } - s.quit = make(chan struct{}) + go func() { defer common.LogOnPanic() for { @@ -196,7 +197,7 @@ func (s *NTPTimeSource) runPeriodically(fn func() error, starWithSlowSyncPeriod period = s.fastNTPSyncPeriod } - case <-s.quit: + case <-ctx.Done(): return } } @@ -204,11 +205,13 @@ func (s *NTPTimeSource) runPeriodically(fn func() error, starWithSlowSyncPeriod } // Start initializes the local offset and starts a goroutine that periodically updates the local offset. 
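The timesource hunks below replace the quit channel with context cancellation: Start now takes a context and returns an error, and Stop no longer returns one. A minimal caller sketch under the new signatures:

    // Hypothetical startup/shutdown wiring for the NTP time source.
    ts := timesource.Default()
    if err := ts.Start(context.Background()); err != nil {
        // As written below, Start only ever returns nil, but the new
        // signature leaves room for startup errors.
        panic(err)
    }
    defer ts.Stop() // cancels the internal context; no error to check anymore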
-func (s *NTPTimeSource) Start() { +func (s *NTPTimeSource) Start(ctx context.Context) error { if s.started { - return + return nil } + ctx, cancel := context.WithCancel(ctx) + // Attempt to update the offset synchronously so that user can have reliable messages right away err := s.updateOffset() if err != nil { @@ -217,23 +220,30 @@ func (s *NTPTimeSource) Start() { logutils.ZapLogger().Error("failed to update offset", zap.Error(err)) } - s.runPeriodically(s.updateOffset, err == nil) + s.runPeriodically(ctx, s.updateOffset, err == nil) s.started = true -} + s.cancel = cancel -// Stop goroutine that updates time source. -func (s *NTPTimeSource) Stop() error { - if s.quit == nil { - return nil - } - close(s.quit) - s.started = false return nil } +// Stop goroutine that updates time source. +func (s *NTPTimeSource) Stop() { + if s.cancel == nil { + return + } + + s.cancel() + s.started = false +} + func (s *NTPTimeSource) GetCurrentTime() time.Time { - s.Start() + err := s.Start(context.Background()) + if err != nil { + panic("could not obtain timesource") + } + return s.Now() } @@ -243,7 +253,11 @@ func (s *NTPTimeSource) GetCurrentTimeInMillis() uint64 { func GetCurrentTime() time.Time { ts := Default() - ts.Start() + err := ts.Start(context.Background()) + if err != nil { + panic("could not obtain timesource") + } + return ts.Now() } diff --git a/timesource/timesource_test.go b/timesource/timesource_test.go index 51a753657..35840586a 100644 --- a/timesource/timesource_test.go +++ b/timesource/timesource_test.go @@ -1,6 +1,7 @@ package timesource import ( + "context" "errors" "sync" "testing" @@ -214,7 +215,7 @@ func TestRunningPeriodically(t *testing.T) { // on NTPTimeSource specified periods (fastNTPSyncPeriod & slowNTPSyncPeriod) wg := sync.WaitGroup{} wg.Add(1) - source.runPeriodically(func() error { + source.runPeriodically(context.TODO(), func() error { mu.Lock() periods = append(periods, time.Since(lastCall)) mu.Unlock() @@ -277,14 +278,12 @@ func TestGetCurrentTimeInMillis(t *testing.T) { // test repeat invoke GetCurrentTimeInMillis n = ts.GetCurrentTimeInMillis() require.Equal(t, expectedTime, n) - e := ts.Stop() - require.NoError(t, e) + ts.Stop() // test invoke after stop n = ts.GetCurrentTimeInMillis() require.Equal(t, expectedTime, n) - e = ts.Stop() - require.NoError(t, e) + ts.Stop() } func TestGetCurrentTimeOffline(t *testing.T) { diff --git a/vendor/github.com/waku-org/go-waku/waku/v2/api/common/storenode_requestor.go b/vendor/github.com/waku-org/go-waku/waku/v2/api/common/storenode_requestor.go index 8a723c9e6..a5076b3f6 100644 --- a/vendor/github.com/waku-org/go-waku/waku/v2/api/common/storenode_requestor.go +++ b/vendor/github.com/waku-org/go-waku/waku/v2/api/common/storenode_requestor.go @@ -8,5 +8,5 @@ import ( ) type StorenodeRequestor interface { - Query(ctx context.Context, peerID peer.ID, query *pb.StoreQueryRequest) (StoreRequestResult, error) + Query(ctx context.Context, peerInfo peer.AddrInfo, query *pb.StoreQueryRequest) (StoreRequestResult, error) } diff --git a/vendor/github.com/waku-org/go-waku/waku/v2/api/history/cycle.go b/vendor/github.com/waku-org/go-waku/waku/v2/api/history/cycle.go index 5da8d0ee4..c4976110a 100644 --- a/vendor/github.com/waku-org/go-waku/waku/v2/api/history/cycle.go +++ b/vendor/github.com/waku-org/go-waku/waku/v2/api/history/cycle.go @@ -338,6 +338,24 @@ func (m *StorenodeCycle) GetActiveStorenode() peer.ID { return m.activeStorenode } +func (m *StorenodeCycle) GetActiveStorenodePeerInfo() peer.AddrInfo { + m.RLock() + defer 
m.RUnlock() + + storeNodes, err := m.storenodeConfigProvider.Storenodes() + if err != nil { + return peer.AddrInfo{} + } + + for _, p := range storeNodes { + if p.ID == m.activeStorenode { + return p + } + } + + return peer.AddrInfo{} +} + func (m *StorenodeCycle) IsStorenodeAvailable(peerID peer.ID) bool { return m.storenodeStatus(peerID) == connected } diff --git a/vendor/github.com/waku-org/go-waku/waku/v2/api/history/history.go b/vendor/github.com/waku-org/go-waku/waku/v2/api/history/history.go index 004cd1567..c61bb2e14 100644 --- a/vendor/github.com/waku-org/go-waku/waku/v2/api/history/history.go +++ b/vendor/github.com/waku-org/go-waku/waku/v2/api/history/history.go @@ -37,7 +37,7 @@ type HistoryRetriever struct { type HistoryProcessor interface { OnEnvelope(env *protocol.Envelope, processEnvelopes bool) error - OnRequestFailed(requestID []byte, peerID peer.ID, err error) + OnRequestFailed(requestID []byte, peerInfo peer.AddrInfo, err error) } func NewHistoryRetriever(store common.StorenodeRequestor, historyProcessor HistoryProcessor, logger *zap.Logger) *HistoryRetriever { @@ -51,7 +51,7 @@ func NewHistoryRetriever(store common.StorenodeRequestor, historyProcessor Histo func (hr *HistoryRetriever) Query( ctx context.Context, criteria store.FilterCriteria, - storenodeID peer.ID, + storenode peer.AddrInfo, pageLimit uint64, shouldProcessNextPage func(int) (bool, uint64), processEnvelopes bool, @@ -178,7 +178,7 @@ loop: newCriteria.TimeStart = timeStart newCriteria.TimeEnd = timeEnd - cursor, envelopesCount, err := hr.createMessagesRequest(queryCtx, storenodeID, newCriteria, w.cursor, w.limit, true, processEnvelopes, logger) + cursor, envelopesCount, err := hr.createMessagesRequest(queryCtx, storenode, newCriteria, w.cursor, w.limit, true, processEnvelopes, logger) queryCancel() if err != nil { @@ -241,7 +241,7 @@ loop: func (hr *HistoryRetriever) createMessagesRequest( ctx context.Context, - peerID peer.ID, + peerInfo peer.AddrInfo, criteria store.FilterCriteria, cursor []byte, limit uint64, @@ -257,7 +257,7 @@ func (hr *HistoryRetriever) createMessagesRequest( }) go func() { - storeCursor, envelopesCount, err = hr.requestStoreMessages(ctx, peerID, criteria, cursor, limit, processEnvelopes) + storeCursor, envelopesCount, err = hr.requestStoreMessages(ctx, peerInfo, criteria, cursor, limit, processEnvelopes) resultCh <- struct { storeCursor []byte envelopesCount int @@ -273,7 +273,7 @@ func (hr *HistoryRetriever) createMessagesRequest( } } else { go func() { - _, _, err = hr.requestStoreMessages(ctx, peerID, criteria, cursor, limit, false) + _, _, err = hr.requestStoreMessages(ctx, peerInfo, criteria, cursor, limit, false) if err != nil { logger.Error("failed to request store messages", zap.Error(err)) } @@ -283,9 +283,9 @@ func (hr *HistoryRetriever) createMessagesRequest( return } -func (hr *HistoryRetriever) requestStoreMessages(ctx context.Context, peerID peer.ID, criteria store.FilterCriteria, cursor []byte, limit uint64, processEnvelopes bool) ([]byte, int, error) { +func (hr *HistoryRetriever) requestStoreMessages(ctx context.Context, peerInfo peer.AddrInfo, criteria store.FilterCriteria, cursor []byte, limit uint64, processEnvelopes bool) ([]byte, int, error) { requestID := protocol.GenerateRequestID() - logger := hr.logger.With(zap.String("requestID", hexutil.Encode(requestID)), zap.Stringer("peerID", peerID)) + logger := hr.logger.With(zap.String("requestID", hexutil.Encode(requestID)), zap.Stringer("peerID", peerInfo.ID)) logger.Debug("store.query", 
logging.Timep("startTime", criteria.TimeStart), @@ -307,12 +307,12 @@ func (hr *HistoryRetriever) requestStoreMessages(ctx context.Context, peerID pee } queryStart := time.Now() - result, err := hr.store.Query(ctx, peerID, storeQueryRequest) + result, err := hr.store.Query(ctx, peerInfo, storeQueryRequest) queryDuration := time.Since(queryStart) if err != nil { logger.Error("error querying storenode", zap.Error(err)) - hr.historyProcessor.OnRequestFailed(requestID, peerID, err) + hr.historyProcessor.OnRequestFailed(requestID, peerInfo, err) return nil, 0, err } diff --git a/vendor/github.com/waku-org/go-waku/waku/v2/api/missing/criteria_interest.go b/vendor/github.com/waku-org/go-waku/waku/v2/api/missing/criteria_interest.go index 919b2fc91..19aa7b84d 100644 --- a/vendor/github.com/waku-org/go-waku/waku/v2/api/missing/criteria_interest.go +++ b/vendor/github.com/waku-org/go-waku/waku/v2/api/missing/criteria_interest.go @@ -10,7 +10,7 @@ import ( ) type criteriaInterest struct { - peerID peer.ID + peerInfo peer.AddrInfo contentFilter protocol.ContentFilter lastChecked time.Time @@ -19,7 +19,7 @@ type criteriaInterest struct { } func (c criteriaInterest) equals(other criteriaInterest) bool { - if c.peerID != other.peerID { + if c.peerInfo.ID != other.peerInfo.ID { return false } diff --git a/vendor/github.com/waku-org/go-waku/waku/v2/api/missing/default_requestor.go b/vendor/github.com/waku-org/go-waku/waku/v2/api/missing/default_requestor.go index 382821735..a72af3c55 100644 --- a/vendor/github.com/waku-org/go-waku/waku/v2/api/missing/default_requestor.go +++ b/vendor/github.com/waku-org/go-waku/waku/v2/api/missing/default_requestor.go @@ -20,10 +20,10 @@ type defaultStorenodeRequestor struct { store *store.WakuStore } -func (d *defaultStorenodeRequestor) GetMessagesByHash(ctx context.Context, peerID peer.ID, pageSize uint64, messageHashes []pb.MessageHash) (common.StoreRequestResult, error) { - return d.store.QueryByHash(ctx, messageHashes, store.WithPeer(peerID), store.WithPaging(false, pageSize)) +func (d *defaultStorenodeRequestor) GetMessagesByHash(ctx context.Context, peerInfo peer.AddrInfo, pageSize uint64, messageHashes []pb.MessageHash) (common.StoreRequestResult, error) { + return d.store.QueryByHash(ctx, messageHashes, store.WithPeerAddr(peerInfo.Addrs...), store.WithPaging(false, pageSize)) } -func (d *defaultStorenodeRequestor) Query(ctx context.Context, peerID peer.ID, storeQueryRequest *storepb.StoreQueryRequest) (common.StoreRequestResult, error) { - return d.store.RequestRaw(ctx, peerID, storeQueryRequest) +func (d *defaultStorenodeRequestor) Query(ctx context.Context, peerInfo peer.AddrInfo, storeQueryRequest *storepb.StoreQueryRequest) (common.StoreRequestResult, error) { + return d.store.RequestRaw(ctx, peerInfo, storeQueryRequest) } diff --git a/vendor/github.com/waku-org/go-waku/waku/v2/api/missing/missing_messages.go b/vendor/github.com/waku-org/go-waku/waku/v2/api/missing/missing_messages.go index 72ac4f9f3..ab187af42 100644 --- a/vendor/github.com/waku-org/go-waku/waku/v2/api/missing/missing_messages.go +++ b/vendor/github.com/waku-org/go-waku/waku/v2/api/missing/missing_messages.go @@ -66,13 +66,13 @@ func NewMissingMessageVerifier(storenodeRequester common.StorenodeRequestor, mes } } -func (m *MissingMessageVerifier) SetCriteriaInterest(peerID peer.ID, contentFilter protocol.ContentFilter) { +func (m *MissingMessageVerifier) SetCriteriaInterest(peerInfo peer.AddrInfo, contentFilter protocol.ContentFilter) { m.criteriaInterestMu.Lock() defer 
m.criteriaInterestMu.Unlock() ctx, cancel := context.WithCancel(m.ctx) criteriaInterest := criteriaInterest{ - peerID: peerID, + peerInfo: peerInfo, contentFilter: contentFilter, lastChecked: m.timesource.Now().Add(-m.params.delay), ctx: ctx, @@ -164,7 +164,7 @@ func (m *MissingMessageVerifier) fetchHistory(c chan<- *protocol.Envelope, inter } m.logger.Error("could not fetch history", - zap.Stringer("peerID", interest.peerID), + zap.Stringer("peerID", interest.peerInfo.ID), zap.String("pubsubTopic", interest.contentFilter.PubsubTopic), zap.Strings("contentTopics", contentTopics)) continue @@ -207,7 +207,7 @@ func (m *MissingMessageVerifier) fetchMessagesBatch(c chan<- *protocol.Envelope, contentTopics := interest.contentFilter.ContentTopics.ToList() logger := m.logger.With( - zap.Stringer("peerID", interest.peerID), + zap.Stringer("peerID", interest.peerInfo.ID), zap.Strings("contentTopics", contentTopics[batchFrom:batchTo]), zap.String("pubsubTopic", interest.contentFilter.PubsubTopic), logging.Epoch("from", interest.lastChecked), @@ -226,7 +226,7 @@ func (m *MissingMessageVerifier) fetchMessagesBatch(c chan<- *protocol.Envelope, return m.storenodeRequestor.Query( ctx, - interest.peerID, + interest.peerInfo, storeQueryRequest, ) }, logger, "retrieving history to check for missing messages") @@ -309,7 +309,7 @@ func (m *MissingMessageVerifier) fetchMessagesBatch(c chan<- *protocol.Envelope, PaginationLimit: proto.Uint64(maxMsgHashesPerRequest), } - return m.storenodeRequestor.Query(queryCtx, interest.peerID, storeQueryRequest) + return m.storenodeRequestor.Query(queryCtx, interest.peerInfo, storeQueryRequest) }, logger, "retrieving missing messages") if err != nil { if !errors.Is(err, context.Canceled) { diff --git a/vendor/github.com/waku-org/go-waku/waku/v2/api/publish/default_verifier.go b/vendor/github.com/waku-org/go-waku/waku/v2/api/publish/default_verifier.go index 68eca0304..386728ece 100644 --- a/vendor/github.com/waku-org/go-waku/waku/v2/api/publish/default_verifier.go +++ b/vendor/github.com/waku-org/go-waku/waku/v2/api/publish/default_verifier.go @@ -18,10 +18,10 @@ type defaultStorenodeMessageVerifier struct { store *store.WakuStore } -func (d *defaultStorenodeMessageVerifier) MessageHashesExist(ctx context.Context, requestID []byte, peerID peer.ID, pageSize uint64, messageHashes []pb.MessageHash) ([]pb.MessageHash, error) { +func (d *defaultStorenodeMessageVerifier) MessageHashesExist(ctx context.Context, requestID []byte, peerID peer.AddrInfo, pageSize uint64, messageHashes []pb.MessageHash) ([]pb.MessageHash, error) { var opts []store.RequestOption opts = append(opts, store.WithRequestID(requestID)) - opts = append(opts, store.WithPeer(peerID)) + opts = append(opts, store.WithPeerAddr(peerID.Addrs...)) opts = append(opts, store.WithPaging(false, pageSize)) opts = append(opts, store.IncludeData(false)) diff --git a/vendor/github.com/waku-org/go-waku/waku/v2/api/publish/message_check.go b/vendor/github.com/waku-org/go-waku/waku/v2/api/publish/message_check.go index 8a37e20ce..c091e9592 100644 --- a/vendor/github.com/waku-org/go-waku/waku/v2/api/publish/message_check.go +++ b/vendor/github.com/waku-org/go-waku/waku/v2/api/publish/message_check.go @@ -33,7 +33,7 @@ type ISentCheck interface { type StorenodeMessageVerifier interface { // MessageHashesExist returns a list of the messages it found from a list of message hashes - MessageHashesExist(ctx context.Context, requestID []byte, peerID peer.ID, pageSize uint64, messageHashes []pb.MessageHash) ([]pb.MessageHash, error) +
MessageHashesExist(ctx context.Context, requestID []byte, peerInfo peer.AddrInfo, pageSize uint64, messageHashes []pb.MessageHash) ([]pb.MessageHash, error) } // MessageSentCheck tracks the outgoing messages and check against store node @@ -211,8 +211,8 @@ func (m *MessageSentCheck) Start() { } func (m *MessageSentCheck) messageHashBasedQuery(ctx context.Context, hashes []common.Hash, relayTime []uint32, pubsubTopic string) []common.Hash { - selectedPeer := m.storenodeCycle.GetActiveStorenode() - if selectedPeer == "" { + selectedPeer := m.storenodeCycle.GetActiveStorenodePeerInfo() + if selectedPeer.ID == "" { m.logger.Error("no store peer id available", zap.String("pubsubTopic", pubsubTopic)) return []common.Hash{} } @@ -224,13 +224,13 @@ func (m *MessageSentCheck) messageHashBasedQuery(ctx context.Context, hashes []c messageHashes[i] = pb.ToMessageHash(hash.Bytes()) } - m.logger.Debug("store.queryByHash request", zap.String("requestID", hexutil.Encode(requestID)), zap.Stringer("peerID", selectedPeer), zap.Stringers("messageHashes", messageHashes)) + m.logger.Debug("store.queryByHash request", zap.String("requestID", hexutil.Encode(requestID)), zap.Stringer("peerID", selectedPeer.ID), zap.Stringers("messageHashes", messageHashes)) queryCtx, cancel := context.WithTimeout(ctx, m.storeQueryTimeout) defer cancel() result, err := m.messageVerifier.MessageHashesExist(queryCtx, requestID, selectedPeer, m.maxHashQueryLength, messageHashes) if err != nil { - m.logger.Error("store.queryByHash failed", zap.String("requestID", hexutil.Encode(requestID)), zap.Stringer("peerID", selectedPeer), zap.Error(err)) + m.logger.Error("store.queryByHash failed", zap.String("requestID", hexutil.Encode(requestID)), zap.Stringer("peerID", selectedPeer.ID), zap.Error(err)) return []common.Hash{} } diff --git a/vendor/github.com/waku-org/go-waku/waku/v2/node/wakunode2.go b/vendor/github.com/waku-org/go-waku/waku/v2/node/wakunode2.go index 1ae5b2448..abb5ca604 100644 --- a/vendor/github.com/waku-org/go-waku/waku/v2/node/wakunode2.go +++ b/vendor/github.com/waku-org/go-waku/waku/v2/node/wakunode2.go @@ -703,8 +703,8 @@ func (w *WakuNode) startStore(ctx context.Context, sub *relay.Subscription) erro // AddPeer is used to add a peer and the protocols it support to the node peerstore // TODO: Need to update this for autosharding, to only take contentTopics and optional pubSubTopics or provide an alternate API only for contentTopics. -func (w *WakuNode) AddPeer(address ma.Multiaddr, origin wps.Origin, pubSubTopics []string, protocols ...protocol.ID) (peer.ID, error) { - pData, err := w.peermanager.AddPeer(address, origin, pubSubTopics, protocols...) +func (w *WakuNode) AddPeer(addresses []ma.Multiaddr, origin wps.Origin, pubSubTopics []string, protocols ...protocol.ID) (peer.ID, error) { + pData, err := w.peermanager.AddPeer(addresses, origin, pubSubTopics, protocols...) 
if err != nil { return "", err } diff --git a/vendor/github.com/waku-org/go-waku/waku/v2/peermanager/peer_manager.go b/vendor/github.com/waku-org/go-waku/waku/v2/peermanager/peer_manager.go index c543cbe8e..6321471af 100644 --- a/vendor/github.com/waku-org/go-waku/waku/v2/peermanager/peer_manager.go +++ b/vendor/github.com/waku-org/go-waku/waku/v2/peermanager/peer_manager.go @@ -684,13 +684,19 @@ func AddrInfoToPeerData(origin wps.Origin, peerID peer.ID, host host.Host, pubsu } // AddPeer adds peer to the peerStore and also to service slots -func (pm *PeerManager) AddPeer(address ma.Multiaddr, origin wps.Origin, pubsubTopics []string, protocols ...protocol.ID) (*service.PeerData, error) { +func (pm *PeerManager) AddPeer(addresses []ma.Multiaddr, origin wps.Origin, pubsubTopics []string, protocols ...protocol.ID) (*service.PeerData, error) { //Assuming all addresses have peerId - info, err := peer.AddrInfoFromP2pAddr(address) + infoArr, err := peer.AddrInfosFromP2pAddrs(addresses...) if err != nil { return nil, err } + if len(infoArr) > 1 { + return nil, errors.New("only a single peerID is expected in AddPeer") + } + + info := infoArr[0] + //Add Service peers to serviceSlots. for _, proto := range protocols { pm.addPeerToServiceSlot(proto, info.ID) @@ -703,11 +709,8 @@ func (pm *PeerManager) AddPeer(address ma.Multiaddr, origin wps.Origin, pubsubTo } pData := &service.PeerData{ - Origin: origin, - AddrInfo: peer.AddrInfo{ - ID: info.ID, - Addrs: info.Addrs, - }, + Origin: origin, + AddrInfo: info, PubsubTopics: pubsubTopics, } diff --git a/vendor/github.com/waku-org/go-waku/waku/v2/protocol/filter/client.go b/vendor/github.com/waku-org/go-waku/waku/v2/protocol/filter/client.go index 3d81048d6..a16477699 100644 --- a/vendor/github.com/waku-org/go-waku/waku/v2/protocol/filter/client.go +++ b/vendor/github.com/waku-org/go-waku/waku/v2/protocol/filter/client.go @@ -16,6 +16,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" libp2pProtocol "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-msgio/pbio" + "github.com/multiformats/go-multiaddr" "github.com/prometheus/client_golang/prometheus" "github.com/waku-org/go-waku/logging" "github.com/waku-org/go-waku/waku/v2/onlinechecker" @@ -343,7 +344,7 @@ func (wf *WakuFilterLightNode) handleFilterSubscribeOptions(ctx context.Context, //Add Peer to peerstore. 
if params.pm != nil && params.peerAddr != nil { - pData, err := wf.pm.AddPeer(params.peerAddr, peerstore.Static, maps.Keys(pubSubTopicMap), FilterSubscribeID_v20beta1) + pData, err := wf.pm.AddPeer([]multiaddr.Multiaddr{params.peerAddr}, peerstore.Static, maps.Keys(pubSubTopicMap), FilterSubscribeID_v20beta1) if err != nil { return nil, nil, err } diff --git a/vendor/github.com/waku-org/go-waku/waku/v2/protocol/filter/test_utils.go b/vendor/github.com/waku-org/go-waku/waku/v2/protocol/filter/test_utils.go index 88b9e04e6..c6f8d220c 100644 --- a/vendor/github.com/waku-org/go-waku/waku/v2/protocol/filter/test_utils.go +++ b/vendor/github.com/waku-org/go-waku/waku/v2/protocol/filter/test_utils.go @@ -10,6 +10,7 @@ import ( "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" + "github.com/multiformats/go-multiaddr" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/suite" "github.com/waku-org/go-waku/tests" @@ -102,7 +103,7 @@ func (s *FilterTestSuite) TearDownTest() { func (s *FilterTestSuite) ConnectToFullNode(h1 *WakuFilterLightNode, h2 *WakuFilterFullNode) { mAddr := tests.GetAddr(h2.h) - _, err := h1.pm.AddPeer(mAddr, wps.Static, []string{s.TestTopic}, FilterSubscribeID_v20beta1) + _, err := h1.pm.AddPeer([]multiaddr.Multiaddr{mAddr}, wps.Static, []string{s.TestTopic}, FilterSubscribeID_v20beta1) s.Log.Info("add peer", zap.Stringer("mAddr", mAddr)) s.Require().NoError(err) } diff --git a/vendor/github.com/waku-org/go-waku/waku/v2/protocol/legacy_store/waku_store_client.go b/vendor/github.com/waku-org/go-waku/waku/v2/protocol/legacy_store/waku_store_client.go index ef971f003..61781e44c 100644 --- a/vendor/github.com/waku-org/go-waku/waku/v2/protocol/legacy_store/waku_store_client.go +++ b/vendor/github.com/waku-org/go-waku/waku/v2/protocol/legacy_store/waku_store_client.go @@ -310,7 +310,7 @@ func (store *WakuStore) Query(ctx context.Context, query Query, opts ...HistoryR //Add Peer to peerstore. 
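The AddPeer call sites in this and the surrounding hunks all follow the same mechanical rewrite: go-waku's PeerManager.AddPeer now takes a slice of multiaddresses (all expected to resolve to a single peer ID), so single-address callers wrap their address in a slice. A sketch of the pattern, where pm, pubsubTopic and protocolID stand in for the surrounding code's values and the address is a placeholder:

    // Hypothetical single-address call site after the signature change.
    addr, err := multiaddr.NewMultiaddr("/ip4/192.0.2.1/tcp/60000/p2p/<peer-id>") // placeholder address
    if err != nil {
        return err
    }
    // Before: pm.AddPeer(addr, peerstore.Static, []string{pubsubTopic}, protocolID)
    _, err = pm.AddPeer([]multiaddr.Multiaddr{addr}, peerstore.Static, []string{pubsubTopic}, protocolID)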
if store.pm != nil && params.peerAddr != nil { - pData, err := store.pm.AddPeer(params.peerAddr, peerstore.Static, pubsubTopics, StoreID_v20beta4) + pData, err := store.pm.AddPeer([]multiaddr.Multiaddr{params.peerAddr}, peerstore.Static, pubsubTopics, StoreID_v20beta4) if err != nil { return nil, err } diff --git a/vendor/github.com/waku-org/go-waku/waku/v2/protocol/lightpush/waku_lightpush.go b/vendor/github.com/waku-org/go-waku/waku/v2/protocol/lightpush/waku_lightpush.go index 7e411a4ac..10eaddfd5 100644 --- a/vendor/github.com/waku-org/go-waku/waku/v2/protocol/lightpush/waku_lightpush.go +++ b/vendor/github.com/waku-org/go-waku/waku/v2/protocol/lightpush/waku_lightpush.go @@ -14,6 +14,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" libp2pProtocol "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-msgio/pbio" + "github.com/multiformats/go-multiaddr" "github.com/prometheus/client_golang/prometheus" "github.com/waku-org/go-waku/logging" "github.com/waku-org/go-waku/waku/v2/peermanager" @@ -273,7 +274,7 @@ func (wakuLP *WakuLightPush) handleOpts(ctx context.Context, message *wpb.WakuMe } if params.pm != nil && params.peerAddr != nil { - pData, err := wakuLP.pm.AddPeer(params.peerAddr, peerstore.Static, []string{params.pubsubTopic}, LightPushID_v20beta1) + pData, err := wakuLP.pm.AddPeer([]multiaddr.Multiaddr{params.peerAddr}, peerstore.Static, []string{params.pubsubTopic}, LightPushID_v20beta1) if err != nil { return nil, err } diff --git a/vendor/github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange/client.go b/vendor/github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange/client.go index ef1f7bb9a..94d702035 100644 --- a/vendor/github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange/client.go +++ b/vendor/github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange/client.go @@ -10,6 +10,7 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-msgio/pbio" + "github.com/multiformats/go-multiaddr" "github.com/waku-org/go-waku/waku/v2/peermanager" "github.com/waku-org/go-waku/waku/v2/peerstore" "github.com/waku-org/go-waku/waku/v2/protocol" @@ -36,7 +37,7 @@ func (wakuPX *WakuPeerExchange) Request(ctx context.Context, numPeers int, opts } if params.pm != nil && params.peerAddr != nil { - pData, err := wakuPX.pm.AddPeer(params.peerAddr, peerstore.Static, []string{}, PeerExchangeID_v20alpha1) + pData, err := wakuPX.pm.AddPeer([]multiaddr.Multiaddr{params.peerAddr}, peerstore.Static, []string{}, PeerExchangeID_v20alpha1) if err != nil { return err } diff --git a/vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/client.go b/vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/client.go index febb863e5..29f0bf038 100644 --- a/vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/client.go +++ b/vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/client.go @@ -194,15 +194,15 @@ func (s *WakuStore) Request(ctx context.Context, criteria Criteria, opts ...Requ return result, nil } -func (s *WakuStore) RequestRaw(ctx context.Context, peerID peer.ID, storeRequest *pb.StoreQueryRequest) (Result, error) { +func (s *WakuStore) RequestRaw(ctx context.Context, peerInfo peer.AddrInfo, storeRequest *pb.StoreQueryRequest) (Result, error) { err := storeRequest.Validate() if err != nil { return nil, err } var params Parameters - params.selectedPeer = peerID - if params.selectedPeer == "" { + params.peerAddr = peerInfo.Addrs + if len(params.peerAddr) == 0 { return nil, ErrMustSelectPeer } diff --git 
a/vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/options.go b/vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/options.go index facb3f54f..e6218cc7c 100644 --- a/vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/options.go +++ b/vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/options.go @@ -11,7 +11,7 @@ import ( type Parameters struct { selectedPeer peer.ID - peerAddr multiaddr.Multiaddr + peerAddr []multiaddr.Multiaddr peerSelectionType peermanager.PeerSelection preferredPeers peer.IDSlice requestID []byte @@ -33,7 +33,7 @@ type RequestOption func(*Parameters) error func WithPeer(p peer.ID) RequestOption { return func(params *Parameters) error { params.selectedPeer = p - if params.peerAddr != nil { + if len(params.peerAddr) != 0 { return errors.New("WithPeer and WithPeerAddr options are mutually exclusive") } return nil @@ -43,7 +43,7 @@ func WithPeer(p peer.ID) RequestOption { // WithPeerAddr is an option used to specify a peerAddress to request the message history. // This new peer will be added to peerStore. // Note that this option is mutually exclusive to WithPeer, only one of them can be used. -func WithPeerAddr(pAddr multiaddr.Multiaddr) RequestOption { +func WithPeerAddr(pAddr ...multiaddr.Multiaddr) RequestOption { return func(params *Parameters) error { params.peerAddr = pAddr if params.selectedPeer != "" { diff --git a/wakuv2/api_test.go b/wakuv2/api_test.go index ef8c7ab5e..d3353ffa3 100644 --- a/wakuv2/api_test.go +++ b/wakuv2/api_test.go @@ -18,13 +18,13 @@ package wakuv2 +/* TODO-nwaku import ( "testing" "time" "golang.org/x/exp/maps" - "github.com/status-im/status-go/protocol/common/shard" "github.com/status-im/status-go/wakuv2/common" ) @@ -57,7 +57,7 @@ func TestMultipleTopicCopyInNewMessageFilter(t *testing.T) { } found := false - candidates := w.filters.GetWatchersByTopic(shard.DefaultShardPubsubTopic(), t1) + candidates := w.filters.GetWatchersByTopic(DefaultShardPubsubTopic(), t1) for _, f := range candidates { if maps.Equal(f.ContentTopics, common.NewTopicSet(crit.ContentTopics)) { found = true @@ -69,3 +69,4 @@ func TestMultipleTopicCopyInNewMessageFilter(t *testing.T) { t.Fatalf("Could not find filter with both topics") } } +*/ diff --git a/wakuv2/common/envelope.go b/wakuv2/common/envelope.go new file mode 100644 index 000000000..dbf80cb24 --- /dev/null +++ b/wakuv2/common/envelope.go @@ -0,0 +1,79 @@ +package common + +import ( + "encoding/json" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/waku-org/go-waku/waku/v2/protocol/pb" +) + +// Envelope contains information about the pubsub topic of a WakuMessage +// and a hash used to identify a message based on the bytes of a WakuMessage +// protobuf +type Envelope interface { + Message() *pb.WakuMessage + PubsubTopic() string + Hash() pb.MessageHash +} + +type envelopeImpl struct { + msg *pb.WakuMessage + topic string + hash pb.MessageHash +} + +type tmpWakuMessageJson struct { + Payload []byte `json:"payload,omitempty"` + ContentTopic string `json:"contentTopic,omitempty"` + Version *uint32 `json:"version,omitempty"` + Timestamp *int64 `json:"timestamp,omitempty"` + Meta []byte `json:"meta,omitempty"` + Ephemeral *bool `json:"ephemeral,omitempty"` + RateLimitProof []byte `json:"proof,omitempty"` +} + +type tmpEnvelopeStruct struct { + WakuMessage tmpWakuMessageJson `json:"wakuMessage"` + PubsubTopic string `json:"pubsubTopic"` + MessageHash string `json:"messageHash"` +} + +// NewEnvelope creates a new Envelope from a JSON string generated in nwaku
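Given the struct tags above, the JSON event that NewEnvelope (below) consumes would look roughly like this hypothetical example; payload bytes arrive base64-encoded per encoding/json's []byte convention, and the hash must be 0x-prefixed hex so hexutil.Decode accepts it:

    // Hypothetical nwaku event string and its parsing (values are illustrative).
    jsonEvent := `{"pubsubTopic":"/waku/2/rs/16/32","messageHash":"0x1a2b3c4d","wakuMessage":{"payload":"aGVsbG8=","contentTopic":"/status/1/chat/proto","timestamp":1700000000000000000}}`
    env, err := common.NewEnvelope(jsonEvent)
    if err != nil {
        return err
    }
    _ = env.Hash() // pb.MessageHash built from the hex-decoded messageHash field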
+func NewEnvelope(jsonEventStr string) (Envelope, error) { + tmpEnvelopeStruct := tmpEnvelopeStruct{} + err := json.Unmarshal([]byte(jsonEventStr), &tmpEnvelopeStruct) + if err != nil { + return nil, err + } + + hash, err := hexutil.Decode(tmpEnvelopeStruct.MessageHash) + if err != nil { + return nil, err + } + + return &envelopeImpl{ + msg: &pb.WakuMessage{ + Payload: tmpEnvelopeStruct.WakuMessage.Payload, + ContentTopic: tmpEnvelopeStruct.WakuMessage.ContentTopic, + Version: tmpEnvelopeStruct.WakuMessage.Version, + Timestamp: tmpEnvelopeStruct.WakuMessage.Timestamp, + Meta: tmpEnvelopeStruct.WakuMessage.Meta, + Ephemeral: tmpEnvelopeStruct.WakuMessage.Ephemeral, + RateLimitProof: tmpEnvelopeStruct.WakuMessage.RateLimitProof, + }, + topic: tmpEnvelopeStruct.PubsubTopic, + hash: pb.ToMessageHash(hash), + }, nil +} + +func (e *envelopeImpl) Message() *pb.WakuMessage { + return e.msg +} + +func (e *envelopeImpl) PubsubTopic() string { + return e.topic +} + +func (e *envelopeImpl) Hash() pb.MessageHash { + return e.hash +} diff --git a/wakuv2/common/message.go b/wakuv2/common/message.go index 58521ae75..977bea6d8 100644 --- a/wakuv2/common/message.go +++ b/wakuv2/common/message.go @@ -9,7 +9,6 @@ import ( "go.uber.org/zap" "github.com/waku-org/go-waku/waku/v2/payload" - "github.com/waku-org/go-waku/waku/v2/protocol" "github.com/status-im/status-go/logutils" @@ -41,7 +40,7 @@ type MessageParams struct { // ReceivedMessage represents a data packet to be received through the // WakuV2 protocol and successfully decrypted. type ReceivedMessage struct { - Envelope *protocol.Envelope // Wrapped Waku Message + Envelope Envelope // Wrapped Waku Message MsgType MessageType @@ -105,7 +104,7 @@ type MemoryMessageStore struct { messages map[common.Hash]*ReceivedMessage } -func NewReceivedMessage(env *protocol.Envelope, msgType MessageType) *ReceivedMessage { +func NewReceivedMessage(env Envelope, msgType MessageType) *ReceivedMessage { ct, err := ExtractTopicFromContentTopic(env.Message().ContentTopic) if err != nil { logutils.ZapLogger().Debug("failed to extract content topic from message", diff --git a/wakuv2/config.go b/wakuv2/config.go index 0d73fe98a..7bad536b3 100644 --- a/wakuv2/config.go +++ b/wakuv2/config.go @@ -23,8 +23,6 @@ import ( "go.uber.org/zap" - "github.com/status-im/status-go/protocol/common/shard" - ethdisc "github.com/ethereum/go-ethereum/p2p/dnsdisc" "github.com/status-im/status-go/wakuv2/common" @@ -117,10 +115,10 @@ func setDefaults(cfg *Config) *Config { } if cfg.DefaultShardPubsubTopic == "" { - cfg.DefaultShardPubsubTopic = shard.DefaultShardPubsubTopic() + cfg.DefaultShardPubsubTopic = DefaultShardPubsubTopic() //For now populating with both used shards, but this can be populated from user subscribed communities etc once community sharding is implemented - cfg.DefaultShardedPubsubTopics = append(cfg.DefaultShardedPubsubTopics, shard.DefaultShardPubsubTopic()) - cfg.DefaultShardedPubsubTopics = append(cfg.DefaultShardedPubsubTopics, shard.DefaultNonProtectedPubsubTopic()) + cfg.DefaultShardedPubsubTopics = append(cfg.DefaultShardedPubsubTopics, DefaultShardPubsubTopic()) + cfg.DefaultShardedPubsubTopics = append(cfg.DefaultShardedPubsubTopics, DefaultNonProtectedPubsubTopic()) } return cfg diff --git a/wakuv2/waku.go b/wakuv2/gowaku.go similarity index 99% rename from wakuv2/waku.go rename to wakuv2/gowaku.go index b04866f6e..2ecc68619 100644 --- a/wakuv2/waku.go +++ b/wakuv2/gowaku.go @@ -1,3 +1,6 @@ +//go:build !use_nwaku +// +build !use_nwaku + // Copyright 2019 The Waku 
Library Authors. // // The Waku library is free software: you can redistribute it and/or modify @@ -1624,8 +1627,8 @@ func (w *Waku) RelayPeersByTopic(topic string) (*types.PeerList, error) { }, nil } -func (w *Waku) ListenAddresses() []multiaddr.Multiaddr { - return w.node.ListenAddresses() +func (w *Waku) ListenAddresses() ([]multiaddr.Multiaddr, error) { + return w.node.ListenAddresses(), nil } func (w *Waku) ENR() (*enode.Node, error) { @@ -1878,20 +1881,12 @@ func (w *Waku) restartDiscV5(useOnlyDNSDiscCache bool) error { return w.node.SetDiscV5Bootnodes(bootnodes) } -func (w *Waku) AddStorePeer(address multiaddr.Multiaddr) (peer.ID, error) { - peerID, err := w.node.AddPeer(address, wps.Static, w.cfg.DefaultShardedPubsubTopics, store.StoreQueryID_v300) - if err != nil { - return "", err - } - return peerID, nil -} - func (w *Waku) timestamp() int64 { return w.timesource.Now().UnixNano() } func (w *Waku) AddRelayPeer(address multiaddr.Multiaddr) (peer.ID, error) { - peerID, err := w.node.AddPeer(address, wps.Static, w.cfg.DefaultShardedPubsubTopics, relay.WakuRelayID_v200) + peerID, err := w.node.AddPeer([]multiaddr.Multiaddr{address}, wps.Static, w.cfg.DefaultShardedPubsubTopics, relay.WakuRelayID_v200) if err != nil { return "", err } @@ -2009,3 +2004,8 @@ func FormatPeerConnFailures(wakuNode *node.WakuNode) map[string]int { func (w *Waku) LegacyStoreNode() legacy_store.Store { return w.node.LegacyStore() } + +func (w *Waku) ListPeersInMesh(pubsubTopic string) (int, error) { + listPeers := w.node.Relay().PubSub().ListPeers(pubsubTopic) + return len(listPeers), nil +} diff --git a/wakuv2/history_processor_wrapper.go b/wakuv2/history_processor_wrapper.go index eba4b3809..fe4ed93f6 100644 --- a/wakuv2/history_processor_wrapper.go +++ b/wakuv2/history_processor_wrapper.go @@ -3,10 +3,10 @@ package wakuv2 import ( "github.com/libp2p/go-libp2p/core/peer" + "github.com/status-im/status-go/wakuv2/common" + "github.com/waku-org/go-waku/waku/v2/api/history" "github.com/waku-org/go-waku/waku/v2/protocol" - - "github.com/status-im/status-go/wakuv2/common" ) type HistoryProcessorWrapper struct { @@ -21,6 +21,6 @@ func (hr *HistoryProcessorWrapper) OnEnvelope(env *protocol.Envelope, processEnv return hr.waku.OnNewEnvelopes(env, common.StoreMessageType, processEnvelopes) } -func (hr *HistoryProcessorWrapper) OnRequestFailed(requestID []byte, peerID peer.ID, err error) { - hr.waku.onHistoricMessagesRequestFailed(requestID, peerID, err) +func (hr *HistoryProcessorWrapper) OnRequestFailed(requestID []byte, peerInfo peer.AddrInfo, err error) { + hr.waku.onHistoricMessagesRequestFailed(requestID, peerInfo, err) } diff --git a/wakuv2/message_publishing.go b/wakuv2/message_publishing.go index 93543bc6e..3be0ea96d 100644 --- a/wakuv2/message_publishing.go +++ b/wakuv2/message_publishing.go @@ -92,6 +92,7 @@ func (w *Waku) publishEnvelope(envelope *protocol.Envelope) { err = w.messageSender.Send(publish.NewRequest(w.ctx, envelope)) } + /* TODO-nwaku if w.statusTelemetryClient != nil { if err == nil { w.statusTelemetryClient.PushSentEnvelope(w.ctx, SentEnvelope{Envelope: envelope, PublishMethod: w.messageSender.PublishMethod()}) @@ -99,6 +100,7 @@ func (w *Waku) publishEnvelope(envelope *protocol.Envelope) { w.statusTelemetryClient.PushErrorSendingEnvelope(w.ctx, ErrorSendingEnvelope{Error: err, SentEnvelope: SentEnvelope{Envelope: envelope, PublishMethod: w.messageSender.PublishMethod()}}) } } + */ if err != nil { logger.Error("could not send message", zap.Error(err)) diff --git a/wakuv2/nwaku.go 
b/wakuv2/nwaku.go index ed9f3e80c..c6a04c91e 100644 --- a/wakuv2/nwaku.go +++ b/wakuv2/nwaku.go @@ -1,58 +1,3345 @@ +//go:build use_nwaku +// +build use_nwaku + package wakuv2 +/* + #cgo LDFLAGS: -L../third_party/nwaku/build/ -lnegentropy -lwaku + #cgo LDFLAGS: -L../third_party/nwaku -Wl,-rpath,../third_party/nwaku/build/ + + #include "../third_party/nwaku/library/libwaku.h" + #include <stdio.h> + #include <stdlib.h> + + extern void globalEventCallback(int ret, char* msg, size_t len, void* userData); + + typedef struct { + int ret; + char* msg; + size_t len; + void* wg; + } Resp; + + static void* allocResp(void* wg) { + Resp* r = calloc(1, sizeof(Resp)); + r->wg = wg; + return r; + } + + static void freeResp(void* resp) { + if (resp != NULL) { + free(resp); + } + } + + static char* getMyCharPtr(void* resp) { + if (resp == NULL) { + return NULL; + } + Resp* m = (Resp*) resp; + return m->msg; + } + + static size_t getMyCharLen(void* resp) { + if (resp == NULL) { + return 0; + } + Resp* m = (Resp*) resp; + return m->len; + } + + static int getRet(void* resp) { + if (resp == NULL) { + return 0; + } + Resp* m = (Resp*) resp; + return m->ret; + } + // resp must be set != NULL if the caller is interested in retrieving data from the callback + void GoCallback(int ret, char* msg, size_t len, void* resp); + + #define WAKU_CALL(call) \ + do { \ + int ret = call; \ + if (ret != 0) { \ + printf("Failed the call to: %s. Returned code: %d\n", #call, ret); \ + exit(1); \ + } \ + } while (0) + + static void* cGoWakuNew(const char* configJson, void* resp) { + // resp is forwarded so that GoCallback can hand the result of waku_new back to the caller + void* ret = waku_new(configJson, (WakuCallBack) GoCallback, resp); + return ret; + } + + static void cGoWakuStart(void* wakuCtx, void* resp) { + WAKU_CALL(waku_start(wakuCtx, (WakuCallBack) GoCallback, resp)); + } + + static void cGoWakuStop(void* wakuCtx, void* resp) { + WAKU_CALL(waku_stop(wakuCtx, (WakuCallBack) GoCallback, resp)); + } + + static void cGoWakuDestroy(void* wakuCtx, void* resp) { + WAKU_CALL(waku_destroy(wakuCtx, (WakuCallBack) GoCallback, resp)); + } + + static void cGoWakuStartDiscV5(void* wakuCtx, void* resp) { + WAKU_CALL(waku_start_discv5(wakuCtx, (WakuCallBack) GoCallback, resp)); + } + + static void cGoWakuStopDiscV5(void* wakuCtx, void* resp) { + WAKU_CALL(waku_stop_discv5(wakuCtx, (WakuCallBack) GoCallback, resp)); + } + + static void cGoWakuVersion(void* wakuCtx, void* resp) { + WAKU_CALL(waku_version(wakuCtx, (WakuCallBack) GoCallback, resp)); + } + + static void cGoWakuSetEventCallback(void* wakuCtx) { + // The 'globalEventCallback' Go function is shared amongst all possible Waku instances. + + // Given that 'globalEventCallback' is shared, we pass the wakuCtx instance again; + // in this case it is needed to pick up the correct method + // that will handle the event. + + // In other words, for every call libwaku makes to globalEventCallback, + // the 'userData' parameter carries the context of the node that registered + // that globalEventCallback. + + // This technique is needed because cgo only allows exporting Go functions, not methods.
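The C preamble above only declares GoCallback; a matching symbol has to be exported from the Go side, and because this preamble contains C definitions, cgo requires the //export to live in a separate file of the same package. A rough sketch of what that export could look like, assuming the void* wg stored by allocResp is a *sync.WaitGroup the Go caller blocks on (storing Go pointers in C-visible memory is constrained by cgo's pointer rules, so treat this purely as an illustration):

    //export GoCallback
    func GoCallback(ret C.int, msg *C.char, len C.size_t, resp unsafe.Pointer) {
        if resp != nil {
            r := (*C.Resp)(resp) // the Resp allocated by allocResp
            r.ret = ret
            r.msg = msg
            r.len = len
            if r.wg != nil {
                (*sync.WaitGroup)(r.wg).Done() // wake the Go caller waiting for this response
            }
        }
    }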
+ + waku_set_event_callback(wakuCtx, (WakuCallBack) globalEventCallback, wakuCtx); + } + + static void cGoWakuContentTopic(void* wakuCtx, + char* appName, + int appVersion, + char* contentTopicName, + char* encoding, + void* resp) { + + WAKU_CALL( waku_content_topic(wakuCtx, + appName, + appVersion, + contentTopicName, + encoding, + (WakuCallBack) GoCallback, + resp) ); + } + + static void cGoWakuPubsubTopic(void* wakuCtx, char* topicName, void* resp) { + WAKU_CALL( waku_pubsub_topic(wakuCtx, topicName, (WakuCallBack) GoCallback, resp) ); + } + + static void cGoWakuDefaultPubsubTopic(void* wakuCtx, void* resp) { + WAKU_CALL (waku_default_pubsub_topic(wakuCtx, (WakuCallBack) GoCallback, resp)); + } + + static void cGoWakuRelayPublish(void* wakuCtx, + const char* pubSubTopic, + const char* jsonWakuMessage, + int timeoutMs, + void* resp) { + + WAKU_CALL (waku_relay_publish(wakuCtx, + pubSubTopic, + jsonWakuMessage, + timeoutMs, + (WakuCallBack) GoCallback, + resp)); + } + + static void cGoWakuRelaySubscribe(void* wakuCtx, char* pubSubTopic, void* resp) { + WAKU_CALL ( waku_relay_subscribe(wakuCtx, + pubSubTopic, + (WakuCallBack) GoCallback, + resp) ); + } + + static void cGoWakuRelayAddProtectedShard(void* wakuCtx, int clusterId, int shardId, char* publicKey, void* resp) { + WAKU_CALL ( waku_relay_add_protected_shard(wakuCtx, + clusterId, + shardId, + publicKey, + (WakuCallBack) GoCallback, + resp) ); + } + + static void cGoWakuRelayUnsubscribe(void* wakuCtx, char* pubSubTopic, void* resp) { + + WAKU_CALL ( waku_relay_unsubscribe(wakuCtx, + pubSubTopic, + (WakuCallBack) GoCallback, + resp) ); + } + + static void cGoWakuConnect(void* wakuCtx, char* peerMultiAddr, int timeoutMs, void* resp) { + WAKU_CALL( waku_connect(wakuCtx, + peerMultiAddr, + timeoutMs, + (WakuCallBack) GoCallback, + resp) ); + } + + static void cGoWakuDialPeer(void* wakuCtx, + char* peerMultiAddr, + char* protocol, + int timeoutMs, + void* resp) { + + WAKU_CALL( waku_dial_peer(wakuCtx, + peerMultiAddr, + protocol, + timeoutMs, + (WakuCallBack) GoCallback, + resp) ); + } + + static void cGoWakuDialPeerById(void* wakuCtx, + char* peerId, + char* protocol, + int timeoutMs, + void* resp) { + + WAKU_CALL( waku_dial_peer_by_id(wakuCtx, + peerId, + protocol, + timeoutMs, + (WakuCallBack) GoCallback, + resp) ); + } + + static void cGoWakuDisconnectPeerById(void* wakuCtx, char* peerId, void* resp) { + WAKU_CALL( waku_disconnect_peer_by_id(wakuCtx, + peerId, + (WakuCallBack) GoCallback, + resp) ); + } + + static void cGoWakuListenAddresses(void* wakuCtx, void* resp) { + WAKU_CALL (waku_listen_addresses(wakuCtx, (WakuCallBack) GoCallback, resp) ); + } + + static void cGoWakuGetMyENR(void* ctx, void* resp) { + WAKU_CALL (waku_get_my_enr(ctx, (WakuCallBack) GoCallback, resp) ); + } + + static void cGoWakuGetMyPeerId(void* ctx, void* resp) { + WAKU_CALL (waku_get_my_peerid(ctx, (WakuCallBack) GoCallback, resp) ); + } + + static void cGoWakuPingPeer(void* ctx, char* peerAddr, int timeoutMs, void* resp) { + WAKU_CALL (waku_ping_peer(ctx, peerAddr, timeoutMs, (WakuCallBack) GoCallback, resp) ); + } + + static void cGoWakuListPeersInMesh(void* ctx, char* pubSubTopic, void* resp) { + WAKU_CALL (waku_relay_get_num_peers_in_mesh(ctx, pubSubTopic, (WakuCallBack) GoCallback, resp) ); + } + + static void cGoWakuGetNumConnectedRelayPeers(void* ctx, char* pubSubTopic, void* resp) { + WAKU_CALL (waku_relay_get_num_connected_peers(ctx, pubSubTopic, (WakuCallBack) GoCallback, resp) ); + } + + static void cGoWakuGetConnectedPeers(void* wakuCtx, void* 
resp) { + WAKU_CALL (waku_get_connected_peers(wakuCtx, (WakuCallBack) GoCallback, resp) ); + } + + static void cGoWakuGetPeerIdsFromPeerStore(void* wakuCtx, void* resp) { + WAKU_CALL (waku_get_peerids_from_peerstore(wakuCtx, (WakuCallBack) GoCallback, resp) ); + } + + static void cGoWakuLightpushPublish(void* wakuCtx, + const char* pubSubTopic, + const char* jsonWakuMessage, + void* resp) { + + WAKU_CALL (waku_lightpush_publish(wakuCtx, + pubSubTopic, + jsonWakuMessage, + (WakuCallBack) GoCallback, + resp)); + } + + static void cGoWakuStoreQuery(void* wakuCtx, + const char* jsonQuery, + const char* peerAddr, + int timeoutMs, + void* resp) { + + WAKU_CALL (waku_store_query(wakuCtx, + jsonQuery, + peerAddr, + timeoutMs, + (WakuCallBack) GoCallback, + resp)); + } + + static void cGoWakuPeerExchangeQuery(void* wakuCtx, + uint64_t numPeers, + void* resp) { + + WAKU_CALL (waku_peer_exchange_request(wakuCtx, + numPeers, + (WakuCallBack) GoCallback, + resp)); + } + + static void cGoWakuGetPeerIdsByProtocol(void* wakuCtx, + const char* protocol, + void* resp) { + + WAKU_CALL (waku_get_peerids_by_protocol(wakuCtx, + protocol, + (WakuCallBack) GoCallback, + resp)); + } + + static void cGoWakuDnsDiscovery(void* wakuCtx, + const char* entTreeUrl, + const char* nameDnsServer, + int timeoutMs, + void* resp) { + + WAKU_CALL (waku_dns_discovery(wakuCtx, + entTreeUrl, + nameDnsServer, + timeoutMs, + (WakuCallBack) GoCallback, + resp)); + } + +*/ +import "C" + import ( + "context" + "crypto/ecdsa" + "crypto/sha256" + "database/sql" + "encoding/hex" "encoding/json" + "errors" "fmt" - "io" - "net/http" - "os" + "net" + "runtime" "strconv" + "strings" + "sync" + "testing" + "time" + "unsafe" + + "github.com/jellydator/ttlcache/v3" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/multiformats/go-multiaddr" + + "go.uber.org/zap" + + "golang.org/x/crypto/pbkdf2" + "golang.org/x/time/rate" + + gethcommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/rpc" + + "github.com/libp2p/go-libp2p/core/metrics" + + libp2pproto "github.com/libp2p/go-libp2p/core/protocol" + + filterapi "github.com/waku-org/go-waku/waku/v2/api/filter" + "github.com/waku-org/go-waku/waku/v2/api/history" + "github.com/waku-org/go-waku/waku/v2/api/missing" + "github.com/waku-org/go-waku/waku/v2/api/publish" + "github.com/waku-org/go-waku/waku/v2/dnsdisc" + "github.com/waku-org/go-waku/waku/v2/onlinechecker" + "github.com/waku-org/go-waku/waku/v2/peermanager" + wps "github.com/waku-org/go-waku/waku/v2/peerstore" + "github.com/waku-org/go-waku/waku/v2/protocol" + + "github.com/waku-org/go-waku/waku/v2/protocol/legacy_store" + "github.com/waku-org/go-waku/waku/v2/protocol/relay" + "github.com/waku-org/go-waku/waku/v2/protocol/store" + storepb "github.com/waku-org/go-waku/waku/v2/protocol/store/pb" + "github.com/waku-org/go-waku/waku/v2/utils" + + gocommon "github.com/status-im/status-go/common" + "github.com/status-im/status-go/connection" + "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/logutils" + "github.com/status-im/status-go/timesource" + "github.com/status-im/status-go/wakuv2/common" + "github.com/status-im/status-go/wakuv2/persistence" + + node "github.com/waku-org/go-waku/waku/v2/node" + 
"github.com/waku-org/go-waku/waku/v2/protocol/pb" ) -type NwakuInfo struct { - ListenAddresses []string `json:"listenAddresses"` - EnrUri string `json:"enrUri"` +const messageQueueLimit = 1024 +const requestTimeout = 30 * time.Second +const bootnodesQueryBackoffMs = 200 +const bootnodesMaxRetries = 7 +const cacheTTL = 20 * time.Minute +const maxRelayPeers = 300 +const randomPeersKeepAliveInterval = 5 * time.Second +const allPeersKeepAliveInterval = 5 * time.Minute + +type SentEnvelope struct { + Envelope common.Envelope + PublishMethod publish.PublishMethod } -func GetNwakuInfo(host *string, port *int) (NwakuInfo, error) { - nwakuRestPort := 8645 - if port != nil { - nwakuRestPort = *port +type ErrorSendingEnvelope struct { + Error error + SentEnvelope SentEnvelope +} + +type ITelemetryClient interface { + SetDeviceType(deviceType string) + PushSentEnvelope(ctx context.Context, sentEnvelope SentEnvelope) + PushErrorSendingEnvelope(ctx context.Context, errorSendingEnvelope ErrorSendingEnvelope) + PushPeerCount(ctx context.Context, peerCount int) + PushPeerConnFailures(ctx context.Context, peerConnFailures map[string]int) + PushMessageCheckSuccess(ctx context.Context, messageHash string) + PushMessageCheckFailure(ctx context.Context, messageHash string) + PushPeerCountByShard(ctx context.Context, peerCountByShard map[uint16]uint) + PushPeerCountByOrigin(ctx context.Context, peerCountByOrigin map[wps.Origin]uint) + PushDialFailure(ctx context.Context, dialFailure common.DialError) + PushMissedMessage(ctx context.Context, envelope *protocol.Envelope) + PushMissedRelevantMessage(ctx context.Context, message *common.ReceivedMessage) + PushMessageDeliveryConfirmed(ctx context.Context, messageHash string) + PushSentMessageTotal(ctx context.Context, messageSize uint32) +} + +type WakuMessageHash = string +type WakuPubsubTopic = string +type WakuContentTopic = string + +type WakuConfig struct { + Host string `json:"host,omitempty"` + NodeKey string `json:"nodekey,omitempty"` + EnableRelay bool `json:"relay"` + LogLevel string `json:"logLevel"` + DnsDiscovery bool `json:"dnsDiscovery,omitempty"` + DnsDiscoveryUrl string `json:"dnsDiscoveryUrl,omitempty"` + MaxMessageSize string `json:"maxMessageSize,omitempty"` + Staticnodes []string `json:"staticnodes,omitempty"` + Discv5BootstrapNodes []string `json:"discv5BootstrapNodes,omitempty"` + Discv5Discovery bool `json:"discv5Discovery,omitempty"` + Discv5UdpPort int `json:"discv5UdpPort,omitempty"` + ClusterID uint16 `json:"clusterId,omitempty"` + Shards []uint16 `json:"shards,omitempty"` + PeerExchange bool `json:"peerExchange,omitempty"` + PeerExchangeNode string `json:"peerExchangeNode,omitempty"` + Filter bool `json:"filter,omitempty"` + FilterMaxPeersToServe int `json:"filterMaxPeersToServe,omitempty"` + Lightpush bool `json:"lightpush,omitempty"` + TcpPort int `json:"tcpPort,omitempty"` + RateLimits RateLimitsConfig `json:"rateLimits,omitempty"` +} + +type RateLimitsConfig struct { + Filter *RateLimit `json:"-"` + Lightpush *RateLimit `json:"-"` + PeerExchange *RateLimit `json:"-"` +} + +func (rlc RateLimitsConfig) MarshalJSON() ([]byte, error) { + output := []string{} + if rlc.Filter != nil { + output = append(output, fmt.Sprintf("filter:%s", rlc.Filter.String())) } - envNwakuRestPort := os.Getenv("NWAKU_REST_PORT") - if envNwakuRestPort != "" { - v, err := strconv.Atoi(envNwakuRestPort) - if err != nil { - return NwakuInfo{}, err + if rlc.Lightpush != nil { + output = append(output, fmt.Sprintf("lightpush:%s", rlc.Lightpush.String())) + } + if 
rlc.PeerExchange != nil { + output = append(output, fmt.Sprintf("px:%s", rlc.PeerExchange.String())) + } + return json.Marshal(output) +} + +type RateLimitUnit string + +const Hour RateLimitUnit = "h" +const Minute RateLimitUnit = "m" +const Second RateLimitUnit = "s" +const Millisecond RateLimitUnit = "ms" + +type RateLimit struct { + Volume int + Period int + Unit RateLimitUnit +} + +func (rl RateLimit) String() string { + return fmt.Sprintf("%d/%d%s", rl.Volume, rl.Period, rl.Unit) +} + +func (rl RateLimit) MarshalJSON() ([]byte, error) { + return json.Marshal(rl.String()) +} + +// Waku represents a dark communication interface through the Ethereum +// network, using its very own P2P communication layer. +type Waku struct { + node *WakuNode + + appDB *sql.DB + + dnsAddressCache map[string][]dnsdisc.DiscoveredNode // Map to store the multiaddresses returned by dns discovery + dnsAddressCacheLock *sync.RWMutex // lock to handle access to the map + dnsDiscAsyncRetrievedSignal chan struct{} + + // Filter-related + filters *common.Filters // Message filters installed with Subscribe function + filterManager *filterapi.FilterManager + + privateKeys map[string]*ecdsa.PrivateKey // Private key storage + symKeys map[string][]byte // Symmetric key storage + keyMu sync.RWMutex // Mutex associated with key stores + + envelopeCache *ttlcache.Cache[gethcommon.Hash, *common.ReceivedMessage] // Pool of envelopes currently tracked by this node + poolMu sync.RWMutex // Mutex to sync the message and expiration pools + + bandwidthCounter *metrics.BandwidthCounter + + protectedTopicStore *persistence.ProtectedTopicsStore + + sendQueue *publish.MessageQueue + + missingMsgVerifier *missing.MissingMessageVerifier + + msgQueue chan *common.ReceivedMessage // Message queue for waku messages that haven't been decoded + + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + + cfg *Config + wakuCfg *WakuConfig + + options []node.WakuNodeOption + + envelopeFeed event.Feed + + storeMsgIDs map[gethcommon.Hash]bool // Map of the currently processing ids + storeMsgIDsMu sync.RWMutex + + messageSender *publish.MessageSender + + topicHealthStatusChan chan peermanager.TopicHealthStatus + connectionNotifChan chan node.PeerConnection + connStatusSubscriptions map[string]*types.ConnStatusSubscription + connStatusMu sync.Mutex + onlineChecker *onlinechecker.DefaultOnlineChecker + state connection.State + + StorenodeCycle *history.StorenodeCycle + HistoryRetriever *history.HistoryRetriever + + logger *zap.Logger + + // NTP Synced timesource + timesource *timesource.NTPTimeSource + + // seededBootnodesForDiscV5 indicates whether we managed to retrieve discovery + // bootnodes successfully + seededBootnodesForDiscV5 bool + + // goingOnline is a channel that notifies when connectivity has changed from offline to online + goingOnline chan struct{} + + // discV5BootstrapNodes are the ENRs used to fetch bootstrap nodes for discovery + discV5BootstrapNodes []string + + onHistoricMessagesRequestFailed func([]byte, peer.AddrInfo, error) + onPeerStats func(types.ConnStatus) + + statusTelemetryClient ITelemetryClient + + defaultShardInfo protocol.RelayShards +} + +func (w *Waku) SetStatusTelemetryClient(client ITelemetryClient) { + w.statusTelemetryClient = client +} + +func newTTLCache() *ttlcache.Cache[gethcommon.Hash, *common.ReceivedMessage] { + cache := ttlcache.New[gethcommon.Hash, *common.ReceivedMessage](ttlcache.WithTTL[gethcommon.Hash, *common.ReceivedMessage](cacheTTL)) + go func() { + defer
gocommon.LogOnPanic() + cache.Start() + }() + return cache +} + +// New creates a WakuV2 client ready to communicate through the LibP2P network. +func New(nodeKey *ecdsa.PrivateKey, fleet string, cfg *Config, nwakuCfg *WakuConfig, logger *zap.Logger, appDB *sql.DB, ts *timesource.NTPTimeSource, onHistoricMessagesRequestFailed func([]byte, peer.AddrInfo, error), onPeerStats func(types.ConnStatus)) (*Waku, error) { + node, err := wakuNew(nodeKey, + fleet, + cfg, + nwakuCfg, + logger, appDB, ts, onHistoricMessagesRequestFailed, + onPeerStats) + if err != nil { + return nil, err + } + + return node, nil + + // TODO-nwaku + /* + cfg = setDefaults(cfg) + if err = cfg.Validate(logger); err != nil { + return nil, err } - nwakuRestPort = v - } - nwakuRestHost := "localhost" - if host != nil { - nwakuRestHost = *host - } - envNwakuRestHost := os.Getenv("NWAKU_REST_HOST") - if envNwakuRestHost != "" { - nwakuRestHost = envNwakuRestHost - } + logger.Info("starting wakuv2 with config", zap.Any("config", cfg)) - resp, err := http.Get(fmt.Sprintf("http://%s:%d/debug/v1/info", nwakuRestHost, nwakuRestPort)) - if err != nil { - return NwakuInfo{}, err - } - defer resp.Body.Close() + ctx, cancel := context.WithCancel(context.Background()) - body, err := io.ReadAll(resp.Body) - if err != nil { - return NwakuInfo{}, err - } + waku := &Waku{ + appDB: appDB, + cfg: cfg, + privateKeys: make(map[string]*ecdsa.PrivateKey), + symKeys: make(map[string][]byte), + envelopeCache: newTTLCache(), + msgQueue: make(chan *common.ReceivedMessage, messageQueueLimit), + topicHealthStatusChan: make(chan peermanager.TopicHealthStatus, 100), + connectionNotifChan: make(chan node.PeerConnection, 20), + connStatusSubscriptions: make(map[string]*types.ConnStatusSubscription), + ctx: ctx, + cancel: cancel, + wg: sync.WaitGroup{}, + dnsAddressCache: make(map[string][]dnsdisc.DiscoveredNode), + dnsAddressCacheLock: &sync.RWMutex{}, + dnsDiscAsyncRetrievedSignal: make(chan struct{}), + storeMsgIDs: make(map[gethcommon.Hash]bool), + timesource: ts, + storeMsgIDsMu: sync.RWMutex{}, + logger: logger, + discV5BootstrapNodes: cfg.DiscV5BootstrapNodes, + onHistoricMessagesRequestFailed: onHistoricMessagesRequestFailed, + onPeerStats: onPeerStats, + onlineChecker: onlinechecker.NewDefaultOnlineChecker(false).(*onlinechecker.DefaultOnlineChecker), + sendQueue: publish.NewMessageQueue(1000, cfg.UseThrottledPublish), + } - var data NwakuInfo - err = json.Unmarshal(body, &data) - if err != nil { - return NwakuInfo{}, err - } + waku.bandwidthCounter = metrics.NewBandwidthCounter() - return data, nil + libp2pOpts := node.DefaultLibP2POptions + libp2pOpts = append(libp2pOpts, libp2p.BandwidthReporter(waku.bandwidthCounter)) + libp2pOpts = append(libp2pOpts, libp2p.NATPortMap()) + + opts := []node.WakuNodeOption{ + node.WithLibP2POptions(libp2pOpts...), + node.WithPrivateKey(nodeKey), + node.WithHostAddress(hostAddr), + node.WithConnectionNotification(waku.connectionNotifChan), + node.WithTopicHealthStatusChannel(waku.topicHealthStatusChan), + node.WithKeepAlive(randomPeersKeepAliveInterval, allPeersKeepAliveInterval), + node.WithLogger(logger), + node.WithLogLevel(logger.Level()), + node.WithClusterID(cfg.ClusterID), + node.WithMaxMsgSize(1024 * 1024), + } + + if cfg.EnableDiscV5 { + bootnodes, err := waku.getDiscV5BootstrapNodes(waku.ctx, cfg.DiscV5BootstrapNodes, false) + if err != nil { + logger.Error("failed to get bootstrap nodes", zap.Error(err)) + return nil, err + } + opts = append(opts, node.WithDiscoveryV5(uint(cfg.UDPPort), bootnodes, 
cfg.AutoUpdate)) + } + shards, err := protocol.TopicsToRelayShards(cfg.DefaultShardPubsubTopic) + if err != nil { + logger.Error("FATAL ERROR: failed to parse relay shards", zap.Error(err)) + return nil, errors.New("failed to parse relay shard, invalid pubsubTopic configuration") + } + if len(shards) == 0 { //Hack so that tests don't fail. TODO: Need to remove this once tests are changed to use proper cluster and shard. + shardInfo := protocol.RelayShards{ClusterID: 0, ShardIDs: []uint16{0}} + shards = append(shards, shardInfo) + } + waku.defaultShardInfo = shards[0] + if cfg.LightClient { + opts = append(opts, node.WithWakuFilterLightNode()) + waku.defaultShardInfo = shards[0] + opts = append(opts, node.WithMaxPeerConnections(cfg.DiscoveryLimit)) + cfg.EnableStoreConfirmationForMessagesSent = false + //TODO: temporary work-around to improve lightClient connectivity, need to be removed once community sharding is implemented + opts = append(opts, node.WithShards(waku.defaultShardInfo.ShardIDs)) + } else { + relayOpts := []pubsub.Option{ + pubsub.WithMaxMessageSize(int(waku.cfg.MaxMessageSize)), + } + + if testing.Testing() { + relayOpts = append(relayOpts, pubsub.WithEventTracer(waku)) + } + + opts = append(opts, node.WithWakuRelayAndMinPeers(waku.cfg.MinPeersForRelay, relayOpts...)) + opts = append(opts, node.WithMaxPeerConnections(maxRelayPeers)) + cfg.EnablePeerExchangeClient = true //Enabling this until discv5 issues are resolved. This will enable more peers to be connected for relay mesh. + cfg.EnableStoreConfirmationForMessagesSent = true + } + + if cfg.EnableStore { + if appDB == nil { + return nil, errors.New("appDB is required for store") + } + opts = append(opts, node.WithWakuStore()) + dbStore, err := persistence.NewDBStore(logger, persistence.WithDB(appDB), persistence.WithRetentionPolicy(cfg.StoreCapacity, time.Duration(cfg.StoreSeconds)*time.Second)) + if err != nil { + return nil, err + } + opts = append(opts, node.WithMessageProvider(dbStore)) + } + + waku.options = opts + + waku.logger.Info("setup the go-waku node successfully") + + return waku, nil*/ +} + +func (w *Waku) SubscribeToConnStatusChanges() *types.ConnStatusSubscription { + w.connStatusMu.Lock() + defer w.connStatusMu.Unlock() + subscription := types.NewConnStatusSubscription() + w.connStatusSubscriptions[subscription.ID] = subscription + return subscription +} + +/* TODO-nwaku +func (w *Waku) getDiscV5BootstrapNodes(ctx context.Context, addresses []string, useOnlyDnsDiscCache bool) ([]*enode.Node, error) { + wg := sync.WaitGroup{} + mu := sync.Mutex{} + var result []*enode.Node + + w.seededBootnodesForDiscV5 = true + + retrieveENR := func(d dnsdisc.DiscoveredNode, wg *sync.WaitGroup) { + mu.Lock() + defer mu.Unlock() + defer wg.Done() + if d.ENR != nil { + result = append(result, d.ENR) + } + } + + for _, addrString := range addresses { + if addrString == "" { + continue + } + + if strings.HasPrefix(addrString, "enrtree://") { + // Use DNS Discovery + wg.Add(1) + go func(addr string) { + defer gocommon.LogOnPanic() + defer wg.Done() + if err := w.dnsDiscover(ctx, addr, retrieveENR, useOnlyDnsDiscCache); err != nil { + go func() { + defer gocommon.LogOnPanic() + w.retryDnsDiscoveryWithBackoff(ctx, addr, w.dnsDiscAsyncRetrievedSignal) + }() + } + }(addrString) + } else { + // It's a normal enr + bootnode, err := enode.Parse(enode.ValidSchemes, addrString) + if err != nil { + return nil, err + } + mu.Lock() + result = append(result, bootnode) + mu.Unlock() + } + } + wg.Wait() + + if len(result) == 0 { + 
w.seededBootnodesForDiscV5 = false
+ }
+
+ return result, nil
+}
+
+type fnApplyToEachPeer func(d dnsdisc.DiscoveredNode, wg *sync.WaitGroup)
+
+func (w *Waku) dnsDiscover(ctx context.Context, enrtreeAddress string, apply fnApplyToEachPeer, useOnlyCache bool) error {
+ w.logger.Info("retrieving nodes", zap.String("enr", enrtreeAddress))
+ ctx, cancel := context.WithTimeout(ctx, requestTimeout)
+ defer cancel()
+
+ w.dnsAddressCacheLock.Lock()
+ defer w.dnsAddressCacheLock.Unlock()
+
+ discNodes, ok := w.dnsAddressCache[enrtreeAddress]
+ if !ok && !useOnlyCache {
+ nameserver := w.cfg.Nameserver
+ resolver := w.cfg.Resolver
+
+ var opts []dnsdisc.DNSDiscoveryOption
+ if nameserver != "" {
+ opts = append(opts, dnsdisc.WithNameserver(nameserver))
+ }
+ if resolver != nil {
+ opts = append(opts, dnsdisc.WithResolver(resolver))
+ }
+
+ discoveredNodes, err := dnsdisc.RetrieveNodes(ctx, enrtreeAddress, opts...)
+ if err != nil {
+ w.logger.Warn("dns discovery error", zap.Error(err))
+ return err
+ }
+
+ if len(discoveredNodes) != 0 {
+ w.dnsAddressCache[enrtreeAddress] = append(w.dnsAddressCache[enrtreeAddress], discoveredNodes...)
+ discNodes = w.dnsAddressCache[enrtreeAddress]
+ }
+ }
+
+ wg := &sync.WaitGroup{}
+ wg.Add(len(discNodes))
+ for _, d := range discNodes {
+ apply(d, wg)
+ }
+ wg.Wait()
+
+ return nil
+}
+
+func (w *Waku) retryDnsDiscoveryWithBackoff(ctx context.Context, addr string, successChan chan<- struct{}) {
+ retries := 0
+ for {
+ err := w.dnsDiscover(ctx, addr, func(d dnsdisc.DiscoveredNode, wg *sync.WaitGroup) {}, false)
+ if err == nil {
+ select {
+ case successChan <- struct{}{}:
+ default:
+ }
+
+ break
+ }
+
+ retries++
+ backoff := time.Second * time.Duration(math.Exp2(float64(retries)))
+ if backoff > time.Minute {
+ backoff = time.Minute
+ }
+
+ t := time.NewTimer(backoff)
+ select {
+ case <-w.ctx.Done():
+ t.Stop()
+ return
+ case <-t.C:
+ t.Stop()
+ }
+ }
+}
+*/
+
+func (w *Waku) discoverAndConnectPeers() {
+ var addrsToConnect []multiaddr.Multiaddr
+ nameserver := w.cfg.Nameserver
+ if nameserver == "" {
+ nameserver = "8.8.8.8"
+ }
+
+ for _, addrString := range w.cfg.WakuNodes {
+ addrString := addrString
+ if strings.HasPrefix(addrString, "enrtree://") {
+
+ // Use DNS Discovery
+ ctx, cancel := context.WithTimeout(w.ctx, requestTimeout)
+ res, err := w.node.DnsDiscovery(ctx, addrString, nameserver)
+ cancel()
+ if err != nil {
+ w.logger.Error("could not obtain dns discovery peers for ClusterConfig.WakuNodes", zap.Error(err), zap.String("dnsDiscURL", addrString))
+ continue
+ }
+ for _, ma := range res {
+ addrsToConnect = append(addrsToConnect, ma)
+ }
+
+ } else {
+ // It is a normal multiaddress
+ addr, err := multiaddr.NewMultiaddr(addrString)
+ if err != nil {
+ w.logger.Warn("invalid peer multiaddress", zap.String("ma", addrString), zap.Error(err))
+ continue
+ }
+ addrsToConnect = append(addrsToConnect, addr)
+ }
+ }
+ // Now connect to all the Multiaddresses
+ for _, ma := range addrsToConnect {
+ ctx, cancel := context.WithTimeout(w.ctx, requestTimeout)
+ w.node.Connect(ctx, ma)
+ cancel()
+ }
+}
+
+func (w *Waku) connect(peerInfo peer.AddrInfo, enr *enode.Node, origin wps.Origin) {
+ defer gocommon.LogOnPanic()
+ // Connection will be pruned eventually by the connection manager if needed
+ // The peer connector in go-waku uses Connect, so it will execute identify as part of its connection flow
+
+ // TODO-nwaku
+ // TODO: is enr and origin required?
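+ // NOTE: only the first multiaddress in peerInfo is dialed below; the enr and origin arguments are currently unused in the nwaku path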
+ // TODO: this function is meant to add a node to a peer store so it can be picked up by the peer manager + // so probably we shouldn't connect directly but expose an AddPeer function in libwaku + + ctx, cancel := context.WithTimeout(w.ctx, requestTimeout) + defer cancel() + + addr := peerInfo.Addrs[0] + err := w.node.Connect(ctx, addr) + if err != nil { + w.logger.Error("couldn't connect to peer", zap.Error(err), zap.Stringer("peerID", peerInfo.ID)) + } +} + +/* TODO-nwaku +func (w *Waku) telemetryBandwidthStats(telemetryServerURL string) { + defer gocommon.LogOnPanic() + defer w.wg.Done() + + if telemetryServerURL == "" { + return + } + + telemetry := NewBandwidthTelemetryClient(w.logger, telemetryServerURL) + + ticker := time.NewTicker(time.Second * 20) + defer ticker.Stop() + + for { + select { + case <-w.ctx.Done(): + return + case <-ticker.C: + bandwidthPerProtocol := w.bandwidthCounter.GetBandwidthByProtocol() + w.bandwidthCounter.Reset() + go telemetry.PushProtocolStats(bandwidthPerProtocol) + } + } +} + +func (w *Waku) GetStats() types.StatsSummary { + stats := w.bandwidthCounter.GetBandwidthTotals() + return types.StatsSummary{ + UploadRate: uint64(stats.RateOut), + DownloadRate: uint64(stats.RateIn), + } +} + +func (w *Waku) runPeerExchangeLoop() { + defer gocommon.LogOnPanic() + defer w.wg.Done() + + if !w.cfg.EnablePeerExchangeClient { + // Currently peer exchange client is only used for light nodes + return + } + + ticker := time.NewTicker(time.Second * 5) + defer ticker.Stop() + + for { + select { + case <-w.ctx.Done(): + w.logger.Debug("Peer exchange loop stopped") + return + case <-ticker.C: + w.logger.Info("Running peer exchange loop") + + // We select only the nodes discovered via DNS Discovery that support peer exchange + // We assume that those peers are running peer exchange according to infra config, + // If not, the peer selection process in go-waku will filter them out anyway + w.dnsAddressCacheLock.RLock() + var peers peer.IDSlice + for _, record := range w.dnsAddressCache { + for _, discoveredNode := range record { + if len(discoveredNode.PeerInfo.Addrs) == 0 { + continue + } + // Attempt to connect to the peers. 
+ // Peers will be added to the libp2p peer store thanks to identify
+ go w.connect(discoveredNode.PeerInfo, discoveredNode.ENR, wps.DNSDiscovery)
+ peers = append(peers, discoveredNode.PeerID)
+ }
+ }
+ w.dnsAddressCacheLock.RUnlock()
+
+ if len(peers) != 0 {
+ err := w.node.PeerExchange().Request(w.ctx, w.cfg.DiscoveryLimit, peer_exchange.WithAutomaticPeerSelection(peers...),
+ peer_exchange.FilterByShard(int(w.defaultShardInfo.ClusterID), int(w.defaultShardInfo.ShardIDs[0])))
+ if err != nil {
+ w.logger.Error("couldn't request peers via peer exchange", zap.Error(err))
+ }
+ }
+ }
+ }
+}
+*/
+
+func (w *Waku) GetPubsubTopic(topic string) string {
+ if topic == "" {
+ topic = w.cfg.DefaultShardPubsubTopic
+ }
+
+ return topic
+}
+
+func (w *Waku) unsubscribeFromPubsubTopicWithWakuRelay(topic string) error {
+ topic = w.GetPubsubTopic(topic)
+ return w.node.RelayUnsubscribe(topic)
+}
+
+func (w *Waku) subscribeToPubsubTopicWithWakuRelay(topic string, pubkey *ecdsa.PublicKey) error {
+ if w.cfg.LightClient {
+ return errors.New("only available for full nodes")
+ }
+
+ topic = w.GetPubsubTopic(topic)
+
+ rs, err := protocol.TopicsToRelayShards(topic)
+ if err != nil {
+ return err
+ }
+
+ if len(rs) == 0 {
+ w.logger.Warn("could not obtain shards from topic", zap.String("topic", topic))
+ return nil
+ }
+
+ if pubkey != nil {
+ err := w.node.RelayAddProtectedShard(rs[0].ClusterID, rs[0].ShardIDs[0], pubkey)
+ if err != nil {
+ return err
+ }
+ }
+
+ err = w.node.RelaySubscribe(topic)
+ if err != nil {
+ return err
+ }
+
+ w.wg.Add(1)
+ go func() {
+ defer gocommon.LogOnPanic()
+ defer w.wg.Done()
+ for {
+ select {
+ case <-w.ctx.Done():
+ err := w.node.RelayUnsubscribe(topic)
+ if err != nil && !errors.Is(err, context.Canceled) {
+ w.logger.Error("could not unsubscribe", zap.Error(err))
+ }
+ return
+
+ case env := <-w.node.MsgChan:
+ err := w.OnNewEnvelopes(env, common.RelayedMessageType, false)
+ if err != nil {
+ w.logger.Error("OnNewEnvelopes error", zap.Error(err))
+ }
+ }
+ }
+ }()
+
+ return nil
+}
+
+// MaxMessageSize returns the maximum accepted message size.
+func (w *Waku) MaxMessageSize() uint32 {
+ return w.cfg.MaxMessageSize
+}
+
+// CurrentTime returns current time.
+func (w *Waku) CurrentTime() time.Time {
+ return w.timesource.Now()
+}
+
+// APIs returns the RPC descriptors the Waku implementation offers
+func (w *Waku) APIs() []rpc.API {
+ return []rpc.API{
+ {
+ Namespace: Name,
+ Version: VersionStr,
+ Service: NewPublicWakuAPI(w),
+ Public: false,
+ },
+ }
+}
+
+// Protocols returns the waku sub-protocols run by this particular client.
+func (w *Waku) Protocols() []p2p.Protocol {
+ return []p2p.Protocol{}
+}
+
+func (w *Waku) SendEnvelopeEvent(event common.EnvelopeEvent) int {
+ return w.envelopeFeed.Send(event)
+}
+
+// SubscribeEnvelopeEvents subscribes to envelopes feed.
+// In order to prevent blocking waku producers, events must be amply buffered.
+func (w *Waku) SubscribeEnvelopeEvents(events chan<- common.EnvelopeEvent) event.Subscription {
+ return w.envelopeFeed.Subscribe(events)
+}
+
+// NewKeyPair generates a new cryptographic identity for the client, and injects
+// it into the known identities for message decryption. Returns ID of the new key pair.
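+// The key ID is derived deterministically from the public key via PBKDF2 (see makeDeterministicID and toDeterministicID), so the same key always maps to the same ID.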
+func (w *Waku) NewKeyPair() (string, error) {
+ key, err := crypto.GenerateKey()
+ if err != nil || !validatePrivateKey(key) {
+ key, err = crypto.GenerateKey() // retry once
+ }
+ if err != nil {
+ return "", err
+ }
+ if !validatePrivateKey(key) {
+ return "", fmt.Errorf("failed to generate valid key")
+ }
+
+ id, err := toDeterministicID(hexutil.Encode(crypto.FromECDSAPub(&key.PublicKey)), common.KeyIDSize)
+ if err != nil {
+ return "", err
+ }
+
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+
+ if w.privateKeys[id] != nil {
+ return "", fmt.Errorf("failed to generate unique ID")
+ }
+ w.privateKeys[id] = key
+ return id, nil
+}
+
+// DeleteKeyPair deletes the specified key if it exists.
+func (w *Waku) DeleteKeyPair(key string) bool {
+ deterministicID, err := toDeterministicID(key, common.KeyIDSize)
+ if err != nil {
+ return false
+ }
+
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+
+ if w.privateKeys[deterministicID] != nil {
+ delete(w.privateKeys, deterministicID)
+ return true
+ }
+ return false
+}
+
+// AddKeyPair imports an asymmetric private key and returns its identifier.
+func (w *Waku) AddKeyPair(key *ecdsa.PrivateKey) (string, error) {
+ id, err := makeDeterministicID(hexutil.Encode(crypto.FromECDSAPub(&key.PublicKey)), common.KeyIDSize)
+ if err != nil {
+ return "", err
+ }
+ if w.HasKeyPair(id) {
+ return id, nil // no need to re-inject
+ }
+
+ w.keyMu.Lock()
+ w.privateKeys[id] = key
+ w.keyMu.Unlock()
+
+ return id, nil
+}
+
+// SelectKeyPair adds a cryptographic identity and makes sure
+// that it is the only private key known to the node.
+func (w *Waku) SelectKeyPair(key *ecdsa.PrivateKey) error {
+ id, err := makeDeterministicID(hexutil.Encode(crypto.FromECDSAPub(&key.PublicKey)), common.KeyIDSize)
+ if err != nil {
+ return err
+ }
+
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+
+ w.privateKeys = make(map[string]*ecdsa.PrivateKey) // reset key store
+ w.privateKeys[id] = key
+
+ return nil
+}
+
+// DeleteKeyPairs removes all cryptographic identities known to the node
+func (w *Waku) DeleteKeyPairs() error {
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+
+ w.privateKeys = make(map[string]*ecdsa.PrivateKey)
+
+ return nil
+}
+
+// HasKeyPair checks if the waku node is configured with the private key
+// of the specified key pair.
+func (w *Waku) HasKeyPair(id string) bool {
+ deterministicID, err := toDeterministicID(id, common.KeyIDSize)
+ if err != nil {
+ return false
+ }
+
+ w.keyMu.RLock()
+ defer w.keyMu.RUnlock()
+ return w.privateKeys[deterministicID] != nil
+}
+
+// GetPrivateKey retrieves the private key of the specified identity.
+func (w *Waku) GetPrivateKey(id string) (*ecdsa.PrivateKey, error) {
+ deterministicID, err := toDeterministicID(id, common.KeyIDSize)
+ if err != nil {
+ return nil, err
+ }
+
+ w.keyMu.RLock()
+ defer w.keyMu.RUnlock()
+ key := w.privateKeys[deterministicID]
+ if key == nil {
+ return nil, fmt.Errorf("invalid id")
+ }
+ return key, nil
+}
+
+// GenerateSymKey generates a random symmetric key and stores it under id,
+// which is then returned. Will be used in the future for session key exchange.
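+// Unlike key-pair IDs, the identifier returned here is random (common.GenerateRandomID) rather than derived from the key material.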
+func (w *Waku) GenerateSymKey() (string, error) {
+ key, err := common.GenerateSecureRandomData(common.AESKeyLength)
+ if err != nil {
+ return "", err
+ } else if !common.ValidateDataIntegrity(key, common.AESKeyLength) {
+ return "", fmt.Errorf("error in GenerateSymKey: crypto/rand failed to generate random data")
+ }
+
+ id, err := common.GenerateRandomID()
+ if err != nil {
+ return "", fmt.Errorf("failed to generate ID: %s", err)
+ }
+
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+
+ if w.symKeys[id] != nil {
+ return "", fmt.Errorf("failed to generate unique ID")
+ }
+ w.symKeys[id] = key
+ return id, nil
+}
+
+// AddSymKey stores the key with a given id.
+func (w *Waku) AddSymKey(id string, key []byte) (string, error) {
+ deterministicID, err := toDeterministicID(id, common.KeyIDSize)
+ if err != nil {
+ return "", err
+ }
+
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+
+ if w.symKeys[deterministicID] != nil {
+ return "", fmt.Errorf("key already exists: %v", id)
+ }
+ w.symKeys[deterministicID] = key
+ return deterministicID, nil
+}
+
+// AddSymKeyDirect stores the key, and returns its id.
+func (w *Waku) AddSymKeyDirect(key []byte) (string, error) {
+ if len(key) != common.AESKeyLength {
+ return "", fmt.Errorf("wrong key size: %d", len(key))
+ }
+
+ id, err := common.GenerateRandomID()
+ if err != nil {
+ return "", fmt.Errorf("failed to generate ID: %s", err)
+ }
+
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+
+ if w.symKeys[id] != nil {
+ return "", fmt.Errorf("failed to generate unique ID")
+ }
+ w.symKeys[id] = key
+ return id, nil
+}
+
+// AddSymKeyFromPassword generates the key from password, stores it, and returns its id.
+func (w *Waku) AddSymKeyFromPassword(password string) (string, error) {
+ id, err := common.GenerateRandomID()
+ if err != nil {
+ return "", fmt.Errorf("failed to generate ID: %s", err)
+ }
+ if w.HasSymKey(id) {
+ return "", fmt.Errorf("failed to generate unique ID")
+ }
+
+ // kdf should run no less than 0.1 seconds on an average computer,
+ // because it's a once-in-a-session experience
+ derived := pbkdf2.Key([]byte(password), nil, 65356, common.AESKeyLength, sha256.New)
+
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+
+ // double check is necessary, because the PBKDF2 derivation above is very slow
+ if w.symKeys[id] != nil {
+ return "", fmt.Errorf("critical error: failed to generate unique ID")
+ }
+ w.symKeys[id] = derived
+ return id, nil
+}
+
+// HasSymKey returns true if there is a key associated with the given id.
+// Otherwise returns false.
+func (w *Waku) HasSymKey(id string) bool {
+ w.keyMu.RLock()
+ defer w.keyMu.RUnlock()
+ return w.symKeys[id] != nil
+}
+
+// DeleteSymKey deletes the key associated with the name string if it exists.
+func (w *Waku) DeleteSymKey(id string) bool {
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+ if w.symKeys[id] != nil {
+ delete(w.symKeys, id)
+ return true
+ }
+ return false
+}
+
+// GetSymKey returns the symmetric key associated with the given id.
+func (w *Waku) GetSymKey(id string) ([]byte, error) {
+ w.keyMu.RLock()
+ defer w.keyMu.RUnlock()
+ if w.symKeys[id] != nil {
+ return w.symKeys[id], nil
+ }
+ return nil, fmt.Errorf("non-existent key ID")
+}
+
+// Subscribe installs a new message handler used for filtering, decrypting
+// and subsequent storing of incoming messages.
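+// In light-client mode the filter is additionally registered with the FilterManager, which maintains the corresponding server-side filter subscription.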
+func (w *Waku) Subscribe(f *common.Filter) (string, error) {
+ f.PubsubTopic = w.GetPubsubTopic(f.PubsubTopic)
+ id, err := w.filters.Install(f)
+ if err != nil {
+ return id, err
+ }
+
+ if w.cfg.LightClient {
+ cf := protocol.NewContentFilter(f.PubsubTopic, f.ContentTopics.ContentTopics()...)
+ w.filterManager.SubscribeFilter(id, cf)
+ }
+
+ return id, nil
+}
+
+// Unsubscribe removes an installed message handler.
+func (w *Waku) Unsubscribe(ctx context.Context, id string) error {
+ ok := w.filters.Uninstall(id)
+ if !ok {
+ return fmt.Errorf("failed to unsubscribe: invalid ID '%s'", id)
+ }
+
+ if w.cfg.LightClient {
+ w.filterManager.UnsubscribeFilter(id)
+ }
+
+ return nil
+}
+
+// GetFilter returns the filter by id.
+func (w *Waku) GetFilter(id string) *common.Filter {
+ return w.filters.Get(id)
+}
+
+// UnsubscribeMany removes a batch of installed message handlers by their IDs.
+func (w *Waku) UnsubscribeMany(ids []string) error {
+ for _, id := range ids {
+ w.logger.Info("cleaning up filter", zap.String("id", id))
+ ok := w.filters.Uninstall(id)
+ if !ok {
+ w.logger.Warn("could not remove filter with id", zap.String("id", id))
+ }
+ }
+ return nil
+}
+
+func (w *Waku) SkipPublishToTopic(value bool) {
+ w.cfg.SkipPublishToTopic = value
+}
+
+func (w *Waku) ConfirmMessageDelivered(hashes []gethcommon.Hash) {
+ w.messageSender.MessagesDelivered(hashes)
+ /* TODO-nwaku
+ if w.statusTelemetryClient != nil {
+ for _, hash := range hashes {
+ w.statusTelemetryClient.PushMessageDeliveryConfirmed(w.ctx, hash.String())
+ }
+ }
+ */
+}
+
+// OnNewEnvelope implements the Waku FilterManager API; it is invoked whenever the filter receives a new message.
+func (w *Waku) OnNewEnvelope(env common.Envelope) error {
+ return w.OnNewEnvelopes(env, common.RelayedMessageType, false)
+}
+
+// Start implements node.Service, starting the background data propagation thread
+// of the Waku protocol.
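+// Start boots the underlying nwaku node, starts the storenode cycle and the message sender, sets up relay subscriptions, and spawns one processQueueLoop worker per CPU core.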
+func (w *Waku) Start() error { + err := w.node.Start() + if err != nil { + return fmt.Errorf("failed to start nwaku node: %v", err) + } + + if w.ctx == nil { + w.ctx, w.cancel = context.WithCancel(context.Background()) + } + + /* TODO-nwaku + w.goingOnline = make(chan struct{}) + */ + w.StorenodeCycle = history.NewStorenodeCycle(w.logger, newPinger(w.node)) + w.HistoryRetriever = history.NewHistoryRetriever(newStorenodeRequestor(w.node, w.logger), NewHistoryProcessorWrapper(w), w.logger) + w.StorenodeCycle.Start(w.ctx) + + peerID, err := w.node.PeerID() + if err != nil { + return err + } + + w.logger.Info("WakuV2 PeerID", zap.Stringer("id", peerID)) + + /* TODO-nwaku + w.discoverAndConnectPeers() + + if w.cfg.EnableDiscV5 { + err := w.node.DiscV5().Start(w.ctx) + if err != nil { + return err + } + } + + w.wg.Add(1) + go func() { + defer gocommon.LogOnPanic() + defer w.wg.Done() + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + for { + select { + case <-w.ctx.Done(): + return + case <-ticker.C: + w.checkForConnectionChanges() + case <-w.topicHealthStatusChan: + // TODO: https://github.com/status-im/status-go/issues/4628 + case <-w.connectionNotifChan: + w.checkForConnectionChanges() + } + } + }() + + if w.cfg.TelemetryServerURL != "" { + w.wg.Add(1) + go func() { + defer gocommon.LogOnPanic() + defer w.wg.Done() + peerTelemetryTickerInterval := time.Duration(w.cfg.TelemetryPeerCountSendPeriod) * time.Millisecond + if peerTelemetryTickerInterval == 0 { + peerTelemetryTickerInterval = 10 * time.Second + } + peerTelemetryTicker := time.NewTicker(peerTelemetryTickerInterval) + defer peerTelemetryTicker.Stop() + + dialErrSub, err := w.node.Host().EventBus().Subscribe(new(utils.DialError)) + if err != nil { + w.logger.Error("failed to subscribe to dial errors", zap.Error(err)) + return + } + defer dialErrSub.Close() + + messageSentSub, err := w.node.Host().EventBus().Subscribe(new(publish.MessageSent)) + if err != nil { + w.logger.Error("failed to subscribe to message sent events", zap.Error(err)) + return + } + + for { + select { + case <-w.ctx.Done(): + return + case <-peerTelemetryTicker.C: + w.reportPeerMetrics() + case dialErr := <-dialErrSub.Out(): + errors := common.ParseDialErrors(dialErr.(utils.DialError).Err.Error()) + for _, dialError := range errors { + w.statusTelemetryClient.PushDialFailure(w.ctx, common.DialError{ErrType: dialError.ErrType, ErrMsg: dialError.ErrMsg, Protocols: dialError.Protocols}) + } + case messageSent := <-messageSentSub.Out(): + w.statusTelemetryClient.PushSentMessageTotal(w.ctx, messageSent.(publish.MessageSent).Size) + } + } + }() + } + + w.wg.Add(1) + go w.telemetryBandwidthStats(w.cfg.TelemetryServerURL) + //TODO: commenting for now so that only fleet nodes are used. + //Need to uncomment once filter peer scoring etc is implemented. 
+
+ w.wg.Add(1)
+ go w.runPeerExchangeLoop()
+ */
+
+ if w.cfg.EnableMissingMessageVerification {
+ w.missingMsgVerifier = missing.NewMissingMessageVerifier(
+ newStorenodeRequestor(w.node, w.logger),
+ w,
+ w.timesource,
+ w.logger)
+
+ w.missingMsgVerifier.Start(w.ctx)
+
+ w.wg.Add(1)
+ go func() {
+ defer gocommon.LogOnPanic()
+ defer w.wg.Done()
+ for {
+ select {
+ case <-w.ctx.Done():
+ return
+ case envelope := <-w.missingMsgVerifier.C:
+ err = w.OnNewEnvelopes(envelope, common.MissingMessageType, false)
+ if err != nil {
+ w.logger.Error("OnNewEnvelopes error", zap.Error(err))
+ }
+ }
+ }
+ }()
+ }
+
+ /* TODO: nwaku
+ if w.cfg.LightClient {
+ // Create FilterManager that will maintain peer connectivity
+ // for installed filters
+ w.filterManager = filterapi.NewFilterManager(
+ w.ctx,
+ w.logger,
+ w.cfg.MinPeersForFilter,
+ w,
+ w.node.FilterLightnode(),
+ filterapi.WithBatchInterval(300*time.Millisecond))
+ }
+ */
+ err = w.setupRelaySubscriptions()
+ if err != nil {
+ return err
+ }
+
+ numCPU := runtime.NumCPU()
+ for i := 0; i < numCPU; i++ {
+ w.wg.Add(1)
+ go w.processQueueLoop()
+ }
+
+ w.wg.Add(1)
+ go w.broadcast()
+
+ go func() {
+ defer gocommon.LogOnPanic()
+ w.sendQueue.Start(w.ctx)
+ }()
+
+ err = w.startMessageSender()
+ if err != nil {
+ return err
+ }
+
+ /* TODO-nwaku
+ // we should wait for `seedBootnodesForDiscV5` to shut down smoothly before setting w.ctx to nil within `w.Stop()`
+ w.wg.Add(1)
+ go w.seedBootnodesForDiscV5()
+ */
+
+ return nil
+}
+
+func (w *Waku) checkForConnectionChanges() {
+
+ /* TODO-nwaku
+ isOnline := len(w.node.Host().Network().Peers()) > 0
+
+ w.connStatusMu.Lock()
+
+ latestConnStatus := types.ConnStatus{
+ IsOnline: isOnline,
+ Peers: FormatPeerStats(w.node),
+ }
+
+ w.logger.Debug("peer stats",
+ zap.Int("peersCount", len(latestConnStatus.Peers)),
+ zap.Any("stats", latestConnStatus))
+ for k, subs := range w.connStatusSubscriptions {
+ if !subs.Send(latestConnStatus) {
+ delete(w.connStatusSubscriptions, k)
+ }
+ }
+
+ w.connStatusMu.Unlock()
+
+ if w.onPeerStats != nil {
+ w.onPeerStats(latestConnStatus)
+ }
+
+ w.ConnectionChanged(connection.State{
+ Type: w.state.Type, //setting state type as previous one since there won't be a change here
+ Offline: !latestConnStatus.IsOnline,
+ }) */
+}
+
+/* TODO: nwaku
+func (w *Waku) reportPeerMetrics() {
+ if w.statusTelemetryClient != nil {
+ connFailures := FormatPeerConnFailures(w.node)
+ w.statusTelemetryClient.PushPeerCount(w.ctx, w.PeerCount())
+ w.statusTelemetryClient.PushPeerConnFailures(w.ctx, connFailures)
+
+ peerCountByOrigin := make(map[wps.Origin]uint)
+ peerCountByShard := make(map[uint16]uint)
+ wakuPeerStore := w.node.Host().Peerstore().(wps.WakuPeerstore)
+
+ for _, peerID := range w.node.Host().Network().Peers() {
+ origin, err := wakuPeerStore.Origin(peerID)
+ if err != nil {
+ origin = wps.Unknown
+ }
+
+ peerCountByOrigin[origin]++
+ pubsubTopics, err := wakuPeerStore.PubSubTopics(peerID)
+ if err != nil {
+ continue
+ }
+
+ keys := make([]string, 0, len(pubsubTopics))
+ for k := range pubsubTopics {
+ keys = append(keys, k)
+ }
+ relayShards, err := protocol.TopicsToRelayShards(keys...)
+ if err != nil {
+ continue
+ }
+
+ for _, shards := range relayShards {
+ for _, shard := range shards.ShardIDs {
+ peerCountByShard[shard]++
+ }
+ }
+ }
+ w.statusTelemetryClient.PushPeerCountByShard(w.ctx, peerCountByShard)
+ w.statusTelemetryClient.PushPeerCountByOrigin(w.ctx, peerCountByOrigin)
+ }
+}
+*/
+
+func (w *Waku) startMessageSender() error {
+ publishMethod := publish.Relay
+ /* TODO-nwaku
+ if w.cfg.LightClient {
+ publishMethod = publish.LightPush
+ }*/
+
+ sender, err := publish.NewMessageSender(publishMethod, newPublisher(w.node), w.logger)
+ if err != nil {
+ w.logger.Error("failed to create message sender", zap.Error(err))
+ return err
+ }
+
+ /* TODO-nwaku
+ if w.cfg.TelemetryServerURL != "" {
+ sender.WithMessageSentEmitter(w.node.Host())
+ }*/
+
+ if w.cfg.EnableStoreConfirmationForMessagesSent {
+ msgStoredChan := make(chan gethcommon.Hash, 1000)
+ msgExpiredChan := make(chan gethcommon.Hash, 1000)
+ messageSentCheck := publish.NewMessageSentCheck(w.ctx, newStorenodeMessageVerifier(w.node), w.StorenodeCycle, w.timesource, msgStoredChan, msgExpiredChan, w.logger)
+ sender.WithMessageSentCheck(messageSentCheck)
+
+ w.wg.Add(1)
+ go func() {
+ defer gocommon.LogOnPanic()
+ defer w.wg.Done()
+ for {
+ select {
+ case <-w.ctx.Done():
+ return
+ case hash := <-msgStoredChan:
+ w.SendEnvelopeEvent(common.EnvelopeEvent{
+ Hash: hash,
+ Event: common.EventEnvelopeSent,
+ })
+
+ if w.statusTelemetryClient != nil {
+ w.statusTelemetryClient.PushMessageCheckSuccess(w.ctx, hash.Hex())
+ }
+ case hash := <-msgExpiredChan:
+ w.SendEnvelopeEvent(common.EnvelopeEvent{
+ Hash: hash,
+ Event: common.EventEnvelopeExpired,
+ })
+
+ if w.statusTelemetryClient != nil {
+ w.statusTelemetryClient.PushMessageCheckFailure(w.ctx, hash.Hex())
+ }
+ }
+ }
+ }()
+ }
+
+ if !w.cfg.UseThrottledPublish || testing.Testing() {
+ // To avoid delaying the tests, or for when we don't want to rate limit, we set up an infinite rate limiter,
+ // basically disabling the rate limit functionality
+ limiter := publish.NewPublishRateLimiter(rate.Inf, 1)
+ sender.WithRateLimiting(limiter)
+ }
+
+ w.messageSender = sender
+ w.messageSender.Start()
+
+ return nil
+}
+
+func (w *Waku) MessageExists(mh pb.MessageHash) (bool, error) {
+ w.poolMu.Lock()
+ defer w.poolMu.Unlock()
+ return w.envelopeCache.Has(gethcommon.Hash(mh)), nil
+}
+
+func (w *Waku) SetTopicsToVerifyForMissingMessages(peerInfo peer.AddrInfo, pubsubTopic string, contentTopics []string) {
+ if !w.cfg.EnableMissingMessageVerification {
+ return
+ }
+
+ w.missingMsgVerifier.SetCriteriaInterest(peerInfo, protocol.NewContentFilter(pubsubTopic, contentTopics...))
+}
+
+func (w *Waku) setupRelaySubscriptions() error {
+ if w.cfg.LightClient {
+ return nil
+ }
+
+ if w.protectedTopicStore != nil {
+ protectedTopics, err := w.protectedTopicStore.ProtectedTopics()
+ if err != nil {
+ return err
+ }
+
+ for _, pt := range protectedTopics {
+ // Adding subscription to protected topics
+ err = w.subscribeToPubsubTopicWithWakuRelay(pt.Topic, pt.PubKey)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ err := w.subscribeToPubsubTopicWithWakuRelay(w.cfg.DefaultShardPubsubTopic, nil)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Stop implements node.Service, stopping the background data propagation thread
+// of the Waku protocol.
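+// Stop cancels the node context, stops the envelope cache and the underlying nwaku node, closes the protected topic store if present, and waits for all worker goroutines to exit.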
+func (w *Waku) Stop() error { + w.cancel() + + w.envelopeCache.Stop() + + err := w.node.Stop() + if err != nil { + return err + } + + if w.protectedTopicStore != nil { + err := w.protectedTopicStore.Close() + if err != nil { + return err + } + } + + /* TODO-nwaku + close(w.goingOnline)*/ + + w.wg.Wait() + + w.ctx = nil + w.cancel = nil + + return nil +} + +func (w *Waku) OnNewEnvelopes(envelope common.Envelope, msgType common.MessageType, processImmediately bool) error { + if envelope == nil { + return nil + } + + recvMessage := common.NewReceivedMessage(envelope, msgType) + if recvMessage == nil { + return nil + } + + /* TODO-nwaku + if w.statusTelemetryClient != nil { + if msgType == common.MissingMessageType { + w.statusTelemetryClient.PushMissedMessage(w.ctx, envelope) + } + } */ + + logger := w.logger.With( + zap.String("messageType", msgType), + zap.Stringer("envelopeHash", envelope.Hash()), + zap.String("pubsubTopic", envelope.PubsubTopic()), + zap.String("contentTopic", envelope.Message().ContentTopic), + logutils.WakuMessageTimestamp("timestamp", envelope.Message().Timestamp), + ) + + logger.Debug("received new envelope") + trouble := false + + _, err := w.add(recvMessage, processImmediately) + if err != nil { + logger.Info("invalid envelope received", zap.Error(err)) + trouble = true + } + + common.EnvelopesValidatedCounter.Inc() + + if trouble { + return errors.New("received invalid envelope") + } + + return nil +} + +// addEnvelope adds an envelope to the envelope map, used for sending +func (w *Waku) addEnvelope(envelope *common.ReceivedMessage) { + w.poolMu.Lock() + w.envelopeCache.Set(envelope.Hash(), envelope, ttlcache.DefaultTTL) + w.poolMu.Unlock() +} + +func (w *Waku) add(recvMessage *common.ReceivedMessage, processImmediately bool) (bool, error) { + common.EnvelopesReceivedCounter.Inc() + + w.poolMu.Lock() + envelope := w.envelopeCache.Get(recvMessage.Hash()) + alreadyCached := envelope != nil + w.poolMu.Unlock() + + if !alreadyCached { + recvMessage.Processed.Store(false) + w.addEnvelope(recvMessage) + } + + logger := w.logger.With(zap.String("envelopeHash", recvMessage.Hash().Hex())) + + if alreadyCached { + logger.Debug("w envelope already cached") + common.EnvelopesCachedCounter.WithLabelValues("hit").Inc() + } else { + logger.Debug("cached w envelope") + common.EnvelopesCachedCounter.WithLabelValues("miss").Inc() + common.EnvelopesSizeMeter.Observe(float64(len(recvMessage.Envelope.Message().Payload))) + } + + if !alreadyCached || !envelope.Value().Processed.Load() { + if processImmediately { + logger.Debug("immediately processing envelope") + w.processMessage(recvMessage) + } else { + logger.Debug("posting event") + w.postEvent(recvMessage) // notify the local node about the new message + } + } + + return true, nil +} + +// postEvent queues the message for further processing. +func (w *Waku) postEvent(envelope *common.ReceivedMessage) { + w.msgQueue <- envelope +} + +// processQueueLoop delivers the messages to the watchers during the lifetime of the waku node. 
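+// Envelopes queued by postEvent are dequeued and dispatched to the installed filters here.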
+func (w *Waku) processQueueLoop() {
+ defer gocommon.LogOnPanic()
+ defer w.wg.Done()
+ if w.ctx == nil {
+ return
+ }
+ for {
+ select {
+ case <-w.ctx.Done():
+ return
+ case e := <-w.msgQueue:
+ w.processMessage(e)
+ }
+ }
+}
+
+func (w *Waku) processMessage(e *common.ReceivedMessage) {
+ logger := w.logger.With(
+ zap.Stringer("envelopeHash", e.Envelope.Hash()),
+ zap.String("pubsubTopic", e.PubsubTopic),
+ zap.String("contentTopic", e.ContentTopic.ContentTopic()),
+ zap.Int64("timestamp", e.Envelope.Message().GetTimestamp()),
+ )
+
+ if e.MsgType == common.StoreMessageType {
+ // We need to insert it first, and then remove it if not matched,
+ // as messages are processed asynchronously
+ w.storeMsgIDsMu.Lock()
+ w.storeMsgIDs[e.Hash()] = true
+ w.storeMsgIDsMu.Unlock()
+ }
+
+ matched := w.filters.NotifyWatchers(e)
+
+ // If not matched we remove it
+ if !matched {
+ logger.Debug("filters did not match")
+ w.storeMsgIDsMu.Lock()
+ delete(w.storeMsgIDs, e.Hash())
+ w.storeMsgIDsMu.Unlock()
+ } else {
+ logger.Debug("filters did match")
+ /* TODO-nwaku
+ if w.statusTelemetryClient != nil && e.MsgType == common.MissingMessageType {
+ w.statusTelemetryClient.PushMissedRelevantMessage(w.ctx, e)
+ }
+ */
+ e.Processed.Store(true)
+ }
+
+ w.envelopeFeed.Send(common.EnvelopeEvent{
+ Topic: e.ContentTopic,
+ Hash: e.Hash(),
+ Event: common.EventEnvelopeAvailable,
+ })
+}
+
+// GetEnvelope retrieves an envelope from the envelope cache by its hash.
+// It returns nil if the envelope cannot be found.
+func (w *Waku) GetEnvelope(hash gethcommon.Hash) *common.ReceivedMessage {
+ w.poolMu.RLock()
+ defer w.poolMu.RUnlock()
+
+ envelope := w.envelopeCache.Get(hash)
+ if envelope == nil {
+ return nil
+ }
+
+ return envelope.Value()
+}
+
+// IsEnvelopeCached checks whether an envelope with the given hash has already been received and cached.
+func (w *Waku) IsEnvelopeCached(hash gethcommon.Hash) bool { + w.poolMu.Lock() + defer w.poolMu.Unlock() + + return w.envelopeCache.Has(hash) +} + +func (w *Waku) ClearEnvelopesCache() { + w.poolMu.Lock() + defer w.poolMu.Unlock() + + w.envelopeCache.Stop() + w.envelopeCache = newTTLCache() +} + +func (w *Waku) PeerCount() (int, error) { + return w.node.GetNumConnectedPeers() +} + +// TODO-nwaku +func (w *Waku) Peers() types.PeerStats { + return nil + // return FormatPeerStats(w.node) +} + +/* TODO-nwaku +func (w *Waku) RelayPeersByTopic(topic string) (*types.PeerList, error) { + if w.cfg.LightClient { + return nil, errors.New("only available in relay mode") + } + + return &types.PeerList{ + FullMeshPeers: w.node.Relay().PubSub().MeshPeers(topic), + AllPeers: w.node.Relay().PubSub().ListPeers(topic), + }, nil +} +*/ + +func (w *Waku) SubscribeToPubsubTopic(topic string, pubkey *ecdsa.PublicKey) error { + topic = w.GetPubsubTopic(topic) + + if !w.cfg.LightClient { + err := w.subscribeToPubsubTopicWithWakuRelay(topic, pubkey) + if err != nil { + return err + } + } + return nil +} + +func (w *Waku) UnsubscribeFromPubsubTopic(topic string) error { + topic = w.GetPubsubTopic(topic) + + if !w.cfg.LightClient { + err := w.unsubscribeFromPubsubTopicWithWakuRelay(topic) + if err != nil { + return err + } + } + return nil +} + +func (w *Waku) RetrievePubsubTopicKey(topic string) (*ecdsa.PrivateKey, error) { + topic = w.GetPubsubTopic(topic) + if w.protectedTopicStore == nil { + return nil, nil + } + + return w.protectedTopicStore.FetchPrivateKey(topic) +} + +func (w *Waku) StorePubsubTopicKey(topic string, privKey *ecdsa.PrivateKey) error { + topic = w.GetPubsubTopic(topic) + if w.protectedTopicStore == nil { + return nil + } + + return w.protectedTopicStore.Insert(topic, privKey, &privKey.PublicKey) +} + +func (w *Waku) RemovePubsubTopicKey(topic string) error { + topic = w.GetPubsubTopic(topic) + if w.protectedTopicStore == nil { + return nil + } + + return w.protectedTopicStore.Delete(topic) +} + +func (w *Waku) handleNetworkChangeFromApp(state connection.State) { + // TODO-nwaku + /* + //If connection state is reported by something other than peerCount becoming 0 e.g from mobile app, disconnect all peers + if (state.Offline && len(w.node.Host().Network().Peers()) > 0) || + (w.state.Type != state.Type && !w.state.Offline && !state.Offline) { // network switched between wifi and cellular + w.logger.Info("connection switched or offline detected via mobile, disconnecting all peers") + w.node.DisconnectAllPeers() + if w.cfg.LightClient { + w.filterManager.NetworkChange() + } + } + */ +} + +/* TODO-nwaku +func (w *Waku) isGoingOnline(state connection.State) bool { + return !state.Offline && !w.onlineChecker.IsOnline() +} +*/ + +func (w *Waku) ConnectionChanged(state connection.State) { + /* TODO-nwaku + if w.isGoingOnline(state) { + //TODO: analyze if we need to discover and connect to peers for relay. + w.discoverAndConnectPeers() + } + + isOnline := !state.Offline + if w.cfg.LightClient { + //TODO: Update this as per https://github.com/waku-org/go-waku/issues/1114 + go func() { + defer gocommon.LogOnPanic() + w.filterManager.OnConnectionStatusChange("", isOnline) + }() + w.handleNetworkChangeFromApp(state) + } else { + // for lightClient state update and onlineChange is handled in filterManager. 
+ if w.isGoingOnline(state) { + select { + case w.goingOnline <- struct{}{}: + default: + w.logger.Warn("could not write on connection changed channel") + } + } + // update state + w.onlineChecker.SetOnline(isOnline) + } + w.state = state + */ +} + +/* TODO-nwaku +// seedBootnodesForDiscV5 tries to fetch bootnodes +// from an ENR periodically. +// It backs off exponentially until maxRetries, at which point it restarts from 0 +// It also restarts if there's a connection change signalled from the client +func (w *Waku) seedBootnodesForDiscV5() { + defer gocommon.LogOnPanic() + defer w.wg.Done() + + if !w.cfg.EnableDiscV5 || w.node.DiscV5() == nil { + return + } + + ticker := time.NewTicker(500 * time.Millisecond) + defer ticker.Stop() + var retries = 0 + + now := func() int64 { + return time.Now().UnixNano() / int64(time.Millisecond) + + } + + var lastTry = now() + + canQuery := func() bool { + backoff := bootnodesQueryBackoffMs * int64(math.Exp2(float64(retries))) + + return lastTry+backoff < now() + } + + for { + select { + case <-w.dnsDiscAsyncRetrievedSignal: + if !canQuery() { + continue + } + + err := w.restartDiscV5(true) + if err != nil { + w.logger.Warn("failed to restart discv5", zap.Error(err)) + } + retries = 0 + lastTry = now() + case <-ticker.C: + if w.seededBootnodesForDiscV5 && len(w.node.Host().Network().Peers()) > 3 { + w.logger.Debug("not querying bootnodes", zap.Bool("seeded", w.seededBootnodesForDiscV5), zap.Int("peer-count", len(w.node.Host().Network().Peers()))) + continue + } + + if !canQuery() { + w.logger.Info("can't query bootnodes", + zap.Int("peer-count", len(w.node.Host().Network().Peers())), + zap.Int64("lastTry", lastTry), zap.Int64("now", now()), + zap.Int64("backoff", bootnodesQueryBackoffMs*int64(math.Exp2(float64(retries)))), + zap.Int("retries", retries), + ) + continue + } + + w.logger.Info("querying bootnodes to restore connectivity", zap.Int("peer-count", len(w.node.Host().Network().Peers()))) + err := w.restartDiscV5(false) + if err != nil { + w.logger.Warn("failed to restart discv5", zap.Error(err)) + } + + lastTry = now() + retries++ + // We reset the retries after a while and restart + if retries > bootnodesMaxRetries { + retries = 0 + } + + // If we go online, trigger immediately + case <-w.goingOnline: + if !canQuery() { + continue + } + + err := w.restartDiscV5(false) + if err != nil { + w.logger.Warn("failed to restart discv5", zap.Error(err)) + } + retries = 0 + lastTry = now() + + case <-w.ctx.Done(): + w.logger.Debug("bootnode seeding stopped") + return + } + } +} + +// Restart discv5, re-retrieving bootstrap nodes +func (w *Waku) restartDiscV5(useOnlyDNSDiscCache bool) error { + ctx, cancel := context.WithTimeout(w.ctx, 30*time.Second) + defer cancel() + bootnodes, err := w.getDiscV5BootstrapNodes(ctx, w.discV5BootstrapNodes, useOnlyDNSDiscCache) + if err != nil { + return err + } + if len(bootnodes) == 0 { + return errors.New("failed to fetch bootnodes") + } + + if w.node.DiscV5().ErrOnNotRunning() != nil { + w.logger.Info("is not started restarting") + err := w.node.DiscV5().Start(w.ctx) + if err != nil { + w.logger.Error("Could not start DiscV5", zap.Error(err)) + } + } else { + w.node.DiscV5().Stop() + w.logger.Info("is started restarting") + + select { + case <-w.ctx.Done(): // Don't start discv5 if we are stopping waku + return nil + default: + } + + err := w.node.DiscV5().Start(w.ctx) + if err != nil { + w.logger.Error("Could not start DiscV5", zap.Error(err)) + } + } + + w.logger.Info("restarting discv5 with nodes", zap.Any("nodes", 
bootnodes))
+ return w.node.SetDiscV5Bootnodes(bootnodes)
+}
+*/
+
+func (w *Waku) timestamp() int64 {
+ return w.timesource.Now().UnixNano()
+}
+
+func (w *Waku) AddRelayPeer(address multiaddr.Multiaddr) (peer.ID, error) {
+ // TODO-nwaku
+ /*
+ peerID, err := w.node.AddPeer(address, wps.Static, w.cfg.DefaultShardedPubsubTopics, relay.WakuRelayID_v200)
+ if err != nil {
+ return "", err
+ }
+ return peerID, nil
+ */
+ return "", nil
+}
+
+func (w *Waku) DialPeer(address multiaddr.Multiaddr) error {
+ // Using WakuConnect so it matches the go-waku's behavior and terminology
+ ctx, cancel := context.WithTimeout(w.ctx, requestTimeout)
+ defer cancel()
+ return w.node.Connect(ctx, address)
+}
+
+func (w *Waku) DialPeerByID(peerID peer.ID) error {
+ ctx, cancel := context.WithTimeout(w.ctx, requestTimeout)
+ defer cancel()
+ return w.node.DialPeerByID(ctx, peerID, relay.WakuRelayID_v200)
+}
+
+func (w *Waku) DropPeer(peerID peer.ID) error {
+ return w.node.DisconnectPeerByID(peerID)
+}
+
+func (w *Waku) ProcessingP2PMessages() bool {
+ w.storeMsgIDsMu.Lock()
+ defer w.storeMsgIDsMu.Unlock()
+ return len(w.storeMsgIDs) != 0
+}
+
+func (w *Waku) MarkP2PMessageAsProcessed(hash gethcommon.Hash) {
+ w.storeMsgIDsMu.Lock()
+ defer w.storeMsgIDsMu.Unlock()
+ delete(w.storeMsgIDs, hash)
+}
+
+func (w *Waku) Clean() error {
+ w.msgQueue = make(chan *common.ReceivedMessage, messageQueueLimit)
+
+ for _, f := range w.filters.All() {
+ f.Messages = common.NewMemoryMessageStore()
+ }
+
+ return nil
+}
+
+func (w *Waku) PeerID() (peer.ID, error) {
+ return w.node.PeerID()
+}
+
+// validatePrivateKey checks the format of the given private key.
+func validatePrivateKey(k *ecdsa.PrivateKey) bool {
+ if k == nil || k.D == nil || k.D.Sign() == 0 {
+ return false
+ }
+ return common.ValidatePublicKey(&k.PublicKey)
+}
+
+// makeDeterministicID generates a deterministic ID based on a given input
+func makeDeterministicID(input string, keyLen int) (id string, err error) {
+ buf := pbkdf2.Key([]byte(input), nil, 4096, keyLen, sha256.New)
+ if !common.ValidateDataIntegrity(buf, common.KeyIDSize) {
+ return "", fmt.Errorf("error in GenerateDeterministicID: failed to generate key")
+ }
+ id = gethcommon.Bytes2Hex(buf)
+ return id, err
+}
+
+// toDeterministicID reviews an incoming id, and transforms it to the format
+// expected internally by the private key store. Originally, public keys
+// were used as keys; now random keys are used. And in order to
+// make it easier to consume, we now allow both random IDs and public
+// keys to be passed.
+func toDeterministicID(id string, expectedLen int) (string, error) {
+ if len(id) != (expectedLen * 2) { // we received hex key, so number of chars in id is doubled
+ var err error
+ id, err = makeDeterministicID(id, expectedLen)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ return id, nil
+}
+
+func FormatPeerStats(wakuNode *node.WakuNode) types.PeerStats {
+ p := make(types.PeerStats)
+ for k, v := range wakuNode.PeerStats() {
+ p[k] = types.WakuV2Peer{
+ Addresses: utils.EncapsulatePeerID(k, wakuNode.Host().Peerstore().PeerInfo(k).Addrs...),
+ Protocols: v,
+ }
+ }
+ return p
+}
+
+// TODO-nwaku
+func (w *Waku) StoreNode() *store.WakuStore {
+ // return w.node.Store()
+ return nil
+}
+
+func FormatPeerConnFailures(wakuNode *node.WakuNode) map[string]int {
+ p := make(map[string]int)
+ for _, peerID := range wakuNode.Host().Network().Peers() {
+ peerInfo := wakuNode.Host().Peerstore().PeerInfo(peerID)
+ connFailures := wakuNode.Host().Peerstore().(wps.WakuPeerstore).ConnFailures(peerInfo.ID)
+ if connFailures > 0 {
+ p[peerID.String()] = connFailures
+ }
+ }
+ return p
+}
+
+// TODO-nwaku
+func (w *Waku) LegacyStoreNode() legacy_store.Store {
+ // return w.node.LegacyStore()
+ return nil
+}
+
+func printStackTrace() {
+ // Create a buffer to hold the stack trace
+ buf := make([]byte, 102400)
+ // Capture the stack trace into the buffer
+ n := runtime.Stack(buf, false)
+ // Print the stack trace
+ fmt.Printf("Current stack trace:\n%s\n", buf[:n])
+}
+
+func wakuNew(nodeKey *ecdsa.PrivateKey,
+ fleet string,
+ cfg *Config, // TODO: merge Config and WakuConfig
+ nwakuCfg *WakuConfig,
+ logger *zap.Logger,
+ appDB *sql.DB,
+ ts *timesource.NTPTimeSource,
+ onHistoricMessagesRequestFailed func([]byte, peer.AddrInfo, error), onPeerStats func(types.ConnStatus)) (*Waku, error) {
+
+ var err error
+ if logger == nil {
+ logger, err = zap.NewDevelopment()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if ts == nil {
+ ts = timesource.Default()
+ }
+
+ if nodeKey == nil {
+ // No nodekey is provided, create an ephemeral key
+ nodeKey, err = crypto.GenerateKey()
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate a random private key: %v", err)
+ }
+ }
+ nwakuCfg.NodeKey = hex.EncodeToString(crypto.FromECDSA(nodeKey))
+
+ nwakuCfg.TcpPort, nwakuCfg.Discv5UdpPort, err = getFreePortIfNeeded(nwakuCfg.TcpPort, nwakuCfg.Discv5UdpPort, logger)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO-nwaku
+ // TODO: merge Config and WakuConfig
+ cfg = setDefaults(cfg)
+ if err = cfg.Validate(logger); err != nil {
+ return nil, err
+ }
+ logger.Info("starting wakuv2 with config", zap.Any("nwakuCfg", nwakuCfg), zap.Any("wakuCfg", cfg))
+
+ ctx, cancel := context.WithCancel(context.Background())
+
+ if !cfg.LightClient {
+ nwakuCfg.Filter = true
+ nwakuCfg.FilterMaxPeersToServe = 20
+ nwakuCfg.Lightpush = true
+ nwakuCfg.RateLimits.Filter = &RateLimit{Volume: 100, Period: 1, Unit: Second}
+ nwakuCfg.RateLimits.Lightpush = &RateLimit{Volume: 5, Period: 1, Unit: Second}
+ }
+
+ if cfg.EnablePeerExchangeServer {
+ nwakuCfg.PeerExchange = true
+ nwakuCfg.RateLimits.PeerExchange = &RateLimit{Volume: 5, Period: 1, Unit: Second}
+ }
+
+ wakunode, err := newWakuNode(ctx, nwakuCfg, logger)
+ if err != nil {
+ cancel()
+ return nil, err
+ }
+
+ var protectedTopicStore *persistence.ProtectedTopicsStore
+ if appDB != nil {
+ protectedTopicStore, err = persistence.NewProtectedTopicsStore(logger, appDB)
+ if err != nil {
+ cancel()
+ return nil, err
+ }
+ }
+
+ // Notice that the events for the node itself are handled by the 'MyEventCallback' method
+
+ return
&Waku{
+ node: wakunode,
+ wakuCfg: nwakuCfg,
+ cfg: cfg,
+ privateKeys: make(map[string]*ecdsa.PrivateKey),
+ symKeys: make(map[string][]byte),
+ envelopeCache: newTTLCache(),
+ msgQueue: make(chan *common.ReceivedMessage, messageQueueLimit),
+ topicHealthStatusChan: make(chan peermanager.TopicHealthStatus, 100),
+ connectionNotifChan: make(chan node.PeerConnection, 20),
+ connStatusSubscriptions: make(map[string]*types.ConnStatusSubscription),
+ ctx: ctx,
+ cancel: cancel,
+ wg: sync.WaitGroup{},
+ dnsAddressCache: make(map[string][]dnsdisc.DiscoveredNode),
+ dnsAddressCacheLock: &sync.RWMutex{},
+ dnsDiscAsyncRetrievedSignal: make(chan struct{}),
+ storeMsgIDs: make(map[gethcommon.Hash]bool),
+ timesource: ts,
+ storeMsgIDsMu: sync.RWMutex{},
+ logger: logger,
+ discV5BootstrapNodes: nwakuCfg.Discv5BootstrapNodes,
+ onHistoricMessagesRequestFailed: onHistoricMessagesRequestFailed,
+ onPeerStats: onPeerStats,
+ onlineChecker: onlinechecker.NewDefaultOnlineChecker(false).(*onlinechecker.DefaultOnlineChecker),
+ sendQueue: publish.NewMessageQueue(1000, cfg.UseThrottledPublish),
+ filters: common.NewFilters(cfg.DefaultShardPubsubTopic, logger),
+ protectedTopicStore: protectedTopicStore,
+ }, nil
+
+}
+
+// The event callback sends back the node's ctx to know for which
+// node the event is being emitted. Since we only have a global
+// callback on the Go side, we register all the nodes that we create
+// so we can later determine which instance of `WakuNode` should
+// be invoked depending on the ctx received
+
+var nodeRegistry map[unsafe.Pointer]*WakuNode
+
+func init() {
+ nodeRegistry = make(map[unsafe.Pointer]*WakuNode)
+}
+
+func registerNode(node *WakuNode) {
+ _, ok := nodeRegistry[node.wakuCtx]
+ if !ok {
+ nodeRegistry[node.wakuCtx] = node
+ }
+}
+
+func unregisterNode(node *WakuNode) {
+ delete(nodeRegistry, node.wakuCtx)
+}
+
+//export globalEventCallback
+func globalEventCallback(callerRet C.int, msg *C.char, len C.size_t, userData unsafe.Pointer) {
+ if callerRet == C.RET_OK {
+ eventStr := C.GoStringN(msg, C.int(len))
+ node, ok := nodeRegistry[userData]
+ if ok {
+ node.OnEvent(eventStr)
+ }
+ } else {
+ errMsgField := zap.Skip()
+ if len != 0 {
+ errMsgField = zap.String("error", C.GoStringN(msg, C.int(len)))
+ }
+ log.Error("globalEventCallback retCode not ok", zap.Int("retCode", int(callerRet)), errMsgField)
+ }
+}
+
+type response struct {
+ err error
+ value any
+}
+
+//export GoCallback
+func GoCallback(ret C.int, msg *C.char, len C.size_t, resp unsafe.Pointer) {
+ if resp != nil {
+ m := (*C.Resp)(resp)
+ m.ret = ret
+ m.msg = msg
+ m.len = len
+ wg := (*sync.WaitGroup)(m.wg)
+ wg.Done()
+ }
+}
+
+// WakuNode represents an instance of an nwaku node
+type WakuNode struct {
+ wakuCtx unsafe.Pointer
+ logger *zap.Logger
+ cancel context.CancelFunc
+ MsgChan chan common.Envelope
+}
+
+func newWakuNode(ctx context.Context, config *WakuConfig, logger *zap.Logger) (*WakuNode, error) {
+ ctx, cancel := context.WithCancel(ctx)
+
+ n := &WakuNode{
+ cancel: cancel,
+ }
+
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+ go func() {
+ defer gocommon.LogOnPanic()
+
+ // Park a locked OS thread for the lifetime of the node context
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ wg.Done()
+
+ <-ctx.Done()
+ }()
+
+ wg.Wait()
+
+ jsonConfig, err := json.Marshal(config)
+ if err != nil {
+ return nil, err
+ }
+
+ var cJsonConfig = C.CString(string(jsonConfig))
+
+ var resp = C.allocResp(unsafe.Pointer(&wg))
+
+ defer C.free(unsafe.Pointer(cJsonConfig))
+ defer C.freeResp(resp)
+
+ wg.Add(1)
+ n.wakuCtx = C.cGoWakuNew(cJsonConfig, resp)
+ wg.Wait()
+
+ if C.getRet(resp) != C.RET_OK {
+ errMsg := "error wakuNew: " + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return nil, errors.New(errMsg)
+ }
+
+ n.MsgChan = make(chan common.Envelope, 100)
+ n.logger = logger.Named("nwaku")
+
+ // Notice that the events for the node itself are handled by the 'MyEventCallback' method
+ C.cGoWakuSetEventCallback(n.wakuCtx)
+
+ return n, nil
+}
+
+type jsonEvent struct {
+ EventType string `json:"eventType"`
+}
+
+func (n *WakuNode) OnEvent(eventStr string) {
+ jsonEvent := jsonEvent{}
+ err := json.Unmarshal([]byte(eventStr), &jsonEvent)
+ if err != nil {
+ n.logger.Error("could not unmarshal nwaku event string", zap.Error(err))
+ return
+ }
+
+ switch jsonEvent.EventType {
+ case "message":
+ n.parseMessageEvent(eventStr)
+ }
+}
+
+func (n *WakuNode) parseMessageEvent(eventStr string) {
+ envelope, err := common.NewEnvelope(eventStr)
+ if err != nil {
+ n.logger.Error("could not parse message", zap.Error(err))
+ return
+ }
+ n.MsgChan <- envelope
+}
+
+func (n *WakuNode) GetNumConnectedRelayPeers(optPubsubTopic ...string) (int, error) {
+ var pubsubTopic string
+ if len(optPubsubTopic) == 0 {
+ pubsubTopic = ""
+ } else {
+ pubsubTopic = optPubsubTopic[0]
+ }
+
+ wg := sync.WaitGroup{}
+
+ var resp = C.allocResp(unsafe.Pointer(&wg))
+ defer C.freeResp(resp)
+
+ var cPubsubTopic = C.CString(pubsubTopic)
+ defer C.free(unsafe.Pointer(cPubsubTopic))
+
+ wg.Add(1)
+ C.cGoWakuGetNumConnectedRelayPeers(n.wakuCtx, cPubsubTopic, resp)
+ wg.Wait()
+
+ if C.getRet(resp) == C.RET_OK {
+ numPeersStr := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ numPeers, err := strconv.Atoi(numPeersStr)
+ if err != nil {
+ errMsg := "GetNumConnectedRelayPeers - error converting string to int: " + err.Error()
+ return 0, errors.New(errMsg)
+ }
+ return numPeers, nil
+ }
+ errMsg := "error GetNumConnectedRelayPeers: " +
+ C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return 0, errors.New(errMsg)
+}
+
+func (n *WakuNode) DisconnectPeerByID(peerID peer.ID) error {
+ wg := sync.WaitGroup{}
+
+ var resp = C.allocResp(unsafe.Pointer(&wg))
+ var cPeerId = C.CString(peerID.String())
+ defer C.freeResp(resp)
+ defer C.free(unsafe.Pointer(cPeerId))
+
+ wg.Add(1)
+ C.cGoWakuDisconnectPeerById(n.wakuCtx, cPeerId, resp)
+ wg.Wait()
+
+ if C.getRet(resp) == C.RET_OK {
+ return nil
+ }
+ errMsg := "error DisconnectPeerById: " + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return errors.New(errMsg)
+}
+
+func (n *WakuNode) GetConnectedPeers() (peer.IDSlice, error) {
+ wg := sync.WaitGroup{}
+
+ var resp = C.allocResp(unsafe.Pointer(&wg))
+ defer C.freeResp(resp)
+
+ wg.Add(1)
+ C.cGoWakuGetConnectedPeers(n.wakuCtx, resp)
+ wg.Wait()
+
+ if C.getRet(resp) == C.RET_OK {
+ peersStr := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ if peersStr == "" {
+ return nil, nil
+ }
+ // peersStr contains a comma-separated list of peer ids
+ itemsPeerIds := strings.Split(peersStr, ",")
+ var peers peer.IDSlice
+ for _, peerId := range itemsPeerIds {
+ id, err := peer.Decode(peerId)
+ if err != nil {
+ return nil, fmt.Errorf("GetConnectedPeers - decoding peerId: %w", err)
+ }
+ peers = append(peers, id)
+ }
+ return peers, nil
+ }
+ errMsg := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return nil, fmt.Errorf("GetConnectedPeers: %s", errMsg)
+
+}
+
+func (n *WakuNode) RelaySubscribe(pubsubTopic string) error {
+ if pubsubTopic == "" {
+ return errors.New("pubsub topic is empty")
+ }
+
+ wg := sync.WaitGroup{}
+
+ var resp =
C.allocResp(unsafe.Pointer(&wg)) + var cPubsubTopic = C.CString(pubsubTopic) + + defer C.freeResp(resp) + defer C.free(unsafe.Pointer(cPubsubTopic)) + + if n.wakuCtx == nil { + return errors.New("wakuCtx is nil") + } + + wg.Add(1) + C.cGoWakuRelaySubscribe(n.wakuCtx, cPubsubTopic, resp) + wg.Wait() + + if C.getRet(resp) == C.RET_OK { + return nil + } + + errMsg := "error WakuRelaySubscribe: " + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return errors.New(errMsg) +} + +func (n *WakuNode) RelayAddProtectedShard(clusterId uint16, shardId uint16, pubkey *ecdsa.PublicKey) error { + if pubkey == nil { + return nil // Nothing to do here + } + + keyHexStr := hex.EncodeToString(crypto.FromECDSAPub(pubkey)) + + wg := sync.WaitGroup{} + + var resp = C.allocResp(unsafe.Pointer(&wg)) + var cPublicKey = C.CString(keyHexStr) + + defer C.freeResp(resp) + defer C.free(unsafe.Pointer(cPublicKey)) + + if n.wakuCtx == nil { + return errors.New("wakuCtx is nil") + } + + wg.Add(1) + C.cGoWakuRelayAddProtectedShard(n.wakuCtx, C.int(clusterId), C.int(shardId), cPublicKey, resp) + wg.Wait() + + if C.getRet(resp) == C.RET_OK { + return nil + } + + errMsg := "error WakuRelayAddProtectedShard: " + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return errors.New(errMsg) +} + +func (n *WakuNode) RelayUnsubscribe(pubsubTopic string) error { + if pubsubTopic == "" { + return errors.New("pubsub topic is empty") + } + + wg := sync.WaitGroup{} + + var resp = C.allocResp(unsafe.Pointer(&wg)) + var cPubsubTopic = C.CString(pubsubTopic) + + defer C.freeResp(resp) + defer C.free(unsafe.Pointer(cPubsubTopic)) + + if n.wakuCtx == nil { + return errors.New("wakuCtx is nil") + } + + wg.Add(1) + C.cGoWakuRelayUnsubscribe(n.wakuCtx, cPubsubTopic, resp) + wg.Wait() + + if C.getRet(resp) == C.RET_OK { + return nil + } + + errMsg := "error WakuRelayUnsubscribe: " + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return errors.New(errMsg) +} + +func (n *WakuNode) PeerExchangeRequest(numPeers uint64) (uint64, error) { + wg := sync.WaitGroup{} + + var resp = C.allocResp(unsafe.Pointer(&wg)) + defer C.freeResp(resp) + + wg.Add(1) + C.cGoWakuPeerExchangeQuery(n.wakuCtx, C.uint64_t(numPeers), resp) + wg.Wait() + if C.getRet(resp) == C.RET_OK { + numRecvPeersStr := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + numRecvPeers, err := strconv.ParseUint(numRecvPeersStr, 10, 64) + if err != nil { + return 0, err + } + return numRecvPeers, nil + } + + errMsg := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return 0, errors.New(errMsg) +} + +func (n *WakuNode) StartDiscV5() error { + wg := sync.WaitGroup{} + + var resp = C.allocResp(unsafe.Pointer(&wg)) + defer C.freeResp(resp) + + wg.Add(1) + C.cGoWakuStartDiscV5(n.wakuCtx, resp) + wg.Wait() + if C.getRet(resp) == C.RET_OK { + return nil + } + errMsg := "error WakuStartDiscV5: " + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return errors.New(errMsg) +} + +func (n *WakuNode) StopDiscV5() error { + wg := sync.WaitGroup{} + + var resp = C.allocResp(unsafe.Pointer(&wg)) + defer C.freeResp(resp) + + wg.Add(1) + C.cGoWakuStopDiscV5(n.wakuCtx, resp) + wg.Wait() + + if C.getRet(resp) == C.RET_OK { + return nil + } + errMsg := "error WakuStopDiscV5: " + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return errors.New(errMsg) +} + +func (n *WakuNode) Version() (string, error) { + wg := sync.WaitGroup{} + + var resp = C.allocResp(unsafe.Pointer(&wg)) + defer C.freeResp(resp) + + 
wg.Add(1) + C.cGoWakuVersion(n.wakuCtx, resp) + wg.Wait() + + if C.getRet(resp) == C.RET_OK { + var version = C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return version, nil + } + + errMsg := "error WakuVersion: " + + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return "", errors.New(errMsg) +} + +func (n *WakuNode) StoreQuery(ctx context.Context, storeRequest *storepb.StoreQueryRequest, peerInfo peer.AddrInfo) (*storepb.StoreQueryResponse, error) { + timeoutMs := getContextTimeoutMilliseconds(ctx) + + b, err := json.Marshal(storeRequest) + if err != nil { + return nil, err + } + + addrs := make([]string, len(peerInfo.Addrs)) + for i, addr := range utils.EncapsulatePeerID(peerInfo.ID, peerInfo.Addrs...) { + addrs[i] = addr.String() + } + + var cJsonQuery = C.CString(string(b)) + var cPeerAddr = C.CString(strings.Join(addrs, ",")) + wg := sync.WaitGroup{} + + var resp = C.allocResp(unsafe.Pointer(&wg)) + + defer C.free(unsafe.Pointer(cJsonQuery)) + defer C.free(unsafe.Pointer(cPeerAddr)) + defer C.freeResp(resp) + + wg.Add(1) + C.cGoWakuStoreQuery(n.wakuCtx, cJsonQuery, cPeerAddr, C.int(timeoutMs), resp) + wg.Wait() + + if C.getRet(resp) == C.RET_OK { + jsonResponseStr := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + storeQueryResponse := &storepb.StoreQueryResponse{} + err = json.Unmarshal([]byte(jsonResponseStr), storeQueryResponse) + if err != nil { + return nil, err + } + return storeQueryResponse, nil + } + errMsg := "error WakuStoreQuery: " + + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return nil, errors.New(errMsg) +} + +func (n *WakuNode) RelayPublish(ctx context.Context, message *pb.WakuMessage, pubsubTopic string) (pb.MessageHash, error) { + timeoutMs := getContextTimeoutMilliseconds(ctx) + + jsonMsg, err := json.Marshal(message) + if err != nil { + return pb.MessageHash{}, err + } + + wg := sync.WaitGroup{} + + var resp = C.allocResp(unsafe.Pointer(&wg)) + var cPubsubTopic = C.CString(pubsubTopic) + var msg = C.CString(string(jsonMsg)) + defer C.freeResp(resp) + defer C.free(unsafe.Pointer(cPubsubTopic)) + defer C.free(unsafe.Pointer(msg)) + + wg.Add(1) + C.cGoWakuRelayPublish(n.wakuCtx, cPubsubTopic, msg, C.int(timeoutMs), resp) + wg.Wait() + if C.getRet(resp) == C.RET_OK { + msgHash := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + msgHashBytes, err := hexutil.Decode(msgHash) + if err != nil { + return pb.MessageHash{}, err + } + return pb.ToMessageHash(msgHashBytes), nil + } + errMsg := "WakuRelayPublish: " + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return pb.MessageHash{}, errors.New(errMsg) +} + +func (n *WakuNode) DnsDiscovery(ctx context.Context, enrTreeUrl string, nameDnsServer string) ([]multiaddr.Multiaddr, error) { + wg := sync.WaitGroup{} + + var resp = C.allocResp(unsafe.Pointer(&wg)) + var cEnrTree = C.CString(enrTreeUrl) + var cDnsServer = C.CString(nameDnsServer) + defer C.freeResp(resp) + defer C.free(unsafe.Pointer(cEnrTree)) + defer C.free(unsafe.Pointer(cDnsServer)) + + timeoutMs := getContextTimeoutMilliseconds(ctx) + + wg.Add(1) + C.cGoWakuDnsDiscovery(n.wakuCtx, cEnrTree, cDnsServer, C.int(timeoutMs), resp) + wg.Wait() + if C.getRet(resp) == C.RET_OK { + var addrsRet []multiaddr.Multiaddr + nodeAddresses := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + addrss := strings.Split(nodeAddresses, ",") + for _, addr := range addrss { + addr, err := multiaddr.NewMultiaddr(addr) + if err != nil { + return nil, err + } + 
addrsRet = append(addrsRet, addr) + } + return addrsRet, nil + } + errMsg := "error WakuDnsDiscovery: " + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return nil, errors.New(errMsg) +} + +func (n *WakuNode) PingPeer(ctx context.Context, peerInfo peer.AddrInfo) (time.Duration, error) { + addrs := make([]string, len(peerInfo.Addrs)) + for i, addr := range utils.EncapsulatePeerID(peerInfo.ID, peerInfo.Addrs...) { + addrs[i] = addr.String() + } + + wg := sync.WaitGroup{} + + var resp = C.allocResp(unsafe.Pointer(&wg)) + defer C.freeResp(resp) + + var cPeerId = C.CString(strings.Join(addrs, ",")) + defer C.free(unsafe.Pointer(cPeerId)) + + timeoutMs := getContextTimeoutMilliseconds(ctx) + + wg.Add(1) + C.cGoWakuPingPeer(n.wakuCtx, cPeerId, C.int(timeoutMs), resp) + wg.Wait() + if C.getRet(resp) == C.RET_OK { + rttStr := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + rttInt, err := strconv.ParseInt(rttStr, 10, 64) + if err != nil { + return 0, err + } + return time.Duration(rttInt), nil + } + + errMsg := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return 0, fmt.Errorf("PingPeer: %s", errMsg) +} + +func (n *WakuNode) Start() error { + wg := sync.WaitGroup{} + + var resp = C.allocResp(unsafe.Pointer(&wg)) + defer C.freeResp(resp) + + wg.Add(1) + C.cGoWakuStart(n.wakuCtx, resp) + wg.Wait() + if C.getRet(resp) == C.RET_OK { + registerNode(n) + return nil + } + + errMsg := "error WakuStart: " + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return errors.New(errMsg) +} + +func (n *WakuNode) Stop() error { + wg := sync.WaitGroup{} + + var resp = C.allocResp(unsafe.Pointer(&wg)) + defer C.freeResp(resp) + + wg.Add(1) + C.cGoWakuStop(n.wakuCtx, resp) + wg.Wait() + if C.getRet(resp) == C.RET_OK { + unregisterNode(n) + return nil + } + + errMsg := "error WakuStop: " + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return errors.New(errMsg) +} + +func (n *WakuNode) Destroy() error { + wg := sync.WaitGroup{} + + var resp = C.allocResp(unsafe.Pointer(&wg)) + defer C.freeResp(resp) + + wg.Add(1) + C.cGoWakuDestroy(n.wakuCtx, resp) + wg.Wait() + if C.getRet(resp) == C.RET_OK { + return nil + } + + errMsg := "error WakuDestroy: " + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return errors.New(errMsg) +} + +func (n *WakuNode) PeerID() (peer.ID, error) { + wg := sync.WaitGroup{} + + var resp = C.allocResp(unsafe.Pointer(&wg)) + defer C.freeResp(resp) + + wg.Add(1) + C.cGoWakuGetMyPeerId(n.wakuCtx, resp) + wg.Wait() + if C.getRet(resp) == C.RET_OK { + peerIdStr := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + id, err := peer.Decode(peerIdStr) + if err != nil { + errMsg := "WakuGetMyPeerId - decoding peerId: %w" + return "", fmt.Errorf(errMsg, err) + } + return id, nil + } + errMsg := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return "", errors.New(errMsg) +} + +func (n *WakuNode) Connect(ctx context.Context, addr multiaddr.Multiaddr) error { + wg := sync.WaitGroup{} + + var resp = C.allocResp(unsafe.Pointer(&wg)) + var cPeerMultiAddr = C.CString(addr.String()) + defer C.freeResp(resp) + defer C.free(unsafe.Pointer(cPeerMultiAddr)) + + timeoutMs := getContextTimeoutMilliseconds(ctx) + + wg.Add(1) + C.cGoWakuConnect(n.wakuCtx, cPeerMultiAddr, C.int(timeoutMs), resp) + wg.Wait() + if C.getRet(resp) == C.RET_OK { + return nil + } + errMsg := "error WakuConnect: " + + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return errors.New(errMsg) +} + 
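+// NOTE: every binding in this file repeats one request/response pattern: +// allocate a response handle bound to a sync.WaitGroup, invoke the +// asynchronous cgo call, wait for the callback, then branch on RET_OK. +// A hypothetical generic helper (sketch only, not part of this change; +// syncCall and invoke are illustrative names) could factor it out: +// +//	func (n *WakuNode) syncCall(invoke func(resp unsafe.Pointer)) (string, error) { +//		wg := sync.WaitGroup{} +//		resp := C.allocResp(unsafe.Pointer(&wg)) +//		defer C.freeResp(resp) +//		wg.Add(1) +//		invoke(resp) // e.g. wraps C.cGoWakuVersion(n.wakuCtx, resp) +//		wg.Wait() +//		out := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) +//		if C.getRet(resp) == C.RET_OK { +//			return out, nil +//		} +//		return "", errors.New(out) +//	} +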
+func (n *WakuNode) DialPeerByID(ctx context.Context, peerID peer.ID, protocol libp2pproto.ID) error { + wg := sync.WaitGroup{} + + var resp = C.allocResp(unsafe.Pointer(&wg)) + var cPeerId = C.CString(peerID.String()) + var cProtocol = C.CString(string(protocol)) + defer C.freeResp(resp) + defer C.free(unsafe.Pointer(cPeerId)) + defer C.free(unsafe.Pointer(cProtocol)) + + timeoutMs := getContextTimeoutMilliseconds(ctx) + + wg.Add(1) + C.cGoWakuDialPeerById(n.wakuCtx, cPeerId, cProtocol, C.int(timeoutMs), resp) + wg.Wait() + if C.getRet(resp) == C.RET_OK { + return nil + } + errMsg := "error DialPeerById: " + + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return errors.New(errMsg) +} + +func (n *WakuNode) ListenAddresses() ([]multiaddr.Multiaddr, error) { + wg := sync.WaitGroup{} + + var resp = C.allocResp(unsafe.Pointer(&wg)) + defer C.freeResp(resp) + + wg.Add(1) + C.cGoWakuListenAddresses(n.wakuCtx, resp) + wg.Wait() + if C.getRet(resp) == C.RET_OK { + var addrsRet []multiaddr.Multiaddr + listenAddresses := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + addrss := strings.Split(listenAddresses, ",") + for _, addr := range addrss { + addr, err := multiaddr.NewMultiaddr(addr) + if err != nil { + return nil, err + } + addrsRet = append(addrsRet, addr) + } + return addrsRet, nil + } + errMsg := "error WakuListenAddresses: " + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return nil, errors.New(errMsg) +} + +func (n *WakuNode) ENR() (*enode.Node, error) { + wg := sync.WaitGroup{} + + var resp = C.allocResp(unsafe.Pointer(&wg)) + defer C.freeResp(resp) + + wg.Add(1) + C.cGoWakuGetMyENR(n.wakuCtx, resp) + wg.Wait() + if C.getRet(resp) == C.RET_OK { + enrStr := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + n, err := enode.Parse(enode.ValidSchemes, enrStr) + if err != nil { + return nil, err + } + return n, nil + } + errMsg := "error WakuGetMyENR: " + + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return nil, errors.New(errMsg) +} + +func (n *WakuNode) ListPeersInMesh(pubsubTopic string) (int, error) { + wg := sync.WaitGroup{} + + var resp = C.allocResp(unsafe.Pointer(&wg)) + var cPubsubTopic = C.CString(pubsubTopic) + defer C.freeResp(resp) + defer C.free(unsafe.Pointer(cPubsubTopic)) + + wg.Add(1) + C.cGoWakuListPeersInMesh(n.wakuCtx, cPubsubTopic, resp) + wg.Wait() + if C.getRet(resp) == C.RET_OK { + numPeersStr := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + numPeers, err := strconv.Atoi(numPeersStr) + if err != nil { + errMsg := "ListPeersInMesh - error converting string to int: " + err.Error() + return 0, errors.New(errMsg) + } + return numPeers, nil + } + errMsg := "error ListPeersInMesh: " + + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return 0, errors.New(errMsg) +} + +func (n *WakuNode) GetPeerIDsFromPeerStore() (peer.IDSlice, error) { + wg := sync.WaitGroup{} + + var resp = C.allocResp(unsafe.Pointer(&wg)) + defer C.freeResp(resp) + + wg.Add(1) + C.cGoWakuGetPeerIdsFromPeerStore(n.wakuCtx, resp) + wg.Wait() + if C.getRet(resp) == C.RET_OK { + peersStr := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + if peersStr == "" { + return nil, nil + } + // peersStr contains a comma-separated list of peer ids + itemsPeerIds := strings.Split(peersStr, ",") + + var peers peer.IDSlice + for _, peerId := range itemsPeerIds { + id, err := peer.Decode(peerId) + if err != nil { + return nil, fmt.Errorf("GetPeerIdsFromPeerStore - decoding peerId: %w", 
err) + } + peers = append(peers, id) + } + return peers, nil + } + errMsg := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return nil, fmt.Errorf("GetPeerIdsFromPeerStore: %s", errMsg) +} + +func (n *WakuNode) GetPeerIDsByProtocol(protocol libp2pproto.ID) (peer.IDSlice, error) { + wg := sync.WaitGroup{} + + var resp = C.allocResp(unsafe.Pointer(&wg)) + var cProtocol = C.CString(string(protocol)) + defer C.freeResp(resp) + defer C.free(unsafe.Pointer(cProtocol)) + + wg.Add(1) + C.cGoWakuGetPeerIdsByProtocol(n.wakuCtx, cProtocol, resp) + wg.Wait() + if C.getRet(resp) == C.RET_OK { + peersStr := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + if peersStr == "" { + return nil, nil + } + // peersStr contains a comma-separated list of peer ids + itemsPeerIds := strings.Split(peersStr, ",") + + var peers peer.IDSlice + for _, p := range itemsPeerIds { + id, err := peer.Decode(p) + if err != nil { + return nil, fmt.Errorf("GetPeerIdsByProtocol - decoding peerId: %w", err) + } + peers = append(peers, id) + } + return peers, nil + } + errMsg := "error GetPeerIdsByProtocol: " + + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return nil, fmt.Errorf("GetPeerIdsByProtocol: %s", errMsg) +} + +func (n *WakuNode) DialPeer(ctx context.Context, peerAddr multiaddr.Multiaddr, protocol libp2pproto.ID) error { + wg := sync.WaitGroup{} + + var resp = C.allocResp(unsafe.Pointer(&wg)) + var cPeerMultiAddr = C.CString(peerAddr.String()) + var cProtocol = C.CString(string(protocol)) + defer C.freeResp(resp) + defer C.free(unsafe.Pointer(cPeerMultiAddr)) + defer C.free(unsafe.Pointer(cProtocol)) + + timeoutMs := getContextTimeoutMilliseconds(ctx) + + wg.Add(1) + C.cGoWakuDialPeer(n.wakuCtx, cPeerMultiAddr, cProtocol, C.int(timeoutMs), resp) + wg.Wait() + if C.getRet(resp) == C.RET_OK { + return nil + } + errMsg := "error DialPeer: " + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return errors.New(errMsg) +} + +func (n *WakuNode) GetNumConnectedPeers() (int, error) { + peers, err := n.GetConnectedPeers() + if err != nil { + return 0, err + } + return len(peers), nil +} + +func getContextTimeoutMilliseconds(ctx context.Context) int { + deadline, ok := ctx.Deadline() + if ok { + return int(time.Until(deadline).Milliseconds()) + } + return 0 +} + +func getFreePortIfNeeded(tcpPort int, discV5UDPPort int, logger *zap.Logger) (int, int, error) { + if tcpPort == 0 { + for i := 0; i < 10; i++ { + tcpAddr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort("localhost", "0")) + if err != nil { + logger.Warn("unable to resolve tcp addr: %v", zap.Error(err)) + continue + } + tcpListener, err := net.ListenTCP("tcp", tcpAddr) + if err != nil { + logger.Warn("unable to listen on addr", zap.Stringer("addr", tcpAddr), zap.Error(err)) + continue + } + tcpPort = tcpListener.Addr().(*net.TCPAddr).Port + tcpListener.Close() + break + } + if tcpPort == 0 { + return -1, -1, errors.New("could not obtain a free TCP port") + } + } + + if discV5UDPPort == 0 { + for i := 0; i < 10; i++ { + udpAddr, err := net.ResolveUDPAddr("udp", net.JoinHostPort("localhost", "0")) + if err != nil { + logger.Warn("unable to resolve udp addr: %v", zap.Error(err)) + continue + } + + udpListener, err := net.ListenUDP("udp", udpAddr) + if err != nil { + logger.Warn("unable to listen on addr", zap.Stringer("addr", udpAddr), zap.Error(err)) + continue + } + + discV5UDPPort = udpListener.LocalAddr().(*net.UDPAddr).Port + udpListener.Close() + break + } + if discV5UDPPort == 0 { + return -1, -1, 
errors.New("could not obtain a free UDP port") + } + } + + return tcpPort, discV5UDPPort, nil } diff --git a/wakuv2/nwaku_test.go b/wakuv2/nwaku_test.go new file mode 100644 index 000000000..cbb1380ee --- /dev/null +++ b/wakuv2/nwaku_test.go @@ -0,0 +1,1090 @@ +//go:build use_nwaku +// +build use_nwaku + +package wakuv2 + +import ( + "context" + "errors" + "slices" + "testing" + "time" + + "github.com/cenkalti/backoff/v3" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/waku-org/go-waku/waku/v2/api/history" + "github.com/waku-org/go-waku/waku/v2/protocol" + "github.com/waku-org/go-waku/waku/v2/protocol/pb" + "github.com/waku-org/go-waku/waku/v2/protocol/store" + "go.uber.org/zap" + "golang.org/x/exp/maps" + "google.golang.org/protobuf/proto" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" + ethdnsdisc "github.com/ethereum/go-ethereum/p2p/dnsdisc" + "github.com/ethereum/go-ethereum/p2p/enode" + + "github.com/stretchr/testify/require" + + "github.com/status-im/status-go/protocol/tt" + "github.com/status-im/status-go/wakuv2/common" +) + +var testStoreENRBootstrap = "enrtree://AI4W5N5IFEUIHF5LESUAOSMV6TKWF2MB6GU2YK7PU4TYUGUNOCEPW@store.staging.status.nodes.status.im" +var testBootENRBootstrap = "enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.staging.status.nodes.status.im" + +func setDefaultConfig(config *Config, lightMode bool) { + config.ClusterID = 16 + + if lightMode { + config.EnablePeerExchangeClient = true + config.LightClient = true + config.EnableDiscV5 = false + } else { + config.EnableDiscV5 = true + config.EnablePeerExchangeServer = true + config.LightClient = false + config.EnablePeerExchangeClient = false + } +} + +/* +func TestDiscoveryV5(t *testing.T) { + config := &Config{} + setDefaultConfig(config, false) + config.DiscV5BootstrapNodes = []string{testStoreENRBootstrap} + config.DiscoveryLimit = 20 + w, err := New(nil, "shards.staging", config, nil, nil, nil, nil, nil) + require.NoError(t, err) + + require.NoError(t, w.Start()) + + err = tt.RetryWithBackOff(func() error { + if len(w.Peers()) == 0 { + return errors.New("no peers discovered") + } + return nil + }) + + require.NoError(t, err) + + require.NotEqual(t, 0, len(w.Peers())) + require.NoError(t, w.Stop()) +} +*/ +/* +func TestRestartDiscoveryV5(t *testing.T) { + config := &Config{} + setDefaultConfig(config, false) + // Use wrong discv5 bootstrap address, to simulate being offline + config.DiscV5BootstrapNodes = []string{"enrtree://AOGECG2SPND25EEFMAJ5WF3KSGJNSGV356DSTL2YVLLZWIV6SAYBM@1.1.1.2"} + config.DiscoveryLimit = 20 + config.UDPPort = 10002 + config.ClusterID = 16 + w, err := New(nil, "", config, nil, nil, nil, nil, nil) + require.NoError(t, err) + + require.NoError(t, w.Start()) + require.False(t, w.seededBootnodesForDiscV5) + + options := func(b *backoff.ExponentialBackOff) { + b.MaxElapsedTime = 2 * time.Second + } + + // Sanity check, not great, but it's probably helpful + err = tt.RetryWithBackOff(func() error { + if len(w.Peers()) == 0 { + return errors.New("no peers discovered") + } + return nil + }, options) + + require.Error(t, err) + + w.discV5BootstrapNodes = []string{testStoreENRBootstrap} + + options = func(b *backoff.ExponentialBackOff) { + b.MaxElapsedTime = 90 * time.Second + } + + err = tt.RetryWithBackOff(func() error { + if len(w.Peers()) == 0 { + return errors.New("no peers discovered") + } + return nil + }, options) + require.NoError(t, err) + + require.True(t, w.seededBootnodesForDiscV5) + require.NotEqual(t, 0, 
len(w.Peers())) + require.NoError(t, w.Stop()) +} + +func TestRelayPeers(t *testing.T) { + config := &Config{ + EnableMissingMessageVerification: true, + } + setDefaultConfig(config, false) + w, err := New(nil, "", config, nil, nil, nil, nil, nil) + require.NoError(t, err) + require.NoError(t, w.Start()) + _, err = w.RelayPeersByTopic(config.DefaultShardPubsubTopic) + require.NoError(t, err) + + // Ensure function returns an error for lightclient + config = &Config{} + config.ClusterID = 16 + config.LightClient = true + w, err = New(nil, "", config, nil, nil, nil, nil, nil) + require.NoError(t, err) + require.NoError(t, w.Start()) + _, err = w.RelayPeersByTopic(config.DefaultShardPubsubTopic) + require.Error(t, err) +} +*/ +func parseNodes(rec []string) []*enode.Node { + var ns []*enode.Node + for _, r := range rec { + var n enode.Node + if err := n.UnmarshalText([]byte(r)); err != nil { + panic(err) + } + ns = append(ns, &n) + } + return ns +} + +type testStorenodeConfigProvider struct { + storenode peer.AddrInfo +} + +func (t *testStorenodeConfigProvider) UseStorenodes() (bool, error) { + return true, nil +} + +func (t *testStorenodeConfigProvider) GetPinnedStorenode() (peer.AddrInfo, error) { + return peer.AddrInfo{}, nil +} + +func (t *testStorenodeConfigProvider) Storenodes() ([]peer.AddrInfo, error) { + return []peer.AddrInfo{t.storenode}, nil +} + +func newTestStorenodeConfigProvider(storenode peer.AddrInfo) history.StorenodeConfigProvider { + return &testStorenodeConfigProvider{ + storenode: storenode, + } +} + +// In order to run these tests, you must run an nwaku node +// +// Using Docker: +// +// IP_ADDRESS=$(hostname -I | awk '{print $1}'); +// docker run \ +// -p 61000:61000/tcp -p 8000:8000/udp -p 8646:8646/tcp harbor.status.im/wakuorg/nwaku:v0.33.0 \ +// --discv5-discovery=true --cluster-id=16 --log-level=DEBUG --shard=64 --tcp-port=61000 \ +// --nat=extip:${IP_ADDRESS} --discv5-udp-port=8000 --rest-address=0.0.0.0 --store --rest-port=8646 \ + +func TestBasicWakuV2(t *testing.T) { + extNodeRestPort := 8646 + storeNodeInfo, err := GetNwakuInfo(nil, &extNodeRestPort) + require.NoError(t, err) + + ctx := context.Background() + + wakuConfig := Config{ + UseThrottledPublish: true, + ClusterID: 16, + } + + nwakuConfig := WakuConfig{ + TcpPort: 30303, + NodeKey: "11d0dcea28e86f81937a3bd1163473c7fbc0a0db54fd72914849bc47bdf78710", + EnableRelay: true, + LogLevel: "DEBUG", + DnsDiscoveryUrl: "enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.prod.status.nodes.status.im", + DnsDiscovery: true, + Discv5Discovery: true, + Staticnodes: []string{storeNodeInfo.ListenAddresses[0]}, + ClusterID: 16, + Shards: []uint16{64}, + } + + w, err := New(nil, "", &wakuConfig, &nwakuConfig, nil, nil, nil, nil, nil) + require.NoError(t, err) + require.NoError(t, w.Start()) + + enr, err := w.node.ENR() + require.NoError(t, err) + require.NotNil(t, enr) + + options := func(b *backoff.ExponentialBackOff) { + b.MaxElapsedTime = 30 * time.Second + } + + // Sanity check, not great, but it's probably helpful + err = tt.RetryWithBackOff(func() error { + numConnected, err := w.node.GetNumConnectedPeers() + if err != nil { + return err + } + // Have to be connected to at least 3 nodes: the static node, the bootstrap node, and one discovered node + if numConnected > 2 { + return nil + } + return errors.New("no peers discovered") + }, options) + require.NoError(t, err) + + // Get local store node address + storeNode, err := peer.AddrInfoFromString(storeNodeInfo.ListenAddresses[0]) + 
require.NoError(t, err) + + w.node.DialPeer(ctx, storeNode.Addrs[0], "") + + w.StorenodeCycle.SetStorenodeConfigProvider(newTestStorenodeConfigProvider(*storeNode)) + + // Check that we are indeed connected to the store node + connectedStoreNodes, err := w.node.GetPeerIDsByProtocol(store.StoreQueryID_v300) + require.NoError(t, err) + require.True(t, slices.Contains(connectedStoreNodes, storeNode.ID), "nwaku should be connected to the store node") + + // Disconnect from the store node + err = w.node.DisconnectPeerByID(storeNode.ID) + require.NoError(t, err) + + // Check that we are indeed disconnected + connectedStoreNodes, err = w.node.GetPeerIDsByProtocol(store.StoreQueryID_v300) + require.NoError(t, err) + isDisconnected := !slices.Contains(connectedStoreNodes, storeNode.ID) + require.True(t, isDisconnected, "nwaku should be disconnected from the store node") + + // Re-connect + err = w.DialPeerByID(storeNode.ID) + require.NoError(t, err) + + // Check that we are connected again + connectedStoreNodes, err = w.node.GetPeerIDsByProtocol(store.StoreQueryID_v300) + require.NoError(t, err) + require.True(t, slices.Contains(connectedStoreNodes, storeNode.ID), "nwaku should be connected to the store node") + + filter := &common.Filter{ + PubsubTopic: w.cfg.DefaultShardPubsubTopic, + Messages: common.NewMemoryMessageStore(), + ContentTopics: common.NewTopicSetFromBytes([][]byte{{1, 2, 3, 4}}), + } + + _, err = w.Subscribe(filter) + require.NoError(t, err) + + msgTimestamp := w.timestamp() + contentTopic := maps.Keys(filter.ContentTopics)[0] + + time.Sleep(2 * time.Second) + + msgID, err := w.Send(w.cfg.DefaultShardPubsubTopic, &pb.WakuMessage{ + Payload: []byte{1, 2, 3, 4, 5}, + ContentTopic: contentTopic.ContentTopic(), + Version: proto.Uint32(0), + Timestamp: &msgTimestamp, + }, nil) + + require.NoError(t, err) + require.NotEqual(t, msgID, "1") + + time.Sleep(1 * time.Second) + + messages := filter.Retrieve() + require.Len(t, messages, 1) + + timestampInSeconds := msgTimestamp / int64(time.Second) + marginInSeconds := 20 + + options = func(b *backoff.ExponentialBackOff) { + b.MaxElapsedTime = 60 * time.Second + b.InitialInterval = 500 * time.Millisecond + } + err = tt.RetryWithBackOff(func() error { + err := w.HistoryRetriever.Query( + context.Background(), + store.FilterCriteria{ + ContentFilter: protocol.NewContentFilter(w.cfg.DefaultShardPubsubTopic, contentTopic.ContentTopic()), + TimeStart: proto.Int64((timestampInSeconds - int64(marginInSeconds)) * int64(time.Second)), + TimeEnd: proto.Int64((timestampInSeconds + int64(marginInSeconds)) * int64(time.Second)), + }, + *storeNode, + 10, + nil, false, + ) + + return err + + // TODO-nwaku + /*if err != nil || envelopeCount == 0 { + // in case of failure extend timestamp margin up to 40secs + if marginInSeconds < 40 { + marginInSeconds += 5 + } + return errors.New("no messages received from store node") + } + return nil*/ + + }, options) + require.NoError(t, err) + + time.Sleep(10 * time.Second) + + require.NoError(t, w.Stop()) +} + +type mapResolver map[string]string + +func (mr mapResolver) LookupTXT(ctx context.Context, name string) ([]string, error) { + if record, ok := mr[name]; ok { + return []string{record}, nil + } + return nil, errors.New("not found") +} + +var signingKeyForTesting, _ = crypto.ToECDSA(hexutil.MustDecode("0xdc599867fc513f8f5e2c2c9c489cde5e71362d1d9ec6e693e0de063236ed1240")) + +func makeTestTree(domain string, nodes []*enode.Node, links []string) (*ethdnsdisc.Tree, string) { + tree, err := ethdnsdisc.MakeTree(1, nodes, 
links) + if err != nil { + panic(err) + } + url, err := tree.Sign(signingKeyForTesting, domain) + if err != nil { + panic(err) + } + return tree, url +} + +func TestPeerExchange(t *testing.T) { + logger, err := zap.NewDevelopment() + require.NoError(t, err) + + discV5NodeConfig := Config{ + UseThrottledPublish: true, + ClusterID: 16, + } + + // start node that will be discovered by PeerExchange + discV5NodeWakuConfig := WakuConfig{ + EnableRelay: true, + LogLevel: "DEBUG", + Discv5Discovery: true, + ClusterID: 16, + Shards: []uint16{64}, + PeerExchange: false, + Discv5UdpPort: 9001, + TcpPort: 60010, + } + + discV5Node, err := New(nil, "", &discV5NodeConfig, &discV5NodeWakuConfig, logger.Named("discV5Node"), nil, nil, nil, nil) + require.NoError(t, err) + require.NoError(t, discV5Node.Start()) + + time.Sleep(1 * time.Second) + + discV5NodePeerId, err := discV5Node.node.PeerID() + require.NoError(t, err) + + discv5NodeEnr, err := discV5Node.node.ENR() + require.NoError(t, err) + + pxServerConfig := Config{ + UseThrottledPublish: true, + ClusterID: 16, + } + + // start node which serves as PeerExchange server + pxServerWakuConfig := WakuConfig{ + EnableRelay: true, + LogLevel: "DEBUG", + Discv5Discovery: true, + ClusterID: 16, + Shards: []uint16{64}, + PeerExchange: true, + Discv5UdpPort: 9000, + Discv5BootstrapNodes: []string{discv5NodeEnr.String()}, + TcpPort: 60011, + } + + pxServerNode, err := New(nil, "", &pxServerConfig, &pxServerWakuConfig, logger.Named("pxServerNode"), nil, nil, nil, nil) + require.NoError(t, err) + require.NoError(t, pxServerNode.Start()) + + // Adding an extra second to make sure PX cache is not empty + time.Sleep(2 * time.Second) + + serverNodeMa, err := pxServerNode.node.ListenAddresses() + require.NoError(t, err) + require.NotNil(t, serverNodeMa) + + // Sanity check, not great, but it's probably helpful + options := func(b *backoff.ExponentialBackOff) { + b.MaxElapsedTime = 30 * time.Second + } + + // Check that pxServerNode has discV5Node in its Peer Store + err = tt.RetryWithBackOff(func() error { + peers, err := pxServerNode.node.GetPeerIDsFromPeerStore() + + if err != nil { + return err + } + + if slices.Contains(peers, discV5NodePeerId) { + return nil + } + + return errors.New("pxServer is missing the discv5 node in its peer store") + }, options) + require.NoError(t, err) + + pxClientConfig := Config{ + UseThrottledPublish: true, + ClusterID: 16, + } + + // start light node which uses PeerExchange to discover peers + pxClientWakuConfig := WakuConfig{ + EnableRelay: false, + LogLevel: "DEBUG", + Discv5Discovery: false, + ClusterID: 16, + Shards: []uint16{64}, + PeerExchange: true, + Discv5UdpPort: 9002, + TcpPort: 60012, + PeerExchangeNode: serverNodeMa[0].String(), + } + + lightNode, err := New(nil, "", &pxClientConfig, &pxClientWakuConfig, logger.Named("lightNode"), nil, nil, nil, nil) + require.NoError(t, err) + require.NoError(t, lightNode.Start()) + + time.Sleep(1 * time.Second) + + pxServerPeerId, err := pxServerNode.node.PeerID() + require.NoError(t, err) + + // Check that the light node discovered the discV5Node and has both nodes in its peer store + err = tt.RetryWithBackOff(func() error { + peers, err := lightNode.node.GetPeerIDsFromPeerStore() + if err != nil { + return err + } + + if slices.Contains(peers, discV5NodePeerId) && slices.Contains(peers, pxServerPeerId) { + return nil + } + return errors.New("lightnode is missing peers") + }, options) + require.NoError(t, err) + + // Now perform the PX request manually to see if it also works + err = 
tt.RetryWithBackOff(func() error { + numPeersReceived, err := lightNode.node.PeerExchangeRequest(1) + if err != nil { + return err + } + + if numPeersReceived == 1 { + return nil + } + return errors.New("Peer Exchange is not returning peers") + }, options) + require.NoError(t, err) + + // Stop nodes + require.NoError(t, lightNode.Stop()) + require.NoError(t, pxServerNode.Stop()) + require.NoError(t, discV5Node.Stop()) + + /* logger, err := zap.NewDevelopment() + require.NoError(t, err) + // start node which serves as PeerExchange server + config := &Config{} + config.ClusterID = 16 + config.EnableDiscV5 = true + config.EnablePeerExchangeServer = true + config.EnablePeerExchangeClient = false + pxServerNode, err := New(nil, "", config, logger.Named("pxServerNode"), nil, nil, nil, nil) + require.NoError(t, err) + require.NoError(t, pxServerNode.Start()) + + time.Sleep(1 * time.Second) + + // start node that will be discovered by PeerExchange + config = &Config{} + config.ClusterID = 16 + config.EnableDiscV5 = true + config.EnablePeerExchangeServer = false + config.EnablePeerExchangeClient = false + enr, err := pxServerNode.ENR() + require.NoError(t, err) + + config.DiscV5BootstrapNodes = []string{enr.String()} + discV5Node, err := New(nil, "", config, logger.Named("discV5Node"), nil, nil, nil, nil) + require.NoError(t, err) + require.NoError(t, discV5Node.Start()) + + time.Sleep(1 * time.Second) + + // start light node which uses PeerExchange to discover peers + enrNodes := []*enode.Node{enr} + tree, url := makeTestTree("n", enrNodes, nil) + resolver := mapResolver(tree.ToTXT("n")) + + config = &Config{} + config.ClusterID = 16 + config.EnablePeerExchangeServer = false + config.EnablePeerExchangeClient = true + config.LightClient = true + config.Resolver = resolver + + config.WakuNodes = []string{url} + lightNode, err := New(nil, "", config, logger.Named("lightNode"), nil, nil, nil, nil) + require.NoError(t, err) + require.NoError(t, lightNode.Start()) + + // Sanity check, not great, but it's probably helpful + options := func(b *backoff.ExponentialBackOff) { + b.MaxElapsedTime = 30 * time.Second + } + err = tt.RetryWithBackOff(func() error { + // we should not use lightNode.Peers() here as it only indicates peers that are connected right now; + // in light client mode, the peer will be closed via `w.node.Host().Network().ClosePeer(peerInfo.ID)` + // after invoking identifyAndConnect. Instead, we should check the peerStore, since peers from the peerStore + // won't get deleted, especially if they are statically added. 
+ numConnected, err := lightNode.GetNumConnectedPeers() + if err != nil { + return err + } + if numConnected == 2 { + return nil + } + return errors.New("no peers discovered") + }, options) + require.NoError(t, err) + + _, cancel := context.WithCancel(context.Background()) + defer cancel() + _, err = discV5Node.WakuPeerExchangeRequest(1) + require.NoError(t, err) + _, err = discV5Node.WakuPeerExchangeRequest(1) + require.Error(t, err) //should fail due to rate limit + + require.NoError(t, lightNode.Stop()) + require.NoError(t, pxServerNode.Stop()) + require.NoError(t, discV5Node.Stop()) */ +} + +func TestDnsDiscover(t *testing.T) { + logger, err := zap.NewDevelopment() + require.NoError(t, err) + nodeConfig := Config{ + UseThrottledPublish: true, + ClusterID: 16, + Nameserver: "8.8.8.8", + } + nodeWakuConfig := WakuConfig{ + EnableRelay: true, + LogLevel: "DEBUG", + ClusterID: 16, + Shards: []uint16{64}, + Discv5UdpPort: 9040, + TcpPort: 60040, + } + node, err := New(nil, "", &nodeConfig, &nodeWakuConfig, logger.Named("node"), nil, nil, nil, nil) + require.NoError(t, err) + require.NoError(t, node.Start()) + time.Sleep(1 * time.Second) + sampleEnrTree := "enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.prod.status.nodes.status.im" + + ctx, cancel := context.WithTimeout(context.TODO(), requestTimeout) + defer cancel() + res, err := node.node.DnsDiscovery(ctx, sampleEnrTree, nodeConfig.Nameserver) + require.NoError(t, err) + require.True(t, len(res) > 1, "multiple nodes should be returned from the DNS Discovery query") + // Stop nodes + require.NoError(t, node.Stop()) +} + +func TestDial(t *testing.T) { + logger, err := zap.NewDevelopment() + require.NoError(t, err) + dialerNodeConfig := Config{ + UseThrottledPublish: true, + ClusterID: 16, + } + // start node that will initiate the dial + dialerNodeWakuConfig := WakuConfig{ + EnableRelay: true, + LogLevel: "DEBUG", + Discv5Discovery: false, + ClusterID: 16, + Shards: []uint16{64}, + Discv5UdpPort: 9020, + TcpPort: 60020, + } + dialerNode, err := New(nil, "", &dialerNodeConfig, &dialerNodeWakuConfig, logger.Named("dialerNode"), nil, nil, nil, nil) + require.NoError(t, err) + require.NoError(t, dialerNode.Start()) + time.Sleep(1 * time.Second) + receiverNodeConfig := Config{ + UseThrottledPublish: true, + ClusterID: 16, + } + // start node that will receive the dial + receiverNodeWakuConfig := WakuConfig{ + EnableRelay: true, + LogLevel: "DEBUG", + Discv5Discovery: false, + ClusterID: 16, + Shards: []uint16{64}, + Discv5UdpPort: 9021, + TcpPort: 60021, + } + receiverNode, err := New(nil, "", &receiverNodeConfig, &receiverNodeWakuConfig, logger.Named("receiverNode"), nil, nil, nil, nil) + require.NoError(t, err) + require.NoError(t, receiverNode.Start()) + time.Sleep(1 * time.Second) + receiverMultiaddr, err := receiverNode.node.ListenAddresses() + require.NoError(t, err) + require.NotNil(t, receiverMultiaddr) + // Check that both nodes start with no connected peers + dialerPeerCount, err := dialerNode.PeerCount() + require.NoError(t, err) + require.True(t, dialerPeerCount == 0, "Dialer node should have no connected peers") + receiverPeerCount, err := receiverNode.PeerCount() + require.NoError(t, err) + require.True(t, receiverPeerCount == 0, "Receiver node should have no connected peers") + // Dial + err = dialerNode.DialPeer(receiverMultiaddr[0]) + require.NoError(t, err) + time.Sleep(1 * time.Second) + // Check that both nodes now have one connected peer + dialerPeerCount, err = dialerNode.PeerCount() + 
require.NoError(t, err) + require.True(t, dialerPeerCount == 1, "Dialer node should have 1 peer") + receiverPeerCount, err = receiverNode.PeerCount() + require.NoError(t, err) + require.True(t, receiverPeerCount == 1, "Receiver node should have 1 peer") + // Stop nodes + require.NoError(t, dialerNode.Stop()) + require.NoError(t, receiverNode.Stop()) +} + +/* + +func TestWakuV2Filter(t *testing.T) { + t.Skip("flaky test") + + enrTreeAddress := testBootENRBootstrap + envEnrTreeAddress := os.Getenv("ENRTREE_ADDRESS") + if envEnrTreeAddress != "" { + enrTreeAddress = envEnrTreeAddress + } + config := &Config{} + setDefaultConfig(config, true) + config.EnablePeerExchangeClient = false + config.Port = 0 + config.MinPeersForFilter = 2 + + config.DiscV5BootstrapNodes = []string{enrTreeAddress} + config.DiscoveryLimit = 20 + config.WakuNodes = []string{enrTreeAddress} + w, err := New(nil, "", config, nil, nil, nil, nil, nil) + require.NoError(t, err) + require.NoError(t, w.Start()) + + options := func(b *backoff.ExponentialBackOff) { + b.MaxElapsedTime = 10 * time.Second + } + time.Sleep(10 * time.Second) //TODO: Check if we can remove this sleep. + + // Sanity check, not great, but it's probably helpful + err = tt.RetryWithBackOff(func() error { + peers, err := w.GetPeerIdsByProtocol(string(filter.FilterSubscribeID_v20beta1)) + if err != nil { + return err + } + if len(peers) < 2 { + return errors.New("no peers discovered") + } + return nil + }, options) + require.NoError(t, err) + testPubsubTopic := "/waku/2/rs/16/32" + contentTopicBytes := make([]byte, 4) + _, err = rand.Read(contentTopicBytes) + require.NoError(t, err) + filter := &common.Filter{ + Messages: common.NewMemoryMessageStore(), + PubsubTopic: testPubsubTopic, + ContentTopics: common.NewTopicSetFromBytes([][]byte{contentTopicBytes}), + } + + fID, err := w.Subscribe(filter) + require.NoError(t, err) + + msgTimestamp := w.timestamp() + contentTopic := maps.Keys(filter.ContentTopics)[0] + + _, err = w.Send(testPubsubTopic, &pb.WakuMessage{ + Payload: []byte{1, 2, 3, 4, 5}, + ContentTopic: contentTopic.ContentTopic(), + Version: proto.Uint32(0), + Timestamp: &msgTimestamp, + }, nil) + require.NoError(t, err) + time.Sleep(5 * time.Second) + + // Ensure there is at least 1 active filter subscription + subscriptions := w.FilterLightnode().Subscriptions() + require.Greater(t, len(subscriptions), 0) + + messages := filter.Retrieve() + require.Len(t, messages, 1) + + // Mock peers going down + _, err = w.FilterLightnode().UnsubscribeWithSubscription(w.ctx, subscriptions[0]) + require.NoError(t, err) + + time.Sleep(10 * time.Second) + + // Ensure there is at least 1 active filter subscription + subscriptions = w.FilterLightnode().Subscriptions() + require.Greater(t, len(subscriptions), 0) + + // Ensure that messages are retrieved with a fresh sub + _, err = w.Send(testPubsubTopic, &pb.WakuMessage{ + Payload: []byte{1, 2, 3, 4, 5, 6}, + ContentTopic: contentTopic.ContentTopic(), + Version: proto.Uint32(0), + Timestamp: &msgTimestamp, + }, nil) + require.NoError(t, err) + time.Sleep(10 * time.Second) + + messages = filter.Retrieve() + require.Len(t, messages, 1) + err = w.Unsubscribe(context.Background(), fID) + require.NoError(t, err) + require.NoError(t, w.Stop()) +} + +func TestWakuV2Store(t *testing.T) { + t.Skip("deprecated. 
Storenode must use nwaku") + + // Configuration for the first Waku node + config1 := &Config{ + Port: 0, + ClusterID: 16, + EnableDiscV5: false, + DiscoveryLimit: 20, + EnableStore: false, + StoreCapacity: 100, + StoreSeconds: 3600, + EnableMissingMessageVerification: true, + } + w1PeersCh := make(chan peer.IDSlice, 100) // buffered not to block on the send side + + // Start the first Waku node + w1, err := New(nil, "", config1, nil, nil, nil, nil, func(cs types.ConnStatus) { + w1PeersCh <- maps.Keys(cs.Peers) + }) + require.NoError(t, err) + require.NoError(t, w1.Start()) + defer func() { + require.NoError(t, w1.Stop()) + close(w1PeersCh) + }() + + // Configuration for the second Waku node + sql2, err := helpers.SetupTestMemorySQLDB(appdatabase.DbInitializer{}) + require.NoError(t, err) + config2 := &Config{ + Port: 0, + ClusterID: 16, + EnableDiscV5: false, + DiscoveryLimit: 20, + EnableStore: true, + StoreCapacity: 100, + StoreSeconds: 3600, + } + + // Start the second Waku node + w2, err := New(nil, "", config2, nil, sql2, nil, nil, nil) + require.NoError(t, err) + require.NoError(t, w2.Start()) + w2EnvelopeCh := make(chan common.EnvelopeEvent, 100) + w2.SubscribeEnvelopeEvents(w2EnvelopeCh) + defer func() { + require.NoError(t, w2.Stop()) + close(w2EnvelopeCh) + }() + + // Connect the two nodes directly + peer2Addr, err := w2.ListenAddresses() + require.NoError(t, err) + + err = w1.DialPeer(peer2Addr[0]) + require.NoError(t, err) + + // Create a filter for the second node to catch messages + filter := &common.Filter{ + Messages: common.NewMemoryMessageStore(), + PubsubTopic: config2.DefaultShardPubsubTopic, + ContentTopics: common.NewTopicSetFromBytes([][]byte{{1, 2, 3, 4}}), + } + + _, err = w2.Subscribe(filter) + require.NoError(t, err) + + time.Sleep(2 * time.Second) + + // Send a message from the first node + msgTimestamp := w1.CurrentTime().UnixNano() + contentTopic := maps.Keys(filter.ContentTopics)[0] + _, err = w1.Send(config1.DefaultShardPubsubTopic, &pb.WakuMessage{ + Payload: []byte{1, 2, 3, 4, 5}, + ContentTopic: contentTopic.ContentTopic(), + Version: proto.Uint32(0), + Timestamp: &msgTimestamp, + }, nil) + require.NoError(t, err) + + waitForEnvelope(t, contentTopic.ContentTopic(), w2EnvelopeCh) + + // Retrieve the message from the second node's filter + messages := filter.Retrieve() + require.Len(t, messages, 1) + + timestampInSeconds := msgTimestamp / int64(time.Second) + marginInSeconds := 5 + // Query the second node's store for the message + _, envelopeCount, err := w1.Query( + context.Background(), + w2.Host().ID(), + store.FilterCriteria{ + TimeStart: proto.Int64((timestampInSeconds - int64(marginInSeconds)) * int64(time.Second)), + TimeEnd: proto.Int64((timestampInSeconds + int64(marginInSeconds)) * int64(time.Second)), + ContentFilter: protocol.NewContentFilter(config1.DefaultShardPubsubTopic, contentTopic.ContentTopic()), + }, + nil, + nil, + false, + ) + require.NoError(t, err) + require.True(t, envelopeCount > 0, "no messages received from store node") +} + +func waitForPeerConnection(t *testing.T, peerID peer.ID, peerCh chan peer.IDSlice) { + waitForPeerConnectionWithTimeout(t, peerID, peerCh, 3*time.Second) +} + +func waitForPeerConnectionWithTimeout(t *testing.T, peerID peer.ID, peerCh chan peer.IDSlice, timeout time.Duration) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + for { + select { + case peers := <-peerCh: + for _, p := range peers { + if p == peerID { + return + } + } + case <-ctx.Done(): + require.Fail(t, 
"timed out waiting for peer "+peerID.String()) + return + } + } +} + +func waitForEnvelope(t *testing.T, contentTopic string, envCh chan common.EnvelopeEvent) { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + for { + select { + case env := <-envCh: + if env.Topic.ContentTopic() == contentTopic { + return + } + case <-ctx.Done(): + require.Fail(t, "timed out waiting for envelope's topic "+contentTopic) + return + } + } +} + +func TestOnlineChecker(t *testing.T) { + w, err := New(nil, "shards.staging", nil, nil, nil, nil, nil, nil) + require.NoError(t, w.Start()) + + require.NoError(t, err) + require.False(t, w.onlineChecker.IsOnline()) + + w.ConnectionChanged(connection.State{Offline: false}) + require.True(t, w.onlineChecker.IsOnline()) + + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + <-w.goingOnline + require.True(t, true) + }() + + time.Sleep(100 * time.Millisecond) + + w.ConnectionChanged(connection.State{Offline: true}) + require.False(t, w.onlineChecker.IsOnline()) + + // Test lightnode online checker + config := &Config{} + config.ClusterID = 16 + config.LightClient = true + lightNode, err := New(nil, "shards.staging", config, nil, nil, nil, nil, nil) + require.NoError(t, err) + + err = lightNode.Start() + require.NoError(t, err) + + require.False(t, lightNode.onlineChecker.IsOnline()) + f := &common.Filter{} + lightNode.filterManager.SubscribeFilter("test", protocol.NewContentFilter(f.PubsubTopic, f.ContentTopics.ContentTopics()...)) + +} + +func TestLightpushRateLimit(t *testing.T) { + logger, err := zap.NewDevelopment() + require.NoError(t, err) + + config0 := &Config{} + setDefaultConfig(config0, false) + w0PeersCh := make(chan peer.IDSlice, 5) // buffered not to block on the send side + + // Start the relayu node + w0, err := New(nil, "", config0, logger.Named("relayNode"), nil, nil, nil, func(cs types.ConnStatus) { + w0PeersCh <- maps.Keys(cs.Peers) + }) + require.NoError(t, err) + require.NoError(t, w0.Start()) + defer func() { + require.NoError(t, w0.Stop()) + close(w0PeersCh) + }() + + contentTopics := common.NewTopicSetFromBytes([][]byte{{1, 2, 3, 4}}) + filter := &common.Filter{ + PubsubTopic: config0.DefaultShardPubsubTopic, + Messages: common.NewMemoryMessageStore(), + ContentTopics: contentTopics, + } + + _, err = w0.Subscribe(filter) + require.NoError(t, err) + + config1 := &Config{} + setDefaultConfig(config1, false) + w1PeersCh := make(chan peer.IDSlice, 5) // buffered not to block on the send side + + // Start the full node + w1, err := New(nil, "", config1, logger.Named("fullNode"), nil, nil, nil, func(cs types.ConnStatus) { + w1PeersCh <- maps.Keys(cs.Peers) + }) + require.NoError(t, err) + require.NoError(t, w1.Start()) + defer func() { + require.NoError(t, w1.Stop()) + close(w1PeersCh) + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + //Connect the relay peer and full node + err = w1.DialPeer(ctx, w0.ListenAddresses()[0].String()) + require.NoError(t, err) + + err = tt.RetryWithBackOff(func() error { + if len(w1.Peers()) == 0 { + return errors.New("no peers discovered") + } + return nil + }) + require.NoError(t, err) + + config2 := &Config{} + setDefaultConfig(config2, true) + w2PeersCh := make(chan peer.IDSlice, 5) // buffered not to block on the send side + + // Start the light node + w2, err := New(nil, "", config2, logger.Named("lightNode"), nil, nil, nil, func(cs types.ConnStatus) { + w2PeersCh <- maps.Keys(cs.Peers) + }) + require.NoError(t, err) + 
require.NoError(t, w2.Start()) + defer func() { + require.NoError(t, w2.Stop()) + close(w2PeersCh) + }() + + //Use this instead of DialPeer to make sure the peer is added to PeerStore and can be selected for Lightpush + w2.AddDiscoveredPeer(w1.PeerID(), w1.ListenAddresses(), wps.Static, w1.cfg.DefaultShardedPubsubTopics, w1.node.ENR(), true) + + waitForPeerConnectionWithTimeout(t, w2.Host().ID(), w1PeersCh, 5*time.Second) + + event := make(chan common.EnvelopeEvent, 10) + w2.SubscribeEnvelopeEvents(event) + + for i := range [4]int{} { + msgTimestamp := w2.timestamp() + _, err := w2.Send(config2.DefaultShardPubsubTopic, &pb.WakuMessage{ + Payload: []byte{1, 2, 3, 4, 5, 6, byte(i)}, + ContentTopic: maps.Keys(contentTopics)[0].ContentTopic(), + Version: proto.Uint32(0), + Timestamp: &msgTimestamp, + }, nil) + + require.NoError(t, err) + + time.Sleep(550 * time.Millisecond) + + } + + messages := filter.Retrieve() + require.Len(t, messages, 2) + +} + +func TestTelemetryFormat(t *testing.T) { + logger, err := zap.NewDevelopment() + require.NoError(t, err) + + tc := NewBandwidthTelemetryClient(logger, "#") + + s := metrics.Stats{ + TotalIn: 10, + TotalOut: 20, + RateIn: 30, + RateOut: 40, + } + + m := make(map[libp2pprotocol.ID]metrics.Stats) + m[relay.WakuRelayID_v200] = s + m[filter.FilterPushID_v20beta1] = s + m[filter.FilterSubscribeID_v20beta1] = s + m[legacy_store.StoreID_v20beta4] = s + m[lightpush.LightPushID_v20beta1] = s + + requestBody := tc.getTelemetryRequestBody(m) + _, err = json.Marshal(requestBody) + require.NoError(t, err) +} +*/ diff --git a/wakuv2/nwaku_test_utils.go b/wakuv2/nwaku_test_utils.go new file mode 100644 index 000000000..ed9f3e80c --- /dev/null +++ b/wakuv2/nwaku_test_utils.go @@ -0,0 +1,58 @@ +package wakuv2 + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "strconv" +) + +type NwakuInfo struct { + ListenAddresses []string `json:"listenAddresses"` + EnrUri string `json:"enrUri"` +} + +func GetNwakuInfo(host *string, port *int) (NwakuInfo, error) { + nwakuRestPort := 8645 + if port != nil { + nwakuRestPort = *port + } + envNwakuRestPort := os.Getenv("NWAKU_REST_PORT") + if envNwakuRestPort != "" { + v, err := strconv.Atoi(envNwakuRestPort) + if err != nil { + return NwakuInfo{}, err + } + nwakuRestPort = v + } + + nwakuRestHost := "localhost" + if host != nil { + nwakuRestHost = *host + } + envNwakuRestHost := os.Getenv("NWAKU_REST_HOST") + if envNwakuRestHost != "" { + nwakuRestHost = envNwakuRestHost + } + + resp, err := http.Get(fmt.Sprintf("http://%s:%d/debug/v1/info", nwakuRestHost, nwakuRestPort)) + if err != nil { + return NwakuInfo{}, err + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return NwakuInfo{}, err + } + + var data NwakuInfo + err = json.Unmarshal(body, &data) + if err != nil { + return NwakuInfo{}, err + } + + return data, nil +} diff --git a/wakuv2/pinger.go b/wakuv2/pinger.go new file mode 100644 index 000000000..ef5f14727 --- /dev/null +++ b/wakuv2/pinger.go @@ -0,0 +1,26 @@ +//go:build use_nwaku +// +build use_nwaku + +package wakuv2 + +import ( + "context" + "time" + + "github.com/libp2p/go-libp2p/core/peer" + commonapi "github.com/waku-org/go-waku/waku/v2/api/common" +) + +type pinger struct { + node *WakuNode +} + +func newPinger(node *WakuNode) commonapi.Pinger { + return &pinger{ + node: node, + } +} + +func (p *pinger) PingPeer(ctx context.Context, peerInfo peer.AddrInfo) (time.Duration, error) { + return p.node.PingPeer(ctx, peerInfo) +} diff --git a/wakuv2/publisher.go 
b/wakuv2/publisher.go new file mode 100644 index 000000000..52ffd3e35 --- /dev/null +++ b/wakuv2/publisher.go @@ -0,0 +1,37 @@ +//go:build use_nwaku +// +build use_nwaku + +package wakuv2 + +import ( + "context" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/waku-org/go-waku/waku/v2/api/publish" + "github.com/waku-org/go-waku/waku/v2/protocol/pb" +) + +type nwakuPublisher struct { + node *WakuNode +} + +func newPublisher(node *WakuNode) publish.Publisher { + return &nwakuPublisher{ + node: node, + } +} + +func (p *nwakuPublisher) RelayListPeers(pubsubTopic string) ([]peer.ID, error) { + // TODO-nwaku + return nil, nil +} + +func (p *nwakuPublisher) RelayPublish(ctx context.Context, message *pb.WakuMessage, pubsubTopic string) (pb.MessageHash, error) { + return p.node.RelayPublish(ctx, message, pubsubTopic) +} + +// LightpushPublish publishes a message via WakuLightPush +func (p *nwakuPublisher) LightpushPublish(ctx context.Context, message *pb.WakuMessage, pubsubTopic string, maxPeers int) (pb.MessageHash, error) { + // TODO-nwaku + return pb.MessageHash{}, nil +} diff --git a/wakuv2/result.go b/wakuv2/result.go new file mode 100644 index 000000000..0e7dc1a9d --- /dev/null +++ b/wakuv2/result.go @@ -0,0 +1,77 @@ +//go:build use_nwaku +// +build use_nwaku + +package wakuv2 + +import ( + "context" + "encoding/hex" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/waku-org/go-waku/waku/v2/protocol" + "github.com/waku-org/go-waku/waku/v2/protocol/store" + storepb "github.com/waku-org/go-waku/waku/v2/protocol/store/pb" +) + +type storeResultImpl struct { + done bool + + node *WakuNode + storeRequest *storepb.StoreQueryRequest + storeResponse *storepb.StoreQueryResponse + peerInfo peer.AddrInfo +} + +func newStoreResultImpl(node *WakuNode, peerInfo peer.AddrInfo, storeRequest *storepb.StoreQueryRequest, storeResponse *storepb.StoreQueryResponse) *storeResultImpl { + return &storeResultImpl{ + node: node, + storeRequest: storeRequest, + storeResponse: storeResponse, + peerInfo: peerInfo, + } +} + +func (r *storeResultImpl) Cursor() []byte { + return r.storeResponse.GetPaginationCursor() +} + +func (r *storeResultImpl) IsComplete() bool { + return r.done +} + +func (r *storeResultImpl) PeerID() peer.ID { + return r.peerInfo.ID +} + +func (r *storeResultImpl) Query() *storepb.StoreQueryRequest { + return r.storeRequest +} + +func (r *storeResultImpl) Response() *storepb.StoreQueryResponse { + return r.storeResponse +} + +func (r *storeResultImpl) Next(ctx context.Context, opts ...store.RequestOption) error { + // TODO: opts is being ignored. Will require some changes in go-waku. 
For now using this + // is not necessary + + if r.storeResponse.GetPaginationCursor() == nil { + r.done = true + return nil + } + + r.storeRequest.RequestId = hex.EncodeToString(protocol.GenerateRequestID()) + r.storeRequest.PaginationCursor = r.storeResponse.PaginationCursor + + storeResponse, err := r.node.StoreQuery(ctx, r.storeRequest, r.peerInfo) + if err != nil { + return err + } + + r.storeResponse = storeResponse + return nil +} + +func (r *storeResultImpl) Messages() []*storepb.WakuMessageKeyValue { + return r.storeResponse.GetMessages() +} diff --git a/protocol/common/shard/shard.go b/wakuv2/shard.go similarity index 98% rename from protocol/common/shard/shard.go rename to wakuv2/shard.go index 011a6f452..fc8686755 100644 --- a/protocol/common/shard/shard.go +++ b/wakuv2/shard.go @@ -1,4 +1,4 @@ -package shard +package wakuv2 import ( wakuproto "github.com/waku-org/go-waku/waku/v2/protocol" diff --git a/wakuv2/storenode_message_verifier.go b/wakuv2/storenode_message_verifier.go new file mode 100644 index 000000000..f0a7cb2ed --- /dev/null +++ b/wakuv2/storenode_message_verifier.go @@ -0,0 +1,59 @@ +//go:build use_nwaku +// +build use_nwaku + +package wakuv2 + +import ( + "context" + "encoding/hex" + "fmt" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/waku-org/go-waku/waku/v2/api/publish" + "github.com/waku-org/go-waku/waku/v2/protocol/pb" + storepb "github.com/waku-org/go-waku/waku/v2/protocol/store/pb" +) + +type storenodeMessageVerifier struct { + node *WakuNode +} + +func newStorenodeMessageVerifier(node *WakuNode) publish.StorenodeMessageVerifier { + return &storenodeMessageVerifier{ + node: node, + } +} + +func (d *storenodeMessageVerifier) MessageHashesExist(ctx context.Context, requestID []byte, peerInfo peer.AddrInfo, pageSize uint64, messageHashes []pb.MessageHash) ([]pb.MessageHash, error) { + requestIDStr := hex.EncodeToString(requestID) + storeRequest := &storepb.StoreQueryRequest{ + RequestId: requestIDStr, + MessageHashes: make([][]byte, len(messageHashes)), + IncludeData: false, + PaginationCursor: nil, + PaginationForward: false, + PaginationLimit: proto.Uint64(pageSize), + } + + for i, mhash := range messageHashes { + storeRequest.MessageHashes[i] = mhash.Bytes() + } + + response, err := d.node.StoreQuery(ctx, storeRequest, peerInfo) + if err != nil { + return nil, err + } + + if response.GetStatusCode() != http.StatusOK { + return nil, fmt.Errorf("could not query storenode: %s %d %s", requestIDStr, response.GetStatusCode(), response.GetStatusDesc()) + } + + result := make([]pb.MessageHash, len(response.Messages)) + for i, msg := range response.Messages { + result[i] = pb.ToMessageHash(msg.GetMessageHash()) + } + + return result, nil +} diff --git a/wakuv2/storenode_requestor.go b/wakuv2/storenode_requestor.go new file mode 100644 index 000000000..b623a6e8e --- /dev/null +++ b/wakuv2/storenode_requestor.go @@ -0,0 +1,76 @@ +//go:build use_nwaku +// +build use_nwaku + +package wakuv2 + +import ( + "context" + "encoding/hex" + "fmt" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/libp2p/go-libp2p/core/peer" + commonapi "github.com/waku-org/go-waku/waku/v2/api/common" + "github.com/waku-org/go-waku/waku/v2/protocol" + "github.com/waku-org/go-waku/waku/v2/protocol/pb" + storepb "github.com/waku-org/go-waku/waku/v2/protocol/store/pb" + "go.uber.org/zap" +) + +type storenodeRequestor struct { + node *WakuNode + logger *zap.Logger +} + +func newStorenodeRequestor(node *WakuNode, logger 
*zap.Logger) commonapi.StorenodeRequestor { + return &storenodeRequestor{ + node: node, + logger: logger.Named("storenodeRequestor"), + } +} + +func (s *storenodeRequestor) GetMessagesByHash(ctx context.Context, peerInfo peer.AddrInfo, pageSize uint64, messageHashes []pb.MessageHash) (commonapi.StoreRequestResult, error) { + requestIDStr := hex.EncodeToString(protocol.GenerateRequestID()) + + logger := s.logger.With(zap.Stringer("peerID", peerInfo.ID), zap.String("requestID", requestIDStr)) + + logger.Debug("sending store request") + + storeRequest := &storepb.StoreQueryRequest{ + RequestId: requestIDStr, + MessageHashes: make([][]byte, len(messageHashes)), + IncludeData: true, + PaginationCursor: nil, + PaginationForward: false, + PaginationLimit: proto.Uint64(pageSize), + } + + for i, mhash := range messageHashes { + storeRequest.MessageHashes[i] = mhash.Bytes() + } + + storeResponse, err := s.node.StoreQuery(ctx, storeRequest, peerInfo) + if err != nil { + return nil, err + } + + if storeResponse.GetStatusCode() != http.StatusOK { + return nil, fmt.Errorf("could not query storenode: %s %d %s", requestIDStr, storeResponse.GetStatusCode(), storeResponse.GetStatusDesc()) + } + + return newStoreResultImpl(s.node, peerInfo, storeRequest, storeResponse), nil +} + +func (s *storenodeRequestor) Query(ctx context.Context, peerInfo peer.AddrInfo, storeRequest *storepb.StoreQueryRequest) (commonapi.StoreRequestResult, error) { + storeResponse, err := s.node.StoreQuery(ctx, storeRequest, peerInfo) + if err != nil { + return nil, err + } + + if storeResponse.GetStatusCode() != http.StatusOK { + return nil, fmt.Errorf("could not query storenode: %s %d %s", storeRequest.RequestId, storeResponse.GetStatusCode(), storeResponse.GetStatusDesc()) + } + + return newStoreResultImpl(s.node, peerInfo, storeRequest, storeResponse), nil +} diff --git a/wakuv2/waku_test.go b/wakuv2/waku_test.go index ce80dbd96..e801194fc 100644 --- a/wakuv2/waku_test.go +++ b/wakuv2/waku_test.go @@ -1,3 +1,6 @@ +//go:build !use_nwaku +// +build !use_nwaku + package wakuv2 import ( @@ -545,8 +548,9 @@ func TestWakuV2Store(t *testing.T) { }() // Connect the two nodes directly - peer2Addr := w2.node.ListenAddresses()[0].String() - err = w1.node.DialPeer(context.Background(), peer2Addr) + peer2Addr, err := w2.ListenAddresses() + require.NoError(t, err) + err = w1.node.DialPeer(context.Background(), peer2Addr[0].String()) require.NoError(t, err) waitForPeerConnection(t, w2.node.Host().ID(), w1PeersCh) @@ -719,7 +723,9 @@ func TestLightpushRateLimit(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() //Connect the relay peer and full node - err = w1.node.DialPeer(ctx, w0.node.ListenAddresses()[0].String()) + peerAddr, err := w0.ListenAddresses() + require.NoError(t, err) + err = w1.node.DialPeer(ctx, peerAddr[0].String()) require.NoError(t, err) err = tt.RetryWithBackOff(func() error { @@ -746,7 +752,9 @@ func TestLightpushRateLimit(t *testing.T) { }() //Use this instead of DialPeer to make sure the peer is added to PeerStore and can be selected for Lighpush - w2.node.AddDiscoveredPeer(w1.PeerID(), w1.node.ListenAddresses(), wps.Static, w1.cfg.DefaultShardedPubsubTopics, w1.node.ENR(), true) + addresses, err := w1.ListenAddresses() + require.NoError(t, err) + w2.node.AddDiscoveredPeer(w1.PeerID(), addresses, wps.Static, w1.cfg.DefaultShardedPubsubTopics, w1.node.ENR(), true) waitForPeerConnectionWithTimeout(t, w2.node.Host().ID(), w1PeersCh, 5*time.Second)
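Editorial note on the waku_test.go hunks above: under the nwaku backend, ListenAddresses returns ([]multiaddr.Multiaddr, error) rather than a bare slice, so call sites must unpack the result before dialing. A minimal sketch of the migrated calling convention (test scaffolding assumed from the surrounding file):

	peer2Addr, err := w2.ListenAddresses() // now ([]multiaddr.Multiaddr, error)
	require.NoError(t, err)
	err = w1.node.DialPeer(context.Background(), peer2Addr[0].String())
	require.NoError(t, err)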