diff --git a/.gitignore b/.gitignore
index 5e4b8e1d7..d9aefe181 100644
--- a/.gitignore
+++ b/.gitignore
@@ -64,7 +64,6 @@ coverage.html
Session.vim
.undodir/*
/.idea/
-/.vscode/
/cmd/*/.ethereum/
*.iml
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 000000000..13c88e400
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "third_party/nwaku"]
+ path = third_party/nwaku
+ url = https://github.com/waku-org/nwaku
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 16fdfe542..d83958300 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -8,4 +8,7 @@
"cSpell.words": [
"unmarshalling"
],
+ "gopls":{
+ "buildFlags": ["-tags=use_nwaku,gowaku_skip_migrations,gowaku_no_rln"]
+ }
}
diff --git a/Makefile b/Makefile
index be509e8e7..305979237 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,6 @@
.PHONY: statusgo statusd-prune all test clean help
.PHONY: statusgo-android statusgo-ios
+.PHONY: build-libwaku test-libwaku clean-libwaku rebuild-libwaku
# Clear any GOROOT set outside of the Nix shell
export GOROOT=
@@ -355,9 +356,28 @@ mock: ##@other Regenerate mocks
mockgen -package=mock_paraswap -destination=services/wallet/thirdparty/paraswap/mock/types.go -source=services/wallet/thirdparty/paraswap/types.go
mockgen -package=mock_onramp -destination=services/wallet/onramp/mock/types.go -source=services/wallet/onramp/types.go
+LIBWAKU := third_party/nwaku/build/libwaku.$(GOBIN_SHARED_LIB_EXT)
+$(LIBWAKU):
+ echo "Building libwaku"
+ $(MAKE) -C third_party/nwaku update || { echo "nwaku make update failed"; exit 1; }
+ $(MAKE) -C ./third_party/nwaku libwaku
+
+build-libwaku: $(LIBWAKU)
+
docker-test: ##@tests Run tests in a docker container with golang.
docker run --privileged --rm -it -v "$(PWD):$(DOCKER_TEST_WORKDIR)" -w "$(DOCKER_TEST_WORKDIR)" $(DOCKER_TEST_IMAGE) go test ${ARGS}
+test-libwaku: | $(LIBWAKU)
+# TODO: check whether an nwaku instance is already running,
+# and launch one if it is not
+ go test -tags '$(BUILD_TAGS) use_nwaku' -run TestBasicWakuV2 ./wakuv2/... -count 1 -v -json | jq -r '.Output'
+
+clean-libwaku:
+ echo "Removing libwaku"
+ rm $(LIBWAKU)
+
+rebuild-libwaku: | clean-libwaku $(LIBWAKU)
+
test: test-unit ##@tests Run basic, short tests during development
test-unit: export BUILD_TAGS ?=
diff --git a/eth-node/bridge/geth/node.go b/eth-node/bridge/geth/node.go
index e123ec654..91088080a 100644
--- a/eth-node/bridge/geth/node.go
+++ b/eth-node/bridge/geth/node.go
@@ -19,10 +19,10 @@ import (
type gethNodeWrapper struct {
stack *node.Node
waku1 *waku.Waku
- waku2 *wakuv2.NWaku
+ waku2 *wakuv2.Waku
}
-func NewNodeBridge(stack *node.Node, waku1 *waku.Waku, waku2 *wakuv2.NWaku) types.Node {
+func NewNodeBridge(stack *node.Node, waku1 *waku.Waku, waku2 *wakuv2.Waku) types.Node {
return &gethNodeWrapper{stack: stack, waku1: waku1, waku2: waku2}
}
@@ -38,7 +38,7 @@ func (w *gethNodeWrapper) SetWaku1(waku *waku.Waku) {
w.waku1 = waku
}
-func (w *gethNodeWrapper) SetWaku2(waku *wakuv2.NWaku) {
+func (w *gethNodeWrapper) SetWaku2(waku *wakuv2.Waku) {
w.waku2 = waku
}
diff --git a/eth-node/bridge/geth/wakuv2.go b/eth-node/bridge/geth/wakuv2.go
index c13279787..dd1c8f4ed 100644
--- a/eth-node/bridge/geth/wakuv2.go
+++ b/eth-node/bridge/geth/wakuv2.go
@@ -22,11 +22,11 @@ import (
)
type gethWakuV2Wrapper struct {
- waku *wakuv2.NWaku
+ waku *wakuv2.Waku
}
// NewGethWakuWrapper returns an object that wraps Geth's Waku in a types interface
-func NewGethWakuV2Wrapper(w *wakuv2.NWaku) types.Waku {
+func NewGethWakuV2Wrapper(w *wakuv2.Waku) types.Waku {
if w == nil {
panic("waku cannot be nil")
}
@@ -37,7 +37,7 @@ func NewGethWakuV2Wrapper(w *wakuv2.NWaku) types.Waku {
}
// GetGethWhisperFrom retrieves the underlying whisper Whisper struct from a wrapped Whisper interface
-func GetGethWakuV2From(m types.Waku) *wakuv2.NWaku {
+func GetGethWakuV2From(m types.Waku) *wakuv2.Waku {
return m.(*gethWakuV2Wrapper).waku
}
diff --git a/nix/shell.nix b/nix/shell.nix
index 369a7d3f3..c03105219 100644
--- a/nix/shell.nix
+++ b/nix/shell.nix
@@ -21,7 +21,7 @@ in pkgs.mkShell {
buildInputs = with pkgs; [
git jq which
- go golangci-lint go-junit-report gopls go-bindata gomobileMod
+ go golangci-lint go-junit-report gopls go-bindata gomobileMod openssl
mockgen protobuf3_20 protoc-gen-go gotestsum go-modvendor openjdk cc-test-reporter
] ++ lib.optionals (stdenv.isDarwin) [ xcodeWrapper ];
diff --git a/node/get_status_node.go b/node/get_status_node.go
index ce6bcba9f..27ea87eb8 100644
--- a/node/get_status_node.go
+++ b/node/get_status_node.go
@@ -122,7 +122,7 @@ type StatusNode struct {
// nwakuSrvc *
wakuSrvc *waku.Waku
wakuExtSrvc *wakuext.Service
- wakuV2Srvc *wakuv2.NWaku
+ wakuV2Srvc *wakuv2.Waku
wakuV2ExtSrvc *wakuv2ext.Service
ensSrvc *ens.Service
communityTokensSrvc *communitytokens.Service
diff --git a/node/status_node_services.go b/node/status_node_services.go
index 7a424465b..04c1402d6 100644
--- a/node/status_node_services.go
+++ b/node/status_node_services.go
@@ -263,7 +263,7 @@ func (b *StatusNode) WakuExtService() *wakuext.Service {
func (b *StatusNode) WakuV2ExtService() *wakuv2ext.Service {
return b.wakuV2ExtSrvc
}
-func (b *StatusNode) WakuV2Service() *wakuv2.NWaku {
+func (b *StatusNode) WakuV2Service() *wakuv2.Waku {
return b.wakuV2Srvc
}
@@ -315,7 +315,7 @@ func (b *StatusNode) wakuService(wakuCfg *params.WakuConfig, clusterCfg *params.
}
-func (b *StatusNode) wakuV2Service(nodeConfig *params.NodeConfig) (*wakuv2.NWaku, error) {
+func (b *StatusNode) wakuV2Service(nodeConfig *params.NodeConfig) (*wakuv2.Waku, error) {
if b.wakuV2Srvc == nil {
cfg := &wakuv2.Config{
MaxMessageSize: wakucommon.DefaultMaxMessageSize,
diff --git a/protocol/communities/persistence_test.go b/protocol/communities/persistence_test.go
index 42053ecb9..19947b181 100644
--- a/protocol/communities/persistence_test.go
+++ b/protocol/communities/persistence_test.go
@@ -15,13 +15,13 @@ import (
"github.com/status-im/status-go/eth-node/crypto"
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/protocol/common"
- "github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/communities/token"
"github.com/status-im/status-go/protocol/encryption"
"github.com/status-im/status-go/protocol/protobuf"
"github.com/status-im/status-go/protocol/sqlite"
"github.com/status-im/status-go/services/wallet/bigint"
"github.com/status-im/status-go/t/helpers"
+ "github.com/status-im/status-go/wakuv2"
)
func TestPersistenceSuite(t *testing.T) {
@@ -787,7 +787,7 @@ func (s *PersistenceSuite) TestSaveShardInfo() {
s.Require().Nil(resultShard)
// not nil shard
- expectedShard := &shard.Shard{
+ expectedShard := &wakuv2.Shard{
Cluster: 1,
Index: 2,
}
diff --git a/protocol/communities_messenger_token_permissions_test.go b/protocol/communities_messenger_token_permissions_test.go
index bbf05984c..8cfe2b74a 100644
--- a/protocol/communities_messenger_token_permissions_test.go
+++ b/protocol/communities_messenger_token_permissions_test.go
@@ -25,13 +25,13 @@ import (
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/params"
"github.com/status-im/status-go/protocol/common"
- "github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/communities"
"github.com/status-im/status-go/protocol/protobuf"
"github.com/status-im/status-go/protocol/requests"
"github.com/status-im/status-go/protocol/transport"
"github.com/status-im/status-go/protocol/tt"
"github.com/status-im/status-go/services/wallet/thirdparty"
+ "github.com/status-im/status-go/wakuv2"
)
const testChainID1 = 1
@@ -488,11 +488,12 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestBecomeMemberPermissions(
cfg := testWakuV2Config{
logger: s.logger.Named("store-node-waku"),
enableStore: false,
- clusterID: shard.MainStatusShardCluster,
+ clusterID: wakuv2.MainStatusShardCluster,
}
wakuStoreNode := NewTestWakuV2(&s.Suite, cfg)
- storeNodeListenAddresses := wakuStoreNode.ListenAddresses()
+ storeNodeListenAddresses, err := wakuStoreNode.ListenAddresses()
+ s.Require().NoError(err)
s.Require().LessOrEqual(1, len(storeNodeListenAddresses))
storeNodeAddress := storeNodeListenAddresses[0]
diff --git a/protocol/messenger_communities_sharding_test.go b/protocol/messenger_communities_sharding_test.go
index 307df61ad..962cd62b1 100644
--- a/protocol/messenger_communities_sharding_test.go
+++ b/protocol/messenger_communities_sharding_test.go
@@ -12,11 +12,11 @@ import (
gethbridge "github.com/status-im/status-go/eth-node/bridge/geth"
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/protocol/common"
- "github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/communities"
"github.com/status-im/status-go/protocol/protobuf"
"github.com/status-im/status-go/protocol/requests"
"github.com/status-im/status-go/protocol/tt"
+ "github.com/status-im/status-go/wakuv2"
)
func TestMessengerCommunitiesShardingSuite(t *testing.T) {
@@ -108,7 +108,7 @@ func (s *MessengerCommunitiesShardingSuite) TearDownTest() {
_ = s.logger.Sync()
}
-func (s *MessengerCommunitiesShardingSuite) testPostToCommunityChat(shard *shard.Shard, community *communities.Community, chat *Chat) {
+func (s *MessengerCommunitiesShardingSuite) testPostToCommunityChat(shard *wakuv2.Shard, community *communities.Community, chat *Chat) {
_, err := s.owner.SetCommunityShard(&requests.SetCommunityShard{
CommunityID: community.ID(),
Shard: shard,
@@ -144,8 +144,8 @@ func (s *MessengerCommunitiesShardingSuite) TestPostToCommunityChat() {
// Members should be able to receive messages in a community with sharding enabled.
{
- shard := &shard.Shard{
- Cluster: shard.MainStatusShardCluster,
+ shard := &wakuv2.Shard{
+ Cluster: wakuv2.MainStatusShardCluster,
Index: 128,
}
s.testPostToCommunityChat(shard, community, chat)
@@ -153,8 +153,8 @@ func (s *MessengerCommunitiesShardingSuite) TestPostToCommunityChat() {
// Members should be able to receive messages in a community where the sharding configuration has been edited.
{
- shard := &shard.Shard{
- Cluster: shard.MainStatusShardCluster,
+ shard := &wakuv2.Shard{
+ Cluster: wakuv2.MainStatusShardCluster,
Index: 256,
}
s.testPostToCommunityChat(shard, community, chat)
@@ -162,8 +162,8 @@ func (s *MessengerCommunitiesShardingSuite) TestPostToCommunityChat() {
// Members should continue to receive messages in a community if it is moved back to default shard.
{
- shard := &shard.Shard{
- Cluster: shard.MainStatusShardCluster,
+ shard := &wakuv2.Shard{
+ Cluster: wakuv2.MainStatusShardCluster,
Index: 32,
}
s.testPostToCommunityChat(shard, community, chat)
@@ -176,8 +176,8 @@ func (s *MessengerCommunitiesShardingSuite) TestIgnoreOutdatedShardKey() {
advertiseCommunityToUserOldWay(&s.Suite, community, s.owner, s.alice)
joinCommunity(&s.Suite, community.ID(), s.owner, s.alice, alicePassword, []string{aliceAddress1})
- shard := &shard.Shard{
- Cluster: shard.MainStatusShardCluster,
+ shard := &wakuv2.Shard{
+ Cluster: wakuv2.MainStatusShardCluster,
Index: 128,
}
diff --git a/protocol/messenger_config.go b/protocol/messenger_config.go
index b17f4c65b..d98d9edea 100644
--- a/protocol/messenger_config.go
+++ b/protocol/messenger_config.go
@@ -114,7 +114,7 @@ type config struct {
telemetryServerURL string
telemetrySendPeriod time.Duration
- wakuService *wakuv2.NWaku
+ wakuService *wakuv2.Waku
messageResendMinDelay time.Duration
messageResendMaxCount int
@@ -387,7 +387,7 @@ func WithCommunityTokensService(s communities.CommunityTokensServiceInterface) O
}
}
-func WithWakuService(s *wakuv2.NWaku) Option {
+func WithWakuService(s *wakuv2.Waku) Option {
return func(c *config) error {
c.wakuService = s
return nil
diff --git a/protocol/messenger_storenode_comunity_test.go b/protocol/messenger_storenode_comunity_test.go
index d2df04876..9a523cc58 100644
--- a/protocol/messenger_storenode_comunity_test.go
+++ b/protocol/messenger_storenode_comunity_test.go
@@ -10,9 +10,9 @@ import (
"github.com/multiformats/go-multiaddr"
"github.com/status-im/status-go/protocol/storenodes"
+ "github.com/status-im/status-go/wakuv2"
gethbridge "github.com/status-im/status-go/eth-node/bridge/geth"
- "github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/communities"
"github.com/status-im/status-go/protocol/tt"
@@ -92,11 +92,12 @@ func (s *MessengerStoreNodeCommunitySuite) createStore(name string) (*waku2.Waku
cfg := testWakuV2Config{
logger: s.logger.Named(name),
enableStore: true,
- clusterID: shard.MainStatusShardCluster,
+ clusterID: wakuv2.MainStatusShardCluster,
}
storeNode := NewTestWakuV2(&s.Suite, cfg)
- addresses := storeNode.ListenAddresses()
+ addresses, err := storeNode.ListenAddresses()
+ s.Require().NoError(err)
s.Require().GreaterOrEqual(len(addresses), 1, "no storenode listen address")
return storeNode, addresses[0]
}
@@ -109,7 +110,7 @@ func (s *MessengerStoreNodeCommunitySuite) newMessenger(name string, storenodeAd
cfg := testWakuV2Config{
logger: logger,
enableStore: false,
- clusterID: shard.MainStatusShardCluster,
+ clusterID: wakuv2.MainStatusShardCluster,
}
wakuV2 := NewTestWakuV2(&s.Suite, cfg)
wakuV2Wrapper := gethbridge.NewGethWakuV2Wrapper(wakuV2)
diff --git a/protocol/messenger_storenode_request_test.go b/protocol/messenger_storenode_request_test.go
index 543e32fb3..8718087f9 100644
--- a/protocol/messenger_storenode_request_test.go
+++ b/protocol/messenger_storenode_request_test.go
@@ -24,7 +24,6 @@ import (
"github.com/status-im/status-go/multiaccounts/accounts"
"github.com/status-im/status-go/params"
"github.com/status-im/status-go/protocol/common"
- "github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/communities"
"github.com/status-im/status-go/protocol/communities/token"
"github.com/status-im/status-go/protocol/protobuf"
@@ -34,6 +33,7 @@ import (
mailserversDB "github.com/status-im/status-go/services/mailservers"
"github.com/status-im/status-go/services/wallet/bigint"
"github.com/status-im/status-go/t/helpers"
+ "github.com/status-im/status-go/wakuv2"
waku2 "github.com/status-im/status-go/wakuv2"
wakuV2common "github.com/status-im/status-go/wakuv2/common"
)
@@ -160,7 +160,7 @@ func (s *MessengerStoreNodeRequestSuite) createStore() {
cfg := testWakuV2Config{
logger: s.logger.Named("store-waku"),
enableStore: true,
- clusterID: shard.MainStatusShardCluster,
+ clusterID: wakuv2.MainStatusShardCluster,
}
s.wakuStoreNode = NewTestWakuV2(&s.Suite, cfg)
@@ -178,7 +178,7 @@ func (s *MessengerStoreNodeRequestSuite) createOwner() {
cfg := testWakuV2Config{
logger: s.logger.Named("owner-waku"),
enableStore: false,
- clusterID: shard.MainStatusShardCluster,
+ clusterID: wakuv2.MainStatusShardCluster,
}
wakuV2 := NewTestWakuV2(&s.Suite, cfg)
@@ -199,7 +199,7 @@ func (s *MessengerStoreNodeRequestSuite) createBob() {
cfg := testWakuV2Config{
logger: s.logger.Named("bob-waku"),
enableStore: false,
- clusterID: shard.MainStatusShardCluster,
+ clusterID: wakuv2.MainStatusShardCluster,
}
wakuV2 := NewTestWakuV2(&s.Suite, cfg)
s.bobWaku = gethbridge.NewGethWakuV2Wrapper(wakuV2)
@@ -366,7 +366,8 @@ func (s *MessengerStoreNodeRequestSuite) waitForEnvelopes(subscription <-chan st
}
func (s *MessengerStoreNodeRequestSuite) wakuListenAddress(waku *waku2.Waku) multiaddr.Multiaddr {
- addresses := waku.ListenAddresses()
+ addresses, err := waku.ListenAddresses()
+ s.Require().NoError(err)
s.Require().LessOrEqual(1, len(addresses))
return addresses[0]
}
@@ -696,8 +697,8 @@ func (s *MessengerStoreNodeRequestSuite) TestRequestShardAndCommunityInfo() {
topicPrivKey, err := crypto.GenerateKey()
s.Require().NoError(err)
- expectedShard := &shard.Shard{
- Cluster: shard.MainStatusShardCluster,
+ expectedShard := &wakuv2.Shard{
+ Cluster: wakuv2.MainStatusShardCluster,
Index: 23,
}
@@ -841,8 +842,8 @@ type testFetchRealCommunityExampleTokenInfo struct {
var testFetchRealCommunityExample = []struct {
CommunityID string
- CommunityURL string // If set, takes precedence over CommunityID
- CommunityShard *shard.Shard // WARNING: I didn't test a sharded community
+ CommunityURL string // If set, takes precedence over CommunityID
+ CommunityShard *wakuv2.Shard // WARNING: I didn't test a sharded community
Fleet string
ClusterID uint16
UserPrivateKeyString string // When empty a new user will be created
@@ -863,14 +864,14 @@ var testFetchRealCommunityExample = []struct {
CommunityID: "0x03073514d4c14a7d10ae9fc9b0f05abc904d84166a6ac80add58bf6a3542a4e50a",
CommunityShard: nil,
Fleet: params.FleetStatusProd,
- ClusterID: shard.MainStatusShardCluster,
+ ClusterID: wakuv2.MainStatusShardCluster,
},
{
// Example 3,
// https://status.app/c/CxiACi8KFGFwIHJlcSAxIHN0dCBiZWMgbWVtEgdkc2Fkc2FkGAMiByM0MzYwREYqAxkrHAM=#zQ3shwDYZHtrLE7NqoTGjTWzWUu6hom5D4qxfskLZfgfyGRyL
CommunityID: "0x03f64be95ed5c925022265f9250f538f65ed3dcf6e4ef6c139803dc02a3487ae7b",
Fleet: params.FleetStatusProd,
- ClusterID: shard.MainStatusShardCluster,
+ ClusterID: wakuv2.MainStatusShardCluster,
CheckExpectedEnvelopes: true,
ExpectedShardEnvelopes: []string{
@@ -973,7 +974,7 @@ var testFetchRealCommunityExample = []struct {
//Example 1,
CommunityID: "0x02471dd922756a3a50b623e59cf3b99355d6587e43d5c517eb55f9aea9d3fe9fe9",
Fleet: params.FleetStatusProd,
- ClusterID: shard.MainStatusShardCluster,
+ ClusterID: wakuv2.MainStatusShardCluster,
CheckExpectedEnvelopes: true,
ExpectedShardEnvelopes: []string{
"0xc3e68e838d09e0117b3f3fd27aabe5f5a509d13e9045263c78e6890953d43547",
@@ -1013,7 +1014,7 @@ var testFetchRealCommunityExample = []struct {
ContractAddress: "0x21F6F5Cb75E81e5104D890D750270eD6538C50cb",
},
},
- ClusterID: shard.MainStatusShardCluster,
+ ClusterID: wakuv2.MainStatusShardCluster,
CheckExpectedEnvelopes: false,
CustomOptions: []StoreNodeRequestOption{
WithInitialPageSize(1),
diff --git a/protocol/messenger_testing_utils.go b/protocol/messenger_testing_utils.go
index b869c8618..70116351c 100644
--- a/protocol/messenger_testing_utils.go
+++ b/protocol/messenger_testing_utils.go
@@ -205,7 +205,7 @@ func WaitOnSignaledCommunityFound(m *Messenger, action func(), condition func(co
}
}
-func WaitForConnectionStatus(s *suite.Suite, waku *wakuv2.NWaku, action func() bool) {
+func WaitForConnectionStatus(s *suite.Suite, waku *wakuv2.Waku, action func() bool) {
subscription := waku.SubscribeToConnStatusChanges()
defer subscription.Unsubscribe()
@@ -237,7 +237,7 @@ func hasAllPeers(m map[peer.ID]types.WakuV2Peer, checkSlice peer.IDSlice) bool {
return true
}
-func WaitForPeersConnected(s *suite.Suite, waku *wakuv2.NWaku, action func() peer.IDSlice) {
+func WaitForPeersConnected(s *suite.Suite, waku *wakuv2.Waku, action func() peer.IDSlice) {
subscription := waku.SubscribeToConnStatusChanges()
defer subscription.Unsubscribe()
diff --git a/protocol/waku_builder_test.go b/protocol/waku_builder_test.go
index 7c8adab32..1134d5eec 100644
--- a/protocol/waku_builder_test.go
+++ b/protocol/waku_builder_test.go
@@ -12,7 +12,6 @@ import (
"github.com/status-im/status-go/appdatabase"
gethbridge "github.com/status-im/status-go/eth-node/bridge/geth"
"github.com/status-im/status-go/eth-node/types"
- "github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/t/helpers"
waku2 "github.com/status-im/status-go/wakuv2"
)
@@ -62,7 +61,7 @@ func NewTestWakuV2(s *suite.Suite, cfg testWakuV2Config) *waku2.Waku {
err = wakuNode.Start()
if cfg.enableStore {
- err := wakuNode.SubscribeToPubsubTopic(shard.DefaultNonProtectedPubsubTopic(), nil)
+ err := wakuNode.SubscribeToPubsubTopic(waku2.DefaultNonProtectedPubsubTopic(), nil)
s.Require().NoError(err)
}
s.Require().NoError(err)
@@ -78,7 +77,7 @@ func CreateWakuV2Network(s *suite.Suite, parentLogger *zap.Logger, nodeNames []s
nodes[i] = NewTestWakuV2(s, testWakuV2Config{
logger: parentLogger.Named("waku-" + name),
enableStore: false,
- clusterID: shard.MainStatusShardCluster,
+ clusterID: waku2.MainStatusShardCluster,
})
}
@@ -89,9 +88,10 @@ func CreateWakuV2Network(s *suite.Suite, parentLogger *zap.Logger, nodeNames []s
continue
}
- addrs := nodes[j].ListenAddresses()
+ addrs, err := nodes[j].ListenAddresses()
+ s.Require().NoError(err)
s.Require().Greater(len(addrs), 0)
- _, err := nodes[i].AddRelayPeer(addrs[0])
+ _, err = nodes[i].AddRelayPeer(addrs[0])
s.Require().NoError(err)
err = nodes[i].DialPeer(addrs[0])
s.Require().NoError(err)
diff --git a/services/ext/service.go b/services/ext/service.go
index c138d061c..95d1b49c7 100644
--- a/services/ext/service.go
+++ b/services/ext/service.go
@@ -123,7 +123,7 @@ func (s *Service) GetPeer(rawURL string) (*enode.Node, error) {
return enode.ParseV4(rawURL)
}
-func (s *Service) InitProtocol(nodeName string, identity *ecdsa.PrivateKey, appDb, walletDb *sql.DB, httpServer *server.MediaServer, multiAccountDb *multiaccounts.Database, acc *multiaccounts.Account, accountManager *account.GethManager, rpcClient *rpc.Client, walletService *wallet.Service, communityTokensService *communitytokens.Service, wakuService *wakuv2.NWaku, logger *zap.Logger) error {
+func (s *Service) InitProtocol(nodeName string, identity *ecdsa.PrivateKey, appDb, walletDb *sql.DB, httpServer *server.MediaServer, multiAccountDb *multiaccounts.Database, acc *multiaccounts.Account, accountManager *account.GethManager, rpcClient *rpc.Client, walletService *wallet.Service, communityTokensService *communitytokens.Service, wakuService *wakuv2.Waku, logger *zap.Logger) error {
var err error
if !s.config.ShhextConfig.PFSEnabled {
return nil
@@ -393,7 +393,7 @@ func buildMessengerOptions(
accountsDB *accounts.Database,
walletService *wallet.Service,
communityTokensService *communitytokens.Service,
- wakuService *wakuv2.NWaku,
+ wakuService *wakuv2.Waku,
logger *zap.Logger,
messengerSignalsHandler protocol.MessengerSignalsHandler,
accountManager account.Manager,
diff --git a/services/mailservers/api_test.go b/services/mailservers/api_test.go
index 503eaf26c..677dc5e2e 100644
--- a/services/mailservers/api_test.go
+++ b/services/mailservers/api_test.go
@@ -8,10 +8,10 @@ import (
"github.com/status-im/status-go/appdatabase"
"github.com/status-im/status-go/eth-node/types"
- "github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/sqlite"
"github.com/status-im/status-go/protocol/transport"
"github.com/status-im/status-go/t/helpers"
+ "github.com/status-im/status-go/wakuv2"
)
func setupTestDB(t *testing.T) (*Database, func()) {
@@ -62,9 +62,9 @@ func TestTopic(t *testing.T) {
defer close()
topicA := "0x61000000"
topicD := "0x64000000"
- topic1 := MailserverTopic{PubsubTopic: shard.DefaultShardPubsubTopic(), ContentTopic: topicA, LastRequest: 1}
- topic2 := MailserverTopic{PubsubTopic: shard.DefaultShardPubsubTopic(), ContentTopic: "0x6200000", LastRequest: 2}
- topic3 := MailserverTopic{PubsubTopic: shard.DefaultShardPubsubTopic(), ContentTopic: "0x6300000", LastRequest: 3}
+ topic1 := MailserverTopic{PubsubTopic: wakuv2.DefaultShardPubsubTopic(), ContentTopic: topicA, LastRequest: 1}
+ topic2 := MailserverTopic{PubsubTopic: wakuv2.DefaultShardPubsubTopic(), ContentTopic: "0x6200000", LastRequest: 2}
+ topic3 := MailserverTopic{PubsubTopic: wakuv2.DefaultShardPubsubTopic(), ContentTopic: "0x6300000", LastRequest: 3}
require.NoError(t, db.AddTopic(topic1))
require.NoError(t, db.AddTopic(topic2))
@@ -77,14 +77,14 @@ func TestTopic(t *testing.T) {
filters := []*transport.Filter{
// Existing topic, is not updated
{
- PubsubTopic: shard.DefaultShardPubsubTopic(),
+ PubsubTopic: wakuv2.DefaultShardPubsubTopic(),
ContentTopic: types.BytesToTopic([]byte{0x61}),
},
// Non existing topic is not inserted
{
Discovery: true,
Negotiated: true,
- PubsubTopic: shard.DefaultShardPubsubTopic(),
+ PubsubTopic: wakuv2.DefaultShardPubsubTopic(),
ContentTopic: types.BytesToTopic([]byte{0x64}),
},
}
@@ -160,7 +160,7 @@ func TestAddGetDeleteMailserverTopics(t *testing.T) {
defer close()
api := &API{db: db}
testTopic := MailserverTopic{
- PubsubTopic: shard.DefaultShardPubsubTopic(),
+ PubsubTopic: wakuv2.DefaultShardPubsubTopic(),
ContentTopic: "topic-001",
ChatIDs: []string{"chatID01", "chatID02"},
LastRequest: 10,
@@ -173,14 +173,14 @@ func TestAddGetDeleteMailserverTopics(t *testing.T) {
require.NoError(t, err)
require.EqualValues(t, []MailserverTopic{testTopic}, topics)
- err = api.DeleteMailserverTopic(context.Background(), shard.DefaultShardPubsubTopic(), testTopic.ContentTopic)
+ err = api.DeleteMailserverTopic(context.Background(), wakuv2.DefaultShardPubsubTopic(), testTopic.ContentTopic)
require.NoError(t, err)
topics, err = api.GetMailserverTopics(context.Background())
require.NoError(t, err)
require.EqualValues(t, ([]MailserverTopic)(nil), topics)
// Delete non-existing topic.
- err = api.DeleteMailserverTopic(context.Background(), shard.DefaultShardPubsubTopic(), "non-existing-topic")
+ err = api.DeleteMailserverTopic(context.Background(), wakuv2.DefaultShardPubsubTopic(), "non-existing-topic")
require.NoError(t, err)
}
diff --git a/third_party/nwaku b/third_party/nwaku
new file mode 160000
index 000000000..b358c90fa
--- /dev/null
+++ b/third_party/nwaku
@@ -0,0 +1 @@
+Subproject commit b358c90fa51d20957853e790aafc4e0987297ac7
diff --git a/wakuv2/api.go b/wakuv2/api.go
index 454a7fbab..f106b32f5 100644
--- a/wakuv2/api.go
+++ b/wakuv2/api.go
@@ -1,17 +1,17 @@
-// Copyright 2019 The NWaku Library Authors.
+// Copyright 2019 The Waku Library Authors.
//
-// The NWaku library is free software: you can redistribute it and/or modify
+// The Waku library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
-// The NWaku library is distributed in the hope that it will be useful,
+// The Waku library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty off
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
-// along with the NWaku library. If not, see <http://www.gnu.org/licenses/>.
+// along with the Waku library. If not, see <http://www.gnu.org/licenses/>.
//
// This software uses the go-ethereum library, which is licensed
// under the GNU Lesser General Public Library, version 3 or any later.
@@ -52,14 +52,14 @@ var (
// PublicWakuAPI provides the waku RPC service that can be
// use publicly without security implications.
type PublicWakuAPI struct {
- w *NWaku
+ w *Waku
mu sync.Mutex
lastUsed map[string]time.Time // keeps track when a filter was polled for the last time.
}
// NewPublicWakuAPI create a new RPC waku service.
-func NewPublicWakuAPI(w *NWaku) *PublicWakuAPI {
+func NewPublicWakuAPI(w *Waku) *PublicWakuAPI {
api := &PublicWakuAPI{
w: w,
lastUsed: make(map[string]time.Time),
@@ -185,7 +185,7 @@ type NewMessage struct {
Priority *int `json:"priority"`
}
-// Post posts a message on the NWaku network.
+// Post posts a message on the Waku network.
// returns the hash of the message in case of success.
func (api *PublicWakuAPI) Post(ctx context.Context, req NewMessage) (hexutil.Bytes, error) {
var (
@@ -252,7 +252,7 @@ func (api *PublicWakuAPI) Post(ctx context.Context, req NewMessage) (hexutil.Byt
Version: &version,
ContentTopic: req.ContentTopic.ContentTopic(),
Timestamp: proto.Int64(api.w.timestamp()),
- Meta: []byte{}, // TODO: empty for now. Once we use NWaku Archive v2, we should deprecate the timestamp and use an ULID here
+ Meta: []byte{}, // TODO: empty for now. Once we use Waku Archive v2, we should deprecate the timestamp and use an ULID here
Ephemeral: &req.Ephemeral,
}
diff --git a/wakuv2/gowaku.go b/wakuv2/gowaku.go
new file mode 100644
index 000000000..efb07be23
--- /dev/null
+++ b/wakuv2/gowaku.go
@@ -0,0 +1,1882 @@
+//go:build !use_nwaku
+// +build !use_nwaku
+
+// Copyright 2019 The Waku Library Authors.
+//
+// The Waku library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The Waku library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the Waku library. If not, see .
+//
+// This software uses the go-ethereum library, which is licensed
+// under the GNU Lesser General Public License, version 3 or any later version.
+
+package wakuv2
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "crypto/sha256"
+ "database/sql"
+ "errors"
+ "fmt"
+ "math"
+ "net"
+ "runtime"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/jellydator/ttlcache/v3"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/multiformats/go-multiaddr"
+
+ "go.uber.org/zap"
+
+ "golang.org/x/crypto/pbkdf2"
+ "golang.org/x/time/rate"
+
+ gethcommon "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/rpc"
+
+ "github.com/libp2p/go-libp2p"
+ pubsub "github.com/libp2p/go-libp2p-pubsub"
+ "github.com/libp2p/go-libp2p/core/metrics"
+
+ filterapi "github.com/waku-org/go-waku/waku/v2/api/filter"
+ "github.com/waku-org/go-waku/waku/v2/api/missing"
+ "github.com/waku-org/go-waku/waku/v2/api/publish"
+ "github.com/waku-org/go-waku/waku/v2/dnsdisc"
+ "github.com/waku-org/go-waku/waku/v2/onlinechecker"
+ "github.com/waku-org/go-waku/waku/v2/peermanager"
+ wps "github.com/waku-org/go-waku/waku/v2/peerstore"
+ "github.com/waku-org/go-waku/waku/v2/protocol"
+ "github.com/waku-org/go-waku/waku/v2/protocol/filter"
+ "github.com/waku-org/go-waku/waku/v2/protocol/legacy_store"
+ "github.com/waku-org/go-waku/waku/v2/protocol/lightpush"
+ "github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange"
+ "github.com/waku-org/go-waku/waku/v2/protocol/relay"
+ "github.com/waku-org/go-waku/waku/v2/protocol/store"
+ "github.com/waku-org/go-waku/waku/v2/utils"
+
+ "github.com/status-im/status-go/connection"
+ "github.com/status-im/status-go/eth-node/types"
+ "github.com/status-im/status-go/logutils"
+ "github.com/status-im/status-go/timesource"
+ "github.com/status-im/status-go/wakuv2/common"
+ "github.com/status-im/status-go/wakuv2/persistence"
+
+ node "github.com/waku-org/go-waku/waku/v2/node"
+ "github.com/waku-org/go-waku/waku/v2/protocol/pb"
+)
+
+const messageQueueLimit = 1024
+const requestTimeout = 30 * time.Second
+const bootnodesQueryBackoffMs = 200
+const bootnodesMaxRetries = 7
+const cacheTTL = 20 * time.Minute
+const maxRelayPeers = 300
+const randomPeersKeepAliveInterval = 5 * time.Second
+const allPeersKeepAliveInterval = 5 * time.Minute
+const peersToPublishForLightpush = 2
+const publishingLimiterRate = rate.Limit(2)
+const publishingLimitBurst = 4
+
// SentEnvelope pairs a published envelope with the method that was used
// to publish it, for telemetry reporting.
type SentEnvelope struct {
	Envelope      *protocol.Envelope
	PublishMethod PublishMethod
}
+
// ErrorSendingEnvelope captures a publish failure together with the
// envelope that could not be sent.
type ErrorSendingEnvelope struct {
	Error        error
	SentEnvelope SentEnvelope
}
+
// ITelemetryClient is the sink for envelope-traffic and peer-health
// metrics pushed by this node (see SetStatusTelemetryClient).
type ITelemetryClient interface {
	PushReceivedEnvelope(ctx context.Context, receivedEnvelope *protocol.Envelope)
	PushSentEnvelope(ctx context.Context, sentEnvelope SentEnvelope)
	PushErrorSendingEnvelope(ctx context.Context, errorSendingEnvelope ErrorSendingEnvelope)
	PushPeerCount(ctx context.Context, peerCount int)
	PushPeerConnFailures(ctx context.Context, peerConnFailures map[string]int)
}
+
// Waku represents a dark communication interface through the Ethereum
// network, using its very own P2P communication layer.
type Waku struct {
	node  *node.WakuNode // reference to a libp2p waku node
	appDB *sql.DB        // app database; used for the message store and protected-topics persistence

	dnsAddressCache     map[string][]dnsdisc.DiscoveredNode // Map to store the multiaddresses returned by dns discovery
	dnsAddressCacheLock *sync.RWMutex                       // lock to handle access to the map

	// Filter-related
	filters       *common.Filters // Message filters installed with Subscribe function
	filterManager *filterapi.FilterManager

	privateKeys map[string]*ecdsa.PrivateKey // Private key storage
	symKeys     map[string][]byte            // Symmetric key storage
	keyMu       sync.RWMutex                 // Mutex associated with key stores

	envelopeCache *ttlcache.Cache[gethcommon.Hash, *common.ReceivedMessage] // Pool of envelopes currently tracked by this node
	poolMu        sync.RWMutex                                              // Mutex to sync the message and expiration pools

	bandwidthCounter *metrics.BandwidthCounter // libp2p bandwidth accounting (registered via libp2p.BandwidthReporter)

	protectedTopicStore *persistence.ProtectedTopicsStore // persistent storage of protected topics; nil when no appDB was given

	sendQueue *publish.MessageQueue       // queue of messages pending publication
	limiter   *publish.PublishRateLimiter // rate limiter applied to publishing (infinite when throttling is disabled)

	missingMsgVerifier *missing.MissingMessageVerifier // store-based verification of missed messages (see Start)

	msgQueue chan *common.ReceivedMessage // Message queue for waku messages that havent been decoded

	ctx    context.Context    // lifetime of the node; cancelled on shutdown
	cancel context.CancelFunc
	wg     sync.WaitGroup // tracks background goroutines started by the node

	cfg     *Config
	options []node.WakuNodeOption // options assembled by New, consumed by Start

	envelopeFeed event.Feed // feed of EnvelopeEvents (see SubscribeEnvelopeEvents)

	storeMsgIDs   map[gethcommon.Hash]bool // Map of the currently processing ids
	storeMsgIDsMu sync.RWMutex

	messageSentCheck *publish.MessageSentCheck // store-confirmation tracker for sent messages; may be nil

	topicHealthStatusChan   chan peermanager.TopicHealthStatus
	connectionNotifChan     chan node.PeerConnection
	connStatusSubscriptions map[string]*types.ConnStatusSubscription
	connStatusMu            sync.Mutex // guards connStatusSubscriptions
	onlineChecker           *onlinechecker.DefaultOnlineChecker
	state                   connection.State

	logger *zap.Logger

	// NTP Synced timesource
	timesource *timesource.NTPTimeSource

	// seededBootnodesForDiscV5 indicates whether we manage to retrieve discovery
	// bootnodes successfully
	seededBootnodesForDiscV5 bool

	// goingOnline is channel that notifies when connectivity has changed from offline to online
	goingOnline chan struct{}

	// discV5BootstrapNodes is the ENR to be used to fetch bootstrap nodes for discovery
	discV5BootstrapNodes []string

	onHistoricMessagesRequestFailed func([]byte, peer.ID, error)
	onPeerStats                     func(types.ConnStatus)

	statusTelemetryClient ITelemetryClient // optional telemetry sink; nil unless SetStatusTelemetryClient was called

	defaultShardInfo protocol.RelayShards // relay shard derived from cfg.DefaultShardPubsubTopic
}
+
+func (w *Waku) SetStatusTelemetryClient(client ITelemetryClient) {
+ w.statusTelemetryClient = client
+}
+
+func newTTLCache() *ttlcache.Cache[gethcommon.Hash, *common.ReceivedMessage] {
+ cache := ttlcache.New[gethcommon.Hash, *common.ReceivedMessage](ttlcache.WithTTL[gethcommon.Hash, *common.ReceivedMessage](cacheTTL))
+ go cache.Start()
+ return cache
+}
+
+// New creates a WakuV2 client ready to communicate through the LibP2P network.
+func New(nodeKey *ecdsa.PrivateKey, fleet string, cfg *Config, logger *zap.Logger, appDB *sql.DB, ts *timesource.NTPTimeSource, onHistoricMessagesRequestFailed func([]byte, peer.ID, error), onPeerStats func(types.ConnStatus)) (*Waku, error) {
+ var err error
+ if logger == nil {
+ logger, err = zap.NewDevelopment()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if ts == nil {
+ ts = timesource.Default()
+ }
+
+ cfg = setDefaults(cfg)
+ if err = cfg.Validate(logger); err != nil {
+ return nil, err
+ }
+
+ logger.Info("starting wakuv2 with config", zap.Any("config", cfg))
+
+ ctx, cancel := context.WithCancel(context.Background())
+
+ waku := &Waku{
+ appDB: appDB,
+ cfg: cfg,
+ privateKeys: make(map[string]*ecdsa.PrivateKey),
+ symKeys: make(map[string][]byte),
+ envelopeCache: newTTLCache(),
+ msgQueue: make(chan *common.ReceivedMessage, messageQueueLimit),
+ topicHealthStatusChan: make(chan peermanager.TopicHealthStatus, 100),
+ connectionNotifChan: make(chan node.PeerConnection, 20),
+ connStatusSubscriptions: make(map[string]*types.ConnStatusSubscription),
+ ctx: ctx,
+ cancel: cancel,
+ wg: sync.WaitGroup{},
+ dnsAddressCache: make(map[string][]dnsdisc.DiscoveredNode),
+ dnsAddressCacheLock: &sync.RWMutex{},
+ storeMsgIDs: make(map[gethcommon.Hash]bool),
+ timesource: ts,
+ storeMsgIDsMu: sync.RWMutex{},
+ logger: logger,
+ discV5BootstrapNodes: cfg.DiscV5BootstrapNodes,
+ onHistoricMessagesRequestFailed: onHistoricMessagesRequestFailed,
+ onPeerStats: onPeerStats,
+ onlineChecker: onlinechecker.NewDefaultOnlineChecker(false).(*onlinechecker.DefaultOnlineChecker),
+ sendQueue: publish.NewMessageQueue(1000, cfg.UseThrottledPublish),
+ }
+
+ if !cfg.UseThrottledPublish || testing.Testing() {
+ // To avoid delaying the tests, or for when we dont want to rate limit, we set up an infinite rate limiter,
+ // basically disabling the rate limit functionality
+ waku.limiter = publish.NewPublishRateLimiter(rate.Inf, 1)
+
+ } else {
+ waku.limiter = publish.NewPublishRateLimiter(publishingLimiterRate, publishingLimitBurst)
+ }
+
+ waku.filters = common.NewFilters(waku.cfg.DefaultShardPubsubTopic, waku.logger)
+ waku.bandwidthCounter = metrics.NewBandwidthCounter()
+
+ if nodeKey == nil {
+ // No nodekey is provided, create an ephemeral key
+ nodeKey, err = crypto.GenerateKey()
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate a random go-waku private key: %v", err)
+ }
+ }
+
+ hostAddr, err := net.ResolveTCPAddr("tcp", fmt.Sprint(cfg.Host, ":", cfg.Port))
+ if err != nil {
+ return nil, fmt.Errorf("failed to setup the network interface: %v", err)
+ }
+
+ libp2pOpts := node.DefaultLibP2POptions
+ libp2pOpts = append(libp2pOpts, libp2p.BandwidthReporter(waku.bandwidthCounter))
+ libp2pOpts = append(libp2pOpts, libp2p.NATPortMap())
+
+ opts := []node.WakuNodeOption{
+ node.WithLibP2POptions(libp2pOpts...),
+ node.WithPrivateKey(nodeKey),
+ node.WithHostAddress(hostAddr),
+ node.WithConnectionNotification(waku.connectionNotifChan),
+ node.WithTopicHealthStatusChannel(waku.topicHealthStatusChan),
+ node.WithKeepAlive(randomPeersKeepAliveInterval, allPeersKeepAliveInterval),
+ node.WithLogger(logger),
+ node.WithLogLevel(logger.Level()),
+ node.WithClusterID(cfg.ClusterID),
+ node.WithMaxMsgSize(1024 * 1024),
+ }
+
+ if cfg.EnableDiscV5 {
+ bootnodes, err := waku.getDiscV5BootstrapNodes(waku.ctx, cfg.DiscV5BootstrapNodes)
+ if err != nil {
+ logger.Error("failed to get bootstrap nodes", zap.Error(err))
+ return nil, err
+ }
+ opts = append(opts, node.WithDiscoveryV5(uint(cfg.UDPPort), bootnodes, cfg.AutoUpdate))
+ }
+ shards, err := protocol.TopicsToRelayShards(cfg.DefaultShardPubsubTopic)
+ if err != nil {
+ logger.Error("FATAL ERROR: failed to parse relay shards", zap.Error(err))
+ return nil, errors.New("failed to parse relay shard, invalid pubsubTopic configuration")
+ }
+ if len(shards) == 0 { //Hack so that tests don't fail. TODO: Need to remove this once tests are changed to use proper cluster and shard.
+ shardInfo := protocol.RelayShards{ClusterID: 0, ShardIDs: []uint16{0}}
+ shards = append(shards, shardInfo)
+ }
+ waku.defaultShardInfo = shards[0]
+ if cfg.LightClient {
+ opts = append(opts, node.WithWakuFilterLightNode())
+ waku.defaultShardInfo = shards[0]
+ opts = append(opts, node.WithMaxPeerConnections(cfg.DiscoveryLimit))
+ cfg.EnableStoreConfirmationForMessagesSent = false
+ //TODO: temporary work-around to improve lightClient connectivity, need to be removed once community sharding is implemented
+ opts = append(opts, node.WithPubSubTopics(cfg.DefaultShardedPubsubTopics))
+ } else {
+ relayOpts := []pubsub.Option{
+ pubsub.WithMaxMessageSize(int(waku.cfg.MaxMessageSize)),
+ }
+
+ if waku.logger.Level() == zap.DebugLevel {
+ relayOpts = append(relayOpts, pubsub.WithEventTracer(waku))
+ }
+
+ opts = append(opts, node.WithWakuRelayAndMinPeers(waku.cfg.MinPeersForRelay, relayOpts...))
+ opts = append(opts, node.WithMaxPeerConnections(maxRelayPeers))
+ cfg.EnablePeerExchangeClient = true //Enabling this until discv5 issues are resolved. This will enable more peers to be connected for relay mesh.
+ cfg.EnableStoreConfirmationForMessagesSent = true
+ }
+
+ if cfg.EnableStore {
+ if appDB == nil {
+ return nil, errors.New("appDB is required for store")
+ }
+ opts = append(opts, node.WithWakuStore())
+ dbStore, err := persistence.NewDBStore(logger, persistence.WithDB(appDB), persistence.WithRetentionPolicy(cfg.StoreCapacity, time.Duration(cfg.StoreSeconds)*time.Second))
+ if err != nil {
+ return nil, err
+ }
+ opts = append(opts, node.WithMessageProvider(dbStore))
+ }
+
+ if !cfg.LightClient {
+ opts = append(opts, node.WithWakuFilterFullNode(filter.WithMaxSubscribers(20)))
+ opts = append(opts, node.WithLightPush(lightpush.WithRateLimiter(1, 1)))
+ }
+
+ if appDB != nil {
+ waku.protectedTopicStore, err = persistence.NewProtectedTopicsStore(logger, appDB)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if cfg.EnablePeerExchangeServer {
+ opts = append(opts, node.WithPeerExchange(peer_exchange.WithRateLimiter(1, 1)))
+ }
+
+ waku.options = opts
+ waku.logger.Info("setup the go-waku node successfully")
+
+ return waku, nil
+}
+
+func (w *Waku) SubscribeToConnStatusChanges() *types.ConnStatusSubscription {
+ w.connStatusMu.Lock()
+ defer w.connStatusMu.Unlock()
+ subscription := types.NewConnStatusSubscription()
+ w.connStatusSubscriptions[subscription.ID] = subscription
+ return subscription
+}
+
+func (w *Waku) GetNodeENRString() (string, error) {
+ if w.node == nil {
+ return "", errors.New("node not initialized")
+ }
+ return w.node.ENR().String(), nil
+}
+
// getDiscV5BootstrapNodes resolves the configured bootstrap addresses into
// enode records. Plain ENR strings are parsed inline; "enrtree://" entries
// are resolved concurrently through DNS discovery. Any DNS-discovery
// failure clears w.seededBootnodesForDiscV5 so seeding can be retried.
func (w *Waku) getDiscV5BootstrapNodes(ctx context.Context, addresses []string) ([]*enode.Node, error) {
	wg := sync.WaitGroup{}
	mu := sync.Mutex{}
	var result []*enode.Node

	w.seededBootnodesForDiscV5 = true

	// Collects the ENR of one discovered node. mu guards result (and the
	// seeded flag below); wg here is the per-node WaitGroup that
	// dnsDiscover passes in, not the outer one.
	retrieveENR := func(d dnsdisc.DiscoveredNode, wg *sync.WaitGroup) {
		mu.Lock()
		defer mu.Unlock()
		defer wg.Done()
		if d.ENR != nil {
			result = append(result, d.ENR)
		}
	}

	for _, addrString := range addresses {
		if addrString == "" {
			continue
		}

		if strings.HasPrefix(addrString, "enrtree://") {
			// Use DNS Discovery
			wg.Add(1)
			go func(addr string) {
				defer wg.Done()
				if err := w.dnsDiscover(ctx, addr, retrieveENR); err != nil {
					mu.Lock()
					w.seededBootnodesForDiscV5 = false
					mu.Unlock()
				}
			}(addrString)
		} else {
			// It's a normal enr
			bootnode, err := enode.Parse(enode.ValidSchemes, addrString)
			if err != nil {
				return nil, err
			}
			result = append(result, bootnode)
		}
	}
	wg.Wait()

	return result, nil
}
+
// fnApplyToEachPeer is invoked by dnsDiscover for every discovered node;
// implementations must call wg.Done exactly once per invocation.
type fnApplyToEachPeer func(d dnsdisc.DiscoveredNode, wg *sync.WaitGroup)
+
// dnsDiscover resolves the given enrtree address via DNS discovery (bounded
// by requestTimeout), caches the discovered nodes, and calls apply once per
// cached node, waiting until every apply has signalled completion.
//
// NOTE(review): the cache write-lock is held for the entire call, including
// the network retrieval and all apply callbacks, so concurrent dnsDiscover
// calls fully serialize here — confirm this is intentional.
func (w *Waku) dnsDiscover(ctx context.Context, enrtreeAddress string, apply fnApplyToEachPeer) error {
	w.logger.Info("retrieving nodes", zap.String("enr", enrtreeAddress))
	ctx, cancel := context.WithTimeout(ctx, requestTimeout)
	defer cancel()

	w.dnsAddressCacheLock.Lock()
	defer w.dnsAddressCacheLock.Unlock()

	// Cache hit: skip the network round-trip entirely.
	discNodes, ok := w.dnsAddressCache[enrtreeAddress]
	if !ok {
		nameserver := w.cfg.Nameserver
		resolver := w.cfg.Resolver

		var opts []dnsdisc.DNSDiscoveryOption
		if nameserver != "" {
			opts = append(opts, dnsdisc.WithNameserver(nameserver))
		}
		if resolver != nil {
			opts = append(opts, dnsdisc.WithResolver(resolver))
		}

		discoveredNodes, err := dnsdisc.RetrieveNodes(ctx, enrtreeAddress, opts...)
		if err != nil {
			w.logger.Warn("dns discovery error ", zap.Error(err))
			return err
		}

		// Only populate the cache on a non-empty result so an empty answer
		// can be retried on a later call.
		if len(discoveredNodes) != 0 {
			w.dnsAddressCache[enrtreeAddress] = append(w.dnsAddressCache[enrtreeAddress], discoveredNodes...)
			discNodes = w.dnsAddressCache[enrtreeAddress]
		}
	}

	wg := &sync.WaitGroup{}
	wg.Add(len(discNodes))
	for _, d := range discNodes {
		apply(d, wg)
	}
	wg.Wait()

	return nil
}
+
// discoverAndConnectPeers initiates connections to the statically configured
// WakuNodes: "enrtree://" entries are resolved asynchronously via DNS
// discovery, anything else is parsed as a multiaddress. All dialing happens
// in background goroutines; invalid entries are logged and skipped.
func (w *Waku) discoverAndConnectPeers() {
	// Dial every DNS-discovered node that advertises at least one address.
	fnApply := func(d dnsdisc.DiscoveredNode, wg *sync.WaitGroup) {
		defer wg.Done()
		if len(d.PeerInfo.Addrs) != 0 {
			go w.connect(d.PeerInfo, d.ENR, wps.DNSDiscovery)
		}
	}

	for _, addrString := range w.cfg.WakuNodes {
		addrString := addrString // per-iteration copy for the closure below (pre-Go 1.22 loop semantics)
		if strings.HasPrefix(addrString, "enrtree://") {
			// Use DNS Discovery
			go func() {
				if err := w.dnsDiscover(w.ctx, addrString, fnApply); err != nil {
					w.logger.Error("could not obtain dns discovery peers for ClusterConfig.WakuNodes", zap.Error(err), zap.String("dnsDiscURL", addrString))
				}
			}()
		} else {
			// It is a normal multiaddress
			addr, err := multiaddr.NewMultiaddr(addrString)
			if err != nil {
				w.logger.Warn("invalid peer multiaddress", zap.String("ma", addrString), zap.Error(err))
				continue
			}

			peerInfo, err := peer.AddrInfoFromP2pAddr(addr)
			if err != nil {
				w.logger.Warn("invalid peer multiaddress", zap.Stringer("addr", addr), zap.Error(err))
				continue
			}

			go w.connect(*peerInfo, nil, wps.Static)
		}
	}
}
+
// connect registers peerInfo as a discovered peer so go-waku's peer
// connector will dial it; identify runs as part of that connection flow.
// The connection will eventually be pruned by the connection manager if needed.
func (w *Waku) connect(peerInfo peer.AddrInfo, enr *enode.Node, origin wps.Origin) {
	w.node.AddDiscoveredPeer(peerInfo.ID, peerInfo.Addrs, origin, w.cfg.DefaultShardedPubsubTopics, enr, true)
}
+
// telemetryBandwidthStats pushes per-protocol bandwidth statistics to the
// telemetry server every 20 seconds, resetting the counters when the
// calendar day changes. It is a no-op when telemetryServerURL is empty and
// exits when the node context is cancelled.
//
// NOTE(review): w.wg.Add(1) runs inside this function, which callers start
// with `go`; a concurrent wg.Wait may not observe this worker — confirm
// the shutdown ordering.
func (w *Waku) telemetryBandwidthStats(telemetryServerURL string) {
	w.wg.Add(1)
	defer w.wg.Done()

	if telemetryServerURL == "" {
		return
	}

	telemetry := NewBandwidthTelemetryClient(w.logger, telemetryServerURL)

	ticker := time.NewTicker(time.Second * 20)
	defer ticker.Stop()

	today := time.Now()

	for {
		select {
		case <-w.ctx.Done():
			return
		case now := <-ticker.C:
			// Reset totals when day changes
			if now.Day() != today.Day() {
				today = now
				w.bandwidthCounter.Reset()
			}

			go telemetry.PushProtocolStats(w.bandwidthCounter.GetBandwidthByProtocol())
		}
	}
}
+
+func (w *Waku) GetStats() types.StatsSummary {
+ stats := w.bandwidthCounter.GetBandwidthTotals()
+ return types.StatsSummary{
+ UploadRate: uint64(stats.RateOut),
+ DownloadRate: uint64(stats.RateIn),
+ }
+}
+
// runPeerExchangeLoop periodically (every 5s) requests additional peers via
// the peer-exchange protocol from nodes found through DNS discovery. It is
// a no-op unless the peer exchange client is enabled and exits when the
// node context is cancelled.
//
// NOTE(review): like telemetryBandwidthStats, w.wg.Add(1) runs inside the
// goroutine itself — confirm shutdown ordering with wg.Wait.
func (w *Waku) runPeerExchangeLoop() {
	w.wg.Add(1)
	defer w.wg.Done()

	if !w.cfg.EnablePeerExchangeClient {
		// Currently peer exchange client is only used for light nodes
		return
	}

	ticker := time.NewTicker(time.Second * 5)
	defer ticker.Stop()

	for {
		select {
		case <-w.ctx.Done():
			w.logger.Debug("Peer exchange loop stopped")
			return
		case <-ticker.C:
			w.logger.Info("Running peer exchange loop")

			// We select only the nodes discovered via DNS Discovery that support peer exchange
			// We assume that those peers are running peer exchange according to infra config,
			// If not, the peer selection process in go-waku will filter them out anyway
			w.dnsAddressCacheLock.RLock()
			var peers peer.IDSlice
			for _, record := range w.dnsAddressCache {
				for _, discoveredNode := range record {
					if len(discoveredNode.PeerInfo.Addrs) == 0 {
						continue
					}
					// Attempt to connect to the peers.
					// Peers will be added to the libp2p peer store thanks to identify
					go w.connect(discoveredNode.PeerInfo, discoveredNode.ENR, wps.DNSDiscovery)
					peers = append(peers, discoveredNode.PeerID)
				}
			}
			w.dnsAddressCacheLock.RUnlock()

			if len(peers) != 0 {
				err := w.node.PeerExchange().Request(w.ctx, w.cfg.DiscoveryLimit, peer_exchange.WithAutomaticPeerSelection(peers...),
					peer_exchange.FilterByShard(int(w.defaultShardInfo.ClusterID), int(w.defaultShardInfo.ShardIDs[0])))
				if err != nil {
					w.logger.Error("couldnt request peers via peer exchange", zap.Error(err))
				}
			}
		}
	}
}
+
+func (w *Waku) GetPubsubTopic(topic string) string {
+ if topic == "" {
+ topic = w.cfg.DefaultShardPubsubTopic
+ }
+
+ return topic
+}
+
+func (w *Waku) unsubscribeFromPubsubTopicWithWakuRelay(topic string) error {
+ topic = w.GetPubsubTopic(topic)
+
+ if !w.node.Relay().IsSubscribed(topic) {
+ return nil
+ }
+
+ contentFilter := protocol.NewContentFilter(topic)
+
+ return w.node.Relay().Unsubscribe(w.ctx, contentFilter)
+}
+
// subscribeToPubsubTopicWithWakuRelay subscribes the relay to topic
// (resolved via GetPubsubTopic) and pumps received envelopes through
// OnNewEnvelopes until the node context is cancelled. When pubkey is
// non-nil a signed-topic validator for that key is installed first.
// Full nodes only; already-subscribed topics are a no-op.
func (w *Waku) subscribeToPubsubTopicWithWakuRelay(topic string, pubkey *ecdsa.PublicKey) error {
	if w.cfg.LightClient {
		return errors.New("only available for full nodes")
	}

	topic = w.GetPubsubTopic(topic)

	if w.node.Relay().IsSubscribed(topic) {
		return nil
	}

	if pubkey != nil {
		err := w.node.Relay().AddSignedTopicValidator(topic, pubkey)
		if err != nil {
			return err
		}
	}

	contentFilter := protocol.NewContentFilter(topic)

	sub, err := w.node.Relay().Subscribe(w.ctx, contentFilter)
	if err != nil {
		return err
	}

	w.wg.Add(1)
	go func() {
		defer w.wg.Done()
		for {
			select {
			case <-w.ctx.Done():
				// Shutdown: tear down the relay subscription; a Canceled
				// error is expected here and not worth logging.
				err := w.node.Relay().Unsubscribe(w.ctx, contentFilter)
				if err != nil && !errors.Is(err, context.Canceled) {
					w.logger.Error("could not unsubscribe", zap.Error(err))
				}
				return
			case env := <-sub[0].Ch:
				err := w.OnNewEnvelopes(env, common.RelayedMessageType, false)
				if err != nil {
					w.logger.Error("OnNewEnvelopes error", zap.Error(err))
				}
			}
		}
	}()

	return nil
}
+
// MaxMessageSize returns the maximum accepted message size, in bytes, as
// configured for this node.
func (w *Waku) MaxMessageSize() uint32 {
	return w.cfg.MaxMessageSize
}
+
// CurrentTime returns the current time from the NTP-synchronized time source.
func (w *Waku) CurrentTime() time.Time {
	return w.timesource.Now()
}
+
+// APIs returns the RPC descriptors the Waku implementation offers
+func (w *Waku) APIs() []rpc.API {
+ return []rpc.API{
+ {
+ Namespace: Name,
+ Version: VersionStr,
+ Service: NewPublicWakuAPI(w),
+ Public: false,
+ },
+ }
+}
+
// Protocols returns the devp2p sub-protocols run by this particular client;
// wakuv2 communicates over libp2p instead, so the list is always empty.
func (w *Waku) Protocols() []p2p.Protocol {
	return []p2p.Protocol{}
}
+
// SendEnvelopeEvent publishes event on the envelope feed and returns the
// number of subscribers it was delivered to.
func (w *Waku) SendEnvelopeEvent(event common.EnvelopeEvent) int {
	return w.envelopeFeed.Send(event)
}
+
// SubscribeEnvelopeEvents subscribes to envelopes feed.
// In order to prevent blocking waku producers events must be amply buffered.
func (w *Waku) SubscribeEnvelopeEvents(events chan<- common.EnvelopeEvent) event.Subscription {
	return w.envelopeFeed.Subscribe(events)
}
+
+// NewKeyPair generates a new cryptographic identity for the client, and injects
+// it into the known identities for message decryption. Returns ID of the new key pair.
+func (w *Waku) NewKeyPair() (string, error) {
+ key, err := crypto.GenerateKey()
+ if err != nil || !validatePrivateKey(key) {
+ key, err = crypto.GenerateKey() // retry once
+ }
+ if err != nil {
+ return "", err
+ }
+ if !validatePrivateKey(key) {
+ return "", fmt.Errorf("failed to generate valid key")
+ }
+
+ id, err := toDeterministicID(hexutil.Encode(crypto.FromECDSAPub(&key.PublicKey)), common.KeyIDSize)
+ if err != nil {
+ return "", err
+ }
+
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+
+ if w.privateKeys[id] != nil {
+ return "", fmt.Errorf("failed to generate unique ID")
+ }
+ w.privateKeys[id] = key
+ return id, nil
+}
+
+// DeleteKeyPair deletes the specified key if it exists.
+func (w *Waku) DeleteKeyPair(key string) bool {
+ deterministicID, err := toDeterministicID(key, common.KeyIDSize)
+ if err != nil {
+ return false
+ }
+
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+
+ if w.privateKeys[deterministicID] != nil {
+ delete(w.privateKeys, deterministicID)
+ return true
+ }
+ return false
+}
+
+// AddKeyPair imports a asymmetric private key and returns it identifier.
+func (w *Waku) AddKeyPair(key *ecdsa.PrivateKey) (string, error) {
+ id, err := makeDeterministicID(hexutil.Encode(crypto.FromECDSAPub(&key.PublicKey)), common.KeyIDSize)
+ if err != nil {
+ return "", err
+ }
+ if w.HasKeyPair(id) {
+ return id, nil // no need to re-inject
+ }
+
+ w.keyMu.Lock()
+ w.privateKeys[id] = key
+ w.keyMu.Unlock()
+
+ return id, nil
+}
+
+// SelectKeyPair adds cryptographic identity, and makes sure
+// that it is the only private key known to the node.
+func (w *Waku) SelectKeyPair(key *ecdsa.PrivateKey) error {
+ id, err := makeDeterministicID(hexutil.Encode(crypto.FromECDSAPub(&key.PublicKey)), common.KeyIDSize)
+ if err != nil {
+ return err
+ }
+
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+
+ w.privateKeys = make(map[string]*ecdsa.PrivateKey) // reset key store
+ w.privateKeys[id] = key
+
+ return nil
+}
+
+// DeleteKeyPairs removes all cryptographic identities known to the node
+func (w *Waku) DeleteKeyPairs() error {
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+
+ w.privateKeys = make(map[string]*ecdsa.PrivateKey)
+
+ return nil
+}
+
+// HasKeyPair checks if the waku node is configured with the private key
+// of the specified public pair.
+func (w *Waku) HasKeyPair(id string) bool {
+ deterministicID, err := toDeterministicID(id, common.KeyIDSize)
+ if err != nil {
+ return false
+ }
+
+ w.keyMu.RLock()
+ defer w.keyMu.RUnlock()
+ return w.privateKeys[deterministicID] != nil
+}
+
+// GetPrivateKey retrieves the private key of the specified identity.
+func (w *Waku) GetPrivateKey(id string) (*ecdsa.PrivateKey, error) {
+ deterministicID, err := toDeterministicID(id, common.KeyIDSize)
+ if err != nil {
+ return nil, err
+ }
+
+ w.keyMu.RLock()
+ defer w.keyMu.RUnlock()
+ key := w.privateKeys[deterministicID]
+ if key == nil {
+ return nil, fmt.Errorf("invalid id")
+ }
+ return key, nil
+}
+
+// GenerateSymKey generates a random symmetric key and stores it under id,
+// which is then returned. Will be used in the future for session key exchange.
+func (w *Waku) GenerateSymKey() (string, error) {
+ key, err := common.GenerateSecureRandomData(common.AESKeyLength)
+ if err != nil {
+ return "", err
+ } else if !common.ValidateDataIntegrity(key, common.AESKeyLength) {
+ return "", fmt.Errorf("error in GenerateSymKey: crypto/rand failed to generate random data")
+ }
+
+ id, err := common.GenerateRandomID()
+ if err != nil {
+ return "", fmt.Errorf("failed to generate ID: %s", err)
+ }
+
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+
+ if w.symKeys[id] != nil {
+ return "", fmt.Errorf("failed to generate unique ID")
+ }
+ w.symKeys[id] = key
+ return id, nil
+}
+
+// AddSymKey stores the key with a given id.
+func (w *Waku) AddSymKey(id string, key []byte) (string, error) {
+ deterministicID, err := toDeterministicID(id, common.KeyIDSize)
+ if err != nil {
+ return "", err
+ }
+
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+
+ if w.symKeys[deterministicID] != nil {
+ return "", fmt.Errorf("key already exists: %v", id)
+ }
+ w.symKeys[deterministicID] = key
+ return deterministicID, nil
+}
+
+// AddSymKeyDirect stores the key, and returns its id.
+func (w *Waku) AddSymKeyDirect(key []byte) (string, error) {
+ if len(key) != common.AESKeyLength {
+ return "", fmt.Errorf("wrong key size: %d", len(key))
+ }
+
+ id, err := common.GenerateRandomID()
+ if err != nil {
+ return "", fmt.Errorf("failed to generate ID: %s", err)
+ }
+
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+
+ if w.symKeys[id] != nil {
+ return "", fmt.Errorf("failed to generate unique ID")
+ }
+ w.symKeys[id] = key
+ return id, nil
+}
+
// AddSymKeyFromPassword generates the key from password, stores it, and returns its id.
func (w *Waku) AddSymKeyFromPassword(password string) (string, error) {
	id, err := common.GenerateRandomID()
	if err != nil {
		return "", fmt.Errorf("failed to generate ID: %s", err)
	}
	if w.HasSymKey(id) {
		return "", fmt.Errorf("failed to generate unique ID")
	}

	// kdf should run no less than 0.1 seconds on an average computer,
	// because it's an once in a session experience
	// NOTE(review): 65356 looks like a typo for 65536, but it is part of the
	// derivation parameters — changing it would invalidate every previously
	// derived key, so it must stay as-is.
	derived := pbkdf2.Key([]byte(password), nil, 65356, common.AESKeyLength, sha256.New)

	w.keyMu.Lock()
	defer w.keyMu.Unlock()

	// double check is necessary, because deriveKeyMaterial() is very slow
	if w.symKeys[id] != nil {
		return "", fmt.Errorf("critical error: failed to generate unique ID")
	}
	w.symKeys[id] = derived
	return id, nil
}
+
+// HasSymKey returns true if there is a key associated with the given id.
+// Otherwise returns false.
+func (w *Waku) HasSymKey(id string) bool {
+ w.keyMu.RLock()
+ defer w.keyMu.RUnlock()
+ return w.symKeys[id] != nil
+}
+
+// DeleteSymKey deletes the key associated with the name string if it exists.
+func (w *Waku) DeleteSymKey(id string) bool {
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+ if w.symKeys[id] != nil {
+ delete(w.symKeys, id)
+ return true
+ }
+ return false
+}
+
+// GetSymKey returns the symmetric key associated with the given id.
+func (w *Waku) GetSymKey(id string) ([]byte, error) {
+ w.keyMu.RLock()
+ defer w.keyMu.RUnlock()
+ if w.symKeys[id] != nil {
+ return w.symKeys[id], nil
+ }
+ return nil, fmt.Errorf("non-existent key ID")
+}
+
// Subscribe installs a new message handler used for filtering, decrypting
// and subsequent storing of incoming messages. On light clients the filter
// is additionally registered with the filter manager.
func (w *Waku) Subscribe(f *common.Filter) (string, error) {
	f.PubsubTopic = w.GetPubsubTopic(f.PubsubTopic)
	id, err := w.filters.Install(f)
	if err != nil {
		return id, err
	}

	if w.cfg.LightClient {
		// NOTE(review): assumes filterManager is non-nil whenever
		// cfg.LightClient is set — confirm initialization order.
		cf := protocol.NewContentFilter(f.PubsubTopic, f.ContentTopics.ContentTopics()...)
		w.filterManager.SubscribeFilter(id, cf)
	}

	return id, nil
}
+
+// Unsubscribe removes an installed message handler.
+func (w *Waku) Unsubscribe(ctx context.Context, id string) error {
+ ok := w.filters.Uninstall(id)
+ if !ok {
+ return fmt.Errorf("failed to unsubscribe: invalid ID '%s'", id)
+ }
+
+ if w.cfg.LightClient {
+ w.filterManager.UnsubscribeFilter(id)
+ }
+
+ return nil
+}
+
// GetFilter returns the installed filter with the given id, or nil when no
// such filter exists.
func (w *Waku) GetFilter(id string) *common.Filter {
	return w.filters.Get(id)
}
+
// UnsubscribeMany removes the installed message handlers with the given
// ids. Individual removal failures are logged and skipped; the returned
// error is always nil.
func (w *Waku) UnsubscribeMany(ids []string) error {
	for _, id := range ids {
		w.logger.Info("cleaning up filter", zap.String("id", id))
		ok := w.filters.Uninstall(id)
		if !ok {
			w.logger.Warn("could not remove filter with id", zap.String("id", id))
		}
	}
	return nil
}
+
// SkipPublishToTopic toggles the config flag that skips the actual publish
// to the pubsub topic.
func (w *Waku) SkipPublishToTopic(value bool) {
	w.cfg.SkipPublishToTopic = value
}
+
+func (w *Waku) ConfirmMessageDelivered(hashes []gethcommon.Hash) {
+ if !w.cfg.EnableStoreConfirmationForMessagesSent {
+ return
+ }
+ w.messageSentCheck.DeleteByMessageIDs(hashes)
+}
+
// SetStorePeerID sets the store node peer used by the message-sent
// confirmation tracker; a nil tracker is tolerated.
func (w *Waku) SetStorePeerID(peerID peer.ID) {
	if w.messageSentCheck != nil {
		w.messageSentCheck.SetStorePeerID(peerID)
	}
}
+
// Query performs a store query against peerID with the given criteria and
// cursor, feeding every returned message through OnNewEnvelopes as a store
// message. It returns the next cursor, the number of envelopes received,
// and any error; query failures are also reported through
// onHistoricMessagesRequestFailed when that callback is set.
func (w *Waku) Query(ctx context.Context, peerID peer.ID, query store.FilterCriteria, cursor []byte, opts []store.RequestOption, processEnvelopes bool) ([]byte, int, error) {
	requestID := protocol.GenerateRequestID()

	opts = append(opts,
		store.WithRequestID(requestID),
		store.WithPeer(peerID),
		store.WithCursor(cursor))

	logger := w.logger.With(zap.String("requestID", hexutil.Encode(requestID)), zap.Stringer("peerID", peerID))

	logger.Debug("store.query",
		logutils.WakuMessageTimestamp("startTime", query.TimeStart),
		logutils.WakuMessageTimestamp("endTime", query.TimeEnd),
		zap.Strings("contentTopics", query.ContentTopics.ToList()),
		zap.String("pubsubTopic", query.PubsubTopic),
		zap.String("cursor", hexutil.Encode(cursor)),
	)

	queryStart := time.Now()
	result, err := w.node.Store().Query(ctx, query, opts...)
	queryDuration := time.Since(queryStart)
	if err != nil {
		logger.Error("error querying storenode", zap.Error(err))

		if w.onHistoricMessagesRequestFailed != nil {
			w.onHistoricMessagesRequestFailed(requestID, peerID, err)
		}
		return nil, 0, err
	}

	messages := result.Messages()
	envelopesCount := len(messages)
	// NOTE(review): "hasCursor" is logged as IsComplete() && Cursor() != nil;
	// a cursor normally implies an incomplete result, so this expression
	// looks inverted — confirm against the store API.
	w.logger.Debug("store.query response", zap.Duration("queryDuration", queryDuration), zap.Int("numMessages", envelopesCount), zap.Bool("hasCursor", result.IsComplete() && result.Cursor() != nil))
	for _, mkv := range messages {
		msg := mkv.Message

		// Temporarily setting RateLimitProof to nil so it matches the WakuMessage protobuffer we are sending
		// See https://github.com/vacp2p/rfc/issues/563
		mkv.Message.RateLimitProof = nil

		envelope := protocol.NewEnvelope(msg, msg.GetTimestamp(), query.PubsubTopic)

		err = w.OnNewEnvelopes(envelope, common.StoreMessageType, processEnvelopes)
		if err != nil {
			return nil, 0, err
		}
	}

	return result.Cursor(), envelopesCount, nil
}
+
+// OnNewEnvelope is an interface from Waku FilterManager API that gets invoked when any new message is received by Filter.
+func (w *Waku) OnNewEnvelope(env *protocol.Envelope) error {
+ return w.OnNewEnvelopes(env, common.RelayedMessageType, false)
+}
+
+// Start implements node.Service, starting the background data propagation thread
+// of the Waku protocol.
+func (w *Waku) Start() error {
+	if w.ctx == nil {
+		w.ctx, w.cancel = context.WithCancel(context.Background())
+	}
+
+	var err error
+	if w.node, err = node.New(w.options...); err != nil {
+		return fmt.Errorf("failed to create a go-waku node: %v", err)
+	}
+
+	w.goingOnline = make(chan struct{})
+
+	if err = w.node.Start(w.ctx); err != nil {
+		return fmt.Errorf("failed to start go-waku node: %v", err)
+	}
+
+	w.logger.Info("WakuV2 PeerID", zap.Stringer("id", w.node.Host().ID()))
+
+	w.discoverAndConnectPeers()
+
+	if w.cfg.EnableDiscV5 {
+		err := w.node.DiscV5().Start(w.ctx)
+		if err != nil {
+			return err
+		}
+	}
+
+	w.wg.Add(1)
+	go func() {
+		defer w.wg.Done()
+		ticker := time.NewTicker(5 * time.Second)
+		defer ticker.Stop()
+		for {
+			select {
+			case <-w.ctx.Done():
+				return
+			case <-ticker.C:
+				w.checkForConnectionChanges()
+			case <-w.topicHealthStatusChan:
+				// TODO: https://github.com/status-im/status-go/issues/4628
+			case <-w.connectionNotifChan:
+				w.checkForConnectionChanges()
+			}
+		}
+	}()
+
+	go w.telemetryBandwidthStats(w.cfg.TelemetryServerURL)
+	//TODO: commenting for now so that only fleet nodes are used.
+	//Need to uncomment once filter peer scoring etc is implemented.
+	go w.runPeerExchangeLoop()
+
+	if w.cfg.EnableMissingMessageVerification {
+
+		w.missingMsgVerifier = missing.NewMissingMessageVerifier(
+			w.node.Store(),
+			w,
+			w.node.Timesource(),
+			w.logger)
+
+		w.missingMsgVerifier.Start(w.ctx)
+
+		w.wg.Add(1)
+		go func() {
+			defer w.wg.Done() // `defer` was missing: Done fired immediately, so Stop's Wait could return while this loop still ran
+			for {
+				select {
+				case <-w.ctx.Done():
+					return
+				case envelope := <-w.missingMsgVerifier.C:
+					err := w.OnNewEnvelopes(envelope, common.MissingMessageType, false) // shadow err: writing the outer err here raced with the main goroutine below
+					if err != nil {
+						w.logger.Error("OnNewEnvelopes error", zap.Error(err))
+					}
+				}
+			}
+		}()
+	}
+
+	if w.cfg.LightClient {
+		// Create FilterManager that will maintain peer connectivity
+		// for installed filters
+		w.filterManager = filterapi.NewFilterManager(w.ctx, w.logger, w.cfg.MinPeersForFilter,
+			w,
+			w.node.FilterLightnode())
+	}
+
+	err = w.setupRelaySubscriptions()
+	if err != nil {
+		return err
+	}
+
+	numCPU := runtime.NumCPU()
+	for i := 0; i < numCPU; i++ {
+		go w.processQueueLoop()
+	}
+
+	go w.broadcast()
+
+	go w.sendQueue.Start(w.ctx)
+
+	if w.cfg.EnableStoreConfirmationForMessagesSent {
+		w.confirmMessagesSent()
+	}
+
+	// we should wait `seedBootnodesForDiscV5` shutdown smoothly before set w.ctx to nil within `w.Stop()`
+	go w.seedBootnodesForDiscV5()
+
+	return nil
+}
+
+func (w *Waku) checkForConnectionChanges() {
+
+ isOnline := len(w.node.Host().Network().Peers()) > 0
+
+ w.connStatusMu.Lock()
+
+ latestConnStatus := types.ConnStatus{
+ IsOnline: isOnline,
+ Peers: FormatPeerStats(w.node),
+ }
+
+ w.logger.Debug("peer stats",
+ zap.Int("peersCount", len(latestConnStatus.Peers)),
+ zap.Any("stats", latestConnStatus))
+ for k, subs := range w.connStatusSubscriptions {
+ if !subs.Send(latestConnStatus) {
+ delete(w.connStatusSubscriptions, k)
+ }
+ }
+
+ w.connStatusMu.Unlock()
+
+ if w.onPeerStats != nil {
+ w.onPeerStats(latestConnStatus)
+ }
+
+ if w.statusTelemetryClient != nil {
+ connFailures := FormatPeerConnFailures(w.node)
+ w.statusTelemetryClient.PushPeerCount(w.ctx, w.PeerCount())
+ w.statusTelemetryClient.PushPeerConnFailures(w.ctx, connFailures)
+ }
+
+ w.ConnectionChanged(connection.State{
+ Type: w.state.Type, //setting state type as previous one since there won't be a change here
+ Offline: !latestConnStatus.IsOnline,
+ })
+}
+
+func (w *Waku) confirmMessagesSent() {
+ w.messageSentCheck = publish.NewMessageSentCheck(w.ctx, w.node.Store(), w.node.Timesource(), w.logger)
+ go w.messageSentCheck.Start()
+
+ go func() {
+ for {
+ select {
+ case <-w.ctx.Done():
+ return
+ case hash := <-w.messageSentCheck.MessageStoredChan:
+ w.SendEnvelopeEvent(common.EnvelopeEvent{
+ Hash: hash,
+ Event: common.EventEnvelopeSent,
+ })
+ case hash := <-w.messageSentCheck.MessageExpiredChan:
+ w.SendEnvelopeEvent(common.EnvelopeEvent{
+ Hash: hash,
+ Event: common.EventEnvelopeExpired,
+ })
+ }
+ }
+ }()
+}
+
+func (w *Waku) MessageExists(mh pb.MessageHash) (bool, error) {
+ w.poolMu.Lock()
+ defer w.poolMu.Unlock()
+ return w.envelopeCache.Has(gethcommon.Hash(mh)), nil
+}
+
+func (w *Waku) SetTopicsToVerifyForMissingMessages(peerID peer.ID, pubsubTopic string, contentTopics []string) {
+ if !w.cfg.EnableMissingMessageVerification {
+ return
+ }
+
+ w.missingMsgVerifier.SetCriteriaInterest(peerID, protocol.NewContentFilter(pubsubTopic, contentTopics...))
+}
+
+func (w *Waku) setupRelaySubscriptions() error {
+ if w.cfg.LightClient {
+ return nil
+ }
+
+ if w.protectedTopicStore != nil {
+ protectedTopics, err := w.protectedTopicStore.ProtectedTopics()
+ if err != nil {
+ return err
+ }
+
+ for _, pt := range protectedTopics {
+ // Adding subscription to protected topics
+ err = w.subscribeToPubsubTopicWithWakuRelay(pt.Topic, pt.PubKey)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ err := w.subscribeToPubsubTopicWithWakuRelay(w.cfg.DefaultShardPubsubTopic, nil)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Stop implements node.Service, stopping the background data propagation thread
+// of the Waku protocol.
+func (w *Waku) Stop() error {
+ w.cancel()
+
+ w.envelopeCache.Stop()
+
+ w.node.Stop()
+
+ if w.protectedTopicStore != nil {
+ err := w.protectedTopicStore.Close()
+ if err != nil {
+ return err
+ }
+ }
+
+ close(w.goingOnline)
+ w.wg.Wait()
+
+ w.ctx = nil
+ w.cancel = nil
+
+ return nil
+}
+
+func (w *Waku) OnNewEnvelopes(envelope *protocol.Envelope, msgType common.MessageType, processImmediately bool) error {
+ if envelope == nil {
+ return nil
+ }
+
+ recvMessage := common.NewReceivedMessage(envelope, msgType)
+ if recvMessage == nil {
+ return nil
+ }
+
+ if w.statusTelemetryClient != nil {
+ w.statusTelemetryClient.PushReceivedEnvelope(w.ctx, envelope)
+ }
+
+ logger := w.logger.With(
+ zap.String("messageType", msgType),
+ zap.Stringer("envelopeHash", envelope.Hash()),
+ zap.String("pubsubTopic", envelope.PubsubTopic()),
+ zap.String("contentTopic", envelope.Message().ContentTopic),
+ logutils.WakuMessageTimestamp("timestamp", envelope.Message().Timestamp),
+ )
+
+ logger.Debug("received new envelope")
+ trouble := false
+
+ _, err := w.add(recvMessage, processImmediately)
+ if err != nil {
+ logger.Info("invalid envelope received", zap.Error(err))
+ trouble = true
+ }
+
+ common.EnvelopesValidatedCounter.Inc()
+
+ if trouble {
+ return errors.New("received invalid envelope")
+ }
+
+ return nil
+}
+
+// addEnvelope adds an envelope to the envelope map, used for sending
+func (w *Waku) addEnvelope(envelope *common.ReceivedMessage) {
+ w.poolMu.Lock()
+ w.envelopeCache.Set(envelope.Hash(), envelope, ttlcache.DefaultTTL)
+ w.poolMu.Unlock()
+}
+
+func (w *Waku) add(recvMessage *common.ReceivedMessage, processImmediately bool) (bool, error) {
+ common.EnvelopesReceivedCounter.Inc()
+
+ w.poolMu.Lock()
+ envelope := w.envelopeCache.Get(recvMessage.Hash())
+ alreadyCached := envelope != nil
+ w.poolMu.Unlock()
+
+ if !alreadyCached {
+ recvMessage.Processed.Store(false)
+ w.addEnvelope(recvMessage)
+ }
+
+ logger := w.logger.With(zap.String("envelopeHash", recvMessage.Hash().Hex()))
+
+ if alreadyCached {
+ logger.Debug("w envelope already cached")
+ common.EnvelopesCachedCounter.WithLabelValues("hit").Inc()
+ } else {
+ logger.Debug("cached w envelope")
+ common.EnvelopesCachedCounter.WithLabelValues("miss").Inc()
+ common.EnvelopesSizeMeter.Observe(float64(len(recvMessage.Envelope.Message().Payload)))
+ }
+
+ if !alreadyCached || !envelope.Value().Processed.Load() {
+ if processImmediately {
+ logger.Debug("immediately processing envelope")
+ w.processMessage(recvMessage)
+ } else {
+ logger.Debug("posting event")
+ w.postEvent(recvMessage) // notify the local node about the new message
+ }
+ }
+
+ return true, nil
+}
+
+// postEvent queues the message for further processing.
+func (w *Waku) postEvent(envelope *common.ReceivedMessage) {
+ w.msgQueue <- envelope
+}
+
+// processQueueLoop delivers the messages to the watchers during the lifetime of the waku node.
+func (w *Waku) processQueueLoop() {
+ if w.ctx == nil {
+ return
+ }
+ for {
+ select {
+ case <-w.ctx.Done():
+ return
+ case e := <-w.msgQueue:
+ w.processMessage(e)
+ }
+ }
+}
+
+func (w *Waku) processMessage(e *common.ReceivedMessage) {
+ logger := w.logger.With(
+ zap.Stringer("envelopeHash", e.Envelope.Hash()),
+ zap.String("pubsubTopic", e.PubsubTopic),
+ zap.String("contentTopic", e.ContentTopic.ContentTopic()),
+ zap.Int64("timestamp", e.Envelope.Message().GetTimestamp()),
+ )
+
+ if e.MsgType == common.StoreMessageType {
+ // We need to insert it first, and then remove it if not matched,
+ // as messages are processed asynchronously
+ w.storeMsgIDsMu.Lock()
+ w.storeMsgIDs[e.Hash()] = true
+ w.storeMsgIDsMu.Unlock()
+ }
+
+ ephemeral := e.Envelope.Message().Ephemeral
+ if w.cfg.EnableStoreConfirmationForMessagesSent && e.MsgType == common.SendMessageType && (ephemeral == nil || !*ephemeral) {
+ w.messageSentCheck.Add(e.PubsubTopic, e.Hash(), e.Sent)
+ }
+
+ matched := w.filters.NotifyWatchers(e)
+
+ // If not matched we remove it
+ if !matched {
+ logger.Debug("filters did not match")
+ w.storeMsgIDsMu.Lock()
+ delete(w.storeMsgIDs, e.Hash())
+ w.storeMsgIDsMu.Unlock()
+ } else {
+ logger.Debug("filters did match")
+ e.Processed.Store(true)
+ }
+
+ w.envelopeFeed.Send(common.EnvelopeEvent{
+ Topic: e.ContentTopic,
+ Hash: e.Hash(),
+ Event: common.EventEnvelopeAvailable,
+ })
+}
+
+// GetEnvelope retrieves an envelope from the message queue by its hash.
+// It returns nil if the envelope can not be found.
+func (w *Waku) GetEnvelope(hash gethcommon.Hash) *common.ReceivedMessage {
+ w.poolMu.RLock()
+ defer w.poolMu.RUnlock()
+
+ envelope := w.envelopeCache.Get(hash)
+ if envelope == nil {
+ return nil
+ }
+
+ return envelope.Value()
+}
+
+// isEnvelopeCached checks if envelope with specific hash has already been received and cached.
+func (w *Waku) IsEnvelopeCached(hash gethcommon.Hash) bool {
+ w.poolMu.Lock()
+ defer w.poolMu.Unlock()
+
+ return w.envelopeCache.Has(hash)
+}
+
+func (w *Waku) ClearEnvelopesCache() {
+ w.poolMu.Lock()
+ defer w.poolMu.Unlock()
+
+ w.envelopeCache.Stop()
+ w.envelopeCache = newTTLCache()
+}
+
+func (w *Waku) PeerCount() int {
+ return w.node.PeerCount()
+}
+
+func (w *Waku) Peers() types.PeerStats {
+ return FormatPeerStats(w.node)
+}
+
+func (w *Waku) RelayPeersByTopic(topic string) (*types.PeerList, error) {
+ if w.cfg.LightClient {
+ return nil, errors.New("only available in relay mode")
+ }
+
+ return &types.PeerList{
+ FullMeshPeers: w.node.Relay().PubSub().MeshPeers(topic),
+ AllPeers: w.node.Relay().PubSub().ListPeers(topic),
+ }, nil
+}
+
+func (w *Waku) ListenAddresses() ([]multiaddr.Multiaddr, error) {
+ return w.node.ListenAddresses(), nil
+}
+
+func (w *Waku) ENR() (*enode.Node, error) {
+ enr := w.node.ENR()
+ if enr == nil {
+ return nil, errors.New("enr not available")
+ }
+
+ return enr, nil
+}
+
+func (w *Waku) SubscribeToPubsubTopic(topic string, pubkey *ecdsa.PublicKey) error {
+ topic = w.GetPubsubTopic(topic)
+
+ if !w.cfg.LightClient {
+ err := w.subscribeToPubsubTopicWithWakuRelay(topic, pubkey)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (w *Waku) UnsubscribeFromPubsubTopic(topic string) error {
+ topic = w.GetPubsubTopic(topic)
+
+ if !w.cfg.LightClient {
+ err := w.unsubscribeFromPubsubTopicWithWakuRelay(topic)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (w *Waku) RetrievePubsubTopicKey(topic string) (*ecdsa.PrivateKey, error) {
+ topic = w.GetPubsubTopic(topic)
+ if w.protectedTopicStore == nil {
+ return nil, nil
+ }
+
+ return w.protectedTopicStore.FetchPrivateKey(topic)
+}
+
+func (w *Waku) StorePubsubTopicKey(topic string, privKey *ecdsa.PrivateKey) error {
+ topic = w.GetPubsubTopic(topic)
+ if w.protectedTopicStore == nil {
+ return nil
+ }
+
+ return w.protectedTopicStore.Insert(topic, privKey, &privKey.PublicKey)
+}
+
+func (w *Waku) RemovePubsubTopicKey(topic string) error {
+ topic = w.GetPubsubTopic(topic)
+ if w.protectedTopicStore == nil {
+ return nil
+ }
+
+ return w.protectedTopicStore.Delete(topic)
+}
+
+func (w *Waku) StartDiscV5() error {
+ if w.node.DiscV5() == nil {
+ return errors.New("discv5 is not setup")
+ }
+
+ return w.node.DiscV5().Start(w.ctx)
+}
+
+func (w *Waku) StopDiscV5() error {
+ if w.node.DiscV5() == nil {
+ return errors.New("discv5 is not setup")
+ }
+
+ w.node.DiscV5().Stop()
+ return nil
+}
+
+func (w *Waku) handleNetworkChangeFromApp(state connection.State) {
+ //If connection state is reported by something other than peerCount becoming 0 e.g from mobile app, disconnect all peers
+ if (state.Offline && len(w.node.Host().Network().Peers()) > 0) ||
+ (w.state.Type != state.Type && !w.state.Offline && !state.Offline) { // network switched between wifi and cellular
+ w.logger.Info("connection switched or offline detected via mobile, disconnecting all peers")
+ w.node.DisconnectAllPeers()
+ if w.cfg.LightClient {
+ w.filterManager.NetworkChange()
+ }
+ }
+}
+
+func (w *Waku) ConnectionChanged(state connection.State) {
+ isOnline := !state.Offline
+ if w.cfg.LightClient {
+ //TODO: Update this as per https://github.com/waku-org/go-waku/issues/1114
+ go w.filterManager.OnConnectionStatusChange("", isOnline)
+ w.handleNetworkChangeFromApp(state)
+ } else {
+ // for lightClient state update and onlineChange is handled in filterManager.
+ // going online
+ if isOnline && !w.onlineChecker.IsOnline() {
+ //TODO: analyze if we need to discover and connect to peers for relay.
+ w.discoverAndConnectPeers()
+ select {
+ case w.goingOnline <- struct{}{}:
+ default:
+ w.logger.Warn("could not write on connection changed channel")
+ }
+ }
+ // update state
+ w.onlineChecker.SetOnline(isOnline)
+ }
+ w.state = state
+}
+
+// seedBootnodesForDiscV5 tries to fetch bootnodes
+// from an ENR periodically.
+// It backs off exponentially until maxRetries, at which point it restarts from 0
+// It also restarts if there's a connection change signalled from the client
+func (w *Waku) seedBootnodesForDiscV5() {
+ w.wg.Add(1)
+ defer w.wg.Done()
+
+ if !w.cfg.EnableDiscV5 || w.node.DiscV5() == nil {
+ return
+ }
+
+ ticker := time.NewTicker(500 * time.Millisecond)
+ defer ticker.Stop()
+ var retries = 0
+
+ now := func() int64 {
+ return time.Now().UnixNano() / int64(time.Millisecond)
+
+ }
+
+ var lastTry = now()
+
+ canQuery := func() bool {
+ backoff := bootnodesQueryBackoffMs * int64(math.Exp2(float64(retries)))
+
+ return lastTry+backoff < now()
+ }
+
+ for {
+ select {
+ case <-ticker.C:
+ if w.seededBootnodesForDiscV5 && len(w.node.Host().Network().Peers()) > 3 {
+ w.logger.Debug("not querying bootnodes", zap.Bool("seeded", w.seededBootnodesForDiscV5), zap.Int("peer-count", len(w.node.Host().Network().Peers())))
+ continue
+ }
+ if canQuery() {
+ w.logger.Info("querying bootnodes to restore connectivity", zap.Int("peer-count", len(w.node.Host().Network().Peers())))
+ err := w.restartDiscV5()
+ if err != nil {
+ w.logger.Warn("failed to restart discv5", zap.Error(err))
+ }
+
+ lastTry = now()
+ retries++
+ // We reset the retries after a while and restart
+ if retries > bootnodesMaxRetries {
+ retries = 0
+ }
+
+ } else {
+ w.logger.Info("can't query bootnodes", zap.Int("peer-count", len(w.node.Host().Network().Peers())), zap.Int64("lastTry", lastTry), zap.Int64("now", now()), zap.Int64("backoff", bootnodesQueryBackoffMs*int64(math.Exp2(float64(retries)))), zap.Int("retries", retries))
+
+ }
+ // If we go online, trigger immediately
+ case <-w.goingOnline:
+ if w.cfg.EnableDiscV5 {
+ if canQuery() {
+ err := w.restartDiscV5()
+ if err != nil {
+ w.logger.Warn("failed to restart discv5", zap.Error(err))
+ }
+
+ }
+ retries = 0
+ lastTry = now()
+ }
+
+ case <-w.ctx.Done():
+ w.logger.Debug("bootnode seeding stopped")
+ return
+ }
+ }
+}
+
+// Restart discv5, re-retrieving bootstrap nodes
+func (w *Waku) restartDiscV5() error {
+ ctx, cancel := context.WithTimeout(w.ctx, 30*time.Second)
+ defer cancel()
+ bootnodes, err := w.getDiscV5BootstrapNodes(ctx, w.discV5BootstrapNodes)
+ if err != nil {
+ return err
+ }
+ if len(bootnodes) == 0 {
+ return errors.New("failed to fetch bootnodes")
+ }
+
+ if w.node.DiscV5().ErrOnNotRunning() != nil {
+ w.logger.Info("is not started restarting")
+ err := w.node.DiscV5().Start(w.ctx)
+ if err != nil {
+ w.logger.Error("Could not start DiscV5", zap.Error(err))
+ }
+ } else {
+ w.node.DiscV5().Stop()
+ w.logger.Info("is started restarting")
+
+ select {
+ case <-w.ctx.Done(): // Don't start discv5 if we are stopping waku
+ return nil
+ default:
+ }
+
+ err := w.node.DiscV5().Start(w.ctx)
+ if err != nil {
+ w.logger.Error("Could not start DiscV5", zap.Error(err))
+ }
+ }
+
+ w.logger.Info("restarting discv5 with nodes", zap.Any("nodes", bootnodes))
+ return w.node.SetDiscV5Bootnodes(bootnodes)
+}
+
+func (w *Waku) AddStorePeer(address multiaddr.Multiaddr) (peer.ID, error) {
+ peerID, err := w.node.AddPeer(address, wps.Static, w.cfg.DefaultShardedPubsubTopics, store.StoreQueryID_v300)
+ if err != nil {
+ return "", err
+ }
+ return peerID, nil
+}
+
+func (w *Waku) timestamp() int64 {
+ return w.timesource.Now().UnixNano()
+}
+
+func (w *Waku) AddRelayPeer(address multiaddr.Multiaddr) (peer.ID, error) {
+ peerID, err := w.node.AddPeer(address, wps.Static, w.cfg.DefaultShardedPubsubTopics, relay.WakuRelayID_v200)
+ if err != nil {
+ return "", err
+ }
+ return peerID, nil
+}
+
+func (w *Waku) DialPeer(address multiaddr.Multiaddr) error {
+ ctx, cancel := context.WithTimeout(w.ctx, requestTimeout)
+ defer cancel()
+ return w.node.DialPeerWithMultiAddress(ctx, address)
+}
+
+func (w *Waku) DialPeerByID(peerID peer.ID) error {
+ ctx, cancel := context.WithTimeout(w.ctx, requestTimeout)
+ defer cancel()
+ return w.node.DialPeerByID(ctx, peerID)
+}
+
+func (w *Waku) DropPeer(peerID peer.ID) error {
+ return w.node.ClosePeerById(peerID)
+}
+
+func (w *Waku) ProcessingP2PMessages() bool {
+ w.storeMsgIDsMu.Lock()
+ defer w.storeMsgIDsMu.Unlock()
+ return len(w.storeMsgIDs) != 0
+}
+
+func (w *Waku) MarkP2PMessageAsProcessed(hash gethcommon.Hash) {
+ w.storeMsgIDsMu.Lock()
+ defer w.storeMsgIDsMu.Unlock()
+ delete(w.storeMsgIDs, hash)
+}
+
+func (w *Waku) Clean() error {
+ w.msgQueue = make(chan *common.ReceivedMessage, messageQueueLimit)
+
+ for _, f := range w.filters.All() {
+ f.Messages = common.NewMemoryMessageStore()
+ }
+
+ return nil
+}
+
+func (w *Waku) PeerID() peer.ID {
+ return w.node.Host().ID()
+}
+
+func (w *Waku) Peerstore() peerstore.Peerstore {
+ return w.node.Host().Peerstore()
+}
+
+// validatePrivateKey checks the format of the given private key.
+func validatePrivateKey(k *ecdsa.PrivateKey) bool {
+ if k == nil || k.D == nil || k.D.Sign() == 0 {
+ return false
+ }
+ return common.ValidatePublicKey(&k.PublicKey)
+}
+
+// makeDeterministicID generates a deterministic ID, based on a given input
+func makeDeterministicID(input string, keyLen int) (id string, err error) {
+ buf := pbkdf2.Key([]byte(input), nil, 4096, keyLen, sha256.New)
+ if !common.ValidateDataIntegrity(buf, common.KeyIDSize) {
+ return "", fmt.Errorf("error in GenerateDeterministicID: failed to generate key")
+ }
+ id = gethcommon.Bytes2Hex(buf)
+ return id, err
+}
+
+// toDeterministicID reviews incoming id, and transforms it to format
+// expected internally be private key store. Originally, public keys
+// were used as keys, now random keys are being used. And in order to
+// make it easier to consume, we now allow both random IDs and public
+// keys to be passed.
+func toDeterministicID(id string, expectedLen int) (string, error) {
+ if len(id) != (expectedLen * 2) { // we received hex key, so number of chars in id is doubled
+ var err error
+ id, err = makeDeterministicID(id, expectedLen)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ return id, nil
+}
+
+func FormatPeerStats(wakuNode *node.WakuNode) types.PeerStats {
+ p := make(types.PeerStats)
+ for k, v := range wakuNode.PeerStats() {
+ p[k] = types.WakuV2Peer{
+ Addresses: utils.EncapsulatePeerID(k, wakuNode.Host().Peerstore().PeerInfo(k).Addrs...),
+ Protocols: v,
+ }
+ }
+ return p
+}
+
+func (w *Waku) StoreNode() *store.WakuStore {
+ return w.node.Store()
+}
+
+func FormatPeerConnFailures(wakuNode *node.WakuNode) map[string]int {
+ p := make(map[string]int)
+ for _, peerID := range wakuNode.Host().Network().Peers() {
+ peerInfo := wakuNode.Host().Peerstore().PeerInfo(peerID)
+ connFailures := wakuNode.Host().Peerstore().(wps.WakuPeerstore).ConnFailures(peerInfo)
+ if connFailures > 0 {
+ p[peerID.String()] = connFailures
+ }
+ }
+ return p
+}
+
+func (w *Waku) LegacyStoreNode() legacy_store.Store {
+ return w.node.LegacyStore()
+}
+
+func (w *Waku) WakuLightpushPublish(message *pb.WakuMessage, pubsubTopic string) (string, error) {
+ msgHash, err := w.node.Lightpush().Publish(w.ctx, message, lightpush.WithPubSubTopic(pubsubTopic))
+ if err != nil {
+ return "", err
+ }
+ return msgHash.String(), nil
+}
+
+func (w *Waku) WakuRelayPublish(message *pb.WakuMessage, pubsubTopic string) (string, error) {
+ msgHash, err := w.node.Relay().Publish(w.ctx, message, relay.WithPubSubTopic(pubsubTopic))
+ if err != nil {
+ return "", err
+ }
+ return msgHash.String(), nil
+}
+
+func (w *Waku) ListPeersInMesh(pubsubTopic string) (int, error) {
+ listPeers := w.node.Relay().PubSub().ListPeers(pubsubTopic)
+ return len(listPeers), nil
+}
diff --git a/wakuv2/message_publishing.go b/wakuv2/message_publishing.go
index 5603275bf..25f8f57d8 100644
--- a/wakuv2/message_publishing.go
+++ b/wakuv2/message_publishing.go
@@ -1,7 +1,6 @@
package wakuv2
import (
- "encoding/json"
"errors"
"go.uber.org/zap"
@@ -35,7 +34,7 @@ func (pm PublishMethod) String() string {
// Send injects a message into the waku send queue, to be distributed in the
// network in the coming cycles.
-func (w *NWaku) Send(pubsubTopic string, msg *pb.WakuMessage, priority *int) ([]byte, error) {
+func (w *Waku) Send(pubsubTopic string, msg *pb.WakuMessage, priority *int) ([]byte, error) {
pubsubTopic = w.GetPubsubTopic(pubsubTopic)
if w.protectedTopicStore != nil {
privKey, err := w.protectedTopicStore.FetchPrivateKey(pubsubTopic)
@@ -77,7 +76,7 @@ func (w *NWaku) Send(pubsubTopic string, msg *pb.WakuMessage, priority *int) ([]
return envelope.Hash().Bytes(), nil
}
-func (w *NWaku) broadcast() {
+func (w *Waku) broadcast() {
for {
var envelope *protocol.Envelope
@@ -103,11 +102,7 @@ func (w *NWaku) broadcast() {
publishMethod = LightPush
fn = func(env *protocol.Envelope, logger *zap.Logger) error {
logger.Info("publishing message via lightpush")
- jsonMsg, err := json.Marshal(env.Message())
- if err != nil {
- return err
- }
- _, err = w.WakuLightpushPublish(string(jsonMsg), env.PubsubTopic())
+ _, err := w.WakuLightpushPublish(env.Message(), env.PubsubTopic())
return err
}
} else {
@@ -119,14 +114,8 @@ func (w *NWaku) broadcast() {
}
logger.Info("publishing message via relay", zap.Int("peerCnt", peerCnt))
- timeoutMs := 1000
- msg, err := json.Marshal(env.Message())
- if err != nil {
- return err
- }
-
- _, err = w.WakuRelayPublish(env.PubsubTopic(), string(msg), timeoutMs)
+ _, err = w.WakuRelayPublish(env.Message(), env.PubsubTopic())
return err
}
}
@@ -153,7 +142,7 @@ func (w *NWaku) broadcast() {
}
}
-func (w *NWaku) publishEnvelope(envelope *protocol.Envelope, publishFn publish.PublishFn, logger *zap.Logger) {
+func (w *Waku) publishEnvelope(envelope *protocol.Envelope, publishFn publish.PublishFn, logger *zap.Logger) {
defer w.wg.Done()
if err := publishFn(envelope, logger); err != nil {
diff --git a/wakuv2/nwaku.go b/wakuv2/nwaku.go
index 258a17370..0fcb0b236 100644
--- a/wakuv2/nwaku.go
+++ b/wakuv2/nwaku.go
@@ -1,10 +1,13 @@
+//go:build use_nwaku
+// +build use_nwaku
+
package wakuv2
/*
- #cgo LDFLAGS: -L../vendor/nwaku/build/ -lnegentropy -lwaku -Wl,--allow-multiple-definition
- #cgo LDFLAGS: -Lvendor/nwaku/build/ -Wl,-rpath,vendor/nwaku/build/
+ #cgo LDFLAGS: -L../third_party/nwaku/vendor/negentropy/cpp/ -lnegentropy -L../third_party/nwaku/build/ -lwaku -lm -ldl -pthread -lminiupnpc -L../third_party/nwaku/vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc/build/ -lnatpmp -L../third_party/nwaku/vendor/nim-nat-traversal/vendor/libnatpmp-upstream/ -L../third_party/nwaku/vendor/nim-libbacktrace/install/usr/lib/ -lbacktrace -Wl,--allow-multiple-definition
+ #cgo LDFLAGS: -Wl,-rpath,../third_party/nwaku/build/
- #include "../vendor/nwaku/library/libwaku.h"
+ #include "../third_party/nwaku/library/libwaku.h"
#include
#include
@@ -100,7 +103,7 @@ package wakuv2
}
void cGoWakuSetEventCallback(void* wakuCtx) {
- // The 'globalEventCallback' Go function is shared amongst all possible NWaku instances.
+ // The 'globalEventCallback' Go function is shared amongst all possible Waku instances.
// Given that the 'globalEventCallback' is shared, we pass again the
// wakuCtx instance but in this case is needed to pick up the correct method
@@ -250,8 +253,6 @@ import (
"encoding/json"
"errors"
"fmt"
- "io"
- "net/http"
"os"
"os/signal"
"runtime"
@@ -331,7 +332,7 @@ type ITelemetryClient interface {
PushPeerConnFailures(ctx context.Context, peerConnFailures map[string]int)
}
-func (w *NWaku) SetStatusTelemetryClient(client ITelemetryClient) {
+func (w *Waku) SetStatusTelemetryClient(client ITelemetryClient) {
w.statusTelemetryClient = client
}
@@ -341,7 +342,7 @@ func newTTLCache() *ttlcache.Cache[gethcommon.Hash, *common.ReceivedMessage] {
return cache
}
-func (w *NWaku) SubscribeToConnStatusChanges() *types.ConnStatusSubscription {
+func (w *Waku) SubscribeToConnStatusChanges() *types.ConnStatusSubscription {
w.connStatusMu.Lock()
defer w.connStatusMu.Unlock()
subscription := types.NewConnStatusSubscription()
@@ -349,7 +350,7 @@ func (w *NWaku) SubscribeToConnStatusChanges() *types.ConnStatusSubscription {
return subscription
}
-func (w *NWaku) getDiscV5BootstrapNodes(ctx context.Context, addresses []string) ([]*enode.Node, error) {
+func (w *Waku) getDiscV5BootstrapNodes(ctx context.Context, addresses []string) ([]*enode.Node, error) {
wg := sync.WaitGroup{}
mu := sync.Mutex{}
var result []*enode.Node
@@ -397,7 +398,7 @@ func (w *NWaku) getDiscV5BootstrapNodes(ctx context.Context, addresses []string)
type fnApplyToEachPeer func(d dnsdisc.DiscoveredNode, wg *sync.WaitGroup)
-func (w *NWaku) dnsDiscover(ctx context.Context, enrtreeAddress string, apply fnApplyToEachPeer) error {
+func (w *Waku) dnsDiscover(ctx context.Context, enrtreeAddress string, apply fnApplyToEachPeer) error {
w.logger.Info("retrieving nodes", zap.String("enr", enrtreeAddress))
ctx, cancel := context.WithTimeout(ctx, requestTimeout)
defer cancel()
@@ -440,7 +441,7 @@ func (w *NWaku) dnsDiscover(ctx context.Context, enrtreeAddress string, apply fn
return nil
}
-func (w *NWaku) discoverAndConnectPeers() {
+func (w *Waku) discoverAndConnectPeers() {
fnApply := func(d dnsdisc.DiscoveredNode, wg *sync.WaitGroup) {
defer wg.Done()
if len(d.PeerInfo.Addrs) != 0 {
@@ -476,14 +477,14 @@ func (w *NWaku) discoverAndConnectPeers() {
}
}
-func (w *NWaku) connect(peerInfo peer.AddrInfo, enr *enode.Node, origin wps.Origin) {
+func (w *Waku) connect(peerInfo peer.AddrInfo, enr *enode.Node, origin wps.Origin) {
// Connection will be prunned eventually by the connection manager if needed
// The peer connector in go-waku uses Connect, so it will execute identify as part of its
addr := peerInfo.Addrs[0]
w.WakuConnect(addr.String(), 1000)
}
-func (w *NWaku) telemetryBandwidthStats(telemetryServerURL string) {
+func (w *Waku) telemetryBandwidthStats(telemetryServerURL string) {
w.wg.Add(1)
defer w.wg.Done()
@@ -514,7 +515,7 @@ func (w *NWaku) telemetryBandwidthStats(telemetryServerURL string) {
}
}
-func (w *NWaku) GetStats() types.StatsSummary {
+func (w *Waku) GetStats() types.StatsSummary {
stats := w.bandwidthCounter.GetBandwidthTotals()
return types.StatsSummary{
UploadRate: uint64(stats.RateOut),
@@ -522,7 +523,7 @@ func (w *NWaku) GetStats() types.StatsSummary {
}
}
-func (w *NWaku) runPeerExchangeLoop() {
+func (w *Waku) runPeerExchangeLoop() {
w.wg.Add(1)
defer w.wg.Done()
@@ -572,7 +573,7 @@ func (w *NWaku) runPeerExchangeLoop() {
}
}
-func (w *NWaku) GetPubsubTopic(topic string) string {
+func (w *Waku) GetPubsubTopic(topic string) string {
if topic == "" {
topic = w.cfg.DefaultShardPubsubTopic
}
@@ -581,12 +582,12 @@ func (w *NWaku) GetPubsubTopic(topic string) string {
}
// CurrentTime returns current time.
-func (w *NWaku) CurrentTime() time.Time {
+func (w *Waku) CurrentTime() time.Time {
return w.timesource.Now()
}
-// APIs returns the RPC descriptors the NWaku implementation offers
-func (w *NWaku) APIs() []rpc.API {
+// APIs returns the RPC descriptors the Waku implementation offers
+func (w *Waku) APIs() []rpc.API {
return []rpc.API{
{
Namespace: Name,
@@ -598,23 +599,23 @@ func (w *NWaku) APIs() []rpc.API {
}
// Protocols returns the waku sub-protocols ran by this particular client.
-func (w *NWaku) Protocols() []p2p.Protocol {
+func (w *Waku) Protocols() []p2p.Protocol {
return []p2p.Protocol{}
}
-func (w *NWaku) SendEnvelopeEvent(event common.EnvelopeEvent) int {
+func (w *Waku) SendEnvelopeEvent(event common.EnvelopeEvent) int {
return w.envelopeFeed.Send(event)
}
// SubscribeEnvelopeEvents subscribes to envelopes feed.
// In order to prevent blocking waku producers events must be amply buffered.
-func (w *NWaku) SubscribeEnvelopeEvents(events chan<- common.EnvelopeEvent) event.Subscription {
+func (w *Waku) SubscribeEnvelopeEvents(events chan<- common.EnvelopeEvent) event.Subscription {
return w.envelopeFeed.Subscribe(events)
}
// NewKeyPair generates a new cryptographic identity for the client, and injects
// it into the known identities for message decryption. Returns ID of the new key pair.
-func (w *NWaku) NewKeyPair() (string, error) {
+func (w *Waku) NewKeyPair() (string, error) {
key, err := crypto.GenerateKey()
if err != nil || !validatePrivateKey(key) {
key, err = crypto.GenerateKey() // retry once
@@ -642,7 +643,7 @@ func (w *NWaku) NewKeyPair() (string, error) {
}
// DeleteKeyPair deletes the specified key if it exists.
-func (w *NWaku) DeleteKeyPair(key string) bool {
+func (w *Waku) DeleteKeyPair(key string) bool {
deterministicID, err := toDeterministicID(key, common.KeyIDSize)
if err != nil {
return false
@@ -659,7 +660,7 @@ func (w *NWaku) DeleteKeyPair(key string) bool {
}
// AddKeyPair imports a asymmetric private key and returns it identifier.
-func (w *NWaku) AddKeyPair(key *ecdsa.PrivateKey) (string, error) {
+func (w *Waku) AddKeyPair(key *ecdsa.PrivateKey) (string, error) {
id, err := makeDeterministicID(hexutil.Encode(crypto.FromECDSAPub(&key.PublicKey)), common.KeyIDSize)
if err != nil {
return "", err
@@ -677,7 +678,7 @@ func (w *NWaku) AddKeyPair(key *ecdsa.PrivateKey) (string, error) {
// SelectKeyPair adds cryptographic identity, and makes sure
// that it is the only private key known to the node.
-func (w *NWaku) SelectKeyPair(key *ecdsa.PrivateKey) error {
+func (w *Waku) SelectKeyPair(key *ecdsa.PrivateKey) error {
id, err := makeDeterministicID(hexutil.Encode(crypto.FromECDSAPub(&key.PublicKey)), common.KeyIDSize)
if err != nil {
return err
@@ -693,7 +694,7 @@ func (w *NWaku) SelectKeyPair(key *ecdsa.PrivateKey) error {
}
// DeleteKeyPairs removes all cryptographic identities known to the node
-func (w *NWaku) DeleteKeyPairs() error {
+func (w *Waku) DeleteKeyPairs() error {
w.keyMu.Lock()
defer w.keyMu.Unlock()
@@ -704,7 +705,7 @@ func (w *NWaku) DeleteKeyPairs() error {
// HasKeyPair checks if the waku node is configured with the private key
// of the specified public pair.
-func (w *NWaku) HasKeyPair(id string) bool {
+func (w *Waku) HasKeyPair(id string) bool {
deterministicID, err := toDeterministicID(id, common.KeyIDSize)
if err != nil {
return false
@@ -716,7 +717,7 @@ func (w *NWaku) HasKeyPair(id string) bool {
}
// GetPrivateKey retrieves the private key of the specified identity.
-func (w *NWaku) GetPrivateKey(id string) (*ecdsa.PrivateKey, error) {
+func (w *Waku) GetPrivateKey(id string) (*ecdsa.PrivateKey, error) {
deterministicID, err := toDeterministicID(id, common.KeyIDSize)
if err != nil {
return nil, err
@@ -733,7 +734,7 @@ func (w *NWaku) GetPrivateKey(id string) (*ecdsa.PrivateKey, error) {
// GenerateSymKey generates a random symmetric key and stores it under id,
// which is then returned. Will be used in the future for session key exchange.
-func (w *NWaku) GenerateSymKey() (string, error) {
+func (w *Waku) GenerateSymKey() (string, error) {
key, err := common.GenerateSecureRandomData(common.AESKeyLength)
if err != nil {
return "", err
@@ -757,7 +758,7 @@ func (w *NWaku) GenerateSymKey() (string, error) {
}
// AddSymKey stores the key with a given id.
-func (w *NWaku) AddSymKey(id string, key []byte) (string, error) {
+func (w *Waku) AddSymKey(id string, key []byte) (string, error) {
deterministicID, err := toDeterministicID(id, common.KeyIDSize)
if err != nil {
return "", err
@@ -774,7 +775,7 @@ func (w *NWaku) AddSymKey(id string, key []byte) (string, error) {
}
// AddSymKeyDirect stores the key, and returns its id.
-func (w *NWaku) AddSymKeyDirect(key []byte) (string, error) {
+func (w *Waku) AddSymKeyDirect(key []byte) (string, error) {
if len(key) != common.AESKeyLength {
return "", fmt.Errorf("wrong key size: %d", len(key))
}
@@ -795,7 +796,7 @@ func (w *NWaku) AddSymKeyDirect(key []byte) (string, error) {
}
// AddSymKeyFromPassword generates the key from password, stores it, and returns its id.
-func (w *NWaku) AddSymKeyFromPassword(password string) (string, error) {
+func (w *Waku) AddSymKeyFromPassword(password string) (string, error) {
id, err := common.GenerateRandomID()
if err != nil {
return "", fmt.Errorf("failed to generate ID: %s", err)
@@ -821,14 +822,14 @@ func (w *NWaku) AddSymKeyFromPassword(password string) (string, error) {
// HasSymKey returns true if there is a key associated with the given id.
// Otherwise returns false.
-func (w *NWaku) HasSymKey(id string) bool {
+func (w *Waku) HasSymKey(id string) bool {
w.keyMu.RLock()
defer w.keyMu.RUnlock()
return w.symKeys[id] != nil
}
// DeleteSymKey deletes the key associated with the name string if it exists.
-func (w *NWaku) DeleteSymKey(id string) bool {
+func (w *Waku) DeleteSymKey(id string) bool {
w.keyMu.Lock()
defer w.keyMu.Unlock()
if w.symKeys[id] != nil {
@@ -839,7 +840,7 @@ func (w *NWaku) DeleteSymKey(id string) bool {
}
// GetSymKey returns the symmetric key associated with the given id.
-func (w *NWaku) GetSymKey(id string) ([]byte, error) {
+func (w *Waku) GetSymKey(id string) ([]byte, error) {
w.keyMu.RLock()
defer w.keyMu.RUnlock()
if w.symKeys[id] != nil {
@@ -850,7 +851,7 @@ func (w *NWaku) GetSymKey(id string) ([]byte, error) {
// Subscribe installs a new message handler used for filtering, decrypting
// and subsequent storing of incoming messages.
-func (w *NWaku) Subscribe(f *common.Filter) (string, error) {
+func (w *Waku) Subscribe(f *common.Filter) (string, error) {
f.PubsubTopic = w.GetPubsubTopic(f.PubsubTopic)
id, err := w.filters.Install(f)
if err != nil {
@@ -866,7 +867,7 @@ func (w *NWaku) Subscribe(f *common.Filter) (string, error) {
}
// Unsubscribe removes an installed message handler.
-func (w *NWaku) Unsubscribe(ctx context.Context, id string) error {
+func (w *Waku) Unsubscribe(ctx context.Context, id string) error {
ok := w.filters.Uninstall(id)
if !ok {
return fmt.Errorf("failed to unsubscribe: invalid ID '%s'", id)
@@ -880,12 +881,12 @@ func (w *NWaku) Unsubscribe(ctx context.Context, id string) error {
}
// GetFilter returns the filter by id.
-func (w *NWaku) GetFilter(id string) *common.Filter {
+func (w *Waku) GetFilter(id string) *common.Filter {
return w.filters.Get(id)
}
// Unsubscribe removes an installed message handler.
-func (w *NWaku) UnsubscribeMany(ids []string) error {
+func (w *Waku) UnsubscribeMany(ids []string) error {
for _, id := range ids {
w.logger.Info("cleaning up filter", zap.String("id", id))
ok := w.filters.Uninstall(id)
@@ -896,24 +897,24 @@ func (w *NWaku) UnsubscribeMany(ids []string) error {
return nil
}
-func (w *NWaku) SkipPublishToTopic(value bool) {
+func (w *Waku) SkipPublishToTopic(value bool) {
w.cfg.SkipPublishToTopic = value
}
-func (w *NWaku) ConfirmMessageDelivered(hashes []gethcommon.Hash) {
+func (w *Waku) ConfirmMessageDelivered(hashes []gethcommon.Hash) {
if !w.cfg.EnableStoreConfirmationForMessagesSent {
return
}
w.messageSentCheck.DeleteByMessageIDs(hashes)
}
-func (w *NWaku) SetStorePeerID(peerID peer.ID) {
+func (w *Waku) SetStorePeerID(peerID peer.ID) {
if w.messageSentCheck != nil {
w.messageSentCheck.SetStorePeerID(peerID)
}
}
-func (w *NWaku) Query(ctx context.Context,
+func (w *Waku) Query(ctx context.Context,
peerID peer.ID,
query store.FilterCriteria,
cursor []byte,
@@ -998,14 +999,14 @@ func (w *NWaku) Query(ctx context.Context,
return nil, 0, nil
}
-// OnNewEnvelope is an interface from NWaku FilterManager API that gets invoked when any new message is received by Filter.
-func (w *NWaku) OnNewEnvelope(env *protocol.Envelope) error {
+// OnNewEnvelope is an interface from Waku FilterManager API that gets invoked when any new message is received by Filter.
+func (w *Waku) OnNewEnvelope(env *protocol.Envelope) error {
return w.OnNewEnvelopes(env, common.RelayedMessageType, false)
}
// Start implements node.Service, starting the background data propagation thread
-// of the NWaku protocol.
-func (w *NWaku) Start() error {
+// of the Waku protocol.
+func (w *Waku) Start() error {
// if w.ctx == nil {
// w.ctx, w.cancel = context.WithCancel(context.Background())
// }
@@ -1125,7 +1126,7 @@ func (w *NWaku) Start() error {
return nil
}
-func (w *NWaku) checkForConnectionChanges() {
+func (w *Waku) checkForConnectionChanges() {
// isOnline := len(w.node.Host().Network().Peers()) > 0
@@ -1163,7 +1164,7 @@ func (w *NWaku) checkForConnectionChanges() {
// })
}
-// func (w *NWaku) confirmMessagesSent() {
+// func (w *Waku) confirmMessagesSent() {
// w.messageSentCheck = publish.NewMessageSentCheck(w.ctx, w.node.Store(), w.node.Timesource(), w.logger)
// go w.messageSentCheck.Start()
@@ -1187,13 +1188,13 @@ func (w *NWaku) checkForConnectionChanges() {
// }()
// }
-func (w *NWaku) MessageExists(mh pb.MessageHash) (bool, error) {
+func (w *Waku) MessageExists(mh pb.MessageHash) (bool, error) {
w.poolMu.Lock()
defer w.poolMu.Unlock()
return w.envelopeCache.Has(gethcommon.Hash(mh)), nil
}
-func (w *NWaku) SetTopicsToVerifyForMissingMessages(peerID peer.ID, pubsubTopic string, contentTopics []string) {
+func (w *Waku) SetTopicsToVerifyForMissingMessages(peerID peer.ID, pubsubTopic string, contentTopics []string) {
if !w.cfg.EnableMissingMessageVerification {
return
}
@@ -1201,7 +1202,7 @@ func (w *NWaku) SetTopicsToVerifyForMissingMessages(peerID peer.ID, pubsubTopic
w.missingMsgVerifier.SetCriteriaInterest(peerID, protocol.NewContentFilter(pubsubTopic, contentTopics...))
}
-func (w *NWaku) setupRelaySubscriptions() error {
+func (w *Waku) setupRelaySubscriptions() error {
if w.cfg.LightClient {
return nil
}
@@ -1235,7 +1236,7 @@ func (w *NWaku) setupRelaySubscriptions() error {
return nil
}
-func (w *NWaku) OnNewEnvelopes(envelope *protocol.Envelope, msgType common.MessageType, processImmediately bool) error {
+func (w *Waku) OnNewEnvelopes(envelope *protocol.Envelope, msgType common.MessageType, processImmediately bool) error {
if envelope == nil {
return nil
}
@@ -1276,13 +1277,13 @@ func (w *NWaku) OnNewEnvelopes(envelope *protocol.Envelope, msgType common.Messa
}
// addEnvelope adds an envelope to the envelope map, used for sending
-func (w *NWaku) addEnvelope(envelope *common.ReceivedMessage) {
+func (w *Waku) addEnvelope(envelope *common.ReceivedMessage) {
w.poolMu.Lock()
w.envelopeCache.Set(envelope.Hash(), envelope, ttlcache.DefaultTTL)
w.poolMu.Unlock()
}
-func (w *NWaku) add(recvMessage *common.ReceivedMessage, processImmediately bool) (bool, error) {
+func (w *Waku) add(recvMessage *common.ReceivedMessage, processImmediately bool) (bool, error) {
common.EnvelopesReceivedCounter.Inc()
w.poolMu.Lock()
@@ -1320,12 +1321,12 @@ func (w *NWaku) add(recvMessage *common.ReceivedMessage, processImmediately bool
}
// postEvent queues the message for further processing.
-func (w *NWaku) postEvent(envelope *common.ReceivedMessage) {
+func (w *Waku) postEvent(envelope *common.ReceivedMessage) {
w.msgQueue <- envelope
}
// processQueueLoop delivers the messages to the watchers during the lifetime of the waku node.
-func (w *NWaku) processQueueLoop() {
+func (w *Waku) processQueueLoop() {
if w.ctx == nil {
return
}
@@ -1339,7 +1340,7 @@ func (w *NWaku) processQueueLoop() {
}
}
-func (w *NWaku) processMessage(e *common.ReceivedMessage) {
+func (w *Waku) processMessage(e *common.ReceivedMessage) {
logger := w.logger.With(
zap.Stringer("envelopeHash", e.Envelope.Hash()),
zap.String("pubsubTopic", e.PubsubTopic),
@@ -1382,7 +1383,7 @@ func (w *NWaku) processMessage(e *common.ReceivedMessage) {
// GetEnvelope retrieves an envelope from the message queue by its hash.
// It returns nil if the envelope can not be found.
-func (w *NWaku) GetEnvelope(hash gethcommon.Hash) *common.ReceivedMessage {
+func (w *Waku) GetEnvelope(hash gethcommon.Hash) *common.ReceivedMessage {
w.poolMu.RLock()
defer w.poolMu.RUnlock()
@@ -1395,14 +1396,14 @@ func (w *NWaku) GetEnvelope(hash gethcommon.Hash) *common.ReceivedMessage {
}
// isEnvelopeCached checks if envelope with specific hash has already been received and cached.
-func (w *NWaku) IsEnvelopeCached(hash gethcommon.Hash) bool {
+func (w *Waku) IsEnvelopeCached(hash gethcommon.Hash) bool {
w.poolMu.Lock()
defer w.poolMu.Unlock()
return w.envelopeCache.Has(hash)
}
-func (w *NWaku) ClearEnvelopesCache() {
+func (w *Waku) ClearEnvelopesCache() {
w.poolMu.Lock()
defer w.poolMu.Unlock()
@@ -1410,17 +1411,17 @@ func (w *NWaku) ClearEnvelopesCache() {
w.envelopeCache = newTTLCache()
}
-func (w *NWaku) PeerCount() int {
+func (w *Waku) PeerCount() int {
return 0
// return w.node.PeerCount()
}
-func (w *NWaku) Peers() types.PeerStats {
+func (w *Waku) Peers() types.PeerStats {
return nil
// return FormatPeerStats(w.node)
}
-func (w *NWaku) RelayPeersByTopic(topic string) (*types.PeerList, error) {
+func (w *Waku) RelayPeersByTopic(topic string) (*types.PeerList, error) {
if w.cfg.LightClient {
return nil, errors.New("only available in relay mode")
}
@@ -1432,7 +1433,7 @@ func (w *NWaku) RelayPeersByTopic(topic string) (*types.PeerList, error) {
return nil, nil
}
-func (w *NWaku) SubscribeToPubsubTopic(topic string, pubkey *ecdsa.PublicKey) error {
+func (w *Waku) SubscribeToPubsubTopic(topic string, pubkey *ecdsa.PublicKey) error {
topic = w.GetPubsubTopic(topic)
if !w.cfg.LightClient {
@@ -1445,7 +1446,7 @@ func (w *NWaku) SubscribeToPubsubTopic(topic string, pubkey *ecdsa.PublicKey) er
return nil
}
-func (w *NWaku) UnsubscribeFromPubsubTopic(topic string) error {
+func (w *Waku) UnsubscribeFromPubsubTopic(topic string) error {
topic = w.GetPubsubTopic(topic)
if !w.cfg.LightClient {
@@ -1457,7 +1458,7 @@ func (w *NWaku) UnsubscribeFromPubsubTopic(topic string) error {
return nil
}
-func (w *NWaku) RetrievePubsubTopicKey(topic string) (*ecdsa.PrivateKey, error) {
+func (w *Waku) RetrievePubsubTopicKey(topic string) (*ecdsa.PrivateKey, error) {
topic = w.GetPubsubTopic(topic)
if w.protectedTopicStore == nil {
return nil, nil
@@ -1466,7 +1467,7 @@ func (w *NWaku) RetrievePubsubTopicKey(topic string) (*ecdsa.PrivateKey, error)
return w.protectedTopicStore.FetchPrivateKey(topic)
}
-func (w *NWaku) StorePubsubTopicKey(topic string, privKey *ecdsa.PrivateKey) error {
+func (w *Waku) StorePubsubTopicKey(topic string, privKey *ecdsa.PrivateKey) error {
topic = w.GetPubsubTopic(topic)
if w.protectedTopicStore == nil {
return nil
@@ -1475,7 +1476,7 @@ func (w *NWaku) StorePubsubTopicKey(topic string, privKey *ecdsa.PrivateKey) err
return w.protectedTopicStore.Insert(topic, privKey, &privKey.PublicKey)
}
-func (w *NWaku) RemovePubsubTopicKey(topic string) error {
+func (w *Waku) RemovePubsubTopicKey(topic string) error {
topic = w.GetPubsubTopic(topic)
if w.protectedTopicStore == nil {
return nil
@@ -1484,7 +1485,7 @@ func (w *NWaku) RemovePubsubTopicKey(topic string) error {
return w.protectedTopicStore.Delete(topic)
}
-func (w *NWaku) handleNetworkChangeFromApp(state connection.State) {
+func (w *Waku) handleNetworkChangeFromApp(state connection.State) {
//If connection state is reported by something other than peerCount becoming 0 e.g from mobile app, disconnect all peers
// if (state.Offline && len(w.node.Host().Network().Peers()) > 0) ||
// (w.state.Type != state.Type && !w.state.Offline && !state.Offline) { // network switched between wifi and cellular
@@ -1496,7 +1497,7 @@ func (w *NWaku) handleNetworkChangeFromApp(state connection.State) {
// }
}
-func (w *NWaku) ConnectionChanged(state connection.State) {
+func (w *Waku) ConnectionChanged(state connection.State) {
isOnline := !state.Offline
if w.cfg.LightClient {
//TODO: Update this as per https://github.com/waku-org/go-waku/issues/1114
@@ -1520,7 +1521,7 @@ func (w *NWaku) ConnectionChanged(state connection.State) {
w.state = state
}
-func (w *NWaku) AddStorePeer(address multiaddr.Multiaddr) (peer.ID, error) {
+func (w *Waku) AddStorePeer(address multiaddr.Multiaddr) (peer.ID, error) {
// peerID, err := w.node.AddPeer(address, wps.Static, w.cfg.DefaultShardedPubsubTopics, store.StoreQueryID_v300)
// if err != nil {
// return "", err
@@ -1529,11 +1530,11 @@ func (w *NWaku) AddStorePeer(address multiaddr.Multiaddr) (peer.ID, error) {
return "", nil
}
-func (w *NWaku) timestamp() int64 {
+func (w *Waku) timestamp() int64 {
return w.timesource.Now().UnixNano()
}
-func (w *NWaku) AddRelayPeer(address multiaddr.Multiaddr) (peer.ID, error) {
+func (w *Waku) AddRelayPeer(address multiaddr.Multiaddr) (peer.ID, error) {
// peerID, err := w.node.AddPeer(address, wps.Static, w.cfg.DefaultShardedPubsubTopics, relay.WakuRelayID_v200)
// if err != nil {
// return "", err
@@ -1542,38 +1543,38 @@ func (w *NWaku) AddRelayPeer(address multiaddr.Multiaddr) (peer.ID, error) {
return "", nil
}
-func (w *NWaku) DialPeer(address multiaddr.Multiaddr) error {
+func (w *Waku) DialPeer(address multiaddr.Multiaddr) error {
// ctx, cancel := context.WithTimeout(w.ctx, requestTimeout)
// defer cancel()
// return w.node.DialPeerWithMultiAddress(ctx, address)
return nil
}
-func (w *NWaku) DialPeerByID(peerID peer.ID) error {
+func (w *Waku) DialPeerByID(peerID peer.ID) error {
// ctx, cancel := context.WithTimeout(w.ctx, requestTimeout)
// defer cancel()
// return w.node.DialPeerByID(ctx, peerID)
return nil
}
-func (w *NWaku) DropPeer(peerID peer.ID) error {
+func (w *Waku) DropPeer(peerID peer.ID) error {
// return w.node.ClosePeerById(peerID)
return nil
}
-func (w *NWaku) ProcessingP2PMessages() bool {
+func (w *Waku) ProcessingP2PMessages() bool {
w.storeMsgIDsMu.Lock()
defer w.storeMsgIDsMu.Unlock()
return len(w.storeMsgIDs) != 0
}
-func (w *NWaku) MarkP2PMessageAsProcessed(hash gethcommon.Hash) {
+func (w *Waku) MarkP2PMessageAsProcessed(hash gethcommon.Hash) {
w.storeMsgIDsMu.Lock()
defer w.storeMsgIDsMu.Unlock()
delete(w.storeMsgIDs, hash)
}
-func (w *NWaku) Clean() error {
+func (w *Waku) Clean() error {
w.msgQueue = make(chan *common.ReceivedMessage, messageQueueLimit)
for _, f := range w.filters.All() {
@@ -1583,12 +1584,12 @@ func (w *NWaku) Clean() error {
return nil
}
-func (w *NWaku) PeerID() peer.ID {
+func (w *Waku) PeerID() peer.ID {
// return w.node.Host().ID()
return ""
}
-func (w *NWaku) Peerstore() peerstore.Peerstore {
+func (w *Waku) Peerstore() peerstore.Peerstore {
// return w.node.Host().Peerstore()
return nil
}
@@ -1639,7 +1640,7 @@ func FormatPeerStats(wakuNode *node.WakuNode) types.PeerStats {
return p
}
-func (w *NWaku) StoreNode() *store.WakuStore {
+func (w *Waku) StoreNode() *store.WakuStore {
// return w.node.Store()
return nil
}
@@ -1656,7 +1657,7 @@ func FormatPeerConnFailures(wakuNode *node.WakuNode) map[string]int {
return p
}
-func (w *NWaku) LegacyStoreNode() legacy_store.Store {
+func (w *Waku) LegacyStoreNode() legacy_store.Store {
// return w.node.LegacyStore()
return nil
}
@@ -1675,7 +1676,7 @@ type WakuConfig struct {
var jamon unsafe.Pointer
-type NWaku struct {
+type Waku struct {
wakuCtx unsafe.Pointer
appDB *sql.DB
@@ -1749,7 +1750,7 @@ type NWaku struct {
defaultShardInfo protocol.RelayShards
}
-func (w *NWaku) Stop() error {
+func (w *Waku) Stop() error {
return w.WakuStop()
}
@@ -1772,7 +1773,7 @@ func wakuNew(nodeKey *ecdsa.PrivateKey,
logger *zap.Logger,
appDB *sql.DB,
ts *timesource.NTPTimeSource,
- onHistoricMessagesRequestFailed func([]byte, peer.ID, error), onPeerStats func(types.ConnStatus)) (*NWaku, error) {
+ onHistoricMessagesRequestFailed func([]byte, peer.ID, error), onPeerStats func(types.ConnStatus)) (*Waku, error) {
nwakuConfig := WakuConfig{
Host: cfg.Host,
@@ -1817,7 +1818,7 @@ func wakuNew(nodeKey *ecdsa.PrivateKey,
if C.getRet(resp) == C.RET_OK {
- return &NWaku{
+ return &Waku{
wakuCtx: wakuCtx,
cfg: cfg,
privateKeys: make(map[string]*ecdsa.PrivateKey),
@@ -1848,7 +1849,7 @@ func wakuNew(nodeKey *ecdsa.PrivateKey,
return nil, errors.New(errMsg)
}
-func (self *NWaku) WakuStart() error {
+func (self *Waku) WakuStart() error {
var resp = C.allocResp()
defer C.freeResp(resp)
@@ -1861,7 +1862,7 @@ func (self *NWaku) WakuStart() error {
return errors.New(errMsg)
}
-func (self *NWaku) WakuStop() error {
+func (self *Waku) WakuStop() error {
var resp = C.allocResp()
defer C.freeResp(resp)
C.cGoWakuStop(self.wakuCtx, resp)
@@ -1873,7 +1874,7 @@ func (self *NWaku) WakuStop() error {
return errors.New(errMsg)
}
-func (self *NWaku) WakuDestroy() error {
+func (self *Waku) WakuDestroy() error {
var resp = C.allocResp()
defer C.freeResp(resp)
C.cGoWakuDestroy(self.wakuCtx, resp)
@@ -1885,7 +1886,7 @@ func (self *NWaku) WakuDestroy() error {
return errors.New(errMsg)
}
-func (self *NWaku) StartDiscV5() error {
+func (self *Waku) StartDiscV5() error {
var resp = C.allocResp()
defer C.freeResp(resp)
C.cGoWakuStartDiscV5(self.wakuCtx, resp)
@@ -1897,7 +1898,7 @@ func (self *NWaku) StartDiscV5() error {
return errors.New(errMsg)
}
-func (self *NWaku) StopDiscV5() error {
+func (self *Waku) StopDiscV5() error {
var resp = C.allocResp()
defer C.freeResp(resp)
C.cGoWakuStopDiscV5(self.wakuCtx, resp)
@@ -1909,7 +1910,7 @@ func (self *NWaku) StopDiscV5() error {
return errors.New(errMsg)
}
-func (self *NWaku) WakuVersion() (string, error) {
+func (self *Waku) WakuVersion() (string, error) {
var resp = C.allocResp()
defer C.freeResp(resp)
@@ -1928,20 +1929,20 @@ func (self *NWaku) WakuVersion() (string, error) {
//export globalEventCallback
func globalEventCallback(callerRet C.int, msg *C.char, len C.size_t, userData unsafe.Pointer) {
// This is shared among all Golang instances
- self := NWaku{wakuCtx: userData}
+ self := Waku{wakuCtx: userData}
self.MyEventCallback(callerRet, msg, len)
}
-func (self *NWaku) MyEventCallback(callerRet C.int, msg *C.char, len C.size_t) {
+func (self *Waku) MyEventCallback(callerRet C.int, msg *C.char, len C.size_t) {
fmt.Println("Event received:", C.GoStringN(msg, C.int(len)))
}
-func (self *NWaku) WakuSetEventCallback() {
+func (self *Waku) WakuSetEventCallback() {
// Notice that the events for self node are handled by the 'MyEventCallback' method
C.cGoWakuSetEventCallback(self.wakuCtx)
}
-func (self *NWaku) FormatContentTopic(
+func (self *Waku) FormatContentTopic(
appName string,
appVersion int,
contentTopicName string,
@@ -1975,7 +1976,7 @@ func (self *NWaku) FormatContentTopic(
return "", errors.New(errMsg)
}
-func (self *NWaku) FormatPubsubTopic(topicName string) (WakuPubsubTopic, error) {
+func (self *Waku) FormatPubsubTopic(topicName string) (WakuPubsubTopic, error) {
var cTopicName = C.CString(topicName)
var resp = C.allocResp()
@@ -1994,7 +1995,7 @@ func (self *NWaku) FormatPubsubTopic(topicName string) (WakuPubsubTopic, error)
return "", errors.New(errMsg)
}
-func (self *NWaku) WakuDefaultPubsubTopic() (WakuPubsubTopic, error) {
+func (self *Waku) WakuDefaultPubsubTopic() (WakuPubsubTopic, error) {
var resp = C.allocResp()
defer C.freeResp(resp)
C.cGoWakuDefaultPubsubTopic(self.wakuCtx, resp)
@@ -2009,13 +2010,16 @@ func (self *NWaku) WakuDefaultPubsubTopic() (WakuPubsubTopic, error) {
return "", errors.New(errMsg)
}
-func (self *NWaku) WakuRelayPublish(
- pubsubTopic string,
- message string,
- timeoutMs int) (WakuMessageHash, error) {
+func (self *Waku) WakuRelayPublish(wakuMsg *pb.WakuMessage, pubsubTopic string) (string, error) {
+ timeoutMs := 1000
+
+ message, err := json.Marshal(wakuMsg)
+ if err != nil {
+ return "", err
+ }
var cPubsubTopic = C.CString(pubsubTopic)
- var msg = C.CString(message)
+ var msg = C.CString(string(message))
var resp = C.allocResp()
defer C.freeResp(resp)
@@ -2032,7 +2036,7 @@ func (self *NWaku) WakuRelayPublish(
return "", errors.New(errMsg)
}
-func (self *NWaku) WakuRelaySubscribe(pubsubTopic string) error {
+func (self *Waku) WakuRelaySubscribe(pubsubTopic string) error {
var resp = C.allocResp()
var cPubsubTopic = C.CString(pubsubTopic)
@@ -2059,7 +2063,7 @@ func (self *NWaku) WakuRelaySubscribe(pubsubTopic string) error {
return errors.New(errMsg)
}
-func (self *NWaku) WakuRelayUnsubscribe(pubsubTopic string) error {
+func (self *Waku) WakuRelayUnsubscribe(pubsubTopic string) error {
var resp = C.allocResp()
var cPubsubTopic = C.CString(pubsubTopic)
defer C.freeResp(resp)
@@ -2074,12 +2078,14 @@ func (self *NWaku) WakuRelayUnsubscribe(pubsubTopic string) error {
return errors.New(errMsg)
}
-func (self *NWaku) WakuLightpushPublish(
- pubsubTopic string,
- message string) (string, error) {
+func (self *Waku) WakuLightpushPublish(message *pb.WakuMessage, pubsubTopic string) (string, error) {
+ jsonMsg, err := json.Marshal(message)
+ if err != nil {
+ return "", err
+ }
var cPubsubTopic = C.CString(pubsubTopic)
- var msg = C.CString(message)
+ var msg = C.CString(string(jsonMsg))
var resp = C.allocResp()
defer C.freeResp(resp)
@@ -2096,7 +2102,7 @@ func (self *NWaku) WakuLightpushPublish(
return "", errors.New(errMsg)
}
-func (self *NWaku) wakuStoreQuery(
+func (self *Waku) wakuStoreQuery(
jsonQuery string,
peerAddr string,
timeoutMs int) (string, error) {
@@ -2119,7 +2125,7 @@ func (self *NWaku) wakuStoreQuery(
return "", errors.New(errMsg)
}
-func (self *NWaku) WakuPeerExchangeRequest(numPeers uint64) (string, error) {
+func (self *Waku) WakuPeerExchangeRequest(numPeers uint64) (string, error) {
var resp = C.allocResp()
defer C.freeResp(resp)
@@ -2133,7 +2139,7 @@ func (self *NWaku) WakuPeerExchangeRequest(numPeers uint64) (string, error) {
return "", errors.New(errMsg)
}
-func (self *NWaku) WakuConnect(peerMultiAddr string, timeoutMs int) error {
+func (self *Waku) WakuConnect(peerMultiAddr string, timeoutMs int) error {
var resp = C.allocResp()
var cPeerMultiAddr = C.CString(peerMultiAddr)
defer C.freeResp(resp)
@@ -2149,7 +2155,7 @@ func (self *NWaku) WakuConnect(peerMultiAddr string, timeoutMs int) error {
return errors.New(errMsg)
}
-func (self *NWaku) ListenAddresses() ([]multiaddr.Multiaddr, error) {
+func (self *Waku) ListenAddresses() ([]multiaddr.Multiaddr, error) {
var resp = C.allocResp()
defer C.freeResp(resp)
C.cGoWakuListenAddresses(self.wakuCtx, resp)
@@ -2178,7 +2184,7 @@ func (self *NWaku) ListenAddresses() ([]multiaddr.Multiaddr, error) {
return nil, errors.New(errMsg)
}
-func (self *NWaku) ENR() (*enode.Node, error) {
+func (self *Waku) ENR() (*enode.Node, error) {
var resp = C.allocResp()
defer C.freeResp(resp)
C.cGoWakuGetMyENR(self.wakuCtx, resp)
@@ -2196,7 +2202,7 @@ func (self *NWaku) ENR() (*enode.Node, error) {
return nil, errors.New(errMsg)
}
-func (self *NWaku) ListPeersInMesh(pubsubTopic string) (int, error) {
+func (self *Waku) ListPeersInMesh(pubsubTopic string) (int, error) {
var resp = C.allocResp()
var cPubsubTopic = C.CString(pubsubTopic)
defer C.freeResp(resp)
@@ -2219,7 +2225,7 @@ func (self *NWaku) ListPeersInMesh(pubsubTopic string) (int, error) {
return 0, errors.New(errMsg)
}
-func (self *NWaku) GetNumConnectedPeers(paramPubsubTopic ...string) (int, error) {
+func (self *Waku) GetNumConnectedPeers(paramPubsubTopic ...string) (int, error) {
var pubsubTopic string
if len(paramPubsubTopic) == 0 {
pubsubTopic = ""
@@ -2249,7 +2255,7 @@ func (self *NWaku) GetNumConnectedPeers(paramPubsubTopic ...string) (int, error)
return 0, errors.New(errMsg)
}
-func (self *NWaku) GetPeerIdsByProtocol(protocol string) (peer.IDSlice, error) {
+func (self *Waku) GetPeerIdsByProtocol(protocol string) (peer.IDSlice, error) {
var resp = C.allocResp()
var cProtocol = C.CString(protocol)
defer C.freeResp(resp)
@@ -2373,7 +2379,7 @@ func (self *NWaku) GetPeerIdsByProtocol(protocol string) (peer.IDSlice, error) {
// }
// MaxMessageSize returns the maximum accepted message size.
-func (w *NWaku) MaxMessageSize() uint32 {
+func (w *Waku) MaxMessageSize() uint32 {
return w.cfg.MaxMessageSize
}
@@ -2385,7 +2391,7 @@ func New(nodeKey *ecdsa.PrivateKey,
appDB *sql.DB,
ts *timesource.NTPTimeSource,
onHistoricMessagesRequestFailed func([]byte, peer.ID, error),
- onPeerStats func(types.ConnStatus)) (*NWaku, error) {
+ onPeerStats func(types.ConnStatus)) (*Waku, error) {
// Lock the main goroutine to its current OS thread
runtime.LockOSThread()
@@ -2529,51 +2535,3 @@ func New(nodeKey *ecdsa.PrivateKey,
// return waku, nil
}
-
-type NwakuInfo struct {
- ListenAddresses []string `json:"listenAddresses"`
- EnrUri string `json:"enrUri"`
-}
-
-func GetNwakuInfo(host *string, port *int) (NwakuInfo, error) {
- nwakuRestPort := 8645
- if port != nil {
- nwakuRestPort = *port
- }
- envNwakuRestPort := os.Getenv("NWAKU_REST_PORT")
- if envNwakuRestPort != "" {
- v, err := strconv.Atoi(envNwakuRestPort)
- if err != nil {
- return NwakuInfo{}, err
- }
- nwakuRestPort = v
- }
-
- nwakuRestHost := "localhost"
- if host != nil {
- nwakuRestHost = *host
- }
- envNwakuRestHost := os.Getenv("NWAKU_REST_HOST")
- if envNwakuRestHost != "" {
- nwakuRestHost = envNwakuRestHost
- }
-
- resp, err := http.Get(fmt.Sprintf("http://%s:%d/debug/v1/info", nwakuRestHost, nwakuRestPort))
- if err != nil {
- return NwakuInfo{}, err
- }
- defer resp.Body.Close()
-
- body, err := io.ReadAll(resp.Body)
- if err != nil {
- return NwakuInfo{}, err
- }
-
- var data NwakuInfo
- err = json.Unmarshal(body, &data)
- if err != nil {
- return NwakuInfo{}, err
- }
-
- return data, nil
-}
diff --git a/wakuv2/nwaku_test.go b/wakuv2/nwaku_test.go
new file mode 100644
index 000000000..2c1490cb2
--- /dev/null
+++ b/wakuv2/nwaku_test.go
@@ -0,0 +1,809 @@
+//go:build use_nwaku
+// +build use_nwaku
+
+package wakuv2
+
+import (
+ "context"
+ "crypto/rand"
+ "errors"
+ "math/big"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/cenkalti/backoff/v3"
+
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/crypto"
+ ethdnsdisc "github.com/ethereum/go-ethereum/p2p/dnsdisc"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+
+ "github.com/stretchr/testify/require"
+ "golang.org/x/exp/maps"
+ "google.golang.org/protobuf/proto"
+
+ "github.com/waku-org/go-waku/waku/v2/dnsdisc"
+ "github.com/waku-org/go-waku/waku/v2/protocol"
+ "github.com/waku-org/go-waku/waku/v2/protocol/pb"
+ "github.com/waku-org/go-waku/waku/v2/protocol/store"
+
+ "github.com/status-im/status-go/protocol/tt"
+ "github.com/status-im/status-go/wakuv2/common"
+)
+
+var testStoreENRBootstrap = "enrtree://AI4W5N5IFEUIHF5LESUAOSMV6TKWF2MB6GU2YK7PU4TYUGUNOCEPW@store.staging.status.nodes.status.im"
+var testBootENRBootstrap = "enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.staging.status.nodes.status.im"
+
+func setDefaultConfig(config *Config, lightMode bool) {
+ config.ClusterID = 16
+
+ if lightMode {
+ config.EnablePeerExchangeClient = true
+ config.LightClient = true
+ config.EnableDiscV5 = false
+ } else {
+ config.EnableDiscV5 = true
+ config.EnablePeerExchangeServer = true
+ config.LightClient = false
+ config.EnablePeerExchangeClient = false
+ }
+}
+
+/*
+func TestDiscoveryV5(t *testing.T) {
+ config := &Config{}
+ setDefaultConfig(config, false)
+ config.DiscV5BootstrapNodes = []string{testStoreENRBootstrap}
+ config.DiscoveryLimit = 20
+ w, err := New(nil, "shards.staging", config, nil, nil, nil, nil, nil)
+ require.NoError(t, err)
+
+ require.NoError(t, w.Start())
+
+ err = tt.RetryWithBackOff(func() error {
+ if len(w.Peers()) == 0 {
+ return errors.New("no peers discovered")
+ }
+ return nil
+ })
+
+ require.NoError(t, err)
+
+ require.NotEqual(t, 0, len(w.Peers()))
+ require.NoError(t, w.Stop())
+}
+*/
+/*
+func TestRestartDiscoveryV5(t *testing.T) {
+ config := &Config{}
+ setDefaultConfig(config, false)
+ // Use wrong discv5 bootstrap address, to simulate being offline
+ config.DiscV5BootstrapNodes = []string{"enrtree://AOGECG2SPND25EEFMAJ5WF3KSGJNSGV356DSTL2YVLLZWIV6SAYBM@1.1.1.2"}
+ config.DiscoveryLimit = 20
+ config.UDPPort = 10002
+ config.ClusterID = 16
+ w, err := New(nil, "", config, nil, nil, nil, nil, nil)
+ require.NoError(t, err)
+
+ require.NoError(t, w.Start())
+ require.False(t, w.seededBootnodesForDiscV5)
+
+ options := func(b *backoff.ExponentialBackOff) {
+ b.MaxElapsedTime = 2 * time.Second
+ }
+
+ // Sanity check, not great, but it's probably helpful
+ err = tt.RetryWithBackOff(func() error {
+ if len(w.Peers()) == 0 {
+ return errors.New("no peers discovered")
+ }
+ return nil
+ }, options)
+
+ require.Error(t, err)
+
+ w.discV5BootstrapNodes = []string{testStoreENRBootstrap}
+
+ options = func(b *backoff.ExponentialBackOff) {
+ b.MaxElapsedTime = 90 * time.Second
+ }
+
+ err = tt.RetryWithBackOff(func() error {
+ if len(w.Peers()) == 0 {
+ return errors.New("no peers discovered")
+ }
+ return nil
+ }, options)
+ require.NoError(t, err)
+
+ require.True(t, w.seededBootnodesForDiscV5)
+ require.NotEqual(t, 0, len(w.Peers()))
+ require.NoError(t, w.Stop())
+}
+
+func TestRelayPeers(t *testing.T) {
+ config := &Config{
+ EnableMissingMessageVerification: true,
+ }
+ setDefaultConfig(config, false)
+ w, err := New(nil, "", config, nil, nil, nil, nil, nil)
+ require.NoError(t, err)
+ require.NoError(t, w.Start())
+ _, err = w.RelayPeersByTopic(config.DefaultShardPubsubTopic)
+ require.NoError(t, err)
+
+ // Ensure function returns an error for lightclient
+ config = &Config{}
+ config.ClusterID = 16
+ config.LightClient = true
+ w, err = New(nil, "", config, nil, nil, nil, nil, nil)
+ require.NoError(t, err)
+ require.NoError(t, w.Start())
+ _, err = w.RelayPeersByTopic(config.DefaultShardPubsubTopic)
+ require.Error(t, err)
+}
+*/
+func parseNodes(rec []string) []*enode.Node {
+ var ns []*enode.Node
+ for _, r := range rec {
+ var n enode.Node
+ if err := n.UnmarshalText([]byte(r)); err != nil {
+ panic(err)
+ }
+ ns = append(ns, &n)
+ }
+ return ns
+}
+
+// In order to run these tests, you must run an nwaku node
+//
+// Using Docker:
+//
+// IP_ADDRESS=$(hostname -I | awk '{print $1}');
+// docker run \
+// -p 60000:60000/tcp -p 9000:9000/udp -p 8645:8645/tcp harbor.status.im/wakuorg/nwaku:v0.31.0 \
+// --tcp-port=60000 --discv5-discovery=true --cluster-id=16 --pubsub-topic=/waku/2/rs/16/32 --pubsub-topic=/waku/2/rs/16/64 \
+// --nat=extip:${IP_ADDRESS} --discv5-udp-port=9000 --rest-address=0.0.0.0 --store
+
+func TestBasicWakuV2(t *testing.T) {
+ nwakuInfo, err := GetNwakuInfo(nil, nil)
+ require.NoError(t, err)
+
+ // Creating a fake DNS Discovery ENRTree
+ tree, url := makeTestTree("n", parseNodes([]string{nwakuInfo.EnrUri}), nil)
+ enrTreeAddress := url
+ envEnrTreeAddress := os.Getenv("ENRTREE_ADDRESS")
+ if envEnrTreeAddress != "" {
+ enrTreeAddress = envEnrTreeAddress
+ }
+
+ config := &Config{}
+ setDefaultConfig(config, false)
+ config.Port = 0
+ config.Resolver = mapResolver(tree.ToTXT("n"))
+ config.DiscV5BootstrapNodes = []string{enrTreeAddress}
+ config.DiscoveryLimit = 20
+ config.WakuNodes = []string{enrTreeAddress}
+ w, err := New(nil, "", config, nil, nil, nil, nil, nil)
+ require.NoError(t, err)
+ require.NoError(t, w.Start())
+
+ enr, err := w.ENR()
+ require.NoError(t, err)
+ require.NotNil(t, enr)
+
+ // DNSDiscovery
+ ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
+ defer cancel()
+
+ discoveredNodes, err := dnsdisc.RetrieveNodes(ctx, enrTreeAddress, dnsdisc.WithResolver(config.Resolver))
+ require.NoError(t, err)
+
+ // Peer used for retrieving history
+ r, err := rand.Int(rand.Reader, big.NewInt(int64(len(discoveredNodes))))
+ require.NoError(t, err)
+
+ storeNode := discoveredNodes[int(r.Int64())]
+
+ options := func(b *backoff.ExponentialBackOff) {
+ b.MaxElapsedTime = 30 * time.Second
+ }
+
+ // Sanity check, not great, but it's probably helpful
+ err = tt.RetryWithBackOff(func() error {
+ if len(w.Peers()) < 1 {
+ return errors.New("no peers discovered")
+ }
+ return nil
+ }, options)
+ require.NoError(t, err)
+
+ // Dropping Peer
+ err = w.DropPeer(storeNode.PeerID)
+ require.NoError(t, err)
+
+ // Dialing with peerID
+ err = w.DialPeerByID(storeNode.PeerID)
+ require.NoError(t, err)
+
+ err = tt.RetryWithBackOff(func() error {
+ if len(w.Peers()) < 1 {
+ return errors.New("no peers discovered")
+ }
+ return nil
+ }, options)
+ require.NoError(t, err)
+
+ filter := &common.Filter{
+ PubsubTopic: config.DefaultShardPubsubTopic,
+ Messages: common.NewMemoryMessageStore(),
+ ContentTopics: common.NewTopicSetFromBytes([][]byte{{1, 2, 3, 4}}),
+ }
+
+ _, err = w.Subscribe(filter)
+ require.NoError(t, err)
+
+ msgTimestamp := w.timestamp()
+ contentTopic := maps.Keys(filter.ContentTopics)[0]
+
+ time.Sleep(2 * time.Second)
+
+ _, err = w.Send(config.DefaultShardPubsubTopic, &pb.WakuMessage{
+ Payload: []byte{1, 2, 3, 4, 5},
+ ContentTopic: contentTopic.ContentTopic(),
+ Version: proto.Uint32(0),
+ Timestamp: &msgTimestamp,
+ }, nil)
+
+ require.NoError(t, err)
+
+ time.Sleep(1 * time.Second)
+
+ messages := filter.Retrieve()
+ require.Len(t, messages, 1)
+
+ timestampInSeconds := msgTimestamp / int64(time.Second)
+ marginInSeconds := 20
+
+ options = func(b *backoff.ExponentialBackOff) {
+ b.MaxElapsedTime = 60 * time.Second
+ b.InitialInterval = 500 * time.Millisecond
+ }
+ err = tt.RetryWithBackOff(func() error {
+ _, envelopeCount, err := w.Query(
+ context.Background(),
+ storeNode.PeerID,
+ store.FilterCriteria{
+ ContentFilter: protocol.NewContentFilter(config.DefaultShardPubsubTopic, contentTopic.ContentTopic()),
+ TimeStart: proto.Int64((timestampInSeconds - int64(marginInSeconds)) * int64(time.Second)),
+ TimeEnd: proto.Int64((timestampInSeconds + int64(marginInSeconds)) * int64(time.Second)),
+ },
+ nil,
+ nil,
+ false,
+ )
+ if err != nil || envelopeCount == 0 {
+ // in case of failure extend timestamp margin up to 40secs
+ if marginInSeconds < 40 {
+ marginInSeconds += 5
+ }
+ return errors.New("no messages received from store node")
+ }
+ return nil
+ }, options)
+ require.NoError(t, err)
+
+ require.NoError(t, w.Stop())
+}
+
+type mapResolver map[string]string
+
+func (mr mapResolver) LookupTXT(ctx context.Context, name string) ([]string, error) {
+ if record, ok := mr[name]; ok {
+ return []string{record}, nil
+ }
+ return nil, errors.New("not found")
+}
+
+var signingKeyForTesting, _ = crypto.ToECDSA(hexutil.MustDecode("0xdc599867fc513f8f5e2c2c9c489cde5e71362d1d9ec6e693e0de063236ed1240"))
+
+func makeTestTree(domain string, nodes []*enode.Node, links []string) (*ethdnsdisc.Tree, string) {
+ tree, err := ethdnsdisc.MakeTree(1, nodes, links)
+ if err != nil {
+ panic(err)
+ }
+ url, err := tree.Sign(signingKeyForTesting, domain)
+ if err != nil {
+ panic(err)
+ }
+ return tree, url
+}
+
+/*
+func TestPeerExchange(t *testing.T) {
+ logger, err := zap.NewDevelopment()
+ require.NoError(t, err)
+ // start node which serve as PeerExchange server
+ config := &Config{}
+ config.ClusterID = 16
+ config.EnableDiscV5 = true
+ config.EnablePeerExchangeServer = true
+ config.EnablePeerExchangeClient = false
+ pxServerNode, err := New(nil, "", config, logger.Named("pxServerNode"), nil, nil, nil, nil)
+ require.NoError(t, err)
+ require.NoError(t, pxServerNode.Start())
+
+ time.Sleep(1 * time.Second)
+
+ // start node that will be discovered by PeerExchange
+ config = &Config{}
+ config.ClusterID = 16
+ config.EnableDiscV5 = true
+ config.EnablePeerExchangeServer = false
+ config.EnablePeerExchangeClient = false
+ enr, err := pxServerNode.ENR()
+ require.NoError(t, err)
+
+ config.DiscV5BootstrapNodes = []string{enr.String()}
+ discV5Node, err := New(nil, "", config, logger.Named("discV5Node"), nil, nil, nil, nil)
+ require.NoError(t, err)
+ require.NoError(t, discV5Node.Start())
+
+ time.Sleep(1 * time.Second)
+
+ // start light node which use PeerExchange to discover peers
+ enrNodes := []*enode.Node{enr}
+ tree, url := makeTestTree("n", enrNodes, nil)
+ resolver := mapResolver(tree.ToTXT("n"))
+
+ config = &Config{}
+ config.ClusterID = 16
+ config.EnablePeerExchangeServer = false
+ config.EnablePeerExchangeClient = true
+ config.LightClient = true
+ config.Resolver = resolver
+
+ config.WakuNodes = []string{url}
+ lightNode, err := New(nil, "", config, logger.Named("lightNode"), nil, nil, nil, nil)
+ require.NoError(t, err)
+ require.NoError(t, lightNode.Start())
+
+ // Sanity check, not great, but it's probably helpful
+ options := func(b *backoff.ExponentialBackOff) {
+ b.MaxElapsedTime = 30 * time.Second
+ }
+ err = tt.RetryWithBackOff(func() error {
+ // we should not use lightNode.Peers() here as it only indicates peers that are connected right now,
+ // in light client mode,the peer will be closed via `w.node.Host().Network().ClosePeer(peerInfo.ID)`
+ // after invoking identifyAndConnect, instead, we should check the peerStore, peers from peerStore
+ // won't get deleted especially if they are statically added.
+ numConnected, err := lightNode.GetNumConnectedPeers()
+ if err != nil {
+ return err
+ }
+ if numConnected == 2 {
+ return nil
+ }
+ return errors.New("no peers discovered")
+ }, options)
+ require.NoError(t, err)
+
+ _, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ _, err = discV5Node.WakuPeerExchangeRequest(1)
+ require.NoError(t, err)
+ _, err = discV5Node.WakuPeerExchangeRequest(1)
+ require.Error(t, err) //should fail due to rate limit
+
+ require.NoError(t, lightNode.Stop())
+ require.NoError(t, pxServerNode.Stop())
+ require.NoError(t, discV5Node.Stop())
+}
+
+func TestWakuV2Filter(t *testing.T) {
+ t.Skip("flaky test")
+
+ enrTreeAddress := testBootENRBootstrap
+ envEnrTreeAddress := os.Getenv("ENRTREE_ADDRESS")
+ if envEnrTreeAddress != "" {
+ enrTreeAddress = envEnrTreeAddress
+ }
+ config := &Config{}
+ setDefaultConfig(config, true)
+ config.EnablePeerExchangeClient = false
+ config.Port = 0
+ config.MinPeersForFilter = 2
+
+ config.DiscV5BootstrapNodes = []string{enrTreeAddress}
+ config.DiscoveryLimit = 20
+ config.WakuNodes = []string{enrTreeAddress}
+ w, err := New(nil, "", config, nil, nil, nil, nil, nil)
+ require.NoError(t, err)
+ require.NoError(t, w.Start())
+
+ options := func(b *backoff.ExponentialBackOff) {
+ b.MaxElapsedTime = 10 * time.Second
+ }
+ time.Sleep(10 * time.Second) //TODO: Check if we can remove this sleep.
+
+ // Sanity check, not great, but it's probably helpful
+ err = tt.RetryWithBackOff(func() error {
+ peers, err := w.GetPeerIdsByProtocol(string(filter.FilterSubscribeID_v20beta1))
+ if err != nil {
+ return err
+ }
+ if len(peers) < 2 {
+ return errors.New("no peers discovered")
+ }
+ return nil
+ }, options)
+ require.NoError(t, err)
+ testPubsubTopic := "/waku/2/rs/16/32"
+ contentTopicBytes := make([]byte, 4)
+ _, err = rand.Read(contentTopicBytes)
+ require.NoError(t, err)
+ filter := &common.Filter{
+ Messages: common.NewMemoryMessageStore(),
+ PubsubTopic: testPubsubTopic,
+ ContentTopics: common.NewTopicSetFromBytes([][]byte{contentTopicBytes}),
+ }
+
+ fID, err := w.Subscribe(filter)
+ require.NoError(t, err)
+
+ msgTimestamp := w.timestamp()
+ contentTopic := maps.Keys(filter.ContentTopics)[0]
+
+ _, err = w.Send(testPubsubTopic, &pb.WakuMessage{
+ Payload: []byte{1, 2, 3, 4, 5},
+ ContentTopic: contentTopic.ContentTopic(),
+ Version: proto.Uint32(0),
+ Timestamp: &msgTimestamp,
+ }, nil)
+ require.NoError(t, err)
+ time.Sleep(5 * time.Second)
+
+ // Ensure there is at least 1 active filter subscription
+ subscriptions := w.FilterLightnode().Subscriptions()
+ require.Greater(t, len(subscriptions), 0)
+
+ messages := filter.Retrieve()
+ require.Len(t, messages, 1)
+
+ // Mock peers going down
+ _, err = w.FilterLightnode().UnsubscribeWithSubscription(w.ctx, subscriptions[0])
+ require.NoError(t, err)
+
+ time.Sleep(10 * time.Second)
+
+ // Ensure there is at least 1 active filter subscription
+ subscriptions = w.FilterLightnode().Subscriptions()
+ require.Greater(t, len(subscriptions), 0)
+
+ // Ensure that messages are retrieved with a fresh sub
+ _, err = w.Send(testPubsubTopic, &pb.WakuMessage{
+ Payload: []byte{1, 2, 3, 4, 5, 6},
+ ContentTopic: contentTopic.ContentTopic(),
+ Version: proto.Uint32(0),
+ Timestamp: &msgTimestamp,
+ }, nil)
+ require.NoError(t, err)
+ time.Sleep(10 * time.Second)
+
+ messages = filter.Retrieve()
+ require.Len(t, messages, 1)
+ err = w.Unsubscribe(context.Background(), fID)
+ require.NoError(t, err)
+ require.NoError(t, w.Stop())
+}
+
+func TestWakuV2Store(t *testing.T) {
+ t.Skip("deprecated. Storenode must use nwaku")
+
+ // Configuration for the first Waku node
+ config1 := &Config{
+ Port: 0,
+ ClusterID: 16,
+ EnableDiscV5: false,
+ DiscoveryLimit: 20,
+ EnableStore: false,
+ StoreCapacity: 100,
+ StoreSeconds: 3600,
+ EnableMissingMessageVerification: true,
+ }
+ w1PeersCh := make(chan peer.IDSlice, 100) // buffered not to block on the send side
+
+ // Start the first Waku node
+ w1, err := New(nil, "", config1, nil, nil, nil, nil, func(cs types.ConnStatus) {
+ w1PeersCh <- maps.Keys(cs.Peers)
+ })
+ require.NoError(t, err)
+ require.NoError(t, w1.Start())
+ defer func() {
+ require.NoError(t, w1.Stop())
+ close(w1PeersCh)
+ }()
+
+ // Configuration for the second Waku node
+ sql2, err := helpers.SetupTestMemorySQLDB(appdatabase.DbInitializer{})
+ require.NoError(t, err)
+ config2 := &Config{
+ Port: 0,
+ ClusterID: 16,
+ EnableDiscV5: false,
+ DiscoveryLimit: 20,
+ EnableStore: true,
+ StoreCapacity: 100,
+ StoreSeconds: 3600,
+ }
+
+ // Start the second Waku node
+ w2, err := New(nil, "", config2, nil, sql2, nil, nil, nil)
+ require.NoError(t, err)
+ require.NoError(t, w2.Start())
+ w2EnvelopeCh := make(chan common.EnvelopeEvent, 100)
+ w2.SubscribeEnvelopeEvents(w2EnvelopeCh)
+ defer func() {
+ require.NoError(t, w2.Stop())
+ close(w2EnvelopeCh)
+ }()
+
+ // Connect the two nodes directly
+ peer2Addr, err := w2.ListenAddresses()
+ require.NoError(t, err)
+
+ err = w1.DialPeer(peer2Addr[0])
+ require.NoError(t, err)
+
+ // Create a filter for the second node to catch messages
+ filter := &common.Filter{
+ Messages: common.NewMemoryMessageStore(),
+ PubsubTopic: config2.DefaultShardPubsubTopic,
+ ContentTopics: common.NewTopicSetFromBytes([][]byte{{1, 2, 3, 4}}),
+ }
+
+ _, err = w2.Subscribe(filter)
+ require.NoError(t, err)
+
+ time.Sleep(2 * time.Second)
+
+ // Send a message from the first node
+ msgTimestamp := w1.CurrentTime().UnixNano()
+ contentTopic := maps.Keys(filter.ContentTopics)[0]
+ _, err = w1.Send(config1.DefaultShardPubsubTopic, &pb.WakuMessage{
+ Payload: []byte{1, 2, 3, 4, 5},
+ ContentTopic: contentTopic.ContentTopic(),
+ Version: proto.Uint32(0),
+ Timestamp: &msgTimestamp,
+ }, nil)
+ require.NoError(t, err)
+
+ waitForEnvelope(t, contentTopic.ContentTopic(), w2EnvelopeCh)
+
+ // Retrieve the message from the second node's filter
+ messages := filter.Retrieve()
+ require.Len(t, messages, 1)
+
+ timestampInSeconds := msgTimestamp / int64(time.Second)
+ marginInSeconds := 5
+ // Query the second node's store for the message
+ _, envelopeCount, err := w1.Query(
+ context.Background(),
+ w2.Host().ID(),
+ store.FilterCriteria{
+ TimeStart: proto.Int64((timestampInSeconds - int64(marginInSeconds)) * int64(time.Second)),
+ TimeEnd: proto.Int64((timestampInSeconds + int64(marginInSeconds)) * int64(time.Second)),
+ ContentFilter: protocol.NewContentFilter(config1.DefaultShardPubsubTopic, contentTopic.ContentTopic()),
+ },
+ nil,
+ nil,
+ false,
+ )
+ require.NoError(t, err)
+ require.True(t, envelopeCount > 0, "no messages received from store node")
+}
+
+func waitForPeerConnection(t *testing.T, peerID peer.ID, peerCh chan peer.IDSlice) {
+ waitForPeerConnectionWithTimeout(t, peerID, peerCh, 3*time.Second)
+}
+
+func waitForPeerConnectionWithTimeout(t *testing.T, peerID peer.ID, peerCh chan peer.IDSlice, timeout time.Duration) {
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+ for {
+ select {
+ case peers := <-peerCh:
+ for _, p := range peers {
+ if p == peerID {
+ return
+ }
+ }
+ case <-ctx.Done():
+ require.Fail(t, "timed out waiting for peer "+peerID.String())
+ return
+ }
+ }
+}
+
+func waitForEnvelope(t *testing.T, contentTopic string, envCh chan common.EnvelopeEvent) {
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel()
+ for {
+ select {
+ case env := <-envCh:
+ if env.Topic.ContentTopic() == contentTopic {
+ return
+ }
+ case <-ctx.Done():
+ require.Fail(t, "timed out waiting for envelope's topic "+contentTopic)
+ return
+ }
+ }
+}
+
+func TestOnlineChecker(t *testing.T) {
+ w, err := New(nil, "shards.staging", nil, nil, nil, nil, nil, nil)
+ require.NoError(t, w.Start())
+
+ require.NoError(t, err)
+ require.False(t, w.onlineChecker.IsOnline())
+
+ w.ConnectionChanged(connection.State{Offline: false})
+ require.True(t, w.onlineChecker.IsOnline())
+
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ <-w.goingOnline
+ require.True(t, true)
+ }()
+
+ time.Sleep(100 * time.Millisecond)
+
+ w.ConnectionChanged(connection.State{Offline: true})
+ require.False(t, w.onlineChecker.IsOnline())
+
+ // Test lightnode online checker
+ config := &Config{}
+ config.ClusterID = 16
+ config.LightClient = true
+ lightNode, err := New(nil, "shards.staging", config, nil, nil, nil, nil, nil)
+ require.NoError(t, err)
+
+ err = lightNode.Start()
+ require.NoError(t, err)
+
+ require.False(t, lightNode.onlineChecker.IsOnline())
+ f := &common.Filter{}
+ lightNode.filterManager.SubscribeFilter("test", protocol.NewContentFilter(f.PubsubTopic, f.ContentTopics.ContentTopics()...))
+
+}
+
+func TestLightpushRateLimit(t *testing.T) {
+ logger, err := zap.NewDevelopment()
+ require.NoError(t, err)
+
+ config0 := &Config{}
+ setDefaultConfig(config0, false)
+ w0PeersCh := make(chan peer.IDSlice, 5) // buffered not to block on the send side
+
+	// Start the relay node
+ w0, err := New(nil, "", config0, logger.Named("relayNode"), nil, nil, nil, func(cs types.ConnStatus) {
+ w0PeersCh <- maps.Keys(cs.Peers)
+ })
+ require.NoError(t, err)
+ require.NoError(t, w0.Start())
+ defer func() {
+ require.NoError(t, w0.Stop())
+ close(w0PeersCh)
+ }()
+
+ contentTopics := common.NewTopicSetFromBytes([][]byte{{1, 2, 3, 4}})
+ filter := &common.Filter{
+ PubsubTopic: config0.DefaultShardPubsubTopic,
+ Messages: common.NewMemoryMessageStore(),
+ ContentTopics: contentTopics,
+ }
+
+ _, err = w0.Subscribe(filter)
+ require.NoError(t, err)
+
+ config1 := &Config{}
+ setDefaultConfig(config1, false)
+ w1PeersCh := make(chan peer.IDSlice, 5) // buffered not to block on the send side
+
+ // Start the full node
+ w1, err := New(nil, "", config1, logger.Named("fullNode"), nil, nil, nil, func(cs types.ConnStatus) {
+ w1PeersCh <- maps.Keys(cs.Peers)
+ })
+ require.NoError(t, err)
+ require.NoError(t, w1.Start())
+ defer func() {
+ require.NoError(t, w1.Stop())
+ close(w1PeersCh)
+ }()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ //Connect the relay peer and full node
+ err = w1.DialPeer(ctx, w0.ListenAddresses()[0].String())
+ require.NoError(t, err)
+
+ err = tt.RetryWithBackOff(func() error {
+ if len(w1.Peers()) == 0 {
+ return errors.New("no peers discovered")
+ }
+ return nil
+ })
+ require.NoError(t, err)
+
+ config2 := &Config{}
+ setDefaultConfig(config2, true)
+ w2PeersCh := make(chan peer.IDSlice, 5) // buffered not to block on the send side
+
+ // Start the light node
+ w2, err := New(nil, "", config2, logger.Named("lightNode"), nil, nil, nil, func(cs types.ConnStatus) {
+ w2PeersCh <- maps.Keys(cs.Peers)
+ })
+ require.NoError(t, err)
+ require.NoError(t, w2.Start())
+ defer func() {
+ require.NoError(t, w2.Stop())
+ close(w2PeersCh)
+ }()
+
+	// Use this instead of DialPeer to make sure the peer is added to PeerStore and can be selected for Lightpush
+ w2.AddDiscoveredPeer(w1.PeerID(), w1.ListenAddresses(), wps.Static, w1.cfg.DefaultShardedPubsubTopics, w1.node.ENR(), true)
+
+ waitForPeerConnectionWithTimeout(t, w2.Host().ID(), w1PeersCh, 5*time.Second)
+
+ event := make(chan common.EnvelopeEvent, 10)
+ w2.SubscribeEnvelopeEvents(event)
+
+ for i := range [4]int{} {
+ msgTimestamp := w2.timestamp()
+ _, err := w2.Send(config2.DefaultShardPubsubTopic, &pb.WakuMessage{
+ Payload: []byte{1, 2, 3, 4, 5, 6, byte(i)},
+ ContentTopic: maps.Keys(contentTopics)[0].ContentTopic(),
+ Version: proto.Uint32(0),
+ Timestamp: &msgTimestamp,
+ }, nil)
+
+ require.NoError(t, err)
+
+ time.Sleep(550 * time.Millisecond)
+
+ }
+
+ messages := filter.Retrieve()
+ require.Len(t, messages, 2)
+
+}
+
+func TestTelemetryFormat(t *testing.T) {
+ logger, err := zap.NewDevelopment()
+ require.NoError(t, err)
+
+ tc := NewBandwidthTelemetryClient(logger, "#")
+
+ s := metrics.Stats{
+ TotalIn: 10,
+ TotalOut: 20,
+ RateIn: 30,
+ RateOut: 40,
+ }
+
+ m := make(map[libp2pprotocol.ID]metrics.Stats)
+ m[relay.WakuRelayID_v200] = s
+ m[filter.FilterPushID_v20beta1] = s
+ m[filter.FilterSubscribeID_v20beta1] = s
+ m[legacy_store.StoreID_v20beta4] = s
+ m[lightpush.LightPushID_v20beta1] = s
+
+ requestBody := tc.getTelemetryRequestBody(m)
+ _, err = json.Marshal(requestBody)
+ require.NoError(t, err)
+}
+*/
diff --git a/wakuv2/nwaku_test_utils.go b/wakuv2/nwaku_test_utils.go
new file mode 100644
index 000000000..ed9f3e80c
--- /dev/null
+++ b/wakuv2/nwaku_test_utils.go
@@ -0,0 +1,58 @@
+package wakuv2
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "strconv"
+)
+
+type NwakuInfo struct {
+ ListenAddresses []string `json:"listenAddresses"`
+ EnrUri string `json:"enrUri"`
+}
+
+func GetNwakuInfo(host *string, port *int) (NwakuInfo, error) {
+ nwakuRestPort := 8645
+ if port != nil {
+ nwakuRestPort = *port
+ }
+ envNwakuRestPort := os.Getenv("NWAKU_REST_PORT")
+ if envNwakuRestPort != "" {
+ v, err := strconv.Atoi(envNwakuRestPort)
+ if err != nil {
+ return NwakuInfo{}, err
+ }
+ nwakuRestPort = v
+ }
+
+ nwakuRestHost := "localhost"
+ if host != nil {
+ nwakuRestHost = *host
+ }
+ envNwakuRestHost := os.Getenv("NWAKU_REST_HOST")
+ if envNwakuRestHost != "" {
+ nwakuRestHost = envNwakuRestHost
+ }
+
+ resp, err := http.Get(fmt.Sprintf("http://%s:%d/debug/v1/info", nwakuRestHost, nwakuRestPort))
+ if err != nil {
+ return NwakuInfo{}, err
+ }
+ defer resp.Body.Close()
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return NwakuInfo{}, err
+ }
+
+ var data NwakuInfo
+ err = json.Unmarshal(body, &data)
+ if err != nil {
+ return NwakuInfo{}, err
+ }
+
+ return data, nil
+}
diff --git a/wakuv2/tracer.go b/wakuv2/tracer.go
index e34894de1..163d8f7b6 100644
--- a/wakuv2/tracer.go
+++ b/wakuv2/tracer.go
@@ -11,7 +11,7 @@ import (
// Trace implements EventTracer interface.
// We use custom logging, because we want to base58-encode the peerIDs. And also make the messageIDs readable.
-func (w *NWaku) Trace(evt *pubsub_pb.TraceEvent) {
+func (w *Waku) Trace(evt *pubsub_pb.TraceEvent) {
f := []zap.Field{
zap.String("type", evt.Type.String()),
diff --git a/wakuv2/waku_test.go b/wakuv2/waku_test.go
index 778344498..f7a19dc14 100644
--- a/wakuv2/waku_test.go
+++ b/wakuv2/waku_test.go
@@ -1,3 +1,6 @@
+//go:build !use_nwaku
+// +build !use_nwaku
+
package wakuv2
import (
@@ -352,10 +355,7 @@ func TestPeerExchange(t *testing.T) {
config.EnableDiscV5 = true
config.EnablePeerExchangeServer = false
config.EnablePeerExchangeClient = false
- enr, err := pxServerNode.ENR()
- require.NoError(t, err)
-
- config.DiscV5BootstrapNodes = []string{enr.String()}
+ config.DiscV5BootstrapNodes = []string{pxServerNode.node.ENR().String()}
discV5Node, err := New(nil, "", config, logger.Named("discV5Node"), nil, nil, nil, nil)
require.NoError(t, err)
require.NoError(t, discV5Node.Start())
@@ -363,7 +363,7 @@ func TestPeerExchange(t *testing.T) {
time.Sleep(1 * time.Second)
// start light node which use PeerExchange to discover peers
- enrNodes := []*enode.Node{enr}
+ enrNodes := []*enode.Node{pxServerNode.node.ENR()}
tree, url := makeTestTree("n", enrNodes, nil)
resolver := mapResolver(tree.ToTXT("n"))
@@ -388,23 +388,17 @@ func TestPeerExchange(t *testing.T) {
// in light client mode,the peer will be closed via `w.node.Host().Network().ClosePeer(peerInfo.ID)`
// after invoking identifyAndConnect, instead, we should check the peerStore, peers from peerStore
// won't get deleted especially if they are statically added.
- numConnected, err := lightNode.GetNumConnectedPeers()
- if err != nil {
- return err
- }
- if numConnected == 2 {
+ if len(lightNode.node.Host().Peerstore().Peers()) == 2 {
return nil
}
return errors.New("no peers discovered")
}, options)
require.NoError(t, err)
- _, cancel := context.WithCancel(context.Background())
+ ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- _, err = discV5Node.WakuPeerExchangeRequest(1)
- require.NoError(t, err)
- _, err = discV5Node.WakuPeerExchangeRequest(1)
- require.Error(t, err) //should fail due to rate limit
+ require.NoError(t, discV5Node.node.PeerExchange().Request(ctx, 1))
+ require.Error(t, discV5Node.node.PeerExchange().Request(ctx, 1)) //should fail due to rate limit
require.NoError(t, lightNode.Stop())
require.NoError(t, pxServerNode.Stop())
@@ -439,7 +433,7 @@ func TestWakuV2Filter(t *testing.T) {
// Sanity check, not great, but it's probably helpful
err = tt.RetryWithBackOff(func() error {
- peers, err := w.GetPeerIdsByProtocol(string(filter.FilterSubscribeID_v20beta1))
+ peers, err := w.node.PeerManager().FilterPeersByProto(nil, nil, filter.FilterSubscribeID_v20beta1)
if err != nil {
return err
}
@@ -475,20 +469,20 @@ func TestWakuV2Filter(t *testing.T) {
time.Sleep(5 * time.Second)
// Ensure there is at least 1 active filter subscription
- subscriptions := w.FilterLightnode().Subscriptions()
+ subscriptions := w.node.FilterLightnode().Subscriptions()
require.Greater(t, len(subscriptions), 0)
messages := filter.Retrieve()
require.Len(t, messages, 1)
// Mock peers going down
- _, err = w.FilterLightnode().UnsubscribeWithSubscription(w.ctx, subscriptions[0])
+ _, err = w.node.FilterLightnode().UnsubscribeWithSubscription(w.ctx, subscriptions[0])
require.NoError(t, err)
time.Sleep(10 * time.Second)
// Ensure there is at least 1 active filter subscription
- subscriptions = w.FilterLightnode().Subscriptions()
+ subscriptions = w.node.FilterLightnode().Subscriptions()
require.Greater(t, len(subscriptions), 0)
// Ensure that messages are retrieved with a fresh sub
@@ -562,10 +556,11 @@ func TestWakuV2Store(t *testing.T) {
// Connect the two nodes directly
peer2Addr, err := w2.ListenAddresses()
require.NoError(t, err)
-
- err = w1.DialPeer(peer2Addr[0])
+ err = w1.node.DialPeer(context.Background(), peer2Addr[0].String())
require.NoError(t, err)
+ waitForPeerConnection(t, w2.node.Host().ID(), w1PeersCh)
+
// Create a filter for the second node to catch messages
filter := &common.Filter{
Messages: common.NewMemoryMessageStore(),
@@ -600,7 +595,7 @@ func TestWakuV2Store(t *testing.T) {
// Query the second node's store for the message
_, envelopeCount, err := w1.Query(
context.Background(),
- w2.Host().ID(),
+ w2.node.Host().ID(),
store.FilterCriteria{
TimeStart: proto.Int64((timestampInSeconds - int64(marginInSeconds)) * int64(time.Second)),
TimeEnd: proto.Int64((timestampInSeconds + int64(marginInSeconds)) * int64(time.Second)),
@@ -738,7 +733,9 @@ func TestLightpushRateLimit(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
//Connect the relay peer and full node
- err = w1.DialPeer(ctx, w0.ListenAddresses()[0].String())
+ peerAddr, err := w0.ListenAddresses()
+ require.NoError(t, err)
+ err = w1.node.DialPeer(ctx, peerAddr[0].String())
require.NoError(t, err)
err = tt.RetryWithBackOff(func() error {
@@ -765,9 +762,11 @@ func TestLightpushRateLimit(t *testing.T) {
}()
//Use this instead of DialPeer to make sure the peer is added to PeerStore and can be selected for Lighpush
- w2.AddDiscoveredPeer(w1.PeerID(), w1.ListenAddresses(), wps.Static, w1.cfg.DefaultShardedPubsubTopics, w1.node.ENR(), true)
+ addresses, err := w1.ListenAddresses()
+ require.NoError(t, err)
+ w2.node.AddDiscoveredPeer(w1.PeerID(), addresses, wps.Static, w1.cfg.DefaultShardedPubsubTopics, w1.node.ENR(), true)
- waitForPeerConnectionWithTimeout(t, w2.Host().ID(), w1PeersCh, 5*time.Second)
+ waitForPeerConnectionWithTimeout(t, w2.node.Host().ID(), w1PeersCh, 5*time.Second)
event := make(chan common.EnvelopeEvent, 10)
w2.SubscribeEnvelopeEvents(event)