feat_: build nwaku with nix and use build tags to choose between go-waku and nwaku (#5896)
commit 1f7d42acb0 (parent 2062fc663f)
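This change builds nwaku inside the Nix shell and introduces the use_nwaku build tag to select the nwaku-backed implementation at compile time; without the tag, the existing go-waku implementation is compiled. A sketch of a build invocation under that assumption (the tags are taken from the gopls settings below; the package list is illustrative):

    go build -tags 'use_nwaku gowaku_skip_migrations gowaku_no_rln' ./...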
.gitignore
@@ -64,7 +64,6 @@ coverage.html
 Session.vim
 .undodir/*
 /.idea/
-/.vscode/
 /cmd/*/.ethereum/
 *.iml
 
.gitmodules
@@ -0,0 +1,3 @@
+[submodule "third_party/nwaku"]
+	path = third_party/nwaku
+	url = https://github.com/waku-org/nwaku
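The entry above matches what git itself writes when the submodule is registered; the setup steps would look like this (standard git commands, not part of the diff):

    git submodule add https://github.com/waku-org/nwaku third_party/nwaku
    git submodule update --init --recursive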
.vscode/settings.json
@@ -8,4 +8,7 @@
   "cSpell.words": [
     "unmarshalling"
   ],
+  "gopls":{
+    "buildFlags": ["-tags=use_nwaku,gowaku_skip_migrations,gowaku_no_rln"]
+  }
 }
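Without matching buildFlags, gopls skips files guarded by //go:build use_nwaku and cannot type-check the nwaku-backed code. The same tags work with ordinary Go tooling; an illustrative check (command assumed, not shown in the diff):

    go vet -tags=use_nwaku,gowaku_skip_migrations,gowaku_no_rln ./wakuv2/...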
Makefile | 20
@@ -1,5 +1,6 @@
 .PHONY: statusgo statusd-prune all test clean help
 .PHONY: statusgo-android statusgo-ios
+.PHONY: build-libwaku test-libwaku clean-libwaku rebuild-libwaku
 
 # Clear any GOROOT set outside of the Nix shell
 export GOROOT=
@@ -355,9 +356,28 @@ mock: ##@other Regenerate mocks
 	mockgen -package=mock_paraswap -destination=services/wallet/thirdparty/paraswap/mock/types.go -source=services/wallet/thirdparty/paraswap/types.go
 	mockgen -package=mock_onramp -destination=services/wallet/onramp/mock/types.go -source=services/wallet/onramp/types.go
 
+LIBWAKU := third_party/nwaku/build/libwaku.$(GOBIN_SHARED_LIB_EXT)
+
+$(LIBWAKU):
+	echo "Building libwaku"
+	$(MAKE) -C third_party/nwaku update || { echo "nwaku make update failed"; exit 1; }
+	$(MAKE) -C ./third_party/nwaku libwaku
+
+build-libwaku: $(LIBWAKU)
+
 docker-test: ##@tests Run tests in a docker container with golang.
 	docker run --privileged --rm -it -v "$(PWD):$(DOCKER_TEST_WORKDIR)" -w "$(DOCKER_TEST_WORKDIR)" $(DOCKER_TEST_IMAGE) go test ${ARGS}
 
+test-libwaku: | $(LIBWAKU)
+	# TODO: check whether nwaku is running
+	# otherwise launch it
+	go test -tags '$(BUILD_TAGS) use_nwaku' -run TestBasicWakuV2 ./wakuv2/... -count 1 -v -json | jq -r '.Output'
+
+clean-libwaku:
+	echo "Removing libwaku"
+	rm $(LIBWAKU)
+
+rebuild-libwaku: | clean-libwaku $(LIBWAKU)
+
 test: test-unit ##@tests Run basic, short tests during development
 
 test-unit: export BUILD_TAGS ?=
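With these targets in place, a plausible local workflow (the target names are defined above; the sequencing is illustrative):

    make build-libwaku    # builds the nwaku submodule and produces libwaku
    make test-libwaku     # runs TestBasicWakuV2 against the nwaku backend
    make rebuild-libwaku  # clean and rebuild after updating the submodule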
@@ -19,10 +19,10 @@ import (
 type gethNodeWrapper struct {
 	stack *node.Node
 	waku1 *waku.Waku
-	waku2 *wakuv2.NWaku
+	waku2 *wakuv2.Waku
 }
 
-func NewNodeBridge(stack *node.Node, waku1 *waku.Waku, waku2 *wakuv2.NWaku) types.Node {
+func NewNodeBridge(stack *node.Node, waku1 *waku.Waku, waku2 *wakuv2.Waku) types.Node {
 	return &gethNodeWrapper{stack: stack, waku1: waku1, waku2: waku2}
 }
 
@@ -38,7 +38,7 @@ func (w *gethNodeWrapper) SetWaku1(waku *waku.Waku) {
 	w.waku1 = waku
 }
 
-func (w *gethNodeWrapper) SetWaku2(waku *wakuv2.NWaku) {
+func (w *gethNodeWrapper) SetWaku2(waku *wakuv2.Waku) {
 	w.waku2 = waku
 }
 
@@ -22,11 +22,11 @@ import (
 )
 
 type gethWakuV2Wrapper struct {
-	waku *wakuv2.NWaku
+	waku *wakuv2.Waku
 }
 
 // NewGethWakuWrapper returns an object that wraps Geth's Waku in a types interface
-func NewGethWakuV2Wrapper(w *wakuv2.NWaku) types.Waku {
+func NewGethWakuV2Wrapper(w *wakuv2.Waku) types.Waku {
 	if w == nil {
 		panic("waku cannot be nil")
 	}
@@ -37,7 +37,7 @@ func NewGethWakuV2Wrapper(w *wakuv2.NWaku) types.Waku {
 }
 
 // GetGethWhisperFrom retrieves the underlying whisper Whisper struct from a wrapped Whisper interface
-func GetGethWakuV2From(m types.Waku) *wakuv2.NWaku {
+func GetGethWakuV2From(m types.Waku) *wakuv2.Waku {
 	return m.(*gethWakuV2Wrapper).waku
 }
 
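Renaming NWaku back to Waku lets wrappers like the ones above reference a single type name regardless of backend; the build tag decides which definition is compiled in. A minimal sketch of the pattern (file names and struct bodies are illustrative, not the actual source):

    //go:build use_nwaku

    package wakuv2

    // Waku is backed by the nwaku C library when built with -tags use_nwaku.
    type Waku struct{ /* nwaku-specific state */ }

and in a sibling file guarded by the inverse tag:

    //go:build !use_nwaku

    package wakuv2

    // Waku is backed by go-waku in the default build.
    type Waku struct{ /* go-waku-specific state */ }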
shell.nix
@@ -21,7 +21,7 @@ in pkgs.mkShell {
 
   buildInputs = with pkgs; [
     git jq which
-    go golangci-lint go-junit-report gopls go-bindata gomobileMod
+    go golangci-lint go-junit-report gopls go-bindata gomobileMod openssl
     mockgen protobuf3_20 protoc-gen-go gotestsum go-modvendor openjdk cc-test-reporter
   ] ++ lib.optionals (stdenv.isDarwin) [ xcodeWrapper ];
 
@@ -122,7 +122,7 @@ type StatusNode struct {
 	// nwakuSrvc *
 	wakuSrvc            *waku.Waku
 	wakuExtSrvc         *wakuext.Service
-	wakuV2Srvc          *wakuv2.NWaku
+	wakuV2Srvc          *wakuv2.Waku
 	wakuV2ExtSrvc       *wakuv2ext.Service
 	ensSrvc             *ens.Service
 	communityTokensSrvc *communitytokens.Service
@@ -263,7 +263,7 @@ func (b *StatusNode) WakuExtService() *wakuext.Service {
 func (b *StatusNode) WakuV2ExtService() *wakuv2ext.Service {
 	return b.wakuV2ExtSrvc
 }
-func (b *StatusNode) WakuV2Service() *wakuv2.NWaku {
+func (b *StatusNode) WakuV2Service() *wakuv2.Waku {
 	return b.wakuV2Srvc
 }
 
@@ -315,7 +315,7 @@ func (b *StatusNode) wakuService(wakuCfg *params.WakuConfig, clusterCfg *params.
 
 }
 
-func (b *StatusNode) wakuV2Service(nodeConfig *params.NodeConfig) (*wakuv2.NWaku, error) {
+func (b *StatusNode) wakuV2Service(nodeConfig *params.NodeConfig) (*wakuv2.Waku, error) {
 	if b.wakuV2Srvc == nil {
 		cfg := &wakuv2.Config{
 			MaxMessageSize: wakucommon.DefaultMaxMessageSize,
@@ -15,13 +15,13 @@ import (
 	"github.com/status-im/status-go/eth-node/crypto"
 	"github.com/status-im/status-go/eth-node/types"
 	"github.com/status-im/status-go/protocol/common"
-	"github.com/status-im/status-go/protocol/common/shard"
 	"github.com/status-im/status-go/protocol/communities/token"
 	"github.com/status-im/status-go/protocol/encryption"
 	"github.com/status-im/status-go/protocol/protobuf"
 	"github.com/status-im/status-go/protocol/sqlite"
 	"github.com/status-im/status-go/services/wallet/bigint"
 	"github.com/status-im/status-go/t/helpers"
+	"github.com/status-im/status-go/wakuv2"
 )
 
 func TestPersistenceSuite(t *testing.T) {
@@ -787,7 +787,7 @@ func (s *PersistenceSuite) TestSaveShardInfo() {
 	s.Require().Nil(resultShard)
 
 	// not nil shard
-	expectedShard := &shard.Shard{
+	expectedShard := &wakuv2.Shard{
 		Cluster: 1,
 		Index:   2,
 	}
@@ -25,13 +25,13 @@ import (
 	"github.com/status-im/status-go/eth-node/types"
 	"github.com/status-im/status-go/params"
 	"github.com/status-im/status-go/protocol/common"
-	"github.com/status-im/status-go/protocol/common/shard"
 	"github.com/status-im/status-go/protocol/communities"
 	"github.com/status-im/status-go/protocol/protobuf"
 	"github.com/status-im/status-go/protocol/requests"
 	"github.com/status-im/status-go/protocol/transport"
 	"github.com/status-im/status-go/protocol/tt"
 	"github.com/status-im/status-go/services/wallet/thirdparty"
+	"github.com/status-im/status-go/wakuv2"
 )
 
 const testChainID1 = 1
@@ -488,11 +488,12 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestBecomeMemberPermissions(
 	cfg := testWakuV2Config{
 		logger:      s.logger.Named("store-node-waku"),
 		enableStore: false,
-		clusterID:   shard.MainStatusShardCluster,
+		clusterID:   wakuv2.MainStatusShardCluster,
 	}
 	wakuStoreNode := NewTestWakuV2(&s.Suite, cfg)
 
-	storeNodeListenAddresses := wakuStoreNode.ListenAddresses()
+	storeNodeListenAddresses, err := wakuStoreNode.ListenAddresses()
+	s.Require().NoError(err)
 	s.Require().LessOrEqual(1, len(storeNodeListenAddresses))
 
 	storeNodeAddress := storeNodeListenAddresses[0]
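ListenAddresses now returns (addresses, error) rather than a bare slice, since the nwaku backend obtains the addresses through a fallible library call; call sites assert the error before indexing. The updated pattern, as used in the test above (node stands for any *wakuv2.Waku):

    addrs, err := node.ListenAddresses()
    s.Require().NoError(err)
    s.Require().NotEmpty(addrs)
    first := addrs[0]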
@@ -12,11 +12,11 @@ import (
 	gethbridge "github.com/status-im/status-go/eth-node/bridge/geth"
 	"github.com/status-im/status-go/eth-node/types"
 	"github.com/status-im/status-go/protocol/common"
-	"github.com/status-im/status-go/protocol/common/shard"
 	"github.com/status-im/status-go/protocol/communities"
 	"github.com/status-im/status-go/protocol/protobuf"
 	"github.com/status-im/status-go/protocol/requests"
 	"github.com/status-im/status-go/protocol/tt"
+	"github.com/status-im/status-go/wakuv2"
 )
 
 func TestMessengerCommunitiesShardingSuite(t *testing.T) {
@@ -108,7 +108,7 @@ func (s *MessengerCommunitiesShardingSuite) TearDownTest() {
 	_ = s.logger.Sync()
 }
 
-func (s *MessengerCommunitiesShardingSuite) testPostToCommunityChat(shard *shard.Shard, community *communities.Community, chat *Chat) {
+func (s *MessengerCommunitiesShardingSuite) testPostToCommunityChat(shard *wakuv2.Shard, community *communities.Community, chat *Chat) {
 	_, err := s.owner.SetCommunityShard(&requests.SetCommunityShard{
 		CommunityID: community.ID(),
 		Shard:       shard,
@@ -144,8 +144,8 @@ func (s *MessengerCommunitiesShardingSuite) TestPostToCommunityChat() {
 
 	// Members should be able to receive messages in a community with sharding enabled.
 	{
-		shard := &shard.Shard{
-			Cluster: shard.MainStatusShardCluster,
+		shard := &wakuv2.Shard{
+			Cluster: wakuv2.MainStatusShardCluster,
 			Index:   128,
 		}
 		s.testPostToCommunityChat(shard, community, chat)
@@ -153,8 +153,8 @@ func (s *MessengerCommunitiesShardingSuite) TestPostToCommunityChat() {
 
 	// Members should be able to receive messages in a community where the sharding configuration has been edited.
 	{
-		shard := &shard.Shard{
-			Cluster: shard.MainStatusShardCluster,
+		shard := &wakuv2.Shard{
+			Cluster: wakuv2.MainStatusShardCluster,
 			Index:   256,
 		}
 		s.testPostToCommunityChat(shard, community, chat)
@@ -162,8 +162,8 @@ func (s *MessengerCommunitiesShardingSuite) TestPostToCommunityChat() {
 
 	// Members should continue to receive messages in a community if it is moved back to default shard.
 	{
-		shard := &shard.Shard{
-			Cluster: shard.MainStatusShardCluster,
+		shard := &wakuv2.Shard{
+			Cluster: wakuv2.MainStatusShardCluster,
 			Index:   32,
 		}
 		s.testPostToCommunityChat(shard, community, chat)
@@ -176,8 +176,8 @@ func (s *MessengerCommunitiesShardingSuite) TestIgnoreOutdatedShardKey() {
 	advertiseCommunityToUserOldWay(&s.Suite, community, s.owner, s.alice)
 	joinCommunity(&s.Suite, community.ID(), s.owner, s.alice, alicePassword, []string{aliceAddress1})
 
-	shard := &shard.Shard{
-		Cluster: shard.MainStatusShardCluster,
+	shard := &wakuv2.Shard{
+		Cluster: wakuv2.MainStatusShardCluster,
 		Index:   128,
 	}
 
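Throughout the tests, shard.Shard and shard.MainStatusShardCluster are replaced by their new homes in the wakuv2 package, dropping the dependency on protocol/common/shard. The construction itself is unchanged:

    s := &wakuv2.Shard{
        Cluster: wakuv2.MainStatusShardCluster,
        Index:   128,
    }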
@@ -114,7 +114,7 @@ type config struct {
 
 	telemetryServerURL  string
 	telemetrySendPeriod time.Duration
-	wakuService         *wakuv2.NWaku
+	wakuService         *wakuv2.Waku
 
 	messageResendMinDelay time.Duration
 	messageResendMaxCount int
@@ -387,7 +387,7 @@ func WithCommunityTokensService(s communities.CommunityTokensServiceInterface) O
 	}
 }
 
-func WithWakuService(s *wakuv2.NWaku) Option {
+func WithWakuService(s *wakuv2.Waku) Option {
 	return func(c *config) error {
 		c.wakuService = s
 		return nil
@@ -10,9 +10,9 @@ import (
 	"github.com/multiformats/go-multiaddr"
 
 	"github.com/status-im/status-go/protocol/storenodes"
+	"github.com/status-im/status-go/wakuv2"
 
 	gethbridge "github.com/status-im/status-go/eth-node/bridge/geth"
-	"github.com/status-im/status-go/protocol/common/shard"
 	"github.com/status-im/status-go/protocol/communities"
 	"github.com/status-im/status-go/protocol/tt"
 
@@ -92,11 +92,12 @@ func (s *MessengerStoreNodeCommunitySuite) createStore(name string) (*waku2.Waku
 	cfg := testWakuV2Config{
 		logger:      s.logger.Named(name),
 		enableStore: true,
-		clusterID:   shard.MainStatusShardCluster,
+		clusterID:   wakuv2.MainStatusShardCluster,
 	}
 
 	storeNode := NewTestWakuV2(&s.Suite, cfg)
-	addresses := storeNode.ListenAddresses()
+	addresses, err := storeNode.ListenAddresses()
+	s.Require().NoError(err)
 	s.Require().GreaterOrEqual(len(addresses), 1, "no storenode listen address")
 	return storeNode, addresses[0]
 }
@@ -109,7 +110,7 @@ func (s *MessengerStoreNodeCommunitySuite) newMessenger(name string, storenodeAd
 	cfg := testWakuV2Config{
 		logger:      logger,
 		enableStore: false,
-		clusterID:   shard.MainStatusShardCluster,
+		clusterID:   wakuv2.MainStatusShardCluster,
 	}
 	wakuV2 := NewTestWakuV2(&s.Suite, cfg)
 	wakuV2Wrapper := gethbridge.NewGethWakuV2Wrapper(wakuV2)
@@ -24,7 +24,6 @@ import (
 	"github.com/status-im/status-go/multiaccounts/accounts"
 	"github.com/status-im/status-go/params"
 	"github.com/status-im/status-go/protocol/common"
-	"github.com/status-im/status-go/protocol/common/shard"
 	"github.com/status-im/status-go/protocol/communities"
 	"github.com/status-im/status-go/protocol/communities/token"
 	"github.com/status-im/status-go/protocol/protobuf"
@@ -34,6 +33,7 @@ import (
 	mailserversDB "github.com/status-im/status-go/services/mailservers"
 	"github.com/status-im/status-go/services/wallet/bigint"
 	"github.com/status-im/status-go/t/helpers"
+	"github.com/status-im/status-go/wakuv2"
 	waku2 "github.com/status-im/status-go/wakuv2"
 	wakuV2common "github.com/status-im/status-go/wakuv2/common"
 )
@@ -160,7 +160,7 @@ func (s *MessengerStoreNodeRequestSuite) createStore() {
 	cfg := testWakuV2Config{
 		logger:      s.logger.Named("store-waku"),
 		enableStore: true,
-		clusterID:   shard.MainStatusShardCluster,
+		clusterID:   wakuv2.MainStatusShardCluster,
 	}
 
 	s.wakuStoreNode = NewTestWakuV2(&s.Suite, cfg)
@@ -178,7 +178,7 @@ func (s *MessengerStoreNodeRequestSuite) createOwner() {
 	cfg := testWakuV2Config{
 		logger:      s.logger.Named("owner-waku"),
 		enableStore: false,
-		clusterID:   shard.MainStatusShardCluster,
+		clusterID:   wakuv2.MainStatusShardCluster,
 	}
 
 	wakuV2 := NewTestWakuV2(&s.Suite, cfg)
@@ -199,7 +199,7 @@ func (s *MessengerStoreNodeRequestSuite) createBob() {
 	cfg := testWakuV2Config{
 		logger:      s.logger.Named("bob-waku"),
 		enableStore: false,
-		clusterID:   shard.MainStatusShardCluster,
+		clusterID:   wakuv2.MainStatusShardCluster,
 	}
 	wakuV2 := NewTestWakuV2(&s.Suite, cfg)
 	s.bobWaku = gethbridge.NewGethWakuV2Wrapper(wakuV2)
@@ -366,7 +366,8 @@ func (s *MessengerStoreNodeRequestSuite) waitForEnvelopes(subscription <-chan st
 }
 
 func (s *MessengerStoreNodeRequestSuite) wakuListenAddress(waku *waku2.Waku) multiaddr.Multiaddr {
-	addresses := waku.ListenAddresses()
+	addresses, err := waku.ListenAddresses()
+	s.Require().NoError(err)
 	s.Require().LessOrEqual(1, len(addresses))
 	return addresses[0]
 }
@@ -696,8 +697,8 @@ func (s *MessengerStoreNodeRequestSuite) TestRequestShardAndCommunityInfo() {
 	topicPrivKey, err := crypto.GenerateKey()
 	s.Require().NoError(err)
 
-	expectedShard := &shard.Shard{
-		Cluster: shard.MainStatusShardCluster,
+	expectedShard := &wakuv2.Shard{
+		Cluster: wakuv2.MainStatusShardCluster,
 		Index:   23,
 	}
 
@@ -842,7 +843,7 @@ type testFetchRealCommunityExampleTokenInfo struct {
 var testFetchRealCommunityExample = []struct {
 	CommunityID          string
 	CommunityURL         string // If set, takes precedence over CommunityID
-	CommunityShard       *shard.Shard // WARNING: I didn't test a sharded community
+	CommunityShard       *wakuv2.Shard // WARNING: I didn't test a sharded community
 	Fleet                string
 	ClusterID            uint16
 	UserPrivateKeyString string // When empty a new user will be created
@@ -863,14 +864,14 @@ var testFetchRealCommunityExample = []struct {
 		CommunityID:    "0x03073514d4c14a7d10ae9fc9b0f05abc904d84166a6ac80add58bf6a3542a4e50a",
 		CommunityShard: nil,
 		Fleet:          params.FleetStatusProd,
-		ClusterID:      shard.MainStatusShardCluster,
+		ClusterID:      wakuv2.MainStatusShardCluster,
 	},
 	{
 		// Example 3,
 		// https://status.app/c/CxiACi8KFGFwIHJlcSAxIHN0dCBiZWMgbWVtEgdkc2Fkc2FkGAMiByM0MzYwREYqAxkrHAM=#zQ3shwDYZHtrLE7NqoTGjTWzWUu6hom5D4qxfskLZfgfyGRyL
 		CommunityID: "0x03f64be95ed5c925022265f9250f538f65ed3dcf6e4ef6c139803dc02a3487ae7b",
 		Fleet:       params.FleetStatusProd,
-		ClusterID:   shard.MainStatusShardCluster,
+		ClusterID:   wakuv2.MainStatusShardCluster,
 
 		CheckExpectedEnvelopes: true,
 		ExpectedShardEnvelopes: []string{
@@ -973,7 +974,7 @@ var testFetchRealCommunityExample = []struct {
 		//Example 1,
 		CommunityID: "0x02471dd922756a3a50b623e59cf3b99355d6587e43d5c517eb55f9aea9d3fe9fe9",
 		Fleet:       params.FleetStatusProd,
-		ClusterID:   shard.MainStatusShardCluster,
+		ClusterID:   wakuv2.MainStatusShardCluster,
 		CheckExpectedEnvelopes: true,
 		ExpectedShardEnvelopes: []string{
 			"0xc3e68e838d09e0117b3f3fd27aabe5f5a509d13e9045263c78e6890953d43547",
@@ -1013,7 +1014,7 @@ var testFetchRealCommunityExample = []struct {
 					ContractAddress: "0x21F6F5Cb75E81e5104D890D750270eD6538C50cb",
 				},
 			},
-		ClusterID: shard.MainStatusShardCluster,
+		ClusterID: wakuv2.MainStatusShardCluster,
 		CheckExpectedEnvelopes: false,
 		CustomOptions: []StoreNodeRequestOption{
 			WithInitialPageSize(1),
@@ -205,7 +205,7 @@ func WaitOnSignaledCommunityFound(m *Messenger, action func(), condition func(co
 	}
 }
 
-func WaitForConnectionStatus(s *suite.Suite, waku *wakuv2.NWaku, action func() bool) {
+func WaitForConnectionStatus(s *suite.Suite, waku *wakuv2.Waku, action func() bool) {
 	subscription := waku.SubscribeToConnStatusChanges()
 	defer subscription.Unsubscribe()
 
@@ -237,7 +237,7 @@ func hasAllPeers(m map[peer.ID]types.WakuV2Peer, checkSlice peer.IDSlice) bool {
 	return true
 }
 
-func WaitForPeersConnected(s *suite.Suite, waku *wakuv2.NWaku, action func() peer.IDSlice) {
+func WaitForPeersConnected(s *suite.Suite, waku *wakuv2.Waku, action func() peer.IDSlice) {
 	subscription := waku.SubscribeToConnStatusChanges()
 	defer subscription.Unsubscribe()
 
@@ -12,7 +12,6 @@ import (
 	"github.com/status-im/status-go/appdatabase"
 	gethbridge "github.com/status-im/status-go/eth-node/bridge/geth"
 	"github.com/status-im/status-go/eth-node/types"
-	"github.com/status-im/status-go/protocol/common/shard"
 	"github.com/status-im/status-go/t/helpers"
 	waku2 "github.com/status-im/status-go/wakuv2"
 )
@@ -62,7 +61,7 @@ func NewTestWakuV2(s *suite.Suite, cfg testWakuV2Config) *waku2.Waku {
 
 	err = wakuNode.Start()
 	if cfg.enableStore {
-		err := wakuNode.SubscribeToPubsubTopic(shard.DefaultNonProtectedPubsubTopic(), nil)
+		err := wakuNode.SubscribeToPubsubTopic(waku2.DefaultNonProtectedPubsubTopic(), nil)
 		s.Require().NoError(err)
 	}
 	s.Require().NoError(err)
@@ -78,7 +77,7 @@ func CreateWakuV2Network(s *suite.Suite, parentLogger *zap.Logger, nodeNames []s
 		nodes[i] = NewTestWakuV2(s, testWakuV2Config{
 			logger:      parentLogger.Named("waku-" + name),
 			enableStore: false,
-			clusterID:   shard.MainStatusShardCluster,
+			clusterID:   waku2.MainStatusShardCluster,
 		})
 	}
 
@@ -89,9 +88,10 @@ func CreateWakuV2Network(s *suite.Suite, parentLogger *zap.Logger, nodeNames []s
 			continue
 		}
 
-		addrs := nodes[j].ListenAddresses()
+		addrs, err := nodes[j].ListenAddresses()
+		s.Require().NoError(err)
 		s.Require().Greater(len(addrs), 0)
-		_, err := nodes[i].AddRelayPeer(addrs[0])
+		_, err = nodes[i].AddRelayPeer(addrs[0])
 		s.Require().NoError(err)
 		err = nodes[i].DialPeer(addrs[0])
 		s.Require().NoError(err)
@@ -123,7 +123,7 @@ func (s *Service) GetPeer(rawURL string) (*enode.Node, error) {
 	return enode.ParseV4(rawURL)
 }
 
-func (s *Service) InitProtocol(nodeName string, identity *ecdsa.PrivateKey, appDb, walletDb *sql.DB, httpServer *server.MediaServer, multiAccountDb *multiaccounts.Database, acc *multiaccounts.Account, accountManager *account.GethManager, rpcClient *rpc.Client, walletService *wallet.Service, communityTokensService *communitytokens.Service, wakuService *wakuv2.NWaku, logger *zap.Logger) error {
+func (s *Service) InitProtocol(nodeName string, identity *ecdsa.PrivateKey, appDb, walletDb *sql.DB, httpServer *server.MediaServer, multiAccountDb *multiaccounts.Database, acc *multiaccounts.Account, accountManager *account.GethManager, rpcClient *rpc.Client, walletService *wallet.Service, communityTokensService *communitytokens.Service, wakuService *wakuv2.Waku, logger *zap.Logger) error {
 	var err error
 	if !s.config.ShhextConfig.PFSEnabled {
 		return nil
@@ -393,7 +393,7 @@ func buildMessengerOptions(
 	accountsDB *accounts.Database,
 	walletService *wallet.Service,
 	communityTokensService *communitytokens.Service,
-	wakuService *wakuv2.NWaku,
+	wakuService *wakuv2.Waku,
 	logger *zap.Logger,
 	messengerSignalsHandler protocol.MessengerSignalsHandler,
 	accountManager account.Manager,
@@ -8,10 +8,10 @@ import (
 
 	"github.com/status-im/status-go/appdatabase"
 	"github.com/status-im/status-go/eth-node/types"
-	"github.com/status-im/status-go/protocol/common/shard"
 	"github.com/status-im/status-go/protocol/sqlite"
 	"github.com/status-im/status-go/protocol/transport"
 	"github.com/status-im/status-go/t/helpers"
+	"github.com/status-im/status-go/wakuv2"
 )
 
 func setupTestDB(t *testing.T) (*Database, func()) {
@@ -62,9 +62,9 @@ func TestTopic(t *testing.T) {
 	defer close()
 	topicA := "0x61000000"
 	topicD := "0x64000000"
-	topic1 := MailserverTopic{PubsubTopic: shard.DefaultShardPubsubTopic(), ContentTopic: topicA, LastRequest: 1}
-	topic2 := MailserverTopic{PubsubTopic: shard.DefaultShardPubsubTopic(), ContentTopic: "0x6200000", LastRequest: 2}
-	topic3 := MailserverTopic{PubsubTopic: shard.DefaultShardPubsubTopic(), ContentTopic: "0x6300000", LastRequest: 3}
+	topic1 := MailserverTopic{PubsubTopic: wakuv2.DefaultShardPubsubTopic(), ContentTopic: topicA, LastRequest: 1}
+	topic2 := MailserverTopic{PubsubTopic: wakuv2.DefaultShardPubsubTopic(), ContentTopic: "0x6200000", LastRequest: 2}
+	topic3 := MailserverTopic{PubsubTopic: wakuv2.DefaultShardPubsubTopic(), ContentTopic: "0x6300000", LastRequest: 3}
 
 	require.NoError(t, db.AddTopic(topic1))
 	require.NoError(t, db.AddTopic(topic2))
@@ -77,14 +77,14 @@ func TestTopic(t *testing.T) {
 	filters := []*transport.Filter{
 		// Existing topic, is not updated
 		{
-			PubsubTopic:  shard.DefaultShardPubsubTopic(),
+			PubsubTopic:  wakuv2.DefaultShardPubsubTopic(),
 			ContentTopic: types.BytesToTopic([]byte{0x61}),
 		},
 		// Non existing topic is not inserted
 		{
 			Discovery:    true,
 			Negotiated:   true,
-			PubsubTopic:  shard.DefaultShardPubsubTopic(),
+			PubsubTopic:  wakuv2.DefaultShardPubsubTopic(),
 			ContentTopic: types.BytesToTopic([]byte{0x64}),
 		},
 	}
@@ -160,7 +160,7 @@ func TestAddGetDeleteMailserverTopics(t *testing.T) {
 	defer close()
 	api := &API{db: db}
 	testTopic := MailserverTopic{
-		PubsubTopic:  shard.DefaultShardPubsubTopic(),
+		PubsubTopic:  wakuv2.DefaultShardPubsubTopic(),
 		ContentTopic: "topic-001",
 		ChatIDs:      []string{"chatID01", "chatID02"},
 		LastRequest:  10,
@@ -173,14 +173,14 @@ func TestAddGetDeleteMailserverTopics(t *testing.T) {
 	require.NoError(t, err)
 	require.EqualValues(t, []MailserverTopic{testTopic}, topics)
 
-	err = api.DeleteMailserverTopic(context.Background(), shard.DefaultShardPubsubTopic(), testTopic.ContentTopic)
+	err = api.DeleteMailserverTopic(context.Background(), wakuv2.DefaultShardPubsubTopic(), testTopic.ContentTopic)
 	require.NoError(t, err)
 	topics, err = api.GetMailserverTopics(context.Background())
 	require.NoError(t, err)
 	require.EqualValues(t, ([]MailserverTopic)(nil), topics)
 
 	// Delete non-existing topic.
-	err = api.DeleteMailserverTopic(context.Background(), shard.DefaultShardPubsubTopic(), "non-existing-topic")
+	err = api.DeleteMailserverTopic(context.Background(), wakuv2.DefaultShardPubsubTopic(), "non-existing-topic")
 	require.NoError(t, err)
 }
 
third_party/nwaku (new submodule)
@@ -0,0 +1 @@
+Subproject commit b358c90fa51d20957853e790aafc4e0987297ac7
@@ -1,17 +1,17 @@
-// Copyright 2019 The NWaku Library Authors.
+// Copyright 2019 The Waku Library Authors.
 //
-// The NWaku library is free software: you can redistribute it and/or modify
+// The Waku library is free software: you can redistribute it and/or modify
 // it under the terms of the GNU Lesser General Public License as published by
 // the Free Software Foundation, either version 3 of the License, or
 // (at your option) any later version.
 //
-// The NWaku library is distributed in the hope that it will be useful,
+// The Waku library is distributed in the hope that it will be useful,
 // but WITHOUT ANY WARRANTY; without even the implied warranty off
 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 // GNU Lesser General Public License for more details.
 //
 // You should have received a copy of the GNU Lesser General Public License
-// along with the NWaku library. If not, see <http://www.gnu.org/licenses/>.
+// along with the Waku library. If not, see <http://www.gnu.org/licenses/>.
 //
 // This software uses the go-ethereum library, which is licensed
 // under the GNU Lesser General Public Library, version 3 or any later.
@@ -52,14 +52,14 @@ var (
 // PublicWakuAPI provides the waku RPC service that can be
 // use publicly without security implications.
 type PublicWakuAPI struct {
-	w *NWaku
+	w *Waku
 
 	mu       sync.Mutex
 	lastUsed map[string]time.Time // keeps track when a filter was polled for the last time.
 }
 
 // NewPublicWakuAPI create a new RPC waku service.
-func NewPublicWakuAPI(w *NWaku) *PublicWakuAPI {
+func NewPublicWakuAPI(w *Waku) *PublicWakuAPI {
 	api := &PublicWakuAPI{
 		w:        w,
 		lastUsed: make(map[string]time.Time),
@@ -185,7 +185,7 @@ type NewMessage struct {
 	Priority *int `json:"priority"`
 }
 
-// Post posts a message on the NWaku network.
+// Post posts a message on the Waku network.
 // returns the hash of the message in case of success.
 func (api *PublicWakuAPI) Post(ctx context.Context, req NewMessage) (hexutil.Bytes, error) {
 	var (
@@ -252,7 +252,7 @@ func (api *PublicWakuAPI) Post(ctx context.Context, req NewMessage) (hexutil.Byt
 		Version:      &version,
 		ContentTopic: req.ContentTopic.ContentTopic(),
 		Timestamp:    proto.Int64(api.w.timestamp()),
-		Meta:         []byte{}, // TODO: empty for now. Once we use NWaku Archive v2, we should deprecate the timestamp and use an ULID here
+		Meta:         []byte{}, // TODO: empty for now. Once we use Waku Archive v2, we should deprecate the timestamp and use an ULID here
 		Ephemeral:    &req.Ephemeral,
 	}
 
[File diff suppressed because it is too large]
@@ -1,7 +1,6 @@
 package wakuv2
 
 import (
-	"encoding/json"
 	"errors"
 
 	"go.uber.org/zap"
@@ -35,7 +34,7 @@ func (pm PublishMethod) String() string {
 
 // Send injects a message into the waku send queue, to be distributed in the
 // network in the coming cycles.
-func (w *NWaku) Send(pubsubTopic string, msg *pb.WakuMessage, priority *int) ([]byte, error) {
+func (w *Waku) Send(pubsubTopic string, msg *pb.WakuMessage, priority *int) ([]byte, error) {
 	pubsubTopic = w.GetPubsubTopic(pubsubTopic)
 	if w.protectedTopicStore != nil {
 		privKey, err := w.protectedTopicStore.FetchPrivateKey(pubsubTopic)
@@ -77,7 +76,7 @@ func (w *NWaku) Send(pubsubTopic string, msg *pb.WakuMessage, priority *int) ([]
 	return envelope.Hash().Bytes(), nil
 }
 
-func (w *NWaku) broadcast() {
+func (w *Waku) broadcast() {
 	for {
 		var envelope *protocol.Envelope
 
@@ -103,11 +102,7 @@ func (w *NWaku) broadcast() {
 			publishMethod = LightPush
 			fn = func(env *protocol.Envelope, logger *zap.Logger) error {
 				logger.Info("publishing message via lightpush")
-				jsonMsg, err := json.Marshal(env.Message())
-				if err != nil {
-					return err
-				}
-				_, err = w.WakuLightpushPublish(string(jsonMsg), env.PubsubTopic())
+				_, err := w.WakuLightpushPublish(env.Message(), env.PubsubTopic())
 				return err
 			}
 		} else {
@@ -119,14 +114,8 @@ func (w *NWaku) broadcast() {
 			}
 
 			logger.Info("publishing message via relay", zap.Int("peerCnt", peerCnt))
-			timeoutMs := 1000
-			msg, err := json.Marshal(env.Message())
-
-			if err != nil {
-				return err
-			}
-
-			_, err = w.WakuRelayPublish(env.PubsubTopic(), string(msg), timeoutMs)
+			_, err = w.WakuRelayPublish(env.Message(), env.PubsubTopic())
 			return err
 		}
 	}
@@ -153,7 +142,7 @@ func (w *NWaku) broadcast() {
 		}
 	}
 
-func (w *NWaku) publishEnvelope(envelope *protocol.Envelope, publishFn publish.PublishFn, logger *zap.Logger) {
+func (w *Waku) publishEnvelope(envelope *protocol.Envelope, publishFn publish.PublishFn, logger *zap.Logger) {
 	defer w.wg.Done()
 
 	if err := publishFn(envelope, logger); err != nil {
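Both publish paths previously JSON-marshalled the message and passed a string (plus a timeout, for relay); the nwaku-facing helpers now take the *pb.WakuMessage directly, which is why the encoding/json import and the intermediate marshal step disappear. The resulting call shape, per the hunks above:

    _, err := w.WakuLightpushPublish(env.Message(), env.PubsubTopic())
    // ...
    _, err = w.WakuRelayPublish(env.Message(), env.PubsubTopic())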
wakuv2/nwaku.go | 298
@@ -1,10 +1,13 @@
+//go:build use_nwaku
+// +build use_nwaku
+
 package wakuv2
 
 /*
-	#cgo LDFLAGS: -L../vendor/nwaku/build/ -lnegentropy -lwaku -Wl,--allow-multiple-definition
-	#cgo LDFLAGS: -Lvendor/nwaku/build/ -Wl,-rpath,vendor/nwaku/build/
+	#cgo LDFLAGS: -L../third_party/nwaku/vendor/negentropy/cpp/ -lnegentropy -L../third_party/nwaku/build/ -lwaku -lm -ldl -pthread -lminiupnpc -L../third_party/nwaku/vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc/build/ -lnatpmp -L../third_party/nwaku/vendor/nim-nat-traversal/vendor/libnatpmp-upstream/ -L../third_party/nwaku/vendor/nim-libbacktrace/install/usr/lib/ -lbacktrace -Wl,--allow-multiple-definition
+	#cgo LDFLAGS: -Wl,-rpath,../third_party/nwaku/build/
 
-	#include "../vendor/nwaku/library/libwaku.h"
+	#include "../third_party/nwaku/library/libwaku.h"
 	#include <stdio.h>
 	#include <stdlib.h>
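The cgo block now links against the libraries produced inside third_party/nwaku (negentropy, miniupnpc, libnatpmp, libbacktrace) and embeds an rpath to the nwaku build directory. Since that rpath is relative, binaries run from another working directory may fail to locate libwaku; pointing the loader at the build output is a common workaround (illustrative, not part of the diff):

    export LD_LIBRARY_PATH=$PWD/third_party/nwaku/build:$LD_LIBRARY_PATH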
@ -100,7 +103,7 @@ package wakuv2
|
||||||
}
|
}
|
||||||
|
|
||||||
void cGoWakuSetEventCallback(void* wakuCtx) {
|
void cGoWakuSetEventCallback(void* wakuCtx) {
|
||||||
// The 'globalEventCallback' Go function is shared amongst all possible NWaku instances.
|
// The 'globalEventCallback' Go function is shared amongst all possible Waku instances.
|
||||||
|
|
||||||
// Given that the 'globalEventCallback' is shared, we pass again the
|
// Given that the 'globalEventCallback' is shared, we pass again the
|
||||||
// wakuCtx instance but in this case is needed to pick up the correct method
|
// wakuCtx instance but in this case is needed to pick up the correct method
|
||||||
|
@ -250,8 +253,6 @@ import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"runtime"
|
"runtime"
|
||||||
|
@ -331,7 +332,7 @@ type ITelemetryClient interface {
|
||||||
PushPeerConnFailures(ctx context.Context, peerConnFailures map[string]int)
|
PushPeerConnFailures(ctx context.Context, peerConnFailures map[string]int)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *NWaku) SetStatusTelemetryClient(client ITelemetryClient) {
|
func (w *Waku) SetStatusTelemetryClient(client ITelemetryClient) {
|
||||||
w.statusTelemetryClient = client
|
w.statusTelemetryClient = client
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -341,7 +342,7 @@ func newTTLCache() *ttlcache.Cache[gethcommon.Hash, *common.ReceivedMessage] {
|
||||||
return cache
|
return cache
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *NWaku) SubscribeToConnStatusChanges() *types.ConnStatusSubscription {
|
func (w *Waku) SubscribeToConnStatusChanges() *types.ConnStatusSubscription {
|
||||||
w.connStatusMu.Lock()
|
w.connStatusMu.Lock()
|
||||||
defer w.connStatusMu.Unlock()
|
defer w.connStatusMu.Unlock()
|
||||||
subscription := types.NewConnStatusSubscription()
|
subscription := types.NewConnStatusSubscription()
|
||||||
|
@ -349,7 +350,7 @@ func (w *NWaku) SubscribeToConnStatusChanges() *types.ConnStatusSubscription {
|
||||||
return subscription
|
return subscription
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *NWaku) getDiscV5BootstrapNodes(ctx context.Context, addresses []string) ([]*enode.Node, error) {
|
func (w *Waku) getDiscV5BootstrapNodes(ctx context.Context, addresses []string) ([]*enode.Node, error) {
|
||||||
wg := sync.WaitGroup{}
|
wg := sync.WaitGroup{}
|
||||||
mu := sync.Mutex{}
|
mu := sync.Mutex{}
|
||||||
var result []*enode.Node
|
var result []*enode.Node
|
||||||
|
@ -397,7 +398,7 @@ func (w *NWaku) getDiscV5BootstrapNodes(ctx context.Context, addresses []string)
|
||||||
|
|
||||||
type fnApplyToEachPeer func(d dnsdisc.DiscoveredNode, wg *sync.WaitGroup)
|
type fnApplyToEachPeer func(d dnsdisc.DiscoveredNode, wg *sync.WaitGroup)
|
||||||
|
|
||||||
func (w *NWaku) dnsDiscover(ctx context.Context, enrtreeAddress string, apply fnApplyToEachPeer) error {
|
func (w *Waku) dnsDiscover(ctx context.Context, enrtreeAddress string, apply fnApplyToEachPeer) error {
|
||||||
w.logger.Info("retrieving nodes", zap.String("enr", enrtreeAddress))
|
w.logger.Info("retrieving nodes", zap.String("enr", enrtreeAddress))
|
||||||
ctx, cancel := context.WithTimeout(ctx, requestTimeout)
|
ctx, cancel := context.WithTimeout(ctx, requestTimeout)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
@ -440,7 +441,7 @@ func (w *NWaku) dnsDiscover(ctx context.Context, enrtreeAddress string, apply fn
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *NWaku) discoverAndConnectPeers() {
|
func (w *Waku) discoverAndConnectPeers() {
|
||||||
fnApply := func(d dnsdisc.DiscoveredNode, wg *sync.WaitGroup) {
|
fnApply := func(d dnsdisc.DiscoveredNode, wg *sync.WaitGroup) {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
if len(d.PeerInfo.Addrs) != 0 {
|
if len(d.PeerInfo.Addrs) != 0 {
|
||||||
|
@ -476,14 +477,14 @@ func (w *NWaku) discoverAndConnectPeers() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *NWaku) connect(peerInfo peer.AddrInfo, enr *enode.Node, origin wps.Origin) {
|
func (w *Waku) connect(peerInfo peer.AddrInfo, enr *enode.Node, origin wps.Origin) {
|
||||||
// Connection will be prunned eventually by the connection manager if needed
|
// Connection will be prunned eventually by the connection manager if needed
|
||||||
// The peer connector in go-waku uses Connect, so it will execute identify as part of its
|
// The peer connector in go-waku uses Connect, so it will execute identify as part of its
|
||||||
addr := peerInfo.Addrs[0]
|
addr := peerInfo.Addrs[0]
|
||||||
w.WakuConnect(addr.String(), 1000)
|
w.WakuConnect(addr.String(), 1000)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *NWaku) telemetryBandwidthStats(telemetryServerURL string) {
|
func (w *Waku) telemetryBandwidthStats(telemetryServerURL string) {
|
||||||
w.wg.Add(1)
|
w.wg.Add(1)
|
||||||
defer w.wg.Done()
|
defer w.wg.Done()
|
||||||
|
|
||||||
|
@ -514,7 +515,7 @@ func (w *NWaku) telemetryBandwidthStats(telemetryServerURL string) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *NWaku) GetStats() types.StatsSummary {
|
func (w *Waku) GetStats() types.StatsSummary {
|
||||||
stats := w.bandwidthCounter.GetBandwidthTotals()
|
stats := w.bandwidthCounter.GetBandwidthTotals()
|
||||||
return types.StatsSummary{
|
return types.StatsSummary{
|
||||||
UploadRate: uint64(stats.RateOut),
|
UploadRate: uint64(stats.RateOut),
|
||||||
|
@ -522,7 +523,7 @@ func (w *NWaku) GetStats() types.StatsSummary {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *NWaku) runPeerExchangeLoop() {
|
func (w *Waku) runPeerExchangeLoop() {
|
||||||
w.wg.Add(1)
|
w.wg.Add(1)
|
||||||
defer w.wg.Done()
|
defer w.wg.Done()
|
||||||
|
|
||||||
|
@ -572,7 +573,7 @@ func (w *NWaku) runPeerExchangeLoop() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *NWaku) GetPubsubTopic(topic string) string {
|
func (w *Waku) GetPubsubTopic(topic string) string {
|
||||||
if topic == "" {
|
if topic == "" {
|
||||||
topic = w.cfg.DefaultShardPubsubTopic
|
topic = w.cfg.DefaultShardPubsubTopic
|
||||||
}
|
}
|
||||||
|
@ -581,12 +582,12 @@ func (w *NWaku) GetPubsubTopic(topic string) string {
|
||||||
}
|
}
|
||||||
|
|
||||||
// CurrentTime returns current time.
|
// CurrentTime returns current time.
|
||||||
func (w *NWaku) CurrentTime() time.Time {
|
func (w *Waku) CurrentTime() time.Time {
|
||||||
return w.timesource.Now()
|
return w.timesource.Now()
|
||||||
}
|
}
|
||||||
|
|
||||||
// APIs returns the RPC descriptors the NWaku implementation offers
|
// APIs returns the RPC descriptors the Waku implementation offers
|
||||||
func (w *NWaku) APIs() []rpc.API {
|
func (w *Waku) APIs() []rpc.API {
|
||||||
return []rpc.API{
|
return []rpc.API{
|
||||||
{
|
{
|
||||||
Namespace: Name,
|
Namespace: Name,
|
||||||
|
@ -598,23 +599,23 @@ func (w *NWaku) APIs() []rpc.API {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Protocols returns the waku sub-protocols ran by this particular client.
|
// Protocols returns the waku sub-protocols ran by this particular client.
|
||||||
func (w *NWaku) Protocols() []p2p.Protocol {
|
func (w *Waku) Protocols() []p2p.Protocol {
|
||||||
return []p2p.Protocol{}
|
return []p2p.Protocol{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *NWaku) SendEnvelopeEvent(event common.EnvelopeEvent) int {
|
func (w *Waku) SendEnvelopeEvent(event common.EnvelopeEvent) int {
|
||||||
return w.envelopeFeed.Send(event)
|
return w.envelopeFeed.Send(event)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SubscribeEnvelopeEvents subscribes to envelopes feed.
|
// SubscribeEnvelopeEvents subscribes to envelopes feed.
|
||||||
// In order to prevent blocking waku producers events must be amply buffered.
|
// In order to prevent blocking waku producers events must be amply buffered.
|
||||||
func (w *NWaku) SubscribeEnvelopeEvents(events chan<- common.EnvelopeEvent) event.Subscription {
|
func (w *Waku) SubscribeEnvelopeEvents(events chan<- common.EnvelopeEvent) event.Subscription {
|
||||||
return w.envelopeFeed.Subscribe(events)
|
return w.envelopeFeed.Subscribe(events)
|
||||||
}
|
}
|
||||||
|
|
||||||
 // NewKeyPair generates a new cryptographic identity for the client, and injects
 // it into the known identities for message decryption. Returns ID of the new key pair.
-func (w *NWaku) NewKeyPair() (string, error) {
+func (w *Waku) NewKeyPair() (string, error) {
 	key, err := crypto.GenerateKey()
 	if err != nil || !validatePrivateKey(key) {
 		key, err = crypto.GenerateKey() // retry once
@@ -642,7 +643,7 @@ func (w *NWaku) NewKeyPair() (string, error) {
 }
 
 // DeleteKeyPair deletes the specified key if it exists.
-func (w *NWaku) DeleteKeyPair(key string) bool {
+func (w *Waku) DeleteKeyPair(key string) bool {
 	deterministicID, err := toDeterministicID(key, common.KeyIDSize)
 	if err != nil {
 		return false
@@ -659,7 +660,7 @@ func (w *NWaku) DeleteKeyPair(key string) bool {
 }
 
 // AddKeyPair imports an asymmetric private key and returns its identifier.
-func (w *NWaku) AddKeyPair(key *ecdsa.PrivateKey) (string, error) {
+func (w *Waku) AddKeyPair(key *ecdsa.PrivateKey) (string, error) {
 	id, err := makeDeterministicID(hexutil.Encode(crypto.FromECDSAPub(&key.PublicKey)), common.KeyIDSize)
 	if err != nil {
 		return "", err
@@ -677,7 +678,7 @@ func (w *NWaku) AddKeyPair(key *ecdsa.PrivateKey) (string, error) {
 
 // SelectKeyPair adds a cryptographic identity, and makes sure
 // that it is the only private key known to the node.
-func (w *NWaku) SelectKeyPair(key *ecdsa.PrivateKey) error {
+func (w *Waku) SelectKeyPair(key *ecdsa.PrivateKey) error {
 	id, err := makeDeterministicID(hexutil.Encode(crypto.FromECDSAPub(&key.PublicKey)), common.KeyIDSize)
 	if err != nil {
 		return err
@@ -693,7 +694,7 @@ func (w *NWaku) SelectKeyPair(key *ecdsa.PrivateKey) error {
 }
 
 // DeleteKeyPairs removes all cryptographic identities known to the node
-func (w *NWaku) DeleteKeyPairs() error {
+func (w *Waku) DeleteKeyPairs() error {
 	w.keyMu.Lock()
 	defer w.keyMu.Unlock()
 
@@ -704,7 +705,7 @@ func (w *NWaku) DeleteKeyPairs() error {
 
 // HasKeyPair checks if the waku node is configured with the private key
 // of the specified public pair.
-func (w *NWaku) HasKeyPair(id string) bool {
+func (w *Waku) HasKeyPair(id string) bool {
 	deterministicID, err := toDeterministicID(id, common.KeyIDSize)
 	if err != nil {
 		return false
@@ -716,7 +717,7 @@ func (w *NWaku) HasKeyPair(id string) bool {
 }
 
 // GetPrivateKey retrieves the private key of the specified identity.
-func (w *NWaku) GetPrivateKey(id string) (*ecdsa.PrivateKey, error) {
+func (w *Waku) GetPrivateKey(id string) (*ecdsa.PrivateKey, error) {
 	deterministicID, err := toDeterministicID(id, common.KeyIDSize)
 	if err != nil {
 		return nil, err
@@ -733,7 +734,7 @@ func (w *NWaku) GetPrivateKey(id string) (*ecdsa.PrivateKey, error) {
 
 // GenerateSymKey generates a random symmetric key and stores it under id,
 // which is then returned. Will be used in the future for session key exchange.
-func (w *NWaku) GenerateSymKey() (string, error) {
+func (w *Waku) GenerateSymKey() (string, error) {
 	key, err := common.GenerateSecureRandomData(common.AESKeyLength)
 	if err != nil {
 		return "", err
@@ -757,7 +758,7 @@ func (w *NWaku) GenerateSymKey() (string, error) {
 }
 
 // AddSymKey stores the key with a given id.
-func (w *NWaku) AddSymKey(id string, key []byte) (string, error) {
+func (w *Waku) AddSymKey(id string, key []byte) (string, error) {
 	deterministicID, err := toDeterministicID(id, common.KeyIDSize)
 	if err != nil {
 		return "", err
@@ -774,7 +775,7 @@ func (w *NWaku) AddSymKey(id string, key []byte) (string, error) {
 }
 
 // AddSymKeyDirect stores the key, and returns its id.
-func (w *NWaku) AddSymKeyDirect(key []byte) (string, error) {
+func (w *Waku) AddSymKeyDirect(key []byte) (string, error) {
 	if len(key) != common.AESKeyLength {
 		return "", fmt.Errorf("wrong key size: %d", len(key))
 	}
@@ -795,7 +796,7 @@ func (w *NWaku) AddSymKeyDirect(key []byte) (string, error) {
 }
 
 // AddSymKeyFromPassword generates the key from password, stores it, and returns its id.
-func (w *NWaku) AddSymKeyFromPassword(password string) (string, error) {
+func (w *Waku) AddSymKeyFromPassword(password string) (string, error) {
 	id, err := common.GenerateRandomID()
 	if err != nil {
 		return "", fmt.Errorf("failed to generate ID: %s", err)
@@ -821,14 +822,14 @@ func (w *NWaku) AddSymKeyFromPassword(password string) (string, error) {
 
 // HasSymKey returns true if there is a key associated with the given id.
 // Otherwise returns false.
-func (w *NWaku) HasSymKey(id string) bool {
+func (w *Waku) HasSymKey(id string) bool {
 	w.keyMu.RLock()
 	defer w.keyMu.RUnlock()
 	return w.symKeys[id] != nil
 }
 
 // DeleteSymKey deletes the key associated with the name string if it exists.
-func (w *NWaku) DeleteSymKey(id string) bool {
+func (w *Waku) DeleteSymKey(id string) bool {
 	w.keyMu.Lock()
 	defer w.keyMu.Unlock()
 	if w.symKeys[id] != nil {
@@ -839,7 +840,7 @@ func (w *NWaku) DeleteSymKey(id string) bool {
 }
 
 // GetSymKey returns the symmetric key associated with the given id.
-func (w *NWaku) GetSymKey(id string) ([]byte, error) {
+func (w *Waku) GetSymKey(id string) ([]byte, error) {
 	w.keyMu.RLock()
 	defer w.keyMu.RUnlock()
 	if w.symKeys[id] != nil {
@@ -850,7 +851,7 @@ func (w *NWaku) GetSymKey(id string) ([]byte, error) {
 
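Taken together, the symmetric-key helpers above form a small round-trip API. A minimal usage sketch, assuming a placeholder password and eliding anything beyond basic error handling:

	id, err := w.AddSymKeyFromPassword("example-password") // placeholder secret
	if err != nil {
		return err
	}
	defer w.DeleteSymKey(id)

	if !w.HasSymKey(id) {
		return errors.New("symmetric key was not stored")
	}
	key, err := w.GetSymKey(id) // common.AESKeyLength bytes
	if err != nil {
		return err
	}
	_ = key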
 // Subscribe installs a new message handler used for filtering, decrypting
 // and subsequent storing of incoming messages.
-func (w *NWaku) Subscribe(f *common.Filter) (string, error) {
+func (w *Waku) Subscribe(f *common.Filter) (string, error) {
 	f.PubsubTopic = w.GetPubsubTopic(f.PubsubTopic)
 	id, err := w.filters.Install(f)
 	if err != nil {
@@ -866,7 +867,7 @@ func (w *NWaku) Subscribe(f *common.Filter) (string, error) {
 }
 
 // Unsubscribe removes an installed message handler.
-func (w *NWaku) Unsubscribe(ctx context.Context, id string) error {
+func (w *Waku) Unsubscribe(ctx context.Context, id string) error {
 	ok := w.filters.Uninstall(id)
 	if !ok {
 		return fmt.Errorf("failed to unsubscribe: invalid ID '%s'", id)
@@ -880,12 +881,12 @@ func (w *NWaku) Unsubscribe(ctx context.Context, id string) error {
 }
 
 // GetFilter returns the filter by id.
-func (w *NWaku) GetFilter(id string) *common.Filter {
+func (w *Waku) GetFilter(id string) *common.Filter {
 	return w.filters.Get(id)
 }
 
 // UnsubscribeMany removes a batch of installed message handlers.
-func (w *NWaku) UnsubscribeMany(ids []string) error {
+func (w *Waku) UnsubscribeMany(ids []string) error {
 	for _, id := range ids {
 		w.logger.Info("cleaning up filter", zap.String("id", id))
 		ok := w.filters.Uninstall(id)
@@ -896,24 +897,24 @@ func (w *NWaku) UnsubscribeMany(ids []string) error {
 	return nil
 }
 
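A sketch of how Subscribe, GetFilter and Unsubscribe fit together; the filter fields mirror the ones used by the tests added later in this commit and may not be exhaustive:

	f := &common.Filter{
		PubsubTopic:   "", // resolved through w.GetPubsubTopic inside Subscribe
		Messages:      common.NewMemoryMessageStore(),
		ContentTopics: common.NewTopicSetFromBytes([][]byte{{1, 2, 3, 4}}),
	}
	id, err := w.Subscribe(f)
	if err != nil {
		return err
	}
	_ = w.GetFilter(id) // returns the same installed *common.Filter

	// Tear the handler down again when done:
	if err := w.Unsubscribe(context.Background(), id); err != nil {
		return err
	}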
-func (w *NWaku) SkipPublishToTopic(value bool) {
+func (w *Waku) SkipPublishToTopic(value bool) {
 	w.cfg.SkipPublishToTopic = value
 }
 
-func (w *NWaku) ConfirmMessageDelivered(hashes []gethcommon.Hash) {
+func (w *Waku) ConfirmMessageDelivered(hashes []gethcommon.Hash) {
 	if !w.cfg.EnableStoreConfirmationForMessagesSent {
 		return
 	}
 	w.messageSentCheck.DeleteByMessageIDs(hashes)
 }
 
-func (w *NWaku) SetStorePeerID(peerID peer.ID) {
+func (w *Waku) SetStorePeerID(peerID peer.ID) {
 	if w.messageSentCheck != nil {
 		w.messageSentCheck.SetStorePeerID(peerID)
 	}
 }
 
-func (w *NWaku) Query(ctx context.Context,
+func (w *Waku) Query(ctx context.Context,
 	peerID peer.ID,
 	query store.FilterCriteria,
 	cursor []byte,
@@ -998,14 +999,14 @@ func (w *NWaku) Query(ctx context.Context,
 	return nil, 0, nil
 }
 
-// OnNewEnvelope is an interface from NWaku FilterManager API that gets invoked when any new message is received by Filter.
-func (w *NWaku) OnNewEnvelope(env *protocol.Envelope) error {
+// OnNewEnvelope is an interface from Waku FilterManager API that gets invoked when any new message is received by Filter.
+func (w *Waku) OnNewEnvelope(env *protocol.Envelope) error {
 	return w.OnNewEnvelopes(env, common.RelayedMessageType, false)
 }
 
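The full Query signature is hidden by the collapsed hunk above; the call shape below mirrors the invocation in the test file added later in this commit, with placeholder values for the peer, topics and time range:

	criteria := store.FilterCriteria{
		ContentFilter: protocol.NewContentFilter(pubsubTopic, contentTopic), // placeholders
		TimeStart:     proto.Int64(startNs),                                 // nanoseconds
		TimeEnd:       proto.Int64(endNs),
	}
	_, envelopeCount, err := w.Query(context.Background(), storePeerID, criteria, nil, nil, false)
	if err != nil {
		return err
	}
	_ = envelopeCount // number of envelopes returned by the store node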
 // Start implements node.Service, starting the background data propagation thread
-// of the NWaku protocol.
-func (w *NWaku) Start() error {
+// of the Waku protocol.
+func (w *Waku) Start() error {
 	// if w.ctx == nil {
 	// 	w.ctx, w.cancel = context.WithCancel(context.Background())
 	// }
@@ -1125,7 +1126,7 @@ func (w *NWaku) Start() error {
 	return nil
 }
 
-func (w *NWaku) checkForConnectionChanges() {
+func (w *Waku) checkForConnectionChanges() {
 
 	// isOnline := len(w.node.Host().Network().Peers()) > 0
 
@@ -1163,7 +1164,7 @@ func (w *NWaku) checkForConnectionChanges() {
 	// })
 }
 
-// func (w *NWaku) confirmMessagesSent() {
+// func (w *Waku) confirmMessagesSent() {
 // 	w.messageSentCheck = publish.NewMessageSentCheck(w.ctx, w.node.Store(), w.node.Timesource(), w.logger)
 // 	go w.messageSentCheck.Start()
 
@@ -1187,13 +1188,13 @@ func (w *NWaku) checkForConnectionChanges() {
 // 	}()
 // }
 
-func (w *NWaku) MessageExists(mh pb.MessageHash) (bool, error) {
+func (w *Waku) MessageExists(mh pb.MessageHash) (bool, error) {
 	w.poolMu.Lock()
 	defer w.poolMu.Unlock()
 	return w.envelopeCache.Has(gethcommon.Hash(mh)), nil
 }
 
-func (w *NWaku) SetTopicsToVerifyForMissingMessages(peerID peer.ID, pubsubTopic string, contentTopics []string) {
+func (w *Waku) SetTopicsToVerifyForMissingMessages(peerID peer.ID, pubsubTopic string, contentTopics []string) {
 	if !w.cfg.EnableMissingMessageVerification {
 		return
 	}
@@ -1201,7 +1202,7 @@ func (w *NWaku) SetTopicsToVerifyForMissingMessages(peerID peer.ID, pubsubTopic
 	w.missingMsgVerifier.SetCriteriaInterest(peerID, protocol.NewContentFilter(pubsubTopic, contentTopics...))
 }
 
-func (w *NWaku) setupRelaySubscriptions() error {
+func (w *Waku) setupRelaySubscriptions() error {
 	if w.cfg.LightClient {
 		return nil
 	}
@@ -1235,7 +1236,7 @@ func (w *NWaku) setupRelaySubscriptions() error {
 	return nil
 }
 
-func (w *NWaku) OnNewEnvelopes(envelope *protocol.Envelope, msgType common.MessageType, processImmediately bool) error {
+func (w *Waku) OnNewEnvelopes(envelope *protocol.Envelope, msgType common.MessageType, processImmediately bool) error {
 	if envelope == nil {
 		return nil
 	}
@@ -1276,13 +1277,13 @@ func (w *NWaku) OnNewEnvelopes(envelope *protocol.Envelope, msgType common.Messa
 }
 
 // addEnvelope adds an envelope to the envelope map, used for sending
-func (w *NWaku) addEnvelope(envelope *common.ReceivedMessage) {
+func (w *Waku) addEnvelope(envelope *common.ReceivedMessage) {
 	w.poolMu.Lock()
 	w.envelopeCache.Set(envelope.Hash(), envelope, ttlcache.DefaultTTL)
 	w.poolMu.Unlock()
 }
 
-func (w *NWaku) add(recvMessage *common.ReceivedMessage, processImmediately bool) (bool, error) {
+func (w *Waku) add(recvMessage *common.ReceivedMessage, processImmediately bool) (bool, error) {
 	common.EnvelopesReceivedCounter.Inc()
 
 	w.poolMu.Lock()
@@ -1320,12 +1321,12 @@ func (w *NWaku) add(recvMessage *common.ReceivedMessage, processImmediately bool
 }
 
 // postEvent queues the message for further processing.
-func (w *NWaku) postEvent(envelope *common.ReceivedMessage) {
+func (w *Waku) postEvent(envelope *common.ReceivedMessage) {
 	w.msgQueue <- envelope
 }
 
 // processQueueLoop delivers the messages to the watchers during the lifetime of the waku node.
-func (w *NWaku) processQueueLoop() {
+func (w *Waku) processQueueLoop() {
 	if w.ctx == nil {
 		return
 	}
@@ -1339,7 +1340,7 @@ func (w *NWaku) processQueueLoop() {
 	}
 }
 
-func (w *NWaku) processMessage(e *common.ReceivedMessage) {
+func (w *Waku) processMessage(e *common.ReceivedMessage) {
 	logger := w.logger.With(
 		zap.Stringer("envelopeHash", e.Envelope.Hash()),
 		zap.String("pubsubTopic", e.PubsubTopic),
@@ -1382,7 +1383,7 @@ func (w *NWaku) processMessage(e *common.ReceivedMessage) {
 
 // GetEnvelope retrieves an envelope from the message queue by its hash.
 // It returns nil if the envelope can not be found.
-func (w *NWaku) GetEnvelope(hash gethcommon.Hash) *common.ReceivedMessage {
+func (w *Waku) GetEnvelope(hash gethcommon.Hash) *common.ReceivedMessage {
 	w.poolMu.RLock()
 	defer w.poolMu.RUnlock()
 
@@ -1395,14 +1396,14 @@ func (w *NWaku) GetEnvelope(hash gethcommon.Hash) *common.ReceivedMessage {
 }
 
 // IsEnvelopeCached checks if envelope with specific hash has already been received and cached.
-func (w *NWaku) IsEnvelopeCached(hash gethcommon.Hash) bool {
+func (w *Waku) IsEnvelopeCached(hash gethcommon.Hash) bool {
 	w.poolMu.Lock()
 	defer w.poolMu.Unlock()
 
 	return w.envelopeCache.Has(hash)
 }
 
-func (w *NWaku) ClearEnvelopesCache() {
+func (w *Waku) ClearEnvelopesCache() {
 	w.poolMu.Lock()
 	defer w.poolMu.Unlock()
 
@@ -1410,17 +1411,17 @@ func (w *NWaku) ClearEnvelopesCache() {
 	w.envelopeCache = newTTLCache()
 }
 
-func (w *NWaku) PeerCount() int {
+func (w *Waku) PeerCount() int {
 	return 0
 	// return w.node.PeerCount()
 }
 
-func (w *NWaku) Peers() types.PeerStats {
+func (w *Waku) Peers() types.PeerStats {
 	return nil
 	// return FormatPeerStats(w.node)
 }
 
-func (w *NWaku) RelayPeersByTopic(topic string) (*types.PeerList, error) {
+func (w *Waku) RelayPeersByTopic(topic string) (*types.PeerList, error) {
 	if w.cfg.LightClient {
 		return nil, errors.New("only available in relay mode")
 	}
@@ -1432,7 +1433,7 @@ func (w *NWaku) RelayPeersByTopic(topic string) (*types.PeerList, error) {
 	return nil, nil
 }
 
-func (w *NWaku) SubscribeToPubsubTopic(topic string, pubkey *ecdsa.PublicKey) error {
+func (w *Waku) SubscribeToPubsubTopic(topic string, pubkey *ecdsa.PublicKey) error {
 	topic = w.GetPubsubTopic(topic)
 
 	if !w.cfg.LightClient {
@@ -1445,7 +1446,7 @@ func (w *NWaku) SubscribeToPubsubTopic(topic string, pubkey *ecdsa.PublicKey) er
 	return nil
 }
 
-func (w *NWaku) UnsubscribeFromPubsubTopic(topic string) error {
+func (w *Waku) UnsubscribeFromPubsubTopic(topic string) error {
 	topic = w.GetPubsubTopic(topic)
 
 	if !w.cfg.LightClient {
@@ -1457,7 +1458,7 @@ func (w *NWaku) UnsubscribeFromPubsubTopic(topic string) error {
 	return nil
 }
 
-func (w *NWaku) RetrievePubsubTopicKey(topic string) (*ecdsa.PrivateKey, error) {
+func (w *Waku) RetrievePubsubTopicKey(topic string) (*ecdsa.PrivateKey, error) {
 	topic = w.GetPubsubTopic(topic)
 	if w.protectedTopicStore == nil {
 		return nil, nil
@@ -1466,7 +1467,7 @@ func (w *NWaku) RetrievePubsubTopicKey(topic string) (*ecdsa.PrivateKey, error)
 	return w.protectedTopicStore.FetchPrivateKey(topic)
 }
 
-func (w *NWaku) StorePubsubTopicKey(topic string, privKey *ecdsa.PrivateKey) error {
+func (w *Waku) StorePubsubTopicKey(topic string, privKey *ecdsa.PrivateKey) error {
 	topic = w.GetPubsubTopic(topic)
 	if w.protectedTopicStore == nil {
 		return nil
@@ -1475,7 +1476,7 @@ func (w *NWaku) StorePubsubTopicKey(topic string, privKey *ecdsa.PrivateKey) err
 	return w.protectedTopicStore.Insert(topic, privKey, &privKey.PublicKey)
 }
 
-func (w *NWaku) RemovePubsubTopicKey(topic string) error {
+func (w *Waku) RemovePubsubTopicKey(topic string) error {
 	topic = w.GetPubsubTopic(topic)
 	if w.protectedTopicStore == nil {
 		return nil
@@ -1484,7 +1485,7 @@ func (w *NWaku) RemovePubsubTopicKey(topic string) error {
 	return w.protectedTopicStore.Delete(topic)
 }
 
-func (w *NWaku) handleNetworkChangeFromApp(state connection.State) {
+func (w *Waku) handleNetworkChangeFromApp(state connection.State) {
 	// If connection state is reported by something other than peerCount becoming 0 e.g. from mobile app, disconnect all peers
 	// if (state.Offline && len(w.node.Host().Network().Peers()) > 0) ||
 	// 	(w.state.Type != state.Type && !w.state.Offline && !state.Offline) { // network switched between wifi and cellular
@@ -1496,7 +1497,7 @@ func (w *NWaku) handleNetworkChangeFromApp(state connection.State) {
 	// }
}
 
-func (w *NWaku) ConnectionChanged(state connection.State) {
+func (w *Waku) ConnectionChanged(state connection.State) {
 	isOnline := !state.Offline
 	if w.cfg.LightClient {
 		// TODO: Update this as per https://github.com/waku-org/go-waku/issues/1114
@@ -1520,7 +1521,7 @@ func (w *NWaku) ConnectionChanged(state connection.State) {
 	w.state = state
 }
 
-func (w *NWaku) AddStorePeer(address multiaddr.Multiaddr) (peer.ID, error) {
+func (w *Waku) AddStorePeer(address multiaddr.Multiaddr) (peer.ID, error) {
 	// peerID, err := w.node.AddPeer(address, wps.Static, w.cfg.DefaultShardedPubsubTopics, store.StoreQueryID_v300)
 	// if err != nil {
 	// 	return "", err
@@ -1529,11 +1530,11 @@ func (w *NWaku) AddStorePeer(address multiaddr.Multiaddr) (peer.ID, error) {
 	return "", nil
 }
 
-func (w *NWaku) timestamp() int64 {
+func (w *Waku) timestamp() int64 {
 	return w.timesource.Now().UnixNano()
 }
 
-func (w *NWaku) AddRelayPeer(address multiaddr.Multiaddr) (peer.ID, error) {
+func (w *Waku) AddRelayPeer(address multiaddr.Multiaddr) (peer.ID, error) {
 	// peerID, err := w.node.AddPeer(address, wps.Static, w.cfg.DefaultShardedPubsubTopics, relay.WakuRelayID_v200)
 	// if err != nil {
 	// 	return "", err
@@ -1542,38 +1543,38 @@ func (w *NWaku) AddRelayPeer(address multiaddr.Multiaddr) (peer.ID, error) {
 	return "", nil
 }
 
-func (w *NWaku) DialPeer(address multiaddr.Multiaddr) error {
+func (w *Waku) DialPeer(address multiaddr.Multiaddr) error {
 	// ctx, cancel := context.WithTimeout(w.ctx, requestTimeout)
 	// defer cancel()
 	// return w.node.DialPeerWithMultiAddress(ctx, address)
 	return nil
 }
 
-func (w *NWaku) DialPeerByID(peerID peer.ID) error {
+func (w *Waku) DialPeerByID(peerID peer.ID) error {
 	// ctx, cancel := context.WithTimeout(w.ctx, requestTimeout)
 	// defer cancel()
 	// return w.node.DialPeerByID(ctx, peerID)
 	return nil
 }
 
-func (w *NWaku) DropPeer(peerID peer.ID) error {
+func (w *Waku) DropPeer(peerID peer.ID) error {
 	// return w.node.ClosePeerById(peerID)
 	return nil
 }
 
-func (w *NWaku) ProcessingP2PMessages() bool {
+func (w *Waku) ProcessingP2PMessages() bool {
 	w.storeMsgIDsMu.Lock()
 	defer w.storeMsgIDsMu.Unlock()
 	return len(w.storeMsgIDs) != 0
 }
 
-func (w *NWaku) MarkP2PMessageAsProcessed(hash gethcommon.Hash) {
+func (w *Waku) MarkP2PMessageAsProcessed(hash gethcommon.Hash) {
 	w.storeMsgIDsMu.Lock()
 	defer w.storeMsgIDsMu.Unlock()
 	delete(w.storeMsgIDs, hash)
 }
 
-func (w *NWaku) Clean() error {
+func (w *Waku) Clean() error {
 	w.msgQueue = make(chan *common.ReceivedMessage, messageQueueLimit)
 
 	for _, f := range w.filters.All() {
@@ -1583,12 +1584,12 @@ func (w *NWaku) Clean() error {
 	return nil
 }
 
-func (w *NWaku) PeerID() peer.ID {
+func (w *Waku) PeerID() peer.ID {
 	// return w.node.Host().ID()
 	return ""
 }
 
-func (w *NWaku) Peerstore() peerstore.Peerstore {
+func (w *Waku) Peerstore() peerstore.Peerstore {
 	// return w.node.Host().Peerstore()
 	return nil
 }
@@ -1639,7 +1640,7 @@ func FormatPeerStats(wakuNode *node.WakuNode) types.PeerStats {
 	return p
 }
 
-func (w *NWaku) StoreNode() *store.WakuStore {
+func (w *Waku) StoreNode() *store.WakuStore {
 	// return w.node.Store()
 	return nil
 }
@@ -1656,7 +1657,7 @@ func FormatPeerConnFailures(wakuNode *node.WakuNode) map[string]int {
 	return p
 }
 
-func (w *NWaku) LegacyStoreNode() legacy_store.Store {
+func (w *Waku) LegacyStoreNode() legacy_store.Store {
 	// return w.node.LegacyStore()
 	return nil
 }
@@ -1675,7 +1676,7 @@ type WakuConfig struct {
 
 var jamon unsafe.Pointer
 
-type NWaku struct {
+type Waku struct {
 	wakuCtx unsafe.Pointer
 
 	appDB *sql.DB
@@ -1749,7 +1750,7 @@ type NWaku struct {
 	defaultShardInfo protocol.RelayShards
 }
 
-func (w *NWaku) Stop() error {
+func (w *Waku) Stop() error {
 	return w.WakuStop()
 }
 
@@ -1772,7 +1773,7 @@ func wakuNew(nodeKey *ecdsa.PrivateKey,
 	logger *zap.Logger,
 	appDB *sql.DB,
 	ts *timesource.NTPTimeSource,
-	onHistoricMessagesRequestFailed func([]byte, peer.ID, error), onPeerStats func(types.ConnStatus)) (*NWaku, error) {
+	onHistoricMessagesRequestFailed func([]byte, peer.ID, error), onPeerStats func(types.ConnStatus)) (*Waku, error) {
 
 	nwakuConfig := WakuConfig{
 		Host: cfg.Host,
@@ -1817,7 +1818,7 @@ func wakuNew(nodeKey *ecdsa.PrivateKey,
 
 	if C.getRet(resp) == C.RET_OK {
 
-		return &NWaku{
+		return &Waku{
 			wakuCtx: wakuCtx,
 			cfg: cfg,
 			privateKeys: make(map[string]*ecdsa.PrivateKey),
@@ -1848,7 +1849,7 @@ func wakuNew(nodeKey *ecdsa.PrivateKey,
 	return nil, errors.New(errMsg)
 }
 
-func (self *NWaku) WakuStart() error {
+func (self *Waku) WakuStart() error {
 
 	var resp = C.allocResp()
 	defer C.freeResp(resp)
@@ -1861,7 +1862,7 @@ func (self *NWaku) WakuStart() error {
 	return errors.New(errMsg)
 }
 
-func (self *NWaku) WakuStop() error {
+func (self *Waku) WakuStop() error {
 	var resp = C.allocResp()
 	defer C.freeResp(resp)
 	C.cGoWakuStop(self.wakuCtx, resp)
@@ -1873,7 +1874,7 @@ func (self *NWaku) WakuStop() error {
 	return errors.New(errMsg)
 }
 
-func (self *NWaku) WakuDestroy() error {
+func (self *Waku) WakuDestroy() error {
 	var resp = C.allocResp()
 	defer C.freeResp(resp)
 	C.cGoWakuDestroy(self.wakuCtx, resp)
@@ -1885,7 +1886,7 @@ func (self *NWaku) WakuDestroy() error {
 	return errors.New(errMsg)
 }
 
-func (self *NWaku) StartDiscV5() error {
+func (self *Waku) StartDiscV5() error {
 	var resp = C.allocResp()
 	defer C.freeResp(resp)
 	C.cGoWakuStartDiscV5(self.wakuCtx, resp)
@@ -1897,7 +1898,7 @@ func (self *NWaku) StartDiscV5() error {
 	return errors.New(errMsg)
 }
 
-func (self *NWaku) StopDiscV5() error {
+func (self *Waku) StopDiscV5() error {
 	var resp = C.allocResp()
 	defer C.freeResp(resp)
 	C.cGoWakuStopDiscV5(self.wakuCtx, resp)
@@ -1909,7 +1910,7 @@ func (self *NWaku) StopDiscV5() error {
 	return errors.New(errMsg)
 }
 
-func (self *NWaku) WakuVersion() (string, error) {
+func (self *Waku) WakuVersion() (string, error) {
 	var resp = C.allocResp()
 	defer C.freeResp(resp)
 
@@ -1928,20 +1929,20 @@ func (self *NWaku) WakuVersion() (string, error) {
 //export globalEventCallback
 func globalEventCallback(callerRet C.int, msg *C.char, len C.size_t, userData unsafe.Pointer) {
 	// This is shared among all Golang instances
-	self := NWaku{wakuCtx: userData}
+	self := Waku{wakuCtx: userData}
 	self.MyEventCallback(callerRet, msg, len)
 }
 
-func (self *NWaku) MyEventCallback(callerRet C.int, msg *C.char, len C.size_t) {
+func (self *Waku) MyEventCallback(callerRet C.int, msg *C.char, len C.size_t) {
 	fmt.Println("Event received:", C.GoStringN(msg, C.int(len)))
 }
 
-func (self *NWaku) WakuSetEventCallback() {
+func (self *Waku) WakuSetEventCallback() {
 	// Notice that the events for self node are handled by the 'MyEventCallback' method
 	C.cGoWakuSetEventCallback(self.wakuCtx)
 }
 
-func (self *NWaku) FormatContentTopic(
+func (self *Waku) FormatContentTopic(
 	appName string,
 	appVersion int,
 	contentTopicName string,
@@ -1975,7 +1976,7 @@ func (self *NWaku) FormatContentTopic(
 	return "", errors.New(errMsg)
 }
 
-func (self *NWaku) FormatPubsubTopic(topicName string) (WakuPubsubTopic, error) {
+func (self *Waku) FormatPubsubTopic(topicName string) (WakuPubsubTopic, error) {
 	var cTopicName = C.CString(topicName)
 	var resp = C.allocResp()
 
@@ -1994,7 +1995,7 @@ func (self *NWaku) FormatPubsubTopic(topicName string) (WakuPubsubTopic, error)
 	return "", errors.New(errMsg)
 }
 
-func (self *NWaku) WakuDefaultPubsubTopic() (WakuPubsubTopic, error) {
+func (self *Waku) WakuDefaultPubsubTopic() (WakuPubsubTopic, error) {
 	var resp = C.allocResp()
 	defer C.freeResp(resp)
 	C.cGoWakuDefaultPubsubTopic(self.wakuCtx, resp)
@@ -2009,13 +2010,16 @@ func (self *NWaku) WakuDefaultPubsubTopic() (WakuPubsubTopic, error) {
 	return "", errors.New(errMsg)
 }
 
-func (self *NWaku) WakuRelayPublish(
-	pubsubTopic string,
-	message string,
-	timeoutMs int) (WakuMessageHash, error) {
+func (self *Waku) WakuRelayPublish(wakuMsg *pb.WakuMessage, pubsubTopic string) (string, error) {
+	timeoutMs := 1000
+
+	message, err := json.Marshal(wakuMsg)
+	if err != nil {
+		return "", err
+	}
 
 	var cPubsubTopic = C.CString(pubsubTopic)
-	var msg = C.CString(message)
+	var msg = C.CString(string(message))
 	var resp = C.allocResp()
 
 	defer C.freeResp(resp)
@@ -2032,7 +2036,7 @@ func (self *NWaku) WakuRelayPublish(
 	return "", errors.New(errMsg)
 }
 
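Note the signature change above: WakuRelayPublish now takes a *pb.WakuMessage plus the pubsub topic, JSON-marshals the message before handing it across the C boundary, and hard-codes the previous timeoutMs parameter to 1000. A hedged caller-side sketch, with placeholder topic and timestamp values:

	msg := &pb.WakuMessage{
		Payload:      []byte{1, 2, 3, 4, 5},
		ContentTopic: contentTopic, // placeholder content topic string
		Version:      proto.Uint32(0),
		Timestamp:    &ts, // placeholder nanosecond timestamp (int64)
	}
	hash, err := w.WakuRelayPublish(msg, pubsubTopic) // placeholder pubsub topic
	if err != nil {
		return err
	}
	_ = hash // the message hash is now returned as a plain string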
-func (self *NWaku) WakuRelaySubscribe(pubsubTopic string) error {
+func (self *Waku) WakuRelaySubscribe(pubsubTopic string) error {
 	var resp = C.allocResp()
 	var cPubsubTopic = C.CString(pubsubTopic)
 
@@ -2059,7 +2063,7 @@ func (self *NWaku) WakuRelaySubscribe(pubsubTopic string) error {
 	return errors.New(errMsg)
 }
 
-func (self *NWaku) WakuRelayUnsubscribe(pubsubTopic string) error {
+func (self *Waku) WakuRelayUnsubscribe(pubsubTopic string) error {
 	var resp = C.allocResp()
 	var cPubsubTopic = C.CString(pubsubTopic)
 	defer C.freeResp(resp)
@@ -2074,12 +2078,14 @@ func (self *NWaku) WakuRelayUnsubscribe(pubsubTopic string) error {
 	return errors.New(errMsg)
 }
 
-func (self *NWaku) WakuLightpushPublish(
-	pubsubTopic string,
-	message string) (string, error) {
+func (self *Waku) WakuLightpushPublish(message *pb.WakuMessage, pubsubTopic string) (string, error) {
+	jsonMsg, err := json.Marshal(message)
+	if err != nil {
+		return "", err
+	}
 
 	var cPubsubTopic = C.CString(pubsubTopic)
-	var msg = C.CString(message)
+	var msg = C.CString(string(jsonMsg))
 	var resp = C.allocResp()
 
 	defer C.freeResp(resp)
@@ -2096,7 +2102,7 @@ func (self *NWaku) WakuLightpushPublish(
 	return "", errors.New(errMsg)
 }
 
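WakuLightpushPublish gets the same treatment as WakuRelayPublish: it now accepts a *pb.WakuMessage and serializes it to JSON internally. On a light client the call is symmetric with the relay variant:

	hash, err := w.WakuLightpushPublish(msg, pubsubTopic) // same msg/topic placeholders as above
	if err != nil {
		return err
	}
	_ = hash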
-func (self *NWaku) wakuStoreQuery(
+func (self *Waku) wakuStoreQuery(
 	jsonQuery string,
 	peerAddr string,
 	timeoutMs int) (string, error) {
@@ -2119,7 +2125,7 @@ func (self *NWaku) wakuStoreQuery(
 	return "", errors.New(errMsg)
 }
 
-func (self *NWaku) WakuPeerExchangeRequest(numPeers uint64) (string, error) {
+func (self *Waku) WakuPeerExchangeRequest(numPeers uint64) (string, error) {
 	var resp = C.allocResp()
 	defer C.freeResp(resp)
 
@@ -2133,7 +2139,7 @@ func (self *NWaku) WakuPeerExchangeRequest(numPeers uint64) (string, error) {
 	return "", errors.New(errMsg)
 }
 
-func (self *NWaku) WakuConnect(peerMultiAddr string, timeoutMs int) error {
+func (self *Waku) WakuConnect(peerMultiAddr string, timeoutMs int) error {
 	var resp = C.allocResp()
 	var cPeerMultiAddr = C.CString(peerMultiAddr)
 	defer C.freeResp(resp)
@@ -2149,7 +2155,7 @@ func (self *NWaku) WakuConnect(peerMultiAddr string, timeoutMs int) error {
 	return errors.New(errMsg)
 }
 
-func (self *NWaku) ListenAddresses() ([]multiaddr.Multiaddr, error) {
+func (self *Waku) ListenAddresses() ([]multiaddr.Multiaddr, error) {
 	var resp = C.allocResp()
 	defer C.freeResp(resp)
 	C.cGoWakuListenAddresses(self.wakuCtx, resp)
@@ -2178,7 +2184,7 @@ func (self *NWaku) ListenAddresses() ([]multiaddr.Multiaddr, error) {
 	return nil, errors.New(errMsg)
 }
 
-func (self *NWaku) ENR() (*enode.Node, error) {
+func (self *Waku) ENR() (*enode.Node, error) {
 	var resp = C.allocResp()
 	defer C.freeResp(resp)
 	C.cGoWakuGetMyENR(self.wakuCtx, resp)
@@ -2196,7 +2202,7 @@ func (self *NWaku) ENR() (*enode.Node, error) {
 	return nil, errors.New(errMsg)
 }
 
-func (self *NWaku) ListPeersInMesh(pubsubTopic string) (int, error) {
+func (self *Waku) ListPeersInMesh(pubsubTopic string) (int, error) {
 	var resp = C.allocResp()
 	var cPubsubTopic = C.CString(pubsubTopic)
 	defer C.freeResp(resp)
@@ -2219,7 +2225,7 @@ func (self *NWaku) ListPeersInMesh(pubsubTopic string) (int, error) {
 	return 0, errors.New(errMsg)
 }
 
-func (self *NWaku) GetNumConnectedPeers(paramPubsubTopic ...string) (int, error) {
+func (self *Waku) GetNumConnectedPeers(paramPubsubTopic ...string) (int, error) {
 	var pubsubTopic string
 	if len(paramPubsubTopic) == 0 {
 		pubsubTopic = ""
@@ -2249,7 +2255,7 @@ func (self *NWaku) GetNumConnectedPeers(paramPubsubTopic ...string) (int, error)
 	return 0, errors.New(errMsg)
 }
 
-func (self *NWaku) GetPeerIdsByProtocol(protocol string) (peer.IDSlice, error) {
+func (self *Waku) GetPeerIdsByProtocol(protocol string) (peer.IDSlice, error) {
 	var resp = C.allocResp()
 	var cProtocol = C.CString(protocol)
 	defer C.freeResp(resp)
@@ -2373,7 +2379,7 @@ func (self *NWaku) GetPeerIdsByProtocol(protocol string) (peer.IDSlice, error) {
 // }
 
 // MaxMessageSize returns the maximum accepted message size.
-func (w *NWaku) MaxMessageSize() uint32 {
+func (w *Waku) MaxMessageSize() uint32 {
 	return w.cfg.MaxMessageSize
 }
 
@@ -2385,7 +2391,7 @@ func New(nodeKey *ecdsa.PrivateKey,
 	appDB *sql.DB,
 	ts *timesource.NTPTimeSource,
 	onHistoricMessagesRequestFailed func([]byte, peer.ID, error),
-	onPeerStats func(types.ConnStatus)) (*NWaku, error) {
+	onPeerStats func(types.ConnStatus)) (*Waku, error) {
 
 	// Lock the main goroutine to its current OS thread
 	runtime.LockOSThread()
@@ -2529,51 +2535,3 @@ func New(nodeKey *ecdsa.PrivateKey,
 
 	// return waku, nil
 }
-
-type NwakuInfo struct {
-	ListenAddresses []string `json:"listenAddresses"`
-	EnrUri          string   `json:"enrUri"`
-}
-
-func GetNwakuInfo(host *string, port *int) (NwakuInfo, error) {
-	nwakuRestPort := 8645
-	if port != nil {
-		nwakuRestPort = *port
-	}
-	envNwakuRestPort := os.Getenv("NWAKU_REST_PORT")
-	if envNwakuRestPort != "" {
-		v, err := strconv.Atoi(envNwakuRestPort)
-		if err != nil {
-			return NwakuInfo{}, err
-		}
-		nwakuRestPort = v
-	}
-
-	nwakuRestHost := "localhost"
-	if host != nil {
-		nwakuRestHost = *host
-	}
-	envNwakuRestHost := os.Getenv("NWAKU_REST_HOST")
-	if envNwakuRestHost != "" {
-		nwakuRestHost = envNwakuRestHost
-	}
-
-	resp, err := http.Get(fmt.Sprintf("http://%s:%d/debug/v1/info", nwakuRestHost, nwakuRestPort))
-	if err != nil {
-		return NwakuInfo{}, err
-	}
-	defer resp.Body.Close()
-
-	body, err := io.ReadAll(resp.Body)
-	if err != nil {
-		return NwakuInfo{}, err
-	}
-
-	var data NwakuInfo
-	err = json.Unmarshal(body, &data)
-	if err != nil {
-		return NwakuInfo{}, err
-	}
-
-	return data, nil
-}
@@ -0,0 +1,809 @@
+//go:build use_nwaku
+// +build use_nwaku
+
+package wakuv2
+
+import (
+	"context"
+	"crypto/rand"
+	"errors"
+	"math/big"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/cenkalti/backoff/v3"
+
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/crypto"
+	ethdnsdisc "github.com/ethereum/go-ethereum/p2p/dnsdisc"
+	"github.com/ethereum/go-ethereum/p2p/enode"
+
+	"github.com/stretchr/testify/require"
+	"golang.org/x/exp/maps"
+	"google.golang.org/protobuf/proto"
+
+	"github.com/waku-org/go-waku/waku/v2/dnsdisc"
+	"github.com/waku-org/go-waku/waku/v2/protocol"
+	"github.com/waku-org/go-waku/waku/v2/protocol/pb"
+	"github.com/waku-org/go-waku/waku/v2/protocol/store"
+
+	"github.com/status-im/status-go/protocol/tt"
+	"github.com/status-im/status-go/wakuv2/common"
+)
+
+var testStoreENRBootstrap = "enrtree://AI4W5N5IFEUIHF5LESUAOSMV6TKWF2MB6GU2YK7PU4TYUGUNOCEPW@store.staging.status.nodes.status.im"
+var testBootENRBootstrap = "enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.staging.status.nodes.status.im"
+
+func setDefaultConfig(config *Config, lightMode bool) {
+	config.ClusterID = 16
+
+	if lightMode {
+		config.EnablePeerExchangeClient = true
+		config.LightClient = true
+		config.EnableDiscV5 = false
+	} else {
+		config.EnableDiscV5 = true
+		config.EnablePeerExchangeServer = true
+		config.LightClient = false
+		config.EnablePeerExchangeClient = false
+	}
+}
+
+/*
+func TestDiscoveryV5(t *testing.T) {
+	config := &Config{}
+	setDefaultConfig(config, false)
+	config.DiscV5BootstrapNodes = []string{testStoreENRBootstrap}
+	config.DiscoveryLimit = 20
+	w, err := New(nil, "shards.staging", config, nil, nil, nil, nil, nil)
+	require.NoError(t, err)
+
+	require.NoError(t, w.Start())
+
+	err = tt.RetryWithBackOff(func() error {
+		if len(w.Peers()) == 0 {
+			return errors.New("no peers discovered")
+		}
+		return nil
+	})
+
+	require.NoError(t, err)
+
+	require.NotEqual(t, 0, len(w.Peers()))
+	require.NoError(t, w.Stop())
+}
+*/
+/*
+func TestRestartDiscoveryV5(t *testing.T) {
+	config := &Config{}
+	setDefaultConfig(config, false)
+	// Use wrong discv5 bootstrap address, to simulate being offline
+	config.DiscV5BootstrapNodes = []string{"enrtree://AOGECG2SPND25EEFMAJ5WF3KSGJNSGV356DSTL2YVLLZWIV6SAYBM@1.1.1.2"}
+	config.DiscoveryLimit = 20
+	config.UDPPort = 10002
+	config.ClusterID = 16
+	w, err := New(nil, "", config, nil, nil, nil, nil, nil)
+	require.NoError(t, err)
+
+	require.NoError(t, w.Start())
+	require.False(t, w.seededBootnodesForDiscV5)
+
+	options := func(b *backoff.ExponentialBackOff) {
+		b.MaxElapsedTime = 2 * time.Second
+	}
+
+	// Sanity check, not great, but it's probably helpful
+	err = tt.RetryWithBackOff(func() error {
+		if len(w.Peers()) == 0 {
+			return errors.New("no peers discovered")
+		}
+		return nil
+	}, options)
+
+	require.Error(t, err)
+
+	w.discV5BootstrapNodes = []string{testStoreENRBootstrap}
+
+	options = func(b *backoff.ExponentialBackOff) {
+		b.MaxElapsedTime = 90 * time.Second
+	}
+
+	err = tt.RetryWithBackOff(func() error {
+		if len(w.Peers()) == 0 {
+			return errors.New("no peers discovered")
+		}
+		return nil
+	}, options)
+	require.NoError(t, err)
+
+	require.True(t, w.seededBootnodesForDiscV5)
+	require.NotEqual(t, 0, len(w.Peers()))
+	require.NoError(t, w.Stop())
+}
+
+func TestRelayPeers(t *testing.T) {
+	config := &Config{
+		EnableMissingMessageVerification: true,
+	}
+	setDefaultConfig(config, false)
+	w, err := New(nil, "", config, nil, nil, nil, nil, nil)
+	require.NoError(t, err)
+	require.NoError(t, w.Start())
+	_, err = w.RelayPeersByTopic(config.DefaultShardPubsubTopic)
+	require.NoError(t, err)
+
+	// Ensure function returns an error for lightclient
+	config = &Config{}
+	config.ClusterID = 16
+	config.LightClient = true
+	w, err = New(nil, "", config, nil, nil, nil, nil, nil)
+	require.NoError(t, err)
+	require.NoError(t, w.Start())
+	_, err = w.RelayPeersByTopic(config.DefaultShardPubsubTopic)
+	require.Error(t, err)
+}
+*/
+func parseNodes(rec []string) []*enode.Node {
+	var ns []*enode.Node
+	for _, r := range rec {
+		var n enode.Node
+		if err := n.UnmarshalText([]byte(r)); err != nil {
+			panic(err)
+		}
+		ns = append(ns, &n)
+	}
+	return ns
+}
+
+// In order to run these tests, you must run an nwaku node
+//
+// Using Docker:
+//
+//	IP_ADDRESS=$(hostname -I | awk '{print $1}');
+//	docker run \
+//	-p 60000:60000/tcp -p 9000:9000/udp -p 8645:8645/tcp harbor.status.im/wakuorg/nwaku:v0.31.0 \
+//	--tcp-port=60000 --discv5-discovery=true --cluster-id=16 --pubsub-topic=/waku/2/rs/16/32 --pubsub-topic=/waku/2/rs/16/64 \
+//	--nat=extip:${IP_ADDRESS} --discv5-discovery --discv5-udp-port=9000 --rest-address=0.0.0.0 --store
+
+func TestBasicWakuV2(t *testing.T) {
+	nwakuInfo, err := GetNwakuInfo(nil, nil)
+	require.NoError(t, err)
+
+	// Creating a fake DNS Discovery ENRTree
+	tree, url := makeTestTree("n", parseNodes([]string{nwakuInfo.EnrUri}), nil)
+	enrTreeAddress := url
+	envEnrTreeAddress := os.Getenv("ENRTREE_ADDRESS")
+	if envEnrTreeAddress != "" {
+		enrTreeAddress = envEnrTreeAddress
+	}
+
+	config := &Config{}
+	setDefaultConfig(config, false)
+	config.Port = 0
+	config.Resolver = mapResolver(tree.ToTXT("n"))
+	config.DiscV5BootstrapNodes = []string{enrTreeAddress}
+	config.DiscoveryLimit = 20
+	config.WakuNodes = []string{enrTreeAddress}
+	w, err := New(nil, "", config, nil, nil, nil, nil, nil)
+	require.NoError(t, err)
+	require.NoError(t, w.Start())
+
+	enr, err := w.ENR()
+	require.NoError(t, err)
+	require.NotNil(t, enr)
+
+	// DNSDiscovery
+	ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
+	defer cancel()
+
+	discoveredNodes, err := dnsdisc.RetrieveNodes(ctx, enrTreeAddress, dnsdisc.WithResolver(config.Resolver))
+	require.NoError(t, err)
+
+	// Peer used for retrieving history
+	r, err := rand.Int(rand.Reader, big.NewInt(int64(len(discoveredNodes))))
+	require.NoError(t, err)
+
+	storeNode := discoveredNodes[int(r.Int64())]
+
+	options := func(b *backoff.ExponentialBackOff) {
+		b.MaxElapsedTime = 30 * time.Second
+	}
+
+	// Sanity check, not great, but it's probably helpful
+	err = tt.RetryWithBackOff(func() error {
+		if len(w.Peers()) < 1 {
+			return errors.New("no peers discovered")
+		}
+		return nil
+	}, options)
+	require.NoError(t, err)
+
+	// Dropping Peer
+	err = w.DropPeer(storeNode.PeerID)
+	require.NoError(t, err)
+
+	// Dialing with peerID
+	err = w.DialPeerByID(storeNode.PeerID)
+	require.NoError(t, err)
+
+	err = tt.RetryWithBackOff(func() error {
+		if len(w.Peers()) < 1 {
+			return errors.New("no peers discovered")
+		}
+		return nil
+	}, options)
+	require.NoError(t, err)
+
+	filter := &common.Filter{
+		PubsubTopic:   config.DefaultShardPubsubTopic,
+		Messages:      common.NewMemoryMessageStore(),
+		ContentTopics: common.NewTopicSetFromBytes([][]byte{{1, 2, 3, 4}}),
+	}
+
+	_, err = w.Subscribe(filter)
+	require.NoError(t, err)
+
+	msgTimestamp := w.timestamp()
+	contentTopic := maps.Keys(filter.ContentTopics)[0]
+
+	time.Sleep(2 * time.Second)
+
+	_, err = w.Send(config.DefaultShardPubsubTopic, &pb.WakuMessage{
+		Payload:      []byte{1, 2, 3, 4, 5},
+		ContentTopic: contentTopic.ContentTopic(),
+		Version:      proto.Uint32(0),
+		Timestamp:    &msgTimestamp,
+	}, nil)
+
+	require.NoError(t, err)
+
+	time.Sleep(1 * time.Second)
+
+	messages := filter.Retrieve()
+	require.Len(t, messages, 1)
+
+	timestampInSeconds := msgTimestamp / int64(time.Second)
+	marginInSeconds := 20
+
+	options = func(b *backoff.ExponentialBackOff) {
+		b.MaxElapsedTime = 60 * time.Second
+		b.InitialInterval = 500 * time.Millisecond
+	}
+	err = tt.RetryWithBackOff(func() error {
+		_, envelopeCount, err := w.Query(
+			context.Background(),
+			storeNode.PeerID,
+			store.FilterCriteria{
+				ContentFilter: protocol.NewContentFilter(config.DefaultShardPubsubTopic, contentTopic.ContentTopic()),
+				TimeStart:     proto.Int64((timestampInSeconds - int64(marginInSeconds)) * int64(time.Second)),
+				TimeEnd:       proto.Int64((timestampInSeconds + int64(marginInSeconds)) * int64(time.Second)),
+			},
+			nil,
+			nil,
+			false,
+		)
+		if err != nil || envelopeCount == 0 {
+			// in case of failure extend timestamp margin up to 40secs
+			if marginInSeconds < 40 {
+				marginInSeconds += 5
+			}
+			return errors.New("no messages received from store node")
+		}
+		return nil
+	}, options)
+	require.NoError(t, err)
+
+	require.NoError(t, w.Stop())
+}
+
+type mapResolver map[string]string
+
+func (mr mapResolver) LookupTXT(ctx context.Context, name string) ([]string, error) {
+	if record, ok := mr[name]; ok {
+		return []string{record}, nil
+	}
+	return nil, errors.New("not found")
+}
+
+var signingKeyForTesting, _ = crypto.ToECDSA(hexutil.MustDecode("0xdc599867fc513f8f5e2c2c9c489cde5e71362d1d9ec6e693e0de063236ed1240"))
+
+func makeTestTree(domain string, nodes []*enode.Node, links []string) (*ethdnsdisc.Tree, string) {
+	tree, err := ethdnsdisc.MakeTree(1, nodes, links)
+	if err != nil {
+		panic(err)
+	}
+	url, err := tree.Sign(signingKeyForTesting, domain)
+	if err != nil {
+		panic(err)
+	}
+	return tree, url
+}
+
/*
func TestPeerExchange(t *testing.T) {
    logger, err := zap.NewDevelopment()
    require.NoError(t, err)
    // start a node which serves as the PeerExchange server
    config := &Config{}
    config.ClusterID = 16
    config.EnableDiscV5 = true
    config.EnablePeerExchangeServer = true
    config.EnablePeerExchangeClient = false
    pxServerNode, err := New(nil, "", config, logger.Named("pxServerNode"), nil, nil, nil, nil)
    require.NoError(t, err)
    require.NoError(t, pxServerNode.Start())

    time.Sleep(1 * time.Second)

    // start a node that will be discovered by PeerExchange
    config = &Config{}
    config.ClusterID = 16
    config.EnableDiscV5 = true
    config.EnablePeerExchangeServer = false
    config.EnablePeerExchangeClient = false
    enr, err := pxServerNode.ENR()
    require.NoError(t, err)

    config.DiscV5BootstrapNodes = []string{enr.String()}
    discV5Node, err := New(nil, "", config, logger.Named("discV5Node"), nil, nil, nil, nil)
    require.NoError(t, err)
    require.NoError(t, discV5Node.Start())

    time.Sleep(1 * time.Second)

    // start a light node which uses PeerExchange to discover peers
    enrNodes := []*enode.Node{enr}
    tree, url := makeTestTree("n", enrNodes, nil)
    resolver := mapResolver(tree.ToTXT("n"))

    config = &Config{}
    config.ClusterID = 16
    config.EnablePeerExchangeServer = false
    config.EnablePeerExchangeClient = true
    config.LightClient = true
    config.Resolver = resolver

    config.WakuNodes = []string{url}
    lightNode, err := New(nil, "", config, logger.Named("lightNode"), nil, nil, nil, nil)
    require.NoError(t, err)
    require.NoError(t, lightNode.Start())

    // Sanity check, not great, but it's probably helpful
    options := func(b *backoff.ExponentialBackOff) {
        b.MaxElapsedTime = 30 * time.Second
    }
    err = tt.RetryWithBackOff(func() error {
        // we should not use lightNode.Peers() here as it only indicates peers that are connected right now,
        // in light client mode, the peer will be closed via `w.node.Host().Network().ClosePeer(peerInfo.ID)`
        // after invoking identifyAndConnect, instead, we should check the peerStore, peers from peerStore
        // won't get deleted especially if they are statically added.
        numConnected, err := lightNode.GetNumConnectedPeers()
        if err != nil {
            return err
        }
        if numConnected == 2 {
            return nil
        }
        return errors.New("no peers discovered")
    }, options)
    require.NoError(t, err)

    _, cancel := context.WithCancel(context.Background())
    defer cancel()
    _, err = discV5Node.WakuPeerExchangeRequest(1)
    require.NoError(t, err)
    _, err = discV5Node.WakuPeerExchangeRequest(1)
    require.Error(t, err) // should fail due to rate limit

    require.NoError(t, lightNode.Stop())
    require.NoError(t, pxServerNode.Stop())
    require.NoError(t, discV5Node.Stop())
}

func TestWakuV2Filter(t *testing.T) {
    t.Skip("flaky test")

    enrTreeAddress := testBootENRBootstrap
    envEnrTreeAddress := os.Getenv("ENRTREE_ADDRESS")
    if envEnrTreeAddress != "" {
        enrTreeAddress = envEnrTreeAddress
    }
    config := &Config{}
    setDefaultConfig(config, true)
    config.EnablePeerExchangeClient = false
    config.Port = 0
    config.MinPeersForFilter = 2

    config.DiscV5BootstrapNodes = []string{enrTreeAddress}
    config.DiscoveryLimit = 20
    config.WakuNodes = []string{enrTreeAddress}
    w, err := New(nil, "", config, nil, nil, nil, nil, nil)
    require.NoError(t, err)
    require.NoError(t, w.Start())

    options := func(b *backoff.ExponentialBackOff) {
        b.MaxElapsedTime = 10 * time.Second
    }
    time.Sleep(10 * time.Second) // TODO: Check if we can remove this sleep.

    // Sanity check, not great, but it's probably helpful
    err = tt.RetryWithBackOff(func() error {
        peers, err := w.GetPeerIdsByProtocol(string(filter.FilterSubscribeID_v20beta1))
        if err != nil {
            return err
        }
        if len(peers) < 2 {
            return errors.New("no peers discovered")
        }
        return nil
    }, options)
    require.NoError(t, err)
    testPubsubTopic := "/waku/2/rs/16/32"
    contentTopicBytes := make([]byte, 4)
    _, err = rand.Read(contentTopicBytes)
    require.NoError(t, err)
    filter := &common.Filter{
        Messages:      common.NewMemoryMessageStore(),
        PubsubTopic:   testPubsubTopic,
        ContentTopics: common.NewTopicSetFromBytes([][]byte{contentTopicBytes}),
    }

    fID, err := w.Subscribe(filter)
    require.NoError(t, err)

    msgTimestamp := w.timestamp()
    contentTopic := maps.Keys(filter.ContentTopics)[0]

    _, err = w.Send(testPubsubTopic, &pb.WakuMessage{
        Payload:      []byte{1, 2, 3, 4, 5},
        ContentTopic: contentTopic.ContentTopic(),
        Version:      proto.Uint32(0),
        Timestamp:    &msgTimestamp,
    }, nil)
    require.NoError(t, err)
    time.Sleep(5 * time.Second)

    // Ensure there is at least 1 active filter subscription
    subscriptions := w.FilterLightnode().Subscriptions()
    require.Greater(t, len(subscriptions), 0)

    messages := filter.Retrieve()
    require.Len(t, messages, 1)

    // Mock peers going down
    _, err = w.FilterLightnode().UnsubscribeWithSubscription(w.ctx, subscriptions[0])
    require.NoError(t, err)

    time.Sleep(10 * time.Second)

    // Ensure there is at least 1 active filter subscription
    subscriptions = w.FilterLightnode().Subscriptions()
    require.Greater(t, len(subscriptions), 0)

    // Ensure that messages are retrieved with a fresh sub
    _, err = w.Send(testPubsubTopic, &pb.WakuMessage{
        Payload:      []byte{1, 2, 3, 4, 5, 6},
        ContentTopic: contentTopic.ContentTopic(),
        Version:      proto.Uint32(0),
        Timestamp:    &msgTimestamp,
    }, nil)
    require.NoError(t, err)
    time.Sleep(10 * time.Second)

    messages = filter.Retrieve()
    require.Len(t, messages, 1)
    err = w.Unsubscribe(context.Background(), fID)
    require.NoError(t, err)
    require.NoError(t, w.Stop())
}

func TestWakuV2Store(t *testing.T) {
    t.Skip("deprecated. Storenode must use nwaku")

    // Configuration for the first Waku node
    config1 := &Config{
        Port:                             0,
        ClusterID:                        16,
        EnableDiscV5:                     false,
        DiscoveryLimit:                   20,
        EnableStore:                      false,
        StoreCapacity:                    100,
        StoreSeconds:                     3600,
        EnableMissingMessageVerification: true,
    }
    w1PeersCh := make(chan peer.IDSlice, 100) // buffered not to block on the send side

    // Start the first Waku node
    w1, err := New(nil, "", config1, nil, nil, nil, nil, func(cs types.ConnStatus) {
        w1PeersCh <- maps.Keys(cs.Peers)
    })
    require.NoError(t, err)
    require.NoError(t, w1.Start())
    defer func() {
        require.NoError(t, w1.Stop())
        close(w1PeersCh)
    }()

    // Configuration for the second Waku node
    sql2, err := helpers.SetupTestMemorySQLDB(appdatabase.DbInitializer{})
    require.NoError(t, err)
    config2 := &Config{
        Port:           0,
        ClusterID:      16,
        EnableDiscV5:   false,
        DiscoveryLimit: 20,
        EnableStore:    true,
        StoreCapacity:  100,
        StoreSeconds:   3600,
    }

    // Start the second Waku node
    w2, err := New(nil, "", config2, nil, sql2, nil, nil, nil)
    require.NoError(t, err)
    require.NoError(t, w2.Start())
    w2EnvelopeCh := make(chan common.EnvelopeEvent, 100)
    w2.SubscribeEnvelopeEvents(w2EnvelopeCh)
    defer func() {
        require.NoError(t, w2.Stop())
        close(w2EnvelopeCh)
    }()

    // Connect the two nodes directly
    peer2Addr, err := w2.ListenAddresses()
    require.NoError(t, err)

    err = w1.DialPeer(peer2Addr[0])
    require.NoError(t, err)

    // Create a filter for the second node to catch messages
    filter := &common.Filter{
        Messages:      common.NewMemoryMessageStore(),
        PubsubTopic:   config2.DefaultShardPubsubTopic,
        ContentTopics: common.NewTopicSetFromBytes([][]byte{{1, 2, 3, 4}}),
    }

    _, err = w2.Subscribe(filter)
    require.NoError(t, err)

    time.Sleep(2 * time.Second)

    // Send a message from the first node
    msgTimestamp := w1.CurrentTime().UnixNano()
    contentTopic := maps.Keys(filter.ContentTopics)[0]
    _, err = w1.Send(config1.DefaultShardPubsubTopic, &pb.WakuMessage{
        Payload:      []byte{1, 2, 3, 4, 5},
        ContentTopic: contentTopic.ContentTopic(),
        Version:      proto.Uint32(0),
        Timestamp:    &msgTimestamp,
    }, nil)
    require.NoError(t, err)

    waitForEnvelope(t, contentTopic.ContentTopic(), w2EnvelopeCh)

    // Retrieve the message from the second node's filter
    messages := filter.Retrieve()
    require.Len(t, messages, 1)

    timestampInSeconds := msgTimestamp / int64(time.Second)
    marginInSeconds := 5
    // Query the second node's store for the message
    _, envelopeCount, err := w1.Query(
        context.Background(),
        w2.Host().ID(),
        store.FilterCriteria{
            TimeStart:     proto.Int64((timestampInSeconds - int64(marginInSeconds)) * int64(time.Second)),
            TimeEnd:       proto.Int64((timestampInSeconds + int64(marginInSeconds)) * int64(time.Second)),
            ContentFilter: protocol.NewContentFilter(config1.DefaultShardPubsubTopic, contentTopic.ContentTopic()),
        },
        nil,
        nil,
        false,
    )
    require.NoError(t, err)
    require.True(t, envelopeCount > 0, "no messages received from store node")
}

func waitForPeerConnection(t *testing.T, peerID peer.ID, peerCh chan peer.IDSlice) {
    waitForPeerConnectionWithTimeout(t, peerID, peerCh, 3*time.Second)
}

func waitForPeerConnectionWithTimeout(t *testing.T, peerID peer.ID, peerCh chan peer.IDSlice, timeout time.Duration) {
    ctx, cancel := context.WithTimeout(context.Background(), timeout)
    defer cancel()
    for {
        select {
        case peers := <-peerCh:
            for _, p := range peers {
                if p == peerID {
                    return
                }
            }
        case <-ctx.Done():
            require.Fail(t, "timed out waiting for peer "+peerID.String())
            return
        }
    }
}

func waitForEnvelope(t *testing.T, contentTopic string, envCh chan common.EnvelopeEvent) {
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()
    for {
        select {
        case env := <-envCh:
            if env.Topic.ContentTopic() == contentTopic {
                return
            }
        case <-ctx.Done():
            require.Fail(t, "timed out waiting for envelope's topic "+contentTopic)
            return
        }
    }
}

func TestOnlineChecker(t *testing.T) {
    w, err := New(nil, "shards.staging", nil, nil, nil, nil, nil, nil)
    require.NoError(t, w.Start())

    require.NoError(t, err)
    require.False(t, w.onlineChecker.IsOnline())

    w.ConnectionChanged(connection.State{Offline: false})
    require.True(t, w.onlineChecker.IsOnline())

    wg := sync.WaitGroup{}
    wg.Add(1)
    go func() {
        defer wg.Done()
        <-w.goingOnline
        require.True(t, true)
    }()

    time.Sleep(100 * time.Millisecond)

    w.ConnectionChanged(connection.State{Offline: true})
    require.False(t, w.onlineChecker.IsOnline())

    // Test lightnode online checker
    config := &Config{}
    config.ClusterID = 16
    config.LightClient = true
    lightNode, err := New(nil, "shards.staging", config, nil, nil, nil, nil, nil)
    require.NoError(t, err)

    err = lightNode.Start()
    require.NoError(t, err)

    require.False(t, lightNode.onlineChecker.IsOnline())
    f := &common.Filter{}
    lightNode.filterManager.SubscribeFilter("test", protocol.NewContentFilter(f.PubsubTopic, f.ContentTopics.ContentTopics()...))
}

func TestLightpushRateLimit(t *testing.T) {
    logger, err := zap.NewDevelopment()
    require.NoError(t, err)

    config0 := &Config{}
    setDefaultConfig(config0, false)
    w0PeersCh := make(chan peer.IDSlice, 5) // buffered not to block on the send side

    // Start the relay node
    w0, err := New(nil, "", config0, logger.Named("relayNode"), nil, nil, nil, func(cs types.ConnStatus) {
        w0PeersCh <- maps.Keys(cs.Peers)
    })
    require.NoError(t, err)
    require.NoError(t, w0.Start())
    defer func() {
        require.NoError(t, w0.Stop())
        close(w0PeersCh)
    }()

    contentTopics := common.NewTopicSetFromBytes([][]byte{{1, 2, 3, 4}})
    filter := &common.Filter{
        PubsubTopic:   config0.DefaultShardPubsubTopic,
        Messages:      common.NewMemoryMessageStore(),
        ContentTopics: contentTopics,
    }

    _, err = w0.Subscribe(filter)
    require.NoError(t, err)

    config1 := &Config{}
    setDefaultConfig(config1, false)
    w1PeersCh := make(chan peer.IDSlice, 5) // buffered not to block on the send side

    // Start the full node
    w1, err := New(nil, "", config1, logger.Named("fullNode"), nil, nil, nil, func(cs types.ConnStatus) {
        w1PeersCh <- maps.Keys(cs.Peers)
    })
    require.NoError(t, err)
    require.NoError(t, w1.Start())
    defer func() {
        require.NoError(t, w1.Stop())
        close(w1PeersCh)
    }()

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    // Connect the relay peer and full node
    err = w1.DialPeer(ctx, w0.ListenAddresses()[0].String())
    require.NoError(t, err)

    err = tt.RetryWithBackOff(func() error {
        if len(w1.Peers()) == 0 {
            return errors.New("no peers discovered")
        }
        return nil
    })
    require.NoError(t, err)

    config2 := &Config{}
    setDefaultConfig(config2, true)
    w2PeersCh := make(chan peer.IDSlice, 5) // buffered not to block on the send side

    // Start the light node
    w2, err := New(nil, "", config2, logger.Named("lightNode"), nil, nil, nil, func(cs types.ConnStatus) {
        w2PeersCh <- maps.Keys(cs.Peers)
    })
    require.NoError(t, err)
    require.NoError(t, w2.Start())
    defer func() {
        require.NoError(t, w2.Stop())
        close(w2PeersCh)
    }()

    // Use this instead of DialPeer to make sure the peer is added to the PeerStore and can be selected for Lightpush
    w2.AddDiscoveredPeer(w1.PeerID(), w1.ListenAddresses(), wps.Static, w1.cfg.DefaultShardedPubsubTopics, w1.node.ENR(), true)

    waitForPeerConnectionWithTimeout(t, w2.Host().ID(), w1PeersCh, 5*time.Second)

    event := make(chan common.EnvelopeEvent, 10)
    w2.SubscribeEnvelopeEvents(event)

    for i := range [4]int{} {
        msgTimestamp := w2.timestamp()
        _, err := w2.Send(config2.DefaultShardPubsubTopic, &pb.WakuMessage{
            Payload:      []byte{1, 2, 3, 4, 5, 6, byte(i)},
            ContentTopic: maps.Keys(contentTopics)[0].ContentTopic(),
            Version:      proto.Uint32(0),
            Timestamp:    &msgTimestamp,
        }, nil)

        require.NoError(t, err)

        time.Sleep(550 * time.Millisecond)
    }

    messages := filter.Retrieve()
    require.Len(t, messages, 2)
}

func TestTelemetryFormat(t *testing.T) {
    logger, err := zap.NewDevelopment()
    require.NoError(t, err)

    tc := NewBandwidthTelemetryClient(logger, "#")

    s := metrics.Stats{
        TotalIn:  10,
        TotalOut: 20,
        RateIn:   30,
        RateOut:  40,
    }

    m := make(map[libp2pprotocol.ID]metrics.Stats)
    m[relay.WakuRelayID_v200] = s
    m[filter.FilterPushID_v20beta1] = s
    m[filter.FilterSubscribeID_v20beta1] = s
    m[legacy_store.StoreID_v20beta4] = s
    m[lightpush.LightPushID_v20beta1] = s

    requestBody := tc.getTelemetryRequestBody(m)
    _, err = json.Marshal(requestBody)
    require.NoError(t, err)
}
*/
@@ -0,0 +1,58 @@
package wakuv2

import (
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "os"
    "strconv"
)

type NwakuInfo struct {
    ListenAddresses []string `json:"listenAddresses"`
    EnrUri          string   `json:"enrUri"`
}

func GetNwakuInfo(host *string, port *int) (NwakuInfo, error) {
    nwakuRestPort := 8645
    if port != nil {
        nwakuRestPort = *port
    }
    envNwakuRestPort := os.Getenv("NWAKU_REST_PORT")
    if envNwakuRestPort != "" {
        v, err := strconv.Atoi(envNwakuRestPort)
        if err != nil {
            return NwakuInfo{}, err
        }
        nwakuRestPort = v
    }

    nwakuRestHost := "localhost"
    if host != nil {
        nwakuRestHost = *host
    }
    envNwakuRestHost := os.Getenv("NWAKU_REST_HOST")
    if envNwakuRestHost != "" {
        nwakuRestHost = envNwakuRestHost
    }

    resp, err := http.Get(fmt.Sprintf("http://%s:%d/debug/v1/info", nwakuRestHost, nwakuRestPort))
    if err != nil {
        return NwakuInfo{}, err
    }
    defer resp.Body.Close()

    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return NwakuInfo{}, err
    }

    var data NwakuInfo
    err = json.Unmarshal(body, &data)
    if err != nil {
        return NwakuInfo{}, err
    }

    return data, nil
}
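GetNwakuInfo targets http://localhost:8645 by default; explicit host/port arguments override the default, and the NWAKU_REST_HOST / NWAKU_REST_PORT environment variables override both. A minimal usage sketch (illustrative only, not part of the commit; it assumes a nwaku node is already running with its REST API enabled, and the import path is an assumption):

    package main

    import (
        "fmt"
        "log"

        "github.com/status-im/status-go/wakuv2" // assumed import path
    )

    func main() {
        // nil host/port fall back to localhost:8645 unless NWAKU_REST_HOST /
        // NWAKU_REST_PORT are set in the environment.
        info, err := wakuv2.GetNwakuInfo(nil, nil)
        if err != nil {
            log.Fatalf("querying nwaku /debug/v1/info: %v", err)
        }
        fmt.Println("ENR:", info.EnrUri)
        for _, addr := range info.ListenAddresses {
            fmt.Println("listening on:", addr)
        }
    }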
@@ -11,7 +11,7 @@ import (

 // Trace implements EventTracer interface.
 // We use custom logging, because we want to base58-encode the peerIDs. And also make the messageIDs readable.
-func (w *NWaku) Trace(evt *pubsub_pb.TraceEvent) {
+func (w *Waku) Trace(evt *pubsub_pb.TraceEvent) {
 	f := []zap.Field{
 		zap.String("type", evt.Type.String()),
@@ -1,3 +1,6 @@
+//go:build !use_nwaku
+// +build !use_nwaku
+
 package wakuv2

 import (
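This hunk pins the existing go-waku test file to builds without the use_nwaku tag; that tag is how the commit selects between the two Waku stacks at compile time. A minimal sketch of the mechanism (the file names and the Backend function are hypothetical, for illustration only):

    // waku_backend_gowaku.go — compiled by default (hypothetical file)
    //go:build !use_nwaku
    // +build !use_nwaku

    package wakuv2

    // Backend reports which Waku implementation this binary was built with.
    func Backend() string { return "go-waku" }

    // waku_backend_nwaku.go — compiled only with `go build -tags use_nwaku` (hypothetical file)
    //go:build use_nwaku
    // +build use_nwaku

    package wakuv2

    // Backend reports which Waku implementation this binary was built with.
    func Backend() string { return "nwaku" }

Exactly one of the two files ends up in any given build, so callers elsewhere in the package never need to branch on the backend at runtime.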
@@ -352,10 +355,7 @@ func TestPeerExchange(t *testing.T) {
 	config.EnableDiscV5 = true
 	config.EnablePeerExchangeServer = false
 	config.EnablePeerExchangeClient = false
-	enr, err := pxServerNode.ENR()
-	require.NoError(t, err)
-
-	config.DiscV5BootstrapNodes = []string{enr.String()}
+	config.DiscV5BootstrapNodes = []string{pxServerNode.node.ENR().String()}
 	discV5Node, err := New(nil, "", config, logger.Named("discV5Node"), nil, nil, nil, nil)
 	require.NoError(t, err)
 	require.NoError(t, discV5Node.Start())
@@ -363,7 +363,7 @@ func TestPeerExchange(t *testing.T) {
 	time.Sleep(1 * time.Second)

 	// start a light node which uses PeerExchange to discover peers
-	enrNodes := []*enode.Node{enr}
+	enrNodes := []*enode.Node{pxServerNode.node.ENR()}
 	tree, url := makeTestTree("n", enrNodes, nil)
 	resolver := mapResolver(tree.ToTXT("n"))
@@ -388,23 +388,17 @@ func TestPeerExchange(t *testing.T) {
 		// in light client mode, the peer will be closed via `w.node.Host().Network().ClosePeer(peerInfo.ID)`
 		// after invoking identifyAndConnect, instead, we should check the peerStore, peers from peerStore
 		// won't get deleted especially if they are statically added.
-		numConnected, err := lightNode.GetNumConnectedPeers()
-		if err != nil {
-			return err
-		}
-		if numConnected == 2 {
+		if len(lightNode.node.Host().Peerstore().Peers()) == 2 {
 			return nil
 		}
 		return errors.New("no peers discovered")
 	}, options)
 	require.NoError(t, err)

-	_, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	_, err = discV5Node.WakuPeerExchangeRequest(1)
-	require.NoError(t, err)
-	_, err = discV5Node.WakuPeerExchangeRequest(1)
-	require.Error(t, err) // should fail due to rate limit
+	require.NoError(t, discV5Node.node.PeerExchange().Request(ctx, 1))
+	require.Error(t, discV5Node.node.PeerExchange().Request(ctx, 1)) // should fail due to rate limit

 	require.NoError(t, lightNode.Stop())
 	require.NoError(t, pxServerNode.Stop())
@@ -439,7 +433,7 @@ func TestWakuV2Filter(t *testing.T) {

 	// Sanity check, not great, but it's probably helpful
 	err = tt.RetryWithBackOff(func() error {
-		peers, err := w.GetPeerIdsByProtocol(string(filter.FilterSubscribeID_v20beta1))
+		peers, err := w.node.PeerManager().FilterPeersByProto(nil, nil, filter.FilterSubscribeID_v20beta1)
 		if err != nil {
 			return err
 		}
@@ -475,20 +469,20 @@ func TestWakuV2Filter(t *testing.T) {
 	time.Sleep(5 * time.Second)

 	// Ensure there is at least 1 active filter subscription
-	subscriptions := w.FilterLightnode().Subscriptions()
+	subscriptions := w.node.FilterLightnode().Subscriptions()
 	require.Greater(t, len(subscriptions), 0)

 	messages := filter.Retrieve()
 	require.Len(t, messages, 1)

 	// Mock peers going down
-	_, err = w.FilterLightnode().UnsubscribeWithSubscription(w.ctx, subscriptions[0])
+	_, err = w.node.FilterLightnode().UnsubscribeWithSubscription(w.ctx, subscriptions[0])
 	require.NoError(t, err)

 	time.Sleep(10 * time.Second)

 	// Ensure there is at least 1 active filter subscription
-	subscriptions = w.FilterLightnode().Subscriptions()
+	subscriptions = w.node.FilterLightnode().Subscriptions()
 	require.Greater(t, len(subscriptions), 0)

 	// Ensure that messages are retrieved with a fresh sub
@@ -562,10 +556,11 @@ func TestWakuV2Store(t *testing.T) {
 	// Connect the two nodes directly
 	peer2Addr, err := w2.ListenAddresses()
 	require.NoError(t, err)
-	err = w1.DialPeer(peer2Addr[0])
+	err = w1.node.DialPeer(context.Background(), peer2Addr[0].String())
 	require.NoError(t, err)

+	waitForPeerConnection(t, w2.node.Host().ID(), w1PeersCh)
+
 	// Create a filter for the second node to catch messages
 	filter := &common.Filter{
 		Messages: common.NewMemoryMessageStore(),
@@ -600,7 +595,7 @@ func TestWakuV2Store(t *testing.T) {
 	// Query the second node's store for the message
 	_, envelopeCount, err := w1.Query(
 		context.Background(),
-		w2.Host().ID(),
+		w2.node.Host().ID(),
 		store.FilterCriteria{
 			TimeStart: proto.Int64((timestampInSeconds - int64(marginInSeconds)) * int64(time.Second)),
 			TimeEnd:   proto.Int64((timestampInSeconds + int64(marginInSeconds)) * int64(time.Second)),
@@ -738,7 +733,9 @@ func TestLightpushRateLimit(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 	// Connect the relay peer and full node
-	err = w1.DialPeer(ctx, w0.ListenAddresses()[0].String())
+	peerAddr, err := w0.ListenAddresses()
+	require.NoError(t, err)
+	err = w1.node.DialPeer(ctx, peerAddr[0].String())
 	require.NoError(t, err)

 	err = tt.RetryWithBackOff(func() error {
@@ -765,9 +762,11 @@ func TestLightpushRateLimit(t *testing.T) {
 	}()

 	// Use this instead of DialPeer to make sure the peer is added to the PeerStore and can be selected for Lightpush
-	w2.AddDiscoveredPeer(w1.PeerID(), w1.ListenAddresses(), wps.Static, w1.cfg.DefaultShardedPubsubTopics, w1.node.ENR(), true)
+	addresses, err := w1.ListenAddresses()
+	require.NoError(t, err)
+	w2.node.AddDiscoveredPeer(w1.PeerID(), addresses, wps.Static, w1.cfg.DefaultShardedPubsubTopics, w1.node.ENR(), true)

-	waitForPeerConnectionWithTimeout(t, w2.Host().ID(), w1PeersCh, 5*time.Second)
+	waitForPeerConnectionWithTimeout(t, w2.node.Host().ID(), w1PeersCh, 5*time.Second)

 	event := make(chan common.EnvelopeEvent, 10)
 	w2.SubscribeEnvelopeEvents(event)