refactor_: start using nwaku
- some minor progress to add nwaku in status-go
- nwaku.go: GetNumConnectedPeers controls when passed pubsub is empty
- waku_test.go: adapt TestWakuV2Store
- add missing shard.go
- feat_: build nwaku with nix and use build tags to choose between go-waku and nwaku (#5896)
- chore_: update nwaku
- nwaku bump (#5911)
- bump: nwaku
- chore: add USE_NWAKU env flag
- fix: build libwaku only if needed
- feat: testing discovery and dialing with nwaku integration (#5940)
parent d657edffc4
commit 3107c1eb0b
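The squashed messages above describe selecting between go-waku and nwaku at build time via the use_nwaku build tag (wired through BUILD_TAGS in the Makefile changes below). As a rough illustration of the mechanism only — the file name and helper function here are hypothetical and not part of this commit — tag-gated Go files typically look like this:

```go
//go:build use_nwaku
// +build use_nwaku

// waku_backend_nwaku.go (hypothetical file name): compiled only when the
// use_nwaku tag is passed, e.g. `go build -tags use_nwaku`. A sibling file
// guarded by `//go:build !use_nwaku` would provide the go-waku-backed variant.
package wakuv2

// backendName is an illustrative helper used only to show the tag split.
func backendName() string {
	return "nwaku"
}
```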
@@ -64,7 +64,6 @@ coverage.html
Session.vim
.undodir/*
/.idea/
/.vscode/
/cmd/*/.ethereum/
*.iml
@@ -0,0 +1,3 @@
[submodule "third_party/nwaku"]
path = third_party/nwaku
url = https://github.com/waku-org/nwaku
@@ -8,4 +8,7 @@
"cSpell.words": [
"unmarshalling"
],
"gopls":{
"buildFlags": ["-tags=use_nwaku,gowaku_skip_migrations,gowaku_no_rln"]
}
}
Makefile
@@ -1,5 +1,6 @@
.PHONY: statusgo statusd-prune all test clean help
.PHONY: statusgo-android statusgo-ios
.PHONY: build-libwaku test-libwaku clean-libwaku rebuild-libwaku

# Clear any GOROOT set outside of the Nix shell
export GOROOT=
@@ -61,6 +62,10 @@ GIT_AUTHOR ?= $(shell git config user.email || echo $$USER)
ENABLE_METRICS ?= true
BUILD_TAGS ?= gowaku_no_rln

ifeq ($(USE_NWAKU), true)
BUILD_TAGS += use_nwaku
endif

BUILD_FLAGS ?= -ldflags="-X github.com/status-im/status-go/params.Version=$(RELEASE_TAG:v%=%) \
-X github.com/status-im/status-go/params.GitCommit=$(GIT_COMMIT) \
-X github.com/status-im/status-go/params.IpfsGatewayURL=$(IPFS_GATEWAY_URL) \
@@ -234,8 +239,19 @@ statusgo-library: ##@cross-compile Build status-go as static library for current
@echo "Static library built:"
@ls -la build/bin/libstatus.*

statusgo-shared-library: generate
statusgo-shared-library: ##@cross-compile Build status-go as shared library for current platform

LIBWAKU := third_party/nwaku/build/libwaku.$(GOBIN_SHARED_LIB_EXT)
$(LIBWAKU):
@echo "Building libwaku"
$(MAKE) -C third_party/nwaku update || { echo "nwaku make update failed"; exit 1; }
$(MAKE) -C ./third_party/nwaku libwaku

build-libwaku: $(LIBWAKU)

statusgo-shared-library: generate ##@cross-compile Build status-go as shared library for current platform
ifeq ($(USE_NWAKU),true)
$(MAKE) $(LIBWAKU)
endif
## cmd/library/README.md explains the magic incantation behind this
mkdir -p build/bin/statusgo-lib
go run cmd/library/*.go > build/bin/statusgo-lib/main.go
@@ -356,9 +372,38 @@ lint-fix:
-w {} \;
$(MAKE) vendor

mock: ##@other Regenerate mocks
mockgen -package=fake -destination=transactions/fake/mock.go -source=transactions/fake/txservice.go
mockgen -package=status -destination=services/status/account_mock.go -source=services/status/service.go
mockgen -package=peer -destination=services/peer/discoverer_mock.go -source=services/peer/service.go
mockgen -package=mock_transactor -destination=transactions/mock_transactor/transactor.go -source=transactions/transactor.go
mockgen -package=mock_pathprocessor -destination=services/wallet/router/pathprocessor/mock_pathprocessor/processor.go -source=services/wallet/router/pathprocessor/processor.go
mockgen -package=mock_bridge -destination=services/wallet/bridge/mock_bridge/bridge.go -source=services/wallet/bridge/bridge.go
mockgen -package=mock_client -destination=rpc/chain/mock/client/client.go -source=rpc/chain/client.go
mockgen -package=mock_token -destination=services/wallet/token/mock/token/tokenmanager.go -source=services/wallet/token/token.go
mockgen -package=mock_thirdparty -destination=services/wallet/thirdparty/mock/types.go -source=services/wallet/thirdparty/types.go
mockgen -package=mock_balance_persistence -destination=services/wallet/token/mock/balance_persistence/balance_persistence.go -source=services/wallet/token/balance_persistence.go
mockgen -package=mock_network -destination=rpc/network/mock/network.go -source=rpc/network/network.go
mockgen -package=mock_rpcclient -destination=rpc/mock/client/client.go -source=rpc/client.go
mockgen -package=mock_collectibles -destination=services/wallet/collectibles/mock/collection_data_db.go -source=services/wallet/collectibles/collection_data_db.go
mockgen -package=mock_collectibles -destination=services/wallet/collectibles/mock/collectible_data_db.go -source=services/wallet/collectibles/collectible_data_db.go
mockgen -package=mock_thirdparty -destination=services/wallet/thirdparty/mock/collectible_types.go -source=services/wallet/thirdparty/collectible_types.go
mockgen -package=mock_paraswap -destination=services/wallet/thirdparty/paraswap/mock/types.go -source=services/wallet/thirdparty/paraswap/types.go
mockgen -package=mock_onramp -destination=services/wallet/onramp/mock/types.go -source=services/wallet/onramp/types.go


docker-test: ##@tests Run tests in a docker container with golang.
docker run --privileged --rm -it -v "$(PWD):$(DOCKER_TEST_WORKDIR)" -w "$(DOCKER_TEST_WORKDIR)" $(DOCKER_TEST_IMAGE) go test ${ARGS}

test-libwaku: | $(LIBWAKU)
go test -tags '$(BUILD_TAGS) use_nwaku' -run TestBasicWakuV2 ./wakuv2/... -count 1 -v -json | jq -r '.Output'

clean-libwaku:
@echo "Removing libwaku"
rm $(LIBWAKU)

rebuild-libwaku: | clean-libwaku $(LIBWAKU)

test: test-unit ##@tests Run basic, short tests during development

test-unit: generate
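With these targets in place, libwaku should only be built when it is actually needed: `make build-libwaku` (or any target depending on `$(LIBWAKU)`) builds it from the third_party/nwaku submodule, `statusgo-shared-library` triggers the same rule only when `USE_NWAKU=true` is set (which also appends `use_nwaku` to BUILD_TAGS), and `make test-libwaku` runs the `TestBasicWakuV2` test against the built library. The exact invocations may depend on the Nix shell environment.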
@@ -24,12 +24,12 @@ import (
"github.com/status-im/status-go/multiaccounts"
"github.com/status-im/status-go/multiaccounts/accounts"
"github.com/status-im/status-go/multiaccounts/settings"
"github.com/status-im/status-go/wakuv2"

"github.com/status-im/status-go/logutils"
"github.com/status-im/status-go/params"
"github.com/status-im/status-go/protocol"
"github.com/status-im/status-go/protocol/common"
"github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/identity/alias"
"github.com/status-im/status-go/protocol/protobuf"
wakuextn "github.com/status-im/status-go/services/wakuext"
@@ -48,8 +48,8 @@ var (
seedPhrase = flag.String("seed-phrase", "", "Seed phrase")
version = flag.Bool("version", false, "Print version and dump configuration")
communityID = flag.String("community-id", "", "The id of the community")
shardCluster = flag.Int("shard-cluster", shard.MainStatusShardCluster, "The shard cluster in which the of the community is published")
shardIndex = flag.Int("shard-index", shard.DefaultShardIndex, "The shard index in which the community is published")
shardCluster = flag.Int("shard-cluster", wakuv2.MainStatusShardCluster, "The shard cluster in which the of the community is published")
shardIndex = flag.Int("shard-index", wakuv2.DefaultShardIndex, "The shard index in which the community is published")
chatID = flag.String("chat-id", "", "The id of the chat")

dataDir = flag.String("dir", getDefaultDataDir(), "Directory used by node to store data")
@@ -148,9 +148,9 @@ func main() {

messenger := wakuextservice.Messenger()

var s *shard.Shard = nil
var s *wakuv2.Shard = nil
if shardCluster != nil && shardIndex != nil {
s = &shard.Shard{
s = &wakuv2.Shard{
Cluster: uint16(*shardCluster),
Index: uint16(*shardIndex),
}
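From here on, the diff mechanically replaces the old protocol/common/shard package with shard types and helpers that now live in the wakuv2 package (the commit message mentions adding a missing shard.go). Based purely on the call sites visible in this diff, the relocated type presumably looks roughly like the sketch below; the field names and the cluster value 16 are grounded in the usages above (the requests hunk later notes "only MainStatusShard(16) is accepted"), but the DefaultShardIndex value and the topic format string are assumptions, not taken from this commit:

```go
package wakuv2

import "fmt"

// Constants referenced throughout the diff; 16 is confirmed by a comment in
// the diff, the default index value is an assumption here.
const (
	MainStatusShardCluster = 16
	DefaultShardIndex      = 32
)

// Shard identifies a Waku relay shard by cluster and index.
type Shard struct {
	Cluster uint16 `json:"cluster"`
	Index   uint16 `json:"index"`
}

// PubsubTopic renders the static-sharding pubsub topic for this shard.
// The exact format string is an assumption, not taken from this commit.
func (s *Shard) PubsubTopic() string {
	return fmt.Sprintf("/waku/2/rs/%d/%d", s.Cluster, s.Index)
}
```

The diff also shows helpers such as FromProtobuff, DefaultShardPubsubTopic, DefaultNonProtectedPubsubTopic and DefaultNonProtectedShard being moved into the same package.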
@@ -227,7 +227,7 @@ func (w *gethWakuV2Wrapper) DialPeerByID(peerID peer.ID) error {
}

func (w *gethWakuV2Wrapper) ListenAddresses() ([]multiaddr.Multiaddr, error) {
return w.waku.ListenAddresses(), nil
return w.waku.ListenAddresses()
}

func (w *gethWakuV2Wrapper) RelayPeersByTopic(topic string) (*types.PeerList, error) {
@@ -21,7 +21,7 @@ in pkgs.mkShell {

buildInputs = with pkgs; [
git jq which
go golangci-lint go-junit-report gopls go-bindata gomobileMod codecov-cli go-generate-fast
go golangci-lint go-junit-report gopls go-bindata gomobileMod codecov-cli go-generate-fast openssl
mockgen protobuf3_20 protoc-gen-go gotestsum go-modvendor openjdk cc-test-reporter
] ++ lib.optionals (stdenv.isDarwin) [ xcodeWrapper ];
@@ -10,7 +10,6 @@ import (
"reflect"
"time"

"github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/server"
"github.com/status-im/status-go/signal"
"github.com/status-im/status-go/transactions"
@@ -336,7 +335,7 @@ func (b *StatusNode) wakuV2Service(nodeConfig *params.NodeConfig) (*wakuv2.Waku,
Nameserver: nodeConfig.WakuV2Config.Nameserver,
UDPPort: nodeConfig.WakuV2Config.UDPPort,
AutoUpdate: nodeConfig.WakuV2Config.AutoUpdate,
DefaultShardPubsubTopic: shard.DefaultShardPubsubTopic(),
DefaultShardPubsubTopic: wakuv2.DefaultShardPubsubTopic(),
TelemetryServerURL: nodeConfig.WakuV2Config.TelemetryServerURL,
ClusterID: nodeConfig.ClusterConfig.ClusterID,
EnableMissingMessageVerification: nodeConfig.WakuV2Config.EnableMissingMessageVerification,
@@ -23,12 +23,12 @@ import (
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/images"
"github.com/status-im/status-go/protocol/common"
"github.com/status-im/status-go/protocol/common/shard"
community_token "github.com/status-im/status-go/protocol/communities/token"
"github.com/status-im/status-go/protocol/protobuf"
"github.com/status-im/status-go/protocol/requests"
"github.com/status-im/status-go/protocol/v1"
"github.com/status-im/status-go/server"
"github.com/status-im/status-go/wakuv2"
)

const signatureLength = 65
@@ -55,7 +55,7 @@ type Config struct {
RequestsToJoin []*RequestToJoin
MemberIdentity *ecdsa.PrivateKey
EventsData *EventsData
Shard *shard.Shard
Shard *wakuv2.Shard
PubsubTopicPrivateKey *ecdsa.PrivateKey
LastOpenedAt int64
}
@@ -172,7 +172,7 @@ func (o *Community) MarshalPublicAPIJSON() ([]byte, error) {
ActiveMembersCount uint64 `json:"activeMembersCount"`
PubsubTopic string `json:"pubsubTopic"`
PubsubTopicKey string `json:"pubsubTopicKey"`
Shard *shard.Shard `json:"shard"`
Shard *wakuv2.Shard `json:"shard"`
}{
ID: o.ID(),
Verified: o.config.Verified,
@@ -308,7 +308,7 @@ func (o *Community) MarshalJSON() ([]byte, error) {
ActiveMembersCount uint64 `json:"activeMembersCount"`
PubsubTopic string `json:"pubsubTopic"`
PubsubTopicKey string `json:"pubsubTopicKey"`
Shard *shard.Shard `json:"shard"`
Shard *wakuv2.Shard `json:"shard"`
LastOpenedAt int64 `json:"lastOpenedAt"`
Clock uint64 `json:"clock"`
}{
@@ -461,7 +461,7 @@ func (o *Community) DescriptionText() string {
return ""
}

func (o *Community) Shard() *shard.Shard {
func (o *Community) Shard() *wakuv2.Shard {
if o != nil && o.config != nil {
return o.config.Shard
}
@@ -30,7 +30,6 @@ import (
multiaccountscommon "github.com/status-im/status-go/multiaccounts/common"
"github.com/status-im/status-go/params"
"github.com/status-im/status-go/protocol/common"
"github.com/status-im/status-go/protocol/common/shard"
community_token "github.com/status-im/status-go/protocol/communities/token"
"github.com/status-im/status-go/protocol/encryption"
"github.com/status-im/status-go/protocol/ens"
@@ -45,6 +44,7 @@ import (
"github.com/status-im/status-go/services/wallet/token"
"github.com/status-im/status-go/signal"
"github.com/status-im/status-go/transactions"
"github.com/status-im/status-go/wakuv2"
)

type Publisher interface {
@@ -740,8 +740,8 @@ func (m *Manager) All() ([]*Community, error) {
}

type CommunityShard struct {
CommunityID string `json:"communityID"`
Shard *shard.Shard `json:"shard"`
CommunityID string `json:"communityID"`
Shard *wakuv2.Shard `json:"shard"`
}

type CuratedCommunities struct {
@@ -1549,7 +1549,7 @@ func (m *Manager) DeleteCommunity(id types.HexBytes) error {
return m.persistence.DeleteCommunitySettings(id)
}

func (m *Manager) updateShard(community *Community, shard *shard.Shard, clock uint64) error {
func (m *Manager) updateShard(community *Community, shard *wakuv2.Shard, clock uint64) error {
community.config.Shard = shard
if shard == nil {
return m.persistence.DeleteCommunityShard(community.ID())
@@ -1558,7 +1558,7 @@ func (m *Manager) updateShard(community *Community, shard *shard.Shard, clock ui
return m.persistence.SaveCommunityShard(community.ID(), shard, clock)
}

func (m *Manager) UpdateShard(community *Community, shard *shard.Shard, clock uint64) error {
func (m *Manager) UpdateShard(community *Community, shard *wakuv2.Shard, clock uint64) error {
m.communityLock.Lock(community.ID())
defer m.communityLock.Unlock(community.ID())

@@ -1566,7 +1566,7 @@ func (m *Manager) UpdateShard(community *Community, shard *shard.Shard, clock ui
}

// SetShard assigns a shard to a community
func (m *Manager) SetShard(communityID types.HexBytes, shard *shard.Shard) (*Community, error) {
func (m *Manager) SetShard(communityID types.HexBytes, shard *wakuv2.Shard) (*Community, error) {
m.communityLock.Lock(communityID)
defer m.communityLock.Unlock(communityID)

@@ -2158,11 +2158,11 @@ func (m *Manager) HandleCommunityDescriptionMessage(signer *ecdsa.PublicKey, des
if err != nil {
return nil, err
}
var cShard *shard.Shard
var cShard *wakuv2.Shard
if communityShard == nil {
cShard = &shard.Shard{Cluster: shard.MainStatusShardCluster, Index: shard.DefaultShardIndex}
cShard = &wakuv2.Shard{Cluster: wakuv2.MainStatusShardCluster, Index: wakuv2.DefaultShardIndex}
} else {
cShard = shard.FromProtobuff(communityShard)
cShard = wakuv2.FromProtobuff(communityShard)
}
config := Config{
CommunityDescription: processedDescription,
@@ -3940,11 +3940,11 @@ func (m *Manager) GetByIDString(idString string) (*Community, error) {
return m.GetByID(id)
}

func (m *Manager) GetCommunityShard(communityID types.HexBytes) (*shard.Shard, error) {
func (m *Manager) GetCommunityShard(communityID types.HexBytes) (*wakuv2.Shard, error) {
return m.persistence.GetCommunityShard(communityID)
}

func (m *Manager) SaveCommunityShard(communityID types.HexBytes, shard *shard.Shard, clock uint64) error {
func (m *Manager) SaveCommunityShard(communityID types.HexBytes, shard *wakuv2.Shard, clock uint64) error {
m.communityLock.Lock(communityID)
defer m.communityLock.Unlock(communityID)
@@ -16,11 +16,11 @@ import (
"github.com/status-im/status-go/eth-node/crypto"
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/protocol/common"
"github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/communities/token"
"github.com/status-im/status-go/protocol/encryption"
"github.com/status-im/status-go/protocol/protobuf"
"github.com/status-im/status-go/services/wallet/bigint"
"github.com/status-im/status-go/wakuv2"
)

type Persistence struct {
@@ -1766,7 +1766,7 @@ func (p *Persistence) AllNonApprovedCommunitiesRequestsToJoin() ([]*RequestToJoi
return nonApprovedRequestsToJoin, nil
}

func (p *Persistence) SaveCommunityShard(communityID types.HexBytes, shard *shard.Shard, clock uint64) error {
func (p *Persistence) SaveCommunityShard(communityID types.HexBytes, shard *wakuv2.Shard, clock uint64) error {
var cluster, index *uint16

if shard != nil {
@@ -1801,7 +1801,7 @@ func (p *Persistence) SaveCommunityShard(communityID types.HexBytes, shard *shar
}

// if data will not be found, will return sql.ErrNoRows. Must be handled on the caller side
func (p *Persistence) GetCommunityShard(communityID types.HexBytes) (*shard.Shard, error) {
func (p *Persistence) GetCommunityShard(communityID types.HexBytes) (*wakuv2.Shard, error) {
var cluster sql.NullInt64
var index sql.NullInt64
err := p.db.QueryRow(`SELECT shard_cluster, shard_index FROM communities_shards WHERE community_id = ?`,
@@ -1815,7 +1815,7 @@ func (p *Persistence) GetCommunityShard(communityID types.HexBytes) (*shard.Shar
return nil, nil
}

return &shard.Shard{
return &wakuv2.Shard{
Cluster: uint16(cluster.Int64),
Index: uint16(index.Int64),
}, nil
@@ -7,8 +7,8 @@ import (

"github.com/status-im/status-go/eth-node/crypto"
"github.com/status-im/status-go/protocol/common"
"github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/server"
"github.com/status-im/status-go/wakuv2"
)

func communityToRecord(community *Community) (*CommunityRecord, error) {
@@ -118,9 +118,9 @@ func recordBundleToCommunity(
}
}

var s *shard.Shard = nil
var s *wakuv2.Shard = nil
if r.community.shardCluster != nil && r.community.shardIndex != nil {
s = &shard.Shard{
s = &wakuv2.Shard{
Cluster: uint16(*r.community.shardCluster),
Index: uint16(*r.community.shardIndex),
}
@@ -15,13 +15,13 @@ import (
"github.com/status-im/status-go/eth-node/crypto"
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/protocol/common"
"github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/communities/token"
"github.com/status-im/status-go/protocol/encryption"
"github.com/status-im/status-go/protocol/protobuf"
"github.com/status-im/status-go/protocol/sqlite"
"github.com/status-im/status-go/services/wallet/bigint"
"github.com/status-im/status-go/t/helpers"
"github.com/status-im/status-go/wakuv2"
)

func TestPersistenceSuite(t *testing.T) {
@@ -787,7 +787,7 @@ func (s *PersistenceSuite) TestSaveShardInfo() {
s.Require().Nil(resultShard)

// not nil shard
expectedShard := &shard.Shard{
expectedShard := &wakuv2.Shard{
Cluster: 1,
Index: 2,
}
@@ -25,13 +25,13 @@ import (
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/params"
"github.com/status-im/status-go/protocol/common"
"github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/communities"
"github.com/status-im/status-go/protocol/protobuf"
"github.com/status-im/status-go/protocol/requests"
"github.com/status-im/status-go/protocol/transport"
"github.com/status-im/status-go/protocol/tt"
"github.com/status-im/status-go/services/wallet/thirdparty"
"github.com/status-im/status-go/wakuv2"
)

const testChainID1 = 1
@@ -488,11 +488,12 @@ func (s *MessengerCommunitiesTokenPermissionsSuite) TestBecomeMemberPermissions(
cfg := testWakuV2Config{
logger: s.logger.Named("store-node-waku"),
enableStore: false,
clusterID: shard.MainStatusShardCluster,
clusterID: wakuv2.MainStatusShardCluster,
}
wakuStoreNode := NewTestWakuV2(&s.Suite, cfg)

storeNodeListenAddresses := wakuStoreNode.ListenAddresses()
storeNodeListenAddresses, err := wakuStoreNode.ListenAddresses()
s.Require().NoError(err)
s.Require().LessOrEqual(1, len(storeNodeListenAddresses))

storeNodeAddress := storeNodeListenAddresses[0]
@@ -8,8 +8,8 @@ import (
"github.com/status-im/status-go/api/multiformat"
"github.com/status-im/status-go/images"
"github.com/status-im/status-go/protocol/common"
"github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/communities"
"github.com/status-im/status-go/wakuv2"
)

type StatusUnfurler struct {
@@ -83,7 +83,7 @@ func (u *StatusUnfurler) buildContactData(publicKey string) (*common.StatusConta
return c, nil
}

func (u *StatusUnfurler) buildCommunityData(communityID string, shard *shard.Shard) (*communities.Community, *common.StatusCommunityLinkPreview, error) {
func (u *StatusUnfurler) buildCommunityData(communityID string, shard *wakuv2.Shard) (*communities.Community, *common.StatusCommunityLinkPreview, error) {
// This automatically checks the database
community, err := u.m.FetchCommunity(&FetchCommunityRequest{
CommunityKey: communityID,
@@ -108,7 +108,7 @@ func (u *StatusUnfurler) buildCommunityData(communityID string, shard *shard.Sha
return community, statusCommunityLinkPreviews, nil
}

func (u *StatusUnfurler) buildChannelData(channelUUID string, communityID string, communityShard *shard.Shard) (*common.StatusCommunityChannelLinkPreview, error) {
func (u *StatusUnfurler) buildChannelData(channelUUID string, communityID string, communityShard *wakuv2.Shard) (*common.StatusCommunityChannelLinkPreview, error) {
community, communityData, err := u.buildCommunityData(communityID, communityShard)
if err != nil {
return nil, fmt.Errorf("failed to build channel community data: %w", err)
@@ -38,13 +38,13 @@ import (
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/images"
multiaccountscommon "github.com/status-im/status-go/multiaccounts/common"
"github.com/status-im/status-go/wakuv2"

"github.com/status-im/status-go/multiaccounts"
"github.com/status-im/status-go/multiaccounts/accounts"
"github.com/status-im/status-go/multiaccounts/settings"
"github.com/status-im/status-go/protocol/anonmetrics"
"github.com/status-im/status-go/protocol/common"
"github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/communities"
"github.com/status-im/status-go/protocol/encryption"
"github.com/status-im/status-go/protocol/encryption/multidevice"
@@ -1738,7 +1738,7 @@ func (m *Messenger) InitFilters() error {
logger := m.logger.With(zap.String("site", "Init"))

// Community requests will arrive in this pubsub topic
err := m.SubscribeToPubsubTopic(shard.DefaultNonProtectedPubsubTopic(), nil)
err := m.SubscribeToPubsubTopic(wakuv2.DefaultNonProtectedPubsubTopic(), nil)
if err != nil {
return err
}
@@ -25,6 +25,7 @@ import (

gocommon "github.com/status-im/status-go/common"
utils "github.com/status-im/status-go/common"
"github.com/status-im/status-go/wakuv2"

"github.com/status-im/status-go/account"
multiaccountscommon "github.com/status-im/status-go/multiaccounts/common"
@@ -34,7 +35,6 @@ import (
"github.com/status-im/status-go/images"
"github.com/status-im/status-go/multiaccounts/accounts"
"github.com/status-im/status-go/protocol/common"
"github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/communities"
"github.com/status-im/status-go/protocol/communities/token"
"github.com/status-im/status-go/protocol/discord"
@@ -89,10 +89,10 @@ const (

type FetchCommunityRequest struct {
// CommunityKey should be either a public or a private community key
CommunityKey string `json:"communityKey"`
Shard *shard.Shard `json:"shard"`
TryDatabase bool `json:"tryDatabase"`
WaitForResponse bool `json:"waitForResponse"`
CommunityKey string `json:"communityKey"`
Shard *wakuv2.Shard `json:"shard"`
TryDatabase bool `json:"tryDatabase"`
WaitForResponse bool `json:"waitForResponse"`
}

func (r *FetchCommunityRequest) Validate() error {
@@ -346,7 +346,7 @@ func (m *Messenger) handleCommunitiesSubscription(c chan *communities.Subscripti
Sender: community.PrivateKey(),
SkipEncryptionLayer: true,
MessageType: protobuf.ApplicationMetadataMessage_COMMUNITY_USER_KICKED,
PubsubTopic: shard.DefaultNonProtectedPubsubTopic(),
PubsubTopic: wakuv2.DefaultNonProtectedPubsubTopic(),
}

_, err = m.sender.SendPrivate(context.Background(), pk, rawMessage)
@@ -681,7 +681,7 @@ func (m *Messenger) handleCommunitySharedAddressesRequest(state *ReceivedMessage
CommunityID: community.ID(),
SkipEncryptionLayer: true,
MessageType: protobuf.ApplicationMetadataMessage_COMMUNITY_SHARED_ADDRESSES_RESPONSE,
PubsubTopic: shard.DefaultNonProtectedPubsubTopic(),
PubsubTopic: wakuv2.DefaultNonProtectedPubsubTopic(),
ResendType: common.ResendTypeRawMessage,
ResendMethod: common.ResendMethodSendPrivate,
Recipients: []*ecdsa.PublicKey{signer},
@@ -1044,7 +1044,7 @@ func (m *Messenger) JoinCommunity(ctx context.Context, communityID types.HexByte
return mr, nil
}

func (m *Messenger) subscribeToCommunityShard(communityID []byte, shard *shard.Shard) error {
func (m *Messenger) subscribeToCommunityShard(communityID []byte, shard *wakuv2.Shard) error {
if m.transport.WakuVersion() != 2 {
return nil
}
@@ -1065,7 +1065,7 @@ func (m *Messenger) subscribeToCommunityShard(communityID []byte, shard *shard.S
return m.transport.SubscribeToPubsubTopic(pubsubTopic, pubK)
}

func (m *Messenger) unsubscribeFromShard(shard *shard.Shard) error {
func (m *Messenger) unsubscribeFromShard(shard *wakuv2.Shard) error {
if m.transport.WakuVersion() != 2 {
return nil
}
@@ -1494,7 +1494,7 @@ func (m *Messenger) RequestToJoinCommunity(request *requests.RequestToJoinCommun
ResendType: common.ResendTypeRawMessage,
SkipEncryptionLayer: true,
MessageType: protobuf.ApplicationMetadataMessage_COMMUNITY_REQUEST_TO_JOIN,
PubsubTopic: shard.DefaultNonProtectedPubsubTopic(),
PubsubTopic: wakuv2.DefaultNonProtectedPubsubTopic(),
Priority: &common.HighPriority,
}

@@ -1872,7 +1872,7 @@ func (m *Messenger) CancelRequestToJoinCommunity(ctx context.Context, request *r
CommunityID: community.ID(),
SkipEncryptionLayer: true,
MessageType: protobuf.ApplicationMetadataMessage_COMMUNITY_CANCEL_REQUEST_TO_JOIN,
PubsubTopic: shard.DefaultNonProtectedPubsubTopic(),
PubsubTopic: wakuv2.DefaultNonProtectedPubsubTopic(),
ResendType: common.ResendTypeRawMessage,
Priority: &common.HighPriority,
}
@@ -2028,7 +2028,7 @@ func (m *Messenger) acceptRequestToJoinCommunity(requestToJoin *communities.Requ
CommunityID: community.ID(),
SkipEncryptionLayer: true,
MessageType: protobuf.ApplicationMetadataMessage_COMMUNITY_REQUEST_TO_JOIN_RESPONSE,
PubsubTopic: shard.DefaultNonProtectedPubsubTopic(),
PubsubTopic: wakuv2.DefaultNonProtectedPubsubTopic(),
ResendType: common.ResendTypeRawMessage,
ResendMethod: common.ResendMethodSendPrivate,
Recipients: []*ecdsa.PublicKey{pk},
@@ -2503,7 +2503,7 @@ func (m *Messenger) DefaultFilters(o *communities.Community) []transport.Filters
{ChatID: updatesChannelID, PubsubTopic: communityPubsubTopic},
{ChatID: mlChannelID, PubsubTopic: communityPubsubTopic},
{ChatID: memberUpdateChannelID, PubsubTopic: communityPubsubTopic},
{ChatID: uncompressedPubKey, PubsubTopic: shard.DefaultNonProtectedPubsubTopic()},
{ChatID: uncompressedPubKey, PubsubTopic: wakuv2.DefaultNonProtectedPubsubTopic()},
}

return filters
@@ -3562,7 +3562,7 @@ func (m *Messenger) HandleCommunityShardKey(state *ReceivedMessageState, message
}

func (m *Messenger) handleCommunityShardAndFiltersFromProto(community *communities.Community, message *protobuf.CommunityShardKey) error {
err := m.communitiesManager.UpdateShard(community, shard.FromProtobuff(message.Shard), message.Clock)
err := m.communitiesManager.UpdateShard(community, wakuv2.FromProtobuff(message.Shard), message.Clock)
if err != nil {
return err
}
@@ -3584,7 +3584,7 @@ func (m *Messenger) handleCommunityShardAndFiltersFromProto(community *communiti
}

// Unsubscribing from existing shard
if community.Shard() != nil && community.Shard() != shard.FromProtobuff(message.GetShard()) {
if community.Shard() != nil && community.Shard() != wakuv2.FromProtobuff(message.GetShard()) {
err := m.unsubscribeFromShard(community.Shard())
if err != nil {
return err
@@ -3598,7 +3598,7 @@ func (m *Messenger) handleCommunityShardAndFiltersFromProto(community *communiti
return err
}
// Update community filters in case of change of shard
if community.Shard() != shard.FromProtobuff(message.GetShard()) {
if community.Shard() != wakuv2.FromProtobuff(message.GetShard()) {
err = m.UpdateCommunityFilters(community)
if err != nil {
return err
@@ -12,11 +12,11 @@ import (
gethbridge "github.com/status-im/status-go/eth-node/bridge/geth"
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/protocol/common"
"github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/communities"
"github.com/status-im/status-go/protocol/protobuf"
"github.com/status-im/status-go/protocol/requests"
"github.com/status-im/status-go/protocol/tt"
"github.com/status-im/status-go/wakuv2"
)

func TestMessengerCommunitiesShardingSuite(t *testing.T) {
@@ -108,7 +108,7 @@ func (s *MessengerCommunitiesShardingSuite) TearDownTest() {
_ = s.logger.Sync()
}

func (s *MessengerCommunitiesShardingSuite) testPostToCommunityChat(shard *shard.Shard, community *communities.Community, chat *Chat) {
func (s *MessengerCommunitiesShardingSuite) testPostToCommunityChat(shard *wakuv2.Shard, community *communities.Community, chat *Chat) {
_, err := s.owner.SetCommunityShard(&requests.SetCommunityShard{
CommunityID: community.ID(),
Shard: shard,
@@ -144,8 +144,8 @@ func (s *MessengerCommunitiesShardingSuite) TestPostToCommunityChat() {

// Members should be able to receive messages in a community with sharding enabled.
{
shard := &shard.Shard{
Cluster: shard.MainStatusShardCluster,
shard := &wakuv2.Shard{
Cluster: wakuv2.MainStatusShardCluster,
Index: 128,
}
s.testPostToCommunityChat(shard, community, chat)
@@ -153,8 +153,8 @@ func (s *MessengerCommunitiesShardingSuite) TestPostToCommunityChat() {

// Members should be able to receive messages in a community where the sharding configuration has been edited.
{
shard := &shard.Shard{
Cluster: shard.MainStatusShardCluster,
shard := &wakuv2.Shard{
Cluster: wakuv2.MainStatusShardCluster,
Index: 256,
}
s.testPostToCommunityChat(shard, community, chat)
@@ -162,8 +162,8 @@ func (s *MessengerCommunitiesShardingSuite) TestPostToCommunityChat() {

// Members should continue to receive messages in a community if it is moved back to default shard.
{
shard := &shard.Shard{
Cluster: shard.MainStatusShardCluster,
shard := &wakuv2.Shard{
Cluster: wakuv2.MainStatusShardCluster,
Index: 32,
}
s.testPostToCommunityChat(shard, community, chat)
@@ -176,8 +176,8 @@ func (s *MessengerCommunitiesShardingSuite) TestIgnoreOutdatedShardKey() {
advertiseCommunityToUserOldWay(&s.Suite, community, s.owner, s.alice)
joinCommunity(&s.Suite, community.ID(), s.owner, s.alice, alicePassword, []string{aliceAddress1})

shard := &shard.Shard{
Cluster: shard.MainStatusShardCluster,
shard := &wakuv2.Shard{
Cluster: wakuv2.MainStatusShardCluster,
Index: 128,
}
@@ -12,11 +12,11 @@ import (
"github.com/status-im/status-go/eth-node/crypto"
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/protocol/common"
"github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/communities"
"github.com/status-im/status-go/protocol/protobuf"
"github.com/status-im/status-go/protocol/transport"
v1protocol "github.com/status-im/status-go/protocol/v1"
"github.com/status-im/status-go/wakuv2"
)

func (m *Messenger) sendPublicCommunityShardInfo(community *communities.Community) error {
@@ -57,7 +57,7 @@ func (m *Messenger) sendPublicCommunityShardInfo(community *communities.Communit
// we don't want to wrap in an encryption layer message
SkipEncryptionLayer: true,
MessageType: protobuf.ApplicationMetadataMessage_COMMUNITY_PUBLIC_SHARD_INFO,
PubsubTopic: shard.DefaultNonProtectedPubsubTopic(), // it must be sent always to default shard pubsub topic
PubsubTopic: wakuv2.DefaultNonProtectedPubsubTopic(), // it must be sent always to default shard pubsub topic
Priority: &common.HighPriority,
}

@@ -89,7 +89,7 @@ func (m *Messenger) HandleCommunityPublicShardInfo(state *ReceivedMessageState,
return err
}

err = m.communitiesManager.SaveCommunityShard(publicShardInfo.CommunityId, shard.FromProtobuff(publicShardInfo.Shard), publicShardInfo.Clock)
err = m.communitiesManager.SaveCommunityShard(publicShardInfo.CommunityId, wakuv2.FromProtobuff(publicShardInfo.Shard), publicShardInfo.Clock)
if err != nil && err != communities.ErrOldShardInfo {
logError(err)
return err
@@ -15,11 +15,11 @@ import (
"github.com/status-im/status-go/eth-node/crypto"
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/protocol/common"
"github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/communities"
"github.com/status-im/status-go/protocol/protobuf"
"github.com/status-im/status-go/protocol/requests"
"github.com/status-im/status-go/services/utils"
"github.com/status-im/status-go/wakuv2"
)

type CommunityURLData struct {
@@ -49,7 +49,7 @@ type URLDataResponse struct {
Community *CommunityURLData `json:"community"`
Channel *CommunityChannelURLData `json:"channel"`
Contact *ContactURLData `json:"contact"`
Shard *shard.Shard `json:"shard,omitempty"`
Shard *wakuv2.Shard `json:"shard,omitempty"`
}

const baseShareURL = "https://status.app"
@@ -204,7 +204,7 @@ func parseCommunityURLWithData(data string, chatKey string) (*URLDataResponse, e
TagIndices: tagIndices,
CommunityID: types.EncodeHex(communityID),
},
Shard: shard.FromProtobuff(urlDataProto.Shard),
Shard: wakuv2.FromProtobuff(urlDataProto.Shard),
}, nil
}

@@ -380,7 +380,7 @@ func parseCommunityChannelURLWithData(data string, chatKey string) (*URLDataResp
Color: channelProto.Color,
ChannelUUID: channelProto.Uuid,
},
Shard: shard.FromProtobuff(urlDataProto.Shard),
Shard: wakuv2.FromProtobuff(urlDataProto.Shard),
}, nil
}
@@ -9,7 +9,6 @@ import (

gocommon "github.com/status-im/status-go/common"
"github.com/status-im/status-go/eth-node/crypto"
"github.com/status-im/status-go/protocol/common/shard"
"github.com/waku-org/go-waku/waku/v2/api/history"

"go.uber.org/zap"
@@ -17,6 +16,7 @@ import (
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/protocol/communities"
"github.com/status-im/status-go/protocol/transport"
"github.com/status-im/status-go/wakuv2"
)

const (
@@ -82,7 +82,7 @@ func (m *StoreNodeRequestManager) FetchCommunity(community communities.Community
zap.Any("community", community),
zap.Any("config", cfg))

requestCommunity := func(communityID string, shard *shard.Shard) (*communities.Community, StoreNodeRequestStats, error) {
requestCommunity := func(communityID string, shard *wakuv2.Shard) (*communities.Community, StoreNodeRequestStats, error) {
channel, err := m.subscribeToRequest(storeNodeCommunityRequest, communityID, shard, cfg)
if err != nil {
return nil, StoreNodeRequestStats{}, fmt.Errorf("failed to create a request for community: %w", err)
@@ -100,7 +100,7 @@ func (m *StoreNodeRequestManager) FetchCommunity(community communities.Community
communityShard := community.Shard
if communityShard == nil {
id := transport.CommunityShardInfoTopic(community.CommunityID)
fetchedShard, err := m.subscribeToRequest(storeNodeShardRequest, id, shard.DefaultNonProtectedShard(), cfg)
fetchedShard, err := m.subscribeToRequest(storeNodeShardRequest, id, wakuv2.DefaultNonProtectedShard(), cfg)
if err != nil {
return nil, StoreNodeRequestStats{}, fmt.Errorf("failed to create a shard info request: %w", err)
}
@@ -178,7 +178,7 @@ func (m *StoreNodeRequestManager) FetchContact(contactID string, opts []StoreNod
// subscribeToRequest checks if a request for given community/contact is already in progress, creates and installs
// a new one if not found, and returns a subscription to the result of the found/started request.
// The subscription can then be used to get the result of the request, this could be either a community/contact or an error.
func (m *StoreNodeRequestManager) subscribeToRequest(requestType storeNodeRequestType, dataID string, shard *shard.Shard, cfg StoreNodeRequestConfig) (storeNodeResponseSubscription, error) {
func (m *StoreNodeRequestManager) subscribeToRequest(requestType storeNodeRequestType, dataID string, shard *wakuv2.Shard, cfg StoreNodeRequestConfig) (storeNodeResponseSubscription, error) {
// It's important to unlock only after getting the subscription channel.
// We also lock `activeRequestsLock` during finalizing the requests. This ensures that the subscription
// created in this function will get the result even if the requests proceeds faster than this function ends.
@@ -232,7 +232,7 @@ func (m *StoreNodeRequestManager) newStoreNodeRequest() *storeNodeRequest {

// getFilter checks if a filter for a given community is already created and creates one of not found.
// Returns the found/created filter, a flag if the filter was created by the function and an error.
func (m *StoreNodeRequestManager) getFilter(requestType storeNodeRequestType, dataID string, shard *shard.Shard) (*transport.Filter, bool, error) {
func (m *StoreNodeRequestManager) getFilter(requestType storeNodeRequestType, dataID string, shard *wakuv2.Shard) (*transport.Filter, bool, error) {
// First check if such filter already exists.
filter := m.messenger.transport.FilterByChatID(dataID)
if filter != nil {
@@ -334,7 +334,7 @@ type storeNodeRequestResult struct {
// One of data fields (community or contact) will be present depending on request type
community *communities.Community
contact *Contact
shard *shard.Shard
shard *wakuv2.Shard
}

type storeNodeResponseSubscription = chan storeNodeRequestResult
@@ -10,9 +10,9 @@ import (
"github.com/multiformats/go-multiaddr"

"github.com/status-im/status-go/protocol/storenodes"
"github.com/status-im/status-go/wakuv2"

gethbridge "github.com/status-im/status-go/eth-node/bridge/geth"
"github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/communities"
"github.com/status-im/status-go/protocol/tt"

@@ -92,11 +92,12 @@ func (s *MessengerStoreNodeCommunitySuite) createStore(name string) (*waku2.Waku
cfg := testWakuV2Config{
logger: s.logger.Named(name),
enableStore: true,
clusterID: shard.MainStatusShardCluster,
clusterID: wakuv2.MainStatusShardCluster,
}

storeNode := NewTestWakuV2(&s.Suite, cfg)
addresses := storeNode.ListenAddresses()
addresses, err := storeNode.ListenAddresses()
s.Require().NoError(err)
s.Require().GreaterOrEqual(len(addresses), 1, "no storenode listen address")
return storeNode, addresses[0]
}
@@ -109,7 +110,7 @@ func (s *MessengerStoreNodeCommunitySuite) newMessenger(name string, storenodeAd
cfg := testWakuV2Config{
logger: logger,
enableStore: false,
clusterID: shard.MainStatusShardCluster,
clusterID: wakuv2.MainStatusShardCluster,
}
wakuV2 := NewTestWakuV2(&s.Suite, cfg)
wakuV2Wrapper := gethbridge.NewGethWakuV2Wrapper(wakuV2)
@@ -24,7 +24,6 @@ import (
"github.com/status-im/status-go/multiaccounts/accounts"
"github.com/status-im/status-go/params"
"github.com/status-im/status-go/protocol/common"
"github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/communities"
"github.com/status-im/status-go/protocol/communities/token"
"github.com/status-im/status-go/protocol/protobuf"
@@ -34,6 +33,7 @@ import (
mailserversDB "github.com/status-im/status-go/services/mailservers"
"github.com/status-im/status-go/services/wallet/bigint"
"github.com/status-im/status-go/t/helpers"
"github.com/status-im/status-go/wakuv2"
waku2 "github.com/status-im/status-go/wakuv2"
wakuV2common "github.com/status-im/status-go/wakuv2/common"
)
@@ -160,7 +160,7 @@ func (s *MessengerStoreNodeRequestSuite) createStore() {
cfg := testWakuV2Config{
logger: s.logger.Named("store-waku"),
enableStore: true,
clusterID: shard.MainStatusShardCluster,
clusterID: wakuv2.MainStatusShardCluster,
}

s.wakuStoreNode = NewTestWakuV2(&s.Suite, cfg)
@@ -178,7 +178,7 @@ func (s *MessengerStoreNodeRequestSuite) createOwner() {
cfg := testWakuV2Config{
logger: s.logger.Named("owner-waku"),
enableStore: false,
clusterID: shard.MainStatusShardCluster,
clusterID: wakuv2.MainStatusShardCluster,
}

wakuV2 := NewTestWakuV2(&s.Suite, cfg)
@@ -199,7 +199,7 @@ func (s *MessengerStoreNodeRequestSuite) createBob() {
cfg := testWakuV2Config{
logger: s.logger.Named("bob-waku"),
enableStore: false,
clusterID: shard.MainStatusShardCluster,
clusterID: wakuv2.MainStatusShardCluster,
}
wakuV2 := NewTestWakuV2(&s.Suite, cfg)
s.bobWaku = gethbridge.NewGethWakuV2Wrapper(wakuV2)
@@ -366,7 +366,8 @@ func (s *MessengerStoreNodeRequestSuite) waitForEnvelopes(subscription <-chan st
}

func (s *MessengerStoreNodeRequestSuite) wakuListenAddress(waku *waku2.Waku) multiaddr.Multiaddr {
addresses := waku.ListenAddresses()
addresses, err := waku.ListenAddresses()
s.Require().NoError(err)
s.Require().LessOrEqual(1, len(addresses))
return addresses[0]
}
@@ -696,8 +697,8 @@ func (s *MessengerStoreNodeRequestSuite) TestRequestShardAndCommunityInfo() {
topicPrivKey, err := crypto.GenerateKey()
s.Require().NoError(err)

expectedShard := &shard.Shard{
Cluster: shard.MainStatusShardCluster,
expectedShard := &wakuv2.Shard{
Cluster: wakuv2.MainStatusShardCluster,
Index: 23,
}

@@ -841,8 +842,8 @@ type testFetchRealCommunityExampleTokenInfo struct {

var testFetchRealCommunityExample = []struct {
CommunityID string
CommunityURL string // If set, takes precedence over CommunityID
CommunityShard *shard.Shard // WARNING: I didn't test a sharded community
CommunityURL string // If set, takes precedence over CommunityID
CommunityShard *wakuv2.Shard // WARNING: I didn't test a sharded community
Fleet string
ClusterID uint16
UserPrivateKeyString string // When empty a new user will be created
@@ -863,14 +864,14 @@ var testFetchRealCommunityExample = []struct {
CommunityID: "0x03073514d4c14a7d10ae9fc9b0f05abc904d84166a6ac80add58bf6a3542a4e50a",
CommunityShard: nil,
Fleet: params.FleetStatusProd,
ClusterID: shard.MainStatusShardCluster,
ClusterID: wakuv2.MainStatusShardCluster,
},
{
// Example 3,
// https://status.app/c/CxiACi8KFGFwIHJlcSAxIHN0dCBiZWMgbWVtEgdkc2Fkc2FkGAMiByM0MzYwREYqAxkrHAM=#zQ3shwDYZHtrLE7NqoTGjTWzWUu6hom5D4qxfskLZfgfyGRyL
CommunityID: "0x03f64be95ed5c925022265f9250f538f65ed3dcf6e4ef6c139803dc02a3487ae7b",
Fleet: params.FleetStatusProd,
ClusterID: shard.MainStatusShardCluster,
ClusterID: wakuv2.MainStatusShardCluster,

CheckExpectedEnvelopes: true,
ExpectedShardEnvelopes: []string{
@@ -973,7 +974,7 @@ var testFetchRealCommunityExample = []struct {
//Example 1,
CommunityID: "0x02471dd922756a3a50b623e59cf3b99355d6587e43d5c517eb55f9aea9d3fe9fe9",
Fleet: params.FleetStatusProd,
ClusterID: shard.MainStatusShardCluster,
ClusterID: wakuv2.MainStatusShardCluster,
CheckExpectedEnvelopes: true,
ExpectedShardEnvelopes: []string{
"0xc3e68e838d09e0117b3f3fd27aabe5f5a509d13e9045263c78e6890953d43547",
@@ -1013,7 +1014,7 @@ var testFetchRealCommunityExample = []struct {
ContractAddress: "0x21F6F5Cb75E81e5104D890D750270eD6538C50cb",
},
},
ClusterID: shard.MainStatusShardCluster,
ClusterID: wakuv2.MainStatusShardCluster,
CheckExpectedEnvelopes: false,
CustomOptions: []StoreNodeRequestOption{
WithInitialPageSize(1),
@@ -14,11 +14,11 @@ import (

gocommon "github.com/status-im/status-go/common"
"github.com/status-im/status-go/protocol/wakusync"
"github.com/status-im/status-go/wakuv2"

"github.com/status-im/status-go/protocol/identity"

"github.com/status-im/status-go/eth-node/types"
waku2 "github.com/status-im/status-go/wakuv2"

"github.com/stretchr/testify/suite"

@@ -206,7 +206,7 @@ func WaitOnSignaledCommunityFound(m *Messenger, action func(), condition func(co
}
}

func WaitForConnectionStatus(s *suite.Suite, waku *waku2.Waku, action func() bool) {
func WaitForConnectionStatus(s *suite.Suite, waku *wakuv2.Waku, action func() bool) {
subscription := waku.SubscribeToConnStatusChanges()
defer subscription.Unsubscribe()

@@ -238,7 +238,7 @@ func hasAllPeers(m map[peer.ID]types.WakuV2Peer, checkSlice peer.IDSlice) bool {
return true
}

func WaitForPeersConnected(s *suite.Suite, waku *waku2.Waku, action func() peer.IDSlice) {
func WaitForPeersConnected(s *suite.Suite, waku *wakuv2.Waku, action func() peer.IDSlice) {
subscription := waku.SubscribeToConnStatusChanges()
defer subscription.Unsubscribe()
@@ -4,12 +4,12 @@ import (
"errors"

"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/wakuv2"
)

type SetCommunityShard struct {
CommunityID types.HexBytes `json:"communityId"`
Shard *shard.Shard `json:"shard,omitempty"`
Shard *wakuv2.Shard `json:"shard,omitempty"`
PrivateKey *types.HexBytes `json:"privateKey,omitempty"`
}

@@ -19,7 +19,7 @@ func (s *SetCommunityShard) Validate() error {
}
if s.Shard != nil {
// TODO: for now only MainStatusShard(16) is accepted
if s.Shard.Cluster != shard.MainStatusShardCluster {
if s.Shard.Cluster != wakuv2.MainStatusShardCluster {
return errors.New("invalid shard cluster")
}
if s.Shard.Index > 1023 {
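For callers of this request type, the only visible change is the package of the Shard field. A minimal sketch of building such a request against the new types, based on the fields and validation rules shown in the hunks above (the community ID and index 128 are illustrative values only):

```go
package example

import (
	"github.com/status-im/status-go/eth-node/types"
	"github.com/status-im/status-go/protocol/requests"
	"github.com/status-im/status-go/wakuv2"
)

// buildSetShardRequest sketches how a SetCommunityShard request is assembled.
// Per the Validate() logic above, Cluster must be MainStatusShardCluster and
// Index must not exceed 1023.
func buildSetShardRequest(communityID types.HexBytes) *requests.SetCommunityShard {
	return &requests.SetCommunityShard{
		CommunityID: communityID,
		Shard: &wakuv2.Shard{
			Cluster: wakuv2.MainStatusShardCluster,
			Index:   128,
		},
	}
}
```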
@@ -11,7 +11,7 @@ import (
"go.uber.org/zap"

"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/wakuv2"
)

const (
@@ -141,7 +141,7 @@ func (f *FiltersManager) InitPublicFilters(publicFiltersToInit []FiltersToInitia
}

type CommunityFilterToInitialize struct {
Shard *shard.Shard
Shard *wakuv2.Shard
PrivKey *ecdsa.PrivateKey
}

@@ -158,7 +158,7 @@ func (f *FiltersManager) InitCommunityFilters(communityFiltersToInitialize []Com
}

topics := make([]string, 0)
topics = append(topics, shard.DefaultNonProtectedPubsubTopic())
topics = append(topics, wakuv2.DefaultNonProtectedPubsubTopic())
topics = append(topics, communityFilter.Shard.PubsubTopic())

for _, pubsubTopic := range topics {
@@ -12,7 +12,6 @@ import (
"github.com/status-im/status-go/appdatabase"
gethbridge "github.com/status-im/status-go/eth-node/bridge/geth"
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/t/helpers"
waku2 "github.com/status-im/status-go/wakuv2"
)
@@ -62,7 +61,7 @@ func NewTestWakuV2(s *suite.Suite, cfg testWakuV2Config) *waku2.Waku {

err = wakuNode.Start()
if cfg.enableStore {
err := wakuNode.SubscribeToPubsubTopic(shard.DefaultNonProtectedPubsubTopic(), nil)
err := wakuNode.SubscribeToPubsubTopic(waku2.DefaultNonProtectedPubsubTopic(), nil)
s.Require().NoError(err)
}
s.Require().NoError(err)
@@ -78,7 +77,7 @@ func CreateWakuV2Network(s *suite.Suite, parentLogger *zap.Logger, nodeNames []s
nodes[i] = NewTestWakuV2(s, testWakuV2Config{
logger: parentLogger.Named("waku-" + name),
enableStore: false,
clusterID: shard.MainStatusShardCluster,
clusterID: waku2.MainStatusShardCluster,
})
}

@@ -89,9 +88,10 @@ func CreateWakuV2Network(s *suite.Suite, parentLogger *zap.Logger, nodeNames []s
continue
}

addrs := nodes[j].ListenAddresses()
addrs, err := nodes[j].ListenAddresses()
s.Require().NoError(err)
s.Require().Greater(len(addrs), 0)
_, err := nodes[i].AddRelayPeer(addrs[0])
_, err = nodes[i].AddRelayPeer(addrs[0])
s.Require().NoError(err)
err = nodes[i].DialPeer(addrs[0])
s.Require().NoError(err)
@@ -16,6 +16,7 @@
"github.com/status-im/status-go/services/browsers"
"github.com/status-im/status-go/services/wallet"
"github.com/status-im/status-go/services/wallet/bigint"
"github.com/status-im/status-go/wakuv2"

"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/log"
@@ -32,7 +33,6 @@ import (
"github.com/status-im/status-go/multiaccounts/settings"
"github.com/status-im/status-go/protocol"
"github.com/status-im/status-go/protocol/common"
"github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/communities"
"github.com/status-im/status-go/protocol/communities/token"
"github.com/status-im/status-go/protocol/discord"
@@ -1308,7 +1308,7 @@ func (api *PublicAPI) RequestCommunityInfoFromMailserver(communityID string) (*c

// Deprecated: RequestCommunityInfoFromMailserverWithShard is deprecated in favor of
// configurable FetchCommunity.
func (api *PublicAPI) RequestCommunityInfoFromMailserverWithShard(communityID string, shard *shard.Shard) (*communities.Community, error) {
func (api *PublicAPI) RequestCommunityInfoFromMailserverWithShard(communityID string, shard *wakuv2.Shard) (*communities.Community, error) {
request := &protocol.FetchCommunityRequest{
CommunityKey: communityID,
Shard: shard,
@@ -1333,7 +1333,7 @@ func (api *PublicAPI) RequestCommunityInfoFromMailserverAsync(communityID string

// Deprecated: RequestCommunityInfoFromMailserverAsyncWithShard is deprecated in favor of
// configurable FetchCommunity.
func (api *PublicAPI) RequestCommunityInfoFromMailserverAsyncWithShard(communityID string, shard *shard.Shard) error {
func (api *PublicAPI) RequestCommunityInfoFromMailserverAsyncWithShard(communityID string, shard *wakuv2.Shard) error {
request := &protocol.FetchCommunityRequest{
CommunityKey: communityID,
Shard: shard,
@@ -8,10 +8,10 @@ import (

"github.com/status-im/status-go/appdatabase"
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/sqlite"
"github.com/status-im/status-go/protocol/transport"
"github.com/status-im/status-go/t/helpers"
"github.com/status-im/status-go/wakuv2"
)

func setupTestDB(t *testing.T) (*Database, func()) {
@@ -62,9 +62,9 @@ func TestTopic(t *testing.T) {
defer close()
topicA := "0x61000000"
topicD := "0x64000000"
topic1 := MailserverTopic{PubsubTopic: shard.DefaultShardPubsubTopic(), ContentTopic: topicA, LastRequest: 1}
topic2 := MailserverTopic{PubsubTopic: shard.DefaultShardPubsubTopic(), ContentTopic: "0x6200000", LastRequest: 2}
topic3 := MailserverTopic{PubsubTopic: shard.DefaultShardPubsubTopic(), ContentTopic: "0x6300000", LastRequest: 3}
topic1 := MailserverTopic{PubsubTopic: wakuv2.DefaultShardPubsubTopic(), ContentTopic: topicA, LastRequest: 1}
topic2 := MailserverTopic{PubsubTopic: wakuv2.DefaultShardPubsubTopic(), ContentTopic: "0x6200000", LastRequest: 2}
topic3 := MailserverTopic{PubsubTopic: wakuv2.DefaultShardPubsubTopic(), ContentTopic: "0x6300000", LastRequest: 3}

require.NoError(t, db.AddTopic(topic1))
require.NoError(t, db.AddTopic(topic2))
@@ -77,14 +77,14 @@ func TestTopic(t *testing.T) {
filters := []*transport.Filter{
// Existing topic, is not updated
{
PubsubTopic: shard.DefaultShardPubsubTopic(),
PubsubTopic: wakuv2.DefaultShardPubsubTopic(),
ContentTopic: types.BytesToTopic([]byte{0x61}),
},
// Non existing topic is not inserted
{
Discovery: true,
Negotiated: true,
PubsubTopic: shard.DefaultShardPubsubTopic(),
PubsubTopic: wakuv2.DefaultShardPubsubTopic(),
ContentTopic: types.BytesToTopic([]byte{0x64}),
},
}
@@ -160,7 +160,7 @@ func TestAddGetDeleteMailserverTopics(t *testing.T) {
defer close()
api := &API{db: db}
testTopic := MailserverTopic{
PubsubTopic: shard.DefaultShardPubsubTopic(),
PubsubTopic: wakuv2.DefaultShardPubsubTopic(),
ContentTopic: "topic-001",
ChatIDs: []string{"chatID01", "chatID02"},
LastRequest: 10,
@@ -173,14 +173,14 @@ func TestAddGetDeleteMailserverTopics(t *testing.T) {
require.NoError(t, err)
require.EqualValues(t, []MailserverTopic{testTopic}, topics)

err = api.DeleteMailserverTopic(context.Background(), shard.DefaultShardPubsubTopic(), testTopic.ContentTopic)
err = api.DeleteMailserverTopic(context.Background(), wakuv2.DefaultShardPubsubTopic(), testTopic.ContentTopic)
require.NoError(t, err)
topics, err = api.GetMailserverTopics(context.Background())
require.NoError(t, err)
require.EqualValues(t, ([]MailserverTopic)(nil), topics)

// Delete non-existing topic.
err = api.DeleteMailserverTopic(context.Background(), shard.DefaultShardPubsubTopic(), "non-existing-topic")
err = api.DeleteMailserverTopic(context.Background(), wakuv2.DefaultShardPubsubTopic(), "non-existing-topic")
require.NoError(t, err)
}
@@ -10,7 +10,7 @@ import (

	"github.com/status-im/status-go/eth-node/types"
	"github.com/status-im/status-go/protocol"
	"github.com/status-im/status-go/protocol/common/shard"
	"github.com/status-im/status-go/wakuv2"
)

// Make sure that Service implements node.Lifecycle interface.

@@ -70,7 +70,7 @@ type PublicAPI struct {
	service *Service
}

func (p *PublicAPI) CommunityInfo(communityID types.HexBytes, shard *shard.Shard) (json.RawMessage, error) {
func (p *PublicAPI) CommunityInfo(communityID types.HexBytes, shard *wakuv2.Shard) (json.RawMessage, error) {
	if p.service.messenger == nil {
		return nil, ErrNotInitialized
	}
@@ -0,0 +1 @@
Subproject commit c861fa9f7560068874570598c81b7a1425a9e931
@@ -18,6 +18,7 @@

package wakuv2

/* TODO-nwaku
import (
	"context"
	"crypto/ecdsa"

@@ -512,4 +513,4 @@ func (api *PublicWakuAPI) NewMessageFilter(req Criteria) (string, error) {
	api.mu.Unlock()

	return id, nil
}
} */
@@ -18,13 +18,13 @@

package wakuv2

/* TODO-nwaku
import (
	"testing"
	"time"

	"golang.org/x/exp/maps"

	"github.com/status-im/status-go/protocol/common/shard"
	"github.com/status-im/status-go/wakuv2/common"
)

@@ -57,7 +57,7 @@ func TestMultipleTopicCopyInNewMessageFilter(t *testing.T) {
	}

	found := false
	candidates := w.filters.GetWatchersByTopic(shard.DefaultShardPubsubTopic(), t1)
	candidates := w.filters.GetWatchersByTopic(DefaultShardPubsubTopic(), t1)
	for _, f := range candidates {
		if maps.Equal(f.ContentTopics, common.NewTopicSet(crit.ContentTopics)) {
			found = true

@@ -68,4 +68,4 @@ func TestMultipleTopicCopyInNewMessageFilter(t *testing.T) {
	if !found {
		t.Fatalf("Could not find filter with both topics")
	}
}
} */
@@ -23,8 +23,6 @@ import (

	"go.uber.org/zap"

	"github.com/status-im/status-go/protocol/common/shard"

	ethdisc "github.com/ethereum/go-ethereum/p2p/dnsdisc"

	"github.com/status-im/status-go/wakuv2/common"

@@ -117,10 +115,10 @@ func setDefaults(cfg *Config) *Config {
	}

	if cfg.DefaultShardPubsubTopic == "" {
		cfg.DefaultShardPubsubTopic = shard.DefaultShardPubsubTopic()
		cfg.DefaultShardPubsubTopic = DefaultShardPubsubTopic()
		//For now populating with both used shards, but this can be populated from user subscribed communities etc once community sharding is implemented
		cfg.DefaultShardedPubsubTopics = append(cfg.DefaultShardedPubsubTopics, shard.DefaultShardPubsubTopic())
		cfg.DefaultShardedPubsubTopics = append(cfg.DefaultShardedPubsubTopics, shard.DefaultNonProtectedPubsubTopic())
		cfg.DefaultShardedPubsubTopics = append(cfg.DefaultShardedPubsubTopics, DefaultShardPubsubTopic())
		cfg.DefaultShardedPubsubTopics = append(cfg.DefaultShardedPubsubTopics, DefaultNonProtectedPubsubTopic())
	}

	return cfg
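With the shard helpers now living directly in package wakuv2, setDefaults no longer imports protocol/common/shard. A minimal sketch of the resulting behaviour, assuming the helpers keep their previous semantics (the /waku/2/rs/16/... topics below match the values used by the tests later in this diff):

	// hedged illustration only; not part of the diff
	cfg := setDefaults(&Config{})
	// cfg.DefaultShardPubsubTopic is the Status shard topic, e.g. "/waku/2/rs/16/32",
	// and cfg.DefaultShardedPubsubTopics also contains the non-protected shard topic.
	fmt.Println(cfg.DefaultShardPubsubTopic, cfg.DefaultShardedPubsubTopics)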
@@ -1,3 +1,6 @@
//go:build !use_nwaku
// +build !use_nwaku

// Copyright 2019 The Waku Library Authors.
//
// The Waku library is free software: you can redistribute it and/or modify

@@ -1575,8 +1578,8 @@ func (w *Waku) RelayPeersByTopic(topic string) (*types.PeerList, error) {
	}, nil
}

func (w *Waku) ListenAddresses() []multiaddr.Multiaddr {
	return w.node.ListenAddresses()
func (w *Waku) ListenAddresses() ([]multiaddr.Multiaddr, error) {
	return w.node.ListenAddresses(), nil
}

func (w *Waku) ENR() (*enode.Node, error) {

@@ -1947,3 +1950,24 @@ func FormatPeerConnFailures(wakuNode *node.WakuNode) map[string]int {
func (w *Waku) LegacyStoreNode() legacy_store.Store {
	return w.node.LegacyStore()
}

func (w *Waku) WakuLightpushPublish(message *pb.WakuMessage, pubsubTopic string) (string, error) {
	msgHash, err := w.node.Lightpush().Publish(w.ctx, message, lightpush.WithPubSubTopic(pubsubTopic))
	if err != nil {
		return "", err
	}
	return msgHash.String(), nil
}

func (w *Waku) WakuRelayPublish(message *pb.WakuMessage, pubsubTopic string) (string, error) {
	msgHash, err := w.node.Relay().Publish(w.ctx, message, relay.WithPubSubTopic(pubsubTopic))
	if err != nil {
		return "", err
	}
	return msgHash.String(), nil
}

func (w *Waku) ListPeersInMesh(pubsubTopic string) (int, error) {
	listPeers := w.node.Relay().PubSub().ListPeers(pubsubTopic)
	return len(listPeers), nil
}
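ListenAddresses now returns an error so the go-waku and nwaku builds can share one signature, and the new publish helpers return the message hash as a string. A caller-side sketch; waku, msg and cfg are placeholders, not names taken from the diff:

	// hedged illustration only; not part of the diff
	addrs, err := waku.ListenAddresses()
	if err != nil {
		return err
	}
	fmt.Println("listening on", addrs[0])

	hash, err := waku.WakuRelayPublish(msg, cfg.DefaultShardPubsubTopic)
	if err != nil {
		return err
	}
	fmt.Println("published message", hash)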
2618 wakuv2/nwaku.go — file diff suppressed because it is too large.
@@ -0,0 +1,807 @@
//go:build use_nwaku
// +build use_nwaku

package wakuv2

import (
	"context"
	"errors"
	"slices"
	"testing"
	"time"

	"github.com/cenkalti/backoff/v3"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/waku-org/go-waku/waku/v2/protocol/store"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/crypto"
	ethdnsdisc "github.com/ethereum/go-ethereum/p2p/dnsdisc"
	"github.com/ethereum/go-ethereum/p2p/enode"

	"github.com/stretchr/testify/require"

	"github.com/status-im/status-go/protocol/tt"
)

var testStoreENRBootstrap = "enrtree://AI4W5N5IFEUIHF5LESUAOSMV6TKWF2MB6GU2YK7PU4TYUGUNOCEPW@store.staging.status.nodes.status.im"
var testBootENRBootstrap = "enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.staging.status.nodes.status.im"

func setDefaultConfig(config *Config, lightMode bool) {
	config.ClusterID = 16

	if lightMode {
		config.EnablePeerExchangeClient = true
		config.LightClient = true
		config.EnableDiscV5 = false
	} else {
		config.EnableDiscV5 = true
		config.EnablePeerExchangeServer = true
		config.LightClient = false
		config.EnablePeerExchangeClient = false
	}
}

/*
func TestDiscoveryV5(t *testing.T) {
	config := &Config{}
	setDefaultConfig(config, false)
	config.DiscV5BootstrapNodes = []string{testStoreENRBootstrap}
	config.DiscoveryLimit = 20
	w, err := New(nil, "shards.staging", config, nil, nil, nil, nil, nil)
	require.NoError(t, err)

	require.NoError(t, w.Start())

	err = tt.RetryWithBackOff(func() error {
		if len(w.Peers()) == 0 {
			return errors.New("no peers discovered")
		}
		return nil
	})

	require.NoError(t, err)

	require.NotEqual(t, 0, len(w.Peers()))
	require.NoError(t, w.Stop())
}
*/
/*
func TestRestartDiscoveryV5(t *testing.T) {
	config := &Config{}
	setDefaultConfig(config, false)
	// Use wrong discv5 bootstrap address, to simulate being offline
	config.DiscV5BootstrapNodes = []string{"enrtree://AOGECG2SPND25EEFMAJ5WF3KSGJNSGV356DSTL2YVLLZWIV6SAYBM@1.1.1.2"}
	config.DiscoveryLimit = 20
	config.UDPPort = 10002
	config.ClusterID = 16
	w, err := New(nil, "", config, nil, nil, nil, nil, nil)
	require.NoError(t, err)

	require.NoError(t, w.Start())
	require.False(t, w.seededBootnodesForDiscV5)

	options := func(b *backoff.ExponentialBackOff) {
		b.MaxElapsedTime = 2 * time.Second
	}

	// Sanity check, not great, but it's probably helpful
	err = tt.RetryWithBackOff(func() error {
		if len(w.Peers()) == 0 {
			return errors.New("no peers discovered")
		}
		return nil
	}, options)

	require.Error(t, err)

	w.discV5BootstrapNodes = []string{testStoreENRBootstrap}

	options = func(b *backoff.ExponentialBackOff) {
		b.MaxElapsedTime = 90 * time.Second
	}

	err = tt.RetryWithBackOff(func() error {
		if len(w.Peers()) == 0 {
			return errors.New("no peers discovered")
		}
		return nil
	}, options)
	require.NoError(t, err)

	require.True(t, w.seededBootnodesForDiscV5)
	require.NotEqual(t, 0, len(w.Peers()))
	require.NoError(t, w.Stop())
}

func TestRelayPeers(t *testing.T) {
	config := &Config{
		EnableMissingMessageVerification: true,
	}
	setDefaultConfig(config, false)
	w, err := New(nil, "", config, nil, nil, nil, nil, nil)
	require.NoError(t, err)
	require.NoError(t, w.Start())
	_, err = w.RelayPeersByTopic(config.DefaultShardPubsubTopic)
	require.NoError(t, err)

	// Ensure function returns an error for lightclient
	config = &Config{}
	config.ClusterID = 16
	config.LightClient = true
	w, err = New(nil, "", config, nil, nil, nil, nil, nil)
	require.NoError(t, err)
	require.NoError(t, w.Start())
	_, err = w.RelayPeersByTopic(config.DefaultShardPubsubTopic)
	require.Error(t, err)
}
*/
func parseNodes(rec []string) []*enode.Node {
	var ns []*enode.Node
	for _, r := range rec {
		var n enode.Node
		if err := n.UnmarshalText([]byte(r)); err != nil {
			panic(err)
		}
		ns = append(ns, &n)
	}
	return ns
}
// In order to run these tests, you must run an nwaku node
//
// Using Docker:
//
// IP_ADDRESS=$(hostname -I | awk '{print $1}');
// docker run \
// -p 61000:61000/tcp -p 8000:8000/udp -p 8646:8646/tcp harbor.status.im/wakuorg/nwaku:v0.33.0 \
// --discv5-discovery=true --cluster-id=16 --log-level=DEBUG \
// --nat=extip:${IP_ADDRESS} --discv5-udp-port=8000 --rest-address=0.0.0.0 --store --rest-port=8646 \

func TestBasicWakuV2(t *testing.T) {
	extNodeRestPort := 8646
	storeNodeInfo, err := GetNwakuInfo(nil, &extNodeRestPort)
	require.NoError(t, err)

	nwakuConfig := WakuConfig{
		Port:            30303,
		NodeKey:         "11d0dcea28e86f81937a3bd1163473c7fbc0a0db54fd72914849bc47bdf78710",
		EnableRelay:     true,
		LogLevel:        "DEBUG",
		DnsDiscoveryUrl: "enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.prod.status.nodes.status.im",
		DnsDiscovery:    true,
		Discv5Discovery: true,
		Staticnodes:     []string{storeNodeInfo.ListenAddresses[0]},
		ClusterID:       16,
		Shards:          []uint16{64},
	}

	w, err := New(nil, "", &nwakuConfig, nil, nil, nil, nil, nil)
	require.NoError(t, err)
	require.NoError(t, w.Start())

	enr, err := w.ENR()
	require.NoError(t, err)
	require.NotNil(t, enr)

	options := func(b *backoff.ExponentialBackOff) {
		b.MaxElapsedTime = 30 * time.Second
	}

	// Sanity check, not great, but it's probably helpful
	err = tt.RetryWithBackOff(func() error {
		numConnected, err := w.GetNumConnectedPeers()
		if err != nil {
			return err
		}
		// Have to be connected to at least 3 nodes: the static node, the bootstrap node, and one discovered node
		if numConnected > 2 {
			return nil
		}
		return errors.New("no peers discovered")
	}, options)
	require.NoError(t, err)

	// Get local store node address
	storeNode, err := peer.AddrInfoFromString(storeNodeInfo.ListenAddresses[0])
	require.NoError(t, err)
	require.NoError(t, err)

	// Check that we are indeed connected to the store node
	connectedStoreNodes, err := w.GetPeerIdsByProtocol(string(store.StoreQueryID_v300))
	require.NoError(t, err)
	require.True(t, slices.Contains(connectedStoreNodes, storeNode.ID), "nwaku should be connected to the store node")

	// Disconnect from the store node
	err = w.DisconnectPeerById(storeNode.ID)
	require.NoError(t, err)

	// Check that we are indeed disconnected
	connectedStoreNodes, err = w.GetPeerIdsByProtocol(string(store.StoreQueryID_v300))
	require.NoError(t, err)
	isDisconnected := !slices.Contains(connectedStoreNodes, storeNode.ID)
	require.True(t, isDisconnected, "nwaku should be disconnected from the store node")

	// Re-connect
	err = w.DialPeerByID(storeNode.ID)
	require.NoError(t, err)

	// Check that we are connected again
	connectedStoreNodes, err = w.GetPeerIdsByProtocol(string(store.StoreQueryID_v300))
	require.NoError(t, err)
	require.True(t, slices.Contains(connectedStoreNodes, storeNode.ID), "nwaku should be connected to the store node")

	/*
		filter := &common.Filter{
			PubsubTopic:   config.DefaultShardPubsubTopic,
			Messages:      common.NewMemoryMessageStore(),
			ContentTopics: common.NewTopicSetFromBytes([][]byte{{1, 2, 3, 4}}),
		}

		_, err = w.Subscribe(filter)
		require.NoError(t, err)

		msgTimestamp := w.timestamp()
		contentTopic := maps.Keys(filter.ContentTopics)[0]

		time.Sleep(2 * time.Second)

		_, err = w.Send(config.DefaultShardPubsubTopic, &pb.WakuMessage{
			Payload:      []byte{1, 2, 3, 4, 5},
			ContentTopic: contentTopic.ContentTopic(),
			Version:      proto.Uint32(0),
			Timestamp:    &msgTimestamp,
		}, nil)

		require.NoError(t, err)

		time.Sleep(1 * time.Second)

		messages := filter.Retrieve()
		require.Len(t, messages, 1)

		timestampInSeconds := msgTimestamp / int64(time.Second)
		marginInSeconds := 20

		options = func(b *backoff.ExponentialBackOff) {
			b.MaxElapsedTime = 60 * time.Second
			b.InitialInterval = 500 * time.Millisecond
		}
		err = tt.RetryWithBackOff(func() error {
			_, envelopeCount, err := w.Query(
				context.Background(),
				storeNode.PeerID,
				store.FilterCriteria{
					ContentFilter: protocol.NewContentFilter(config.DefaultShardPubsubTopic, contentTopic.ContentTopic()),
					TimeStart:     proto.Int64((timestampInSeconds - int64(marginInSeconds)) * int64(time.Second)),
					TimeEnd:       proto.Int64((timestampInSeconds + int64(marginInSeconds)) * int64(time.Second)),
				},
				nil,
				nil,
				false,
			)
			if err != nil || envelopeCount == 0 {
				// in case of failure extend timestamp margin up to 40secs
				if marginInSeconds < 40 {
					marginInSeconds += 5
				}
				return errors.New("no messages received from store node")
			}
			return nil
		}, options)
		require.NoError(t, err) */

	require.NoError(t, w.Stop())
}
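TestBasicWakuV2 above is the first test to exercise the nwaku-backed peer-management surface end to end. A condensed sketch of just those calls, assuming w is an already started *Waku and id is the store node's peer ID:

	// hedged illustration only; not part of the diff
	numConnected, _ := w.GetNumConnectedPeers()                               // total connected peers
	storePeers, _ := w.GetPeerIdsByProtocol(string(store.StoreQueryID_v300)) // peers speaking the store protocol
	_ = w.DisconnectPeerById(id)                                             // drop the connection
	_ = w.DialPeerByID(id)                                                   // dial it again
	_, _ = numConnected, storePeers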
type mapResolver map[string]string

func (mr mapResolver) LookupTXT(ctx context.Context, name string) ([]string, error) {
	if record, ok := mr[name]; ok {
		return []string{record}, nil
	}
	return nil, errors.New("not found")
}

var signingKeyForTesting, _ = crypto.ToECDSA(hexutil.MustDecode("0xdc599867fc513f8f5e2c2c9c489cde5e71362d1d9ec6e693e0de063236ed1240"))

func makeTestTree(domain string, nodes []*enode.Node, links []string) (*ethdnsdisc.Tree, string) {
	tree, err := ethdnsdisc.MakeTree(1, nodes, links)
	if err != nil {
		panic(err)
	}
	url, err := tree.Sign(signingKeyForTesting, domain)
	if err != nil {
		panic(err)
	}
	return tree, url
}
/*
func TestPeerExchange(t *testing.T) {
	logger, err := zap.NewDevelopment()
	require.NoError(t, err)
	// start node which serve as PeerExchange server
	config := &Config{}
	config.ClusterID = 16
	config.EnableDiscV5 = true
	config.EnablePeerExchangeServer = true
	config.EnablePeerExchangeClient = false
	pxServerNode, err := New(nil, "", config, logger.Named("pxServerNode"), nil, nil, nil, nil)
	require.NoError(t, err)
	require.NoError(t, pxServerNode.Start())

	time.Sleep(1 * time.Second)

	// start node that will be discovered by PeerExchange
	config = &Config{}
	config.ClusterID = 16
	config.EnableDiscV5 = true
	config.EnablePeerExchangeServer = false
	config.EnablePeerExchangeClient = false
	enr, err := pxServerNode.ENR()
	require.NoError(t, err)

	config.DiscV5BootstrapNodes = []string{enr.String()}
	discV5Node, err := New(nil, "", config, logger.Named("discV5Node"), nil, nil, nil, nil)
	require.NoError(t, err)
	require.NoError(t, discV5Node.Start())

	time.Sleep(1 * time.Second)

	// start light node which use PeerExchange to discover peers
	enrNodes := []*enode.Node{enr}
	tree, url := makeTestTree("n", enrNodes, nil)
	resolver := mapResolver(tree.ToTXT("n"))

	config = &Config{}
	config.ClusterID = 16
	config.EnablePeerExchangeServer = false
	config.EnablePeerExchangeClient = true
	config.LightClient = true
	config.Resolver = resolver

	config.WakuNodes = []string{url}
	lightNode, err := New(nil, "", config, logger.Named("lightNode"), nil, nil, nil, nil)
	require.NoError(t, err)
	require.NoError(t, lightNode.Start())

	// Sanity check, not great, but it's probably helpful
	options := func(b *backoff.ExponentialBackOff) {
		b.MaxElapsedTime = 30 * time.Second
	}
	err = tt.RetryWithBackOff(func() error {
		// we should not use lightNode.Peers() here as it only indicates peers that are connected right now,
		// in light client mode,the peer will be closed via `w.node.Host().Network().ClosePeer(peerInfo.ID)`
		// after invoking identifyAndConnect, instead, we should check the peerStore, peers from peerStore
		// won't get deleted especially if they are statically added.
		numConnected, err := lightNode.GetNumConnectedPeers()
		if err != nil {
			return err
		}
		if numConnected == 2 {
			return nil
		}
		return errors.New("no peers discovered")
	}, options)
	require.NoError(t, err)

	_, cancel := context.WithCancel(context.Background())
	defer cancel()
	_, err = discV5Node.WakuPeerExchangeRequest(1)
	require.NoError(t, err)
	_, err = discV5Node.WakuPeerExchangeRequest(1)
	require.Error(t, err) //should fail due to rate limit

	require.NoError(t, lightNode.Stop())
	require.NoError(t, pxServerNode.Stop())
	require.NoError(t, discV5Node.Stop())
}

func TestWakuV2Filter(t *testing.T) {
	t.Skip("flaky test")

	enrTreeAddress := testBootENRBootstrap
	envEnrTreeAddress := os.Getenv("ENRTREE_ADDRESS")
	if envEnrTreeAddress != "" {
		enrTreeAddress = envEnrTreeAddress
	}
	config := &Config{}
	setDefaultConfig(config, true)
	config.EnablePeerExchangeClient = false
	config.Port = 0
	config.MinPeersForFilter = 2

	config.DiscV5BootstrapNodes = []string{enrTreeAddress}
	config.DiscoveryLimit = 20
	config.WakuNodes = []string{enrTreeAddress}
	w, err := New(nil, "", config, nil, nil, nil, nil, nil)
	require.NoError(t, err)
	require.NoError(t, w.Start())

	options := func(b *backoff.ExponentialBackOff) {
		b.MaxElapsedTime = 10 * time.Second
	}
	time.Sleep(10 * time.Second) //TODO: Check if we can remove this sleep.

	// Sanity check, not great, but it's probably helpful
	err = tt.RetryWithBackOff(func() error {
		peers, err := w.GetPeerIdsByProtocol(string(filter.FilterSubscribeID_v20beta1))
		if err != nil {
			return err
		}
		if len(peers) < 2 {
			return errors.New("no peers discovered")
		}
		return nil
	}, options)
	require.NoError(t, err)
	testPubsubTopic := "/waku/2/rs/16/32"
	contentTopicBytes := make([]byte, 4)
	_, err = rand.Read(contentTopicBytes)
	require.NoError(t, err)
	filter := &common.Filter{
		Messages:      common.NewMemoryMessageStore(),
		PubsubTopic:   testPubsubTopic,
		ContentTopics: common.NewTopicSetFromBytes([][]byte{contentTopicBytes}),
	}

	fID, err := w.Subscribe(filter)
	require.NoError(t, err)

	msgTimestamp := w.timestamp()
	contentTopic := maps.Keys(filter.ContentTopics)[0]

	_, err = w.Send(testPubsubTopic, &pb.WakuMessage{
		Payload:      []byte{1, 2, 3, 4, 5},
		ContentTopic: contentTopic.ContentTopic(),
		Version:      proto.Uint32(0),
		Timestamp:    &msgTimestamp,
	}, nil)
	require.NoError(t, err)
	time.Sleep(5 * time.Second)

	// Ensure there is at least 1 active filter subscription
	subscriptions := w.FilterLightnode().Subscriptions()
	require.Greater(t, len(subscriptions), 0)

	messages := filter.Retrieve()
	require.Len(t, messages, 1)

	// Mock peers going down
	_, err = w.FilterLightnode().UnsubscribeWithSubscription(w.ctx, subscriptions[0])
	require.NoError(t, err)

	time.Sleep(10 * time.Second)

	// Ensure there is at least 1 active filter subscription
	subscriptions = w.FilterLightnode().Subscriptions()
	require.Greater(t, len(subscriptions), 0)

	// Ensure that messages are retrieved with a fresh sub
	_, err = w.Send(testPubsubTopic, &pb.WakuMessage{
		Payload:      []byte{1, 2, 3, 4, 5, 6},
		ContentTopic: contentTopic.ContentTopic(),
		Version:      proto.Uint32(0),
		Timestamp:    &msgTimestamp,
	}, nil)
	require.NoError(t, err)
	time.Sleep(10 * time.Second)

	messages = filter.Retrieve()
	require.Len(t, messages, 1)
	err = w.Unsubscribe(context.Background(), fID)
	require.NoError(t, err)
	require.NoError(t, w.Stop())
}

func TestWakuV2Store(t *testing.T) {
	t.Skip("deprecated. Storenode must use nwaku")

	// Configuration for the first Waku node
	config1 := &Config{
		Port:                             0,
		ClusterID:                        16,
		EnableDiscV5:                     false,
		DiscoveryLimit:                   20,
		EnableStore:                      false,
		StoreCapacity:                    100,
		StoreSeconds:                     3600,
		EnableMissingMessageVerification: true,
	}
	w1PeersCh := make(chan peer.IDSlice, 100) // buffered not to block on the send side

	// Start the first Waku node
	w1, err := New(nil, "", config1, nil, nil, nil, nil, func(cs types.ConnStatus) {
		w1PeersCh <- maps.Keys(cs.Peers)
	})
	require.NoError(t, err)
	require.NoError(t, w1.Start())
	defer func() {
		require.NoError(t, w1.Stop())
		close(w1PeersCh)
	}()

	// Configuration for the second Waku node
	sql2, err := helpers.SetupTestMemorySQLDB(appdatabase.DbInitializer{})
	require.NoError(t, err)
	config2 := &Config{
		Port:           0,
		ClusterID:      16,
		EnableDiscV5:   false,
		DiscoveryLimit: 20,
		EnableStore:    true,
		StoreCapacity:  100,
		StoreSeconds:   3600,
	}

	// Start the second Waku node
	w2, err := New(nil, "", config2, nil, sql2, nil, nil, nil)
	require.NoError(t, err)
	require.NoError(t, w2.Start())
	w2EnvelopeCh := make(chan common.EnvelopeEvent, 100)
	w2.SubscribeEnvelopeEvents(w2EnvelopeCh)
	defer func() {
		require.NoError(t, w2.Stop())
		close(w2EnvelopeCh)
	}()

	// Connect the two nodes directly
	peer2Addr, err := w2.ListenAddresses()
	require.NoError(t, err)

	err = w1.DialPeer(peer2Addr[0])
	require.NoError(t, err)

	// Create a filter for the second node to catch messages
	filter := &common.Filter{
		Messages:      common.NewMemoryMessageStore(),
		PubsubTopic:   config2.DefaultShardPubsubTopic,
		ContentTopics: common.NewTopicSetFromBytes([][]byte{{1, 2, 3, 4}}),
	}

	_, err = w2.Subscribe(filter)
	require.NoError(t, err)

	time.Sleep(2 * time.Second)

	// Send a message from the first node
	msgTimestamp := w1.CurrentTime().UnixNano()
	contentTopic := maps.Keys(filter.ContentTopics)[0]
	_, err = w1.Send(config1.DefaultShardPubsubTopic, &pb.WakuMessage{
		Payload:      []byte{1, 2, 3, 4, 5},
		ContentTopic: contentTopic.ContentTopic(),
		Version:      proto.Uint32(0),
		Timestamp:    &msgTimestamp,
	}, nil)
	require.NoError(t, err)

	waitForEnvelope(t, contentTopic.ContentTopic(), w2EnvelopeCh)

	// Retrieve the message from the second node's filter
	messages := filter.Retrieve()
	require.Len(t, messages, 1)

	timestampInSeconds := msgTimestamp / int64(time.Second)
	marginInSeconds := 5
	// Query the second node's store for the message
	_, envelopeCount, err := w1.Query(
		context.Background(),
		w2.Host().ID(),
		store.FilterCriteria{
			TimeStart:     proto.Int64((timestampInSeconds - int64(marginInSeconds)) * int64(time.Second)),
			TimeEnd:       proto.Int64((timestampInSeconds + int64(marginInSeconds)) * int64(time.Second)),
			ContentFilter: protocol.NewContentFilter(config1.DefaultShardPubsubTopic, contentTopic.ContentTopic()),
		},
		nil,
		nil,
		false,
	)
	require.NoError(t, err)
	require.True(t, envelopeCount > 0, "no messages received from store node")
}

func waitForPeerConnection(t *testing.T, peerID peer.ID, peerCh chan peer.IDSlice) {
	waitForPeerConnectionWithTimeout(t, peerID, peerCh, 3*time.Second)
}

func waitForPeerConnectionWithTimeout(t *testing.T, peerID peer.ID, peerCh chan peer.IDSlice, timeout time.Duration) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	for {
		select {
		case peers := <-peerCh:
			for _, p := range peers {
				if p == peerID {
					return
				}
			}
		case <-ctx.Done():
			require.Fail(t, "timed out waiting for peer "+peerID.String())
			return
		}
	}
}

func waitForEnvelope(t *testing.T, contentTopic string, envCh chan common.EnvelopeEvent) {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	for {
		select {
		case env := <-envCh:
			if env.Topic.ContentTopic() == contentTopic {
				return
			}
		case <-ctx.Done():
			require.Fail(t, "timed out waiting for envelope's topic "+contentTopic)
			return
		}
	}
}

func TestOnlineChecker(t *testing.T) {
	w, err := New(nil, "shards.staging", nil, nil, nil, nil, nil, nil)
	require.NoError(t, w.Start())

	require.NoError(t, err)
	require.False(t, w.onlineChecker.IsOnline())

	w.ConnectionChanged(connection.State{Offline: false})
	require.True(t, w.onlineChecker.IsOnline())

	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		<-w.goingOnline
		require.True(t, true)
	}()

	time.Sleep(100 * time.Millisecond)

	w.ConnectionChanged(connection.State{Offline: true})
	require.False(t, w.onlineChecker.IsOnline())

	// Test lightnode online checker
	config := &Config{}
	config.ClusterID = 16
	config.LightClient = true
	lightNode, err := New(nil, "shards.staging", config, nil, nil, nil, nil, nil)
	require.NoError(t, err)

	err = lightNode.Start()
	require.NoError(t, err)

	require.False(t, lightNode.onlineChecker.IsOnline())
	f := &common.Filter{}
	lightNode.filterManager.SubscribeFilter("test", protocol.NewContentFilter(f.PubsubTopic, f.ContentTopics.ContentTopics()...))

}

func TestLightpushRateLimit(t *testing.T) {
	logger, err := zap.NewDevelopment()
	require.NoError(t, err)

	config0 := &Config{}
	setDefaultConfig(config0, false)
	w0PeersCh := make(chan peer.IDSlice, 5) // buffered not to block on the send side

	// Start the relayu node
	w0, err := New(nil, "", config0, logger.Named("relayNode"), nil, nil, nil, func(cs types.ConnStatus) {
		w0PeersCh <- maps.Keys(cs.Peers)
	})
	require.NoError(t, err)
	require.NoError(t, w0.Start())
	defer func() {
		require.NoError(t, w0.Stop())
		close(w0PeersCh)
	}()

	contentTopics := common.NewTopicSetFromBytes([][]byte{{1, 2, 3, 4}})
	filter := &common.Filter{
		PubsubTopic:   config0.DefaultShardPubsubTopic,
		Messages:      common.NewMemoryMessageStore(),
		ContentTopics: contentTopics,
	}

	_, err = w0.Subscribe(filter)
	require.NoError(t, err)

	config1 := &Config{}
	setDefaultConfig(config1, false)
	w1PeersCh := make(chan peer.IDSlice, 5) // buffered not to block on the send side

	// Start the full node
	w1, err := New(nil, "", config1, logger.Named("fullNode"), nil, nil, nil, func(cs types.ConnStatus) {
		w1PeersCh <- maps.Keys(cs.Peers)
	})
	require.NoError(t, err)
	require.NoError(t, w1.Start())
	defer func() {
		require.NoError(t, w1.Stop())
		close(w1PeersCh)
	}()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	//Connect the relay peer and full node
	err = w1.DialPeer(ctx, w0.ListenAddresses()[0].String())
	require.NoError(t, err)

	err = tt.RetryWithBackOff(func() error {
		if len(w1.Peers()) == 0 {
			return errors.New("no peers discovered")
		}
		return nil
	})
	require.NoError(t, err)

	config2 := &Config{}
	setDefaultConfig(config2, true)
	w2PeersCh := make(chan peer.IDSlice, 5) // buffered not to block on the send side

	// Start the light node
	w2, err := New(nil, "", config2, logger.Named("lightNode"), nil, nil, nil, func(cs types.ConnStatus) {
		w2PeersCh <- maps.Keys(cs.Peers)
	})
	require.NoError(t, err)
	require.NoError(t, w2.Start())
	defer func() {
		require.NoError(t, w2.Stop())
		close(w2PeersCh)
	}()

	//Use this instead of DialPeer to make sure the peer is added to PeerStore and can be selected for Lighpush
	w2.AddDiscoveredPeer(w1.PeerID(), w1.ListenAddresses(), wps.Static, w1.cfg.DefaultShardedPubsubTopics, w1.node.ENR(), true)

	waitForPeerConnectionWithTimeout(t, w2.Host().ID(), w1PeersCh, 5*time.Second)

	event := make(chan common.EnvelopeEvent, 10)
	w2.SubscribeEnvelopeEvents(event)

	for i := range [4]int{} {
		msgTimestamp := w2.timestamp()
		_, err := w2.Send(config2.DefaultShardPubsubTopic, &pb.WakuMessage{
			Payload:      []byte{1, 2, 3, 4, 5, 6, byte(i)},
			ContentTopic: maps.Keys(contentTopics)[0].ContentTopic(),
			Version:      proto.Uint32(0),
			Timestamp:    &msgTimestamp,
		}, nil)

		require.NoError(t, err)

		time.Sleep(550 * time.Millisecond)

	}

	messages := filter.Retrieve()
	require.Len(t, messages, 2)

}

func TestTelemetryFormat(t *testing.T) {
	logger, err := zap.NewDevelopment()
	require.NoError(t, err)

	tc := NewBandwidthTelemetryClient(logger, "#")

	s := metrics.Stats{
		TotalIn:  10,
		TotalOut: 20,
		RateIn:   30,
		RateOut:  40,
	}

	m := make(map[libp2pprotocol.ID]metrics.Stats)
	m[relay.WakuRelayID_v200] = s
	m[filter.FilterPushID_v20beta1] = s
	m[filter.FilterSubscribeID_v20beta1] = s
	m[legacy_store.StoreID_v20beta4] = s
	m[lightpush.LightPushID_v20beta1] = s

	requestBody := tc.getTelemetryRequestBody(m)
	_, err = json.Marshal(requestBody)
	require.NoError(t, err)
}
*/
@@ -0,0 +1,58 @@
package wakuv2

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"strconv"
)

type NwakuInfo struct {
	ListenAddresses []string `json:"listenAddresses"`
	EnrUri          string   `json:"enrUri"`
}

func GetNwakuInfo(host *string, port *int) (NwakuInfo, error) {
	nwakuRestPort := 8645
	if port != nil {
		nwakuRestPort = *port
	}
	envNwakuRestPort := os.Getenv("NWAKU_REST_PORT")
	if envNwakuRestPort != "" {
		v, err := strconv.Atoi(envNwakuRestPort)
		if err != nil {
			return NwakuInfo{}, err
		}
		nwakuRestPort = v
	}

	nwakuRestHost := "localhost"
	if host != nil {
		nwakuRestHost = *host
	}
	envNwakuRestHost := os.Getenv("NWAKU_REST_HOST")
	if envNwakuRestHost != "" {
		nwakuRestHost = envNwakuRestHost
	}

	resp, err := http.Get(fmt.Sprintf("http://%s:%d/debug/v1/info", nwakuRestHost, nwakuRestPort))
	if err != nil {
		return NwakuInfo{}, err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return NwakuInfo{}, err
	}

	var data NwakuInfo
	err = json.Unmarshal(body, &data)
	if err != nil {
		return NwakuInfo{}, err
	}

	return data, nil
}
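GetNwakuInfo queries a running nwaku node's /debug/v1/info REST endpoint and is what TestBasicWakuV2 above uses to find the store node's listen address. A small usage sketch, assuming the node exposes its REST API on port 8646 as in the docker command quoted in the test file:

	// hedged illustration only; not part of the diff
	restPort := 8646
	info, err := GetNwakuInfo(nil, &restPort) // nil host defaults to localhost (or NWAKU_REST_HOST)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("ENR:", info.EnrUri)
	fmt.Println("listen addresses:", info.ListenAddresses)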
@@ -1,4 +1,4 @@
package shard
package wakuv2

import (
	wakuproto "github.com/waku-org/go-waku/waku/v2/protocol"
@@ -1,3 +1,6 @@
//go:build !use_nwaku
// +build !use_nwaku

package wakuv2

import (

@@ -548,8 +551,9 @@ func TestWakuV2Store(t *testing.T) {
	}()

	// Connect the two nodes directly
	peer2Addr := w2.node.ListenAddresses()[0].String()
	err = w1.node.DialPeer(context.Background(), peer2Addr)
	peer2Addr, err := w2.ListenAddresses()
	require.NoError(t, err)
	err = w1.node.DialPeer(context.Background(), peer2Addr[0].String())
	require.NoError(t, err)

	waitForPeerConnection(t, w2.node.Host().ID(), w1PeersCh)

@@ -723,7 +727,9 @@ func TestLightpushRateLimit(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	//Connect the relay peer and full node
	err = w1.node.DialPeer(ctx, w0.node.ListenAddresses()[0].String())
	peerAddr, err := w0.ListenAddresses()
	require.NoError(t, err)
	err = w1.node.DialPeer(ctx, peerAddr[0].String())
	require.NoError(t, err)

	err = tt.RetryWithBackOff(func() error {

@@ -750,7 +756,9 @@ func TestLightpushRateLimit(t *testing.T) {
	}()

	//Use this instead of DialPeer to make sure the peer is added to PeerStore and can be selected for Lighpush
	w2.node.AddDiscoveredPeer(w1.PeerID(), w1.node.ListenAddresses(), wps.Static, w1.cfg.DefaultShardedPubsubTopics, w1.node.ENR(), true)
	addresses, err := w1.ListenAddresses()
	require.NoError(t, err)
	w2.node.AddDiscoveredPeer(w1.PeerID(), addresses, wps.Static, w1.cfg.DefaultShardedPubsubTopics, w1.node.ENR(), true)

	waitForPeerConnectionWithTimeout(t, w2.node.Host().ID(), w1PeersCh, 5*time.Second)