diff --git a/eth-node/bridge/geth/node.go b/eth-node/bridge/geth/node.go
index 91088080a..e123ec654 100644
--- a/eth-node/bridge/geth/node.go
+++ b/eth-node/bridge/geth/node.go
@@ -19,10 +19,10 @@ import (
type gethNodeWrapper struct {
stack *node.Node
waku1 *waku.Waku
- waku2 *wakuv2.Waku
+ waku2 *wakuv2.NWaku
}
-func NewNodeBridge(stack *node.Node, waku1 *waku.Waku, waku2 *wakuv2.Waku) types.Node {
+func NewNodeBridge(stack *node.Node, waku1 *waku.Waku, waku2 *wakuv2.NWaku) types.Node {
return &gethNodeWrapper{stack: stack, waku1: waku1, waku2: waku2}
}
@@ -38,7 +38,7 @@ func (w *gethNodeWrapper) SetWaku1(waku *waku.Waku) {
w.waku1 = waku
}
-func (w *gethNodeWrapper) SetWaku2(waku *wakuv2.Waku) {
+func (w *gethNodeWrapper) SetWaku2(waku *wakuv2.NWaku) {
w.waku2 = waku
}
diff --git a/eth-node/bridge/geth/wakuv2.go b/eth-node/bridge/geth/wakuv2.go
index 5b39e3744..c13279787 100644
--- a/eth-node/bridge/geth/wakuv2.go
+++ b/eth-node/bridge/geth/wakuv2.go
@@ -22,11 +22,11 @@ import (
)
type gethWakuV2Wrapper struct {
- waku *wakuv2.Waku
+ waku *wakuv2.NWaku
}
// NewGethWakuWrapper returns an object that wraps Geth's Waku in a types interface
-func NewGethWakuV2Wrapper(w *wakuv2.Waku) types.Waku {
+func NewGethWakuV2Wrapper(w *wakuv2.NWaku) types.Waku {
if w == nil {
panic("waku cannot be nil")
}
@@ -37,7 +37,7 @@ func NewGethWakuV2Wrapper(w *wakuv2.Waku) types.Waku {
}
// GetGethWhisperFrom retrieves the underlying whisper Whisper struct from a wrapped Whisper interface
-func GetGethWakuV2From(m types.Waku) *wakuv2.Waku {
+func GetGethWakuV2From(m types.Waku) *wakuv2.NWaku {
return m.(*gethWakuV2Wrapper).waku
}
@@ -275,7 +275,7 @@ func (w *gethWakuV2Wrapper) DialPeerByID(peerID peer.ID) error {
}
func (w *gethWakuV2Wrapper) ListenAddresses() ([]multiaddr.Multiaddr, error) {
- return w.waku.ListenAddresses(), nil
+ return w.waku.ListenAddresses()
}
func (w *gethWakuV2Wrapper) RelayPeersByTopic(topic string) (*types.PeerList, error) {
diff --git a/node/get_status_node.go b/node/get_status_node.go
index 8bbe25a86..ce6bcba9f 100644
--- a/node/get_status_node.go
+++ b/node/get_status_node.go
@@ -119,18 +119,19 @@ type StatusNode struct {
localNotificationsSrvc *localnotifications.Service
personalSrvc *personal.Service
timeSourceSrvc *timesource.NTPTimeSource
- wakuSrvc *waku.Waku
- wakuExtSrvc *wakuext.Service
- wakuV2Srvc *wakuv2.Waku
- wakuV2ExtSrvc *wakuv2ext.Service
- ensSrvc *ens.Service
- communityTokensSrvc *communitytokens.Service
- gifSrvc *gif.Service
- stickersSrvc *stickers.Service
- chatSrvc *chat.Service
- updatesSrvc *updates.Service
- pendingTracker *transactions.PendingTxTracker
- connectorSrvc *connector.Service
+ // TODO: add a dedicated nwaku service field (nwakuSrvc) once the nwaku service type is defined
+ wakuSrvc *waku.Waku
+ wakuExtSrvc *wakuext.Service
+ wakuV2Srvc *wakuv2.NWaku
+ wakuV2ExtSrvc *wakuv2ext.Service
+ ensSrvc *ens.Service
+ communityTokensSrvc *communitytokens.Service
+ gifSrvc *gif.Service
+ stickersSrvc *stickers.Service
+ chatSrvc *chat.Service
+ updatesSrvc *updates.Service
+ pendingTracker *transactions.PendingTxTracker
+ connectorSrvc *connector.Service
walletFeed event.Feed
}
diff --git a/node/status_node_services.go b/node/status_node_services.go
index 41aa52420..7a424465b 100644
--- a/node/status_node_services.go
+++ b/node/status_node_services.go
@@ -10,7 +10,6 @@ import (
"reflect"
"time"
- "github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/server"
"github.com/status-im/status-go/signal"
"github.com/status-im/status-go/transactions"
@@ -264,7 +263,7 @@ func (b *StatusNode) WakuExtService() *wakuext.Service {
func (b *StatusNode) WakuV2ExtService() *wakuv2ext.Service {
return b.wakuV2ExtSrvc
}
-func (b *StatusNode) WakuV2Service() *wakuv2.Waku {
+func (b *StatusNode) WakuV2Service() *wakuv2.NWaku {
return b.wakuV2Srvc
}
@@ -316,7 +315,7 @@ func (b *StatusNode) wakuService(wakuCfg *params.WakuConfig, clusterCfg *params.
}
-func (b *StatusNode) wakuV2Service(nodeConfig *params.NodeConfig) (*wakuv2.Waku, error) {
+func (b *StatusNode) wakuV2Service(nodeConfig *params.NodeConfig) (*wakuv2.NWaku, error) {
if b.wakuV2Srvc == nil {
cfg := &wakuv2.Config{
MaxMessageSize: wakucommon.DefaultMaxMessageSize,
@@ -333,7 +332,7 @@ func (b *StatusNode) wakuV2Service(nodeConfig *params.NodeConfig) (*wakuv2.Waku,
Nameserver: nodeConfig.WakuV2Config.Nameserver,
UDPPort: nodeConfig.WakuV2Config.UDPPort,
AutoUpdate: nodeConfig.WakuV2Config.AutoUpdate,
- DefaultShardPubsubTopic: shard.DefaultShardPubsubTopic(),
+ DefaultShardPubsubTopic: wakuv2.DefaultShardPubsubTopic(),
TelemetryServerURL: nodeConfig.WakuV2Config.TelemetryServerURL,
ClusterID: nodeConfig.ClusterConfig.ClusterID,
EnableMissingMessageVerification: nodeConfig.WakuV2Config.EnableMissingMessageVerification,
diff --git a/protocol/common/shard/shard.go b/protocol/common/shard/shard.go
deleted file mode 100644
index 011a6f452..000000000
--- a/protocol/common/shard/shard.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package shard
-
-import (
- wakuproto "github.com/waku-org/go-waku/waku/v2/protocol"
-
- "github.com/status-im/status-go/protocol/protobuf"
-)
-
-type Shard struct {
- Cluster uint16 `json:"cluster"`
- Index uint16 `json:"index"`
-}
-
-func FromProtobuff(p *protobuf.Shard) *Shard {
- if p == nil {
- return nil
- }
-
- return &Shard{
- Cluster: uint16(p.Cluster),
- Index: uint16(p.Index),
- }
-}
-
-func (s *Shard) Protobuffer() *protobuf.Shard {
- if s == nil {
- return nil
- }
-
- return &protobuf.Shard{
- Cluster: int32(s.Cluster),
- Index: int32(s.Index),
- }
-}
-func (s *Shard) PubsubTopic() string {
- if s != nil {
- return wakuproto.NewStaticShardingPubsubTopic(s.Cluster, s.Index).String()
- }
- return ""
-}
-
-const MainStatusShardCluster = 16
-const DefaultShardIndex = 32
-const NonProtectedShardIndex = 64
-
-func DefaultShardPubsubTopic() string {
- return wakuproto.NewStaticShardingPubsubTopic(MainStatusShardCluster, DefaultShardIndex).String()
-}
-
-func DefaultNonProtectedShard() *Shard {
- return &Shard{
- Cluster: MainStatusShardCluster,
- Index: NonProtectedShardIndex,
- }
-}
-
-func DefaultNonProtectedPubsubTopic() string {
- return DefaultNonProtectedShard().PubsubTopic()
-}
diff --git a/protocol/communities/community.go b/protocol/communities/community.go
index f0f5dd2e5..a7e3ae4d5 100644
--- a/protocol/communities/community.go
+++ b/protocol/communities/community.go
@@ -23,12 +23,12 @@ import (
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/images"
"github.com/status-im/status-go/protocol/common"
- "github.com/status-im/status-go/protocol/common/shard"
community_token "github.com/status-im/status-go/protocol/communities/token"
"github.com/status-im/status-go/protocol/protobuf"
"github.com/status-im/status-go/protocol/requests"
"github.com/status-im/status-go/protocol/v1"
"github.com/status-im/status-go/server"
+ "github.com/status-im/status-go/wakuv2"
)
const signatureLength = 65
@@ -55,7 +55,7 @@ type Config struct {
RequestsToJoin []*RequestToJoin
MemberIdentity *ecdsa.PrivateKey
EventsData *EventsData
- Shard *shard.Shard
+ Shard *wakuv2.Shard
PubsubTopicPrivateKey *ecdsa.PrivateKey
LastOpenedAt int64
}
@@ -172,7 +172,7 @@ func (o *Community) MarshalPublicAPIJSON() ([]byte, error) {
ActiveMembersCount uint64 `json:"activeMembersCount"`
PubsubTopic string `json:"pubsubTopic"`
PubsubTopicKey string `json:"pubsubTopicKey"`
- Shard *shard.Shard `json:"shard"`
+ Shard *wakuv2.Shard `json:"shard"`
}{
ID: o.ID(),
Verified: o.config.Verified,
@@ -308,7 +308,7 @@ func (o *Community) MarshalJSON() ([]byte, error) {
ActiveMembersCount uint64 `json:"activeMembersCount"`
PubsubTopic string `json:"pubsubTopic"`
PubsubTopicKey string `json:"pubsubTopicKey"`
- Shard *shard.Shard `json:"shard"`
+ Shard *wakuv2.Shard `json:"shard"`
LastOpenedAt int64 `json:"lastOpenedAt"`
Clock uint64 `json:"clock"`
}{
@@ -461,7 +461,7 @@ func (o *Community) DescriptionText() string {
return ""
}
-func (o *Community) Shard() *shard.Shard {
+func (o *Community) Shard() *wakuv2.Shard {
if o != nil && o.config != nil {
return o.config.Shard
}
diff --git a/protocol/communities/manager.go b/protocol/communities/manager.go
index 86f2bc6fb..03a722150 100644
--- a/protocol/communities/manager.go
+++ b/protocol/communities/manager.go
@@ -30,7 +30,6 @@ import (
multiaccountscommon "github.com/status-im/status-go/multiaccounts/common"
"github.com/status-im/status-go/params"
"github.com/status-im/status-go/protocol/common"
- "github.com/status-im/status-go/protocol/common/shard"
community_token "github.com/status-im/status-go/protocol/communities/token"
"github.com/status-im/status-go/protocol/encryption"
"github.com/status-im/status-go/protocol/ens"
@@ -45,6 +44,7 @@ import (
"github.com/status-im/status-go/services/wallet/token"
"github.com/status-im/status-go/signal"
"github.com/status-im/status-go/transactions"
+ "github.com/status-im/status-go/wakuv2"
)
type Publisher interface {
@@ -737,8 +737,8 @@ func (m *Manager) All() ([]*Community, error) {
}
type CommunityShard struct {
- CommunityID string `json:"communityID"`
- Shard *shard.Shard `json:"shard"`
+ CommunityID string `json:"communityID"`
+ Shard *wakuv2.Shard `json:"shard"`
}
type CuratedCommunities struct {
@@ -1546,7 +1546,7 @@ func (m *Manager) DeleteCommunity(id types.HexBytes) error {
return m.persistence.DeleteCommunitySettings(id)
}
-func (m *Manager) updateShard(community *Community, shard *shard.Shard, clock uint64) error {
+func (m *Manager) updateShard(community *Community, shard *wakuv2.Shard, clock uint64) error {
community.config.Shard = shard
if shard == nil {
return m.persistence.DeleteCommunityShard(community.ID())
@@ -1555,7 +1555,7 @@ func (m *Manager) updateShard(community *Community, shard *shard.Shard, clock ui
return m.persistence.SaveCommunityShard(community.ID(), shard, clock)
}
-func (m *Manager) UpdateShard(community *Community, shard *shard.Shard, clock uint64) error {
+func (m *Manager) UpdateShard(community *Community, shard *wakuv2.Shard, clock uint64) error {
m.communityLock.Lock(community.ID())
defer m.communityLock.Unlock(community.ID())
@@ -1563,7 +1563,7 @@ func (m *Manager) UpdateShard(community *Community, shard *shard.Shard, clock ui
}
// SetShard assigns a shard to a community
-func (m *Manager) SetShard(communityID types.HexBytes, shard *shard.Shard) (*Community, error) {
+func (m *Manager) SetShard(communityID types.HexBytes, shard *wakuv2.Shard) (*Community, error) {
m.communityLock.Lock(communityID)
defer m.communityLock.Unlock(communityID)
@@ -2155,11 +2155,11 @@ func (m *Manager) HandleCommunityDescriptionMessage(signer *ecdsa.PublicKey, des
if err != nil {
return nil, err
}
- var cShard *shard.Shard
+ var cShard *wakuv2.Shard
if communityShard == nil {
- cShard = &shard.Shard{Cluster: shard.MainStatusShardCluster, Index: shard.DefaultShardIndex}
+ cShard = &wakuv2.Shard{Cluster: wakuv2.MainStatusShardCluster, Index: wakuv2.DefaultShardIndex}
} else {
- cShard = shard.FromProtobuff(communityShard)
+ cShard = wakuv2.FromProtobuff(communityShard)
}
config := Config{
CommunityDescription: processedDescription,
@@ -3972,11 +3972,11 @@ func (m *Manager) GetByIDString(idString string) (*Community, error) {
return m.GetByID(id)
}
-func (m *Manager) GetCommunityShard(communityID types.HexBytes) (*shard.Shard, error) {
+func (m *Manager) GetCommunityShard(communityID types.HexBytes) (*wakuv2.Shard, error) {
return m.persistence.GetCommunityShard(communityID)
}
-func (m *Manager) SaveCommunityShard(communityID types.HexBytes, shard *shard.Shard, clock uint64) error {
+func (m *Manager) SaveCommunityShard(communityID types.HexBytes, shard *wakuv2.Shard, clock uint64) error {
m.communityLock.Lock(communityID)
defer m.communityLock.Unlock(communityID)
diff --git a/protocol/communities/persistence.go b/protocol/communities/persistence.go
index 84e20645e..a417b81a2 100644
--- a/protocol/communities/persistence.go
+++ b/protocol/communities/persistence.go
@@ -16,11 +16,11 @@ import (
"github.com/status-im/status-go/eth-node/crypto"
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/protocol/common"
- "github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/communities/token"
"github.com/status-im/status-go/protocol/encryption"
"github.com/status-im/status-go/protocol/protobuf"
"github.com/status-im/status-go/services/wallet/bigint"
+ "github.com/status-im/status-go/wakuv2"
)
type Persistence struct {
@@ -1766,7 +1766,7 @@ func (p *Persistence) AllNonApprovedCommunitiesRequestsToJoin() ([]*RequestToJoi
return nonApprovedRequestsToJoin, nil
}
-func (p *Persistence) SaveCommunityShard(communityID types.HexBytes, shard *shard.Shard, clock uint64) error {
+func (p *Persistence) SaveCommunityShard(communityID types.HexBytes, shard *wakuv2.Shard, clock uint64) error {
var cluster, index *uint16
if shard != nil {
@@ -1801,7 +1801,7 @@ func (p *Persistence) SaveCommunityShard(communityID types.HexBytes, shard *shar
}
// if data will not be found, will return sql.ErrNoRows. Must be handled on the caller side
-func (p *Persistence) GetCommunityShard(communityID types.HexBytes) (*shard.Shard, error) {
+func (p *Persistence) GetCommunityShard(communityID types.HexBytes) (*wakuv2.Shard, error) {
var cluster sql.NullInt64
var index sql.NullInt64
err := p.db.QueryRow(`SELECT shard_cluster, shard_index FROM communities_shards WHERE community_id = ?`,
@@ -1815,7 +1815,7 @@ func (p *Persistence) GetCommunityShard(communityID types.HexBytes) (*shard.Shar
return nil, nil
}
- return &shard.Shard{
+ return &wakuv2.Shard{
Cluster: uint16(cluster.Int64),
Index: uint16(index.Int64),
}, nil
diff --git a/protocol/communities/persistence_mapping.go b/protocol/communities/persistence_mapping.go
index a8964187e..8e743c571 100644
--- a/protocol/communities/persistence_mapping.go
+++ b/protocol/communities/persistence_mapping.go
@@ -7,8 +7,8 @@ import (
"github.com/status-im/status-go/eth-node/crypto"
"github.com/status-im/status-go/protocol/common"
- "github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/server"
+ "github.com/status-im/status-go/wakuv2"
)
func communityToRecord(community *Community) (*CommunityRecord, error) {
@@ -118,9 +118,9 @@ func recordBundleToCommunity(
}
}
- var s *shard.Shard = nil
+ var s *wakuv2.Shard = nil
if r.community.shardCluster != nil && r.community.shardIndex != nil {
- s = &shard.Shard{
+ s = &wakuv2.Shard{
Cluster: uint16(*r.community.shardCluster),
Index: uint16(*r.community.shardIndex),
}
diff --git a/protocol/linkpreview_unfurler_status.go b/protocol/linkpreview_unfurler_status.go
index d4e6e8e4d..6da0194ee 100644
--- a/protocol/linkpreview_unfurler_status.go
+++ b/protocol/linkpreview_unfurler_status.go
@@ -8,8 +8,8 @@ import (
"github.com/status-im/status-go/api/multiformat"
"github.com/status-im/status-go/images"
"github.com/status-im/status-go/protocol/common"
- "github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/communities"
+ "github.com/status-im/status-go/wakuv2"
)
type StatusUnfurler struct {
@@ -83,7 +83,7 @@ func (u *StatusUnfurler) buildContactData(publicKey string) (*common.StatusConta
return c, nil
}
-func (u *StatusUnfurler) buildCommunityData(communityID string, shard *shard.Shard) (*communities.Community, *common.StatusCommunityLinkPreview, error) {
+func (u *StatusUnfurler) buildCommunityData(communityID string, shard *wakuv2.Shard) (*communities.Community, *common.StatusCommunityLinkPreview, error) {
// This automatically checks the database
community, err := u.m.FetchCommunity(&FetchCommunityRequest{
CommunityKey: communityID,
@@ -108,7 +108,7 @@ func (u *StatusUnfurler) buildCommunityData(communityID string, shard *shard.Sha
return community, statusCommunityLinkPreviews, nil
}
-func (u *StatusUnfurler) buildChannelData(channelUUID string, communityID string, communityShard *shard.Shard) (*common.StatusCommunityChannelLinkPreview, error) {
+func (u *StatusUnfurler) buildChannelData(channelUUID string, communityID string, communityShard *wakuv2.Shard) (*common.StatusCommunityChannelLinkPreview, error) {
community, communityData, err := u.buildCommunityData(communityID, communityShard)
if err != nil {
return nil, fmt.Errorf("failed to build channel community data: %w", err)
diff --git a/protocol/messenger.go b/protocol/messenger.go
index a2987eb34..c1e09b6bb 100644
--- a/protocol/messenger.go
+++ b/protocol/messenger.go
@@ -38,13 +38,13 @@ import (
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/images"
multiaccountscommon "github.com/status-im/status-go/multiaccounts/common"
+ "github.com/status-im/status-go/wakuv2"
"github.com/status-im/status-go/multiaccounts"
"github.com/status-im/status-go/multiaccounts/accounts"
"github.com/status-im/status-go/multiaccounts/settings"
"github.com/status-im/status-go/protocol/anonmetrics"
"github.com/status-im/status-go/protocol/common"
- "github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/communities"
"github.com/status-im/status-go/protocol/encryption"
"github.com/status-im/status-go/protocol/encryption/multidevice"
@@ -1738,7 +1738,7 @@ func (m *Messenger) InitFilters() error {
logger := m.logger.With(zap.String("site", "Init"))
// Community requests will arrive in this pubsub topic
- err := m.SubscribeToPubsubTopic(shard.DefaultNonProtectedPubsubTopic(), nil)
+ err := m.SubscribeToPubsubTopic(wakuv2.DefaultNonProtectedPubsubTopic(), nil)
if err != nil {
return err
}
diff --git a/protocol/messenger_communities.go b/protocol/messenger_communities.go
index 06e0b616e..979ea3240 100644
--- a/protocol/messenger_communities.go
+++ b/protocol/messenger_communities.go
@@ -23,6 +23,7 @@ import (
"go.uber.org/zap"
utils "github.com/status-im/status-go/common"
+ "github.com/status-im/status-go/wakuv2"
"github.com/status-im/status-go/account"
multiaccountscommon "github.com/status-im/status-go/multiaccounts/common"
@@ -32,7 +33,6 @@ import (
"github.com/status-im/status-go/images"
"github.com/status-im/status-go/multiaccounts/accounts"
"github.com/status-im/status-go/protocol/common"
- "github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/communities"
"github.com/status-im/status-go/protocol/communities/token"
"github.com/status-im/status-go/protocol/discord"
@@ -86,10 +86,10 @@ const (
type FetchCommunityRequest struct {
// CommunityKey should be either a public or a private community key
- CommunityKey string `json:"communityKey"`
- Shard *shard.Shard `json:"shard"`
- TryDatabase bool `json:"tryDatabase"`
- WaitForResponse bool `json:"waitForResponse"`
+ CommunityKey string `json:"communityKey"`
+ Shard *wakuv2.Shard `json:"shard"`
+ TryDatabase bool `json:"tryDatabase"`
+ WaitForResponse bool `json:"waitForResponse"`
}
func (r *FetchCommunityRequest) Validate() error {
@@ -342,7 +342,7 @@ func (m *Messenger) handleCommunitiesSubscription(c chan *communities.Subscripti
Sender: community.PrivateKey(),
SkipEncryptionLayer: true,
MessageType: protobuf.ApplicationMetadataMessage_COMMUNITY_USER_KICKED,
- PubsubTopic: shard.DefaultNonProtectedPubsubTopic(),
+ PubsubTopic: wakuv2.DefaultNonProtectedPubsubTopic(),
}
_, err = m.sender.SendPrivate(context.Background(), pk, rawMessage)
@@ -675,7 +675,7 @@ func (m *Messenger) handleCommunitySharedAddressesRequest(state *ReceivedMessage
CommunityID: community.ID(),
SkipEncryptionLayer: true,
MessageType: protobuf.ApplicationMetadataMessage_COMMUNITY_SHARED_ADDRESSES_RESPONSE,
- PubsubTopic: shard.DefaultNonProtectedPubsubTopic(),
+ PubsubTopic: wakuv2.DefaultNonProtectedPubsubTopic(),
ResendType: common.ResendTypeRawMessage,
ResendMethod: common.ResendMethodSendPrivate,
Recipients: []*ecdsa.PublicKey{signer},
@@ -1041,7 +1041,7 @@ func (m *Messenger) JoinCommunity(ctx context.Context, communityID types.HexByte
return mr, nil
}
-func (m *Messenger) subscribeToCommunityShard(communityID []byte, shard *shard.Shard) error {
+func (m *Messenger) subscribeToCommunityShard(communityID []byte, shard *wakuv2.Shard) error {
if m.transport.WakuVersion() != 2 {
return nil
}
@@ -1062,7 +1062,7 @@ func (m *Messenger) subscribeToCommunityShard(communityID []byte, shard *shard.S
return m.transport.SubscribeToPubsubTopic(pubsubTopic, pubK)
}
-func (m *Messenger) unsubscribeFromShard(shard *shard.Shard) error {
+func (m *Messenger) unsubscribeFromShard(shard *wakuv2.Shard) error {
if m.transport.WakuVersion() != 2 {
return nil
}
@@ -1489,7 +1489,7 @@ func (m *Messenger) RequestToJoinCommunity(request *requests.RequestToJoinCommun
ResendType: common.ResendTypeRawMessage,
SkipEncryptionLayer: true,
MessageType: protobuf.ApplicationMetadataMessage_COMMUNITY_REQUEST_TO_JOIN,
- PubsubTopic: shard.DefaultNonProtectedPubsubTopic(),
+ PubsubTopic: wakuv2.DefaultNonProtectedPubsubTopic(),
Priority: &common.HighPriority,
}
@@ -1866,7 +1866,7 @@ func (m *Messenger) CancelRequestToJoinCommunity(ctx context.Context, request *r
CommunityID: community.ID(),
SkipEncryptionLayer: true,
MessageType: protobuf.ApplicationMetadataMessage_COMMUNITY_CANCEL_REQUEST_TO_JOIN,
- PubsubTopic: shard.DefaultNonProtectedPubsubTopic(),
+ PubsubTopic: wakuv2.DefaultNonProtectedPubsubTopic(),
ResendType: common.ResendTypeRawMessage,
Priority: &common.HighPriority,
}
@@ -2012,7 +2012,7 @@ func (m *Messenger) acceptRequestToJoinCommunity(requestToJoin *communities.Requ
CommunityID: community.ID(),
SkipEncryptionLayer: true,
MessageType: protobuf.ApplicationMetadataMessage_COMMUNITY_REQUEST_TO_JOIN_RESPONSE,
- PubsubTopic: shard.DefaultNonProtectedPubsubTopic(),
+ PubsubTopic: wakuv2.DefaultNonProtectedPubsubTopic(),
ResendType: common.ResendTypeRawMessage,
ResendMethod: common.ResendMethodSendPrivate,
Recipients: []*ecdsa.PublicKey{pk},
@@ -2475,7 +2475,7 @@ func (m *Messenger) DefaultFilters(o *communities.Community) []transport.Filters
{ChatID: updatesChannelID, PubsubTopic: communityPubsubTopic},
{ChatID: mlChannelID, PubsubTopic: communityPubsubTopic},
{ChatID: memberUpdateChannelID, PubsubTopic: communityPubsubTopic},
- {ChatID: uncompressedPubKey, PubsubTopic: shard.DefaultNonProtectedPubsubTopic()},
+ {ChatID: uncompressedPubKey, PubsubTopic: wakuv2.DefaultNonProtectedPubsubTopic()},
}
return filters
@@ -3534,7 +3534,7 @@ func (m *Messenger) HandleCommunityShardKey(state *ReceivedMessageState, message
}
func (m *Messenger) handleCommunityShardAndFiltersFromProto(community *communities.Community, message *protobuf.CommunityShardKey) error {
- err := m.communitiesManager.UpdateShard(community, shard.FromProtobuff(message.Shard), message.Clock)
+ err := m.communitiesManager.UpdateShard(community, wakuv2.FromProtobuff(message.Shard), message.Clock)
if err != nil {
return err
}
@@ -3556,7 +3556,7 @@ func (m *Messenger) handleCommunityShardAndFiltersFromProto(community *communiti
}
// Unsubscribing from existing shard
- if community.Shard() != nil && community.Shard() != shard.FromProtobuff(message.GetShard()) {
+ if community.Shard() != nil && community.Shard() != wakuv2.FromProtobuff(message.GetShard()) {
err := m.unsubscribeFromShard(community.Shard())
if err != nil {
return err
@@ -3570,7 +3570,7 @@ func (m *Messenger) handleCommunityShardAndFiltersFromProto(community *communiti
return err
}
// Update community filters in case of change of shard
- if community.Shard() != shard.FromProtobuff(message.GetShard()) {
+ if community.Shard() != wakuv2.FromProtobuff(message.GetShard()) {
err = m.UpdateCommunityFilters(community)
if err != nil {
return err
diff --git a/protocol/messenger_community_shard.go b/protocol/messenger_community_shard.go
index 8fdda061d..ca8f6abda 100644
--- a/protocol/messenger_community_shard.go
+++ b/protocol/messenger_community_shard.go
@@ -12,11 +12,11 @@ import (
"github.com/status-im/status-go/eth-node/crypto"
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/protocol/common"
- "github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/communities"
"github.com/status-im/status-go/protocol/protobuf"
"github.com/status-im/status-go/protocol/transport"
v1protocol "github.com/status-im/status-go/protocol/v1"
+ "github.com/status-im/status-go/wakuv2"
)
func (m *Messenger) sendPublicCommunityShardInfo(community *communities.Community) error {
@@ -57,7 +57,7 @@ func (m *Messenger) sendPublicCommunityShardInfo(community *communities.Communit
// we don't want to wrap in an encryption layer message
SkipEncryptionLayer: true,
MessageType: protobuf.ApplicationMetadataMessage_COMMUNITY_PUBLIC_SHARD_INFO,
- PubsubTopic: shard.DefaultNonProtectedPubsubTopic(), // it must be sent always to default shard pubsub topic
+ PubsubTopic: wakuv2.DefaultNonProtectedPubsubTopic(), // it must be sent always to default shard pubsub topic
Priority: &common.HighPriority,
}
@@ -89,7 +89,7 @@ func (m *Messenger) HandleCommunityPublicShardInfo(state *ReceivedMessageState,
return err
}
- err = m.communitiesManager.SaveCommunityShard(publicShardInfo.CommunityId, shard.FromProtobuff(publicShardInfo.Shard), publicShardInfo.Clock)
+ err = m.communitiesManager.SaveCommunityShard(publicShardInfo.CommunityId, wakuv2.FromProtobuff(publicShardInfo.Shard), publicShardInfo.Clock)
if err != nil && err != communities.ErrOldShardInfo {
logError(err)
return err
diff --git a/protocol/messenger_config.go b/protocol/messenger_config.go
index d98d9edea..b17f4c65b 100644
--- a/protocol/messenger_config.go
+++ b/protocol/messenger_config.go
@@ -114,7 +114,7 @@ type config struct {
telemetryServerURL string
telemetrySendPeriod time.Duration
- wakuService *wakuv2.Waku
+ wakuService *wakuv2.NWaku
messageResendMinDelay time.Duration
messageResendMaxCount int
@@ -387,7 +387,7 @@ func WithCommunityTokensService(s communities.CommunityTokensServiceInterface) O
}
}
-func WithWakuService(s *wakuv2.Waku) Option {
+func WithWakuService(s *wakuv2.NWaku) Option {
return func(c *config) error {
c.wakuService = s
return nil
diff --git a/protocol/messenger_share_urls.go b/protocol/messenger_share_urls.go
index 666a98217..5e6dc3ff8 100644
--- a/protocol/messenger_share_urls.go
+++ b/protocol/messenger_share_urls.go
@@ -11,12 +11,12 @@ import (
"github.com/status-im/status-go/eth-node/crypto"
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/protocol/common"
- "github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/communities"
"github.com/status-im/status-go/protocol/protobuf"
"github.com/status-im/status-go/protocol/requests"
"github.com/status-im/status-go/protocol/urls"
"github.com/status-im/status-go/services/utils"
+ "github.com/status-im/status-go/wakuv2"
)
type CommunityURLData struct {
@@ -46,7 +46,7 @@ type URLDataResponse struct {
Community *CommunityURLData `json:"community"`
Channel *CommunityChannelURLData `json:"channel"`
Contact *ContactURLData `json:"contact"`
- Shard *shard.Shard `json:"shard,omitempty"`
+ Shard *wakuv2.Shard `json:"shard,omitempty"`
}
const baseShareURL = "https://status.app"
@@ -201,7 +201,7 @@ func parseCommunityURLWithData(data string, chatKey string) (*URLDataResponse, e
TagIndices: tagIndices,
CommunityID: types.EncodeHex(communityID),
},
- Shard: shard.FromProtobuff(urlDataProto.Shard),
+ Shard: wakuv2.FromProtobuff(urlDataProto.Shard),
}, nil
}
@@ -377,7 +377,7 @@ func parseCommunityChannelURLWithData(data string, chatKey string) (*URLDataResp
Color: channelProto.Color,
ChannelUUID: channelProto.Uuid,
},
- Shard: shard.FromProtobuff(urlDataProto.Shard),
+ Shard: wakuv2.FromProtobuff(urlDataProto.Shard),
}, nil
}
diff --git a/protocol/messenger_store_node_request_manager.go b/protocol/messenger_store_node_request_manager.go
index e5a67e1af..f865e2ba5 100644
--- a/protocol/messenger_store_node_request_manager.go
+++ b/protocol/messenger_store_node_request_manager.go
@@ -8,7 +8,6 @@ import (
"time"
"github.com/status-im/status-go/eth-node/crypto"
- "github.com/status-im/status-go/protocol/common/shard"
"go.uber.org/zap"
@@ -16,6 +15,7 @@ import (
"github.com/status-im/status-go/protocol/communities"
"github.com/status-im/status-go/protocol/transport"
"github.com/status-im/status-go/services/mailservers"
+ "github.com/status-im/status-go/wakuv2"
)
const (
@@ -81,7 +81,7 @@ func (m *StoreNodeRequestManager) FetchCommunity(community communities.Community
zap.Any("community", community),
zap.Any("config", cfg))
- requestCommunity := func(communityID string, shard *shard.Shard) (*communities.Community, StoreNodeRequestStats, error) {
+ requestCommunity := func(communityID string, shard *wakuv2.Shard) (*communities.Community, StoreNodeRequestStats, error) {
channel, err := m.subscribeToRequest(storeNodeCommunityRequest, communityID, shard, cfg)
if err != nil {
return nil, StoreNodeRequestStats{}, fmt.Errorf("failed to create a request for community: %w", err)
@@ -99,7 +99,7 @@ func (m *StoreNodeRequestManager) FetchCommunity(community communities.Community
communityShard := community.Shard
if communityShard == nil {
id := transport.CommunityShardInfoTopic(community.CommunityID)
- fetchedShard, err := m.subscribeToRequest(storeNodeShardRequest, id, shard.DefaultNonProtectedShard(), cfg)
+ fetchedShard, err := m.subscribeToRequest(storeNodeShardRequest, id, wakuv2.DefaultNonProtectedShard(), cfg)
if err != nil {
return nil, StoreNodeRequestStats{}, fmt.Errorf("failed to create a shard info request: %w", err)
}
@@ -176,7 +176,7 @@ func (m *StoreNodeRequestManager) FetchContact(contactID string, opts []StoreNod
// subscribeToRequest checks if a request for given community/contact is already in progress, creates and installs
// a new one if not found, and returns a subscription to the result of the found/started request.
// The subscription can then be used to get the result of the request, this could be either a community/contact or an error.
-func (m *StoreNodeRequestManager) subscribeToRequest(requestType storeNodeRequestType, dataID string, shard *shard.Shard, cfg StoreNodeRequestConfig) (storeNodeResponseSubscription, error) {
+func (m *StoreNodeRequestManager) subscribeToRequest(requestType storeNodeRequestType, dataID string, shard *wakuv2.Shard, cfg StoreNodeRequestConfig) (storeNodeResponseSubscription, error) {
// It's important to unlock only after getting the subscription channel.
// We also lock `activeRequestsLock` during finalizing the requests. This ensures that the subscription
// created in this function will get the result even if the requests proceeds faster than this function ends.
@@ -230,7 +230,7 @@ func (m *StoreNodeRequestManager) newStoreNodeRequest() *storeNodeRequest {
// getFilter checks if a filter for a given community is already created and creates one of not found.
// Returns the found/created filter, a flag if the filter was created by the function and an error.
-func (m *StoreNodeRequestManager) getFilter(requestType storeNodeRequestType, dataID string, shard *shard.Shard) (*transport.Filter, bool, error) {
+func (m *StoreNodeRequestManager) getFilter(requestType storeNodeRequestType, dataID string, shard *wakuv2.Shard) (*transport.Filter, bool, error) {
// First check if such filter already exists.
filter := m.messenger.transport.FilterByChatID(dataID)
if filter != nil {
@@ -332,7 +332,7 @@ type storeNodeRequestResult struct {
// One of data fields (community or contact) will be present depending on request type
community *communities.Community
contact *Contact
- shard *shard.Shard
+ shard *wakuv2.Shard
}
type storeNodeResponseSubscription = chan storeNodeRequestResult
diff --git a/protocol/messenger_testing_utils.go b/protocol/messenger_testing_utils.go
index 906d04346..b869c8618 100644
--- a/protocol/messenger_testing_utils.go
+++ b/protocol/messenger_testing_utils.go
@@ -13,11 +13,11 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/status-im/status-go/protocol/wakusync"
+ "github.com/status-im/status-go/wakuv2"
"github.com/status-im/status-go/protocol/identity"
"github.com/status-im/status-go/eth-node/types"
- waku2 "github.com/status-im/status-go/wakuv2"
"github.com/stretchr/testify/suite"
@@ -205,7 +205,7 @@ func WaitOnSignaledCommunityFound(m *Messenger, action func(), condition func(co
}
}
-func WaitForConnectionStatus(s *suite.Suite, waku *waku2.Waku, action func() bool) {
+func WaitForConnectionStatus(s *suite.Suite, waku *wakuv2.NWaku, action func() bool) {
subscription := waku.SubscribeToConnStatusChanges()
defer subscription.Unsubscribe()
@@ -237,7 +237,7 @@ func hasAllPeers(m map[peer.ID]types.WakuV2Peer, checkSlice peer.IDSlice) bool {
return true
}
-func WaitForPeersConnected(s *suite.Suite, waku *waku2.Waku, action func() peer.IDSlice) {
+func WaitForPeersConnected(s *suite.Suite, waku *wakuv2.NWaku, action func() peer.IDSlice) {
subscription := waku.SubscribeToConnStatusChanges()
defer subscription.Unsubscribe()
diff --git a/protocol/requests/set_community_shard.go b/protocol/requests/set_community_shard.go
index 0b1240b56..6ae52d1bd 100644
--- a/protocol/requests/set_community_shard.go
+++ b/protocol/requests/set_community_shard.go
@@ -4,12 +4,12 @@ import (
"errors"
"github.com/status-im/status-go/eth-node/types"
- "github.com/status-im/status-go/protocol/common/shard"
+ "github.com/status-im/status-go/wakuv2"
)
type SetCommunityShard struct {
CommunityID types.HexBytes `json:"communityId"`
- Shard *shard.Shard `json:"shard,omitempty"`
+ Shard *wakuv2.Shard `json:"shard,omitempty"`
PrivateKey *types.HexBytes `json:"privateKey,omitempty"`
}
@@ -19,7 +19,7 @@ func (s *SetCommunityShard) Validate() error {
}
if s.Shard != nil {
// TODO: for now only MainStatusShard(16) is accepted
- if s.Shard.Cluster != shard.MainStatusShardCluster {
+ if s.Shard.Cluster != wakuv2.MainStatusShardCluster {
return errors.New("invalid shard cluster")
}
if s.Shard.Index > 1023 {
diff --git a/protocol/transport/filters_manager.go b/protocol/transport/filters_manager.go
index 5393d63bf..acf3002d7 100644
--- a/protocol/transport/filters_manager.go
+++ b/protocol/transport/filters_manager.go
@@ -11,7 +11,7 @@ import (
"go.uber.org/zap"
"github.com/status-im/status-go/eth-node/types"
- "github.com/status-im/status-go/protocol/common/shard"
+ "github.com/status-im/status-go/wakuv2"
)
const (
@@ -141,7 +141,7 @@ func (f *FiltersManager) InitPublicFilters(publicFiltersToInit []FiltersToInitia
}
type CommunityFilterToInitialize struct {
- Shard *shard.Shard
+ Shard *wakuv2.Shard
PrivKey *ecdsa.PrivateKey
}
@@ -158,7 +158,7 @@ func (f *FiltersManager) InitCommunityFilters(communityFiltersToInitialize []Com
}
topics := make([]string, 0)
- topics = append(topics, shard.DefaultNonProtectedPubsubTopic())
+ topics = append(topics, wakuv2.DefaultNonProtectedPubsubTopic())
topics = append(topics, communityFilter.Shard.PubsubTopic())
for _, pubsubTopic := range topics {
diff --git a/services/ext/api.go b/services/ext/api.go
index 3554281af..ef656d2ba 100644
--- a/services/ext/api.go
+++ b/services/ext/api.go
@@ -16,6 +16,7 @@ import (
"github.com/status-im/status-go/services/browsers"
"github.com/status-im/status-go/services/wallet"
"github.com/status-im/status-go/services/wallet/bigint"
+ "github.com/status-im/status-go/wakuv2"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/log"
@@ -32,7 +33,6 @@ import (
"github.com/status-im/status-go/multiaccounts/settings"
"github.com/status-im/status-go/protocol"
"github.com/status-im/status-go/protocol/common"
- "github.com/status-im/status-go/protocol/common/shard"
"github.com/status-im/status-go/protocol/communities"
"github.com/status-im/status-go/protocol/communities/token"
"github.com/status-im/status-go/protocol/discord"
@@ -1311,7 +1311,7 @@ func (api *PublicAPI) RequestCommunityInfoFromMailserver(communityID string) (*c
// Deprecated: RequestCommunityInfoFromMailserverWithShard is deprecated in favor of
// configurable FetchCommunity.
-func (api *PublicAPI) RequestCommunityInfoFromMailserverWithShard(communityID string, shard *shard.Shard) (*communities.Community, error) {
+func (api *PublicAPI) RequestCommunityInfoFromMailserverWithShard(communityID string, shard *wakuv2.Shard) (*communities.Community, error) {
request := &protocol.FetchCommunityRequest{
CommunityKey: communityID,
Shard: shard,
@@ -1336,7 +1336,7 @@ func (api *PublicAPI) RequestCommunityInfoFromMailserverAsync(communityID string
// Deprecated: RequestCommunityInfoFromMailserverAsyncWithShard is deprecated in favor of
// configurable FetchCommunity.
-func (api *PublicAPI) RequestCommunityInfoFromMailserverAsyncWithShard(communityID string, shard *shard.Shard) error {
+func (api *PublicAPI) RequestCommunityInfoFromMailserverAsyncWithShard(communityID string, shard *wakuv2.Shard) error {
request := &protocol.FetchCommunityRequest{
CommunityKey: communityID,
Shard: shard,
diff --git a/services/ext/service.go b/services/ext/service.go
index 95d1b49c7..c138d061c 100644
--- a/services/ext/service.go
+++ b/services/ext/service.go
@@ -123,7 +123,7 @@ func (s *Service) GetPeer(rawURL string) (*enode.Node, error) {
return enode.ParseV4(rawURL)
}
-func (s *Service) InitProtocol(nodeName string, identity *ecdsa.PrivateKey, appDb, walletDb *sql.DB, httpServer *server.MediaServer, multiAccountDb *multiaccounts.Database, acc *multiaccounts.Account, accountManager *account.GethManager, rpcClient *rpc.Client, walletService *wallet.Service, communityTokensService *communitytokens.Service, wakuService *wakuv2.Waku, logger *zap.Logger) error {
+func (s *Service) InitProtocol(nodeName string, identity *ecdsa.PrivateKey, appDb, walletDb *sql.DB, httpServer *server.MediaServer, multiAccountDb *multiaccounts.Database, acc *multiaccounts.Account, accountManager *account.GethManager, rpcClient *rpc.Client, walletService *wallet.Service, communityTokensService *communitytokens.Service, wakuService *wakuv2.NWaku, logger *zap.Logger) error {
var err error
if !s.config.ShhextConfig.PFSEnabled {
return nil
@@ -393,7 +393,7 @@ func buildMessengerOptions(
accountsDB *accounts.Database,
walletService *wallet.Service,
communityTokensService *communitytokens.Service,
- wakuService *wakuv2.Waku,
+ wakuService *wakuv2.NWaku,
logger *zap.Logger,
messengerSignalsHandler protocol.MessengerSignalsHandler,
accountManager account.Manager,
diff --git a/services/status/service.go b/services/status/service.go
index abfa1531e..d8fac8bd4 100644
--- a/services/status/service.go
+++ b/services/status/service.go
@@ -10,7 +10,7 @@ import (
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/protocol"
- "github.com/status-im/status-go/protocol/common/shard"
+ "github.com/status-im/status-go/wakuv2"
)
// Make sure that Service implements node.Lifecycle interface.
@@ -70,7 +70,7 @@ type PublicAPI struct {
service *Service
}
-func (p *PublicAPI) CommunityInfo(communityID types.HexBytes, shard *shard.Shard) (json.RawMessage, error) {
+func (p *PublicAPI) CommunityInfo(communityID types.HexBytes, shard *wakuv2.Shard) (json.RawMessage, error) {
if p.service.messenger == nil {
return nil, ErrNotInitialized
}
diff --git a/vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/client.go b/vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/client.go
index 5cda4eef2..9fa3c4318 100644
--- a/vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/client.go
+++ b/vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/client.go
@@ -113,40 +113,40 @@ func (s *WakuStore) Request(ctx context.Context, criteria Criteria, opts ...Requ
}
//Add Peer to peerstore.
- if s.pm != nil && params.peerAddr != nil {
- pData, err := s.pm.AddPeer(params.peerAddr, peerstore.Static, pubsubTopics, StoreQueryID_v300)
+ if s.pm != nil && params.PeerAddr != nil {
+ pData, err := s.pm.AddPeer(params.PeerAddr, peerstore.Static, pubsubTopics, StoreQueryID_v300)
if err != nil {
return nil, err
}
s.pm.Connect(pData)
- params.selectedPeer = pData.AddrInfo.ID
+ params.SelectedPeer = pData.AddrInfo.ID
}
- if s.pm != nil && params.selectedPeer == "" {
+ if s.pm != nil && params.SelectedPeer == "" {
if isFilterCriteria {
selectedPeers, err := s.pm.SelectPeers(
peermanager.PeerSelectionCriteria{
- SelectionType: params.peerSelectionType,
+ SelectionType: params.PeerSelectionType,
Proto: StoreQueryID_v300,
PubsubTopics: []string{filterCriteria.PubsubTopic},
- SpecificPeers: params.preferredPeers,
+ SpecificPeers: params.PreferredPeers,
Ctx: ctx,
},
)
if err != nil {
return nil, err
}
- params.selectedPeer = selectedPeers[0]
+ params.SelectedPeer = selectedPeers[0]
} else {
return nil, ErrMustSelectPeer
}
}
- if params.selectedPeer == "" {
+ if params.SelectedPeer == "" {
return nil, ErrNoPeersAvailable
}
- pageLimit := params.pageLimit
+ pageLimit := params.PageLimit
if pageLimit == 0 {
pageLimit = DefaultPageSize
} else if pageLimit > uint64(MaxPageSize) {
@@ -154,16 +154,16 @@ func (s *WakuStore) Request(ctx context.Context, criteria Criteria, opts ...Requ
}
storeRequest := &pb.StoreQueryRequest{
- RequestId: hex.EncodeToString(params.requestID),
- IncludeData: params.includeData,
- PaginationForward: params.forward,
+ RequestId: hex.EncodeToString(params.RequestID),
+ IncludeData: params.IncludeData,
+ PaginationForward: params.Forward,
PaginationLimit: proto.Uint64(pageLimit),
}
criteria.PopulateStoreRequest(storeRequest)
- if params.cursor != nil {
- storeRequest.PaginationCursor = params.cursor
+ if params.Cursor != nil {
+ storeRequest.PaginationCursor = params.Cursor
}
err := storeRequest.Validate()
@@ -171,7 +171,7 @@ func (s *WakuStore) Request(ctx context.Context, criteria Criteria, opts ...Requ
return nil, err
}
- response, err := s.queryFrom(ctx, storeRequest, params.selectedPeer)
+ response, err := s.queryFrom(ctx, storeRequest, params.SelectedPeer)
if err != nil {
return nil, err
}
@@ -181,7 +181,7 @@ func (s *WakuStore) Request(ctx context.Context, criteria Criteria, opts ...Requ
messages: response.Messages,
storeRequest: storeRequest,
storeResponse: response,
- peerID: params.selectedPeer,
+ peerID: params.SelectedPeer,
cursor: response.PaginationCursor,
}
diff --git a/vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/options.go b/vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/options.go
index b38afd53a..b0dd3de65 100644
--- a/vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/options.go
+++ b/vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/options.go
@@ -10,15 +10,15 @@ import (
)
type Parameters struct {
- selectedPeer peer.ID
- peerAddr multiaddr.Multiaddr
- peerSelectionType peermanager.PeerSelection
- preferredPeers peer.IDSlice
- requestID []byte
- cursor []byte
- pageLimit uint64
- forward bool
- includeData bool
+ SelectedPeer peer.ID
+ PeerAddr multiaddr.Multiaddr
+ PeerSelectionType peermanager.PeerSelection
+ PreferredPeers peer.IDSlice
+ RequestID []byte
+ Cursor []byte
+ PageLimit uint64
+ Forward bool
+ IncludeData bool
}
type RequestOption func(*Parameters) error
@@ -27,8 +27,8 @@ type RequestOption func(*Parameters) error
// Note that this option is mutually exclusive to WithPeerAddr, only one of them can be used.
func WithPeer(p peer.ID) RequestOption {
return func(params *Parameters) error {
- params.selectedPeer = p
- if params.peerAddr != nil {
+ params.SelectedPeer = p
+ if params.PeerAddr != nil {
return errors.New("WithPeer and WithPeerAddr options are mutually exclusive")
}
return nil
@@ -40,8 +40,8 @@ func WithPeer(p peer.ID) RequestOption {
// Note that this option is mutually exclusive to WithPeerAddr, only one of them can be used.
func WithPeerAddr(pAddr multiaddr.Multiaddr) RequestOption {
return func(params *Parameters) error {
- params.peerAddr = pAddr
- if params.selectedPeer != "" {
+ params.PeerAddr = pAddr
+ if params.SelectedPeer != "" {
return errors.New("WithPeerAddr and WithPeer options are mutually exclusive")
}
return nil
@@ -55,8 +55,8 @@ func WithPeerAddr(pAddr multiaddr.Multiaddr) RequestOption {
// Note: This option is avaiable only with peerManager
func WithAutomaticPeerSelection(fromThesePeers ...peer.ID) RequestOption {
return func(params *Parameters) error {
- params.peerSelectionType = peermanager.Automatic
- params.preferredPeers = fromThesePeers
+ params.PeerSelectionType = peermanager.Automatic
+ params.PreferredPeers = fromThesePeers
return nil
}
}
@@ -68,7 +68,7 @@ func WithAutomaticPeerSelection(fromThesePeers ...peer.ID) RequestOption {
// Note: This option is avaiable only with peerManager
func WithFastestPeerSelection(fromThesePeers ...peer.ID) RequestOption {
return func(params *Parameters) error {
- params.peerSelectionType = peermanager.LowestRTT
+ params.PeerSelectionType = peermanager.LowestRTT
return nil
}
}
@@ -77,7 +77,7 @@ func WithFastestPeerSelection(fromThesePeers ...peer.ID) RequestOption {
// creating a store request
func WithRequestID(requestID []byte) RequestOption {
return func(params *Parameters) error {
- params.requestID = requestID
+ params.RequestID = requestID
return nil
}
}
@@ -86,14 +86,14 @@ func WithRequestID(requestID []byte) RequestOption {
// when creating a store request
func WithAutomaticRequestID() RequestOption {
return func(params *Parameters) error {
- params.requestID = protocol.GenerateRequestID()
+ params.RequestID = protocol.GenerateRequestID()
return nil
}
}
func WithCursor(cursor []byte) RequestOption {
return func(params *Parameters) error {
- params.cursor = cursor
+ params.Cursor = cursor
return nil
}
}
@@ -101,8 +101,8 @@ func WithCursor(cursor []byte) RequestOption {
// WithPaging is an option used to specify the order and maximum number of records to return
func WithPaging(forward bool, limit uint64) RequestOption {
return func(params *Parameters) error {
- params.forward = forward
- params.pageLimit = limit
+ params.Forward = forward
+ params.PageLimit = limit
return nil
}
}
@@ -110,7 +110,7 @@ func WithPaging(forward bool, limit uint64) RequestOption {
// IncludeData is an option used to indicate whether you want to return the message content or not
func IncludeData(v bool) RequestOption {
return func(params *Parameters) error {
- params.includeData = v
+ params.IncludeData = v
return nil
}
}
diff --git a/wakuv2/api.go b/wakuv2/api.go
index f106b32f5..454a7fbab 100644
--- a/wakuv2/api.go
+++ b/wakuv2/api.go
@@ -1,17 +1,17 @@
-// Copyright 2019 The Waku Library Authors.
+// Copyright 2019 The NWaku Library Authors.
//
-// The Waku library is free software: you can redistribute it and/or modify
+// The NWaku library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
-// The Waku library is distributed in the hope that it will be useful,
+// The NWaku library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty off
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
-// along with the Waku library. If not, see <http://www.gnu.org/licenses/>.
+// along with the NWaku library. If not, see <http://www.gnu.org/licenses/>.
//
// This software uses the go-ethereum library, which is licensed
// under the GNU Lesser General Public Library, version 3 or any later.
@@ -52,14 +52,14 @@ var (
// PublicWakuAPI provides the waku RPC service that can be
// use publicly without security implications.
type PublicWakuAPI struct {
- w *Waku
+ w *NWaku
mu sync.Mutex
lastUsed map[string]time.Time // keeps track when a filter was polled for the last time.
}
// NewPublicWakuAPI create a new RPC waku service.
-func NewPublicWakuAPI(w *Waku) *PublicWakuAPI {
+func NewPublicWakuAPI(w *NWaku) *PublicWakuAPI {
api := &PublicWakuAPI{
w: w,
lastUsed: make(map[string]time.Time),
@@ -185,7 +185,7 @@ type NewMessage struct {
Priority *int `json:"priority"`
}
-// Post posts a message on the Waku network.
+// Post posts a message on the NWaku network.
// returns the hash of the message in case of success.
func (api *PublicWakuAPI) Post(ctx context.Context, req NewMessage) (hexutil.Bytes, error) {
var (
@@ -252,7 +252,7 @@ func (api *PublicWakuAPI) Post(ctx context.Context, req NewMessage) (hexutil.Byt
Version: &version,
ContentTopic: req.ContentTopic.ContentTopic(),
Timestamp: proto.Int64(api.w.timestamp()),
- Meta: []byte{}, // TODO: empty for now. Once we use Waku Archive v2, we should deprecate the timestamp and use an ULID here
+ Meta: []byte{}, // TODO: empty for now. Once we use NWaku Archive v2, we should deprecate the timestamp and use an ULID here
Ephemeral: &req.Ephemeral,
}
diff --git a/wakuv2/config.go b/wakuv2/config.go
index cf27a5e6d..12b964409 100644
--- a/wakuv2/config.go
+++ b/wakuv2/config.go
@@ -23,8 +23,6 @@ import (
"go.uber.org/zap"
- "github.com/status-im/status-go/protocol/common/shard"
-
ethdisc "github.com/ethereum/go-ethereum/p2p/dnsdisc"
"github.com/status-im/status-go/wakuv2/common"
@@ -117,10 +115,10 @@ func setDefaults(cfg *Config) *Config {
}
if cfg.DefaultShardPubsubTopic == "" {
- cfg.DefaultShardPubsubTopic = shard.DefaultShardPubsubTopic()
+ cfg.DefaultShardPubsubTopic = DefaultShardPubsubTopic()
//For now populating with both used shards, but this can be populated from user subscribed communities etc once community sharding is implemented
- cfg.DefaultShardedPubsubTopics = append(cfg.DefaultShardedPubsubTopics, shard.DefaultShardPubsubTopic())
- cfg.DefaultShardedPubsubTopics = append(cfg.DefaultShardedPubsubTopics, shard.DefaultNonProtectedPubsubTopic())
+ cfg.DefaultShardedPubsubTopics = append(cfg.DefaultShardedPubsubTopics, DefaultShardPubsubTopic())
+ cfg.DefaultShardedPubsubTopics = append(cfg.DefaultShardedPubsubTopics, DefaultNonProtectedPubsubTopic())
}
return cfg
diff --git a/wakuv2/message_publishing.go b/wakuv2/message_publishing.go
index 9fbf44fa8..5603275bf 100644
--- a/wakuv2/message_publishing.go
+++ b/wakuv2/message_publishing.go
@@ -1,13 +1,13 @@
package wakuv2
import (
+ "encoding/json"
"errors"
"go.uber.org/zap"
"github.com/waku-org/go-waku/waku/v2/api/publish"
"github.com/waku-org/go-waku/waku/v2/protocol"
- "github.com/waku-org/go-waku/waku/v2/protocol/lightpush"
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
@@ -35,7 +35,7 @@ func (pm PublishMethod) String() string {
// Send injects a message into the waku send queue, to be distributed in the
// network in the coming cycles.
-func (w *Waku) Send(pubsubTopic string, msg *pb.WakuMessage, priority *int) ([]byte, error) {
+func (w *NWaku) Send(pubsubTopic string, msg *pb.WakuMessage, priority *int) ([]byte, error) {
pubsubTopic = w.GetPubsubTopic(pubsubTopic)
if w.protectedTopicStore != nil {
privKey, err := w.protectedTopicStore.FetchPrivateKey(pubsubTopic)
@@ -77,7 +77,7 @@ func (w *Waku) Send(pubsubTopic string, msg *pb.WakuMessage, priority *int) ([]b
return envelope.Hash().Bytes(), nil
}
-func (w *Waku) broadcast() {
+func (w *NWaku) broadcast() {
for {
var envelope *protocol.Envelope
@@ -103,15 +103,30 @@ func (w *Waku) broadcast() {
publishMethod = LightPush
fn = func(env *protocol.Envelope, logger *zap.Logger) error {
logger.Info("publishing message via lightpush")
- _, err := w.node.Lightpush().Publish(w.ctx, env.Message(), lightpush.WithPubSubTopic(env.PubsubTopic()), lightpush.WithMaxPeers(peersToPublishForLightpush))
+ jsonMsg, err := json.Marshal(env.Message())
+ if err != nil {
+ return err
+ }
+ _, err = w.WakuLightpushPublish(string(jsonMsg), env.PubsubTopic())
return err
}
} else {
publishMethod = Relay
fn = func(env *protocol.Envelope, logger *zap.Logger) error {
- peerCnt := len(w.node.Relay().PubSub().ListPeers(env.PubsubTopic()))
+ peerCnt, err := w.ListPeersInMesh(env.PubsubTopic())
+ if err != nil {
+ return err
+ }
+
logger.Info("publishing message via relay", zap.Int("peerCnt", peerCnt))
- _, err := w.node.Relay().Publish(w.ctx, env.Message(), relay.WithPubSubTopic(env.PubsubTopic()))
+ timeoutMs := 1000
+ msg, err := json.Marshal(env.Message())
+
+ if err != nil {
+ return err
+ }
+
+ _, err = w.WakuRelayPublish(env.PubsubTopic(), string(msg), timeoutMs)
return err
}
}
@@ -138,7 +153,7 @@ func (w *Waku) broadcast() {
}
}
-func (w *Waku) publishEnvelope(envelope *protocol.Envelope, publishFn publish.PublishFn, logger *zap.Logger) {
+func (w *NWaku) publishEnvelope(envelope *protocol.Envelope, publishFn publish.PublishFn, logger *zap.Logger) {
defer w.wg.Done()
if err := publishFn(envelope, logger); err != nil {
diff --git a/wakuv2/nwaku.go b/wakuv2/nwaku.go
index ed9f3e80c..c536e51fe 100644
--- a/wakuv2/nwaku.go
+++ b/wakuv2/nwaku.go
@@ -1,58 +1,2426 @@
package wakuv2
+/*
+ #cgo LDFLAGS: -L../vendor/nwaku/build/ -lnegentropy -lwaku -Wl,--allow-multiple-definition
+ #cgo LDFLAGS: -Lvendor/nwaku/build/ -Wl,-rpath,vendor/nwaku/build/
+
+ #include "../vendor/nwaku/library/libwaku.h"
+ #include <stdio.h>
+ #include <stdlib.h>
+
+ extern void globalEventCallback(int ret, char* msg, size_t len, void* userData);
+
+ typedef struct {
+ int ret;
+ char* msg;
+ size_t len;
+ } Resp;
+
+ void* allocResp() {
+ return calloc(1, sizeof(Resp));
+ }
+
+ void freeResp(void* resp) {
+ if (resp != NULL) {
+ free(resp);
+ }
+ }
+
+ char* getMyCharPtr(void* resp) {
+ if (resp == NULL) {
+ return NULL;
+ }
+ Resp* m = (Resp*) resp;
+ return m->msg;
+ }
+
+ size_t getMyCharLen(void* resp) {
+ if (resp == NULL) {
+ return 0;
+ }
+ Resp* m = (Resp*) resp;
+ return m->len;
+ }
+
+ int getRet(void* resp) {
+ if (resp == NULL) {
+ return 0;
+ }
+ Resp* m = (Resp*) resp;
+ return m->ret;
+ }
+
+ // resp must be set != NULL in case interest on retrieving data from the callback
+ void callback(int ret, char* msg, size_t len, void* resp) {
+ if (resp != NULL) {
+ Resp* m = (Resp*) resp;
+ m->ret = ret;
+ m->msg = msg;
+ m->len = len;
+ }
+ }
+
+ #define WAKU_CALL(call) \
+ do { \
+ int ret = call; \
+ if (ret != 0) { \
+ printf("Failed the call to: %s. Returned code: %d\n", #call, ret); \
+ exit(1); \
+ } \
+ } while (0)
+
+ void* cGoWakuNew(const char* configJson, void* resp) {
+ // We pass NULL because we are not interested in retrieving data from this callback
+ void* ret = waku_new(configJson, (WakuCallBack) callback, resp);
+ return ret;
+ }
+
+ void cGoWakuStart(void* wakuCtx, void* resp) {
+ WAKU_CALL(waku_start(wakuCtx, (WakuCallBack) callback, resp));
+ }
+
+ void cGoWakuStop(void* wakuCtx, void* resp) {
+ WAKU_CALL(waku_stop(wakuCtx, (WakuCallBack) callback, resp));
+ }
+
+ void cGoWakuDestroy(void* wakuCtx, void* resp) {
+ WAKU_CALL(waku_destroy(wakuCtx, (WakuCallBack) callback, resp));
+ }
+
+ void cGoWakuStartDiscV5(void* wakuCtx, void* resp) {
+ WAKU_CALL(waku_start_discv5(wakuCtx, (WakuCallBack) callback, resp));
+ }
+
+ void cGoWakuStopDiscV5(void* wakuCtx, void* resp) {
+ WAKU_CALL(waku_stop_discv5(wakuCtx, (WakuCallBack) callback, resp));
+ }
+
+ void cGoWakuVersion(void* wakuCtx, void* resp) {
+ WAKU_CALL(waku_version(wakuCtx, (WakuCallBack) callback, resp));
+ }
+
+ void cGoWakuSetEventCallback(void* wakuCtx) {
+ // The 'globalEventCallback' Go function is shared amongst all possible NWaku instances.
+
+ // Given that the 'globalEventCallback' is shared, we pass again the
+ // wakuCtx instance but in this case is needed to pick up the correct method
+ // that will handle the event.
+
+ // In other words, for every call the libwaku makes to globalEventCallback,
+ // the 'userData' parameter will bring the context of the node that registered
+ // that globalEventCallback.
+
+ // This technique is needed because cgo only allows to export Go functions and not methods.
+
+ waku_set_event_callback(wakuCtx, (WakuCallBack) globalEventCallback, wakuCtx);
+ }
+
+ void cGoWakuContentTopic(void* wakuCtx,
+ char* appName,
+ int appVersion,
+ char* contentTopicName,
+ char* encoding,
+ void* resp) {
+
+ WAKU_CALL( waku_content_topic(wakuCtx,
+ appName,
+ appVersion,
+ contentTopicName,
+ encoding,
+ (WakuCallBack) callback,
+ resp) );
+ }
+
+ void cGoWakuPubsubTopic(void* wakuCtx, char* topicName, void* resp) {
+ WAKU_CALL( waku_pubsub_topic(wakuCtx, topicName, (WakuCallBack) callback, resp) );
+ }
+
+ void cGoWakuDefaultPubsubTopic(void* wakuCtx, void* resp) {
+ WAKU_CALL (waku_default_pubsub_topic(wakuCtx, (WakuCallBack) callback, resp));
+ }
+
+ void cGoWakuRelayPublish(void* wakuCtx,
+ const char* pubSubTopic,
+ const char* jsonWakuMessage,
+ int timeoutMs,
+ void* resp) {
+
+ WAKU_CALL (waku_relay_publish(wakuCtx,
+ pubSubTopic,
+ jsonWakuMessage,
+ timeoutMs,
+ (WakuCallBack) callback,
+ resp));
+ }
+
+ void cGoWakuRelaySubscribe(void* wakuCtx, char* pubSubTopic, void* resp) {
+ WAKU_CALL ( waku_relay_subscribe(wakuCtx,
+ pubSubTopic,
+ (WakuCallBack) callback,
+ resp) );
+ }
+
+ void cGoWakuRelayUnsubscribe(void* wakuCtx, char* pubSubTopic, void* resp) {
+
+ WAKU_CALL ( waku_relay_unsubscribe(wakuCtx,
+ pubSubTopic,
+ (WakuCallBack) callback,
+ resp) );
+ }
+
+ void cGoWakuConnect(void* wakuCtx, char* peerMultiAddr, int timeoutMs, void* resp) {
+ WAKU_CALL( waku_connect(wakuCtx,
+ peerMultiAddr,
+ timeoutMs,
+ (WakuCallBack) callback,
+ resp) );
+ }
+
+ void cGoWakuListenAddresses(void* wakuCtx, void* resp) {
+ WAKU_CALL (waku_listen_addresses(wakuCtx, (WakuCallBack) callback, resp) );
+ }
+
+ void cGoWakuGetMyENR(void* ctx, void* resp) {
+ WAKU_CALL (waku_get_my_enr(ctx, (WakuCallBack) callback, resp) );
+ }
+
+ void cGoWakuListPeersInMesh(void* ctx, char* pubSubTopic, void* resp) {
+ WAKU_CALL (waku_relay_get_num_peers_in_mesh(ctx, pubSubTopic, (WakuCallBack) callback, resp) );
+ }
+
+ void cGoWakuLightpushPublish(void* wakuCtx,
+ const char* pubSubTopic,
+ const char* jsonWakuMessage,
+ void* resp) {
+
+ WAKU_CALL (waku_lightpush_publish(wakuCtx,
+ pubSubTopic,
+ jsonWakuMessage,
+ (WakuCallBack) callback,
+ resp));
+ }
+
+ void cGoWakuStoreQuery(void* wakuCtx,
+ const char* jsonQuery,
+ const char* peerAddr,
+ int timeoutMs,
+ void* resp) {
+
+ WAKU_CALL (waku_store_query(wakuCtx,
+ jsonQuery,
+ peerAddr,
+ timeoutMs,
+ (WakuCallBack) callback,
+ resp));
+ }
+
+*/
+import "C"
+
import (
+ "context"
+ "crypto/ecdsa"
+ "crypto/sha256"
+ "database/sql"
+ "encoding/hex"
"encoding/json"
+ "errors"
"fmt"
- "io"
- "net/http"
"os"
+ "os/signal"
+ "runtime"
"strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+ "unsafe"
+
+ gethcommon "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/rpc"
+ "github.com/jellydator/ttlcache/v3"
+ "github.com/libp2p/go-libp2p/core/metrics"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/multiformats/go-multiaddr"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/status-im/status-go/connection"
+ "github.com/status-im/status-go/eth-node/types"
+ "github.com/status-im/status-go/logutils"
+ "github.com/status-im/status-go/timesource"
+ "github.com/status-im/status-go/wakuv2/common"
+ "github.com/status-im/status-go/wakuv2/persistence"
+ filterapi "github.com/waku-org/go-waku/waku/v2/api/filter"
+ "github.com/waku-org/go-waku/waku/v2/api/missing"
+ "github.com/waku-org/go-waku/waku/v2/api/publish"
+ "github.com/waku-org/go-waku/waku/v2/dnsdisc"
+ node "github.com/waku-org/go-waku/waku/v2/node"
+ "github.com/waku-org/go-waku/waku/v2/onlinechecker"
+ "github.com/waku-org/go-waku/waku/v2/peermanager"
+ wps "github.com/waku-org/go-waku/waku/v2/peerstore"
+ "github.com/waku-org/go-waku/waku/v2/protocol"
+ "github.com/waku-org/go-waku/waku/v2/protocol/legacy_store"
+ "github.com/waku-org/go-waku/waku/v2/protocol/pb"
+ "github.com/waku-org/go-waku/waku/v2/protocol/store"
+ storepb "github.com/waku-org/go-waku/waku/v2/protocol/store/pb"
+ "github.com/waku-org/go-waku/waku/v2/utils"
+ "go.uber.org/zap"
+ "golang.org/x/crypto/pbkdf2"
+ "golang.org/x/time/rate"
)
-type NwakuInfo struct {
- ListenAddresses []string `json:"listenAddresses"`
- EnrUri string `json:"enrUri"`
+const messageQueueLimit = 1024
+const requestTimeout = 30 * time.Second
+const bootnodesQueryBackoffMs = 200
+const bootnodesMaxRetries = 7
+const cacheTTL = 20 * time.Minute
+const maxRelayPeers = 300
+const randomPeersKeepAliveInterval = 5 * time.Second
+const allPeersKeepAliveInterval = 5 * time.Minute
+const peersToPublishForLightpush = 2
+const publishingLimiterRate = rate.Limit(2)
+const publishingLimitBurst = 4
+
+type SentEnvelope struct {
+ Envelope *protocol.Envelope
+ PublishMethod PublishMethod
}
-func GetNwakuInfo(host *string, port *int) (NwakuInfo, error) {
- nwakuRestPort := 8645
- if port != nil {
- nwakuRestPort = *port
- }
- envNwakuRestPort := os.Getenv("NWAKU_REST_PORT")
- if envNwakuRestPort != "" {
- v, err := strconv.Atoi(envNwakuRestPort)
- if err != nil {
- return NwakuInfo{}, err
+type ErrorSendingEnvelope struct {
+ Error error
+ SentEnvelope SentEnvelope
+}
+
+type ITelemetryClient interface {
+ PushReceivedEnvelope(ctx context.Context, receivedEnvelope *protocol.Envelope)
+ PushSentEnvelope(ctx context.Context, sentEnvelope SentEnvelope)
+ PushErrorSendingEnvelope(ctx context.Context, errorSendingEnvelope ErrorSendingEnvelope)
+ PushPeerCount(ctx context.Context, peerCount int)
+ PushPeerConnFailures(ctx context.Context, peerConnFailures map[string]int)
+}
+
+func (w *NWaku) SetStatusTelemetryClient(client ITelemetryClient) {
+ w.statusTelemetryClient = client
+}
+
+func newTTLCache() *ttlcache.Cache[gethcommon.Hash, *common.ReceivedMessage] {
+ cache := ttlcache.New[gethcommon.Hash, *common.ReceivedMessage](ttlcache.WithTTL[gethcommon.Hash, *common.ReceivedMessage](cacheTTL))
+ go cache.Start()
+ return cache
+}
+
+func (w *NWaku) SubscribeToConnStatusChanges() *types.ConnStatusSubscription {
+ w.connStatusMu.Lock()
+ defer w.connStatusMu.Unlock()
+ subscription := types.NewConnStatusSubscription()
+ w.connStatusSubscriptions[subscription.ID] = subscription
+ return subscription
+}
+
+func (w *NWaku) getDiscV5BootstrapNodes(ctx context.Context, addresses []string) ([]*enode.Node, error) {
+ wg := sync.WaitGroup{}
+ mu := sync.Mutex{}
+ var result []*enode.Node
+
+ w.seededBootnodesForDiscV5 = true
+
+ retrieveENR := func(d dnsdisc.DiscoveredNode, wg *sync.WaitGroup) {
+ mu.Lock()
+ defer mu.Unlock()
+ defer wg.Done()
+ if d.ENR != nil {
+ result = append(result, d.ENR)
}
- nwakuRestPort = v
}
- nwakuRestHost := "localhost"
- if host != nil {
- nwakuRestHost = *host
- }
- envNwakuRestHost := os.Getenv("NWAKU_REST_HOST")
- if envNwakuRestHost != "" {
- nwakuRestHost = envNwakuRestHost
- }
+ for _, addrString := range addresses {
+ if addrString == "" {
+ continue
+ }
- resp, err := http.Get(fmt.Sprintf("http://%s:%d/debug/v1/info", nwakuRestHost, nwakuRestPort))
- if err != nil {
- return NwakuInfo{}, err
+ if strings.HasPrefix(addrString, "enrtree://") {
+ // Use DNS Discovery
+ wg.Add(1)
+ go func(addr string) {
+ defer wg.Done()
+ if err := w.dnsDiscover(ctx, addr, retrieveENR); err != nil {
+ mu.Lock()
+ w.seededBootnodesForDiscV5 = false
+ mu.Unlock()
+ }
+ }(addrString)
+ } else {
+ // It's a normal enr
+ bootnode, err := enode.Parse(enode.ValidSchemes, addrString)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, bootnode)
+ }
}
- defer resp.Body.Close()
+ wg.Wait()
- body, err := io.ReadAll(resp.Body)
- if err != nil {
- return NwakuInfo{}, err
- }
-
- var data NwakuInfo
- err = json.Unmarshal(body, &data)
- if err != nil {
- return NwakuInfo{}, err
- }
-
- return data, nil
+ return result, nil
+}
+
+type fnApplyToEachPeer func(d dnsdisc.DiscoveredNode, wg *sync.WaitGroup)
+
+func (w *NWaku) dnsDiscover(ctx context.Context, enrtreeAddress string, apply fnApplyToEachPeer) error {
+ w.logger.Info("retrieving nodes", zap.String("enr", enrtreeAddress))
+ ctx, cancel := context.WithTimeout(ctx, requestTimeout)
+ defer cancel()
+
+ w.dnsAddressCacheLock.Lock()
+ defer w.dnsAddressCacheLock.Unlock()
+
+ discNodes, ok := w.dnsAddressCache[enrtreeAddress]
+ if !ok {
+ nameserver := w.cfg.Nameserver
+ resolver := w.cfg.Resolver
+
+ var opts []dnsdisc.DNSDiscoveryOption
+ if nameserver != "" {
+ opts = append(opts, dnsdisc.WithNameserver(nameserver))
+ }
+ if resolver != nil {
+ opts = append(opts, dnsdisc.WithResolver(resolver))
+ }
+
+ discoveredNodes, err := dnsdisc.RetrieveNodes(ctx, enrtreeAddress, opts...)
+ if err != nil {
+ w.logger.Warn("dns discovery error ", zap.Error(err))
+ return err
+ }
+
+ if len(discoveredNodes) != 0 {
+ w.dnsAddressCache[enrtreeAddress] = append(w.dnsAddressCache[enrtreeAddress], discoveredNodes...)
+ discNodes = w.dnsAddressCache[enrtreeAddress]
+ }
+ }
+
+ wg := &sync.WaitGroup{}
+ wg.Add(len(discNodes))
+ for _, d := range discNodes {
+ apply(d, wg)
+ }
+ wg.Wait()
+
+ return nil
+}
+
+func (w *NWaku) discoverAndConnectPeers() {
+ fnApply := func(d dnsdisc.DiscoveredNode, wg *sync.WaitGroup) {
+ defer wg.Done()
+ if len(d.PeerInfo.Addrs) != 0 {
+ go w.connect(d.PeerInfo, d.ENR, wps.DNSDiscovery)
+ }
+ }
+
+ for _, addrString := range w.cfg.WakuNodes {
+ addrString := addrString
+ if strings.HasPrefix(addrString, "enrtree://") {
+ // Use DNS Discovery
+ go func() {
+ if err := w.dnsDiscover(w.ctx, addrString, fnApply); err != nil {
+ w.logger.Error("could not obtain dns discovery peers for ClusterConfig.WakuNodes", zap.Error(err), zap.String("dnsDiscURL", addrString))
+ }
+ }()
+ } else {
+ // It is a normal multiaddress
+ addr, err := multiaddr.NewMultiaddr(addrString)
+ if err != nil {
+ w.logger.Warn("invalid peer multiaddress", zap.String("ma", addrString), zap.Error(err))
+ continue
+ }
+
+ peerInfo, err := peer.AddrInfoFromP2pAddr(addr)
+ if err != nil {
+ w.logger.Warn("invalid peer multiaddress", zap.Stringer("addr", addr), zap.Error(err))
+ continue
+ }
+
+ go w.connect(*peerInfo, nil, wps.Static)
+ }
+ }
+}
+
+func (w *NWaku) connect(peerInfo peer.AddrInfo, enr *enode.Node, origin wps.Origin) {
+	// Connection will be pruned eventually by the connection manager if needed.
+	// The peer connector in go-waku uses Connect, so it will execute identify as part of its connection process.
+ addr := peerInfo.Addrs[0]
+ w.WakuConnect(addr.String(), 1000)
+}
+
+func (w *NWaku) telemetryBandwidthStats(telemetryServerURL string) {
+ w.wg.Add(1)
+ defer w.wg.Done()
+
+ if telemetryServerURL == "" {
+ return
+ }
+
+ telemetry := NewBandwidthTelemetryClient(w.logger, telemetryServerURL)
+
+ ticker := time.NewTicker(time.Second * 20)
+ defer ticker.Stop()
+
+ today := time.Now()
+
+ for {
+ select {
+ case <-w.ctx.Done():
+ return
+ case now := <-ticker.C:
+ // Reset totals when day changes
+ if now.Day() != today.Day() {
+ today = now
+ w.bandwidthCounter.Reset()
+ }
+
+ go telemetry.PushProtocolStats(w.bandwidthCounter.GetBandwidthByProtocol())
+ }
+ }
+}
+
+func (w *NWaku) GetStats() types.StatsSummary {
+ stats := w.bandwidthCounter.GetBandwidthTotals()
+ return types.StatsSummary{
+ UploadRate: uint64(stats.RateOut),
+ DownloadRate: uint64(stats.RateIn),
+ }
+}
+
+func (w *NWaku) runPeerExchangeLoop() {
+ w.wg.Add(1)
+ defer w.wg.Done()
+
+ if !w.cfg.EnablePeerExchangeClient {
+ // Currently peer exchange client is only used for light nodes
+ return
+ }
+
+ ticker := time.NewTicker(time.Second * 5)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-w.ctx.Done():
+ w.logger.Debug("Peer exchange loop stopped")
+ return
+ case <-ticker.C:
+ w.logger.Info("Running peer exchange loop")
+
+ // We select only the nodes discovered via DNS Discovery that support peer exchange
+ // We assume that those peers are running peer exchange according to infra config,
+ // If not, the peer selection process in go-waku will filter them out anyway
+ w.dnsAddressCacheLock.RLock()
+ var peers peer.IDSlice
+ for _, record := range w.dnsAddressCache {
+ for _, discoveredNode := range record {
+ if len(discoveredNode.PeerInfo.Addrs) == 0 {
+ continue
+ }
+ // Attempt to connect to the peers.
+ // Peers will be added to the libp2p peer store thanks to identify
+ go w.connect(discoveredNode.PeerInfo, discoveredNode.ENR, wps.DNSDiscovery)
+ peers = append(peers, discoveredNode.PeerID)
+ }
+ }
+ w.dnsAddressCacheLock.RUnlock()
+
+ if len(peers) != 0 {
+ // TODO
+ // err := w.node.PeerExchange().Request(w.ctx, w.cfg.DiscoveryLimit, peer_exchange.WithAutomaticPeerSelection(peers...),
+ // peer_exchange.FilterByShard(int(w.defaultShardInfo.ClusterID), int(w.defaultShardInfo.ShardIDs[0])))
+ // if err != nil {
+ // w.logger.Error("couldnt request peers via peer exchange", zap.Error(err))
+ // }
+ }
+ }
+ }
+}
+
+func (w *NWaku) GetPubsubTopic(topic string) string {
+ if topic == "" {
+ topic = w.cfg.DefaultShardPubsubTopic
+ }
+
+ return topic
+}
+
+// CurrentTime returns current time.
+func (w *NWaku) CurrentTime() time.Time {
+ return w.timesource.Now()
+}
+
+// APIs returns the RPC descriptors the NWaku implementation offers
+func (w *NWaku) APIs() []rpc.API {
+ return []rpc.API{
+ {
+ Namespace: Name,
+ Version: VersionStr,
+ Service: NewPublicWakuAPI(w),
+ Public: false,
+ },
+ }
+}
+
+// Protocols returns the waku sub-protocols ran by this particular client.
+func (w *NWaku) Protocols() []p2p.Protocol {
+ return []p2p.Protocol{}
+}
+
+func (w *NWaku) SendEnvelopeEvent(event common.EnvelopeEvent) int {
+ return w.envelopeFeed.Send(event)
+}
+
+// SubscribeEnvelopeEvents subscribes to envelopes feed.
+// In order to prevent blocking waku producers events must be amply buffered.
+func (w *NWaku) SubscribeEnvelopeEvents(events chan<- common.EnvelopeEvent) event.Subscription {
+ return w.envelopeFeed.Subscribe(events)
+}
+
+// NewKeyPair generates a new cryptographic identity for the client, and injects
+// it into the known identities for message decryption. Returns ID of the new key pair.
+func (w *NWaku) NewKeyPair() (string, error) {
+ key, err := crypto.GenerateKey()
+ if err != nil || !validatePrivateKey(key) {
+ key, err = crypto.GenerateKey() // retry once
+ }
+ if err != nil {
+ return "", err
+ }
+ if !validatePrivateKey(key) {
+ return "", fmt.Errorf("failed to generate valid key")
+ }
+
+ id, err := toDeterministicID(hexutil.Encode(crypto.FromECDSAPub(&key.PublicKey)), common.KeyIDSize)
+ if err != nil {
+ return "", err
+ }
+
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+
+ if w.privateKeys[id] != nil {
+ return "", fmt.Errorf("failed to generate unique ID")
+ }
+ w.privateKeys[id] = key
+ return id, nil
+}
+
+// DeleteKeyPair deletes the specified key if it exists.
+func (w *NWaku) DeleteKeyPair(key string) bool {
+ deterministicID, err := toDeterministicID(key, common.KeyIDSize)
+ if err != nil {
+ return false
+ }
+
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+
+ if w.privateKeys[deterministicID] != nil {
+ delete(w.privateKeys, deterministicID)
+ return true
+ }
+ return false
+}
+
+// AddKeyPair imports an asymmetric private key and returns its identifier.
+func (w *NWaku) AddKeyPair(key *ecdsa.PrivateKey) (string, error) {
+ id, err := makeDeterministicID(hexutil.Encode(crypto.FromECDSAPub(&key.PublicKey)), common.KeyIDSize)
+ if err != nil {
+ return "", err
+ }
+ if w.HasKeyPair(id) {
+ return id, nil // no need to re-inject
+ }
+
+ w.keyMu.Lock()
+ w.privateKeys[id] = key
+ w.keyMu.Unlock()
+
+ return id, nil
+}
+
+// SelectKeyPair adds cryptographic identity, and makes sure
+// that it is the only private key known to the node.
+func (w *NWaku) SelectKeyPair(key *ecdsa.PrivateKey) error {
+ id, err := makeDeterministicID(hexutil.Encode(crypto.FromECDSAPub(&key.PublicKey)), common.KeyIDSize)
+ if err != nil {
+ return err
+ }
+
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+
+ w.privateKeys = make(map[string]*ecdsa.PrivateKey) // reset key store
+ w.privateKeys[id] = key
+
+ return nil
+}
+
+// DeleteKeyPairs removes all cryptographic identities known to the node
+func (w *NWaku) DeleteKeyPairs() error {
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+
+ w.privateKeys = make(map[string]*ecdsa.PrivateKey)
+
+ return nil
+}
+
+// HasKeyPair checks if the waku node is configured with the private key
+// of the specified public pair.
+func (w *NWaku) HasKeyPair(id string) bool {
+ deterministicID, err := toDeterministicID(id, common.KeyIDSize)
+ if err != nil {
+ return false
+ }
+
+ w.keyMu.RLock()
+ defer w.keyMu.RUnlock()
+ return w.privateKeys[deterministicID] != nil
+}
+
+// GetPrivateKey retrieves the private key of the specified identity.
+func (w *NWaku) GetPrivateKey(id string) (*ecdsa.PrivateKey, error) {
+ deterministicID, err := toDeterministicID(id, common.KeyIDSize)
+ if err != nil {
+ return nil, err
+ }
+
+ w.keyMu.RLock()
+ defer w.keyMu.RUnlock()
+ key := w.privateKeys[deterministicID]
+ if key == nil {
+ return nil, fmt.Errorf("invalid id")
+ }
+ return key, nil
+}
+
+// GenerateSymKey generates a random symmetric key and stores it under id,
+// which is then returned. Will be used in the future for session key exchange.
+func (w *NWaku) GenerateSymKey() (string, error) {
+ key, err := common.GenerateSecureRandomData(common.AESKeyLength)
+ if err != nil {
+ return "", err
+ } else if !common.ValidateDataIntegrity(key, common.AESKeyLength) {
+ return "", fmt.Errorf("error in GenerateSymKey: crypto/rand failed to generate random data")
+ }
+
+ id, err := common.GenerateRandomID()
+ if err != nil {
+ return "", fmt.Errorf("failed to generate ID: %s", err)
+ }
+
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+
+ if w.symKeys[id] != nil {
+ return "", fmt.Errorf("failed to generate unique ID")
+ }
+ w.symKeys[id] = key
+ return id, nil
+}
+
+// AddSymKey stores the key with a given id.
+func (w *NWaku) AddSymKey(id string, key []byte) (string, error) {
+ deterministicID, err := toDeterministicID(id, common.KeyIDSize)
+ if err != nil {
+ return "", err
+ }
+
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+
+ if w.symKeys[deterministicID] != nil {
+ return "", fmt.Errorf("key already exists: %v", id)
+ }
+ w.symKeys[deterministicID] = key
+ return deterministicID, nil
+}
+
+// AddSymKeyDirect stores the key, and returns its id.
+func (w *NWaku) AddSymKeyDirect(key []byte) (string, error) {
+ if len(key) != common.AESKeyLength {
+ return "", fmt.Errorf("wrong key size: %d", len(key))
+ }
+
+ id, err := common.GenerateRandomID()
+ if err != nil {
+ return "", fmt.Errorf("failed to generate ID: %s", err)
+ }
+
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+
+ if w.symKeys[id] != nil {
+ return "", fmt.Errorf("failed to generate unique ID")
+ }
+ w.symKeys[id] = key
+ return id, nil
+}
+
+// AddSymKeyFromPassword generates the key from password, stores it, and returns its id.
+func (w *NWaku) AddSymKeyFromPassword(password string) (string, error) {
+ id, err := common.GenerateRandomID()
+ if err != nil {
+ return "", fmt.Errorf("failed to generate ID: %s", err)
+ }
+ if w.HasSymKey(id) {
+ return "", fmt.Errorf("failed to generate unique ID")
+ }
+
+	// kdf should run no less than 0.1 seconds on an average computer,
+	// because it's a once-in-a-session experience
+ derived := pbkdf2.Key([]byte(password), nil, 65356, common.AESKeyLength, sha256.New)
+
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+
+ // double check is necessary, because deriveKeyMaterial() is very slow
+ if w.symKeys[id] != nil {
+ return "", fmt.Errorf("critical error: failed to generate unique ID")
+ }
+ w.symKeys[id] = derived
+ return id, nil
+}
+
+// HasSymKey returns true if there is a key associated with the given id.
+// Otherwise returns false.
+func (w *NWaku) HasSymKey(id string) bool {
+ w.keyMu.RLock()
+ defer w.keyMu.RUnlock()
+ return w.symKeys[id] != nil
+}
+
+// DeleteSymKey deletes the key associated with the name string if it exists.
+func (w *NWaku) DeleteSymKey(id string) bool {
+ w.keyMu.Lock()
+ defer w.keyMu.Unlock()
+ if w.symKeys[id] != nil {
+ delete(w.symKeys, id)
+ return true
+ }
+ return false
+}
+
+// GetSymKey returns the symmetric key associated with the given id.
+func (w *NWaku) GetSymKey(id string) ([]byte, error) {
+ w.keyMu.RLock()
+ defer w.keyMu.RUnlock()
+ if w.symKeys[id] != nil {
+ return w.symKeys[id], nil
+ }
+ return nil, fmt.Errorf("non-existent key ID")
+}
+
+// Subscribe installs a new message handler used for filtering, decrypting
+// and subsequent storing of incoming messages.
+func (w *NWaku) Subscribe(f *common.Filter) (string, error) {
+ f.PubsubTopic = w.GetPubsubTopic(f.PubsubTopic)
+ id, err := w.filters.Install(f)
+ if err != nil {
+ return id, err
+ }
+
+ if w.cfg.LightClient {
+ cf := protocol.NewContentFilter(f.PubsubTopic, f.ContentTopics.ContentTopics()...)
+ w.filterManager.SubscribeFilter(id, cf)
+ }
+
+ return id, nil
+}
+
+// Unsubscribe removes an installed message handler.
+func (w *NWaku) Unsubscribe(ctx context.Context, id string) error {
+ ok := w.filters.Uninstall(id)
+ if !ok {
+ return fmt.Errorf("failed to unsubscribe: invalid ID '%s'", id)
+ }
+
+ if w.cfg.LightClient {
+ w.filterManager.UnsubscribeFilter(id)
+ }
+
+ return nil
+}
+
+// GetFilter returns the filter by id.
+func (w *NWaku) GetFilter(id string) *common.Filter {
+ return w.filters.Get(id)
+}
+
+// UnsubscribeMany removes installed message handlers.
+func (w *NWaku) UnsubscribeMany(ids []string) error {
+ for _, id := range ids {
+ w.logger.Info("cleaning up filter", zap.String("id", id))
+ ok := w.filters.Uninstall(id)
+ if !ok {
+ w.logger.Warn("could not remove filter with id", zap.String("id", id))
+ }
+ }
+ return nil
+}
+
+func (w *NWaku) SkipPublishToTopic(value bool) {
+ w.cfg.SkipPublishToTopic = value
+}
+
+func (w *NWaku) ConfirmMessageDelivered(hashes []gethcommon.Hash) {
+ if !w.cfg.EnableStoreConfirmationForMessagesSent {
+ return
+ }
+ w.messageSentCheck.DeleteByMessageIDs(hashes)
+}
+
+func (w *NWaku) SetStorePeerID(peerID peer.ID) {
+ if w.messageSentCheck != nil {
+ w.messageSentCheck.SetStorePeerID(peerID)
+ }
+}
+
+func (w *NWaku) Query(ctx context.Context,
+ peerID peer.ID,
+ query store.FilterCriteria,
+ cursor []byte,
+ opts []store.RequestOption,
+ processEnvelopes bool) ([]byte, int, error) {
+ requestID := protocol.GenerateRequestID()
+
+ opts = append(opts,
+ store.WithRequestID(requestID),
+ store.WithPeer(peerID),
+ store.WithCursor(cursor))
+
+ logger := w.logger.With(zap.String("requestID", hexutil.Encode(requestID)), zap.Stringer("peerID", peerID))
+
+ logger.Debug("store.query",
+ logutils.WakuMessageTimestamp("startTime", query.TimeStart),
+ logutils.WakuMessageTimestamp("endTime", query.TimeEnd),
+ zap.Strings("contentTopics", query.ContentTopics.ToList()),
+ zap.String("pubsubTopic", query.PubsubTopic),
+ zap.String("cursor", hexutil.Encode(cursor)),
+ )
+
+ // queryStart := time.Now()
+
+ params := new(store.Parameters)
+
+ optList := store.DefaultOptions()
+ optList = append(optList, opts...)
+ for _, opt := range optList {
+ err := opt(params)
+ if err != nil {
+ return nil, 0, err
+ }
+ }
+
+ storeRequest := &storepb.StoreQueryRequest{
+ RequestId: hex.EncodeToString(requestID),
+ IncludeData: params.IncludeData,
+ PaginationForward: params.Forward,
+ PaginationLimit: ¶ms.PageLimit,
+ }
+
+ jsonStoreRequest, err := json.Marshal(storeRequest)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ result, err := w.wakuStoreQuery(string(jsonStoreRequest), string(peerID), 10000)
+
+ fmt.Println("Store result ", result)
+
+ // result, err := w.node.Store().Query(ctx, query, opts...)
+ // queryDuration := time.Since(queryStart)
+ // if err != nil {
+ // logger.Error("error querying storenode", zap.Error(err))
+
+ // if w.onHistoricMessagesRequestFailed != nil {
+ // w.onHistoricMessagesRequestFailed(requestID, peerID, err)
+ // }
+ // return nil, 0, err
+ // }
+
+ // messages := result.Messages()
+ // envelopesCount := len(messages)
+ // w.logger.Debug("store.query response", zap.Duration("queryDuration", queryDuration), zap.Int("numMessages", envelopesCount), zap.Bool("hasCursor", result.IsComplete() && result.Cursor() != nil))
+ // for _, mkv := range messages {
+ // msg := mkv.Message
+
+ // // Temporarily setting RateLimitProof to nil so it matches the WakuMessage protobuffer we are sending
+ // // See https://github.com/vacp2p/rfc/issues/563
+ // mkv.Message.RateLimitProof = nil
+
+ // envelope := protocol.NewEnvelope(msg, msg.GetTimestamp(), query.PubsubTopic)
+
+ // err = w.OnNewEnvelopes(envelope, common.StoreMessageType, processEnvelopes)
+ // if err != nil {
+ // return nil, 0, err
+ // }
+ // }
+
+ // return result.Cursor(), envelopesCount, nil
+ return nil, 0, nil
+}
+
+// OnNewEnvelope is the NWaku FilterManager API callback that gets invoked when any new message is received by Filter.
+func (w *NWaku) OnNewEnvelope(env *protocol.Envelope) error {
+ return w.OnNewEnvelopes(env, common.RelayedMessageType, false)
+}
+
+// Start implements node.Service, starting the background data propagation thread
+// of the NWaku protocol.
+func (w *NWaku) Start() error {
+ // if w.ctx == nil {
+ // w.ctx, w.cancel = context.WithCancel(context.Background())
+ // }
+
+ // var err error
+ // if w.node, err = node.New(w.options...); err != nil {
+ // return fmt.Errorf("failed to create a go-waku node: %v", err)
+ // }
+
+ // w.goingOnline = make(chan struct{})
+
+ err := w.WakuStart()
+ if err != nil {
+ fmt.Println("Error happened:", err.Error())
+ return err
+ }
+
+ ch := make(chan os.Signal, 1)
+ signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
+ <-ch
+
+ // if err = w.node.Start(w.ctx); err != nil {
+ // return fmt.Errorf("failed to start go-waku node: %v", err)
+ // }
+
+ // w.logger.Info("WakuV2 PeerID", zap.Stringer("id", w.node.Host().ID()))
+
+ // w.discoverAndConnectPeers()
+
+ // if w.cfg.EnableDiscV5 {
+ // err := w.node.DiscV5().Start(w.ctx)
+ // if err != nil {
+ // return err
+ // }
+ // }
+
+ // w.wg.Add(1)
+ // go func() {
+ // defer w.wg.Done()
+ // ticker := time.NewTicker(5 * time.Second)
+ // defer ticker.Stop()
+ // for {
+ // select {
+ // case <-w.ctx.Done():
+ // return
+ // case <-ticker.C:
+ // w.checkForConnectionChanges()
+ // case <-w.topicHealthStatusChan:
+ // // TODO: https://github.com/status-im/status-go/issues/4628
+ // case <-w.connectionNotifChan:
+ // w.checkForConnectionChanges()
+ // }
+ // }
+ // }()
+
+ // go w.telemetryBandwidthStats(w.cfg.TelemetryServerURL)
+ //TODO: commenting for now so that only fleet nodes are used.
+ //Need to uncomment once filter peer scoring etc is implemented.
+ // go w.runPeerExchangeLoop()
+
+ // if w.cfg.EnableMissingMessageVerification {
+
+ // w.missingMsgVerifier = missing.NewMissingMessageVerifier(
+ // w.node.Store(),
+ // w,
+ // w.node.Timesource(),
+ // w.logger)
+
+ // w.missingMsgVerifier.Start(w.ctx)
+
+ // w.wg.Add(1)
+ // go func() {
+ // w.wg.Done()
+ // for {
+ // select {
+ // case <-w.ctx.Done():
+ // return
+ // case envelope := <-w.missingMsgVerifier.C:
+ // err = w.OnNewEnvelopes(envelope, common.MissingMessageType, false)
+ // if err != nil {
+ // w.logger.Error("OnNewEnvelopes error", zap.Error(err))
+ // }
+ // }
+ // }
+ // }()
+ // }
+
+ // if w.cfg.LightClient {
+	// // Create FilterManager that will maintain peer connectivity
+ // // for installed filters
+ // w.filterManager = filterapi.NewFilterManager(w.ctx, w.logger, w.cfg.MinPeersForFilter,
+ // w,
+ // w.node.FilterLightnode())
+ // }
+
+ // err = w.setupRelaySubscriptions()
+ // if err != nil {
+ // return err
+ // }
+
+ // numCPU := runtime.NumCPU()
+ // for i := 0; i < numCPU; i++ {
+ // go w.processQueueLoop()
+ // }
+
+ // go w.broadcast()
+
+ // go w.sendQueue.Start(w.ctx)
+
+ // if w.cfg.EnableStoreConfirmationForMessagesSent {
+ // w.confirmMessagesSent()
+ // }
+
+	// we should wait for `seedBootnodesForDiscV5` to shut down smoothly before setting w.ctx to nil within `w.Stop()`
+ // go w.seedBootnodesForDiscV5()
+
+ return nil
+}
+
+func (w *NWaku) checkForConnectionChanges() {
+
+ // isOnline := len(w.node.Host().Network().Peers()) > 0
+
+ // w.connStatusMu.Lock()
+
+ // latestConnStatus := types.ConnStatus{
+ // IsOnline: isOnline,
+ // Peers: FormatPeerStats(w.node),
+ // }
+
+ // w.logger.Debug("peer stats",
+ // zap.Int("peersCount", len(latestConnStatus.Peers)),
+ // zap.Any("stats", latestConnStatus))
+ // for k, subs := range w.connStatusSubscriptions {
+ // if !subs.Send(latestConnStatus) {
+ // delete(w.connStatusSubscriptions, k)
+ // }
+ // }
+
+ // w.connStatusMu.Unlock()
+
+ // if w.onPeerStats != nil {
+ // w.onPeerStats(latestConnStatus)
+ // }
+
+ // if w.statusTelemetryClient != nil {
+ // connFailures := FormatPeerConnFailures(w.node)
+ // w.statusTelemetryClient.PushPeerCount(w.ctx, w.PeerCount())
+ // w.statusTelemetryClient.PushPeerConnFailures(w.ctx, connFailures)
+ // }
+
+ // w.ConnectionChanged(connection.State{
+ // Type: w.state.Type, //setting state type as previous one since there won't be a change here
+ // Offline: !latestConnStatus.IsOnline,
+ // })
+}
+
+// func (w *NWaku) confirmMessagesSent() {
+// w.messageSentCheck = publish.NewMessageSentCheck(w.ctx, w.node.Store(), w.node.Timesource(), w.logger)
+// go w.messageSentCheck.Start()
+
+// go func() {
+// for {
+// select {
+// case <-w.ctx.Done():
+// return
+// case hash := <-w.messageSentCheck.MessageStoredChan:
+// w.SendEnvelopeEvent(common.EnvelopeEvent{
+// Hash: hash,
+// Event: common.EventEnvelopeSent,
+// })
+// case hash := <-w.messageSentCheck.MessageExpiredChan:
+// w.SendEnvelopeEvent(common.EnvelopeEvent{
+// Hash: hash,
+// Event: common.EventEnvelopeExpired,
+// })
+// }
+// }
+// }()
+// }
+
+func (w *NWaku) MessageExists(mh pb.MessageHash) (bool, error) {
+ w.poolMu.Lock()
+ defer w.poolMu.Unlock()
+ return w.envelopeCache.Has(gethcommon.Hash(mh)), nil
+}
+
+func (w *NWaku) SetTopicsToVerifyForMissingMessages(peerID peer.ID, pubsubTopic string, contentTopics []string) {
+ if !w.cfg.EnableMissingMessageVerification {
+ return
+ }
+
+ w.missingMsgVerifier.SetCriteriaInterest(peerID, protocol.NewContentFilter(pubsubTopic, contentTopics...))
+}
+
+func (w *NWaku) setupRelaySubscriptions() error {
+ if w.cfg.LightClient {
+ return nil
+ }
+
+ if w.protectedTopicStore != nil {
+ protectedTopics, err := w.protectedTopicStore.ProtectedTopics()
+ if err != nil {
+ return err
+ }
+
+ for _, pt := range protectedTopics {
+ // Adding subscription to protected topics
+ // err = w.subscribeToPubsubTopicWithWakuRelay(pt.Topic, pt.PubKey)
+ // if err != nil {
+ // return err
+ // }
+
+ fmt.Println("Subscription to topic: ", pt.Topic)
+ err = w.WakuRelaySubscribe(pt.Topic)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ err := w.WakuRelaySubscribe(w.cfg.DefaultShardPubsubTopic)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (w *NWaku) OnNewEnvelopes(envelope *protocol.Envelope, msgType common.MessageType, processImmediately bool) error {
+ if envelope == nil {
+ return nil
+ }
+
+ recvMessage := common.NewReceivedMessage(envelope, msgType)
+ if recvMessage == nil {
+ return nil
+ }
+
+ if w.statusTelemetryClient != nil {
+ w.statusTelemetryClient.PushReceivedEnvelope(w.ctx, envelope)
+ }
+
+ logger := w.logger.With(
+ zap.String("messageType", msgType),
+ zap.Stringer("envelopeHash", envelope.Hash()),
+ zap.String("pubsubTopic", envelope.PubsubTopic()),
+ zap.String("contentTopic", envelope.Message().ContentTopic),
+ logutils.WakuMessageTimestamp("timestamp", envelope.Message().Timestamp),
+ )
+
+ logger.Debug("received new envelope")
+ trouble := false
+
+ _, err := w.add(recvMessage, processImmediately)
+ if err != nil {
+ logger.Info("invalid envelope received", zap.Error(err))
+ trouble = true
+ }
+
+ common.EnvelopesValidatedCounter.Inc()
+
+ if trouble {
+ return errors.New("received invalid envelope")
+ }
+
+ return nil
+}
+
+// addEnvelope adds an envelope to the envelope cache, used for sending
+func (w *NWaku) addEnvelope(envelope *common.ReceivedMessage) {
+ w.poolMu.Lock()
+ w.envelopeCache.Set(envelope.Hash(), envelope, ttlcache.DefaultTTL)
+ w.poolMu.Unlock()
+}
+
+func (w *NWaku) add(recvMessage *common.ReceivedMessage, processImmediately bool) (bool, error) {
+ common.EnvelopesReceivedCounter.Inc()
+
+ w.poolMu.Lock()
+ envelope := w.envelopeCache.Get(recvMessage.Hash())
+ alreadyCached := envelope != nil
+ w.poolMu.Unlock()
+
+ if !alreadyCached {
+ recvMessage.Processed.Store(false)
+ w.addEnvelope(recvMessage)
+ }
+
+ logger := w.logger.With(zap.String("envelopeHash", recvMessage.Hash().Hex()))
+
+ if alreadyCached {
+ logger.Debug("w envelope already cached")
+ common.EnvelopesCachedCounter.WithLabelValues("hit").Inc()
+ } else {
+ logger.Debug("cached w envelope")
+ common.EnvelopesCachedCounter.WithLabelValues("miss").Inc()
+ common.EnvelopesSizeMeter.Observe(float64(len(recvMessage.Envelope.Message().Payload)))
+ }
+
+ if !alreadyCached || !envelope.Value().Processed.Load() {
+ if processImmediately {
+ logger.Debug("immediately processing envelope")
+ w.processMessage(recvMessage)
+ } else {
+ logger.Debug("posting event")
+ w.postEvent(recvMessage) // notify the local node about the new message
+ }
+ }
+
+ return true, nil
+}
+
+// postEvent queues the message for further processing.
+func (w *NWaku) postEvent(envelope *common.ReceivedMessage) {
+ w.msgQueue <- envelope
+}
+
+// processQueueLoop delivers the messages to the watchers during the lifetime of the waku node.
+func (w *NWaku) processQueueLoop() {
+ if w.ctx == nil {
+ return
+ }
+ for {
+ select {
+ case <-w.ctx.Done():
+ return
+ case e := <-w.msgQueue:
+ w.processMessage(e)
+ }
+ }
+}
+
// processMessage dispatches a received envelope to the installed filters and
// maintains the bookkeeping maps used for store confirmation. An
// EventEnvelopeAvailable event is emitted whether or not a filter matched.
func (w *NWaku) processMessage(e *common.ReceivedMessage) {
	logger := w.logger.With(
		zap.Stringer("envelopeHash", e.Envelope.Hash()),
		zap.String("pubsubTopic", e.PubsubTopic),
		zap.String("contentTopic", e.ContentTopic.ContentTopic()),
		zap.Int64("timestamp", e.Envelope.Message().GetTimestamp()),
	)

	if e.MsgType == common.StoreMessageType {
		// We need to insert it first, and then remove it if not matched,
		// as messages are processed asynchronously
		w.storeMsgIDsMu.Lock()
		w.storeMsgIDs[e.Hash()] = true
		w.storeMsgIDsMu.Unlock()
	}

	// Outgoing non-ephemeral messages are registered with the sent-message
	// check so their arrival at a store node can later be confirmed.
	ephemeral := e.Envelope.Message().Ephemeral
	if w.cfg.EnableStoreConfirmationForMessagesSent && e.MsgType == common.SendMessageType && (ephemeral == nil || !*ephemeral) {
		w.messageSentCheck.Add(e.PubsubTopic, e.Hash(), e.Sent)
	}

	matched := w.filters.NotifyWatchers(e)

	// If not matched we remove it
	if !matched {
		logger.Debug("filters did not match")
		w.storeMsgIDsMu.Lock()
		delete(w.storeMsgIDs, e.Hash())
		w.storeMsgIDsMu.Unlock()
	} else {
		logger.Debug("filters did match")
		e.Processed.Store(true)
	}

	w.envelopeFeed.Send(common.EnvelopeEvent{
		Topic: e.ContentTopic,
		Hash:  e.Hash(),
		Event: common.EventEnvelopeAvailable,
	})
}
+
+// GetEnvelope retrieves an envelope from the message queue by its hash.
+// It returns nil if the envelope can not be found.
+func (w *NWaku) GetEnvelope(hash gethcommon.Hash) *common.ReceivedMessage {
+ w.poolMu.RLock()
+ defer w.poolMu.RUnlock()
+
+ envelope := w.envelopeCache.Get(hash)
+ if envelope == nil {
+ return nil
+ }
+
+ return envelope.Value()
+}
+
+// isEnvelopeCached checks if envelope with specific hash has already been received and cached.
+func (w *NWaku) IsEnvelopeCached(hash gethcommon.Hash) bool {
+ w.poolMu.Lock()
+ defer w.poolMu.Unlock()
+
+ return w.envelopeCache.Has(hash)
+}
+
+func (w *NWaku) ClearEnvelopesCache() {
+ w.poolMu.Lock()
+ defer w.poolMu.Unlock()
+
+ w.envelopeCache.Stop()
+ w.envelopeCache = newTTLCache()
+}
+
// PeerCount returns the number of connected peers.
// NOTE(review): not yet wired to the nwaku bindings — always returns 0.
func (w *NWaku) PeerCount() int {
	return 0
	// return w.node.PeerCount()
}
+
// Peers returns per-peer statistics.
// NOTE(review): not yet wired to the nwaku bindings — always returns nil.
func (w *NWaku) Peers() types.PeerStats {
	return nil
	// return FormatPeerStats(w.node)
}
+
+func (w *NWaku) RelayPeersByTopic(topic string) (*types.PeerList, error) {
+ if w.cfg.LightClient {
+ return nil, errors.New("only available in relay mode")
+ }
+
+ // return &types.PeerList{
+ // FullMeshPeers: w.node.Relay().PubSub().MeshPeers(topic),
+ // AllPeers: w.node.Relay().PubSub().ListPeers(topic),
+ // }, nil
+ return nil, nil
+}
+
+func (w *NWaku) SubscribeToPubsubTopic(topic string, pubkey *ecdsa.PublicKey) error {
+ topic = w.GetPubsubTopic(topic)
+
+ if !w.cfg.LightClient {
+ err := w.WakuRelaySubscribe(topic)
+ // err := w.subscribeToPubsubTopicWithWakuRelay(topic, pubkey)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (w *NWaku) UnsubscribeFromPubsubTopic(topic string) error {
+ topic = w.GetPubsubTopic(topic)
+
+ if !w.cfg.LightClient {
+ err := w.WakuRelayUnsubscribe(topic)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (w *NWaku) RetrievePubsubTopicKey(topic string) (*ecdsa.PrivateKey, error) {
+ topic = w.GetPubsubTopic(topic)
+ if w.protectedTopicStore == nil {
+ return nil, nil
+ }
+
+ return w.protectedTopicStore.FetchPrivateKey(topic)
+}
+
+func (w *NWaku) StorePubsubTopicKey(topic string, privKey *ecdsa.PrivateKey) error {
+ topic = w.GetPubsubTopic(topic)
+ if w.protectedTopicStore == nil {
+ return nil
+ }
+
+ return w.protectedTopicStore.Insert(topic, privKey, &privKey.PublicKey)
+}
+
+func (w *NWaku) RemovePubsubTopicKey(topic string) error {
+ topic = w.GetPubsubTopic(topic)
+ if w.protectedTopicStore == nil {
+ return nil
+ }
+
+ return w.protectedTopicStore.Delete(topic)
+}
+
// handleNetworkChangeFromApp reacts to connectivity changes reported by the
// host application (e.g. the mobile shell) rather than inferred from peers.
// NOTE(review): currently a no-op — the go-waku implementation below is
// commented out pending nwaku bindings for peer disconnection.
func (w *NWaku) handleNetworkChangeFromApp(state connection.State) {
	//If connection state is reported by something other than peerCount becoming 0 e.g from mobile app, disconnect all peers
	// if (state.Offline && len(w.node.Host().Network().Peers()) > 0) ||
	// 	(w.state.Type != state.Type && !w.state.Offline && !state.Offline) { // network switched between wifi and cellular
	// 	w.logger.Info("connection switched or offline detected via mobile, disconnecting all peers")
	// 	w.node.DisconnectAllPeers()
	// 	if w.cfg.LightClient {
	// 		w.filterManager.NetworkChange()
	// 	}
	// }
}
+
// ConnectionChanged reacts to connectivity updates from the host application:
// light clients delegate to the filter manager, relay nodes trigger peer
// discovery and signal the goingOnline channel on an offline→online edge.
func (w *NWaku) ConnectionChanged(state connection.State) {
	isOnline := !state.Offline
	if w.cfg.LightClient {
		//TODO: Update this as per https://github.com/waku-org/go-waku/issues/1114
		go w.filterManager.OnConnectionStatusChange("", isOnline)
		w.handleNetworkChangeFromApp(state)
	} else {
		// for lightClient state update and onlineChange is handled in filterManager.
		// going online
		if isOnline && !w.onlineChecker.IsOnline() {
			//TODO: analyze if we need to discover and connect to peers for relay.
			w.discoverAndConnectPeers()
			// Non-blocking send: never stall the caller if nothing is
			// currently draining goingOnline.
			select {
			case w.goingOnline <- struct{}{}:
			default:
				w.logger.Warn("could not write on connection changed channel")
			}
		}
		// update state
		w.onlineChecker.SetOnline(isOnline)
	}
	w.state = state
}
+
// AddStorePeer registers a static store-protocol peer by multiaddress.
// NOTE(review): not yet wired to the nwaku bindings — always returns ("", nil).
func (w *NWaku) AddStorePeer(address multiaddr.Multiaddr) (peer.ID, error) {
	// peerID, err := w.node.AddPeer(address, wps.Static, w.cfg.DefaultShardedPubsubTopics, store.StoreQueryID_v300)
	// if err != nil {
	// 	return "", err
	// }
	// return peerID, nil
	return "", nil
}
+
+func (w *NWaku) timestamp() int64 {
+ return w.timesource.Now().UnixNano()
+}
+
// AddRelayPeer registers a static relay-protocol peer by multiaddress.
// NOTE(review): not yet wired to the nwaku bindings — always returns ("", nil).
func (w *NWaku) AddRelayPeer(address multiaddr.Multiaddr) (peer.ID, error) {
	// peerID, err := w.node.AddPeer(address, wps.Static, w.cfg.DefaultShardedPubsubTopics, relay.WakuRelayID_v200)
	// if err != nil {
	// 	return "", err
	// }
	// return peerID, nil
	return "", nil
}
+
// DialPeer connects to a peer by multiaddress.
// NOTE(review): not yet wired to the nwaku bindings — always returns nil.
func (w *NWaku) DialPeer(address multiaddr.Multiaddr) error {
	// ctx, cancel := context.WithTimeout(w.ctx, requestTimeout)
	// defer cancel()
	// return w.node.DialPeerWithMultiAddress(ctx, address)
	return nil
}
+
// DialPeerByID connects to a known peer by its libp2p peer ID.
// NOTE(review): not yet wired to the nwaku bindings — always returns nil.
func (w *NWaku) DialPeerByID(peerID peer.ID) error {
	// ctx, cancel := context.WithTimeout(w.ctx, requestTimeout)
	// defer cancel()
	// return w.node.DialPeerByID(ctx, peerID)
	return nil
}
+
// DropPeer closes the connection to the given peer.
// NOTE(review): not yet wired to the nwaku bindings — always returns nil.
func (w *NWaku) DropPeer(peerID peer.ID) error {
	// return w.node.ClosePeerById(peerID)
	return nil
}
+
+func (w *NWaku) ProcessingP2PMessages() bool {
+ w.storeMsgIDsMu.Lock()
+ defer w.storeMsgIDsMu.Unlock()
+ return len(w.storeMsgIDs) != 0
+}
+
+func (w *NWaku) MarkP2PMessageAsProcessed(hash gethcommon.Hash) {
+ w.storeMsgIDsMu.Lock()
+ defer w.storeMsgIDsMu.Unlock()
+ delete(w.storeMsgIDs, hash)
+}
+
+func (w *NWaku) Clean() error {
+ w.msgQueue = make(chan *common.ReceivedMessage, messageQueueLimit)
+
+ for _, f := range w.filters.All() {
+ f.Messages = common.NewMemoryMessageStore()
+ }
+
+ return nil
+}
+
// PeerID returns this node's libp2p peer ID.
// NOTE(review): not yet wired to the nwaku bindings — always returns "".
func (w *NWaku) PeerID() peer.ID {
	// return w.node.Host().ID()
	return ""
}
+
// Peerstore returns the host's peerstore.
// NOTE(review): not yet wired to the nwaku bindings — always returns nil.
func (w *NWaku) Peerstore() peerstore.Peerstore {
	// return w.node.Host().Peerstore()
	return nil
}
+
+// validatePrivateKey checks the format of the given private key.
+func validatePrivateKey(k *ecdsa.PrivateKey) bool {
+ if k == nil || k.D == nil || k.D.Sign() == 0 {
+ return false
+ }
+ return common.ValidatePublicKey(&k.PublicKey)
+}
+
+// makeDeterministicID generates a deterministic ID, based on a given input
+func makeDeterministicID(input string, keyLen int) (id string, err error) {
+ buf := pbkdf2.Key([]byte(input), nil, 4096, keyLen, sha256.New)
+ if !common.ValidateDataIntegrity(buf, common.KeyIDSize) {
+ return "", fmt.Errorf("error in GenerateDeterministicID: failed to generate key")
+ }
+ id = gethcommon.Bytes2Hex(buf)
+ return id, err
+}
+
+// toDeterministicID reviews incoming id, and transforms it to format
+// expected internally be private key store. Originally, public keys
+// were used as keys, now random keys are being used. And in order to
+// make it easier to consume, we now allow both random IDs and public
+// keys to be passed.
+func toDeterministicID(id string, expectedLen int) (string, error) {
+ if len(id) != (expectedLen * 2) { // we received hex key, so number of chars in id is doubled
+ var err error
+ id, err = makeDeterministicID(id, expectedLen)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ return id, nil
+}
+
+func FormatPeerStats(wakuNode *node.WakuNode) types.PeerStats {
+ p := make(types.PeerStats)
+ for k, v := range wakuNode.PeerStats() {
+ p[k] = types.WakuV2Peer{
+ Addresses: utils.EncapsulatePeerID(k, wakuNode.Host().Peerstore().PeerInfo(k).Addrs...),
+ Protocols: v,
+ }
+ }
+ return p
+}
+
// StoreNode returns the store protocol handle.
// NOTE(review): not yet wired to the nwaku bindings — always returns nil.
func (w *NWaku) StoreNode() *store.WakuStore {
	// return w.node.Store()
	return nil
}
+
+func FormatPeerConnFailures(wakuNode *node.WakuNode) map[string]int {
+ p := make(map[string]int)
+ for _, peerID := range wakuNode.Host().Network().Peers() {
+ peerInfo := wakuNode.Host().Peerstore().PeerInfo(peerID)
+ connFailures := wakuNode.Host().Peerstore().(wps.WakuPeerstore).ConnFailures(peerInfo)
+ if connFailures > 0 {
+ p[peerID.String()] = connFailures
+ }
+ }
+ return p
+}
+
// LegacyStoreNode returns the legacy store protocol handle.
// NOTE(review): not yet wired to the nwaku bindings — always returns nil.
func (w *NWaku) LegacyStoreNode() legacy_store.Store {
	// return w.node.LegacyStore()
	return nil
}
+
// String aliases for the identifiers exchanged with the nwaku C API.
type WakuMessageHash = string
type WakuPubsubTopic = string
type WakuContentTopic = string

// WakuConfig is the JSON configuration document handed to cGoWakuNew.
type WakuConfig struct {
	Host        string `json:"host,omitempty"`
	Port        int    `json:"port,omitempty"`
	NodeKey     string `json:"key,omitempty"`
	EnableRelay bool   `json:"relay"`
	LogLevel    string `json:"logLevel"`
}

// NOTE(review): package-level stash of the last wakuCtx created by wakuNew.
// It is written but never read in this file — presumably a debugging aid;
// confirm whether it can be removed.
var jamon unsafe.Pointer
+
// NWaku is a Waku v2 node implementation backed by the nwaku C library: the
// node itself lives behind wakuCtx (an opaque C pointer) and is driven through
// cgo calls, while this struct keeps the Go-side state (filters, key stores,
// envelope cache, queues and callbacks).
type NWaku struct {
	wakuCtx unsafe.Pointer // opaque handle returned by cGoWakuNew

	appDB *sql.DB

	dnsAddressCache     map[string][]dnsdisc.DiscoveredNode // Map to store the multiaddresses returned by dns discovery
	dnsAddressCacheLock *sync.RWMutex                       // lock to handle access to the map

	// Filter-related
	filters       *common.Filters // Message filters installed with Subscribe function
	filterManager *filterapi.FilterManager

	privateKeys map[string]*ecdsa.PrivateKey // Private key storage
	symKeys     map[string][]byte            // Symmetric key storage
	keyMu       sync.RWMutex                 // Mutex associated with key stores

	envelopeCache *ttlcache.Cache[gethcommon.Hash, *common.ReceivedMessage] // Pool of envelopes currently tracked by this node
	poolMu        sync.RWMutex                                              // Mutex to sync the message and expiration pools

	bandwidthCounter *metrics.BandwidthCounter

	protectedTopicStore *persistence.ProtectedTopicsStore

	sendQueue *publish.MessageQueue
	limiter   *publish.PublishRateLimiter

	missingMsgVerifier *missing.MissingMessageVerifier

	msgQueue chan *common.ReceivedMessage // Message queue for waku messages that havent been decoded

	ctx    context.Context
	cancel context.CancelFunc
	wg     sync.WaitGroup

	cfg     *Config
	options []node.WakuNodeOption

	envelopeFeed event.Feed

	storeMsgIDs   map[gethcommon.Hash]bool // Map of the currently processing ids
	storeMsgIDsMu sync.RWMutex

	messageSentCheck *publish.MessageSentCheck

	topicHealthStatusChan   chan peermanager.TopicHealthStatus
	connectionNotifChan     chan node.PeerConnection
	connStatusSubscriptions map[string]*types.ConnStatusSubscription
	connStatusMu            sync.Mutex
	onlineChecker           *onlinechecker.DefaultOnlineChecker
	state                   connection.State

	logger *zap.Logger

	// NTP Synced timesource
	timesource *timesource.NTPTimeSource

	// seededBootnodesForDiscV5 indicates whether we manage to retrieve discovery
	// bootnodes successfully
	seededBootnodesForDiscV5 bool

	// goingOnline is channel that notifies when connectivity has changed from offline to online
	goingOnline chan struct{}

	// discV5BootstrapNodes is the ENR to be used to fetch bootstrap nodes for discovery
	discV5BootstrapNodes []string

	onHistoricMessagesRequestFailed func([]byte, peer.ID, error)
	onPeerStats                     func(types.ConnStatus)

	statusTelemetryClient ITelemetryClient

	defaultShardInfo protocol.RelayShards
}
+
// Stop shuts down the underlying nwaku node; it satisfies the node service
// interface by delegating to WakuStop.
func (w *NWaku) Stop() error {
	return w.WakuStop()
}
+
// WakuSetup initializes the nwaku C library's global state. It should only be
// called once in the whole app's life (see New).
func WakuSetup() {
	C.waku_setup()
}
+
// printStackTrace writes the current goroutine's stack trace to stdout.
func printStackTrace() {
	const bufSize = 102400
	trace := make([]byte, bufSize)
	n := runtime.Stack(trace, false) // false: this goroutine only
	fmt.Printf("Current stack trace:\n%s\n", trace[:n])
}
+
// wakuNew allocates a nwaku node through the C bindings and wraps it in an
// NWaku value with all Go-side state initialized. On C-level failure it
// returns the error message reported by the library.
//
// NOTE(review): the nodeKey, fleet and appDB parameters are currently unused,
// and the config below hard-codes Port and NodeKey instead of using cfg /
// nodeKey — confirm and fix before release.
func wakuNew(nodeKey *ecdsa.PrivateKey,
	fleet string,
	cfg *Config,
	logger *zap.Logger,
	appDB *sql.DB,
	ts *timesource.NTPTimeSource,
	onHistoricMessagesRequestFailed func([]byte, peer.ID, error), onPeerStats func(types.ConnStatus)) (*NWaku, error) {

	// TODO: use cfg.Port and derive NodeKey from the nodeKey parameter; these
	// hard-coded values are placeholders.
	nwakuConfig := WakuConfig{
		Host:        cfg.Host,
		Port:        30303,
		NodeKey:     "11d0dcea28e86f81937a3bd1163473c7fbc0a0db54fd72914849bc47bdf78710",
		EnableRelay: true,
		LogLevel:    "DEBUG",
	}

	var err error
	if logger == nil {
		logger, err = zap.NewDevelopment()
		if err != nil {
			return nil, err
		}
	}
	if ts == nil {
		ts = timesource.Default()
	}

	cfg = setDefaults(cfg)
	if err = cfg.Validate(logger); err != nil {
		return nil, err
	}

	ctx, cancel := context.WithCancel(context.Background())

	jsonConfig, err := json.Marshal(nwakuConfig)
	if err != nil {
		return nil, err
	}

	var cJsonConfig = C.CString(string(jsonConfig))
	var resp = C.allocResp()

	defer C.free(unsafe.Pointer(cJsonConfig))
	defer C.freeResp(resp)

	wakuCtx := C.cGoWakuNew(cJsonConfig, resp)
	// NOTE(review): debugging global — see the declaration of jamon.
	jamon = wakuCtx
	// Notice that the events for self node are handled by the 'MyEventCallback' method

	if C.getRet(resp) == C.RET_OK {

		return &NWaku{
			wakuCtx:                         wakuCtx,
			cfg:                             cfg,
			privateKeys:                     make(map[string]*ecdsa.PrivateKey),
			symKeys:                         make(map[string][]byte),
			envelopeCache:                   newTTLCache(),
			msgQueue:                        make(chan *common.ReceivedMessage, messageQueueLimit),
			topicHealthStatusChan:           make(chan peermanager.TopicHealthStatus, 100),
			connectionNotifChan:             make(chan node.PeerConnection, 20),
			connStatusSubscriptions:         make(map[string]*types.ConnStatusSubscription),
			ctx:                             ctx,
			cancel:                          cancel,
			wg:                              sync.WaitGroup{},
			dnsAddressCache:                 make(map[string][]dnsdisc.DiscoveredNode),
			dnsAddressCacheLock:             &sync.RWMutex{},
			storeMsgIDs:                     make(map[gethcommon.Hash]bool),
			timesource:                      ts,
			storeMsgIDsMu:                   sync.RWMutex{},
			logger:                          logger,
			discV5BootstrapNodes:            cfg.DiscV5BootstrapNodes,
			onHistoricMessagesRequestFailed: onHistoricMessagesRequestFailed,
			onPeerStats:                     onPeerStats,
			onlineChecker:                   onlinechecker.NewDefaultOnlineChecker(false).(*onlinechecker.DefaultOnlineChecker),
			sendQueue:                       publish.NewMessageQueue(1000, cfg.UseThrottledPublish),
		}, nil
	}

	errMsg := "error wakuNew: " + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
	return nil, errors.New(errMsg)
}
+
+func (self *NWaku) WakuStart() error {
+
+ var resp = C.allocResp()
+ defer C.freeResp(resp)
+ C.cGoWakuStart(self.wakuCtx, resp)
+
+ if C.getRet(resp) == C.RET_OK {
+ return nil
+ }
+ errMsg := "error WakuStart: " + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return errors.New(errMsg)
+}
+
+func (self *NWaku) WakuStop() error {
+ var resp = C.allocResp()
+ defer C.freeResp(resp)
+ C.cGoWakuStop(self.wakuCtx, resp)
+
+ if C.getRet(resp) == C.RET_OK {
+ return nil
+ }
+ errMsg := "error WakuStop: " + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return errors.New(errMsg)
+}
+
+func (self *NWaku) WakuDestroy() error {
+ var resp = C.allocResp()
+ defer C.freeResp(resp)
+ C.cGoWakuDestroy(self.wakuCtx, resp)
+
+ if C.getRet(resp) == C.RET_OK {
+ return nil
+ }
+ errMsg := "error WakuDestroy: " + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return errors.New(errMsg)
+}
+
+func (self *NWaku) StartDiscV5() error {
+ var resp = C.allocResp()
+ defer C.freeResp(resp)
+ C.cGoWakuStartDiscV5(self.wakuCtx, resp)
+
+ if C.getRet(resp) == C.RET_OK {
+ return nil
+ }
+ errMsg := "error WakuStartDiscV5: " + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return errors.New(errMsg)
+}
+
+func (self *NWaku) StopDiscV5() error {
+ var resp = C.allocResp()
+ defer C.freeResp(resp)
+ C.cGoWakuStopDiscV5(self.wakuCtx, resp)
+
+ if C.getRet(resp) == C.RET_OK {
+ return nil
+ }
+ errMsg := "error WakuStopDiscV5: " + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return errors.New(errMsg)
+}
+
+func (self *NWaku) WakuVersion() (string, error) {
+ var resp = C.allocResp()
+ defer C.freeResp(resp)
+
+ C.cGoWakuVersion(self.wakuCtx, resp)
+
+ if C.getRet(resp) == C.RET_OK {
+ var version = C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return version, nil
+ }
+
+ errMsg := "error WakuVersion: " +
+ C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return "", errors.New(errMsg)
+}
+
// globalEventCallback is the single entry point the nwaku C library invokes
// for events; userData carries the originating node's wakuCtx, from which a
// minimal NWaku wrapper is rebuilt to dispatch the event.
//
//export globalEventCallback
func globalEventCallback(callerRet C.int, msg *C.char, len C.size_t, userData unsafe.Pointer) {
	// This is shared among all Golang instances
	self := NWaku{wakuCtx: userData}
	self.MyEventCallback(callerRet, msg, len)
}
+
// MyEventCallback handles events emitted for this node; currently it only
// prints the raw event payload to stdout.
func (self *NWaku) MyEventCallback(callerRet C.int, msg *C.char, len C.size_t) {
	fmt.Println("Event received:", C.GoStringN(msg, C.int(len)))
}
+
// WakuSetEventCallback registers the C-level event callback for this node's
// context (presumably wiring up globalEventCallback — see the cgo shim).
func (self *NWaku) WakuSetEventCallback() {
	// Notice that the events for self node are handled by the 'MyEventCallback' method
	C.cGoWakuSetEventCallback(self.wakuCtx)
}
+
+func (self *NWaku) FormatContentTopic(
+ appName string,
+ appVersion int,
+ contentTopicName string,
+ encoding string) (WakuContentTopic, error) {
+
+ var cAppName = C.CString(appName)
+ var cContentTopicName = C.CString(contentTopicName)
+ var cEncoding = C.CString(encoding)
+ var resp = C.allocResp()
+
+ defer C.free(unsafe.Pointer(cAppName))
+ defer C.free(unsafe.Pointer(cContentTopicName))
+ defer C.free(unsafe.Pointer(cEncoding))
+ defer C.freeResp(resp)
+
+ C.cGoWakuContentTopic(self.wakuCtx,
+ cAppName,
+ C.int(appVersion),
+ cContentTopicName,
+ cEncoding,
+ resp)
+
+ if C.getRet(resp) == C.RET_OK {
+ var contentTopic = C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return contentTopic, nil
+ }
+
+ errMsg := "error FormatContentTopic: " +
+ C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+
+ return "", errors.New(errMsg)
+}
+
+func (self *NWaku) FormatPubsubTopic(topicName string) (WakuPubsubTopic, error) {
+ var cTopicName = C.CString(topicName)
+ var resp = C.allocResp()
+
+ defer C.free(unsafe.Pointer(cTopicName))
+ defer C.freeResp(resp)
+
+ C.cGoWakuPubsubTopic(self.wakuCtx, cTopicName, resp)
+ if C.getRet(resp) == C.RET_OK {
+ var pubsubTopic = C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return pubsubTopic, nil
+ }
+
+ errMsg := "error FormatPubsubTopic: " +
+ C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+
+ return "", errors.New(errMsg)
+}
+
+func (self *NWaku) WakuDefaultPubsubTopic() (WakuPubsubTopic, error) {
+ var resp = C.allocResp()
+ defer C.freeResp(resp)
+ C.cGoWakuDefaultPubsubTopic(self.wakuCtx, resp)
+ if C.getRet(resp) == C.RET_OK {
+ var defaultPubsubTopic = C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return defaultPubsubTopic, nil
+ }
+
+ errMsg := "error WakuDefaultPubsubTopic: " +
+ C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+
+ return "", errors.New(errMsg)
+}
+
+func (self *NWaku) WakuRelayPublish(
+ pubsubTopic string,
+ message string,
+ timeoutMs int) (WakuMessageHash, error) {
+
+ var cPubsubTopic = C.CString(pubsubTopic)
+ var msg = C.CString(message)
+ var resp = C.allocResp()
+
+ defer C.freeResp(resp)
+ defer C.free(unsafe.Pointer(cPubsubTopic))
+ defer C.free(unsafe.Pointer(msg))
+
+ C.cGoWakuRelayPublish(self.wakuCtx, cPubsubTopic, msg, C.int(timeoutMs), resp)
+ if C.getRet(resp) == C.RET_OK {
+ msgHash := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return msgHash, nil
+ }
+ errMsg := "error WakuRelayPublish: " +
+ C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return "", errors.New(errMsg)
+}
+
+func (self *NWaku) WakuRelaySubscribe(pubsubTopic string) error {
+ var resp = C.allocResp()
+ var cPubsubTopic = C.CString(pubsubTopic)
+
+ defer C.freeResp(resp)
+ defer C.free(unsafe.Pointer(cPubsubTopic))
+
+ if self.wakuCtx == nil {
+ fmt.Println("ctx is nil")
+ }
+ // if self.cPubsubTopic == nil {
+ // fmt.Println("cPubsubTopic is nil")
+ // }
+ // if self.resp == nil {
+ // fmt.Println("resp is nil")
+ // }
+
+ C.cGoWakuRelaySubscribe(self.wakuCtx, cPubsubTopic, resp)
+
+ if C.getRet(resp) == C.RET_OK {
+ return nil
+ }
+ errMsg := "error WakuRelaySubscribe: " +
+ C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return errors.New(errMsg)
+}
+
+func (self *NWaku) WakuRelayUnsubscribe(pubsubTopic string) error {
+ var resp = C.allocResp()
+ var cPubsubTopic = C.CString(pubsubTopic)
+ defer C.freeResp(resp)
+ defer C.free(unsafe.Pointer(cPubsubTopic))
+ C.cGoWakuRelayUnsubscribe(self.wakuCtx, cPubsubTopic, resp)
+
+ if C.getRet(resp) == C.RET_OK {
+ return nil
+ }
+ errMsg := "error WakuRelayUnsubscribe: " +
+ C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return errors.New(errMsg)
+}
+
+func (self *NWaku) WakuLightpushPublish(
+ pubsubTopic string,
+ message string) (string, error) {
+
+ var cPubsubTopic = C.CString(pubsubTopic)
+ var msg = C.CString(message)
+ var resp = C.allocResp()
+
+ defer C.freeResp(resp)
+ defer C.free(unsafe.Pointer(cPubsubTopic))
+ defer C.free(unsafe.Pointer(msg))
+
+ C.cGoWakuLightpushPublish(self.wakuCtx, cPubsubTopic, msg, resp)
+ if C.getRet(resp) == C.RET_OK {
+ msg := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return msg, nil
+ }
+ errMsg := "error WakuLightpushPublish: " +
+ C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return "", errors.New(errMsg)
+}
+
+func (self *NWaku) wakuStoreQuery(
+ jsonQuery string,
+ peerAddr string,
+ timeoutMs int) (string, error) {
+
+ var cJsonQuery = C.CString(jsonQuery)
+ var cPeerAddr = C.CString(peerAddr)
+ var resp = C.allocResp()
+
+ defer C.free(unsafe.Pointer(cJsonQuery))
+ defer C.free(unsafe.Pointer(cPeerAddr))
+ defer C.freeResp(resp)
+
+ C.cGoWakuStoreQuery(self.wakuCtx, cJsonQuery, cPeerAddr, C.int(timeoutMs), resp)
+ if C.getRet(resp) == C.RET_OK {
+ msg := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return msg, nil
+ }
+ errMsg := "error WakuStoreQuery: " +
+ C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return "", errors.New(errMsg)
+}
+
+func (self *NWaku) WakuConnect(peerMultiAddr string, timeoutMs int) error {
+ var resp = C.allocResp()
+ var cPeerMultiAddr = C.CString(peerMultiAddr)
+ defer C.freeResp(resp)
+ defer C.free(unsafe.Pointer(cPeerMultiAddr))
+
+ C.cGoWakuConnect(self.wakuCtx, cPeerMultiAddr, C.int(timeoutMs), resp)
+
+ if C.getRet(resp) == C.RET_OK {
+ return nil
+ }
+ errMsg := "error WakuConnect: " +
+ C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return errors.New(errMsg)
+}
+
+func (self *NWaku) ListenAddresses() ([]multiaddr.Multiaddr, error) {
+ var resp = C.allocResp()
+ defer C.freeResp(resp)
+ C.cGoWakuListenAddresses(self.wakuCtx, resp)
+
+ if C.getRet(resp) == C.RET_OK {
+
+ var addrsRet []multiaddr.Multiaddr
+ listenAddresses := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ addrss := strings.Split(listenAddresses, ",")
+ for _, addr := range addrss {
+ addr, err := ma.NewMultiaddr(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ addrsRet = append(addrsRet, addr)
+ }
+
+ fmt.Println("AAAAAA listen addresses: ", listenAddresses)
+
+ return addrsRet, nil
+ }
+ errMsg := "error WakuListenAddresses: " +
+ C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+
+ return nil, errors.New(errMsg)
+}
+
+func (self *NWaku) ENR() (*enode.Node, error) {
+ var resp = C.allocResp()
+ defer C.freeResp(resp)
+ C.cGoWakuGetMyENR(self.wakuCtx, resp)
+
+ if C.getRet(resp) == C.RET_OK {
+ enrStr := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return enode.Parse(enode.ValidSchemes, enrStr)
+ }
+ errMsg := "error WakuGetMyENR: " +
+ C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return nil, errors.New(errMsg)
+}
+
+func (self *NWaku) ListPeersInMesh(pubsubTopic string) (int, error) {
+ var resp = C.allocResp()
+ var cPubsubTopic = C.CString(pubsubTopic)
+ defer C.freeResp(resp)
+ defer C.free(unsafe.Pointer(cPubsubTopic))
+
+ C.cGoWakuListPeersInMesh(self.wakuCtx, cPubsubTopic, resp)
+
+ if C.getRet(resp) == C.RET_OK {
+ numPeersStr := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ numPeers, err := strconv.Atoi(numPeersStr)
+ if err != nil {
+ fmt.Println(":", err)
+ errMsg := "ListPeersInMesh - error converting string to int: " + err.Error()
+ return 0, errors.New(errMsg)
+ }
+ return numPeers, nil
+ }
+ errMsg := "error ListPeersInMesh: " +
+ C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp)))
+ return 0, errors.New(errMsg)
+}
+
+// func main() {
+
+// config := WakuConfig{
+// Host: "0.0.0.0",
+// Port: 30304,
+// NodeKey: "11d0dcea28e86f81937a3bd1163473c7fbc0a0db54fd72914849bc47bdf78710",
+// EnableRelay: true,
+// LogLevel: "DEBUG",
+// }
+
+// node, err := wakuNew(config)
+// if err != nil {
+// fmt.Println("Error happened:", err.Error())
+// return
+// }
+
+// node.WakuSetEventCallback()
+
+// defaultPubsubTopic, err := node.WakuDefaultPubsubTopic()
+// if err != nil {
+// fmt.Println("Error happened:", err.Error())
+// return
+// }
+
+// err = node.WakuRelaySubscribe(defaultPubsubTopic)
+// if err != nil {
+// fmt.Println("Error happened:", err.Error())
+// return
+// }
+
+// err = node.WakuConnect(
+// // tries to connect to a localhost node with key: 0d714a1fada214dead6dc9c7274585eca0ff292451866e7d6d677dc818e8ccd2
+// "/ip4/0.0.0.0/tcp/60000/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN",
+// 10000)
+// if err != nil {
+// fmt.Println("Error happened:", err.Error())
+// return
+// }
+
+// err = node.WakuStart()
+// if err != nil {
+// fmt.Println("Error happened:", err.Error())
+// return
+// }
+
+// version, err := node.WakuVersion()
+// if err != nil {
+// fmt.Println("Error happened:", err.Error())
+// return
+// }
+
+// formattedContentTopic, err := node.FormatContentTopic("appName", 1, "cTopicName", "enc")
+// if err != nil {
+// fmt.Println("Error happened:", err.Error())
+// return
+// }
+
+// formattedPubsubTopic, err := node.FormatPubsubTopic("my-ctopic")
+// if err != nil {
+// fmt.Println("Error happened:", err.Error())
+// return
+// }
+
+// listenAddresses, err := node.WakuListenAddresses()
+// if err != nil {
+// fmt.Println("Error happened:", err.Error())
+// return
+// }
+
+// fmt.Println("Version:", version)
+// fmt.Println("Custom content topic:", formattedContentTopic)
+// fmt.Println("Custom pubsub topic:", formattedPubsubTopic)
+// fmt.Println("Default pubsub topic:", defaultPubsubTopic)
+// fmt.Println("Listen addresses:", listenAddresses)
+
+// // Wait for a SIGINT or SIGTERM signal
+// ch := make(chan os.Signal, 1)
+// signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
+// <-ch
+
+// err = node.WakuStop()
+// if err != nil {
+// fmt.Println("Error happened:", err.Error())
+// return
+// }
+
+// err = node.WakuDestroy()
+// if err != nil {
+// fmt.Println("Error happened:", err.Error())
+// return
+// }
+// }
+
// MaxMessageSize returns the maximum accepted message size, as configured.
func (w *NWaku) MaxMessageSize() uint32 {
	return w.cfg.MaxMessageSize
}
+
+// New creates a WakuV2 client ready to communicate through the LibP2P network.
+func New(nodeKey *ecdsa.PrivateKey,
+ fleet string,
+ cfg *Config,
+ logger *zap.Logger,
+ appDB *sql.DB,
+ ts *timesource.NTPTimeSource,
+ onHistoricMessagesRequestFailed func([]byte, peer.ID, error),
+ onPeerStats func(types.ConnStatus)) (*NWaku, error) {
+
+ // Lock the main goroutine to its current OS thread
+ runtime.LockOSThread()
+
+ WakuSetup() // This should only be called once in the whole app's life
+
+ node, err := wakuNew(nodeKey,
+ fleet,
+ cfg, logger, appDB, ts, onHistoricMessagesRequestFailed,
+ onPeerStats)
+ if err != nil {
+ return nil, err
+ }
+
+ defaultPubsubTopic, err := node.WakuDefaultPubsubTopic()
+ if err != nil {
+ fmt.Println("Error happened:", err.Error())
+ }
+
+ err = node.WakuRelaySubscribe(defaultPubsubTopic)
+ if err != nil {
+ fmt.Println("Error happened:", err.Error())
+ }
+
+ node.WakuSetEventCallback()
+
+ return node, nil
+
+ // if !cfg.UseThrottledPublish || testing.Testing() {
+ // // To avoid delaying the tests, or for when we dont want to rate limit, we set up an infinite rate limiter,
+ // // basically disabling the rate limit functionality
+ // waku.limiter = publish.NewPublishRateLimiter(rate.Inf, 1)
+
+ // } else {
+ // waku.limiter = publish.NewPublishRateLimiter(publishingLimiterRate, publishingLimitBurst)
+ // }
+
+ // waku.filters = common.NewFilters(waku.cfg.DefaultShardPubsubTopic, waku.logger)
+ // waku.bandwidthCounter = metrics.NewBandwidthCounter()
+
+ // if nodeKey == nil {
+ // // No nodekey is provided, create an ephemeral key
+ // nodeKey, err = crypto.GenerateKey()
+ // if err != nil {
+ // return nil, fmt.Errorf("failed to generate a random go-waku private key: %v", err)
+ // }
+ // }
+
+ // hostAddr, err := net.ResolveTCPAddr("tcp", fmt.Sprint(cfg.Host, ":", cfg.Port))
+ // if err != nil {
+ // return nil, fmt.Errorf("failed to setup the network interface: %v", err)
+ // }
+
+ // libp2pOpts := node.DefaultLibP2POptions
+ // libp2pOpts = append(libp2pOpts, libp2p.BandwidthReporter(waku.bandwidthCounter))
+ // libp2pOpts = append(libp2pOpts, libp2p.NATPortMap())
+
+ // opts := []node.WakuNodeOption{
+ // node.WithLibP2POptions(libp2pOpts...),
+ // node.WithPrivateKey(nodeKey),
+ // node.WithHostAddress(hostAddr),
+ // node.WithConnectionNotification(waku.connectionNotifChan),
+ // node.WithTopicHealthStatusChannel(waku.topicHealthStatusChan),
+ // node.WithKeepAlive(randomPeersKeepAliveInterval, allPeersKeepAliveInterval),
+ // node.WithLogger(logger),
+ // node.WithLogLevel(logger.Level()),
+ // node.WithClusterID(cfg.ClusterID),
+ // node.WithMaxMsgSize(1024 * 1024),
+ // }
+
+ // if cfg.EnableDiscV5 {
+ // bootnodes, err := waku.getDiscV5BootstrapNodes(waku.ctx, cfg.DiscV5BootstrapNodes)
+ // if err != nil {
+ // logger.Error("failed to get bootstrap nodes", zap.Error(err))
+ // return nil, err
+ // }
+ // opts = append(opts, node.WithDiscoveryV5(uint(cfg.UDPPort), bootnodes, cfg.AutoUpdate))
+ // }
+ // shards, err := protocol.TopicsToRelayShards(cfg.DefaultShardPubsubTopic)
+ // if err != nil {
+ // logger.Error("FATAL ERROR: failed to parse relay shards", zap.Error(err))
+ // return nil, errors.New("failed to parse relay shard, invalid pubsubTopic configuration")
+ // }
+ // if len(shards) == 0 { //Hack so that tests don't fail. TODO: Need to remove this once tests are changed to use proper cluster and shard.
+ // shardInfo := protocol.RelayShards{ClusterID: 0, ShardIDs: []uint16{0}}
+ // shards = append(shards, shardInfo)
+ // }
+ // waku.defaultShardInfo = shards[0]
+ // if cfg.LightClient {
+ // opts = append(opts, node.WithWakuFilterLightNode())
+ // waku.defaultShardInfo = shards[0]
+ // opts = append(opts, node.WithMaxPeerConnections(cfg.DiscoveryLimit))
+ // cfg.EnableStoreConfirmationForMessagesSent = false
+ // //TODO: temporary work-around to improve lightClient connectivity, need to be removed once community sharding is implemented
+ // opts = append(opts, node.WithPubSubTopics(cfg.DefaultShardedPubsubTopics))
+ // } else {
+ // relayOpts := []pubsub.Option{
+ // pubsub.WithMaxMessageSize(int(waku.cfg.MaxMessageSize)),
+ // }
+
+ // if waku.logger.Level() == zap.DebugLevel {
+ // relayOpts = append(relayOpts, pubsub.WithEventTracer(waku))
+ // }
+
+ // opts = append(opts, node.WithWakuRelayAndMinPeers(waku.cfg.MinPeersForRelay, relayOpts...))
+ // opts = append(opts, node.WithMaxPeerConnections(maxRelayPeers))
+ // cfg.EnablePeerExchangeClient = true //Enabling this until discv5 issues are resolved. This will enable more peers to be connected for relay mesh.
+ // cfg.EnableStoreConfirmationForMessagesSent = true
+ // }
+
+ // if cfg.EnableStore {
+ // if appDB == nil {
+ // return nil, errors.New("appDB is required for store")
+ // }
+ // opts = append(opts, node.WithWakuStore())
+ // dbStore, err := persistence.NewDBStore(logger, persistence.WithDB(appDB), persistence.WithRetentionPolicy(cfg.StoreCapacity, time.Duration(cfg.StoreSeconds)*time.Second))
+ // if err != nil {
+ // return nil, err
+ // }
+ // opts = append(opts, node.WithMessageProvider(dbStore))
+ // }
+
+ // if !cfg.LightClient {
+ // opts = append(opts, node.WithWakuFilterFullNode(filter.WithMaxSubscribers(20)))
+ // opts = append(opts, node.WithLightPush(lightpush.WithRateLimiter(1, 1)))
+ // }
+
+ // if appDB != nil {
+ // waku.protectedTopicStore, err = persistence.NewProtectedTopicsStore(logger, appDB)
+ // if err != nil {
+ // return nil, err
+ // }
+ // }
+
+ // if cfg.EnablePeerExchangeServer {
+ // opts = append(opts, node.WithPeerExchange(peer_exchange.WithRateLimiter(1, 1)))
+ // }
+
+ // waku.options = opts
+ // waku.logger.Info("setup the go-waku node successfully")
+
+ // return waku, nil
}
diff --git a/wakuv2/tracer.go b/wakuv2/tracer.go
index 163d8f7b6..e34894de1 100644
--- a/wakuv2/tracer.go
+++ b/wakuv2/tracer.go
@@ -11,7 +11,7 @@ import (
// Trace implements EventTracer interface.
// We use custom logging, because we want to base58-encode the peerIDs. And also make the messageIDs readable.
-func (w *Waku) Trace(evt *pubsub_pb.TraceEvent) {
+func (w *NWaku) Trace(evt *pubsub_pb.TraceEvent) {
f := []zap.Field{
zap.String("type", evt.Type.String()),
diff --git a/wakuv2/waku.go b/wakuv2/waku.go
deleted file mode 100644
index 5eb5fc0d5..000000000
--- a/wakuv2/waku.go
+++ /dev/null
@@ -1,1858 +0,0 @@
-// Copyright 2019 The Waku Library Authors.
-//
-// The Waku library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The Waku library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty off
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the Waku library. If not, see .
-//
-// This software uses the go-ethereum library, which is licensed
-// under the GNU Lesser General Public Library, version 3 or any later.
-
-package wakuv2
-
-import (
- "context"
- "crypto/ecdsa"
- "crypto/sha256"
- "database/sql"
- "errors"
- "fmt"
- "math"
- "net"
- "runtime"
- "strings"
- "sync"
- "testing"
- "time"
-
- "github.com/jellydator/ttlcache/v3"
- "github.com/libp2p/go-libp2p/core/peer"
- "github.com/libp2p/go-libp2p/core/peerstore"
- "github.com/multiformats/go-multiaddr"
-
- "go.uber.org/zap"
-
- "golang.org/x/crypto/pbkdf2"
- "golang.org/x/time/rate"
-
- gethcommon "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/event"
- "github.com/ethereum/go-ethereum/p2p"
- "github.com/ethereum/go-ethereum/p2p/enode"
- "github.com/ethereum/go-ethereum/rpc"
-
- "github.com/libp2p/go-libp2p"
- pubsub "github.com/libp2p/go-libp2p-pubsub"
- "github.com/libp2p/go-libp2p/core/metrics"
-
- filterapi "github.com/waku-org/go-waku/waku/v2/api/filter"
- "github.com/waku-org/go-waku/waku/v2/api/missing"
- "github.com/waku-org/go-waku/waku/v2/api/publish"
- "github.com/waku-org/go-waku/waku/v2/dnsdisc"
- "github.com/waku-org/go-waku/waku/v2/onlinechecker"
- "github.com/waku-org/go-waku/waku/v2/peermanager"
- wps "github.com/waku-org/go-waku/waku/v2/peerstore"
- "github.com/waku-org/go-waku/waku/v2/protocol"
- "github.com/waku-org/go-waku/waku/v2/protocol/filter"
- "github.com/waku-org/go-waku/waku/v2/protocol/legacy_store"
- "github.com/waku-org/go-waku/waku/v2/protocol/lightpush"
- "github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange"
- "github.com/waku-org/go-waku/waku/v2/protocol/relay"
- "github.com/waku-org/go-waku/waku/v2/protocol/store"
- "github.com/waku-org/go-waku/waku/v2/utils"
-
- "github.com/status-im/status-go/connection"
- "github.com/status-im/status-go/eth-node/types"
- "github.com/status-im/status-go/logutils"
- "github.com/status-im/status-go/timesource"
- "github.com/status-im/status-go/wakuv2/common"
- "github.com/status-im/status-go/wakuv2/persistence"
-
- node "github.com/waku-org/go-waku/waku/v2/node"
- "github.com/waku-org/go-waku/waku/v2/protocol/pb"
-)
-
-const messageQueueLimit = 1024
-const requestTimeout = 30 * time.Second
-const bootnodesQueryBackoffMs = 200
-const bootnodesMaxRetries = 7
-const cacheTTL = 20 * time.Minute
-const maxRelayPeers = 300
-const randomPeersKeepAliveInterval = 5 * time.Second
-const allPeersKeepAliveInterval = 5 * time.Minute
-const peersToPublishForLightpush = 2
-const publishingLimiterRate = rate.Limit(2)
-const publishingLimitBurst = 4
-
-type SentEnvelope struct {
- Envelope *protocol.Envelope
- PublishMethod PublishMethod
-}
-
-type ErrorSendingEnvelope struct {
- Error error
- SentEnvelope SentEnvelope
-}
-
-type ITelemetryClient interface {
- PushReceivedEnvelope(ctx context.Context, receivedEnvelope *protocol.Envelope)
- PushSentEnvelope(ctx context.Context, sentEnvelope SentEnvelope)
- PushErrorSendingEnvelope(ctx context.Context, errorSendingEnvelope ErrorSendingEnvelope)
- PushPeerCount(ctx context.Context, peerCount int)
- PushPeerConnFailures(ctx context.Context, peerConnFailures map[string]int)
-}
-
-// Waku represents a dark communication interface through the Ethereum
-// network, using its very own P2P communication layer.
-type Waku struct {
- node *node.WakuNode // reference to a libp2p waku node
- appDB *sql.DB
-
- dnsAddressCache map[string][]dnsdisc.DiscoveredNode // Map to store the multiaddresses returned by dns discovery
- dnsAddressCacheLock *sync.RWMutex // lock to handle access to the map
-
- // Filter-related
- filters *common.Filters // Message filters installed with Subscribe function
- filterManager *filterapi.FilterManager
-
- privateKeys map[string]*ecdsa.PrivateKey // Private key storage
- symKeys map[string][]byte // Symmetric key storage
- keyMu sync.RWMutex // Mutex associated with key stores
-
- envelopeCache *ttlcache.Cache[gethcommon.Hash, *common.ReceivedMessage] // Pool of envelopes currently tracked by this node
- poolMu sync.RWMutex // Mutex to sync the message and expiration pools
-
- bandwidthCounter *metrics.BandwidthCounter
-
- protectedTopicStore *persistence.ProtectedTopicsStore
-
- sendQueue *publish.MessageQueue
- limiter *publish.PublishRateLimiter
-
- missingMsgVerifier *missing.MissingMessageVerifier
-
- msgQueue chan *common.ReceivedMessage // Message queue for waku messages that havent been decoded
-
- ctx context.Context
- cancel context.CancelFunc
- wg sync.WaitGroup
-
- cfg *Config
- options []node.WakuNodeOption
-
- envelopeFeed event.Feed
-
- storeMsgIDs map[gethcommon.Hash]bool // Map of the currently processing ids
- storeMsgIDsMu sync.RWMutex
-
- messageSentCheck *publish.MessageSentCheck
-
- topicHealthStatusChan chan peermanager.TopicHealthStatus
- connectionNotifChan chan node.PeerConnection
- connStatusSubscriptions map[string]*types.ConnStatusSubscription
- connStatusMu sync.Mutex
- onlineChecker *onlinechecker.DefaultOnlineChecker
- state connection.State
-
- logger *zap.Logger
-
- // NTP Synced timesource
- timesource *timesource.NTPTimeSource
-
- // seededBootnodesForDiscV5 indicates whether we manage to retrieve discovery
- // bootnodes successfully
- seededBootnodesForDiscV5 bool
-
- // goingOnline is channel that notifies when connectivity has changed from offline to online
- goingOnline chan struct{}
-
- // discV5BootstrapNodes is the ENR to be used to fetch bootstrap nodes for discovery
- discV5BootstrapNodes []string
-
- onHistoricMessagesRequestFailed func([]byte, peer.ID, error)
- onPeerStats func(types.ConnStatus)
-
- statusTelemetryClient ITelemetryClient
-
- defaultShardInfo protocol.RelayShards
-}
-
-func (w *Waku) SetStatusTelemetryClient(client ITelemetryClient) {
- w.statusTelemetryClient = client
-}
-
-func newTTLCache() *ttlcache.Cache[gethcommon.Hash, *common.ReceivedMessage] {
- cache := ttlcache.New[gethcommon.Hash, *common.ReceivedMessage](ttlcache.WithTTL[gethcommon.Hash, *common.ReceivedMessage](cacheTTL))
- go cache.Start()
- return cache
-}
-
-// New creates a WakuV2 client ready to communicate through the LibP2P network.
-func New(nodeKey *ecdsa.PrivateKey, fleet string, cfg *Config, logger *zap.Logger, appDB *sql.DB, ts *timesource.NTPTimeSource, onHistoricMessagesRequestFailed func([]byte, peer.ID, error), onPeerStats func(types.ConnStatus)) (*Waku, error) {
- var err error
- if logger == nil {
- logger, err = zap.NewDevelopment()
- if err != nil {
- return nil, err
- }
- }
-
- if ts == nil {
- ts = timesource.Default()
- }
-
- cfg = setDefaults(cfg)
- if err = cfg.Validate(logger); err != nil {
- return nil, err
- }
-
- logger.Info("starting wakuv2 with config", zap.Any("config", cfg))
-
- ctx, cancel := context.WithCancel(context.Background())
-
- waku := &Waku{
- appDB: appDB,
- cfg: cfg,
- privateKeys: make(map[string]*ecdsa.PrivateKey),
- symKeys: make(map[string][]byte),
- envelopeCache: newTTLCache(),
- msgQueue: make(chan *common.ReceivedMessage, messageQueueLimit),
- topicHealthStatusChan: make(chan peermanager.TopicHealthStatus, 100),
- connectionNotifChan: make(chan node.PeerConnection, 20),
- connStatusSubscriptions: make(map[string]*types.ConnStatusSubscription),
- ctx: ctx,
- cancel: cancel,
- wg: sync.WaitGroup{},
- dnsAddressCache: make(map[string][]dnsdisc.DiscoveredNode),
- dnsAddressCacheLock: &sync.RWMutex{},
- storeMsgIDs: make(map[gethcommon.Hash]bool),
- timesource: ts,
- storeMsgIDsMu: sync.RWMutex{},
- logger: logger,
- discV5BootstrapNodes: cfg.DiscV5BootstrapNodes,
- onHistoricMessagesRequestFailed: onHistoricMessagesRequestFailed,
- onPeerStats: onPeerStats,
- onlineChecker: onlinechecker.NewDefaultOnlineChecker(false).(*onlinechecker.DefaultOnlineChecker),
- sendQueue: publish.NewMessageQueue(1000, cfg.UseThrottledPublish),
- }
-
- if !cfg.UseThrottledPublish || testing.Testing() {
- // To avoid delaying the tests, or for when we dont want to rate limit, we set up an infinite rate limiter,
- // basically disabling the rate limit functionality
- waku.limiter = publish.NewPublishRateLimiter(rate.Inf, 1)
-
- } else {
- waku.limiter = publish.NewPublishRateLimiter(publishingLimiterRate, publishingLimitBurst)
- }
-
- waku.filters = common.NewFilters(waku.cfg.DefaultShardPubsubTopic, waku.logger)
- waku.bandwidthCounter = metrics.NewBandwidthCounter()
-
- if nodeKey == nil {
- // No nodekey is provided, create an ephemeral key
- nodeKey, err = crypto.GenerateKey()
- if err != nil {
- return nil, fmt.Errorf("failed to generate a random go-waku private key: %v", err)
- }
- }
-
- hostAddr, err := net.ResolveTCPAddr("tcp", fmt.Sprint(cfg.Host, ":", cfg.Port))
- if err != nil {
- return nil, fmt.Errorf("failed to setup the network interface: %v", err)
- }
-
- libp2pOpts := node.DefaultLibP2POptions
- libp2pOpts = append(libp2pOpts, libp2p.BandwidthReporter(waku.bandwidthCounter))
- libp2pOpts = append(libp2pOpts, libp2p.NATPortMap())
-
- opts := []node.WakuNodeOption{
- node.WithLibP2POptions(libp2pOpts...),
- node.WithPrivateKey(nodeKey),
- node.WithHostAddress(hostAddr),
- node.WithConnectionNotification(waku.connectionNotifChan),
- node.WithTopicHealthStatusChannel(waku.topicHealthStatusChan),
- node.WithKeepAlive(randomPeersKeepAliveInterval, allPeersKeepAliveInterval),
- node.WithLogger(logger),
- node.WithLogLevel(logger.Level()),
- node.WithClusterID(cfg.ClusterID),
- node.WithMaxMsgSize(1024 * 1024),
- }
-
- if cfg.EnableDiscV5 {
- bootnodes, err := waku.getDiscV5BootstrapNodes(waku.ctx, cfg.DiscV5BootstrapNodes)
- if err != nil {
- logger.Error("failed to get bootstrap nodes", zap.Error(err))
- return nil, err
- }
- opts = append(opts, node.WithDiscoveryV5(uint(cfg.UDPPort), bootnodes, cfg.AutoUpdate))
- }
- shards, err := protocol.TopicsToRelayShards(cfg.DefaultShardPubsubTopic)
- if err != nil {
- logger.Error("FATAL ERROR: failed to parse relay shards", zap.Error(err))
- return nil, errors.New("failed to parse relay shard, invalid pubsubTopic configuration")
- }
- if len(shards) == 0 { //Hack so that tests don't fail. TODO: Need to remove this once tests are changed to use proper cluster and shard.
- shardInfo := protocol.RelayShards{ClusterID: 0, ShardIDs: []uint16{0}}
- shards = append(shards, shardInfo)
- }
- waku.defaultShardInfo = shards[0]
- if cfg.LightClient {
- opts = append(opts, node.WithWakuFilterLightNode())
- waku.defaultShardInfo = shards[0]
- opts = append(opts, node.WithMaxPeerConnections(cfg.DiscoveryLimit))
- cfg.EnableStoreConfirmationForMessagesSent = false
- //TODO: temporary work-around to improve lightClient connectivity, need to be removed once community sharding is implemented
- opts = append(opts, node.WithPubSubTopics(cfg.DefaultShardedPubsubTopics))
- } else {
- relayOpts := []pubsub.Option{
- pubsub.WithMaxMessageSize(int(waku.cfg.MaxMessageSize)),
- }
-
- if waku.logger.Level() == zap.DebugLevel {
- relayOpts = append(relayOpts, pubsub.WithEventTracer(waku))
- }
-
- opts = append(opts, node.WithWakuRelayAndMinPeers(waku.cfg.MinPeersForRelay, relayOpts...))
- opts = append(opts, node.WithMaxPeerConnections(maxRelayPeers))
- cfg.EnablePeerExchangeClient = true //Enabling this until discv5 issues are resolved. This will enable more peers to be connected for relay mesh.
- cfg.EnableStoreConfirmationForMessagesSent = true
- }
-
- if cfg.EnableStore {
- if appDB == nil {
- return nil, errors.New("appDB is required for store")
- }
- opts = append(opts, node.WithWakuStore())
- dbStore, err := persistence.NewDBStore(logger, persistence.WithDB(appDB), persistence.WithRetentionPolicy(cfg.StoreCapacity, time.Duration(cfg.StoreSeconds)*time.Second))
- if err != nil {
- return nil, err
- }
- opts = append(opts, node.WithMessageProvider(dbStore))
- }
-
- if !cfg.LightClient {
- opts = append(opts, node.WithWakuFilterFullNode(filter.WithMaxSubscribers(20)))
- opts = append(opts, node.WithLightPush(lightpush.WithRateLimiter(1, 1)))
- }
-
- if appDB != nil {
- waku.protectedTopicStore, err = persistence.NewProtectedTopicsStore(logger, appDB)
- if err != nil {
- return nil, err
- }
- }
-
- if cfg.EnablePeerExchangeServer {
- opts = append(opts, node.WithPeerExchange(peer_exchange.WithRateLimiter(1, 1)))
- }
-
- waku.options = opts
- waku.logger.Info("setup the go-waku node successfully")
-
- return waku, nil
-}
-
-func (w *Waku) SubscribeToConnStatusChanges() *types.ConnStatusSubscription {
- w.connStatusMu.Lock()
- defer w.connStatusMu.Unlock()
- subscription := types.NewConnStatusSubscription()
- w.connStatusSubscriptions[subscription.ID] = subscription
- return subscription
-}
-
-func (w *Waku) GetNodeENRString() (string, error) {
- if w.node == nil {
- return "", errors.New("node not initialized")
- }
- return w.node.ENR().String(), nil
-}
-
-func (w *Waku) getDiscV5BootstrapNodes(ctx context.Context, addresses []string) ([]*enode.Node, error) {
- wg := sync.WaitGroup{}
- mu := sync.Mutex{}
- var result []*enode.Node
-
- w.seededBootnodesForDiscV5 = true
-
- retrieveENR := func(d dnsdisc.DiscoveredNode, wg *sync.WaitGroup) {
- mu.Lock()
- defer mu.Unlock()
- defer wg.Done()
- if d.ENR != nil {
- result = append(result, d.ENR)
- }
- }
-
- for _, addrString := range addresses {
- if addrString == "" {
- continue
- }
-
- if strings.HasPrefix(addrString, "enrtree://") {
- // Use DNS Discovery
- wg.Add(1)
- go func(addr string) {
- defer wg.Done()
- if err := w.dnsDiscover(ctx, addr, retrieveENR); err != nil {
- mu.Lock()
- w.seededBootnodesForDiscV5 = false
- mu.Unlock()
- }
- }(addrString)
- } else {
- // It's a normal enr
- bootnode, err := enode.Parse(enode.ValidSchemes, addrString)
- if err != nil {
- return nil, err
- }
- result = append(result, bootnode)
- }
- }
- wg.Wait()
-
- return result, nil
-}
-
-type fnApplyToEachPeer func(d dnsdisc.DiscoveredNode, wg *sync.WaitGroup)
-
-func (w *Waku) dnsDiscover(ctx context.Context, enrtreeAddress string, apply fnApplyToEachPeer) error {
- w.logger.Info("retrieving nodes", zap.String("enr", enrtreeAddress))
- ctx, cancel := context.WithTimeout(ctx, requestTimeout)
- defer cancel()
-
- w.dnsAddressCacheLock.Lock()
- defer w.dnsAddressCacheLock.Unlock()
-
- discNodes, ok := w.dnsAddressCache[enrtreeAddress]
- if !ok {
- nameserver := w.cfg.Nameserver
- resolver := w.cfg.Resolver
-
- var opts []dnsdisc.DNSDiscoveryOption
- if nameserver != "" {
- opts = append(opts, dnsdisc.WithNameserver(nameserver))
- }
- if resolver != nil {
- opts = append(opts, dnsdisc.WithResolver(resolver))
- }
-
- discoveredNodes, err := dnsdisc.RetrieveNodes(ctx, enrtreeAddress, opts...)
- if err != nil {
- w.logger.Warn("dns discovery error ", zap.Error(err))
- return err
- }
-
- if len(discoveredNodes) != 0 {
- w.dnsAddressCache[enrtreeAddress] = append(w.dnsAddressCache[enrtreeAddress], discoveredNodes...)
- discNodes = w.dnsAddressCache[enrtreeAddress]
- }
- }
-
- wg := &sync.WaitGroup{}
- wg.Add(len(discNodes))
- for _, d := range discNodes {
- apply(d, wg)
- }
- wg.Wait()
-
- return nil
-}
-
-func (w *Waku) discoverAndConnectPeers() {
- fnApply := func(d dnsdisc.DiscoveredNode, wg *sync.WaitGroup) {
- defer wg.Done()
- if len(d.PeerInfo.Addrs) != 0 {
- go w.connect(d.PeerInfo, d.ENR, wps.DNSDiscovery)
- }
- }
-
- for _, addrString := range w.cfg.WakuNodes {
- addrString := addrString
- if strings.HasPrefix(addrString, "enrtree://") {
- // Use DNS Discovery
- go func() {
- if err := w.dnsDiscover(w.ctx, addrString, fnApply); err != nil {
- w.logger.Error("could not obtain dns discovery peers for ClusterConfig.WakuNodes", zap.Error(err), zap.String("dnsDiscURL", addrString))
- }
- }()
- } else {
- // It is a normal multiaddress
- addr, err := multiaddr.NewMultiaddr(addrString)
- if err != nil {
- w.logger.Warn("invalid peer multiaddress", zap.String("ma", addrString), zap.Error(err))
- continue
- }
-
- peerInfo, err := peer.AddrInfoFromP2pAddr(addr)
- if err != nil {
- w.logger.Warn("invalid peer multiaddress", zap.Stringer("addr", addr), zap.Error(err))
- continue
- }
-
- go w.connect(*peerInfo, nil, wps.Static)
- }
- }
-}
-
-func (w *Waku) connect(peerInfo peer.AddrInfo, enr *enode.Node, origin wps.Origin) {
- // Connection will be prunned eventually by the connection manager if needed
- // The peer connector in go-waku uses Connect, so it will execute identify as part of its
- w.node.AddDiscoveredPeer(peerInfo.ID, peerInfo.Addrs, origin, w.cfg.DefaultShardedPubsubTopics, enr, true)
-}
-
-func (w *Waku) telemetryBandwidthStats(telemetryServerURL string) {
- w.wg.Add(1)
- defer w.wg.Done()
-
- if telemetryServerURL == "" {
- return
- }
-
- telemetry := NewBandwidthTelemetryClient(w.logger, telemetryServerURL)
-
- ticker := time.NewTicker(time.Second * 20)
- defer ticker.Stop()
-
- today := time.Now()
-
- for {
- select {
- case <-w.ctx.Done():
- return
- case now := <-ticker.C:
- // Reset totals when day changes
- if now.Day() != today.Day() {
- today = now
- w.bandwidthCounter.Reset()
- }
-
- go telemetry.PushProtocolStats(w.bandwidthCounter.GetBandwidthByProtocol())
- }
- }
-}
-
-func (w *Waku) GetStats() types.StatsSummary {
- stats := w.bandwidthCounter.GetBandwidthTotals()
- return types.StatsSummary{
- UploadRate: uint64(stats.RateOut),
- DownloadRate: uint64(stats.RateIn),
- }
-}
-
-func (w *Waku) runPeerExchangeLoop() {
- w.wg.Add(1)
- defer w.wg.Done()
-
- if !w.cfg.EnablePeerExchangeClient {
- // Currently peer exchange client is only used for light nodes
- return
- }
-
- ticker := time.NewTicker(time.Second * 5)
- defer ticker.Stop()
-
- for {
- select {
- case <-w.ctx.Done():
- w.logger.Debug("Peer exchange loop stopped")
- return
- case <-ticker.C:
- w.logger.Info("Running peer exchange loop")
-
- // We select only the nodes discovered via DNS Discovery that support peer exchange
- // We assume that those peers are running peer exchange according to infra config,
- // If not, the peer selection process in go-waku will filter them out anyway
- w.dnsAddressCacheLock.RLock()
- var peers peer.IDSlice
- for _, record := range w.dnsAddressCache {
- for _, discoveredNode := range record {
- if len(discoveredNode.PeerInfo.Addrs) == 0 {
- continue
- }
- // Attempt to connect to the peers.
- // Peers will be added to the libp2p peer store thanks to identify
- go w.connect(discoveredNode.PeerInfo, discoveredNode.ENR, wps.DNSDiscovery)
- peers = append(peers, discoveredNode.PeerID)
- }
- }
- w.dnsAddressCacheLock.RUnlock()
-
- if len(peers) != 0 {
- err := w.node.PeerExchange().Request(w.ctx, w.cfg.DiscoveryLimit, peer_exchange.WithAutomaticPeerSelection(peers...),
- peer_exchange.FilterByShard(int(w.defaultShardInfo.ClusterID), int(w.defaultShardInfo.ShardIDs[0])))
- if err != nil {
- w.logger.Error("couldnt request peers via peer exchange", zap.Error(err))
- }
- }
- }
- }
-}
-
-func (w *Waku) GetPubsubTopic(topic string) string {
- if topic == "" {
- topic = w.cfg.DefaultShardPubsubTopic
- }
-
- return topic
-}
-
-func (w *Waku) unsubscribeFromPubsubTopicWithWakuRelay(topic string) error {
- topic = w.GetPubsubTopic(topic)
-
- if !w.node.Relay().IsSubscribed(topic) {
- return nil
- }
-
- contentFilter := protocol.NewContentFilter(topic)
-
- return w.node.Relay().Unsubscribe(w.ctx, contentFilter)
-}
-
-func (w *Waku) subscribeToPubsubTopicWithWakuRelay(topic string, pubkey *ecdsa.PublicKey) error {
- if w.cfg.LightClient {
- return errors.New("only available for full nodes")
- }
-
- topic = w.GetPubsubTopic(topic)
-
- if w.node.Relay().IsSubscribed(topic) {
- return nil
- }
-
- if pubkey != nil {
- err := w.node.Relay().AddSignedTopicValidator(topic, pubkey)
- if err != nil {
- return err
- }
- }
-
- contentFilter := protocol.NewContentFilter(topic)
-
- sub, err := w.node.Relay().Subscribe(w.ctx, contentFilter)
- if err != nil {
- return err
- }
-
- w.wg.Add(1)
- go func() {
- defer w.wg.Done()
- for {
- select {
- case <-w.ctx.Done():
- err := w.node.Relay().Unsubscribe(w.ctx, contentFilter)
- if err != nil && !errors.Is(err, context.Canceled) {
- w.logger.Error("could not unsubscribe", zap.Error(err))
- }
- return
- case env := <-sub[0].Ch:
- err := w.OnNewEnvelopes(env, common.RelayedMessageType, false)
- if err != nil {
- w.logger.Error("OnNewEnvelopes error", zap.Error(err))
- }
- }
- }
- }()
-
- return nil
-}
-
-// MaxMessageSize returns the maximum accepted message size.
-func (w *Waku) MaxMessageSize() uint32 {
- return w.cfg.MaxMessageSize
-}
-
-// CurrentTime returns current time.
-func (w *Waku) CurrentTime() time.Time {
- return w.timesource.Now()
-}
-
-// APIs returns the RPC descriptors the Waku implementation offers
-func (w *Waku) APIs() []rpc.API {
- return []rpc.API{
- {
- Namespace: Name,
- Version: VersionStr,
- Service: NewPublicWakuAPI(w),
- Public: false,
- },
- }
-}
-
-// Protocols returns the waku sub-protocols ran by this particular client.
-func (w *Waku) Protocols() []p2p.Protocol {
- return []p2p.Protocol{}
-}
-
-func (w *Waku) SendEnvelopeEvent(event common.EnvelopeEvent) int {
- return w.envelopeFeed.Send(event)
-}
-
-// SubscribeEnvelopeEvents subscribes to envelopes feed.
-// In order to prevent blocking waku producers events must be amply buffered.
-func (w *Waku) SubscribeEnvelopeEvents(events chan<- common.EnvelopeEvent) event.Subscription {
- return w.envelopeFeed.Subscribe(events)
-}
-
-// NewKeyPair generates a new cryptographic identity for the client, and injects
-// it into the known identities for message decryption. Returns ID of the new key pair.
-func (w *Waku) NewKeyPair() (string, error) {
- key, err := crypto.GenerateKey()
- if err != nil || !validatePrivateKey(key) {
- key, err = crypto.GenerateKey() // retry once
- }
- if err != nil {
- return "", err
- }
- if !validatePrivateKey(key) {
- return "", fmt.Errorf("failed to generate valid key")
- }
-
- id, err := toDeterministicID(hexutil.Encode(crypto.FromECDSAPub(&key.PublicKey)), common.KeyIDSize)
- if err != nil {
- return "", err
- }
-
- w.keyMu.Lock()
- defer w.keyMu.Unlock()
-
- if w.privateKeys[id] != nil {
- return "", fmt.Errorf("failed to generate unique ID")
- }
- w.privateKeys[id] = key
- return id, nil
-}
-
-// DeleteKeyPair deletes the specified key if it exists.
-func (w *Waku) DeleteKeyPair(key string) bool {
- deterministicID, err := toDeterministicID(key, common.KeyIDSize)
- if err != nil {
- return false
- }
-
- w.keyMu.Lock()
- defer w.keyMu.Unlock()
-
- if w.privateKeys[deterministicID] != nil {
- delete(w.privateKeys, deterministicID)
- return true
- }
- return false
-}
-
-// AddKeyPair imports a asymmetric private key and returns it identifier.
-func (w *Waku) AddKeyPair(key *ecdsa.PrivateKey) (string, error) {
- id, err := makeDeterministicID(hexutil.Encode(crypto.FromECDSAPub(&key.PublicKey)), common.KeyIDSize)
- if err != nil {
- return "", err
- }
- if w.HasKeyPair(id) {
- return id, nil // no need to re-inject
- }
-
- w.keyMu.Lock()
- w.privateKeys[id] = key
- w.keyMu.Unlock()
-
- return id, nil
-}
-
-// SelectKeyPair adds cryptographic identity, and makes sure
-// that it is the only private key known to the node.
-func (w *Waku) SelectKeyPair(key *ecdsa.PrivateKey) error {
- id, err := makeDeterministicID(hexutil.Encode(crypto.FromECDSAPub(&key.PublicKey)), common.KeyIDSize)
- if err != nil {
- return err
- }
-
- w.keyMu.Lock()
- defer w.keyMu.Unlock()
-
- w.privateKeys = make(map[string]*ecdsa.PrivateKey) // reset key store
- w.privateKeys[id] = key
-
- return nil
-}
-
-// DeleteKeyPairs removes all cryptographic identities known to the node
-func (w *Waku) DeleteKeyPairs() error {
- w.keyMu.Lock()
- defer w.keyMu.Unlock()
-
- w.privateKeys = make(map[string]*ecdsa.PrivateKey)
-
- return nil
-}
-
-// HasKeyPair checks if the waku node is configured with the private key
-// of the specified public pair.
-func (w *Waku) HasKeyPair(id string) bool {
- deterministicID, err := toDeterministicID(id, common.KeyIDSize)
- if err != nil {
- return false
- }
-
- w.keyMu.RLock()
- defer w.keyMu.RUnlock()
- return w.privateKeys[deterministicID] != nil
-}
-
-// GetPrivateKey retrieves the private key of the specified identity.
-func (w *Waku) GetPrivateKey(id string) (*ecdsa.PrivateKey, error) {
- deterministicID, err := toDeterministicID(id, common.KeyIDSize)
- if err != nil {
- return nil, err
- }
-
- w.keyMu.RLock()
- defer w.keyMu.RUnlock()
- key := w.privateKeys[deterministicID]
- if key == nil {
- return nil, fmt.Errorf("invalid id")
- }
- return key, nil
-}
-
-// GenerateSymKey generates a random symmetric key and stores it under id,
-// which is then returned. Will be used in the future for session key exchange.
-func (w *Waku) GenerateSymKey() (string, error) {
- key, err := common.GenerateSecureRandomData(common.AESKeyLength)
- if err != nil {
- return "", err
- } else if !common.ValidateDataIntegrity(key, common.AESKeyLength) {
- return "", fmt.Errorf("error in GenerateSymKey: crypto/rand failed to generate random data")
- }
-
- id, err := common.GenerateRandomID()
- if err != nil {
- return "", fmt.Errorf("failed to generate ID: %s", err)
- }
-
- w.keyMu.Lock()
- defer w.keyMu.Unlock()
-
- if w.symKeys[id] != nil {
- return "", fmt.Errorf("failed to generate unique ID")
- }
- w.symKeys[id] = key
- return id, nil
-}
-
-// AddSymKey stores the key with a given id.
-func (w *Waku) AddSymKey(id string, key []byte) (string, error) {
- deterministicID, err := toDeterministicID(id, common.KeyIDSize)
- if err != nil {
- return "", err
- }
-
- w.keyMu.Lock()
- defer w.keyMu.Unlock()
-
- if w.symKeys[deterministicID] != nil {
- return "", fmt.Errorf("key already exists: %v", id)
- }
- w.symKeys[deterministicID] = key
- return deterministicID, nil
-}
-
-// AddSymKeyDirect stores the key, and returns its id.
-func (w *Waku) AddSymKeyDirect(key []byte) (string, error) {
- if len(key) != common.AESKeyLength {
- return "", fmt.Errorf("wrong key size: %d", len(key))
- }
-
- id, err := common.GenerateRandomID()
- if err != nil {
- return "", fmt.Errorf("failed to generate ID: %s", err)
- }
-
- w.keyMu.Lock()
- defer w.keyMu.Unlock()
-
- if w.symKeys[id] != nil {
- return "", fmt.Errorf("failed to generate unique ID")
- }
- w.symKeys[id] = key
- return id, nil
-}
-
-// AddSymKeyFromPassword generates the key from password, stores it, and returns its id.
-func (w *Waku) AddSymKeyFromPassword(password string) (string, error) {
- id, err := common.GenerateRandomID()
- if err != nil {
- return "", fmt.Errorf("failed to generate ID: %s", err)
- }
- if w.HasSymKey(id) {
- return "", fmt.Errorf("failed to generate unique ID")
- }
-
- // kdf should run no less than 0.1 seconds on an average computer,
- // because it's an once in a session experience
- derived := pbkdf2.Key([]byte(password), nil, 65356, common.AESKeyLength, sha256.New)
-
- w.keyMu.Lock()
- defer w.keyMu.Unlock()
-
- // double check is necessary, because deriveKeyMaterial() is very slow
- if w.symKeys[id] != nil {
- return "", fmt.Errorf("critical error: failed to generate unique ID")
- }
- w.symKeys[id] = derived
- return id, nil
-}
-
-// HasSymKey returns true if there is a key associated with the given id.
-// Otherwise returns false.
-func (w *Waku) HasSymKey(id string) bool {
- w.keyMu.RLock()
- defer w.keyMu.RUnlock()
- return w.symKeys[id] != nil
-}
-
-// DeleteSymKey deletes the key associated with the name string if it exists.
-func (w *Waku) DeleteSymKey(id string) bool {
- w.keyMu.Lock()
- defer w.keyMu.Unlock()
- if w.symKeys[id] != nil {
- delete(w.symKeys, id)
- return true
- }
- return false
-}
-
-// GetSymKey returns the symmetric key associated with the given id.
-func (w *Waku) GetSymKey(id string) ([]byte, error) {
- w.keyMu.RLock()
- defer w.keyMu.RUnlock()
- if w.symKeys[id] != nil {
- return w.symKeys[id], nil
- }
- return nil, fmt.Errorf("non-existent key ID")
-}
-
-// Subscribe installs a new message handler used for filtering, decrypting
-// and subsequent storing of incoming messages.
-func (w *Waku) Subscribe(f *common.Filter) (string, error) {
- f.PubsubTopic = w.GetPubsubTopic(f.PubsubTopic)
- id, err := w.filters.Install(f)
- if err != nil {
- return id, err
- }
-
- if w.cfg.LightClient {
- cf := protocol.NewContentFilter(f.PubsubTopic, f.ContentTopics.ContentTopics()...)
- w.filterManager.SubscribeFilter(id, cf)
- }
-
- return id, nil
-}
-
-// Unsubscribe removes an installed message handler.
-func (w *Waku) Unsubscribe(ctx context.Context, id string) error {
- ok := w.filters.Uninstall(id)
- if !ok {
- return fmt.Errorf("failed to unsubscribe: invalid ID '%s'", id)
- }
-
- if w.cfg.LightClient {
- w.filterManager.UnsubscribeFilter(id)
- }
-
- return nil
-}
-
-// GetFilter returns the filter by id.
-func (w *Waku) GetFilter(id string) *common.Filter {
- return w.filters.Get(id)
-}
-
-// Unsubscribe removes an installed message handler.
-func (w *Waku) UnsubscribeMany(ids []string) error {
- for _, id := range ids {
- w.logger.Info("cleaning up filter", zap.String("id", id))
- ok := w.filters.Uninstall(id)
- if !ok {
- w.logger.Warn("could not remove filter with id", zap.String("id", id))
- }
- }
- return nil
-}
-
-func (w *Waku) SkipPublishToTopic(value bool) {
- w.cfg.SkipPublishToTopic = value
-}
-
-func (w *Waku) ConfirmMessageDelivered(hashes []gethcommon.Hash) {
- if !w.cfg.EnableStoreConfirmationForMessagesSent {
- return
- }
- w.messageSentCheck.DeleteByMessageIDs(hashes)
-}
-
-func (w *Waku) SetStorePeerID(peerID peer.ID) {
- if w.messageSentCheck != nil {
- w.messageSentCheck.SetStorePeerID(peerID)
- }
-}
-
-func (w *Waku) Query(ctx context.Context, peerID peer.ID, query store.FilterCriteria, cursor []byte, opts []store.RequestOption, processEnvelopes bool) ([]byte, int, error) {
- requestID := protocol.GenerateRequestID()
-
- opts = append(opts,
- store.WithRequestID(requestID),
- store.WithPeer(peerID),
- store.WithCursor(cursor))
-
- logger := w.logger.With(zap.String("requestID", hexutil.Encode(requestID)), zap.Stringer("peerID", peerID))
-
- logger.Debug("store.query",
- logutils.WakuMessageTimestamp("startTime", query.TimeStart),
- logutils.WakuMessageTimestamp("endTime", query.TimeEnd),
- zap.Strings("contentTopics", query.ContentTopics.ToList()),
- zap.String("pubsubTopic", query.PubsubTopic),
- zap.String("cursor", hexutil.Encode(cursor)),
- )
-
- queryStart := time.Now()
- result, err := w.node.Store().Query(ctx, query, opts...)
- queryDuration := time.Since(queryStart)
- if err != nil {
- logger.Error("error querying storenode", zap.Error(err))
-
- if w.onHistoricMessagesRequestFailed != nil {
- w.onHistoricMessagesRequestFailed(requestID, peerID, err)
- }
- return nil, 0, err
- }
-
- messages := result.Messages()
- envelopesCount := len(messages)
- w.logger.Debug("store.query response", zap.Duration("queryDuration", queryDuration), zap.Int("numMessages", envelopesCount), zap.Bool("hasCursor", result.IsComplete() && result.Cursor() != nil))
- for _, mkv := range messages {
- msg := mkv.Message
-
- // Temporarily setting RateLimitProof to nil so it matches the WakuMessage protobuffer we are sending
- // See https://github.com/vacp2p/rfc/issues/563
- mkv.Message.RateLimitProof = nil
-
- envelope := protocol.NewEnvelope(msg, msg.GetTimestamp(), query.PubsubTopic)
-
- err = w.OnNewEnvelopes(envelope, common.StoreMessageType, processEnvelopes)
- if err != nil {
- return nil, 0, err
- }
- }
-
- return result.Cursor(), envelopesCount, nil
-}
-
-// OnNewEnvelope is an interface from Waku FilterManager API that gets invoked when any new message is received by Filter.
-func (w *Waku) OnNewEnvelope(env *protocol.Envelope) error {
- return w.OnNewEnvelopes(env, common.RelayedMessageType, false)
-}
-
-// Start implements node.Service, starting the background data propagation thread
-// of the Waku protocol.
-func (w *Waku) Start() error {
- if w.ctx == nil {
- w.ctx, w.cancel = context.WithCancel(context.Background())
- }
-
- var err error
- if w.node, err = node.New(w.options...); err != nil {
- return fmt.Errorf("failed to create a go-waku node: %v", err)
- }
-
- w.goingOnline = make(chan struct{})
-
- if err = w.node.Start(w.ctx); err != nil {
- return fmt.Errorf("failed to start go-waku node: %v", err)
- }
-
- w.logger.Info("WakuV2 PeerID", zap.Stringer("id", w.node.Host().ID()))
-
- w.discoverAndConnectPeers()
-
- if w.cfg.EnableDiscV5 {
- err := w.node.DiscV5().Start(w.ctx)
- if err != nil {
- return err
- }
- }
-
- w.wg.Add(1)
- go func() {
- defer w.wg.Done()
- ticker := time.NewTicker(5 * time.Second)
- defer ticker.Stop()
- for {
- select {
- case <-w.ctx.Done():
- return
- case <-ticker.C:
- w.checkForConnectionChanges()
- case <-w.topicHealthStatusChan:
- // TODO: https://github.com/status-im/status-go/issues/4628
- case <-w.connectionNotifChan:
- w.checkForConnectionChanges()
- }
- }
- }()
-
- go w.telemetryBandwidthStats(w.cfg.TelemetryServerURL)
- //TODO: commenting for now so that only fleet nodes are used.
- //Need to uncomment once filter peer scoring etc is implemented.
- go w.runPeerExchangeLoop()
-
- if w.cfg.EnableMissingMessageVerification {
-
- w.missingMsgVerifier = missing.NewMissingMessageVerifier(
- w.node.Store(),
- w,
- w.node.Timesource(),
- w.logger)
-
- w.missingMsgVerifier.Start(w.ctx)
-
- w.wg.Add(1)
- go func() {
- w.wg.Done()
- for {
- select {
- case <-w.ctx.Done():
- return
- case envelope := <-w.missingMsgVerifier.C:
- err = w.OnNewEnvelopes(envelope, common.MissingMessageType, false)
- if err != nil {
- w.logger.Error("OnNewEnvelopes error", zap.Error(err))
- }
- }
- }
- }()
- }
-
- if w.cfg.LightClient {
- // Create FilterManager that will main peer connectivity
- // for installed filters
- w.filterManager = filterapi.NewFilterManager(w.ctx, w.logger, w.cfg.MinPeersForFilter,
- w,
- w.node.FilterLightnode())
- }
-
- err = w.setupRelaySubscriptions()
- if err != nil {
- return err
- }
-
- numCPU := runtime.NumCPU()
- for i := 0; i < numCPU; i++ {
- go w.processQueueLoop()
- }
-
- go w.broadcast()
-
- go w.sendQueue.Start(w.ctx)
-
- if w.cfg.EnableStoreConfirmationForMessagesSent {
- w.confirmMessagesSent()
- }
-
- // we should wait `seedBootnodesForDiscV5` shutdown smoothly before set w.ctx to nil within `w.Stop()`
- go w.seedBootnodesForDiscV5()
-
- return nil
-}
-
// checkForConnectionChanges recomputes the node's online status from the
// current peer set, fans the status out to subscribers and callbacks, pushes
// telemetry, and feeds the result back through ConnectionChanged.
func (w *Waku) checkForConnectionChanges() {

	// Online iff we have at least one connected peer.
	isOnline := len(w.node.Host().Network().Peers()) > 0

	w.connStatusMu.Lock()

	latestConnStatus := types.ConnStatus{
		IsOnline: isOnline,
		Peers:    FormatPeerStats(w.node),
	}

	w.logger.Debug("peer stats",
		zap.Int("peersCount", len(latestConnStatus.Peers)),
		zap.Any("stats", latestConnStatus))
	// Dead subscriptions (Send returns false) are pruned while holding the lock.
	for k, subs := range w.connStatusSubscriptions {
		if !subs.Send(latestConnStatus) {
			delete(w.connStatusSubscriptions, k)
		}
	}

	// Unlock before invoking external callbacks to avoid holding the mutex
	// across code we don't control.
	w.connStatusMu.Unlock()

	if w.onPeerStats != nil {
		w.onPeerStats(latestConnStatus)
	}

	if w.statusTelemetryClient != nil {
		connFailures := FormatPeerConnFailures(w.node)
		w.statusTelemetryClient.PushPeerCount(w.ctx, w.PeerCount())
		w.statusTelemetryClient.PushPeerConnFailures(w.ctx, connFailures)
	}

	w.ConnectionChanged(connection.State{
		Type:    w.state.Type, //setting state type as previous one since there won't be a change here
		Offline: !latestConnStatus.IsOnline,
	})
}
-
-func (w *Waku) confirmMessagesSent() {
- w.messageSentCheck = publish.NewMessageSentCheck(w.ctx, w.node.Store(), w.node.Timesource(), w.logger)
- go w.messageSentCheck.Start()
-
- go func() {
- for {
- select {
- case <-w.ctx.Done():
- return
- case hash := <-w.messageSentCheck.MessageStoredChan:
- w.SendEnvelopeEvent(common.EnvelopeEvent{
- Hash: hash,
- Event: common.EventEnvelopeSent,
- })
- case hash := <-w.messageSentCheck.MessageExpiredChan:
- w.SendEnvelopeEvent(common.EnvelopeEvent{
- Hash: hash,
- Event: common.EventEnvelopeExpired,
- })
- }
- }
- }()
-}
-
// MessageExists reports whether the message with the given hash is present in
// the envelope cache. The error return is always nil (kept for interface compatibility).
func (w *Waku) MessageExists(mh pb.MessageHash) (bool, error) {
	w.poolMu.Lock()
	defer w.poolMu.Unlock()
	return w.envelopeCache.Has(gethcommon.Hash(mh)), nil
}
-
-func (w *Waku) SetTopicsToVerifyForMissingMessages(peerID peer.ID, pubsubTopic string, contentTopics []string) {
- if !w.cfg.EnableMissingMessageVerification {
- return
- }
-
- w.missingMsgVerifier.SetCriteriaInterest(peerID, protocol.NewContentFilter(pubsubTopic, contentTopics...))
-}
-
-func (w *Waku) setupRelaySubscriptions() error {
- if w.cfg.LightClient {
- return nil
- }
-
- if w.protectedTopicStore != nil {
- protectedTopics, err := w.protectedTopicStore.ProtectedTopics()
- if err != nil {
- return err
- }
-
- for _, pt := range protectedTopics {
- // Adding subscription to protected topics
- err = w.subscribeToPubsubTopicWithWakuRelay(pt.Topic, pt.PubKey)
- if err != nil {
- return err
- }
- }
- }
-
- err := w.subscribeToPubsubTopicWithWakuRelay(w.cfg.DefaultShardPubsubTopic, nil)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// Stop implements node.Service, stopping the background data propagation thread
-// of the Waku protocol.
-func (w *Waku) Stop() error {
- w.cancel()
-
- w.envelopeCache.Stop()
-
- w.node.Stop()
-
- if w.protectedTopicStore != nil {
- err := w.protectedTopicStore.Close()
- if err != nil {
- return err
- }
- }
-
- close(w.goingOnline)
- w.wg.Wait()
-
- w.ctx = nil
- w.cancel = nil
-
- return nil
-}
-
-func (w *Waku) OnNewEnvelopes(envelope *protocol.Envelope, msgType common.MessageType, processImmediately bool) error {
- if envelope == nil {
- return nil
- }
-
- recvMessage := common.NewReceivedMessage(envelope, msgType)
- if recvMessage == nil {
- return nil
- }
-
- if w.statusTelemetryClient != nil {
- w.statusTelemetryClient.PushReceivedEnvelope(w.ctx, envelope)
- }
-
- logger := w.logger.With(
- zap.String("messageType", msgType),
- zap.Stringer("envelopeHash", envelope.Hash()),
- zap.String("pubsubTopic", envelope.PubsubTopic()),
- zap.String("contentTopic", envelope.Message().ContentTopic),
- logutils.WakuMessageTimestamp("timestamp", envelope.Message().Timestamp),
- )
-
- logger.Debug("received new envelope")
- trouble := false
-
- _, err := w.add(recvMessage, processImmediately)
- if err != nil {
- logger.Info("invalid envelope received", zap.Error(err))
- trouble = true
- }
-
- common.EnvelopesValidatedCounter.Inc()
-
- if trouble {
- return errors.New("received invalid envelope")
- }
-
- return nil
-}
-
-// addEnvelope adds an envelope to the envelope map, used for sending
-func (w *Waku) addEnvelope(envelope *common.ReceivedMessage) {
- w.poolMu.Lock()
- w.envelopeCache.Set(envelope.Hash(), envelope, ttlcache.DefaultTTL)
- w.poolMu.Unlock()
-}
-
-func (w *Waku) add(recvMessage *common.ReceivedMessage, processImmediately bool) (bool, error) {
- common.EnvelopesReceivedCounter.Inc()
-
- w.poolMu.Lock()
- envelope := w.envelopeCache.Get(recvMessage.Hash())
- alreadyCached := envelope != nil
- w.poolMu.Unlock()
-
- if !alreadyCached {
- recvMessage.Processed.Store(false)
- w.addEnvelope(recvMessage)
- }
-
- logger := w.logger.With(zap.String("envelopeHash", recvMessage.Hash().Hex()))
-
- if alreadyCached {
- logger.Debug("w envelope already cached")
- common.EnvelopesCachedCounter.WithLabelValues("hit").Inc()
- } else {
- logger.Debug("cached w envelope")
- common.EnvelopesCachedCounter.WithLabelValues("miss").Inc()
- common.EnvelopesSizeMeter.Observe(float64(len(recvMessage.Envelope.Message().Payload)))
- }
-
- if !alreadyCached || !envelope.Value().Processed.Load() {
- if processImmediately {
- logger.Debug("immediately processing envelope")
- w.processMessage(recvMessage)
- } else {
- logger.Debug("posting event")
- w.postEvent(recvMessage) // notify the local node about the new message
- }
- }
-
- return true, nil
-}
-
-// postEvent queues the message for further processing.
-func (w *Waku) postEvent(envelope *common.ReceivedMessage) {
- w.msgQueue <- envelope
-}
-
-// processQueueLoop delivers the messages to the watchers during the lifetime of the waku node.
-func (w *Waku) processQueueLoop() {
- if w.ctx == nil {
- return
- }
- for {
- select {
- case <-w.ctx.Done():
- return
- case e := <-w.msgQueue:
- w.processMessage(e)
- }
- }
-}
-
// processMessage routes a received message through the installed filters,
// tracks store-message IDs for in-flight bookkeeping, feeds sent messages to
// the delivery check, and emits an EventEnvelopeAvailable on the envelope feed.
func (w *Waku) processMessage(e *common.ReceivedMessage) {
	logger := w.logger.With(
		zap.Stringer("envelopeHash", e.Envelope.Hash()),
		zap.String("pubsubTopic", e.PubsubTopic),
		zap.String("contentTopic", e.ContentTopic.ContentTopic()),
		zap.Int64("timestamp", e.Envelope.Message().GetTimestamp()),
	)

	if e.MsgType == common.StoreMessageType {
		// We need to insert it first, and then remove it if not matched,
		// as messages are processed asynchronously
		w.storeMsgIDsMu.Lock()
		w.storeMsgIDs[e.Hash()] = true
		w.storeMsgIDsMu.Unlock()
	}

	// Only non-ephemeral sent messages are tracked for store confirmation.
	ephemeral := e.Envelope.Message().Ephemeral
	if w.cfg.EnableStoreConfirmationForMessagesSent && e.MsgType == common.SendMessageType && (ephemeral == nil || !*ephemeral) {
		w.messageSentCheck.Add(e.PubsubTopic, e.Hash(), e.Sent)
	}

	matched := w.filters.NotifyWatchers(e)

	// If not matched we remove it
	if !matched {
		logger.Debug("filters did not match")
		w.storeMsgIDsMu.Lock()
		delete(w.storeMsgIDs, e.Hash())
		w.storeMsgIDsMu.Unlock()
	} else {
		logger.Debug("filters did match")
		e.Processed.Store(true)
	}

	w.envelopeFeed.Send(common.EnvelopeEvent{
		Topic: e.ContentTopic,
		Hash:  e.Hash(),
		Event: common.EventEnvelopeAvailable,
	})
}
-
-// GetEnvelope retrieves an envelope from the message queue by its hash.
-// It returns nil if the envelope can not be found.
-func (w *Waku) GetEnvelope(hash gethcommon.Hash) *common.ReceivedMessage {
- w.poolMu.RLock()
- defer w.poolMu.RUnlock()
-
- envelope := w.envelopeCache.Get(hash)
- if envelope == nil {
- return nil
- }
-
- return envelope.Value()
-}
-
-// isEnvelopeCached checks if envelope with specific hash has already been received and cached.
-func (w *Waku) IsEnvelopeCached(hash gethcommon.Hash) bool {
- w.poolMu.Lock()
- defer w.poolMu.Unlock()
-
- return w.envelopeCache.Has(hash)
-}
-
// ClearEnvelopesCache stops the current envelope cache and replaces it with a
// fresh one. Holds the write lock since the cache pointer itself is swapped.
func (w *Waku) ClearEnvelopesCache() {
	w.poolMu.Lock()
	defer w.poolMu.Unlock()

	w.envelopeCache.Stop()
	w.envelopeCache = newTTLCache()
}
-
// PeerCount returns the number of peers the underlying go-waku node is connected to.
func (w *Waku) PeerCount() int {
	return w.node.PeerCount()
}
-
// Peers returns per-peer address/protocol stats for all connected peers.
func (w *Waku) Peers() types.PeerStats {
	return FormatPeerStats(w.node)
}
-
-func (w *Waku) RelayPeersByTopic(topic string) (*types.PeerList, error) {
- if w.cfg.LightClient {
- return nil, errors.New("only available in relay mode")
- }
-
- return &types.PeerList{
- FullMeshPeers: w.node.Relay().PubSub().MeshPeers(topic),
- AllPeers: w.node.Relay().PubSub().ListPeers(topic),
- }, nil
-}
-
// ListenAddresses returns the multiaddresses the underlying node listens on.
func (w *Waku) ListenAddresses() []multiaddr.Multiaddr {
	return w.node.ListenAddresses()
}
-
-func (w *Waku) ENR() (*enode.Node, error) {
- enr := w.node.ENR()
- if enr == nil {
- return nil, errors.New("enr not available")
- }
-
- return enr, nil
-}
-
-func (w *Waku) SubscribeToPubsubTopic(topic string, pubkey *ecdsa.PublicKey) error {
- topic = w.GetPubsubTopic(topic)
-
- if !w.cfg.LightClient {
- err := w.subscribeToPubsubTopicWithWakuRelay(topic, pubkey)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (w *Waku) UnsubscribeFromPubsubTopic(topic string) error {
- topic = w.GetPubsubTopic(topic)
-
- if !w.cfg.LightClient {
- err := w.unsubscribeFromPubsubTopicWithWakuRelay(topic)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
// RetrievePubsubTopicKey fetches the private key stored for the (resolved)
// protected pubsub topic. Returns (nil, nil) when no protected-topic store is configured.
func (w *Waku) RetrievePubsubTopicKey(topic string) (*ecdsa.PrivateKey, error) {
	topic = w.GetPubsubTopic(topic)
	if w.protectedTopicStore == nil {
		return nil, nil
	}

	return w.protectedTopicStore.FetchPrivateKey(topic)
}
-
// StorePubsubTopicKey persists privKey (and its public key) for the (resolved)
// protected pubsub topic. No-op when no protected-topic store is configured.
func (w *Waku) StorePubsubTopicKey(topic string, privKey *ecdsa.PrivateKey) error {
	topic = w.GetPubsubTopic(topic)
	if w.protectedTopicStore == nil {
		return nil
	}

	return w.protectedTopicStore.Insert(topic, privKey, &privKey.PublicKey)
}
-
// RemovePubsubTopicKey deletes the stored key for the (resolved) protected
// pubsub topic. No-op when no protected-topic store is configured.
func (w *Waku) RemovePubsubTopicKey(topic string) error {
	topic = w.GetPubsubTopic(topic)
	if w.protectedTopicStore == nil {
		return nil
	}

	return w.protectedTopicStore.Delete(topic)
}
-
-func (w *Waku) StartDiscV5() error {
- if w.node.DiscV5() == nil {
- return errors.New("discv5 is not setup")
- }
-
- return w.node.DiscV5().Start(w.ctx)
-}
-
-func (w *Waku) StopDiscV5() error {
- if w.node.DiscV5() == nil {
- return errors.New("discv5 is not setup")
- }
-
- w.node.DiscV5().Stop()
- return nil
-}
-
// handleNetworkChangeFromApp reacts to connectivity changes reported by the
// host application (e.g. the mobile OS) rather than derived from peer counts.
func (w *Waku) handleNetworkChangeFromApp(state connection.State) {
	//If connection state is reported by something other than peerCount becoming 0 e.g from mobile app, disconnect all peers
	// Two triggers: app says offline while we still hold connections, or the
	// network type flipped (wifi <-> cellular) while staying online.
	if (state.Offline && len(w.node.Host().Network().Peers()) > 0) ||
		(w.state.Type != state.Type && !w.state.Offline && !state.Offline) { // network switched between wifi and cellular
		w.logger.Info("connection switched or offline detected via mobile, disconnecting all peers")
		w.node.DisconnectAllPeers()
		if w.cfg.LightClient {
			w.filterManager.NetworkChange()
		}
	}
}
-
// ConnectionChanged updates the node's view of network connectivity. For light
// clients it delegates online/offline handling to the filter manager; for
// relay nodes it triggers peer discovery and signals goingOnline when the node
// transitions from offline to online.
func (w *Waku) ConnectionChanged(state connection.State) {
	isOnline := !state.Offline
	if w.cfg.LightClient {
		//TODO: Update this as per https://github.com/waku-org/go-waku/issues/1114
		go w.filterManager.OnConnectionStatusChange("", isOnline)
		w.handleNetworkChangeFromApp(state)
	} else {
		// for lightClient state update and onlineChange is handled in filterManager.
		// going online
		if isOnline && !w.onlineChecker.IsOnline() {
			//TODO: analyze if we need to discover and connect to peers for relay.
			w.discoverAndConnectPeers()
			// Non-blocking send: the bootnode seeder may not be listening.
			select {
			case w.goingOnline <- struct{}{}:
			default:
				w.logger.Warn("could not write on connection changed channel")
			}
		}
		// update state
		w.onlineChecker.SetOnline(isOnline)
	}
	w.state = state
}
-
-// seedBootnodesForDiscV5 tries to fetch bootnodes
-// from an ENR periodically.
-// It backs off exponentially until maxRetries, at which point it restarts from 0
-// It also restarts if there's a connection change signalled from the client
-func (w *Waku) seedBootnodesForDiscV5() {
- w.wg.Add(1)
- defer w.wg.Done()
-
- if !w.cfg.EnableDiscV5 || w.node.DiscV5() == nil {
- return
- }
-
- ticker := time.NewTicker(500 * time.Millisecond)
- defer ticker.Stop()
- var retries = 0
-
- now := func() int64 {
- return time.Now().UnixNano() / int64(time.Millisecond)
-
- }
-
- var lastTry = now()
-
- canQuery := func() bool {
- backoff := bootnodesQueryBackoffMs * int64(math.Exp2(float64(retries)))
-
- return lastTry+backoff < now()
- }
-
- for {
- select {
- case <-ticker.C:
- if w.seededBootnodesForDiscV5 && len(w.node.Host().Network().Peers()) > 3 {
- w.logger.Debug("not querying bootnodes", zap.Bool("seeded", w.seededBootnodesForDiscV5), zap.Int("peer-count", len(w.node.Host().Network().Peers())))
- continue
- }
- if canQuery() {
- w.logger.Info("querying bootnodes to restore connectivity", zap.Int("peer-count", len(w.node.Host().Network().Peers())))
- err := w.restartDiscV5()
- if err != nil {
- w.logger.Warn("failed to restart discv5", zap.Error(err))
- }
-
- lastTry = now()
- retries++
- // We reset the retries after a while and restart
- if retries > bootnodesMaxRetries {
- retries = 0
- }
-
- } else {
- w.logger.Info("can't query bootnodes", zap.Int("peer-count", len(w.node.Host().Network().Peers())), zap.Int64("lastTry", lastTry), zap.Int64("now", now()), zap.Int64("backoff", bootnodesQueryBackoffMs*int64(math.Exp2(float64(retries)))), zap.Int("retries", retries))
-
- }
- // If we go online, trigger immediately
- case <-w.goingOnline:
- if w.cfg.EnableDiscV5 {
- if canQuery() {
- err := w.restartDiscV5()
- if err != nil {
- w.logger.Warn("failed to restart discv5", zap.Error(err))
- }
-
- }
- retries = 0
- lastTry = now()
- }
-
- case <-w.ctx.Done():
- w.logger.Debug("bootnode seeding stopped")
- return
- }
- }
-}
-
-// Restart discv5, re-retrieving bootstrap nodes
-func (w *Waku) restartDiscV5() error {
- ctx, cancel := context.WithTimeout(w.ctx, 30*time.Second)
- defer cancel()
- bootnodes, err := w.getDiscV5BootstrapNodes(ctx, w.discV5BootstrapNodes)
- if err != nil {
- return err
- }
- if len(bootnodes) == 0 {
- return errors.New("failed to fetch bootnodes")
- }
-
- if w.node.DiscV5().ErrOnNotRunning() != nil {
- w.logger.Info("is not started restarting")
- err := w.node.DiscV5().Start(w.ctx)
- if err != nil {
- w.logger.Error("Could not start DiscV5", zap.Error(err))
- }
- } else {
- w.node.DiscV5().Stop()
- w.logger.Info("is started restarting")
-
- select {
- case <-w.ctx.Done(): // Don't start discv5 if we are stopping waku
- return nil
- default:
- }
-
- err := w.node.DiscV5().Start(w.ctx)
- if err != nil {
- w.logger.Error("Could not start DiscV5", zap.Error(err))
- }
- }
-
- w.logger.Info("restarting discv5 with nodes", zap.Any("nodes", bootnodes))
- return w.node.SetDiscV5Bootnodes(bootnodes)
-}
-
// AddStorePeer adds a static store-protocol peer at the given multiaddress and
// returns its peer ID.
func (w *Waku) AddStorePeer(address multiaddr.Multiaddr) (peer.ID, error) {
	peerID, err := w.node.AddPeer(address, wps.Static, w.cfg.DefaultShardedPubsubTopics, store.StoreQueryID_v300)
	if err != nil {
		return "", err
	}
	return peerID, nil
}
-
// timestamp returns the current time from the node's time source in nanoseconds.
func (w *Waku) timestamp() int64 {
	return w.timesource.Now().UnixNano()
}
-
// AddRelayPeer adds a static relay-protocol peer at the given multiaddress and
// returns its peer ID.
func (w *Waku) AddRelayPeer(address multiaddr.Multiaddr) (peer.ID, error) {
	peerID, err := w.node.AddPeer(address, wps.Static, w.cfg.DefaultShardedPubsubTopics, relay.WakuRelayID_v200)
	if err != nil {
		return "", err
	}
	return peerID, nil
}
-
// DialPeer dials a peer by multiaddress, bounded by requestTimeout.
func (w *Waku) DialPeer(address multiaddr.Multiaddr) error {
	ctx, cancel := context.WithTimeout(w.ctx, requestTimeout)
	defer cancel()
	return w.node.DialPeerWithMultiAddress(ctx, address)
}
-
// DialPeerByID dials an already-known peer by its ID, bounded by requestTimeout.
func (w *Waku) DialPeerByID(peerID peer.ID) error {
	ctx, cancel := context.WithTimeout(w.ctx, requestTimeout)
	defer cancel()
	return w.node.DialPeerByID(ctx, peerID)
}
-
// DropPeer closes the connection to the given peer.
func (w *Waku) DropPeer(peerID peer.ID) error {
	return w.node.ClosePeerById(peerID)
}
-
// ProcessingP2PMessages reports whether any store messages are still being processed.
func (w *Waku) ProcessingP2PMessages() bool {
	w.storeMsgIDsMu.Lock()
	defer w.storeMsgIDsMu.Unlock()
	return len(w.storeMsgIDs) != 0
}
-
// MarkP2PMessageAsProcessed removes the given hash from the set of in-flight
// store messages.
func (w *Waku) MarkP2PMessageAsProcessed(hash gethcommon.Hash) {
	w.storeMsgIDsMu.Lock()
	defer w.storeMsgIDsMu.Unlock()
	delete(w.storeMsgIDs, hash)
}
-
// Clean discards all queued messages and resets every installed filter's
// message store. The error return is always nil.
func (w *Waku) Clean() error {
	// Replacing the channel drops anything still queued.
	w.msgQueue = make(chan *common.ReceivedMessage, messageQueueLimit)

	for _, f := range w.filters.All() {
		f.Messages = common.NewMemoryMessageStore()
	}

	return nil
}
-
// PeerID returns the libp2p peer ID of this node.
func (w *Waku) PeerID() peer.ID {
	return w.node.Host().ID()
}
-
// Peerstore exposes the libp2p peerstore of the underlying host.
func (w *Waku) Peerstore() peerstore.Peerstore {
	return w.node.Host().Peerstore()
}
-
-// validatePrivateKey checks the format of the given private key.
-func validatePrivateKey(k *ecdsa.PrivateKey) bool {
- if k == nil || k.D == nil || k.D.Sign() == 0 {
- return false
- }
- return common.ValidatePublicKey(&k.PublicKey)
-}
-
-// makeDeterministicID generates a deterministic ID, based on a given input
-func makeDeterministicID(input string, keyLen int) (id string, err error) {
- buf := pbkdf2.Key([]byte(input), nil, 4096, keyLen, sha256.New)
- if !common.ValidateDataIntegrity(buf, common.KeyIDSize) {
- return "", fmt.Errorf("error in GenerateDeterministicID: failed to generate key")
- }
- id = gethcommon.Bytes2Hex(buf)
- return id, err
-}
-
-// toDeterministicID reviews incoming id, and transforms it to format
-// expected internally be private key store. Originally, public keys
-// were used as keys, now random keys are being used. And in order to
-// make it easier to consume, we now allow both random IDs and public
-// keys to be passed.
-func toDeterministicID(id string, expectedLen int) (string, error) {
- if len(id) != (expectedLen * 2) { // we received hex key, so number of chars in id is doubled
- var err error
- id, err = makeDeterministicID(id, expectedLen)
- if err != nil {
- return "", err
- }
- }
-
- return id, nil
-}
-
-func FormatPeerStats(wakuNode *node.WakuNode) types.PeerStats {
- p := make(types.PeerStats)
- for k, v := range wakuNode.PeerStats() {
- p[k] = types.WakuV2Peer{
- Addresses: utils.EncapsulatePeerID(k, wakuNode.Host().Peerstore().PeerInfo(k).Addrs...),
- Protocols: v,
- }
- }
- return p
-}
-
// StoreNode exposes the underlying go-waku store protocol instance.
func (w *Waku) StoreNode() *store.WakuStore {
	return w.node.Store()
}
-
-func FormatPeerConnFailures(wakuNode *node.WakuNode) map[string]int {
- p := make(map[string]int)
- for _, peerID := range wakuNode.Host().Network().Peers() {
- peerInfo := wakuNode.Host().Peerstore().PeerInfo(peerID)
- connFailures := wakuNode.Host().Peerstore().(wps.WakuPeerstore).ConnFailures(peerInfo)
- if connFailures > 0 {
- p[peerID.String()] = connFailures
- }
- }
- return p
-}
-
-func (w *Waku) LegacyStoreNode() legacy_store.Store {
- return w.node.LegacyStore()
-}