feat!: extract storenode cycle to go-waku api

Richard Ramos 2024-09-19 16:16:53 -04:00
parent f15c64ced3
commit 95b6a17719
35 changed files with 1512 additions and 1391 deletions


@ -9,6 +9,8 @@ import (
"github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peer"
"github.com/multiformats/go-multiaddr" "github.com/multiformats/go-multiaddr"
"github.com/waku-org/go-waku/waku/v2/api/history"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enode"
gocommon "github.com/status-im/status-go/common" gocommon "github.com/status-im/status-go/common"
@ -274,10 +276,6 @@ func (w *GethWakuWrapper) MarkP2PMessageAsProcessed(hash common.Hash) {
w.waku.MarkP2PMessageAsProcessed(hash) w.waku.MarkP2PMessageAsProcessed(hash)
} }
func (w *GethWakuWrapper) RequestStoreMessages(ctx context.Context, peerID peer.ID, r types.MessagesRequest, processEnvelopes bool) (types.StoreRequestCursor, int, error) {
return nil, 0, errors.New("not implemented")
}
func (w *GethWakuWrapper) ConnectionChanged(_ connection.State) {} func (w *GethWakuWrapper) ConnectionChanged(_ connection.State) {}
func (w *GethWakuWrapper) ClearEnvelopesCache() { func (w *GethWakuWrapper) ClearEnvelopesCache() {
@ -314,13 +312,59 @@ func (w *wakuFilterWrapper) ID() string {
func (w *GethWakuWrapper) ConfirmMessageDelivered(hashes []common.Hash) { func (w *GethWakuWrapper) ConfirmMessageDelivered(hashes []common.Hash) {
} }
func (w *GethWakuWrapper) SetStorePeerID(peerID peer.ID) {
}
func (w *GethWakuWrapper) PeerID() peer.ID { func (w *GethWakuWrapper) PeerID() peer.ID {
panic("not implemented") panic("not available in WakuV1")
} }
func (w *GethWakuWrapper) PingPeer(context.Context, peer.ID) (time.Duration, error) { func (w *GethWakuWrapper) GetActiveStorenode() peer.ID {
return 0, errors.New("not available in WakuV1") panic("not available in WakuV1")
}
func (w *GethWakuWrapper) OnStorenodeAvailableOneShot() <-chan struct{} {
panic("not available in WakuV1")
}
func (w *GethWakuWrapper) OnStorenodeChanged() <-chan peer.ID {
panic("not available in WakuV1")
}
func (w *GethWakuWrapper) OnStorenodeNotWorking() <-chan struct{} {
panic("not available in WakuV1")
}
func (w *GethWakuWrapper) OnStorenodeAvailable() <-chan peer.ID {
panic("not available in WakuV1")
}
func (w *GethWakuWrapper) WaitForAvailableStoreNode(timeout time.Duration) bool {
return false
}
func (w *GethWakuWrapper) SetStorenodeConfigProvider(c history.StorenodeConfigProvider) {
panic("not available in WakuV1")
}
func (w *GethWakuWrapper) ProcessMailserverBatch(
ctx context.Context,
batch types.MailserverBatch,
storenodeID peer.ID,
pageLimit uint64,
shouldProcessNextPage func(int) (bool, uint64),
processEnvelopes bool,
) error {
return errors.New("not available in WakuV1")
}
func (w *GethWakuWrapper) IsStorenodeAvailable(peerID peer.ID) bool {
panic("not available in WakuV1")
}
func (w *GethWakuWrapper) PerformStorenodeTask(fn func() error, opts ...history.StorenodeTaskOption) error {
panic("not available in WakuV1")
}
func (w *GethWakuWrapper) DisconnectActiveStorenode(ctx context.Context, backoff time.Duration, shouldCycle bool) {
panic("not available in WakuV1")
}


@ -9,6 +9,7 @@ import (
"github.com/multiformats/go-multiaddr" "github.com/multiformats/go-multiaddr"
"google.golang.org/protobuf/proto" "google.golang.org/protobuf/proto"
"github.com/waku-org/go-waku/waku/v2/api/history"
"github.com/waku-org/go-waku/waku/v2/protocol" "github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/protocol/store" "github.com/waku-org/go-waku/waku/v2/protocol/store"
@ -176,39 +177,6 @@ func (w *gethWakuV2Wrapper) createFilterWrapper(id string, keyAsym *ecdsa.Privat
}, id), nil }, id), nil
} }
func (w *gethWakuV2Wrapper) RequestStoreMessages(ctx context.Context, peerID peer.ID, r types.MessagesRequest, processEnvelopes bool) (types.StoreRequestCursor, int, error) {
options := []store.RequestOption{
store.WithPaging(false, uint64(r.Limit)),
}
var cursor []byte
if r.StoreCursor != nil {
cursor = r.StoreCursor
}
contentTopics := []string{}
for _, topic := range r.ContentTopics {
contentTopics = append(contentTopics, wakucommon.BytesToTopic(topic).ContentTopic())
}
query := store.FilterCriteria{
TimeStart: proto.Int64(int64(r.From) * int64(time.Second)),
TimeEnd: proto.Int64(int64(r.To) * int64(time.Second)),
ContentFilter: protocol.NewContentFilter(w.waku.GetPubsubTopic(r.PubsubTopic), contentTopics...),
}
pbCursor, envelopesCount, err := w.waku.Query(ctx, peerID, query, cursor, options, processEnvelopes)
if err != nil {
return nil, 0, err
}
if pbCursor != nil {
return pbCursor, envelopesCount, nil
}
return nil, envelopesCount, nil
}
func (w *gethWakuV2Wrapper) StartDiscV5() error {
return w.waku.StartDiscV5()
}
@ -289,7 +257,7 @@ func (w *gethWakuV2Wrapper) SubscribeToConnStatusChanges() (*types.ConnStatusSub
func (w *gethWakuV2Wrapper) SetCriteriaForMissingMessageVerification(peerID peer.ID, pubsubTopic string, contentTopics []types.TopicType) error {
var cTopics []string
for _, ct := range contentTopics {
cTopics = append(cTopics, wakucommon.TopicType(ct).ContentTopic())
cTopics = append(cTopics, wakucommon.BytesToTopic(ct.Bytes()).ContentTopic())
}
pubsubTopic = w.waku.GetPubsubTopic(pubsubTopic)
w.waku.SetTopicsToVerifyForMissingMessages(peerID, pubsubTopic, cTopics)
@ -338,14 +306,75 @@ func (w *gethWakuV2Wrapper) ConfirmMessageDelivered(hashes []common.Hash) {
w.waku.ConfirmMessageDelivered(hashes)
}
func (w *gethWakuV2Wrapper) SetStorePeerID(peerID peer.ID) {
w.waku.SetStorePeerID(peerID)
}
func (w *gethWakuV2Wrapper) PeerID() peer.ID {
return w.waku.PeerID()
}
func (w *gethWakuV2Wrapper) PingPeer(ctx context.Context, peerID peer.ID) (time.Duration, error) {
func (w *gethWakuV2Wrapper) GetActiveStorenode() peer.ID {
return w.waku.PingPeer(ctx, peerID)
return w.waku.StorenodeCycle.GetActiveStorenode()
}
func (w *gethWakuV2Wrapper) OnStorenodeAvailableOneShot() <-chan struct{} {
return w.waku.StorenodeCycle.StorenodeAvailableOneshotEmitter.Subscribe()
}
func (w *gethWakuV2Wrapper) OnStorenodeChanged() <-chan peer.ID {
return w.waku.StorenodeCycle.StorenodeChangedEmitter.Subscribe()
}
func (w *gethWakuV2Wrapper) OnStorenodeNotWorking() <-chan struct{} {
return w.waku.StorenodeCycle.StorenodeNotWorkingEmitter.Subscribe()
}
func (w *gethWakuV2Wrapper) OnStorenodeAvailable() <-chan peer.ID {
return w.waku.StorenodeCycle.StorenodeAvailableEmitter.Subscribe()
}
func (w *gethWakuV2Wrapper) WaitForAvailableStoreNode(timeout time.Duration) bool {
return w.waku.StorenodeCycle.WaitForAvailableStoreNode(context.TODO(), timeout)
}
func (w *gethWakuV2Wrapper) SetStorenodeConfigProvider(c history.StorenodeConfigProvider) {
w.waku.StorenodeCycle.SetStorenodeConfigProvider(c)
}
func (w *gethWakuV2Wrapper) ProcessMailserverBatch(
ctx context.Context,
batch types.MailserverBatch,
storenodeID peer.ID,
pageLimit uint64,
shouldProcessNextPage func(int) (bool, uint64),
processEnvelopes bool,
) error {
pubsubTopic := w.waku.GetPubsubTopic(batch.PubsubTopic)
contentTopics := []string{}
for _, topic := range batch.Topics {
contentTopics = append(contentTopics, wakucommon.BytesToTopic(topic.Bytes()).ContentTopic())
}
criteria := store.FilterCriteria{
TimeStart: proto.Int64(int64(batch.From) * int64(time.Second)),
TimeEnd: proto.Int64(int64(batch.To) * int64(time.Second)),
ContentFilter: protocol.NewContentFilter(pubsubTopic, contentTopics...),
}
return w.waku.HistoryRetriever.Query(ctx, criteria, storenodeID, pageLimit, shouldProcessNextPage, processEnvelopes)
}
func (w *gethWakuV2Wrapper) IsStorenodeAvailable(peerID peer.ID) bool {
return w.waku.StorenodeCycle.IsStorenodeAvailable(peerID)
}
func (w *gethWakuV2Wrapper) PerformStorenodeTask(fn func() error, opts ...history.StorenodeTaskOption) error {
return w.waku.StorenodeCycle.PerformStorenodeTask(fn, opts...)
}
func (w *gethWakuV2Wrapper) DisconnectActiveStorenode(ctx context.Context, backoff time.Duration, shouldCycle bool) {
w.waku.StorenodeCycle.Lock()
defer w.waku.StorenodeCycle.Unlock()
w.waku.StorenodeCycle.DisconnectActiveStorenode(backoff)
if shouldCycle {
w.waku.StorenodeCycle.Cycle(ctx)
}
}
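For context, a minimal sketch of how the storenode-cycle surface exposed by this wrapper can be consumed. The helper name `watchStorenode` is illustrative only, and the `waku` value is assumed to be any `types.Waku` implementation (such as the wrapper above); the channel semantics follow the doc comments on the interface further down.

```go
package example

import (
	"log"

	"github.com/status-im/status-go/eth-node/types"
)

// watchStorenode is a hypothetical helper: it reacts to the storenode cycle
// events that the wrapper now forwards from go-waku's StorenodeCycle emitters.
func watchStorenode(waku types.Waku, quit <-chan struct{}) {
	changed := waku.OnStorenodeChanged()       // fires when a storenode is promoted or the active one is removed
	notWorking := waku.OnStorenodeNotWorking() // fires when the active storenode keeps failing

	for {
		select {
		case id := <-changed:
			// An empty peer ID means the active storenode was removed.
			log.Printf("active storenode is now %q", id)
		case <-notWorking:
			log.Printf("storenode %q not responding; expect the cycle to pick a new one", waku.GetActiveStorenode())
		case <-quit:
			return
		}
	}
}
```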


@ -1,59 +1,5 @@
package types
import (
"time"
)
const (
// MaxLimitInMessagesRequest represents the maximum number of messages
// that can be requested from the mailserver
MaxLimitInMessagesRequest = 1000
)
// MessagesRequest contains details of a request of historic messages.
type MessagesRequest struct {
// ID of the request. The current implementation requires ID to be 32-byte array,
// however, it's not enforced for future implementation.
ID []byte `json:"id"`
// From is a lower bound of time range.
From uint32 `json:"from"`
// To is a upper bound of time range.
To uint32 `json:"to"`
// Limit determines the number of messages sent by the mail server
// for the current paginated request.
Limit uint32 `json:"limit"`
// Cursor is used as starting point for paginated requests.
Cursor []byte `json:"cursor"`
// StoreCursor is used as starting point for WAKUV2 paginatedRequests
StoreCursor StoreRequestCursor `json:"storeCursor"`
// Bloom is a filter to match requested messages.
Bloom []byte `json:"bloom"`
// PubsubTopic is the gossipsub topic on which the message was broadcasted
PubsubTopic string `json:"pubsubTopic"`
// ContentTopics is a list of topics. A returned message should
// belong to one of the topics from the list.
ContentTopics [][]byte `json:"contentTopics"`
}
type StoreRequestCursor []byte
// SetDefaults sets the From and To defaults
func (r *MessagesRequest) SetDefaults(now time.Time) {
// set From and To defaults
if r.To == 0 {
r.To = uint32(now.UTC().Unix())
}
if r.From == 0 {
oneDay := uint32(86400) // -24 hours
if r.To < oneDay {
r.From = 0
} else {
r.From = r.To - oneDay
}
}
}
// MailServerResponse is the response payload sent by the mailserver.
type MailServerResponse struct {
LastEnvelopeHash Hash


@ -34,6 +34,10 @@ func (t TopicType) String() string {
return EncodeHex(t[:])
}
func (t TopicType) Bytes() []byte {
return TopicTypeToByteArray(t)
}
// MarshalText returns the hex representation of t.
func (t TopicType) MarshalText() ([]byte, error) {
return HexBytes(t[:]).MarshalText()


@ -3,7 +3,10 @@ package types
import (
"context"
"crypto/ecdsa"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"sync"
"time"
@ -12,6 +15,8 @@ import (
"github.com/multiformats/go-multiaddr"
"github.com/pborman/uuid"
"github.com/waku-org/go-waku/waku/v2/api/history"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/status-im/status-go/connection"
@ -176,9 +181,6 @@ type Waku interface {
Unsubscribe(ctx context.Context, id string) error
UnsubscribeMany(ids []string) error
// RequestStoreMessages uses the WAKU2-STORE protocol to request historic messages
RequestStoreMessages(ctx context.Context, peerID peer.ID, request MessagesRequest, processEnvelopes bool) (StoreRequestCursor, int, error)
// ProcessingP2PMessages indicates whether there are in-flight p2p messages
ProcessingP2PMessages() bool
@ -194,12 +196,60 @@ type Waku interface {
// ConfirmMessageDelivered updates a message has been delivered in waku
ConfirmMessageDelivered(hash []common.Hash)
// SetStorePeerID updates the peer id of store node
SetStorePeerID(peerID peer.ID)
// PeerID returns node's PeerID
PeerID() peer.ID
// PingPeer returns the reply time
PingPeer(ctx context.Context, peerID peer.ID) (time.Duration, error)
// GetActiveStorenode returns the peer ID of the currently active storenode. It will be empty if no storenode is active
GetActiveStorenode() peer.ID
// OnStorenodeAvailableOneShot returns a channel that will be triggered only once when a storenode becomes available
OnStorenodeAvailableOneShot() <-chan struct{}
// OnStorenodeChanged is triggered when a new storenode is promoted to become the active storenode or when the active storenode is removed
OnStorenodeChanged() <-chan peer.ID
// OnStorenodeNotWorking is triggered when the last active storenode fails to return results consistently
OnStorenodeNotWorking() <-chan struct{}
// OnStorenodeAvailable is triggered when there is a new active storenode selected
OnStorenodeAvailable() <-chan peer.ID
// WaitForAvailableStoreNode will wait for a storenode to be available until `timeout` happens
WaitForAvailableStoreNode(timeout time.Duration) bool
// SetStorenodeConfigProvider will set the configuration provider for the storenode cycle
SetStorenodeConfigProvider(c history.StorenodeConfigProvider)
// ProcessMailserverBatch will receive a criteria and storenode and execute a query
ProcessMailserverBatch(
ctx context.Context,
batch MailserverBatch,
storenodeID peer.ID,
pageLimit uint64,
shouldProcessNextPage func(int) (bool, uint64),
processEnvelopes bool,
) error
// IsStorenodeAvailable is used to determine whether a storenode is available or not
IsStorenodeAvailable(peerID peer.ID) bool
PerformStorenodeTask(fn func() error, opts ...history.StorenodeTaskOption) error
// DisconnectActiveStorenode will trigger a disconnection of the active storenode, and potentially execute a cycling so a new storenode is promoted
DisconnectActiveStorenode(ctx context.Context, backoff time.Duration, shouldCycle bool)
}
type MailserverBatch struct {
From uint32
To uint32
Cursor string
PubsubTopic string
Topics []TopicType
ChatIDs []string
}
func (mb *MailserverBatch) Hash() string {
data := fmt.Sprintf("%d%d%s%s%v%v", mb.From, mb.To, mb.Cursor, mb.PubsubTopic, mb.Topics, mb.ChatIDs)
hash := sha256.Sum256([]byte(data))
return hex.EncodeToString(hash[:4])
}
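A minimal sketch of how the interface above is meant to be driven: wait for the storenode cycle to pick a node, then run a batched store query against it. The helper name `fetchLastDay` and the page limit are illustrative, and `waku` is assumed to be any implementation of the interface (e.g. the wakuv2 wrapper shown earlier).

```go
package example

import (
	"context"
	"errors"
	"time"

	"github.com/status-im/status-go/eth-node/types"
)

// fetchLastDay is a hypothetical helper showing the intended call sequence
// for the new storenode API surface.
func fetchLastDay(ctx context.Context, waku types.Waku, topic types.TopicType) error {
	// Block until some storenode can serve history, with a bounded wait.
	if !waku.WaitForAvailableStoreNode(30 * time.Second) {
		return errors.New("no storenode became available in time")
	}

	storenode := waku.GetActiveStorenode()
	now := uint32(time.Now().Unix())
	batch := types.MailserverBatch{
		From:   now - 86400, // seconds; implementations convert these to store-protocol timestamps
		To:     now,
		Topics: []types.TopicType{topic},
	}
	// The page limit of 20 is arbitrary here; the nil callback mirrors
	// Messenger.processMailserverBatch below (no custom paging decision).
	return waku.ProcessMailserverBatch(ctx, batch, storenode, 20, nil, false)
}
```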

go.mod

@ -96,7 +96,7 @@ require (
github.com/schollz/peerdiscovery v1.7.0
github.com/siphiuel/lc-proxy-wrapper v0.0.0-20230516150924-246507cee8c7
github.com/urfave/cli/v2 v2.27.2
github.com/waku-org/go-waku v0.8.1-0.20241004054019-0ed94ce0b1cb
github.com/waku-org/go-waku v0.8.1-0.20241014185851-76275f6fb835
github.com/wk8/go-ordered-map/v2 v2.1.7
github.com/yeqown/go-qrcode/v2 v2.2.1
github.com/yeqown/go-qrcode/writer/standard v1.2.1

go.sum

@ -2150,8 +2150,8 @@ github.com/waku-org/go-libp2p-pubsub v0.12.0-gowaku.0.20240823143342-b0f2429ca27
github.com/waku-org/go-libp2p-pubsub v0.12.0-gowaku.0.20240823143342-b0f2429ca27f/go.mod h1:Oi0zw9aw8/Y5GC99zt+Ef2gYAl+0nZlwdJonDyOz/sE=
github.com/waku-org/go-libp2p-rendezvous v0.0.0-20240110193335-a67d1cc760a0 h1:R4YYx2QamhBRl/moIxkDCNW+OP7AHbyWLBygDc/xIMo=
github.com/waku-org/go-libp2p-rendezvous v0.0.0-20240110193335-a67d1cc760a0/go.mod h1:EhZP9fee0DYjKH/IOQvoNSy1tSHp2iZadsHGphcAJgY=
github.com/waku-org/go-waku v0.8.1-0.20241004054019-0ed94ce0b1cb h1:E3J49PH9iXpjaOOI/VrEX/VhSk3obKjxVehGEDzZgXI=
github.com/waku-org/go-waku v0.8.1-0.20241014185851-76275f6fb835 h1:Vp6BhXiDEilmchHy8OLMZVhugudsnvveNkAKD5nhAGk=
github.com/waku-org/go-waku v0.8.1-0.20241004054019-0ed94ce0b1cb/go.mod h1:1BRnyg2mQ2aBNLTBaPq6vEvobzywGykPOhGQFbHGf74=
github.com/waku-org/go-waku v0.8.1-0.20241014185851-76275f6fb835/go.mod h1:1BRnyg2mQ2aBNLTBaPq6vEvobzywGykPOhGQFbHGf74=
github.com/waku-org/go-zerokit-rln v0.1.14-0.20240102145250-fa738c0bdf59 h1:jisj+OCI6QydLtFq3Pyhu49wl9ytPN7oAHjMfepHDrA=
github.com/waku-org/go-zerokit-rln v0.1.14-0.20240102145250-fa738c0bdf59/go.mod h1:1PdBdPzyTaKt3VnpAHk3zj+r9dXPFOr3IHZP9nFle6E=
github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230916172309-ee0ee61dde2b h1:KgZVhsLkxsj5gb/FfndSCQu6VYwALrCOgYI3poR95yE=


@ -139,7 +139,6 @@ type Messenger struct {
allInstallations *installationMap
modifiedInstallations *stringBoolMap
installationID string
mailserverCycle mailserverCycle
communityStorenodes *storenodes.CommunityStorenodes
database *sql.DB
multiAccounts *multiaccounts.Database
@ -172,7 +171,6 @@ type Messenger struct {
// TODO(samyoul) Determine if/how the remaining usage of this mutex can be removed
mutex sync.Mutex
mailPeersMutex sync.RWMutex
handleMessagesMutex sync.Mutex
handleImportMessagesMutex sync.Mutex
@ -199,50 +197,6 @@ type Messenger struct {
mvdsStatusChangeEvent chan datasyncnode.PeerStatusChangeEvent
}
type connStatus int
const (
disconnected connStatus = iota + 1
connected
)
type peerStatus struct {
status connStatus
canConnectAfter time.Time
lastConnectionAttempt time.Time
mailserver mailserversDB.Mailserver
}
type mailserverCycle struct {
sync.RWMutex
allMailservers []mailserversDB.Mailserver
activeMailserver *mailserversDB.Mailserver
peers map[string]peerStatus
availabilitySubscriptions *availabilitySubscriptions
}
type availabilitySubscriptions struct {
sync.Mutex
subscriptions []chan struct{}
}
func (s *availabilitySubscriptions) Subscribe() <-chan struct{} {
s.Lock()
defer s.Unlock()
c := make(chan struct{})
s.subscriptions = append(s.subscriptions, c)
return c
}
func (s *availabilitySubscriptions) EmitMailserverAvailable() {
s.Lock()
defer s.Unlock()
for _, subs := range s.subscriptions {
close(subs)
}
s.subscriptions = nil
}
type EnvelopeEventsInterceptor struct {
EnvelopeEventsHandler transport.EnvelopeEventsHandler
Messenger *Messenger
@ -624,19 +578,15 @@ func NewMessenger(
peerStore: peerStore,
mvdsStatusChangeEvent: make(chan datasyncnode.PeerStatusChangeEvent, 5),
verificationDatabase: verification.NewPersistence(database),
mailserverCycle: mailserverCycle{
peers: make(map[string]peerStatus),
availabilitySubscriptions: &availabilitySubscriptions{},
},
mailserversDatabase: c.mailserversDatabase,
communityStorenodes: storenodes.NewCommunityStorenodes(storenodes.NewDB(database), logger),
account: c.account,
quit: make(chan struct{}),
ctx: ctx,
cancel: cancel,
importingCommunities: make(map[string]bool),
importingChannels: make(map[string]bool),
importRateLimiter: rate.NewLimiter(rate.Every(importSlowRate), 1),
importDelayer: struct {
wait chan struct{}
once sync.Once
@ -883,22 +833,26 @@ func (m *Messenger) Start() (*MessengerResponse, error) {
}
response := &MessengerResponse{}
mailservers, err := m.allMailservers()
storenodes, err := m.AllMailservers()
if err != nil {
return nil, err
}
response.Mailservers = mailservers
err = m.StartMailserverCycle(mailservers)
err = m.setupStorenodes(storenodes)
if err != nil {
return nil, err
}
response.Mailservers = storenodes
m.transport.SetStorenodeConfigProvider(m)
if err := m.communityStorenodes.ReloadFromDB(); err != nil {
return nil, err
}
go m.checkForMissingMessagesLoop()
go m.checkForStorenodeCycleSignals()
controlledCommunities, err := m.communitiesManager.Controlled()
if err != nil {
@ -906,10 +860,9 @@ func (m *Messenger) Start() (*MessengerResponse, error) {
}
if m.archiveManager.IsReady() {
available := m.mailserverCycle.availabilitySubscriptions.Subscribe()
go func() {
defer gocommon.LogOnPanic()
<-available
<-m.transport.OnStorenodeAvailableOneShot()
m.InitHistoryArchiveTasks(controlledCommunities)
}()
}


@ -16,6 +16,7 @@ import (
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/libp2p/go-libp2p/core/peer"
gethcommon "github.com/ethereum/go-ethereum/common" gethcommon "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
@ -40,6 +41,7 @@ import (
"github.com/status-im/status-go/protocol/encryption" "github.com/status-im/status-go/protocol/encryption"
"github.com/status-im/status-go/protocol/protobuf" "github.com/status-im/status-go/protocol/protobuf"
"github.com/status-im/status-go/protocol/requests" "github.com/status-im/status-go/protocol/requests"
"github.com/status-im/status-go/protocol/storenodes"
"github.com/status-im/status-go/protocol/transport" "github.com/status-im/status-go/protocol/transport"
v1protocol "github.com/status-im/status-go/protocol/v1" v1protocol "github.com/status-im/status-go/protocol/v1"
localnotifications "github.com/status-im/status-go/services/local-notifications" localnotifications "github.com/status-im/status-go/services/local-notifications"
@ -3970,8 +3972,8 @@ func (m *Messenger) InitHistoryArchiveTasks(communities []*communities.Community
} }
// Request possibly missed waku messages for community // Request possibly missed waku messages for community
ms := m.getActiveMailserver(c.ID().String()) ms := m.getCommunityMailserver(c.ID().String())
_, err = m.syncFiltersFrom(*ms, filters, uint32(latestWakuMessageTimestamp)) _, err = m.syncFiltersFrom(ms, filters, uint32(latestWakuMessageTimestamp))
if err != nil { if err != nil {
m.logger.Error("failed to request missing messages", zap.Error(err)) m.logger.Error("failed to request missing messages", zap.Error(err))
continue continue
@ -5155,3 +5157,28 @@ func (m *Messenger) startRequestMissingCommunityChannelsHRKeysLoop() {
} }
}() }()
} }
// getCommunityMailserver returns the active mailserver if a communityID is present then it'll return the mailserver
// for that community if it has a mailserver setup otherwise it'll return the global mailserver
func (m *Messenger) getCommunityMailserver(communityID ...string) peer.ID {
if m.transport.WakuVersion() != 2 {
return ""
}
if len(communityID) == 0 || communityID[0] == "" {
return m.transport.GetActiveStorenode()
}
ms, err := m.communityStorenodes.GetStorenodeByCommunityID(communityID[0])
if err != nil {
if !errors.Is(err, storenodes.ErrNotFound) {
m.logger.Error("getting storenode for community, using global", zap.String("communityID", communityID[0]), zap.Error(err))
}
// if we don't find a specific mailserver for the community, we just use the regular mailserverCycle's one
return m.transport.GetActiveStorenode()
}
peerID, _ := ms.PeerID()
return peerID
}


@ -1,19 +1,17 @@
package protocol
import (
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"math"
"sort"
"sync"
"time"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"go.uber.org/zap"
"github.com/waku-org/go-waku/waku/v2/api/history"
gocommon "github.com/status-im/status-go/common"
"github.com/status-im/status-go/connection"
"github.com/status-im/status-go/eth-node/crypto"
@ -31,22 +29,21 @@ const (
// tolerance is how many seconds of potentially out-of-order messages we want to fetch
tolerance uint32 = 60
mailserverRequestTimeout = 30 * time.Second
mailserverMaxTries uint = 2
mailserverMaxFailedRequests uint = 2
oneDayDuration = 24 * time.Hour
oneMonthDuration = 31 * oneDayDuration
backoffByUserAction = 0 * time.Second
)
// maxTopicsPerRequest sets the batch size to limit the number of topics per store query
var maxTopicsPerRequest int = 10
var ErrNoFiltersForChat = errors.New("no filter registered for given chat")
func (m *Messenger) shouldSync() (bool, error) {
if m.transport.WakuVersion() != 2 {
return false, nil
}
// TODO (pablo) support community store node as well
if m.mailserverCycle.activeMailserver == nil || !m.Online() {
if m.transport.GetActiveStorenode() == "" || !m.Online() {
return false, nil
}
@ -72,9 +69,9 @@ func (m *Messenger) scheduleSyncChat(chat *Chat) (bool, error) {
go func() {
defer gocommon.LogOnPanic()
ms := m.getActiveMailserver(chat.CommunityID)
peerID := m.getCommunityMailserver(chat.CommunityID)
_, err = m.performMailserverRequest(ms, func(mailServer mailservers.Mailserver) (*MessengerResponse, error) {
_, err = m.performStorenodeTask(func() (*MessengerResponse, error) {
response, err := m.syncChatWithFilters(mailServer, chat.ID)
response, err := m.syncChatWithFilters(peerID, chat.ID)
if err != nil {
m.logger.Error("failed to sync chat", zap.Error(err))
@ -85,7 +82,7 @@ func (m *Messenger) scheduleSyncChat(chat *Chat) (bool, error) {
m.config.messengerSignalsHandler.MessengerResponse(response)
}
return response, nil
})
}, history.WithPeerID(peerID))
if err != nil {
m.logger.Error("failed to perform mailserver request", zap.Error(err))
}
@ -93,65 +90,41 @@ func (m *Messenger) scheduleSyncChat(chat *Chat) (bool, error) {
return true, nil
}
func (m *Messenger) connectToNewMailserverAndWait() error {
// Handle pinned mailservers
m.logger.Info("disconnecting mailserver")
pinnedMailserver, err := m.getPinnedMailserver()
if err != nil {
m.logger.Error("could not obtain the pinned mailserver", zap.Error(err))
return err
}
// If pinned mailserver is not nil, no need to disconnect and wait for it to be available
if pinnedMailserver == nil {
m.disconnectActiveMailserver(graylistBackoff)
}
return m.findNewMailserver()
}
func (m *Messenger) performMailserverRequest(ms *mailservers.Mailserver, fn func(mailServer mailservers.Mailserver) (*MessengerResponse, error)) (*MessengerResponse, error) {
if ms == nil {
return nil, errors.New("mailserver not available")
}
m.mailserverCycle.RLock()
defer m.mailserverCycle.RUnlock()
var tries uint = 0
for tries < mailserverMaxTries {
if !m.communityStorenodes.IsCommunityStoreNode(ms.ID) && !m.isMailserverAvailable(ms.ID) {
return nil, errors.New("storenode not available")
}
m.logger.Info("trying performing mailserver requests", zap.Uint("try", tries), zap.String("mailserverID", ms.ID))
// Peform request
response, err := fn(*ms) // pass by value because we don't want the fn to modify the mailserver
if err == nil {
// Reset failed requests
m.logger.Debug("mailserver request performed successfully",
zap.String("mailserverID", ms.ID))
ms.FailedRequests = 0
return response, nil
}
m.logger.Error("failed to perform mailserver request",
zap.String("mailserverID", ms.ID),
zap.Uint("tries", tries),
zap.Error(err),
)
tries++
// Increment failed requests
ms.FailedRequests++
// Change mailserver
if ms.FailedRequests >= mailserverMaxFailedRequests {
return nil, errors.New("too many failed requests")
}
// Wait a couple of second not to spam
time.Sleep(2 * time.Second)
}
return nil, errors.New("failed to perform mailserver request")
}
func (m *Messenger) performStorenodeTask(task func() (*MessengerResponse, error), opts ...history.StorenodeTaskOption) (*MessengerResponse, error) {
responseCh := make(chan *MessengerResponse)
errCh := make(chan error)
go func() {
err := m.transport.PerformStorenodeTask(func() error {
r, err := task()
if err != nil {
return err
}
select {
case responseCh <- r:
default:
//
}
return nil
}, opts...)
if err != nil {
errCh <- err
}
}()
select {
case err := <-errCh:
return nil, err
case r := <-responseCh:
if r != nil {
return r, nil
}
return nil, errors.New("no response available")
case <-m.ctx.Done():
return nil, m.ctx.Err()
}
}
func (m *Messenger) scheduleSyncFilters(filters []*transport.Filter) (bool, error) {
@ -170,9 +143,9 @@ func (m *Messenger) scheduleSyncFilters(filters []*transport.Filter) (bool, erro
// split filters by community store node so we can request the filters to the correct mailserver
filtersByMs := m.SplitFiltersByStoreNode(filters)
for communityID, filtersForMs := range filtersByMs {
ms := m.getActiveMailserver(communityID)
peerID := m.getCommunityMailserver(communityID)
_, err := m.performMailserverRequest(ms, func(ms mailservers.Mailserver) (*MessengerResponse, error) {
_, err := m.performStorenodeTask(func() (*MessengerResponse, error) {
response, err := m.syncFilters(ms, filtersForMs)
response, err := m.syncFilters(peerID, filtersForMs)
if err != nil {
m.logger.Error("failed to sync filter", zap.Error(err))
@ -183,7 +156,7 @@ func (m *Messenger) scheduleSyncFilters(filters []*transport.Filter) (bool, erro
m.config.messengerSignalsHandler.MessengerResponse(response)
}
return response, nil
})
}, history.WithPeerID(peerID))
if err != nil {
m.logger.Error("failed to perform mailserver request", zap.Error(err))
}
@ -252,13 +225,13 @@ func (m *Messenger) topicsForChat(chatID string) (string, []types.TopicType, err
return filters[0].PubsubTopic, contentTopics, nil
}
func (m *Messenger) syncChatWithFilters(ms mailservers.Mailserver, chatID string) (*MessengerResponse, error) {
func (m *Messenger) syncChatWithFilters(peerID peer.ID, chatID string) (*MessengerResponse, error) {
filters, err := m.filtersForChat(chatID)
if err != nil {
return nil, err
}
return m.syncFilters(ms, filters)
return m.syncFilters(peerID, filters)
}
func (m *Messenger) syncBackup() error {
@ -277,9 +250,9 @@ func (m *Messenger) syncBackup() error {
from, to := m.calculateMailserverTimeBounds(oneMonthDuration)
batch := MailserverBatch{From: from, To: to, Topics: []types.TopicType{filter.ContentTopic}}
batch := types.MailserverBatch{From: from, To: to, Topics: []types.TopicType{filter.ContentTopic}}
ms := m.getActiveMailserver(filter.ChatID)
ms := m.getCommunityMailserver(filter.ChatID)
err = m.processMailserverBatch(*ms, batch)
err = m.processMailserverBatch(ms, batch)
if err != nil {
return err
}
@ -374,11 +347,11 @@ func (m *Messenger) RequestAllHistoricMessages(forceFetchingBackup, withRetries
filtersByMs := m.SplitFiltersByStoreNode(filters)
allResponses := &MessengerResponse{}
for communityID, filtersForMs := range filtersByMs {
ms := m.getActiveMailserver(communityID)
peerID := m.getCommunityMailserver(communityID)
if withRetries {
response, err := m.performMailserverRequest(ms, func(ms mailservers.Mailserver) (*MessengerResponse, error) {
response, err := m.performStorenodeTask(func() (*MessengerResponse, error) {
return m.syncFilters(ms, filtersForMs)
return m.syncFilters(peerID, filtersForMs)
})
}, history.WithPeerID(peerID))
if err != nil {
return nil, err
}
@ -388,7 +361,7 @@ func (m *Messenger) RequestAllHistoricMessages(forceFetchingBackup, withRetries
}
continue
}
response, err := m.syncFilters(*ms, filtersForMs)
response, err := m.syncFilters(peerID, filtersForMs)
if err != nil {
return nil, err
}
@ -404,10 +377,15 @@ const missingMessageCheckPeriod = 30 * time.Second
func (m *Messenger) checkForMissingMessagesLoop() {
defer gocommon.LogOnPanic()
if m.transport.WakuVersion() != 2 {
return
}
t := time.NewTicker(missingMessageCheckPeriod)
defer t.Stop()
mailserverAvailableSignal := m.mailserverCycle.availabilitySubscriptions.Subscribe()
mailserverAvailableSignal := m.transport.OnStorenodeAvailable()
for {
select {
@ -416,7 +394,6 @@ func (m *Messenger) checkForMissingMessagesLoop() {
// Wait for mailserver available, also triggered on mailserver change
case <-mailserverAvailableSignal:
mailserverAvailableSignal = m.mailserverCycle.availabilitySubscriptions.Subscribe()
case <-t.C:
@ -425,16 +402,11 @@ func (m *Messenger) checkForMissingMessagesLoop() {
filters := m.transport.Filters()
filtersByMs := m.SplitFiltersByStoreNode(filters)
for communityID, filtersForMs := range filtersByMs {
ms := m.getActiveMailserver(communityID)
peerID := m.getCommunityMailserver(communityID)
if ms == nil {
if peerID == "" {
continue
}
peerID, err := ms.PeerID()
if err != nil {
m.logger.Error("could not obtain the peerID")
return
}
m.transport.SetCriteriaForMissingMessageVerification(peerID, filtersForMs)
}
}
@ -444,7 +416,7 @@ func getPrioritizedBatches() []int {
return []int{1, 5, 10}
}
func (m *Messenger) syncFiltersFrom(ms mailservers.Mailserver, filters []*transport.Filter, lastRequest uint32) (*MessengerResponse, error) {
func (m *Messenger) syncFiltersFrom(peerID peer.ID, filters []*transport.Filter, lastRequest uint32) (*MessengerResponse, error) {
canSync, err := m.canSyncWithStoreNodes()
if err != nil {
return nil, err
@ -464,7 +436,7 @@ func (m *Messenger) syncFiltersFrom(ms mailservers.Mailserver, filters []*transp
topicsData[fmt.Sprintf("%s-%s", topic.PubsubTopic, topic.ContentTopic)] = topic
}
batches := make(map[string]map[int]MailserverBatch)
batches := make(map[string]map[int]types.MailserverBatch)
to := m.calculateMailserverTo()
var syncedTopics []mailservers.MailserverTopic
@ -502,7 +474,7 @@ func (m *Messenger) syncFiltersFrom(ms mailservers.Mailserver, filters []*transp
for pubsubTopic, contentTopics := range contentTopicsPerPubsubTopic {
if _, ok := batches[pubsubTopic]; !ok {
batches[pubsubTopic] = make(map[int]MailserverBatch)
batches[pubsubTopic] = make(map[int]types.MailserverBatch)
}
for _, filter := range contentTopics {
@ -561,7 +533,7 @@ func (m *Messenger) syncFiltersFrom(ms mailservers.Mailserver, filters []*transp
return nil, err
}
}
batch = MailserverBatch{From: from, To: to}
batch = types.MailserverBatch{From: from, To: to}
}
batch.ChatIDs = append(batch.ChatIDs, chatID)
@ -579,7 +551,7 @@ func (m *Messenger) syncFiltersFrom(ms mailservers.Mailserver, filters []*transp
m.config.messengerSignalsHandler.HistoryRequestStarted(len(batches))
}
var batches24h []MailserverBatch
var batches24h []types.MailserverBatch
for pubsubTopic := range batches {
batchKeys := make([]int, 0, len(batches[pubsubTopic]))
for k := range batches[pubsubTopic] {
@ -594,7 +566,7 @@ func (m *Messenger) syncFiltersFrom(ms mailservers.Mailserver, filters []*transp
for _, k := range keysToIterate {
batch := batches[pubsubTopic][k]
dayBatch := MailserverBatch{
dayBatch := types.MailserverBatch{
To: batch.To,
Cursor: batch.Cursor,
PubsubTopic: batch.PubsubTopic,
@ -624,7 +596,7 @@ func (m *Messenger) syncFiltersFrom(ms mailservers.Mailserver, filters []*transp
}
for _, batch := range batches24h {
err := m.processMailserverBatch(ms, batch)
err := m.processMailserverBatch(peerID, batch)
if err != nil {
m.logger.Error("error syncing topics", zap.Error(err))
return nil, err
@ -682,8 +654,8 @@ func (m *Messenger) syncFiltersFrom(ms mailservers.Mailserver, filters []*transp
return response, nil
}
func (m *Messenger) syncFilters(ms mailservers.Mailserver, filters []*transport.Filter) (*MessengerResponse, error) {
func (m *Messenger) syncFilters(peerID peer.ID, filters []*transport.Filter) (*MessengerResponse, error) {
return m.syncFiltersFrom(ms, filters, 0)
return m.syncFiltersFrom(peerID, filters, 0)
}
func (m *Messenger) calculateGapForChat(chat *Chat, from uint32) (*common.Message, error) {
@ -722,187 +694,6 @@ func (m *Messenger) calculateGapForChat(chat *Chat, from uint32) (*common.Messag
return message, m.persistence.SaveMessages([]*common.Message{message})
}
type work struct {
pubsubTopic string
contentTopics []types.TopicType
cursor types.StoreRequestCursor
limit uint32
}
type messageRequester interface {
SendMessagesRequestForTopics(
ctx context.Context,
peerID peer.ID,
from, to uint32,
previousStoreCursor types.StoreRequestCursor,
pubsubTopic string,
contentTopics []types.TopicType,
limit uint32,
waitForResponse bool,
processEnvelopes bool,
) (cursor types.StoreRequestCursor, envelopesCount int, err error)
}
func processMailserverBatch(
ctx context.Context,
messageRequester messageRequester,
batch MailserverBatch,
storenodeID peer.ID,
logger *zap.Logger,
pageLimit uint32,
shouldProcessNextPage func(int) (bool, uint32),
processEnvelopes bool,
) error {
var topicStrings []string
for _, t := range batch.Topics {
topicStrings = append(topicStrings, t.String())
}
logger = logger.With(zap.String("batch hash", batch.Hash()))
logger.Info("syncing topic",
zap.Any("chatIDs", batch.ChatIDs),
zap.String("fromString", time.Unix(int64(batch.From), 0).Format(time.RFC3339)),
zap.String("toString", time.Unix(int64(batch.To), 0).Format(time.RFC3339)),
zap.Any("topic", topicStrings),
zap.Int64("from", int64(batch.From)),
zap.Int64("to", int64(batch.To)))
wg := sync.WaitGroup{}
workWg := sync.WaitGroup{}
workCh := make(chan work, 1000) // each batch item is split in 10 topics bunch and sent to this channel
workCompleteCh := make(chan struct{}) // once all batch items are processed, this channel is triggered
semaphore := make(chan int, 3) // limit the number of concurrent queries
errCh := make(chan error)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// Producer
wg.Add(1)
go func() {
defer gocommon.LogOnPanic()
defer func() {
logger.Debug("mailserver batch producer complete")
wg.Done()
}()
allWorks := int(math.Ceil(float64(len(batch.Topics)) / float64(maxTopicsPerRequest)))
workWg.Add(allWorks)
for i := 0; i < len(batch.Topics); i += maxTopicsPerRequest {
j := i + maxTopicsPerRequest
if j > len(batch.Topics) {
j = len(batch.Topics)
}
select {
case <-ctx.Done():
logger.Debug("processBatch producer - context done")
return
default:
logger.Debug("processBatch producer - creating work")
workCh <- work{
pubsubTopic: batch.PubsubTopic,
contentTopics: batch.Topics[i:j],
limit: pageLimit,
}
time.Sleep(50 * time.Millisecond)
}
}
go func() {
defer gocommon.LogOnPanic()
workWg.Wait()
workCompleteCh <- struct{}{}
}()
logger.Debug("processBatch producer complete")
}()
var result error
loop:
for {
select {
case <-ctx.Done():
logger.Debug("processBatch cleanup - context done")
result = ctx.Err()
if errors.Is(result, context.Canceled) {
result = nil
}
break loop
case w, ok := <-workCh:
if !ok {
continue
}
logger.Debug("processBatch - received work")
semaphore <- 1
go func(w work) { // Consumer
defer gocommon.LogOnPanic()
defer func() {
workWg.Done()
<-semaphore
}()
queryCtx, queryCancel := context.WithTimeout(ctx, mailserverRequestTimeout)
cursor, envelopesCount, err := messageRequester.SendMessagesRequestForTopics(queryCtx, storenodeID, batch.From, batch.To, w.cursor, w.pubsubTopic, w.contentTopics, w.limit, true, processEnvelopes)
queryCancel()
if err != nil {
logger.Debug("failed to send request", zap.Error(err))
errCh <- err
return
}
processNextPage := true
nextPageLimit := pageLimit
if shouldProcessNextPage != nil {
processNextPage, nextPageLimit = shouldProcessNextPage(envelopesCount)
}
if !processNextPage {
return
}
// Check the cursor after calling `shouldProcessNextPage`.
// The app might use process the fetched envelopes in the callback for own needs.
if cursor == nil {
return
}
logger.Debug("processBatch producer - creating work (cursor)")
workWg.Add(1)
workCh <- work{
pubsubTopic: w.pubsubTopic,
contentTopics: w.contentTopics,
cursor: cursor,
limit: nextPageLimit,
}
}(w)
case err := <-errCh:
logger.Debug("processBatch - received error", zap.Error(err))
cancel() // Kill go routines
return err
case <-workCompleteCh:
logger.Debug("processBatch - all jobs complete")
cancel() // Kill go routines
}
}
wg.Wait()
// NOTE(camellos): Disabling for now, not critical and I'd rather take a bit more time
// to test it
//logger.Info("waiting until message processed")
//m.waitUntilP2PMessagesProcessed()
logger.Info("synced topic", zap.NamedError("hasError", result))
return result
}
func (m *Messenger) canSyncWithStoreNodes() (bool, error) {
if m.featureFlags.StoreNodesDisabled {
return false, nil
@ -918,7 +709,7 @@ func (m *Messenger) DisableStoreNodes() {
m.featureFlags.StoreNodesDisabled = true
}
func (m *Messenger) processMailserverBatch(ms mailservers.Mailserver, batch MailserverBatch) error {
func (m *Messenger) processMailserverBatch(peerID peer.ID, batch types.MailserverBatch) error {
canSync, err := m.canSyncWithStoreNodes()
if err != nil {
return err
@ -927,15 +718,10 @@ func (m *Messenger) processMailserverBatch(ms mailservers.Mailserver, batch Mail
return nil
}
mailserverID, err := ms.PeerID()
if err != nil {
return err
}
logger := m.logger.With(zap.String("mailserverID", ms.ID))
return processMailserverBatch(m.ctx, m.transport, batch, mailserverID, logger, defaultStoreNodeRequestPageSize, nil, false)
return m.transport.ProcessMailserverBatch(m.ctx, batch, peerID, defaultStoreNodeRequestPageSize, nil, false)
}
func (m *Messenger) processMailserverBatchWithOptions(ms mailservers.Mailserver, batch MailserverBatch, pageLimit uint32, shouldProcessNextPage func(int) (bool, uint32), processEnvelopes bool) error {
func (m *Messenger) processMailserverBatchWithOptions(peerID peer.ID, batch types.MailserverBatch, pageLimit uint64, shouldProcessNextPage func(int) (bool, uint64), processEnvelopes bool) error {
canSync, err := m.canSyncWithStoreNodes()
if err != nil {
return err
@ -944,27 +730,7 @@ func (m *Messenger) processMailserverBatchWithOptions(ms mailservers.Mailserver,
return nil
}
mailserverID, err := ms.PeerID()
if err != nil {
return err
}
logger := m.logger.With(zap.String("mailserverID", ms.ID))
return processMailserverBatch(m.ctx, m.transport, batch, mailserverID, logger, pageLimit, shouldProcessNextPage, processEnvelopes)
return m.transport.ProcessMailserverBatch(m.ctx, batch, peerID, pageLimit, shouldProcessNextPage, processEnvelopes)
}
type MailserverBatch struct {
From uint32
To uint32
Cursor string
PubsubTopic string
Topics []types.TopicType
ChatIDs []string
}
func (mb *MailserverBatch) Hash() string {
data := fmt.Sprintf("%d%d%s%s%v%v", mb.From, mb.To, mb.Cursor, mb.PubsubTopic, mb.Topics, mb.ChatIDs)
hash := sha256.Sum256([]byte(data))
return hex.EncodeToString(hash[:4])
}
func (m *Messenger) SyncChatFromSyncedFrom(chatID string) (uint32, error) {
@ -973,9 +739,9 @@ func (m *Messenger) SyncChatFromSyncedFrom(chatID string) (uint32, error) {
return 0, ErrChatNotFound
}
ms := m.getActiveMailserver(chat.CommunityID)
peerID := m.getCommunityMailserver(chat.CommunityID)
var from uint32
_, err := m.performMailserverRequest(ms, func(ms mailservers.Mailserver) (*MessengerResponse, error) {
_, err := m.performStorenodeTask(func() (*MessengerResponse, error) {
canSync, err := m.canSyncWithStoreNodes()
if err != nil {
return nil, err
@ -994,7 +760,7 @@ func (m *Messenger) SyncChatFromSyncedFrom(chatID string) (uint32, error) {
return nil, err
}
batch := MailserverBatch{
batch := types.MailserverBatch{
ChatIDs: []string{chatID},
To: chat.SyncedFrom,
From: chat.SyncedFrom - defaultSyncPeriod,
@ -1005,7 +771,7 @@ func (m *Messenger) SyncChatFromSyncedFrom(chatID string) (uint32, error) {
m.config.messengerSignalsHandler.HistoryRequestStarted(1)
}
err = m.processMailserverBatch(ms, batch)
err = m.processMailserverBatch(peerID, batch)
if err != nil {
return nil, err
}
@ -1022,7 +788,7 @@ func (m *Messenger) SyncChatFromSyncedFrom(chatID string) (uint32, error) {
err = m.persistence.SetSyncTimestamps(batch.From, chat.SyncedTo, chat.ID)
from = batch.From
return nil, err
})
}, history.WithPeerID(peerID))
if err != nil {
return 0, err
}
@ -1062,7 +828,7 @@ func (m *Messenger) FillGaps(chatID string, messageIDs []string) error {
}
}
batch := MailserverBatch{
batch := types.MailserverBatch{
ChatIDs: []string{chatID},
To: highestTo,
From: lowestFrom,
@ -1074,8 +840,8 @@ func (m *Messenger) FillGaps(chatID string, messageIDs []string) error {
m.config.messengerSignalsHandler.HistoryRequestStarted(1)
}
ms := m.getActiveMailserver(chat.CommunityID)
peerID := m.getCommunityMailserver(chat.CommunityID)
err = m.processMailserverBatch(*ms, batch)
err = m.processMailserverBatch(peerID, batch)
if err != nil {
return err
}
@ -1087,39 +853,18 @@ func (m *Messenger) FillGaps(chatID string, messageIDs []string) error {
return m.persistence.DeleteMessages(messageIDs)
}
func (m *Messenger) waitUntilP2PMessagesProcessed() { // nolint: unused
ticker := time.NewTicker(50 * time.Millisecond)
for { //nolint: gosimple
select {
case <-ticker.C:
if !m.transport.ProcessingP2PMessages() {
ticker.Stop()
return
}
}
}
}
func (m *Messenger) LoadFilters(filters []*transport.Filter) ([]*transport.Filter, error) {
return m.transport.LoadFilters(filters)
}
func (m *Messenger) ToggleUseMailservers(value bool) error {
m.mailserverCycle.Lock()
defer m.mailserverCycle.Unlock()
err := m.settings.SetUseMailservers(value)
if err != nil {
return err
}
m.disconnectActiveMailserver(backoffByUserAction)
m.transport.DisconnectActiveStorenode(m.ctx, backoffByUserAction, value)
if value {
m.cycleMailservers()
return nil
}
return nil
}
@ -1129,8 +874,8 @@ func (m *Messenger) SetPinnedMailservers(mailservers map[string]string) error {
return err
}
m.disconnectActiveMailserver(backoffByUserAction)
m.transport.DisconnectActiveStorenode(m.ctx, backoffByUserAction, true)
m.cycleMailservers()
return nil
}
@ -1162,8 +907,8 @@ func (m *Messenger) fetchMessages(chatID string, duration time.Duration) (uint32
return 0, ErrChatNotFound
}
ms := m.getActiveMailserver(chat.CommunityID)
peerID := m.getCommunityMailserver(chat.CommunityID)
_, err := m.performMailserverRequest(ms, func(ms mailservers.Mailserver) (*MessengerResponse, error) {
_, err := m.performStorenodeTask(func() (*MessengerResponse, error) {
canSync, err := m.canSyncWithStoreNodes()
if err != nil {
return nil, err
@ -1172,13 +917,13 @@ func (m *Messenger) fetchMessages(chatID string, duration time.Duration) (uint32
return nil, nil
}
m.logger.Debug("fetching messages", zap.String("chatID", chatID), zap.String("mailserver", ms.Name))
m.logger.Debug("fetching messages", zap.String("chatID", chatID), zap.Stringer("storenodeID", peerID))
pubsubTopic, topics, err := m.topicsForChat(chatID)
if err != nil {
return nil, nil
}
batch := MailserverBatch{
batch := types.MailserverBatch{
ChatIDs: []string{chatID},
From: from,
To: to,
@ -1189,7 +934,7 @@ func (m *Messenger) fetchMessages(chatID string, duration time.Duration) (uint32
m.config.messengerSignalsHandler.HistoryRequestStarted(1)
}
err = m.processMailserverBatch(ms, batch)
err = m.processMailserverBatch(peerID, batch)
if err != nil {
return nil, err
}
@ -1206,7 +951,7 @@ func (m *Messenger) fetchMessages(chatID string, duration time.Duration) (uint32
err = m.persistence.SetSyncTimestamps(batch.From, chat.SyncedTo, chat.ID)
from = batch.From
return nil, err
})
}, history.WithPeerID(peerID))
if err != nil {
return 0, err
}
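For reference, a sketch of the paging hook that `processMailserverBatchWithOptions` now forwards to `transport.ProcessMailserverBatch`. The callback receives the number of envelopes returned for the current page and decides whether to request another page and with what limit. The method name and page size below are illustrative only.

```go
// fetchFirstEnvelopesExample is a hypothetical helper: it keeps paging until
// the first non-empty page is returned, then stops.
func (m *Messenger) fetchFirstEnvelopesExample(peerID peer.ID, batch types.MailserverBatch) error {
	const initialPageSize uint64 = 20

	stopAfterFirstNonEmptyPage := func(envelopesCount int) (bool, uint64) {
		if envelopesCount > 0 {
			return false, 0 // got something; do not request further pages
		}
		return true, initialPageSize // keep looking with the same page size
	}

	return m.processMailserverBatchWithOptions(peerID, batch, initialPageSize, stopAfterFirstNonEmptyPage, false)
}
```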


@ -1,178 +1,18 @@
package protocol
import (
"context"
"crypto/rand"
"math"
"math/big"
"net"
"runtime"
"sort"
"sync"
"time"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"go.uber.org/zap"
"github.com/waku-org/go-waku/waku/v2/utils"
"github.com/status-im/status-go/common"
gocommon "github.com/status-im/status-go/common"
"github.com/status-im/status-go/params"
"github.com/status-im/status-go/protocol/storenodes"
"github.com/status-im/status-go/services/mailservers"
"github.com/status-im/status-go/signal"
)
const defaultBackoff = 10 * time.Second
const graylistBackoff = 3 * time.Minute
const backoffByUserAction = 0
const isAndroidEmulator = runtime.GOOS == "android" && runtime.GOARCH == "amd64"
const findNearestMailServer = !isAndroidEmulator
const overrideDNS = runtime.GOOS == "android" || runtime.GOOS == "ios"
const bootstrapDNS = "8.8.8.8:53"
type byRTTMsAndCanConnectBefore []SortedMailserver
func (s byRTTMsAndCanConnectBefore) Len() int {
return len(s)
}
func (s byRTTMsAndCanConnectBefore) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s byRTTMsAndCanConnectBefore) Less(i, j int) bool {
// Slightly inaccurate as time sensitive sorting, but it does not matter so much
now := time.Now()
if s[i].CanConnectAfter.Before(now) && s[j].CanConnectAfter.Before(now) {
return s[i].RTT < s[j].RTT
}
return s[i].CanConnectAfter.Before(s[j].CanConnectAfter)
}
func (m *Messenger) StartMailserverCycle(mailservers []mailservers.Mailserver) error {
if m.transport.WakuVersion() != 2 {
m.logger.Warn("not starting mailserver cycle: requires wakuv2")
return nil
}
m.mailserverCycle.allMailservers = mailservers
if len(mailservers) == 0 {
m.logger.Warn("not starting mailserver cycle: empty mailservers list")
return nil
}
for _, storenode := range mailservers {
peerInfo, err := storenode.PeerInfo()
if err != nil {
return err
}
for _, addr := range utils.EncapsulatePeerID(peerInfo.ID, peerInfo.Addrs...) {
_, err := m.transport.AddStorePeer(addr)
if err != nil {
return err
}
}
}
go m.verifyStorenodeStatus()
m.logger.Debug("starting mailserver cycle",
zap.Uint("WakuVersion", m.transport.WakuVersion()),
zap.Any("mailservers", mailservers),
)
return nil
}
func (m *Messenger) DisconnectActiveMailserver() {
m.mailserverCycle.Lock()
defer m.mailserverCycle.Unlock()
m.disconnectActiveMailserver(graylistBackoff)
}
func (m *Messenger) disconnectMailserver(backoffDuration time.Duration) error {
if m.mailserverCycle.activeMailserver == nil {
m.logger.Info("no active mailserver")
return nil
}
m.logger.Info("disconnecting active mailserver", zap.String("nodeID", m.mailserverCycle.activeMailserver.ID))
m.mailPeersMutex.Lock()
pInfo, ok := m.mailserverCycle.peers[m.mailserverCycle.activeMailserver.ID]
if ok {
pInfo.status = disconnected
pInfo.canConnectAfter = time.Now().Add(backoffDuration)
m.mailserverCycle.peers[m.mailserverCycle.activeMailserver.ID] = pInfo
} else {
m.mailserverCycle.peers[m.mailserverCycle.activeMailserver.ID] = peerStatus{
status: disconnected,
mailserver: *m.mailserverCycle.activeMailserver,
canConnectAfter: time.Now().Add(backoffDuration),
}
}
m.mailPeersMutex.Unlock()
m.mailserverCycle.activeMailserver = nil
return nil
}
func (m *Messenger) disconnectActiveMailserver(backoffDuration time.Duration) {
err := m.disconnectMailserver(backoffDuration)
if err != nil {
m.logger.Error("failed to disconnect mailserver", zap.Error(err))
}
signal.SendMailserverChanged(nil)
}
func (m *Messenger) cycleMailservers() {
m.logger.Info("Automatically switching mailserver")
if m.mailserverCycle.activeMailserver != nil {
m.disconnectActiveMailserver(graylistBackoff)
}
useMailserver, err := m.settings.CanUseMailservers()
if err != nil {
m.logger.Error("failed to get use mailservers", zap.Error(err))
return
}
if !useMailserver {
m.logger.Info("Skipping mailserver search due to useMailserver being false")
return
}
err = m.findNewMailserver()
if err != nil {
m.logger.Error("Error getting new mailserver", zap.Error(err))
}
}
func poolSize(fleetSize int) int {
return int(math.Ceil(float64(fleetSize) / 4))
}
func (m *Messenger) getFleet() (string, error) {
var fleet string
dbFleet, err := m.settings.GetFleet()
if err != nil {
return "", err
}
if dbFleet != "" {
fleet = dbFleet
} else if m.config.clusterConfig.Fleet != "" {
fleet = m.config.clusterConfig.Fleet
} else {
fleet = params.FleetStatusProd
}
return fleet, nil
}
func (m *Messenger) allMailservers() ([]mailservers.Mailserver, error) {
// Get configured fleet // Get configured fleet
fleet, err := m.getFleet() fleet, err := m.getFleet()
if err != nil { if err != nil {
@ -199,221 +39,46 @@ func (m *Messenger) allMailservers() ([]mailservers.Mailserver, error) {
return allMailservers, nil return allMailservers, nil
} }
type SortedMailserver struct { func (m *Messenger) setupStorenodes(storenodes []mailservers.Mailserver) error {
Mailserver mailservers.Mailserver if m.transport.WakuVersion() != 2 {
RTT time.Duration
CanConnectAfter time.Time
}
func (m *Messenger) getAvailableMailserversSortedByRTT(allMailservers []mailservers.Mailserver) []mailservers.Mailserver {
// TODO: this can be replaced by peer selector once code is moved to go-waku api
availableMailservers := make(map[string]time.Duration)
availableMailserversMutex := sync.Mutex{}
availableMailserversWg := sync.WaitGroup{}
for _, mailserver := range allMailservers {
availableMailserversWg.Add(1)
go func(mailserver mailservers.Mailserver) {
defer gocommon.LogOnPanic()
defer availableMailserversWg.Done()
peerID, err := mailserver.PeerID()
if err != nil {
return
}
ctx, cancel := context.WithTimeout(m.ctx, 4*time.Second)
defer cancel()
rtt, err := m.transport.PingPeer(ctx, peerID)
if err == nil { // pinging mailservers might fail, but we don't care
availableMailserversMutex.Lock()
availableMailservers[mailserver.ID] = rtt
availableMailserversMutex.Unlock()
}
}(mailserver)
}
availableMailserversWg.Wait()
if len(availableMailservers) == 0 {
m.logger.Warn("No mailservers available") // Do nothing...
return nil return nil
} }
mailserversByID := make(map[string]mailservers.Mailserver) for _, storenode := range storenodes {
for idx := range allMailservers {
mailserversByID[allMailservers[idx].ID] = allMailservers[idx]
}
var sortedMailservers []SortedMailserver
for mailserverID, rtt := range availableMailservers {
ms := mailserversByID[mailserverID]
sortedMailserver := SortedMailserver{
Mailserver: ms,
RTT: rtt,
}
m.mailPeersMutex.Lock()
pInfo, ok := m.mailserverCycle.peers[ms.ID]
m.mailPeersMutex.Unlock()
if ok {
if time.Now().Before(pInfo.canConnectAfter) {
continue // We can't connect to this node yet
}
}
sortedMailservers = append(sortedMailservers, sortedMailserver)
}
sort.Sort(byRTTMsAndCanConnectBefore(sortedMailservers))
result := make([]mailservers.Mailserver, len(sortedMailservers)) peerInfo, err := storenode.PeerInfo()
for i, s := range sortedMailservers {
result[i] = s.Mailserver
}
return result
}
func (m *Messenger) findNewMailserver() error {
// we have to override DNS manually because of https://github.com/status-im/status-mobile/issues/19581
if overrideDNS {
var dialer net.Dialer
net.DefaultResolver = &net.Resolver{
PreferGo: false,
Dial: func(context context.Context, _, _ string) (net.Conn, error) {
conn, err := dialer.DialContext(context, "udp", bootstrapDNS)
if err != nil {
return nil, err
}
return conn, nil
},
}
}
pinnedMailserver, err := m.getPinnedMailserver()
if err != nil {
m.logger.Error("Could not obtain the pinned mailserver", zap.Error(err))
return err
}
if pinnedMailserver != nil {
return m.connectToMailserver(*pinnedMailserver)
}
m.logger.Info("Finding a new mailserver...")
allMailservers := m.mailserverCycle.allMailservers
// TODO: remove this check once sockets are stable on x86_64 emulators
if findNearestMailServer {
allMailservers = m.getAvailableMailserversSortedByRTT(allMailservers)
}
// Picks a random mailserver amongs the ones with the lowest latency
// The pool size is 1/4 of the mailservers were pinged successfully
pSize := poolSize(len(allMailservers) - 1)
if pSize <= 0 {
pSize = len(allMailservers)
if pSize <= 0 {
m.logger.Warn("No storenodes available") // Do nothing...
return nil
}
}
r, err := rand.Int(rand.Reader, big.NewInt(int64(pSize)))
if err != nil {
return err
}
ms := allMailservers[r.Int64()]
return m.connectToMailserver(ms)
}
func (m *Messenger) mailserverStatus(mailserverID string) connStatus {
m.mailPeersMutex.RLock()
defer m.mailPeersMutex.RUnlock()
peer, ok := m.mailserverCycle.peers[mailserverID]
if !ok {
return disconnected
}
return peer.status
}
func (m *Messenger) connectToMailserver(ms mailservers.Mailserver) error {
m.logger.Info("connecting to mailserver", zap.String("mailserverID", ms.ID))
m.mailserverCycle.activeMailserver = &ms
signal.SendMailserverChanged(m.mailserverCycle.activeMailserver)
mailserverStatus := m.mailserverStatus(ms.ID)
if mailserverStatus != connected {
m.mailPeersMutex.Lock()
m.mailserverCycle.peers[ms.ID] = peerStatus{
status: connected,
lastConnectionAttempt: time.Now(),
canConnectAfter: time.Now().Add(defaultBackoff),
mailserver: ms,
}
m.mailPeersMutex.Unlock()
m.mailserverCycle.activeMailserver.FailedRequests = 0
peerID, err := m.mailserverCycle.activeMailserver.PeerID()
if err != nil { if err != nil {
m.logger.Error("could not decode the peer id of mailserver", zap.Error(err))
return err return err
} }
m.logger.Info("mailserver available", zap.String("mailserverID", m.mailserverCycle.activeMailserver.ID)) for _, addr := range utils.EncapsulatePeerID(peerInfo.ID, peerInfo.Addrs...) {
m.mailserverCycle.availabilitySubscriptions.EmitMailserverAvailable() _, err := m.transport.AddStorePeer(addr)
signal.SendMailserverAvailable(m.mailserverCycle.activeMailserver) if err != nil {
return err
m.transport.SetStorePeerID(peerID) }
}
// Query mailserver
m.asyncRequestAllHistoricMessages()
} }
return nil return nil
} }
// getActiveMailserver returns the active mailserver if a communityID is present then it'll return the mailserver func (m *Messenger) getFleet() (string, error) {
// for that community if it has a mailserver setup otherwise it'll return the global mailserver var fleet string
func (m *Messenger) getActiveMailserver(communityID ...string) *mailservers.Mailserver { dbFleet, err := m.settings.GetFleet()
if len(communityID) == 0 || communityID[0] == "" {
return m.mailserverCycle.activeMailserver
}
ms, err := m.communityStorenodes.GetStorenodeByCommunityID(communityID[0])
if err != nil { if err != nil {
if !errors.Is(err, storenodes.ErrNotFound) { return "", err
m.logger.Error("getting storenode for community, using global", zap.String("communityID", communityID[0]), zap.Error(err))
}
// if we don't find a specific mailserver for the community, we just use the regular mailserverCycle's one
return m.mailserverCycle.activeMailserver
} }
return &ms if dbFleet != "" {
} fleet = dbFleet
} else if m.config.clusterConfig.Fleet != "" {
func (m *Messenger) getActiveMailserverID(communityID ...string) string { fleet = m.config.clusterConfig.Fleet
ms := m.getActiveMailserver(communityID...) } else {
if ms == nil { fleet = params.FleetStatusProd
return ""
} }
return ms.ID return fleet, nil
}
func (m *Messenger) isMailserverAvailable(mailserverID string) bool {
return m.mailserverStatus(mailserverID) == connected
}
func (m *Messenger) penalizeMailserver(id string) {
m.mailPeersMutex.Lock()
defer m.mailPeersMutex.Unlock()
pInfo, ok := m.mailserverCycle.peers[id]
if !ok {
pInfo.status = disconnected
}
pInfo.canConnectAfter = time.Now().Add(graylistBackoff)
m.mailserverCycle.peers[id] = pInfo
} }
func (m *Messenger) asyncRequestAllHistoricMessages() { func (m *Messenger) asyncRequestAllHistoricMessages() {
if !m.config.codeControlFlags.AutoRequestHistoricMessages { if !m.config.codeControlFlags.AutoRequestHistoricMessages || m.transport.WakuVersion() == 1 {
return return
} }
@ -428,128 +93,117 @@ func (m *Messenger) asyncRequestAllHistoricMessages() {
}() }()
} }
func (m *Messenger) verifyStorenodeStatus() { func (m *Messenger) GetPinnedStorenode() (peer.ID, error) {
defer common.LogOnPanic()
ticker := time.NewTicker(1 * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C:
err := m.disconnectStorenodeIfRequired()
if err != nil {
m.logger.Error("failed to handle mailserver cycle event", zap.Error(err))
continue
}
case <-m.quit:
return
}
}
}
func (m *Messenger) getPinnedMailserver() (*mailservers.Mailserver, error) {
fleet, err := m.getFleet() fleet, err := m.getFleet()
if err != nil { if err != nil {
return nil, err return "", err
} }
pinnedMailservers, err := m.settings.GetPinnedMailservers() pinnedMailservers, err := m.settings.GetPinnedMailservers()
if err != nil { if err != nil {
return nil, err return "", err
} }
pinnedMailserver, ok := pinnedMailservers[fleet] pinnedMailserver, ok := pinnedMailservers[fleet]
if !ok { if !ok {
return nil, nil return "", nil
} }
fleetMailservers := mailservers.DefaultMailservers() fleetMailservers := mailservers.DefaultMailservers()
for _, c := range fleetMailservers { for _, c := range fleetMailservers {
if c.Fleet == fleet && c.ID == pinnedMailserver { if c.Fleet == fleet && c.ID == pinnedMailserver {
return &c, nil return c.PeerID()
} }
} }
if m.mailserversDatabase != nil { if m.mailserversDatabase != nil {
customMailservers, err := m.mailserversDatabase.Mailservers() customMailservers, err := m.mailserversDatabase.Mailservers()
if err != nil { if err != nil {
return nil, err return "", err
} }
for _, c := range customMailservers { for _, c := range customMailservers {
if c.Fleet == fleet && c.ID == pinnedMailserver { if c.Fleet == fleet && c.ID == pinnedMailserver {
return &c, nil return c.PeerID()
} }
} }
} }
return nil, nil return "", nil
} }
func (m *Messenger) disconnectStorenodeIfRequired() error { func (m *Messenger) UseStorenodes() (bool, error) {
m.logger.Debug("wakuV2 storenode status verification") return m.settings.CanUseMailservers()
if m.mailserverCycle.activeMailserver == nil {
// No active storenode, find a new one
m.cycleMailservers()
return nil
}
// Check whether we want to disconnect the active storenode
if m.mailserverCycle.activeMailserver.FailedRequests >= mailserverMaxFailedRequests {
m.penalizeMailserver(m.mailserverCycle.activeMailserver.ID)
signal.SendMailserverNotWorking()
m.logger.Info("too many failed requests", zap.String("storenode", m.mailserverCycle.activeMailserver.ID))
m.mailserverCycle.activeMailserver.FailedRequests = 0
return m.connectToNewMailserverAndWait()
}
return nil
} }
func (m *Messenger) waitForAvailableStoreNode(timeout time.Duration) bool { func (m *Messenger) Storenodes() ([]peer.ID, error) {
// Add 1 second to timeout, because the mailserver cycle has 1 second ticker, which doesn't tick on start. mailservers, err := m.AllMailservers()
// This can be improved after merging https://github.com/status-im/status-go/pull/4380. if err != nil {
// NOTE: https://stackoverflow.com/questions/32705582/how-to-get-time-tick-to-tick-immediately return nil, err
timeout += time.Second }
finish := make(chan struct{}) var result []peer.ID
cancel := make(chan struct{}) for _, m := range mailservers {
peerID, err := m.PeerID()
if err != nil {
return nil, err
}
result = append(result, peerID)
}
wg := sync.WaitGroup{} return result, nil
wg.Add(1) }
go func() { func (m *Messenger) checkForStorenodeCycleSignals() {
defer gocommon.LogOnPanic() if m.transport.WakuVersion() != 2 {
defer func() { return
wg.Done() }
}()
for !m.isMailserverAvailable(m.getActiveMailserverID()) { changed := m.transport.OnStorenodeChanged()
select { notWorking := m.transport.OnStorenodeNotWorking()
case <-m.mailserverCycle.availabilitySubscriptions.Subscribe(): available := m.transport.OnStorenodeAvailable()
case <-cancel:
return allMailservers, err := m.AllMailservers()
if err != nil {
m.logger.Error("Could not retrieve mailserver list", zap.Error(err))
return
}
mailserverMap := make(map[peer.ID]mailservers.Mailserver)
for _, ms := range allMailservers {
peerID, err := ms.PeerID()
if err != nil {
m.logger.Error("could not retrieve peerID", zap.Error(err))
return
}
mailserverMap[peerID] = ms
}
for {
select {
case <-m.ctx.Done():
return
case <-notWorking:
signal.SendMailserverNotWorking()
case activeMailserver := <-changed:
if activeMailserver != "" {
ms, ok := mailserverMap[activeMailserver]
if ok {
signal.SendMailserverChanged(&ms)
}
} else {
signal.SendMailserverChanged(nil)
}
case activeMailserver := <-available:
if activeMailserver != "" {
ms, ok := mailserverMap[activeMailserver]
if ok {
signal.SendMailserverAvailable(&ms)
}
m.asyncRequestAllHistoricMessages()
} }
} }
}()
go func() {
defer gocommon.LogOnPanic()
defer func() {
close(finish)
}()
wg.Wait()
}()
select {
case <-finish:
case <-time.After(timeout):
close(cancel)
case <-m.ctx.Done():
close(cancel)
} }
return m.isMailserverAvailable(m.getActiveMailserverID())
} }

View File

@ -1,167 +0,0 @@
package protocol
import (
"context"
"crypto/rand"
"encoding/hex"
"errors"
"math/big"
"testing"
"time"
"github.com/google/uuid"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/stretchr/testify/require"
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/protocol/tt"
)
type queryResponse struct {
topics []types.TopicType
err error // Indicates if this response will simulate an error returned by SendMessagesRequestForTopics
cursor []byte
}
type mockTransport struct {
queryResponses map[string]queryResponse
}
func newMockTransport() *mockTransport {
return &mockTransport{
queryResponses: make(map[string]queryResponse),
}
}
func getInitialResponseKey(topics []types.TopicType) string {
return hex.EncodeToString(append([]byte("start"), topics[0][:]...))
}
func (t *mockTransport) SendMessagesRequestForTopics(
ctx context.Context,
peerID peer.ID,
from, to uint32,
prevCursor types.StoreRequestCursor,
pubsubTopic string,
contentTopics []types.TopicType,
limit uint32,
waitForResponse bool,
processEnvelopes bool,
) (cursor types.StoreRequestCursor, envelopesCount int, err error) {
var response queryResponse
if prevCursor == nil {
initialResponse := getInitialResponseKey(contentTopics)
response = t.queryResponses[initialResponse]
} else {
response = t.queryResponses[hex.EncodeToString(prevCursor)]
}
return response.cursor, 0, response.err
}
func (t *mockTransport) Populate(topics []types.TopicType, responses int, includeRandomError bool) error {
if responses <= 0 || len(topics) == 0 {
return errors.New("invalid input parameters")
}
var topicBatches [][]types.TopicType
for i := 0; i < len(topics); i += maxTopicsPerRequest {
// Split batch in 10-contentTopic subbatches
j := i + maxTopicsPerRequest
if j > len(topics) {
j = len(topics)
}
topicBatches = append(topicBatches, topics[i:j])
}
randomErrIdx, err := rand.Int(rand.Reader, big.NewInt(int64(len(topicBatches))))
if err != nil {
return err
}
randomErrIdxInt := int(randomErrIdx.Int64())
for i, topicBatch := range topicBatches {
// Setup initial response
initialResponseKey := getInitialResponseKey(topicBatch)
t.queryResponses[initialResponseKey] = queryResponse{
topics: topicBatch,
err: nil,
}
prevKey := initialResponseKey
for x := 0; x < responses-1; x++ {
newResponseCursor := []byte(uuid.New().String())
newResponseKey := hex.EncodeToString(newResponseCursor)
var err error
if includeRandomError && i == randomErrIdxInt && x == responses-2 { // Include an error in last request
err = errors.New("random error")
}
t.queryResponses[newResponseKey] = queryResponse{
topics: topicBatch,
err: err,
}
// Updating prev response cursor to point to the new response
prevResponse := t.queryResponses[prevKey]
prevResponse.cursor = newResponseCursor
t.queryResponses[prevKey] = prevResponse
prevKey = newResponseKey
}
}
return nil
}
func TestProcessMailserverBatchHappyPath(t *testing.T) {
ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
defer cancel()
logger := tt.MustCreateTestLogger()
mailserverID, err := peer.Decode("16Uiu2HAkw3x97MbbZSWHbdF5bob45vcZvPPK4s4Mjyv2mxyB9GS3")
require.NoError(t, err)
topics := []types.TopicType{}
for i := 0; i < 22; i++ {
topics = append(topics, types.BytesToTopic([]byte{0, 0, 0, byte(i)}))
}
testTransport := newMockTransport()
err = testTransport.Populate(topics, 10, false)
require.NoError(t, err)
testBatch := MailserverBatch{
Topics: topics,
}
err = processMailserverBatch(ctx, testTransport, testBatch, mailserverID, logger, defaultStoreNodeRequestPageSize, nil, false)
require.NoError(t, err)
}
func TestProcessMailserverBatchFailure(t *testing.T) {
ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
defer cancel()
logger := tt.MustCreateTestLogger()
mailserverID, err := peer.Decode("16Uiu2HAkw3x97MbbZSWHbdF5bob45vcZvPPK4s4Mjyv2mxyB9GS3")
require.NoError(t, err)
topics := []types.TopicType{}
for i := 0; i < 5; i++ {
topics = append(topics, types.BytesToTopic([]byte{0, 0, 0, byte(i)}))
}
testTransport := newMockTransport()
err = testTransport.Populate(topics, 4, true)
require.NoError(t, err)
testBatch := MailserverBatch{
Topics: topics,
}
err = processMailserverBatch(ctx, testTransport, testBatch, mailserverID, logger, defaultStoreNodeRequestPageSize, nil, false)
require.Error(t, err)
}

View File

@ -10,13 +10,13 @@ import (
gocommon "github.com/status-im/status-go/common" gocommon "github.com/status-im/status-go/common"
"github.com/status-im/status-go/eth-node/crypto" "github.com/status-im/status-go/eth-node/crypto"
"github.com/status-im/status-go/protocol/common/shard" "github.com/status-im/status-go/protocol/common/shard"
"github.com/waku-org/go-waku/waku/v2/api/history"
"go.uber.org/zap" "go.uber.org/zap"
"github.com/status-im/status-go/eth-node/types" "github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/protocol/communities" "github.com/status-im/status-go/protocol/communities"
"github.com/status-im/status-go/protocol/transport" "github.com/status-im/status-go/protocol/transport"
"github.com/status-im/status-go/services/mailservers"
) )
const ( const (
@ -57,7 +57,7 @@ type StoreNodeRequestManager struct {
// activeRequestsLock should be locked each time activeRequests is being accessed or changed. // activeRequestsLock should be locked each time activeRequests is being accessed or changed.
activeRequestsLock sync.RWMutex activeRequestsLock sync.RWMutex
onPerformingBatch func(MailserverBatch) onPerformingBatch func(types.MailserverBatch)
} }
func NewStoreNodeRequestManager(m *Messenger) *StoreNodeRequestManager { func NewStoreNodeRequestManager(m *Messenger) *StoreNodeRequestManager {
@ -374,7 +374,7 @@ func (r *storeNodeRequest) finalize() {
} }
} }
func (r *storeNodeRequest) shouldFetchNextPage(envelopesCount int) (bool, uint32) { func (r *storeNodeRequest) shouldFetchNextPage(envelopesCount int) (bool, uint64) {
logger := r.manager.logger.With( logger := r.manager.logger.With(
zap.Any("requestID", r.requestID), zap.Any("requestID", r.requestID),
zap.Int("envelopesCount", envelopesCount)) zap.Int("envelopesCount", envelopesCount))
@ -524,13 +524,13 @@ func (r *storeNodeRequest) routine() {
communityID := r.requestID.getCommunityID() communityID := r.requestID.getCommunityID()
if r.requestID.RequestType != storeNodeCommunityRequest || !r.manager.messenger.communityStorenodes.HasStorenodeSetup(communityID) { if r.requestID.RequestType != storeNodeCommunityRequest || !r.manager.messenger.communityStorenodes.HasStorenodeSetup(communityID) {
if !r.manager.messenger.waitForAvailableStoreNode(storeNodeAvailableTimeout) { if !r.manager.messenger.transport.WaitForAvailableStoreNode(storeNodeAvailableTimeout) {
r.result.err = fmt.Errorf("store node is not available") r.result.err = fmt.Errorf("store node is not available")
return return
} }
} }
storeNode := r.manager.messenger.getActiveMailserver(communityID) storeNode := r.manager.messenger.getCommunityMailserver(communityID)
// Check if community already exists locally and get Clock. // Check if community already exists locally and get Clock.
if r.requestID.RequestType == storeNodeCommunityRequest { if r.requestID.RequestType == storeNodeCommunityRequest {
@ -543,8 +543,8 @@ func (r *storeNodeRequest) routine() {
// Start store node request // Start store node request
from, to := r.manager.messenger.calculateMailserverTimeBounds(oneMonthDuration) from, to := r.manager.messenger.calculateMailserverTimeBounds(oneMonthDuration)
_, err := r.manager.messenger.performMailserverRequest(storeNode, func(ms mailservers.Mailserver) (*MessengerResponse, error) { _, err := r.manager.messenger.performStorenodeTask(func() (*MessengerResponse, error) {
batch := MailserverBatch{ batch := types.MailserverBatch{
From: from, From: from,
To: to, To: to,
PubsubTopic: r.pubsubTopic, PubsubTopic: r.pubsubTopic,
@ -555,8 +555,8 @@ func (r *storeNodeRequest) routine() {
r.manager.onPerformingBatch(batch) r.manager.onPerformingBatch(batch)
} }
return nil, r.manager.messenger.processMailserverBatchWithOptions(ms, batch, r.config.InitialPageSize, r.shouldFetchNextPage, true) return nil, r.manager.messenger.processMailserverBatchWithOptions(storeNode, batch, r.config.InitialPageSize, r.shouldFetchNextPage, true)
}) }, history.WithPeerID(storeNode))
r.result.err = err r.result.err = err
} }

View File

@ -3,8 +3,8 @@ package protocol
type StoreNodeRequestConfig struct { type StoreNodeRequestConfig struct {
WaitForResponse bool WaitForResponse bool
StopWhenDataFound bool StopWhenDataFound bool
InitialPageSize uint32 InitialPageSize uint64
FurtherPageSize uint32 FurtherPageSize uint64
} }
type StoreNodeRequestOption func(*StoreNodeRequestConfig) type StoreNodeRequestOption func(*StoreNodeRequestConfig)
@ -40,13 +40,13 @@ func WithStopWhenDataFound(stopWhenDataFound bool) StoreNodeRequestOption {
} }
} }
func WithInitialPageSize(initialPageSize uint32) StoreNodeRequestOption { func WithInitialPageSize(initialPageSize uint64) StoreNodeRequestOption {
return func(c *StoreNodeRequestConfig) { return func(c *StoreNodeRequestConfig) {
c.InitialPageSize = initialPageSize c.InitialPageSize = initialPageSize
} }
} }
func WithFurtherPageSize(furtherPageSize uint32) StoreNodeRequestOption { func WithFurtherPageSize(furtherPageSize uint64) StoreNodeRequestOption {
return func(c *StoreNodeRequestConfig) { return func(c *StoreNodeRequestConfig) {
c.FurtherPageSize = furtherPageSize c.FurtherPageSize = furtherPageSize
} }

View File

@ -351,10 +351,10 @@ func (s *MessengerStoreNodeCommunitySuite) TestToggleUseMailservers() {
// Enable use of mailservers // Enable use of mailservers
err := s.owner.ToggleUseMailservers(true) err := s.owner.ToggleUseMailservers(true)
s.Require().NoError(err) s.Require().NoError(err)
s.Require().NotNil(s.owner.mailserverCycle.activeMailserver) s.Require().NotNil(s.owner.transport.GetActiveStorenode())
// Disable use of mailservers // Disable use of mailservers
err = s.owner.ToggleUseMailservers(false) err = s.owner.ToggleUseMailservers(false)
s.Require().NoError(err) s.Require().NoError(err)
s.Require().Nil(s.owner.mailserverCycle.activeMailserver) s.Require().Nil(s.owner.transport.GetActiveStorenode())
} }

View File

@ -235,7 +235,7 @@ func (s *MessengerStoreNodeRequestSuite) newMessenger(shh types.Waku, logger *za
} }
func (s *MessengerStoreNodeRequestSuite) createCommunity(m *Messenger) *communities.Community { func (s *MessengerStoreNodeRequestSuite) createCommunity(m *Messenger) *communities.Community {
s.waitForAvailableStoreNode(m) s.WaitForAvailableStoreNode(m)
storeNodeSubscription := s.setupStoreNodeEnvelopesWatcher(nil) storeNodeSubscription := s.setupStoreNodeEnvelopesWatcher(nil)
@ -309,7 +309,7 @@ func (s *MessengerStoreNodeRequestSuite) fetchProfile(m *Messenger, contactID st
} }
} }
func (s *MessengerStoreNodeRequestSuite) waitForAvailableStoreNode(messenger *Messenger) { func (s *MessengerStoreNodeRequestSuite) WaitForAvailableStoreNode(messenger *Messenger) {
WaitForAvailableStoreNode(&s.Suite, messenger, storeNodeConnectTimeout) WaitForAvailableStoreNode(&s.Suite, messenger, storeNodeConnectTimeout)
} }
@ -419,11 +419,11 @@ func (s *MessengerStoreNodeRequestSuite) TestSimultaneousCommunityInfoRequests()
community := s.createCommunity(s.owner) community := s.createCommunity(s.owner)
storeNodeRequestsCount := 0 storeNodeRequestsCount := 0
s.bob.storeNodeRequestsManager.onPerformingBatch = func(batch MailserverBatch) { s.bob.storeNodeRequestsManager.onPerformingBatch = func(batch types.MailserverBatch) {
storeNodeRequestsCount++ storeNodeRequestsCount++
} }
s.waitForAvailableStoreNode(s.bob) s.WaitForAvailableStoreNode(s.bob)
wg := sync.WaitGroup{} wg := sync.WaitGroup{}
@ -453,7 +453,7 @@ func (s *MessengerStoreNodeRequestSuite) TestRequestNonExistentCommunity() {
s.createBob() s.createBob()
s.waitForAvailableStoreNode(s.bob) s.WaitForAvailableStoreNode(s.bob)
fetchedCommunity, err := s.bob.FetchCommunity(&request) fetchedCommunity, err := s.bob.FetchCommunity(&request)
s.Require().NoError(err) s.Require().NoError(err)
@ -722,7 +722,7 @@ func (s *MessengerStoreNodeRequestSuite) TestRequestShardAndCommunityInfo() {
s.waitForEnvelopes(storeNodeSubscription, 1) s.waitForEnvelopes(storeNodeSubscription, 1)
s.waitForAvailableStoreNode(s.bob) s.WaitForAvailableStoreNode(s.bob)
communityShard := community.CommunityShard() communityShard := community.CommunityShard()
@ -1195,7 +1195,7 @@ func (s *MessengerStoreNodeRequestSuite) TestFetchingCommunityWithOwnerToken() {
s.createOwner() s.createOwner()
s.createBob() s.createBob()
s.waitForAvailableStoreNode(s.owner) s.WaitForAvailableStoreNode(s.owner)
community := s.createCommunity(s.owner) community := s.createCommunity(s.owner)
// owner mints owner token // owner mints owner token
@ -1228,7 +1228,7 @@ func (s *MessengerStoreNodeRequestSuite) TestFetchingCommunityWithOwnerToken() {
s.Require().NoError(err) s.Require().NoError(err)
s.Require().Len(community.TokenPermissions(), 1) s.Require().Len(community.TokenPermissions(), 1)
s.waitForAvailableStoreNode(s.bob) s.WaitForAvailableStoreNode(s.bob)
s.fetchCommunity(s.bob, community.CommunityShard(), community) s.fetchCommunity(s.bob, community.CommunityShard(), community)
} }

View File

@ -365,7 +365,7 @@ func SetIdentityImagesAndWaitForChange(s *suite.Suite, messenger *Messenger, tim
} }
func WaitForAvailableStoreNode(s *suite.Suite, m *Messenger, timeout time.Duration) { func WaitForAvailableStoreNode(s *suite.Suite, m *Messenger, timeout time.Duration) {
available := m.waitForAvailableStoreNode(timeout) available := m.transport.WaitForAvailableStoreNode(timeout)
s.Require().True(available) s.Require().True(available)
} }

View File

@ -6,6 +6,10 @@ import (
"go.uber.org/zap" "go.uber.org/zap"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/waku-org/go-waku/waku/v2/utils"
"github.com/status-im/status-go/eth-node/types" "github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/services/mailservers" "github.com/status-im/status-go/services/mailservers"
) )
@ -51,14 +55,17 @@ func (m *CommunityStorenodes) GetStorenodeByCommunityID(communityID string) (mai
return toMailserver(msData.storenodes[0]), nil return toMailserver(msData.storenodes[0]), nil
} }
func (m *CommunityStorenodes) IsCommunityStoreNode(id string) bool { func (m *CommunityStorenodes) IsCommunityStoreNode(peerID peer.ID) bool {
m.storenodesByCommunityIDMutex.RLock() m.storenodesByCommunityIDMutex.RLock()
defer m.storenodesByCommunityIDMutex.RUnlock() defer m.storenodesByCommunityIDMutex.RUnlock()
for _, data := range m.storenodesByCommunityID { for _, data := range m.storenodesByCommunityID {
for _, snode := range data.storenodes { for _, snode := range data.storenodes {
if snode.StorenodeID == id { commStorenodeID, err := utils.GetPeerID(snode.Address)
return true if err == nil {
if commStorenodeID == peerID {
return true
}
} }
} }
} }

View File

@ -4,17 +4,17 @@ import (
"context" "context"
"crypto/ecdsa" "crypto/ecdsa"
"database/sql" "database/sql"
"encoding/hex"
"sync" "sync"
"time" "time"
"github.com/google/uuid"
"github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peer"
"github.com/multiformats/go-multiaddr" "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors" "github.com/pkg/errors"
"go.uber.org/zap" "go.uber.org/zap"
"golang.org/x/exp/maps" "golang.org/x/exp/maps"
"github.com/waku-org/go-waku/waku/v2/api/history"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enode"
gocommon "github.com/status-im/status-go/common" gocommon "github.com/status-im/status-go/common"
@ -462,89 +462,6 @@ func (t *Transport) Peers() types.PeerStats {
return t.waku.Peers() return t.waku.Peers()
} }
func (t *Transport) createMessagesRequest(
ctx context.Context,
peerID peer.ID,
from, to uint32,
previousStoreCursor types.StoreRequestCursor,
pubsubTopic string,
contentTopics []types.TopicType,
limit uint32,
waitForResponse bool,
processEnvelopes bool,
) (storeCursor types.StoreRequestCursor, envelopesCount int, err error) {
r := createMessagesRequest(from, to, nil, previousStoreCursor, pubsubTopic, contentTopics, limit)
if waitForResponse {
resultCh := make(chan struct {
storeCursor types.StoreRequestCursor
envelopesCount int
err error
})
go func() {
defer gocommon.LogOnPanic()
storeCursor, envelopesCount, err = t.waku.RequestStoreMessages(ctx, peerID, r, processEnvelopes)
resultCh <- struct {
storeCursor types.StoreRequestCursor
envelopesCount int
err error
}{storeCursor, envelopesCount, err}
}()
select {
case result := <-resultCh:
return result.storeCursor, result.envelopesCount, result.err
case <-ctx.Done():
return nil, 0, ctx.Err()
}
} else {
go func() {
defer gocommon.LogOnPanic()
_, _, err = t.waku.RequestStoreMessages(ctx, peerID, r, false)
if err != nil {
t.logger.Error("failed to request store messages", zap.Error(err))
}
}()
}
return
}
func (t *Transport) SendMessagesRequestForTopics(
ctx context.Context,
peerID peer.ID,
from, to uint32,
prevCursor types.StoreRequestCursor,
pubsubTopic string,
contentTopics []types.TopicType,
limit uint32,
waitForResponse bool,
processEnvelopes bool,
) (cursor types.StoreRequestCursor, envelopesCount int, err error) {
return t.createMessagesRequest(ctx, peerID, from, to, prevCursor, pubsubTopic, contentTopics, limit, waitForResponse, processEnvelopes)
}
func createMessagesRequest(from, to uint32, cursor []byte, storeCursor types.StoreRequestCursor, pubsubTopic string, topics []types.TopicType, limit uint32) types.MessagesRequest {
aUUID := uuid.New()
// uuid is 16 bytes, converted to hex it's 32 bytes as expected by types.MessagesRequest
id := []byte(hex.EncodeToString(aUUID[:]))
var topicBytes [][]byte
for idx := range topics {
topicBytes = append(topicBytes, topics[idx][:])
}
return types.MessagesRequest{
ID: id,
From: from,
To: to,
Limit: limit,
Cursor: cursor,
PubsubTopic: pubsubTopic,
ContentTopics: topicBytes,
StoreCursor: storeCursor,
}
}
// ConfirmMessagesProcessed marks the messages as processed in the cache so // ConfirmMessagesProcessed marks the messages as processed in the cache so
// they won't be passed to the next layer anymore // they won't be passed to the next layer anymore
func (t *Transport) ConfirmMessagesProcessed(ids []string, timestamp uint64) error { func (t *Transport) ConfirmMessagesProcessed(ids []string, timestamp uint64) error {
@ -635,10 +552,6 @@ func (t *Transport) ConnectionChanged(state connection.State) {
t.waku.ConnectionChanged(state) t.waku.ConnectionChanged(state)
} }
func (t *Transport) PingPeer(ctx context.Context, peerID peer.ID) (time.Duration, error) {
return t.waku.PingPeer(ctx, peerID)
}
// Subscribe to a pubsub topic, passing an optional public key if the pubsub topic is protected // Subscribe to a pubsub topic, passing an optional public key if the pubsub topic is protected
func (t *Transport) SubscribeToPubsubTopic(topic string, optPublicKey *ecdsa.PublicKey) error { func (t *Transport) SubscribeToPubsubTopic(topic string, optPublicKey *ecdsa.PublicKey) error {
if t.waku.Version() == 2 { if t.waku.Version() == 2 {
@ -685,10 +598,6 @@ func (t *Transport) ConfirmMessageDelivered(messageID string) {
t.waku.ConfirmMessageDelivered(commHashes) t.waku.ConfirmMessageDelivered(commHashes)
} }
func (t *Transport) SetStorePeerID(peerID peer.ID) {
t.waku.SetStorePeerID(peerID)
}
func (t *Transport) SetCriteriaForMissingMessageVerification(peerID peer.ID, filters []*Filter) { func (t *Transport) SetCriteriaForMissingMessageVerification(peerID peer.ID, filters []*Filter) {
if t.waku.Version() != 2 { if t.waku.Version() != 2 {
return return
@ -721,3 +630,56 @@ func (t *Transport) SetCriteriaForMissingMessageVerification(peerID peer.ID, fil
} }
} }
} }
func (t *Transport) GetActiveStorenode() peer.ID {
return t.waku.GetActiveStorenode()
}
func (t *Transport) DisconnectActiveStorenode(ctx context.Context, backoffReason time.Duration, shouldCycle bool) {
t.waku.DisconnectActiveStorenode(ctx, backoffReason, shouldCycle)
}
func (t *Transport) OnStorenodeAvailableOneShot() <-chan struct{} {
return t.waku.OnStorenodeAvailableOneShot()
}
func (t *Transport) OnStorenodeChanged() <-chan peer.ID {
return t.waku.OnStorenodeChanged()
}
func (t *Transport) OnStorenodeNotWorking() <-chan struct{} {
return t.waku.OnStorenodeNotWorking()
}
func (t *Transport) OnStorenodeAvailable() <-chan peer.ID {
return t.waku.OnStorenodeAvailable()
}
func (t *Transport) WaitForAvailableStoreNode(timeout time.Duration) bool {
return t.waku.WaitForAvailableStoreNode(timeout)
}
func (t *Transport) IsStorenodeAvailable(peerID peer.ID) bool {
return t.waku.IsStorenodeAvailable(peerID)
}
func (t *Transport) PerformStorenodeTask(fn func() error, opts ...history.StorenodeTaskOption) error {
return t.waku.PerformStorenodeTask(fn, opts...)
}
func (t *Transport) ProcessMailserverBatch(
ctx context.Context,
batch types.MailserverBatch,
storenodeID peer.ID,
pageLimit uint64,
shouldProcessNextPage func(int) (bool, uint64),
processEnvelopes bool,
) error {
return t.waku.ProcessMailserverBatch(ctx, batch, storenodeID, pageLimit, shouldProcessNextPage, processEnvelopes)
}
func (t *Transport) SetStorenodeConfigProvider(c history.StorenodeConfigProvider) {
if t.WakuVersion() == 2 {
t.waku.SetStorenodeConfigProvider(c)
}
}
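Taken together with the Messenger methods added earlier in this diff (UseStorenodes, GetPinnedStorenode, Storenodes), these wrappers let the Messenger act as the storenode configuration source for go-waku. A sketch of the expected wiring follows; initStorenodeCycle is a hypothetical helper name, not the actual call site in this commit.
// Sketch only — assumes it sits next to Messenger in the protocol package.
var _ history.StorenodeConfigProvider = (*Messenger)(nil)

func (m *Messenger) initStorenodeCycle() { // hypothetical helper
	m.transport.SetStorenodeConfigProvider(m) // no-op unless WakuVersion() == 2
	go m.checkForStorenodeCycleSignals()      // forward cycle events as client signals
}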

View File

@ -1409,10 +1409,6 @@ func (api *PublicAPI) RequestAllHistoricMessagesWithRetries(forceFetchingBackup
return api.service.messenger.RequestAllHistoricMessages(forceFetchingBackup, true) return api.service.messenger.RequestAllHistoricMessages(forceFetchingBackup, true)
} }
func (api *PublicAPI) DisconnectActiveMailserver() {
api.service.messenger.DisconnectActiveMailserver()
}
// Echo is a method for testing purposes. // Echo is a method for testing purposes.
func (api *PublicAPI) Echo(ctx context.Context, message string) (string, error) { func (api *PublicAPI) Echo(ctx context.Context, message string) (string, error) {
return message, nil return message, nil

View File

@ -74,6 +74,14 @@ func (t timestamp) String() string {
return time.Unix(0, int64(t)).Format(time.RFC3339) return time.Unix(0, int64(t)).Format(time.RFC3339)
} }
func Timep(key string, time *int64) zapcore.Field {
if time == nil {
return zap.String(key, "-")
} else {
return Time(key, *time)
}
}
func Epoch(key string, time time.Time) zap.Field { func Epoch(key string, time time.Time) zap.Field {
return zap.String(key, fmt.Sprintf("%d", time.UnixNano())) return zap.String(key, fmt.Sprintf("%d", time.UnixNano()))
} }

View File

@ -0,0 +1,524 @@
package history
import (
"context"
"crypto/rand"
"errors"
"fmt"
"math"
"math/big"
"net"
"net/http"
"runtime"
"sort"
"sync"
"time"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/protocol/ping"
"github.com/waku-org/go-waku/waku/v2/protocol/store"
"go.uber.org/zap"
)
const defaultBackoff = 10 * time.Second
const graylistBackoff = 3 * time.Minute
const storenodeVerificationInterval = time.Second
const storenodeMaxFailedRequests uint = 2
const minStorenodesToChooseFrom = 3
const isAndroidEmulator = runtime.GOOS == "android" && runtime.GOARCH == "amd64"
const findNearestMailServer = !isAndroidEmulator
const overrideDNS = runtime.GOOS == "android" || runtime.GOOS == "ios"
const bootstrapDNS = "8.8.8.8:53"
type connStatus int
const (
disconnected connStatus = iota + 1
connected
)
type peerStatus struct {
status connStatus
canConnectAfter time.Time
lastConnectionAttempt time.Time
}
type StorenodeConfigProvider interface {
UseStorenodes() (bool, error)
GetPinnedStorenode() (peer.ID, error)
Storenodes() ([]peer.ID, error)
}
type StorenodeCycle struct {
sync.RWMutex
logger *zap.Logger
host host.Host
storenodeConfigProvider StorenodeConfigProvider
StorenodeAvailableOneshotEmitter *OneShotEmitter[struct{}]
StorenodeChangedEmitter *Emitter[peer.ID]
StorenodeNotWorkingEmitter *Emitter[struct{}]
StorenodeAvailableEmitter *Emitter[peer.ID]
failedRequests map[peer.ID]uint
peersMutex sync.RWMutex
activeStorenode peer.ID
peers map[peer.ID]peerStatus
}
func NewStorenodeCycle(logger *zap.Logger) *StorenodeCycle {
return &StorenodeCycle{
StorenodeAvailableOneshotEmitter: NewOneshotEmitter[struct{}](),
StorenodeChangedEmitter: NewEmitter[peer.ID](),
StorenodeNotWorkingEmitter: NewEmitter[struct{}](),
StorenodeAvailableEmitter: NewEmitter[peer.ID](),
logger: logger.Named("storenode-cycle"),
}
}
func (m *StorenodeCycle) Start(ctx context.Context, h host.Host) {
m.logger.Debug("starting storenode cycle")
m.host = h
m.failedRequests = make(map[peer.ID]uint)
m.peers = make(map[peer.ID]peerStatus)
go m.verifyStorenodeStatus(ctx)
}
func (m *StorenodeCycle) DisconnectActiveStorenode(backoff time.Duration) {
m.Lock()
defer m.Unlock()
m.disconnectActiveStorenode(backoff)
}
func (m *StorenodeCycle) connectToNewStorenodeAndWait(ctx context.Context) error {
// Handle pinned storenodes
m.logger.Info("disconnecting storenode")
pinnedStorenode, err := m.storenodeConfigProvider.GetPinnedStorenode()
if err != nil {
m.logger.Error("could not obtain the pinned storenode", zap.Error(err))
return err
}
// If no pinned storenode, no need to disconnect and wait for it to be available
if pinnedStorenode == "" {
m.disconnectActiveStorenode(graylistBackoff)
}
return m.findNewStorenode(ctx)
}
func (m *StorenodeCycle) disconnectStorenode(backoffDuration time.Duration) error {
if m.activeStorenode == "" {
m.logger.Info("no active storenode")
return nil
}
m.logger.Info("disconnecting active storenode", zap.Stringer("peerID", m.activeStorenode))
m.peersMutex.Lock()
pInfo, ok := m.peers[m.activeStorenode]
if ok {
pInfo.status = disconnected
pInfo.canConnectAfter = time.Now().Add(backoffDuration)
m.peers[m.activeStorenode] = pInfo
} else {
m.peers[m.activeStorenode] = peerStatus{
status: disconnected,
canConnectAfter: time.Now().Add(backoffDuration),
}
}
m.peersMutex.Unlock()
m.activeStorenode = ""
return nil
}
func (m *StorenodeCycle) disconnectActiveStorenode(backoffDuration time.Duration) {
err := m.disconnectStorenode(backoffDuration)
if err != nil {
m.logger.Error("failed to disconnect storenode", zap.Error(err))
}
m.StorenodeChangedEmitter.Emit("")
}
func (m *StorenodeCycle) Cycle(ctx context.Context) {
if m.storenodeConfigProvider == nil {
m.logger.Debug("storenodeConfigProvider not yet setup")
return
}
m.logger.Info("Automatically switching storenode")
if m.activeStorenode != "" {
m.disconnectActiveStorenode(graylistBackoff)
}
useStorenode, err := m.storenodeConfigProvider.UseStorenodes()
if err != nil {
m.logger.Error("failed to get use storenodes", zap.Error(err))
return
}
if !useStorenode {
m.logger.Info("Skipping storenode search due to useStorenode being false")
return
}
err = m.findNewStorenode(ctx)
if err != nil {
m.logger.Error("Error getting new storenode", zap.Error(err))
}
}
func poolSize(fleetSize int) int {
return int(math.Ceil(float64(fleetSize) / 4))
}
func (m *StorenodeCycle) getAvailableStorenodesSortedByRTT(ctx context.Context, allStorenodes []peer.ID) []peer.ID {
availableStorenodes := make(map[peer.ID]time.Duration)
availableStorenodesMutex := sync.Mutex{}
availableStorenodesWg := sync.WaitGroup{}
for _, storenode := range allStorenodes {
availableStorenodesWg.Add(1)
go func(peerID peer.ID) {
defer availableStorenodesWg.Done()
ctx, cancel := context.WithTimeout(ctx, 4*time.Second)
defer cancel()
rtt, err := m.pingPeer(ctx, peerID)
if err == nil { // pinging storenodes might fail, but we don't care
availableStorenodesMutex.Lock()
availableStorenodes[peerID] = rtt
availableStorenodesMutex.Unlock()
}
}(storenode)
}
availableStorenodesWg.Wait()
if len(availableStorenodes) == 0 {
m.logger.Warn("No storenodes available") // Do nothing..
return nil
}
var sortedStorenodes []SortedStorenode
for storenodeID, rtt := range availableStorenodes {
sortedStorenode := SortedStorenode{
Storenode: storenodeID,
RTT: rtt,
}
m.peersMutex.Lock()
pInfo, ok := m.peers[storenodeID]
m.peersMutex.Unlock()
if ok && time.Now().Before(pInfo.canConnectAfter) {
continue // We can't connect to this node yet
}
sortedStorenodes = append(sortedStorenodes, sortedStorenode)
}
sort.Sort(byRTTMsAndCanConnectBefore(sortedStorenodes))
result := make([]peer.ID, len(sortedStorenodes))
for i, s := range sortedStorenodes {
result[i] = s.Storenode
}
return result
}
func (m *StorenodeCycle) pingPeer(ctx context.Context, peerID peer.ID) (time.Duration, error) {
pingResultCh := ping.Ping(ctx, m.host, peerID)
select {
case <-ctx.Done():
return 0, ctx.Err()
case r := <-pingResultCh:
if r.Error != nil {
return 0, r.Error
}
return r.RTT, nil
}
}
func (m *StorenodeCycle) findNewStorenode(ctx context.Context) error {
// we have to override DNS manually because of https://github.com/status-im/status-mobile/issues/19581
if overrideDNS {
var dialer net.Dialer
net.DefaultResolver = &net.Resolver{
PreferGo: false,
Dial: func(context context.Context, _, _ string) (net.Conn, error) {
conn, err := dialer.DialContext(context, "udp", bootstrapDNS)
if err != nil {
return nil, err
}
return conn, nil
},
}
}
pinnedStorenode, err := m.storenodeConfigProvider.GetPinnedStorenode()
if err != nil {
m.logger.Error("Could not obtain the pinned storenode", zap.Error(err))
return err
}
if pinnedStorenode != "" {
return m.setActiveStorenode(pinnedStorenode)
}
m.logger.Info("Finding a new storenode..")
allStorenodes, err := m.storenodeConfigProvider.Storenodes()
if err != nil {
return err
}
// TODO: remove this check once sockets are stable on x86_64 emulators
if findNearestMailServer {
allStorenodes = m.getAvailableStorenodesSortedByRTT(ctx, allStorenodes)
}
// Picks a random storenode among the ones with the lowest latency
// The pool size is 1/4 of the storenodes that were pinged successfully
// If the pool size is less than `minStorenodesToChooseFrom`, it will
// pick a storenode from all the available storenodes
pSize := poolSize(len(allStorenodes) - 1)
if pSize <= minStorenodesToChooseFrom {
pSize = len(allStorenodes)
if pSize <= 0 {
m.logger.Warn("No storenodes available") // Do nothing..
return nil
}
}
r, err := rand.Int(rand.Reader, big.NewInt(int64(pSize)))
if err != nil {
return err
}
ms := allStorenodes[r.Int64()]
return m.setActiveStorenode(ms)
}
func (m *StorenodeCycle) storenodeStatus(peerID peer.ID) connStatus {
m.peersMutex.RLock()
defer m.peersMutex.RUnlock()
peer, ok := m.peers[peerID]
if !ok {
return disconnected
}
return peer.status
}
func (m *StorenodeCycle) setActiveStorenode(peerID peer.ID) error {
m.activeStorenode = peerID
m.StorenodeChangedEmitter.Emit(m.activeStorenode)
storenodeStatus := m.storenodeStatus(peerID)
if storenodeStatus != connected {
m.peersMutex.Lock()
m.peers[peerID] = peerStatus{
status: connected,
lastConnectionAttempt: time.Now(),
canConnectAfter: time.Now().Add(defaultBackoff),
}
m.peersMutex.Unlock()
m.failedRequests[peerID] = 0
m.logger.Info("storenode available", zap.Stringer("peerID", m.activeStorenode))
m.StorenodeAvailableOneshotEmitter.Emit(struct{}{}) // Maybe can be refactored away?
m.StorenodeAvailableEmitter.Emit(m.activeStorenode)
}
return nil
}
func (m *StorenodeCycle) GetActiveStorenode() peer.ID {
m.RLock()
defer m.RUnlock()
return m.activeStorenode
}
func (m *StorenodeCycle) IsStorenodeAvailable(peerID peer.ID) bool {
return m.storenodeStatus(peerID) == connected
}
func (m *StorenodeCycle) penalizeStorenode(id peer.ID) {
m.peersMutex.Lock()
defer m.peersMutex.Unlock()
pInfo, ok := m.peers[id]
if !ok {
pInfo.status = disconnected
}
pInfo.canConnectAfter = time.Now().Add(graylistBackoff)
m.peers[id] = pInfo
}
func (m *StorenodeCycle) verifyStorenodeStatus(ctx context.Context) {
ticker := time.NewTicker(storenodeVerificationInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
err := m.disconnectStorenodeIfRequired(ctx)
if err != nil {
m.logger.Error("failed to handle storenode cycle event", zap.Error(err))
continue
}
case <-ctx.Done():
return
}
}
}
func (m *StorenodeCycle) disconnectStorenodeIfRequired(ctx context.Context) error {
m.logger.Debug("wakuV2 storenode status verification")
if m.activeStorenode == "" {
// No active storenode, find a new one
m.Cycle(ctx)
return nil
}
// Check whether we want to disconnect the active storenode
if m.failedRequests[m.activeStorenode] >= storenodeMaxFailedRequests {
m.penalizeStorenode(m.activeStorenode)
m.StorenodeNotWorkingEmitter.Emit(struct{}{})
m.logger.Info("too many failed requests", zap.Stringer("storenode", m.activeStorenode))
m.failedRequests[m.activeStorenode] = 0
return m.connectToNewStorenodeAndWait(ctx)
}
return nil
}
func (m *StorenodeCycle) SetStorenodeConfigProvider(provider StorenodeConfigProvider) {
m.storenodeConfigProvider = provider
}
func (m *StorenodeCycle) WaitForAvailableStoreNode(ctx context.Context, timeout time.Duration) bool {
// Add 1 second to the timeout, because the storenode cycle has a 1-second ticker, which doesn't tick on start.
// This can be improved after merging https://github.com/status-im/status-go/pull/4380.
// NOTE: https://stackoverflow.com/questions/32705582/how-to-get-time-tick-to-tick-immediately
timeout += time.Second
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
defer wg.Done()
for !m.IsStorenodeAvailable(m.activeStorenode) {
select {
case <-m.StorenodeAvailableOneshotEmitter.Subscribe():
case <-ctx.Done():
return
}
}
}()
select {
case <-waitForWaitGroup(&wg):
case <-ctx.Done():
}
return m.IsStorenodeAvailable(m.activeStorenode)
}
func waitForWaitGroup(wg *sync.WaitGroup) <-chan struct{} {
ch := make(chan struct{})
go func() {
wg.Wait()
close(ch)
}()
return ch
}
type storenodeTaskParameters struct {
customPeerID peer.ID
}
type StorenodeTaskOption func(*storenodeTaskParameters)
func WithPeerID(peerID peer.ID) StorenodeTaskOption {
return func(stp *storenodeTaskParameters) {
stp.customPeerID = peerID
}
}
func (m *StorenodeCycle) PerformStorenodeTask(fn func() error, options ...StorenodeTaskOption) error {
params := storenodeTaskParameters{}
for _, opt := range options {
opt(&params)
}
peerID := params.customPeerID
if peerID == "" {
peerID = m.GetActiveStorenode()
}
if peerID == "" {
return errors.New("storenode not available")
}
m.RLock()
defer m.RUnlock()
var tries uint = 0
for tries < storenodeMaxFailedRequests {
if params.customPeerID == "" && m.storenodeStatus(peerID) != connected {
return errors.New("storenode not available")
}
m.logger.Info("trying performing history requests", zap.Uint("try", tries), zap.Stringer("peerID", peerID))
// Perform the request
err := fn()
if err == nil {
// Reset failed requests
m.logger.Debug("history request performed successfully", zap.Stringer("peerID", peerID))
m.failedRequests[peerID] = 0
return nil
}
m.logger.Error("failed to perform history request",
zap.Stringer("peerID", peerID),
zap.Uint("tries", tries),
zap.Error(err),
)
tries++
if storeErr, ok := err.(*store.StoreError); ok {
if storeErr.Code == http.StatusTooManyRequests {
m.disconnectActiveStorenode(defaultBackoff)
return fmt.Errorf("ratelimited at storenode %s: %w", peerID, err)
}
}
// Increment failed requests
m.failedRequests[peerID]++
// Change storenode
if m.failedRequests[peerID] >= storenodeMaxFailedRequests {
return errors.New("too many failed requests")
}
// Wait a couple of seconds to avoid spamming
time.Sleep(2 * time.Second)
}
return errors.New("failed to perform history request")
}
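For orientation, a minimal self-contained sketch (not part of this commit) of how an embedder might drive the new cycle; staticProvider and the empty task closure are hypothetical placeholders, and error handling is elided.
package main

import (
	"context"
	"time"

	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/waku-org/go-waku/waku/v2/api/history"
	"go.uber.org/zap"
)

// staticProvider is a hypothetical StorenodeConfigProvider backed by a fixed peer list.
type staticProvider struct{ nodes []peer.ID }

func (p *staticProvider) UseStorenodes() (bool, error)         { return true, nil }
func (p *staticProvider) GetPinnedStorenode() (peer.ID, error) { return "", nil }
func (p *staticProvider) Storenodes() ([]peer.ID, error)       { return p.nodes, nil }

func main() {
	logger, _ := zap.NewDevelopment()
	h, _ := libp2p.New() // in status-go this is the host go-waku already runs on

	cycle := history.NewStorenodeCycle(logger)
	cycle.SetStorenodeConfigProvider(&staticProvider{nodes: []peer.ID{ /* storenode peer IDs */ }})
	cycle.Start(context.Background(), h)

	// Block until the cycle has connected to some storenode (or the timeout expires).
	if cycle.WaitForAvailableStoreNode(context.Background(), 30*time.Second) {
		// Run a history request with the cycle's retry and penalty bookkeeping.
		_ = cycle.PerformStorenodeTask(func() error {
			return nil // issue a store query against cycle.GetActiveStorenode() here
		})
	}
}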

View File

@ -0,0 +1,48 @@
package history
import "sync"
type Emitter[T any] struct {
sync.Mutex
subscriptions []chan T
}
func NewEmitter[T any]() *Emitter[T] {
return &Emitter[T]{}
}
func (s *Emitter[T]) Subscribe() <-chan T {
s.Lock()
defer s.Unlock()
c := make(chan T)
s.subscriptions = append(s.subscriptions, c)
return c
}
func (s *Emitter[T]) Emit(value T) {
s.Lock()
defer s.Unlock()
for _, sub := range s.subscriptions {
sub <- value
}
}
type OneShotEmitter[T any] struct {
Emitter[T]
}
func NewOneshotEmitter[T any]() *OneShotEmitter[T] {
return &OneShotEmitter[T]{}
}
func (s *OneShotEmitter[T]) Emit(value T) {
s.Lock()
defer s.Unlock()
for _, subs := range s.subscriptions {
subs <- value
close(subs)
}
s.subscriptions = nil
}
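A small usage sketch, assuming the caller subscribes before anything emits: Emit blocks until every subscriber has received the value, because the subscription channels are unbuffered, so sender and receiver must not share a single goroutine.
package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/waku-org/go-waku/waku/v2/api/history"
)

func main() {
	changed := history.NewEmitter[peer.ID]()
	sub := changed.Subscribe() // subscribe before the first Emit

	// Emit blocks on each unbuffered subscription channel, so send from a
	// separate goroutine (or make sure all subscribers are already draining).
	var newActive peer.ID // whichever peer the cycle picked
	go changed.Emit(newActive)

	fmt.Println("active storenode changed to:", <-sub)
}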

View File

@ -0,0 +1,296 @@
package history
import (
"context"
"errors"
"math"
"sync"
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/waku-org/go-waku/logging"
"github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/protocol/store"
"go.uber.org/zap"
)
const maxTopicsPerRequest int = 10
const mailserverRequestTimeout = 30 * time.Second
type work struct {
criteria store.FilterCriteria
cursor []byte
limit uint64
}
type HistoryRetriever struct {
store Store
logger *zap.Logger
historyProcessor HistoryProcessor
}
type HistoryProcessor interface {
OnEnvelope(env *protocol.Envelope, processEnvelopes bool) error
OnRequestFailed(requestID []byte, peerID peer.ID, err error)
}
type Store interface {
Query(ctx context.Context, criteria store.FilterCriteria, opts ...store.RequestOption) (store.Result, error)
}
func NewHistoryRetriever(store Store, historyProcessor HistoryProcessor, logger *zap.Logger) *HistoryRetriever {
return &HistoryRetriever{
store: store,
logger: logger.Named("history-retriever"),
historyProcessor: historyProcessor,
}
}
func (hr *HistoryRetriever) Query(
ctx context.Context,
criteria store.FilterCriteria,
storenodeID peer.ID,
pageLimit uint64,
shouldProcessNextPage func(int) (bool, uint64),
processEnvelopes bool,
) error {
logger := hr.logger.With(
logging.Timep("fromString", criteria.TimeStart),
logging.Timep("toString", criteria.TimeEnd),
zap.String("pubsubTopic", criteria.PubsubTopic),
zap.Strings("contentTopics", criteria.ContentTopicsList()),
zap.Int64p("from", criteria.TimeStart),
zap.Int64p("to", criteria.TimeEnd),
)
logger.Info("syncing")
wg := sync.WaitGroup{}
workWg := sync.WaitGroup{}
workCh := make(chan work, 1000) // each batch item is split into bunches of up to 10 content topics and sent to this channel
workCompleteCh := make(chan struct{}) // once all batch items are processed, this channel is triggered
semaphore := make(chan struct{}, 3) // limit the number of concurrent queries
errCh := make(chan error)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// TODO: refactor this by extracting the consumer into a separate goroutine.
// Producer
wg.Add(1)
go func() {
defer func() {
logger.Debug("mailserver batch producer complete")
wg.Done()
}()
contentTopicList := criteria.ContentTopics.ToList()
// TODO: split into 24h batches
allWorks := int(math.Ceil(float64(len(contentTopicList)) / float64(maxTopicsPerRequest)))
workWg.Add(allWorks)
for i := 0; i < len(contentTopicList); i += maxTopicsPerRequest {
j := i + maxTopicsPerRequest
if j > len(contentTopicList) {
j = len(contentTopicList)
}
select {
case <-ctx.Done():
logger.Debug("processBatch producer - context done")
return
default:
logger.Debug("processBatch producer - creating work")
workCh <- work{
criteria: store.FilterCriteria{
ContentFilter: protocol.NewContentFilter(criteria.PubsubTopic, contentTopicList[i:j]...),
TimeStart: criteria.TimeStart,
TimeEnd: criteria.TimeEnd,
},
limit: pageLimit,
}
}
}
go func() {
workWg.Wait()
workCompleteCh <- struct{}{}
}()
logger.Debug("processBatch producer complete")
}()
var result error
loop:
for {
select {
case <-ctx.Done():
logger.Debug("processBatch cleanup - context done")
result = ctx.Err()
if errors.Is(result, context.Canceled) {
result = nil
}
break loop
case w, ok := <-workCh:
if !ok {
continue
}
select {
case <-ctx.Done():
return ctx.Err()
default:
// continue...
}
logger.Debug("processBatch - received work")
semaphore <- struct{}{}
go func(w work) { // Consumer
defer func() {
workWg.Done()
<-semaphore
}()
queryCtx, queryCancel := context.WithTimeout(ctx, mailserverRequestTimeout)
cursor, envelopesCount, err := hr.createMessagesRequest(queryCtx, storenodeID, w.criteria, w.cursor, w.limit, true, processEnvelopes, logger)
queryCancel()
if err != nil {
logger.Debug("failed to send request", zap.Error(err))
errCh <- err
return
}
processNextPage := true
nextPageLimit := pageLimit
if shouldProcessNextPage != nil {
processNextPage, nextPageLimit = shouldProcessNextPage(envelopesCount)
}
if !processNextPage {
return
}
// Check the cursor after calling `shouldProcessNextPage`.
// The app might process the fetched envelopes in the callback for its own needs.
if cursor == nil {
return
}
logger.Debug("processBatch producer - creating work (cursor)")
workWg.Add(1)
workCh <- work{
criteria: w.criteria,
cursor: cursor,
limit: nextPageLimit,
}
}(w)
case err := <-errCh:
logger.Debug("processBatch - received error", zap.Error(err))
cancel() // Kill go routines
return err
case <-workCompleteCh:
logger.Debug("processBatch - all jobs complete")
cancel() // Kill go routines
}
}
wg.Wait()
logger.Info("synced topic", zap.NamedError("hasError", result))
return result
}
func (hr *HistoryRetriever) createMessagesRequest(
ctx context.Context,
peerID peer.ID,
criteria store.FilterCriteria,
cursor []byte,
limit uint64,
waitForResponse bool,
processEnvelopes bool,
logger *zap.Logger,
) (storeCursor []byte, envelopesCount int, err error) {
if waitForResponse {
resultCh := make(chan struct {
storeCursor []byte
envelopesCount int
err error
})
go func() {
storeCursor, envelopesCount, err = hr.requestStoreMessages(ctx, peerID, criteria, cursor, limit, processEnvelopes)
resultCh <- struct {
storeCursor []byte
envelopesCount int
err error
}{storeCursor, envelopesCount, err}
}()
select {
case result := <-resultCh:
return result.storeCursor, result.envelopesCount, result.err
case <-ctx.Done():
return nil, 0, ctx.Err()
}
} else {
go func() {
_, _, err = hr.requestStoreMessages(ctx, peerID, criteria, cursor, limit, false)
if err != nil {
logger.Error("failed to request store messages", zap.Error(err))
}
}()
}
return
}
func (hr *HistoryRetriever) requestStoreMessages(ctx context.Context, peerID peer.ID, criteria store.FilterCriteria, cursor []byte, limit uint64, processEnvelopes bool) ([]byte, int, error) {
requestID := protocol.GenerateRequestID()
logger := hr.logger.With(zap.String("requestID", hexutil.Encode(requestID)), zap.Stringer("peerID", peerID))
opts := []store.RequestOption{
store.WithPaging(false, limit),
store.WithRequestID(requestID),
store.WithPeer(peerID),
store.WithCursor(cursor)}
logger.Debug("store.query",
logging.Timep("startTime", criteria.TimeStart),
logging.Timep("endTime", criteria.TimeEnd),
zap.Strings("contentTopics", criteria.ContentTopics.ToList()),
zap.String("pubsubTopic", criteria.PubsubTopic),
zap.String("cursor", hexutil.Encode(cursor)),
)
queryStart := time.Now()
result, err := hr.store.Query(ctx, criteria, opts...)
queryDuration := time.Since(queryStart)
if err != nil {
logger.Error("error querying storenode", zap.Error(err))
hr.historyProcessor.OnRequestFailed(requestID, peerID, err)
return nil, 0, err
}
messages := result.Messages()
envelopesCount := len(messages)
logger.Debug("store.query response", zap.Duration("queryDuration", queryDuration), zap.Int("numMessages", envelopesCount), zap.Bool("hasCursor", result.IsComplete() && result.Cursor() != nil))
for _, mkv := range messages {
envelope := protocol.NewEnvelope(mkv.Message, mkv.Message.GetTimestamp(), mkv.GetPubsubTopic())
err := hr.historyProcessor.OnEnvelope(envelope, processEnvelopes)
if err != nil {
return nil, 0, err
}
}
return result.Cursor(), envelopesCount, nil
}
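The consumer above re-enqueues a new work item whenever a page comes back with a cursor, so pagination is driven entirely by the store response. As a standalone illustration, here is a minimal sketch (not part of this change) that drains every page for a single filter using the same request options; the helper name, the `st` client and the caller-supplied criteria are assumptions:
func drainPages(ctx context.Context, st *store.WakuStore, storenode peer.ID, criteria store.FilterCriteria, pageLimit uint64) ([]*pb.WakuMessageKeyValue, error) {
	var all []*pb.WakuMessageKeyValue
	var cursor []byte
	for {
		// Same options used by requestStoreMessages above.
		result, err := st.Query(ctx, criteria,
			store.WithPeer(storenode),
			store.WithPaging(false, pageLimit),
			store.WithCursor(cursor))
		if err != nil {
			return nil, err
		}
		all = append(all, result.Messages()...)
		if result.Cursor() == nil {
			return all, nil // last page reached
		}
		cursor = result.Cursor()
	}
}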

View File

@ -0,0 +1,32 @@
package history
import (
"time"
"github.com/libp2p/go-libp2p/core/peer"
)
type SortedStorenode struct {
Storenode peer.ID
RTT time.Duration
CanConnectAfter time.Time
}
type byRTTMsAndCanConnectBefore []SortedStorenode
func (s byRTTMsAndCanConnectBefore) Len() int {
return len(s)
}
func (s byRTTMsAndCanConnectBefore) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s byRTTMsAndCanConnectBefore) Less(i, j int) bool {
// Slightly inaccurate, since the comparison depends on the current time, but the imprecision does not matter here
now := time.Now()
if s[i].CanConnectAfter.Before(now) && s[j].CanConnectAfter.Before(now) {
return s[i].RTT < s[j].RTT
}
return s[i].CanConnectAfter.Before(s[j].CanConnectAfter)
}
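byRTTMsAndCanConnectBefore implements sort.Interface, so the storenode cycle can presumably order candidates with the standard library. A hedged usage sketch, with the peer IDs and slice contents assumed:
sorted := []SortedStorenode{
	{Storenode: peerA, RTT: 120 * time.Millisecond},
	{Storenode: peerB, RTT: 80 * time.Millisecond},
	{Storenode: peerC, RTT: 40 * time.Millisecond, CanConnectAfter: time.Now().Add(time.Minute)},
}
sort.Sort(byRTTMsAndCanConnectBefore(sorted))
// sorted[0] is now peerB: the lowest-RTT node that is connectable right away;
// peerC sorts last because its backoff has not elapsed yet.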

View File

@ -178,7 +178,7 @@ func (m *MissingMessageVerifier) fetchHistory(c chan<- *protocol.Envelope, inter
} }
} }
func (m *MissingMessageVerifier) storeQueryWithRetry(ctx context.Context, queryFunc func(ctx context.Context) (*store.Result, error), logger *zap.Logger, logMsg string) (*store.Result, error) { func (m *MissingMessageVerifier) storeQueryWithRetry(ctx context.Context, queryFunc func(ctx context.Context) (store.Result, error), logger *zap.Logger, logMsg string) (store.Result, error) {
retry := true retry := true
count := 1 count := 1
for retry && count <= m.params.maxAttemptsToRetrieveHistory { for retry && count <= m.params.maxAttemptsToRetrieveHistory {
@ -212,7 +212,7 @@ func (m *MissingMessageVerifier) fetchMessagesBatch(c chan<- *protocol.Envelope,
logging.Epoch("to", now), logging.Epoch("to", now),
) )
result, err := m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (*store.Result, error) { result, err := m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (store.Result, error) {
return m.store.Query(ctx, store.FilterCriteria{ return m.store.Query(ctx, store.FilterCriteria{
ContentFilter: protocol.NewContentFilter(interest.contentFilter.PubsubTopic, contentTopics[batchFrom:batchTo]...), ContentFilter: protocol.NewContentFilter(interest.contentFilter.PubsubTopic, contentTopics[batchFrom:batchTo]...),
TimeStart: proto.Int64(interest.lastChecked.Add(-m.params.delay).UnixNano()), TimeStart: proto.Int64(interest.lastChecked.Add(-m.params.delay).UnixNano()),
@ -243,7 +243,7 @@ func (m *MissingMessageVerifier) fetchMessagesBatch(c chan<- *protocol.Envelope,
missingHashes = append(missingHashes, hash) missingHashes = append(missingHashes, hash)
} }
result, err = m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (*store.Result, error) { result, err = m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (store.Result, error) {
if err = result.Next(ctx); err != nil { if err = result.Next(ctx); err != nil {
return nil, err return nil, err
} }
@ -282,7 +282,7 @@ func (m *MissingMessageVerifier) fetchMessagesBatch(c chan<- *protocol.Envelope,
defer utils.LogOnPanic() defer utils.LogOnPanic()
defer wg.Wait() defer wg.Wait()
result, err := m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (*store.Result, error) { result, err := m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (store.Result, error) {
queryCtx, cancel := context.WithTimeout(ctx, m.params.storeQueryTimeout) queryCtx, cancel := context.WithTimeout(ctx, m.params.storeQueryTimeout)
defer cancel() defer cancel()
return m.store.QueryByHash(queryCtx, messageHashes, store.WithPeer(interest.peerID), store.WithPaging(false, maxMsgHashesPerRequest)) return m.store.QueryByHash(queryCtx, messageHashes, store.WithPeer(interest.peerID), store.WithPaging(false, maxMsgHashesPerRequest))
@ -303,7 +303,7 @@ func (m *MissingMessageVerifier) fetchMessagesBatch(c chan<- *protocol.Envelope,
} }
} }
result, err = m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (*store.Result, error) { result, err = m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (store.Result, error) {
if err = result.Next(ctx); err != nil { if err = result.Next(ctx); err != nil {
return nil, err return nil, err
} }

View File

@ -8,8 +8,8 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/libp2p/go-libp2p/core/peer"
apicommon "github.com/waku-org/go-waku/waku/v2/api/common" apicommon "github.com/waku-org/go-waku/waku/v2/api/common"
"github.com/waku-org/go-waku/waku/v2/api/history"
"github.com/waku-org/go-waku/waku/v2/protocol" "github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/protocol/pb" "github.com/waku-org/go-waku/waku/v2/protocol/pb"
"github.com/waku-org/go-waku/waku/v2/protocol/store" "github.com/waku-org/go-waku/waku/v2/protocol/store"
@ -29,7 +29,6 @@ type ISentCheck interface {
Start() Start()
Add(topic string, messageID common.Hash, sentTime uint32) Add(topic string, messageID common.Hash, sentTime uint32)
DeleteByMessageIDs(messageIDs []common.Hash) DeleteByMessageIDs(messageIDs []common.Hash)
SetStorePeerID(peerID peer.ID)
} }
// MessageSentCheck tracks the outgoing messages and check against store node // MessageSentCheck tracks the outgoing messages and check against store node
@ -38,11 +37,11 @@ type ISentCheck interface {
type MessageSentCheck struct { type MessageSentCheck struct {
messageIDs map[string]map[common.Hash]uint32 messageIDs map[string]map[common.Hash]uint32
messageIDsMu sync.RWMutex messageIDsMu sync.RWMutex
storePeerID peer.ID
messageStoredChan chan common.Hash messageStoredChan chan common.Hash
messageExpiredChan chan common.Hash messageExpiredChan chan common.Hash
ctx context.Context ctx context.Context
store *store.WakuStore store *store.WakuStore
storenodeCycle *history.StorenodeCycle
timesource timesource.Timesource timesource timesource.Timesource
logger *zap.Logger logger *zap.Logger
maxHashQueryLength uint64 maxHashQueryLength uint64
@ -53,7 +52,7 @@ type MessageSentCheck struct {
} }
// NewMessageSentCheck creates a new instance of MessageSentCheck with default parameters // NewMessageSentCheck creates a new instance of MessageSentCheck with default parameters
func NewMessageSentCheck(ctx context.Context, store *store.WakuStore, timesource timesource.Timesource, msgStoredChan chan common.Hash, msgExpiredChan chan common.Hash, logger *zap.Logger) *MessageSentCheck { func NewMessageSentCheck(ctx context.Context, store *store.WakuStore, cycle *history.StorenodeCycle, timesource timesource.Timesource, msgStoredChan chan common.Hash, msgExpiredChan chan common.Hash, logger *zap.Logger) *MessageSentCheck {
return &MessageSentCheck{ return &MessageSentCheck{
messageIDs: make(map[string]map[common.Hash]uint32), messageIDs: make(map[string]map[common.Hash]uint32),
messageIDsMu: sync.RWMutex{}, messageIDsMu: sync.RWMutex{},
@ -61,6 +60,7 @@ func NewMessageSentCheck(ctx context.Context, store *store.WakuStore, timesource
messageExpiredChan: msgExpiredChan, messageExpiredChan: msgExpiredChan,
ctx: ctx, ctx: ctx,
store: store, store: store,
storenodeCycle: cycle,
timesource: timesource, timesource: timesource,
logger: logger, logger: logger,
maxHashQueryLength: DefaultMaxHashQueryLength, maxHashQueryLength: DefaultMaxHashQueryLength,
@ -139,11 +139,6 @@ func (m *MessageSentCheck) DeleteByMessageIDs(messageIDs []common.Hash) {
} }
} }
// SetStorePeerID sets the peer id of store node
func (m *MessageSentCheck) SetStorePeerID(peerID peer.ID) {
m.storePeerID = peerID
}
// Start checks if the tracked outgoing messages are stored periodically // Start checks if the tracked outgoing messages are stored periodically
func (m *MessageSentCheck) Start() { func (m *MessageSentCheck) Start() {
defer utils.LogOnPanic() defer utils.LogOnPanic()
@ -211,7 +206,7 @@ func (m *MessageSentCheck) Start() {
} }
func (m *MessageSentCheck) messageHashBasedQuery(ctx context.Context, hashes []common.Hash, relayTime []uint32, pubsubTopic string) []common.Hash { func (m *MessageSentCheck) messageHashBasedQuery(ctx context.Context, hashes []common.Hash, relayTime []uint32, pubsubTopic string) []common.Hash {
selectedPeer := m.storePeerID selectedPeer := m.storenodeCycle.GetActiveStorenode()
if selectedPeer == "" { if selectedPeer == "" {
m.logger.Error("no store peer id available", zap.String("pubsubTopic", pubsubTopic)) m.logger.Error("no store peer id available", zap.String("pubsubTopic", pubsubTopic))
return []common.Hash{} return []common.Hash{}

View File

@ -6,7 +6,6 @@ import (
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/waku-org/go-waku/waku/v2/protocol" "github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/protocol/lightpush" "github.com/waku-org/go-waku/waku/v2/protocol/lightpush"
"github.com/waku-org/go-waku/waku/v2/protocol/relay" "github.com/waku-org/go-waku/waku/v2/protocol/relay"
@ -162,9 +161,3 @@ func (ms *MessageSender) MessagesDelivered(messageIDs []common.Hash) {
ms.messageSentCheck.DeleteByMessageIDs(messageIDs) ms.messageSentCheck.DeleteByMessageIDs(messageIDs)
} }
} }
func (ms *MessageSender) SetStorePeerID(peerID peer.ID) {
if ms.messageSentCheck != nil {
ms.messageSentCheck.SetStorePeerID(peerID)
}
}

View File

@ -50,8 +50,8 @@ type StoreError struct {
} }
// NewStoreError creates a new instance of StoreError // NewStoreError creates a new instance of StoreError
func NewStoreError(code int, message string) StoreError { func NewStoreError(code int, message string) *StoreError {
return StoreError{ return &StoreError{
Code: code, Code: code,
Message: message, Message: message,
} }
@ -99,7 +99,7 @@ func (s *WakuStore) SetHost(h host.Host) {
// Request is used to send a store query. This function requires understanding how to prepare a store query // Request is used to send a store query. This function requires understanding how to prepare a store query
// and most of the time you can use `Query`, `QueryByHash` and `Exists` instead, as they provide // and most of the time you can use `Query`, `QueryByHash` and `Exists` instead, as they provide
// a simpler API // a simpler API
func (s *WakuStore) Request(ctx context.Context, criteria Criteria, opts ...RequestOption) (*Result, error) { func (s *WakuStore) Request(ctx context.Context, criteria Criteria, opts ...RequestOption) (Result, error) {
params := new(Parameters) params := new(Parameters)
optList := DefaultOptions() optList := DefaultOptions()
@ -182,7 +182,7 @@ func (s *WakuStore) Request(ctx context.Context, criteria Criteria, opts ...Requ
return nil, err return nil, err
} }
result := &Result{ result := &resultImpl{
store: s, store: s,
messages: response.Messages, messages: response.Messages,
storeRequest: storeRequest, storeRequest: storeRequest,
@ -195,12 +195,12 @@ func (s *WakuStore) Request(ctx context.Context, criteria Criteria, opts ...Requ
} }
// Query retrieves all the messages that match a criteria. Use the options to indicate whether to return the message themselves or not. // Query retrieves all the messages that match a criteria. Use the options to indicate whether to return the message themselves or not.
func (s *WakuStore) Query(ctx context.Context, criteria FilterCriteria, opts ...RequestOption) (*Result, error) { func (s *WakuStore) Query(ctx context.Context, criteria FilterCriteria, opts ...RequestOption) (Result, error) {
return s.Request(ctx, criteria, opts...) return s.Request(ctx, criteria, opts...)
} }
// Query retrieves all the messages with specific message hashes // Query retrieves all the messages with specific message hashes
func (s *WakuStore) QueryByHash(ctx context.Context, messageHashes []wpb.MessageHash, opts ...RequestOption) (*Result, error) { func (s *WakuStore) QueryByHash(ctx context.Context, messageHashes []wpb.MessageHash, opts ...RequestOption) (Result, error) {
return s.Request(ctx, MessageHashCriteria{messageHashes}, opts...) return s.Request(ctx, MessageHashCriteria{messageHashes}, opts...)
} }
@ -214,17 +214,17 @@ func (s *WakuStore) Exists(ctx context.Context, messageHash wpb.MessageHash, opt
return false, err return false, err
} }
return len(result.messages) != 0, nil return len(result.Messages()) != 0, nil
} }
func (s *WakuStore) next(ctx context.Context, r *Result, opts ...RequestOption) (*Result, error) { func (s *WakuStore) next(ctx context.Context, r Result, opts ...RequestOption) (*resultImpl, error) {
if r.IsComplete() { if r.IsComplete() {
return &Result{ return &resultImpl{
store: s, store: s,
messages: nil, messages: nil,
cursor: nil, cursor: nil,
storeRequest: r.storeRequest, storeRequest: r.Query(),
storeResponse: r.storeResponse, storeResponse: r.Response(),
peerID: r.PeerID(), peerID: r.PeerID(),
}, nil }, nil
} }
@ -240,7 +240,7 @@ func (s *WakuStore) next(ctx context.Context, r *Result, opts ...RequestOption)
} }
} }
storeRequest := proto.Clone(r.storeRequest).(*pb.StoreQueryRequest) storeRequest := proto.Clone(r.Query()).(*pb.StoreQueryRequest)
storeRequest.RequestId = hex.EncodeToString(protocol.GenerateRequestID()) storeRequest.RequestId = hex.EncodeToString(protocol.GenerateRequestID())
storeRequest.PaginationCursor = r.Cursor() storeRequest.PaginationCursor = r.Cursor()
@ -249,7 +249,7 @@ func (s *WakuStore) next(ctx context.Context, r *Result, opts ...RequestOption)
return nil, err return nil, err
} }
result := &Result{ result := &resultImpl{
store: s, store: s,
messages: response.Messages, messages: response.Messages,
storeRequest: storeRequest, storeRequest: storeRequest,
@ -317,7 +317,7 @@ func (s *WakuStore) queryFrom(ctx context.Context, storeRequest *pb.StoreQueryRe
if storeResponse.GetStatusCode() != ok { if storeResponse.GetStatusCode() != ok {
err := NewStoreError(int(storeResponse.GetStatusCode()), storeResponse.GetStatusDesc()) err := NewStoreError(int(storeResponse.GetStatusCode()), storeResponse.GetStatusDesc())
return nil, &err return nil, err
} }
return storeResponse, nil return storeResponse, nil
} }
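The doc comment above points callers at the simpler helpers. As a quick hedged illustration (client `st`, peer `p` and message hash `h` assumed to be in scope), checking whether a single message is already stored looks like:
exists, err := st.Exists(ctx, h, store.WithPeer(p))
if err != nil {
	// query failed, e.g. the storenode returned a non-OK status code
}
_ = exists // true when the storenode holds a message with hash h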

View File

@ -22,6 +22,10 @@ type Parameters struct {
skipRatelimit bool skipRatelimit bool
} }
func (p *Parameters) Cursor() []byte {
return p.cursor
}
type RequestOption func(*Parameters) error type RequestOption func(*Parameters) error
// WithPeer is an option used to specify the peerID to request the message history. // WithPeer is an option used to specify the peerID to request the message history.

View File

@ -8,7 +8,17 @@ import (
) )
// Result represents a valid response from a store node // Result represents a valid response from a store node
type Result struct { type Result interface {
Cursor() []byte
IsComplete() bool
PeerID() peer.ID
Query() *pb.StoreQueryRequest
Response() *pb.StoreQueryResponse
Next(ctx context.Context, opts ...RequestOption) error
Messages() []*pb.WakuMessageKeyValue
}
type resultImpl struct {
done bool done bool
messages []*pb.WakuMessageKeyValue messages []*pb.WakuMessageKeyValue
@ -19,27 +29,27 @@ type Result struct {
peerID peer.ID peerID peer.ID
} }
func (r *Result) Cursor() []byte { func (r *resultImpl) Cursor() []byte {
return r.cursor return r.cursor
} }
func (r *Result) IsComplete() bool { func (r *resultImpl) IsComplete() bool {
return r.done return r.done
} }
func (r *Result) PeerID() peer.ID { func (r *resultImpl) PeerID() peer.ID {
return r.peerID return r.peerID
} }
func (r *Result) Query() *pb.StoreQueryRequest { func (r *resultImpl) Query() *pb.StoreQueryRequest {
return r.storeRequest return r.storeRequest
} }
func (r *Result) Response() *pb.StoreQueryResponse { func (r *resultImpl) Response() *pb.StoreQueryResponse {
return r.storeResponse return r.storeResponse
} }
func (r *Result) Next(ctx context.Context, opts ...RequestOption) error { func (r *resultImpl) Next(ctx context.Context, opts ...RequestOption) error {
if r.cursor == nil { if r.cursor == nil {
r.done = true r.done = true
r.messages = nil r.messages = nil
@ -57,6 +67,6 @@ func (r *Result) Next(ctx context.Context, opts ...RequestOption) error {
return nil return nil
} }
func (r *Result) Messages() []*pb.WakuMessageKeyValue { func (r *resultImpl) Messages() []*pb.WakuMessageKeyValue {
return r.messages return r.messages
} }
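Callers now program against the Result interface rather than the concrete struct. A minimal pagination sketch, assuming a store client `st`, peer `p` and criteria `fc` are already set up:
result, err := st.Query(ctx, fc, store.WithPeer(p), store.WithPaging(false, 100))
if err != nil {
	return err
}
for {
	for _, m := range result.Messages() {
		_ = m.Message // handle each stored message
	}
	if result.IsComplete() {
		break
	}
	if err := result.Next(ctx); err != nil {
		return err
	}
}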

vendor/modules.txt vendored
View File

@ -1040,13 +1040,14 @@ github.com/waku-org/go-discover/discover/v5wire
github.com/waku-org/go-libp2p-rendezvous github.com/waku-org/go-libp2p-rendezvous
github.com/waku-org/go-libp2p-rendezvous/db github.com/waku-org/go-libp2p-rendezvous/db
github.com/waku-org/go-libp2p-rendezvous/pb github.com/waku-org/go-libp2p-rendezvous/pb
# github.com/waku-org/go-waku v0.8.1-0.20241004054019-0ed94ce0b1cb # github.com/waku-org/go-waku v0.8.1-0.20241014185851-76275f6fb835
## explicit; go 1.21 ## explicit; go 1.21
github.com/waku-org/go-waku/logging github.com/waku-org/go-waku/logging
github.com/waku-org/go-waku/tests github.com/waku-org/go-waku/tests
github.com/waku-org/go-waku/waku/persistence github.com/waku-org/go-waku/waku/persistence
github.com/waku-org/go-waku/waku/v2/api/common github.com/waku-org/go-waku/waku/v2/api/common
github.com/waku-org/go-waku/waku/v2/api/filter github.com/waku-org/go-waku/waku/v2/api/filter
github.com/waku-org/go-waku/waku/v2/api/history
github.com/waku-org/go-waku/waku/v2/api/missing github.com/waku-org/go-waku/waku/v2/api/missing
github.com/waku-org/go-waku/waku/v2/api/publish github.com/waku-org/go-waku/waku/v2/api/publish
github.com/waku-org/go-waku/waku/v2/discv5 github.com/waku-org/go-waku/waku/v2/discv5

View File

@ -0,0 +1,25 @@
package wakuv2
import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/status-im/status-go/wakuv2/common"
"github.com/waku-org/go-waku/waku/v2/api/history"
"github.com/waku-org/go-waku/waku/v2/protocol"
)
type HistoryProcessorWrapper struct {
waku *Waku
}
func NewHistoryProcessorWrapper(waku *Waku) history.HistoryProcessor {
return &HistoryProcessorWrapper{waku}
}
func (hr *HistoryProcessorWrapper) OnEnvelope(env *protocol.Envelope, processEnvelopes bool) error {
return hr.waku.OnNewEnvelopes(env, common.StoreMessageType, processEnvelopes)
}
func (hr *HistoryProcessorWrapper) OnRequestFailed(requestID []byte, peerID peer.ID, err error) {
hr.waku.onHistoricMessagesRequestFailed(requestID, peerID, err)
}
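Any type with these two methods satisfies history.HistoryProcessor; a throwaway sketch (purely illustrative, not in the codebase) that just counts envelopes:
type countingProcessor struct{ count int }
func (c *countingProcessor) OnEnvelope(env *protocol.Envelope, processEnvelopes bool) error {
	c.count++ // a real processor would decode and persist the envelope here
	return nil
}
func (c *countingProcessor) OnRequestFailed(requestID []byte, peerID peer.ID, err error) {}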

View File

@ -54,9 +54,9 @@ import (
"github.com/libp2p/go-libp2p" "github.com/libp2p/go-libp2p"
pubsub "github.com/libp2p/go-libp2p-pubsub" pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/metrics" "github.com/libp2p/go-libp2p/core/metrics"
"github.com/libp2p/go-libp2p/p2p/protocol/ping"
filterapi "github.com/waku-org/go-waku/waku/v2/api/filter" filterapi "github.com/waku-org/go-waku/waku/v2/api/filter"
"github.com/waku-org/go-waku/waku/v2/api/history"
"github.com/waku-org/go-waku/waku/v2/api/missing" "github.com/waku-org/go-waku/waku/v2/api/missing"
"github.com/waku-org/go-waku/waku/v2/api/publish" "github.com/waku-org/go-waku/waku/v2/api/publish"
"github.com/waku-org/go-waku/waku/v2/dnsdisc" "github.com/waku-org/go-waku/waku/v2/dnsdisc"
@ -171,6 +171,9 @@ type Waku struct {
onlineChecker *onlinechecker.DefaultOnlineChecker onlineChecker *onlinechecker.DefaultOnlineChecker
state connection.State state connection.State
StorenodeCycle *history.StorenodeCycle
HistoryRetriever *history.HistoryRetriever
logger *zap.Logger logger *zap.Logger
// NTP Synced timesource // NTP Synced timesource
@ -359,6 +362,7 @@ func New(nodeKey *ecdsa.PrivateKey, fleet string, cfg *Config, logger *zap.Logge
} }
waku.options = opts waku.options = opts
waku.logger.Info("setup the go-waku node successfully") waku.logger.Info("setup the go-waku node successfully")
return waku, nil return waku, nil
@ -1037,61 +1041,6 @@ func (w *Waku) ConfirmMessageDelivered(hashes []gethcommon.Hash) {
} }
} }
func (w *Waku) SetStorePeerID(peerID peer.ID) {
w.messageSender.SetStorePeerID(peerID)
}
func (w *Waku) Query(ctx context.Context, peerID peer.ID, query store.FilterCriteria, cursor []byte, opts []store.RequestOption, processEnvelopes bool) ([]byte, int, error) {
requestID := protocol.GenerateRequestID()
opts = append(opts,
store.WithRequestID(requestID),
store.WithPeer(peerID),
store.WithCursor(cursor))
logger := w.logger.With(zap.String("requestID", hexutil.Encode(requestID)), zap.Stringer("peerID", peerID))
logger.Debug("store.query",
logutils.WakuMessageTimestamp("startTime", query.TimeStart),
logutils.WakuMessageTimestamp("endTime", query.TimeEnd),
zap.Strings("contentTopics", query.ContentTopics.ToList()),
zap.String("pubsubTopic", query.PubsubTopic),
zap.String("cursor", hexutil.Encode(cursor)),
)
queryStart := time.Now()
result, err := w.node.Store().Query(ctx, query, opts...)
queryDuration := time.Since(queryStart)
if err != nil {
logger.Error("error querying storenode", zap.Error(err))
if w.onHistoricMessagesRequestFailed != nil {
w.onHistoricMessagesRequestFailed(requestID, peerID, err)
}
return nil, 0, err
}
messages := result.Messages()
envelopesCount := len(messages)
w.logger.Debug("store.query response", zap.Duration("queryDuration", queryDuration), zap.Int("numMessages", envelopesCount), zap.Bool("hasCursor", result.IsComplete() && result.Cursor() != nil))
for _, mkv := range messages {
msg := mkv.Message
// Temporarily setting RateLimitProof to nil so it matches the WakuMessage protobuffer we are sending
// See https://github.com/vacp2p/rfc/issues/563
mkv.Message.RateLimitProof = nil
envelope := protocol.NewEnvelope(msg, msg.GetTimestamp(), query.PubsubTopic)
err = w.OnNewEnvelopes(envelope, common.StoreMessageType, processEnvelopes)
if err != nil {
return nil, 0, err
}
}
return result.Cursor(), envelopesCount, nil
}
// OnNewEnvelope is an interface from Waku FilterManager API that gets invoked when any new message is received by Filter. // OnNewEnvelope is an interface from Waku FilterManager API that gets invoked when any new message is received by Filter.
func (w *Waku) OnNewEnvelope(env *protocol.Envelope) error { func (w *Waku) OnNewEnvelope(env *protocol.Envelope) error {
return w.OnNewEnvelopes(env, common.RelayedMessageType, false) return w.OnNewEnvelopes(env, common.RelayedMessageType, false)
@ -1115,6 +1064,11 @@ func (w *Waku) Start() error {
return fmt.Errorf("failed to start go-waku node: %v", err) return fmt.Errorf("failed to start go-waku node: %v", err)
} }
w.StorenodeCycle = history.NewStorenodeCycle(w.logger)
w.HistoryRetriever = history.NewHistoryRetriever(w.node.Store(), NewHistoryProcessorWrapper(w), w.logger)
w.StorenodeCycle.Start(w.ctx, w.node.Host())
w.logger.Info("WakuV2 PeerID", zap.Stringer("id", w.node.Host().ID())) w.logger.Info("WakuV2 PeerID", zap.Stringer("id", w.node.Host().ID()))
w.discoverAndConnectPeers() w.discoverAndConnectPeers()
@ -1348,7 +1302,7 @@ func (w *Waku) startMessageSender() error {
if w.cfg.EnableStoreConfirmationForMessagesSent { if w.cfg.EnableStoreConfirmationForMessagesSent {
msgStoredChan := make(chan gethcommon.Hash, 1000) msgStoredChan := make(chan gethcommon.Hash, 1000)
msgExpiredChan := make(chan gethcommon.Hash, 1000) msgExpiredChan := make(chan gethcommon.Hash, 1000)
messageSentCheck := publish.NewMessageSentCheck(w.ctx, w.node.Store(), w.node.Timesource(), msgStoredChan, msgExpiredChan, w.logger) messageSentCheck := publish.NewMessageSentCheck(w.ctx, w.node.Store(), w.StorenodeCycle, w.node.Timesource(), msgStoredChan, msgExpiredChan, w.logger)
sender.WithMessageSentCheck(messageSentCheck) sender.WithMessageSentCheck(messageSentCheck)
w.wg.Add(1) w.wg.Add(1)
@ -1967,19 +1921,6 @@ func (w *Waku) PeerID() peer.ID {
return w.node.Host().ID() return w.node.Host().ID()
} }
func (w *Waku) PingPeer(ctx context.Context, peerID peer.ID) (time.Duration, error) {
pingResultCh := ping.Ping(ctx, w.node.Host(), peerID)
select {
case <-ctx.Done():
return 0, ctx.Err()
case r := <-pingResultCh:
if r.Error != nil {
return 0, r.Error
}
return r.RTT, nil
}
}
func (w *Waku) Peerstore() peerstore.Peerstore { func (w *Waku) Peerstore() peerstore.Peerstore {
return w.node.Host().Peerstore() return w.node.Host().Peerstore()
} }

View File

@ -280,19 +280,16 @@ func TestBasicWakuV2(t *testing.T) {
b.InitialInterval = 500 * time.Millisecond b.InitialInterval = 500 * time.Millisecond
} }
err = tt.RetryWithBackOff(func() error { err = tt.RetryWithBackOff(func() error {
_, envelopeCount, err := w.Query( result, err := w.node.Store().Query(
context.Background(), context.Background(),
storeNode.PeerID,
store.FilterCriteria{ store.FilterCriteria{
ContentFilter: protocol.NewContentFilter(config.DefaultShardPubsubTopic, contentTopic.ContentTopic()), ContentFilter: protocol.NewContentFilter(config.DefaultShardPubsubTopic, contentTopic.ContentTopic()),
TimeStart: proto.Int64((timestampInSeconds - int64(marginInSeconds)) * int64(time.Second)), TimeStart: proto.Int64((timestampInSeconds - int64(marginInSeconds)) * int64(time.Second)),
TimeEnd: proto.Int64((timestampInSeconds + int64(marginInSeconds)) * int64(time.Second)), TimeEnd: proto.Int64((timestampInSeconds + int64(marginInSeconds)) * int64(time.Second)),
}, },
nil, store.WithPeer(storeNode.PeerID),
nil,
false,
) )
if err != nil || envelopeCount == 0 { if err != nil || len(result.Messages()) == 0 {
// in case of failure extend timestamp margin up to 40secs // in case of failure extend timestamp margin up to 40secs
if marginInSeconds < 40 { if marginInSeconds < 40 {
marginInSeconds += 5 marginInSeconds += 5
@ -586,20 +583,17 @@ func TestWakuV2Store(t *testing.T) {
timestampInSeconds := msgTimestamp / int64(time.Second) timestampInSeconds := msgTimestamp / int64(time.Second)
marginInSeconds := 5 marginInSeconds := 5
// Query the second node's store for the message // Query the second node's store for the message
_, envelopeCount, err := w1.Query( result, err := w1.node.Store().Query(
context.Background(), context.Background(),
w2.node.Host().ID(),
store.FilterCriteria{ store.FilterCriteria{
TimeStart: proto.Int64((timestampInSeconds - int64(marginInSeconds)) * int64(time.Second)), TimeStart: proto.Int64((timestampInSeconds - int64(marginInSeconds)) * int64(time.Second)),
TimeEnd: proto.Int64((timestampInSeconds + int64(marginInSeconds)) * int64(time.Second)), TimeEnd: proto.Int64((timestampInSeconds + int64(marginInSeconds)) * int64(time.Second)),
ContentFilter: protocol.NewContentFilter(config1.DefaultShardPubsubTopic, contentTopic.ContentTopic()), ContentFilter: protocol.NewContentFilter(config1.DefaultShardPubsubTopic, contentTopic.ContentTopic()),
}, },
nil, store.WithPeer(w2.node.Host().ID()),
nil,
false,
) )
require.NoError(t, err) require.NoError(t, err)
require.True(t, envelopeCount > 0, "no messages received from store node") require.True(t, len(result.Messages()) > 0, "no messages received from store node")
} }
func waitForPeerConnection(t *testing.T, peerID peer.ID, peerCh chan peer.IDSlice) { func waitForPeerConnection(t *testing.T, peerID peer.ID, peerCh chan peer.IDSlice) {