feat!: extract storenode cycle to go-waku api
Extracts the storenode cycle code to go-waku.
parent 987a9e8707
commit 0c838b0188
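For orientation, a minimal usage sketch of the relocated storenode-cycle API as it is exposed through the extended types.Waku interface in this commit. It is illustrative only: the import path for types, the page limit value, and the waitAndQuery helper name are assumptions, and error handling is trimmed.

package example

import (
    "context"

    "github.com/waku-org/go-waku/waku/v2/api/history"

    "github.com/status-im/status-go/eth-node/types"
)

// waitAndQuery (hypothetical helper) waits until the storenode cycle, now owned
// by go-waku, promotes an active storenode, then runs one batch query against it.
// Retries and failure accounting are handled inside PerformStorenodeTask.
func waitAndQuery(ctx context.Context, w types.Waku, batch types.MailserverBatch) error {
    select {
    case <-ctx.Done():
        return ctx.Err()
    case <-w.OnStorenodeAvailable(): // fires whenever a storenode becomes active
    }

    peerID := w.GetActiveStorenode()
    return w.PerformStorenodeTask(func() error {
        // 20 is an arbitrary page limit for this sketch; no per-page callback,
        // and envelopes are not processed eagerly.
        return w.ProcessMailserverBatch(ctx, batch, peerID, 20, nil, false)
    }, history.WithPeerID(peerID))
}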
@@ -9,6 +9,8 @@ import (
    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/multiformats/go-multiaddr"

    "github.com/waku-org/go-waku/waku/v2/api/history"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/p2p/enode"
    gocommon "github.com/status-im/status-go/common"

@@ -274,10 +276,6 @@ func (w *GethWakuWrapper) MarkP2PMessageAsProcessed(hash common.Hash) {
    w.waku.MarkP2PMessageAsProcessed(hash)
}

func (w *GethWakuWrapper) RequestStoreMessages(ctx context.Context, peerID peer.ID, r types.MessagesRequest, processEnvelopes bool) (types.StoreRequestCursor, int, error) {
    return nil, 0, errors.New("not implemented")
}

func (w *GethWakuWrapper) ConnectionChanged(_ connection.State) {}

func (w *GethWakuWrapper) ClearEnvelopesCache() {

@@ -314,13 +312,55 @@ func (w *wakuFilterWrapper) ID() string {
func (w *GethWakuWrapper) ConfirmMessageDelivered(hashes []common.Hash) {
}

func (w *GethWakuWrapper) SetStorePeerID(peerID peer.ID) {
}

func (w *GethWakuWrapper) PeerID() peer.ID {
    panic("not implemented")
    panic("not available in WakuV1")
}

func (w *GethWakuWrapper) PingPeer(context.Context, peer.ID) (time.Duration, error) {
    return 0, errors.New("not available in WakuV1")
func (w *GethWakuWrapper) GetActiveStorenode() peer.ID {
    panic("not available in WakuV1")
}

func (w *GethWakuWrapper) OnStorenodeChanged() <-chan peer.ID {
    panic("not available in WakuV1")
}

func (w *GethWakuWrapper) OnStorenodeNotWorking() <-chan struct{} {
    panic("not available in WakuV1")
}

func (w *GethWakuWrapper) OnStorenodeAvailable() <-chan peer.ID {
    panic("not available in WakuV1")
}

func (w *GethWakuWrapper) WaitForAvailableStoreNode(ctx context.Context) bool {
    return false
}

func (w *GethWakuWrapper) SetStorenodeConfigProvider(c history.StorenodeConfigProvider) {
    panic("not available in WakuV1")
}

func (w *GethWakuWrapper) ProcessMailserverBatch(
    ctx context.Context,
    batch types.MailserverBatch,
    storenodeID peer.ID,
    pageLimit uint64,
    shouldProcessNextPage func(int) (bool, uint64),
    processEnvelopes bool,
) error {
    return errors.New("not available in WakuV1")
}

func (w *GethWakuWrapper) IsStorenodeAvailable(peerID peer.ID) bool {
    panic("not available in WakuV1")

}

func (w *GethWakuWrapper) PerformStorenodeTask(fn func() error, opts ...history.StorenodeTaskOption) error {
    panic("not available in WakuV1")

}

func (w *GethWakuWrapper) DisconnectActiveStorenode(ctx context.Context, backoff time.Duration, shouldCycle bool) {
    panic("not available in WakuV1")
}
@@ -9,6 +9,7 @@ import (
    "github.com/multiformats/go-multiaddr"
    "google.golang.org/protobuf/proto"

    "github.com/waku-org/go-waku/waku/v2/api/history"
    "github.com/waku-org/go-waku/waku/v2/protocol"
    "github.com/waku-org/go-waku/waku/v2/protocol/store"

@@ -176,39 +177,6 @@ func (w *gethWakuV2Wrapper) createFilterWrapper(id string, keyAsym *ecdsa.Privat
    }, id), nil
}

func (w *gethWakuV2Wrapper) RequestStoreMessages(ctx context.Context, peerID peer.ID, r types.MessagesRequest, processEnvelopes bool) (types.StoreRequestCursor, int, error) {
    options := []store.RequestOption{
        store.WithPaging(false, uint64(r.Limit)),
    }

    var cursor []byte
    if r.StoreCursor != nil {
        cursor = r.StoreCursor
    }

    contentTopics := []string{}
    for _, topic := range r.ContentTopics {
        contentTopics = append(contentTopics, wakucommon.BytesToTopic(topic).ContentTopic())
    }

    query := store.FilterCriteria{
        TimeStart:     proto.Int64(int64(r.From) * int64(time.Second)),
        TimeEnd:       proto.Int64(int64(r.To) * int64(time.Second)),
        ContentFilter: protocol.NewContentFilter(w.waku.GetPubsubTopic(r.PubsubTopic), contentTopics...),
    }

    pbCursor, envelopesCount, err := w.waku.Query(ctx, peerID, query, cursor, options, processEnvelopes)
    if err != nil {
        return nil, 0, err
    }

    if pbCursor != nil {
        return pbCursor, envelopesCount, nil
    }

    return nil, envelopesCount, nil
}

func (w *gethWakuV2Wrapper) StartDiscV5() error {
    return w.waku.StartDiscV5()
}

@@ -289,7 +257,7 @@ func (w *gethWakuV2Wrapper) SubscribeToConnStatusChanges() (*types.ConnStatusSub
func (w *gethWakuV2Wrapper) SetCriteriaForMissingMessageVerification(peerID peer.ID, pubsubTopic string, contentTopics []types.TopicType) error {
    var cTopics []string
    for _, ct := range contentTopics {
        cTopics = append(cTopics, wakucommon.TopicType(ct).ContentTopic())
        cTopics = append(cTopics, wakucommon.BytesToTopic(ct.Bytes()).ContentTopic())
    }
    pubsubTopic = w.waku.GetPubsubTopic(pubsubTopic)
    w.waku.SetTopicsToVerifyForMissingMessages(peerID, pubsubTopic, cTopics)

@@ -338,14 +306,71 @@ func (w *gethWakuV2Wrapper) ConfirmMessageDelivered(hashes []common.Hash) {
    w.waku.ConfirmMessageDelivered(hashes)
}

func (w *gethWakuV2Wrapper) SetStorePeerID(peerID peer.ID) {
    w.waku.SetStorePeerID(peerID)
}

func (w *gethWakuV2Wrapper) PeerID() peer.ID {
    return w.waku.PeerID()
}

func (w *gethWakuV2Wrapper) PingPeer(ctx context.Context, peerID peer.ID) (time.Duration, error) {
    return w.waku.PingPeer(ctx, peerID)
func (w *gethWakuV2Wrapper) GetActiveStorenode() peer.ID {
    return w.waku.StorenodeCycle.GetActiveStorenode()
}

func (w *gethWakuV2Wrapper) OnStorenodeChanged() <-chan peer.ID {
    return w.waku.StorenodeCycle.StorenodeChangedEmitter.Subscribe()
}

func (w *gethWakuV2Wrapper) OnStorenodeNotWorking() <-chan struct{} {
    return w.waku.StorenodeCycle.StorenodeNotWorkingEmitter.Subscribe()
}

func (w *gethWakuV2Wrapper) OnStorenodeAvailable() <-chan peer.ID {
    return w.waku.StorenodeCycle.StorenodeAvailableEmitter.Subscribe()
}

func (w *gethWakuV2Wrapper) WaitForAvailableStoreNode(ctx context.Context) bool {
    return w.waku.StorenodeCycle.WaitForAvailableStoreNode(ctx)
}

func (w *gethWakuV2Wrapper) SetStorenodeConfigProvider(c history.StorenodeConfigProvider) {
    w.waku.StorenodeCycle.SetStorenodeConfigProvider(c)
}

func (w *gethWakuV2Wrapper) ProcessMailserverBatch(
    ctx context.Context,
    batch types.MailserverBatch,
    storenodeID peer.ID,
    pageLimit uint64,
    shouldProcessNextPage func(int) (bool, uint64),
    processEnvelopes bool,
) error {
    pubsubTopic := w.waku.GetPubsubTopic(batch.PubsubTopic)
    contentTopics := []string{}
    for _, topic := range batch.Topics {
        contentTopics = append(contentTopics, wakucommon.BytesToTopic(topic.Bytes()).ContentTopic())
    }

    criteria := store.FilterCriteria{
        TimeStart:     proto.Int64(batch.From.UnixNano()),
        TimeEnd:       proto.Int64(batch.To.UnixNano()),
        ContentFilter: protocol.NewContentFilter(pubsubTopic, contentTopics...),
    }

    return w.waku.HistoryRetriever.Query(ctx, criteria, storenodeID, pageLimit, shouldProcessNextPage, processEnvelopes)
}

func (w *gethWakuV2Wrapper) IsStorenodeAvailable(peerID peer.ID) bool {
    return w.waku.StorenodeCycle.IsStorenodeAvailable(peerID)
}

func (w *gethWakuV2Wrapper) PerformStorenodeTask(fn func() error, opts ...history.StorenodeTaskOption) error {
    return w.waku.StorenodeCycle.PerformStorenodeTask(fn, opts...)
}

func (w *gethWakuV2Wrapper) DisconnectActiveStorenode(ctx context.Context, backoff time.Duration, shouldCycle bool) {
    w.waku.StorenodeCycle.Lock()
    defer w.waku.StorenodeCycle.Unlock()

    w.waku.StorenodeCycle.DisconnectActiveStorenode(backoff)
    if shouldCycle {
        w.waku.StorenodeCycle.Cycle(ctx)
    }
}
@@ -1,59 +1,5 @@
package types

import (
    "time"
)

const (
    // MaxLimitInMessagesRequest represents the maximum number of messages
    // that can be requested from the mailserver
    MaxLimitInMessagesRequest = 1000
)

// MessagesRequest contains details of a request of historic messages.
type MessagesRequest struct {
    // ID of the request. The current implementation requires ID to be 32-byte array,
    // however, it's not enforced for future implementation.
    ID []byte `json:"id"`
    // From is a lower bound of time range.
    From uint32 `json:"from"`
    // To is a upper bound of time range.
    To uint32 `json:"to"`
    // Limit determines the number of messages sent by the mail server
    // for the current paginated request.
    Limit uint32 `json:"limit"`
    // Cursor is used as starting point for paginated requests.
    Cursor []byte `json:"cursor"`
    // StoreCursor is used as starting point for WAKUV2 paginatedRequests
    StoreCursor StoreRequestCursor `json:"storeCursor"`
    // Bloom is a filter to match requested messages.
    Bloom []byte `json:"bloom"`
    // PubsubTopic is the gossipsub topic on which the message was broadcasted
    PubsubTopic string `json:"pubsubTopic"`
    // ContentTopics is a list of topics. A returned message should
    // belong to one of the topics from the list.
    ContentTopics [][]byte `json:"contentTopics"`
}

type StoreRequestCursor []byte

// SetDefaults sets the From and To defaults
func (r *MessagesRequest) SetDefaults(now time.Time) {
    // set From and To defaults
    if r.To == 0 {
        r.To = uint32(now.UTC().Unix())
    }

    if r.From == 0 {
        oneDay := uint32(86400) // -24 hours
        if r.To < oneDay {
            r.From = 0
        } else {
            r.From = r.To - oneDay
        }
    }
}

// MailServerResponse is the response payload sent by the mailserver.
type MailServerResponse struct {
    LastEnvelopeHash Hash
@@ -34,6 +34,10 @@ func (t TopicType) String() string {
    return EncodeHex(t[:])
}

func (t TopicType) Bytes() []byte {
    return TopicTypeToByteArray(t)
}

// MarshalText returns the hex representation of t.
func (t TopicType) MarshalText() ([]byte, error) {
    return HexBytes(t[:]).MarshalText()
@@ -3,7 +3,10 @@ package types
import (
    "context"
    "crypto/ecdsa"
    "crypto/sha256"
    "encoding/hex"
    "encoding/json"
    "fmt"
    "sync"
    "time"

@@ -12,6 +15,8 @@ import (
    "github.com/multiformats/go-multiaddr"
    "github.com/pborman/uuid"

    "github.com/waku-org/go-waku/waku/v2/api/history"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/status-im/status-go/connection"

@@ -176,9 +181,6 @@ type Waku interface {
    Unsubscribe(ctx context.Context, id string) error
    UnsubscribeMany(ids []string) error

    // RequestStoreMessages uses the WAKU2-STORE protocol to request historic messages
    RequestStoreMessages(ctx context.Context, peerID peer.ID, request MessagesRequest, processEnvelopes bool) (StoreRequestCursor, int, error)

    // ProcessingP2PMessages indicates whether there are in-flight p2p messages
    ProcessingP2PMessages() bool

@@ -194,12 +196,57 @@ type Waku interface {
    // ConfirmMessageDelivered updates a message has been delivered in waku
    ConfirmMessageDelivered(hash []common.Hash)

    // SetStorePeerID updates the peer id of store node
    SetStorePeerID(peerID peer.ID)

    // PeerID returns node's PeerID
    PeerID() peer.ID

    // PingPeer returns the reply time
    PingPeer(ctx context.Context, peerID peer.ID) (time.Duration, error)
    // GetActiveStorenode returns the peer ID of the currently active storenode. It will be empty if no storenode is active
    GetActiveStorenode() peer.ID

    // OnStorenodeChanged is triggered when a new storenode is promoted to become the active storenode or when the active storenode is removed
    OnStorenodeChanged() <-chan peer.ID

    // OnStorenodeNotWorking is triggered when the last active storenode fails to return results consistently
    OnStorenodeNotWorking() <-chan struct{}

    // OnStorenodeAvailable is triggered when there is a new active storenode selected
    OnStorenodeAvailable() <-chan peer.ID

    // WaitForAvailableStoreNode will wait for a storenode to be available depending on the context
    WaitForAvailableStoreNode(ctx context.Context) bool

    // SetStorenodeConfigProvider will set the configuration provider for the storenode cycle
    SetStorenodeConfigProvider(c history.StorenodeConfigProvider)

    // ProcessMailserverBatch will receive a criteria and storenode and execute a query
    ProcessMailserverBatch(
        ctx context.Context,
        batch MailserverBatch,
        storenodeID peer.ID,
        pageLimit uint64,
        shouldProcessNextPage func(int) (bool, uint64),
        processEnvelopes bool,
    ) error

    // IsStorenodeAvailable is used to determine whether a storenode is available or not
    IsStorenodeAvailable(peerID peer.ID) bool

    PerformStorenodeTask(fn func() error, opts ...history.StorenodeTaskOption) error

    // DisconnectActiveStorenode will trigger a disconnection of the active storenode, and potentially execute a cycling so a new storenode is promoted
    DisconnectActiveStorenode(ctx context.Context, backoff time.Duration, shouldCycle bool)
}

type MailserverBatch struct {
    From        time.Time
    To          time.Time
    Cursor      string
    PubsubTopic string
    Topics      []TopicType
    ChatIDs     []string
}

func (mb *MailserverBatch) Hash() string {
    data := fmt.Sprintf("%d%d%s%s%v%v", mb.From.UnixNano(), mb.To.UnixNano(), mb.Cursor, mb.PubsubTopic, mb.Topics, mb.ChatIDs)
    hash := sha256.Sum256([]byte(data))
    return hex.EncodeToString(hash[:4])
}
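The batch type now lives in the types package and carries time.Time bounds instead of the uint32 Unix seconds used by the old protocol.MailserverBatch. A small sketch of the conversion callers now perform; the newDayBatch helper name is made up for illustration.

package example

import (
    "time"

    "github.com/status-im/status-go/eth-node/types"
)

// newDayBatch builds a one-day batch ending now, starting from a legacy
// uint32 Unix timestamp when one is available.
func newDayBatch(pubsubTopic string, topics []types.TopicType, chatID string, lastRequest uint32) types.MailserverBatch {
    to := time.Now()
    from := to.Add(-24 * time.Hour)
    if lastRequest != 0 {
        from = time.Unix(int64(lastRequest), 0)
    }
    return types.MailserverBatch{
        From:        from,
        To:          to,
        PubsubTopic: pubsubTopic,
        Topics:      topics,
        ChatIDs:     []string{chatID},
    }
}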
go.mod (2 changes)
@@ -97,7 +97,7 @@ require (
    github.com/schollz/peerdiscovery v1.7.0
    github.com/siphiuel/lc-proxy-wrapper v0.0.0-20230516150924-246507cee8c7
    github.com/urfave/cli/v2 v2.27.2
    github.com/waku-org/go-waku v0.8.1-0.20241004054019-0ed94ce0b1cb
    github.com/waku-org/go-waku v0.8.1-0.20241021202955-3c4e40c729a0
    github.com/wk8/go-ordered-map/v2 v2.1.7
    github.com/yeqown/go-qrcode/v2 v2.2.1
    github.com/yeqown/go-qrcode/writer/standard v1.2.1

go.sum (4 changes)
@@ -2152,8 +2152,8 @@ github.com/waku-org/go-libp2p-pubsub v0.12.0-gowaku.0.20240823143342-b0f2429ca27
github.com/waku-org/go-libp2p-pubsub v0.12.0-gowaku.0.20240823143342-b0f2429ca27f/go.mod h1:Oi0zw9aw8/Y5GC99zt+Ef2gYAl+0nZlwdJonDyOz/sE=
github.com/waku-org/go-libp2p-rendezvous v0.0.0-20240110193335-a67d1cc760a0 h1:R4YYx2QamhBRl/moIxkDCNW+OP7AHbyWLBygDc/xIMo=
github.com/waku-org/go-libp2p-rendezvous v0.0.0-20240110193335-a67d1cc760a0/go.mod h1:EhZP9fee0DYjKH/IOQvoNSy1tSHp2iZadsHGphcAJgY=
github.com/waku-org/go-waku v0.8.1-0.20241004054019-0ed94ce0b1cb h1:E3J49PH9iXpjaOOI/VrEX/VhSk3obKjxVehGEDzZgXI=
github.com/waku-org/go-waku v0.8.1-0.20241004054019-0ed94ce0b1cb/go.mod h1:1BRnyg2mQ2aBNLTBaPq6vEvobzywGykPOhGQFbHGf74=
github.com/waku-org/go-waku v0.8.1-0.20241021202955-3c4e40c729a0 h1:PNKcOPMn0yoC2NQaJPPB8FvHT/YtaU8hZAoovSl42KM=
github.com/waku-org/go-waku v0.8.1-0.20241021202955-3c4e40c729a0/go.mod h1:1BRnyg2mQ2aBNLTBaPq6vEvobzywGykPOhGQFbHGf74=
github.com/waku-org/go-zerokit-rln v0.1.14-0.20240102145250-fa738c0bdf59 h1:jisj+OCI6QydLtFq3Pyhu49wl9ytPN7oAHjMfepHDrA=
github.com/waku-org/go-zerokit-rln v0.1.14-0.20240102145250-fa738c0bdf59/go.mod h1:1PdBdPzyTaKt3VnpAHk3zj+r9dXPFOr3IHZP9nFle6E=
github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230916172309-ee0ee61dde2b h1:KgZVhsLkxsj5gb/FfndSCQu6VYwALrCOgYI3poR95yE=
@@ -139,7 +139,6 @@ type Messenger struct {
    allInstallations         *installationMap
    modifiedInstallations    *stringBoolMap
    installationID           string
    mailserverCycle          mailserverCycle
    communityStorenodes      *storenodes.CommunityStorenodes
    database                 *sql.DB
    multiAccounts            *multiaccounts.Database

@@ -172,7 +171,6 @@ type Messenger struct {

    // TODO(samyoul) Determine if/how the remaining usage of this mutex can be removed
    mutex                     sync.Mutex
    mailPeersMutex            sync.RWMutex
    handleMessagesMutex       sync.Mutex
    handleImportMessagesMutex sync.Mutex

@@ -199,50 +197,6 @@ type Messenger struct {
    mvdsStatusChangeEvent chan datasyncnode.PeerStatusChangeEvent
}

type connStatus int

const (
    disconnected connStatus = iota + 1
    connected
)

type peerStatus struct {
    status                connStatus
    canConnectAfter       time.Time
    lastConnectionAttempt time.Time
    mailserver            mailserversDB.Mailserver
}
type mailserverCycle struct {
    sync.RWMutex
    allMailservers            []mailserversDB.Mailserver
    activeMailserver          *mailserversDB.Mailserver
    peers                     map[string]peerStatus
    availabilitySubscriptions *availabilitySubscriptions
}

type availabilitySubscriptions struct {
    sync.Mutex
    subscriptions []chan struct{}
}

func (s *availabilitySubscriptions) Subscribe() <-chan struct{} {
    s.Lock()
    defer s.Unlock()
    c := make(chan struct{})
    s.subscriptions = append(s.subscriptions, c)
    return c
}

func (s *availabilitySubscriptions) EmitMailserverAvailable() {
    s.Lock()
    defer s.Unlock()

    for _, subs := range s.subscriptions {
        close(subs)
    }
    s.subscriptions = nil
}

type EnvelopeEventsInterceptor struct {
    EnvelopeEventsHandler transport.EnvelopeEventsHandler
    Messenger             *Messenger

@@ -624,19 +578,15 @@ func NewMessenger(
        peerStore:             peerStore,
        mvdsStatusChangeEvent: make(chan datasyncnode.PeerStatusChangeEvent, 5),
        verificationDatabase:  verification.NewPersistence(database),
        mailserverCycle: mailserverCycle{
            peers:                     make(map[string]peerStatus),
            availabilitySubscriptions: &availabilitySubscriptions{},
        },
        mailserversDatabase:  c.mailserversDatabase,
        communityStorenodes:  storenodes.NewCommunityStorenodes(storenodes.NewDB(database), logger),
        account:              c.account,
        quit:                 make(chan struct{}),
        ctx:                  ctx,
        cancel:               cancel,
        importingCommunities: make(map[string]bool),
        importingChannels:    make(map[string]bool),
        importRateLimiter:    rate.NewLimiter(rate.Every(importSlowRate), 1),
        mailserversDatabase:  c.mailserversDatabase,
        communityStorenodes:  storenodes.NewCommunityStorenodes(storenodes.NewDB(database), logger),
        account:              c.account,
        quit:                 make(chan struct{}),
        ctx:                  ctx,
        cancel:               cancel,
        importingCommunities: make(map[string]bool),
        importingChannels:    make(map[string]bool),
        importRateLimiter:    rate.NewLimiter(rate.Every(importSlowRate), 1),
        importDelayer: struct {
            wait chan struct{}
            once sync.Once

@@ -883,22 +833,26 @@ func (m *Messenger) Start() (*MessengerResponse, error) {
    }
    response := &MessengerResponse{}

    mailservers, err := m.allMailservers()
    storenodes, err := m.AllMailservers()
    if err != nil {
        return nil, err
    }

    response.Mailservers = mailservers
    err = m.StartMailserverCycle(mailservers)
    err = m.setupStorenodes(storenodes)
    if err != nil {
        return nil, err
    }

    response.Mailservers = storenodes

    m.transport.SetStorenodeConfigProvider(m)

    if err := m.communityStorenodes.ReloadFromDB(); err != nil {
        return nil, err
    }

    go m.checkForMissingMessagesLoop()
    go m.checkForStorenodeCycleSignals()

    controlledCommunities, err := m.communitiesManager.Controlled()
    if err != nil {

@@ -906,10 +860,15 @@ func (m *Messenger) Start() (*MessengerResponse, error) {
    }

    if m.archiveManager.IsReady() {
        available := m.mailserverCycle.availabilitySubscriptions.Subscribe()
        go func() {
            defer gocommon.LogOnPanic()
            <-available

            select {
            case <-m.ctx.Done():
                return
            case <-m.transport.OnStorenodeAvailable():
            }

            m.InitHistoryArchiveTasks(controlledCommunities)
        }()
    }
@ -16,6 +16,7 @@ import (
|
|||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/google/uuid"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
|
||||
gethcommon "github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
|
@ -40,6 +41,7 @@ import (
|
|||
"github.com/status-im/status-go/protocol/encryption"
|
||||
"github.com/status-im/status-go/protocol/protobuf"
|
||||
"github.com/status-im/status-go/protocol/requests"
|
||||
"github.com/status-im/status-go/protocol/storenodes"
|
||||
"github.com/status-im/status-go/protocol/transport"
|
||||
v1protocol "github.com/status-im/status-go/protocol/v1"
|
||||
localnotifications "github.com/status-im/status-go/services/local-notifications"
|
||||
|
@ -3291,7 +3293,7 @@ func (m *Messenger) FetchCommunity(request *FetchCommunityRequest) (*communities
|
|||
WithWaitForResponseOption(request.WaitForResponse),
|
||||
}
|
||||
|
||||
community, _, err := m.storeNodeRequestsManager.FetchCommunity(communityAddress, options)
|
||||
community, _, err := m.storeNodeRequestsManager.FetchCommunity(m.ctx, communityAddress, options)
|
||||
|
||||
return community, err
|
||||
}
|
||||
|
@ -3299,7 +3301,7 @@ func (m *Messenger) FetchCommunity(request *FetchCommunityRequest) (*communities
|
|||
// fetchCommunities installs filter for community and requests its details from store node.
|
||||
// When response received it will be passed through signals handler.
|
||||
func (m *Messenger) fetchCommunities(communities []communities.CommunityShard) error {
|
||||
return m.storeNodeRequestsManager.FetchCommunities(communities, []StoreNodeRequestOption{})
|
||||
return m.storeNodeRequestsManager.FetchCommunities(m.ctx, communities, []StoreNodeRequestOption{})
|
||||
}
|
||||
|
||||
// passStoredCommunityInfoToSignalHandler calls signal handler with community info
|
||||
|
@ -3970,8 +3972,8 @@ func (m *Messenger) InitHistoryArchiveTasks(communities []*communities.Community
|
|||
}
|
||||
|
||||
// Request possibly missed waku messages for community
|
||||
ms := m.getActiveMailserver(c.ID().String())
|
||||
_, err = m.syncFiltersFrom(*ms, filters, uint32(latestWakuMessageTimestamp))
|
||||
ms := m.getCommunityStorenode(c.ID().String())
|
||||
_, err = m.syncFiltersFrom(ms, filters, uint32(latestWakuMessageTimestamp))
|
||||
if err != nil {
|
||||
m.logger.Error("failed to request missing messages", zap.Error(err))
|
||||
continue
|
||||
|
@ -5155,3 +5157,32 @@ func (m *Messenger) startRequestMissingCommunityChannelsHRKeysLoop() {
|
|||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// getCommunityStorenode returns the active mailserver if a communityID is present then it'll return the mailserver
|
||||
// for that community if it has a mailserver setup otherwise it'll return the global mailserver
|
||||
func (m *Messenger) getCommunityStorenode(communityID ...string) peer.ID {
|
||||
if m.transport.WakuVersion() != 2 {
|
||||
return ""
|
||||
}
|
||||
|
||||
if len(communityID) == 0 || communityID[0] == "" {
|
||||
return m.transport.GetActiveStorenode()
|
||||
}
|
||||
|
||||
ms, err := m.communityStorenodes.GetStorenodeByCommunityID(communityID[0])
|
||||
if err != nil {
|
||||
if !errors.Is(err, storenodes.ErrNotFound) {
|
||||
m.logger.Error("getting storenode for community, using global", zap.String("communityID", communityID[0]), zap.Error(err))
|
||||
}
|
||||
// if we don't find a specific mailserver for the community, we just use the regular mailserverCycle's one
|
||||
return m.transport.GetActiveStorenode()
|
||||
}
|
||||
|
||||
peerID, err := ms.PeerID()
|
||||
if err != nil {
|
||||
m.logger.Error("getting storenode for community, using global", zap.String("communityID", communityID[0]), zap.Error(err))
|
||||
return m.transport.GetActiveStorenode()
|
||||
}
|
||||
|
||||
return peerID
|
||||
}
|
||||
|
|
|
@ -1321,7 +1321,7 @@ func (m *Messenger) FetchContact(contactID string, waitForResponse bool) (*Conta
|
|||
options := []StoreNodeRequestOption{
|
||||
WithWaitForResponseOption(waitForResponse),
|
||||
}
|
||||
contact, _, err := m.storeNodeRequestsManager.FetchContact(contactID, options)
|
||||
contact, _, err := m.storeNodeRequestsManager.FetchContact(m.ctx, contactID, options)
|
||||
return contact, err
|
||||
}
|
||||
|
||||
|
|
|
@@ -1,19 +1,16 @@
package protocol

import (
    "context"
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "math"
    "sort"
    "sync"
    "time"

    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/pkg/errors"
    "go.uber.org/zap"

    "github.com/waku-org/go-waku/waku/v2/api/history"

    gocommon "github.com/status-im/status-go/common"
    "github.com/status-im/status-go/connection"
    "github.com/status-im/status-go/eth-node/crypto"

@@ -31,22 +28,21 @@ const (
    // tolerance is how many seconds of potentially out-of-order messages we want to fetch
    tolerance uint32 = 60

    mailserverRequestTimeout    = 30 * time.Second
    mailserverMaxTries          uint = 2
    mailserverMaxFailedRequests uint = 2

    oneDayDuration   = 24 * time.Hour
    oneMonthDuration = 31 * oneDayDuration
)

// maxTopicsPerRequest sets the batch size to limit the number of topics per store query
var maxTopicsPerRequest int = 10
    backoffByUserAction = 0 * time.Second
)

var ErrNoFiltersForChat = errors.New("no filter registered for given chat")

func (m *Messenger) shouldSync() (bool, error) {
    if m.transport.WakuVersion() != 2 {
        return false, nil
    }

    // TODO (pablo) support community store node as well
    if m.mailserverCycle.activeMailserver == nil || !m.Online() {
    if m.transport.GetActiveStorenode() == "" || !m.Online() {
        return false, nil
    }

@@ -72,9 +68,9 @@ func (m *Messenger) scheduleSyncChat(chat *Chat) (bool, error) {

    go func() {
        defer gocommon.LogOnPanic()
        ms := m.getActiveMailserver(chat.CommunityID)
        _, err = m.performMailserverRequest(ms, func(mailServer mailservers.Mailserver) (*MessengerResponse, error) {
            response, err := m.syncChatWithFilters(mailServer, chat.ID)
        peerID := m.getCommunityStorenode(chat.CommunityID)
        _, err = m.performStorenodeTask(func() (*MessengerResponse, error) {
            response, err := m.syncChatWithFilters(peerID, chat.ID)

            if err != nil {
                m.logger.Error("failed to sync chat", zap.Error(err))

@@ -85,7 +81,7 @@ func (m *Messenger) scheduleSyncChat(chat *Chat) (bool, error) {
                m.config.messengerSignalsHandler.MessengerResponse(response)
            }
            return response, nil
        })
        }, history.WithPeerID(peerID))
        if err != nil {
            m.logger.Error("failed to perform mailserver request", zap.Error(err))
        }
@@ -93,65 +89,34 @@ func (m *Messenger) scheduleSyncChat(chat *Chat) (bool, error) {
    return true, nil
}

func (m *Messenger) connectToNewMailserverAndWait() error {
    // Handle pinned mailservers
    m.logger.Info("disconnecting mailserver")
    pinnedMailserver, err := m.getPinnedMailserver()
func (m *Messenger) performStorenodeTask(task func() (*MessengerResponse, error), opts ...history.StorenodeTaskOption) (*MessengerResponse, error) {
    responseCh := make(chan *MessengerResponse, 1)
    err := m.transport.PerformStorenodeTask(func() error {
        r, err := task()
        if err != nil {
            return err
        }

        select {
        case <-m.ctx.Done():
            return m.ctx.Err()
        case responseCh <- r:
            return nil
        }
    }, opts...)
    if err != nil {
        m.logger.Error("could not obtain the pinned mailserver", zap.Error(err))
        return err
    }
    // If pinned mailserver is not nil, no need to disconnect and wait for it to be available
    if pinnedMailserver == nil {
        m.disconnectActiveMailserver(graylistBackoff)
        return nil, err
    }

    return m.findNewMailserver()
}

func (m *Messenger) performMailserverRequest(ms *mailservers.Mailserver, fn func(mailServer mailservers.Mailserver) (*MessengerResponse, error)) (*MessengerResponse, error) {
    if ms == nil {
        return nil, errors.New("mailserver not available")
    }

    m.mailserverCycle.RLock()
    defer m.mailserverCycle.RUnlock()
    var tries uint = 0
    for tries < mailserverMaxTries {
        if !m.communityStorenodes.IsCommunityStoreNode(ms.ID) && !m.isMailserverAvailable(ms.ID) {
            return nil, errors.New("storenode not available")
    select {
    case r := <-responseCh:
        if r != nil {
            return r, nil
        }
        m.logger.Info("trying performing mailserver requests", zap.Uint("try", tries), zap.String("mailserverID", ms.ID))

        // Peform request
        response, err := fn(*ms) // pass by value because we don't want the fn to modify the mailserver
        if err == nil {
            // Reset failed requests
            m.logger.Debug("mailserver request performed successfully",
                zap.String("mailserverID", ms.ID))
            ms.FailedRequests = 0
            return response, nil
        }

        m.logger.Error("failed to perform mailserver request",
            zap.String("mailserverID", ms.ID),
            zap.Uint("tries", tries),
            zap.Error(err),
        )

        tries++
        // Increment failed requests
        ms.FailedRequests++

        // Change mailserver
        if ms.FailedRequests >= mailserverMaxFailedRequests {
            return nil, errors.New("too many failed requests")
        }
        // Wait a couple of second not to spam
        time.Sleep(2 * time.Second)

        return nil, errors.New("no response available")
    case <-m.ctx.Done():
        return nil, m.ctx.Err()
    }
    return nil, errors.New("failed to perform mailserver request")
}

func (m *Messenger) scheduleSyncFilters(filters []*transport.Filter) (bool, error) {
@@ -170,9 +135,9 @@ func (m *Messenger) scheduleSyncFilters(filters []*transport.Filter) (bool, erro
    // split filters by community store node so we can request the filters to the correct mailserver
    filtersByMs := m.SplitFiltersByStoreNode(filters)
    for communityID, filtersForMs := range filtersByMs {
        ms := m.getActiveMailserver(communityID)
        _, err := m.performMailserverRequest(ms, func(ms mailservers.Mailserver) (*MessengerResponse, error) {
            response, err := m.syncFilters(ms, filtersForMs)
        peerID := m.getCommunityStorenode(communityID)
        _, err := m.performStorenodeTask(func() (*MessengerResponse, error) {
            response, err := m.syncFilters(peerID, filtersForMs)

            if err != nil {
                m.logger.Error("failed to sync filter", zap.Error(err))

@@ -183,7 +148,7 @@ func (m *Messenger) scheduleSyncFilters(filters []*transport.Filter) (bool, erro
                m.config.messengerSignalsHandler.MessengerResponse(response)
            }
            return response, nil
        })
        }, history.WithPeerID(peerID))
        if err != nil {
            m.logger.Error("failed to perform mailserver request", zap.Error(err))
        }

@@ -193,15 +158,14 @@ func (m *Messenger) scheduleSyncFilters(filters []*transport.Filter) (bool, erro
    return true, nil
}

func (m *Messenger) calculateMailserverTo() uint32 {
    seconds := float64(m.GetCurrentTimeInMillis()) / 1000
    return uint32(math.Ceil(seconds))
func (m *Messenger) calculateMailserverTo() time.Time {
    return time.Unix(0, int64(time.Duration(m.GetCurrentTimeInMillis())*time.Millisecond))
}

func (m *Messenger) calculateMailserverTimeBounds(duration time.Duration) (uint32, uint32) {
    now := float64(m.GetCurrentTimeInMillis()) / 1000
    to := uint32(math.Ceil(now))
    from := uint32(math.Floor(now)) - uint32(duration.Seconds())
func (m *Messenger) calculateMailserverTimeBounds(duration time.Duration) (time.Time, time.Time) {
    now := time.Unix(0, int64(time.Duration(m.GetCurrentTimeInMillis())*time.Millisecond))
    to := now
    from := now.Add(-duration)
    return from, to
}
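A quick worked check of the new time handling (values are illustrative, not taken from the codebase): with the wall clock at 1,730,000,000,000 ms, the old helper returned the uint32 1730000000, while the new helper returns the equivalent time.Time, so uint32(to.Unix()) still yields the same second for persistence code that keeps uint32 timestamps.

package example

import (
    "fmt"
    "math"
    "time"
)

func main() {
    nowMs := int64(1730000000000) // example value of GetCurrentTimeInMillis()

    // Old calculation: uint32 seconds, rounded up.
    oldTo := uint32(math.Ceil(float64(nowMs) / 1000)) // 1730000000

    // New calculation: a time.Time built from the same millisecond clock.
    newTo := time.Unix(0, nowMs*int64(time.Millisecond))

    fmt.Println(oldTo == uint32(newTo.Unix())) // true for whole-second inputs
}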
@ -252,13 +216,13 @@ func (m *Messenger) topicsForChat(chatID string) (string, []types.TopicType, err
|
|||
return filters[0].PubsubTopic, contentTopics, nil
|
||||
}
|
||||
|
||||
func (m *Messenger) syncChatWithFilters(ms mailservers.Mailserver, chatID string) (*MessengerResponse, error) {
|
||||
func (m *Messenger) syncChatWithFilters(peerID peer.ID, chatID string) (*MessengerResponse, error) {
|
||||
filters, err := m.filtersForChat(chatID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return m.syncFilters(ms, filters)
|
||||
return m.syncFilters(peerID, filters)
|
||||
}
|
||||
|
||||
func (m *Messenger) syncBackup() error {
|
||||
|
@ -277,9 +241,9 @@ func (m *Messenger) syncBackup() error {
|
|||
|
||||
from, to := m.calculateMailserverTimeBounds(oneMonthDuration)
|
||||
|
||||
batch := MailserverBatch{From: from, To: to, Topics: []types.TopicType{filter.ContentTopic}}
|
||||
ms := m.getActiveMailserver(filter.ChatID)
|
||||
err = m.processMailserverBatch(*ms, batch)
|
||||
batch := types.MailserverBatch{From: from, To: to, Topics: []types.TopicType{filter.ContentTopic}}
|
||||
ms := m.getCommunityStorenode(filter.ChatID)
|
||||
err = m.processMailserverBatch(ms, batch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -374,11 +338,11 @@ func (m *Messenger) RequestAllHistoricMessages(forceFetchingBackup, withRetries
|
|||
filtersByMs := m.SplitFiltersByStoreNode(filters)
|
||||
allResponses := &MessengerResponse{}
|
||||
for communityID, filtersForMs := range filtersByMs {
|
||||
ms := m.getActiveMailserver(communityID)
|
||||
peerID := m.getCommunityStorenode(communityID)
|
||||
if withRetries {
|
||||
response, err := m.performMailserverRequest(ms, func(ms mailservers.Mailserver) (*MessengerResponse, error) {
|
||||
return m.syncFilters(ms, filtersForMs)
|
||||
})
|
||||
response, err := m.performStorenodeTask(func() (*MessengerResponse, error) {
|
||||
return m.syncFilters(peerID, filtersForMs)
|
||||
}, history.WithPeerID(peerID))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -388,7 +352,7 @@ func (m *Messenger) RequestAllHistoricMessages(forceFetchingBackup, withRetries
|
|||
}
|
||||
continue
|
||||
}
|
||||
response, err := m.syncFilters(*ms, filtersForMs)
|
||||
response, err := m.syncFilters(peerID, filtersForMs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -404,10 +368,15 @@ const missingMessageCheckPeriod = 30 * time.Second
|
|||
|
||||
func (m *Messenger) checkForMissingMessagesLoop() {
|
||||
defer gocommon.LogOnPanic()
|
||||
|
||||
if m.transport.WakuVersion() != 2 {
|
||||
return
|
||||
}
|
||||
|
||||
t := time.NewTicker(missingMessageCheckPeriod)
|
||||
defer t.Stop()
|
||||
|
||||
mailserverAvailableSignal := m.mailserverCycle.availabilitySubscriptions.Subscribe()
|
||||
mailserverAvailableSignal := m.transport.OnStorenodeAvailable()
|
||||
|
||||
for {
|
||||
select {
|
||||
|
@ -416,7 +385,6 @@ func (m *Messenger) checkForMissingMessagesLoop() {
|
|||
|
||||
// Wait for mailserver available, also triggered on mailserver change
|
||||
case <-mailserverAvailableSignal:
|
||||
mailserverAvailableSignal = m.mailserverCycle.availabilitySubscriptions.Subscribe()
|
||||
|
||||
case <-t.C:
|
||||
|
||||
|
@ -425,16 +393,11 @@ func (m *Messenger) checkForMissingMessagesLoop() {
|
|||
filters := m.transport.Filters()
|
||||
filtersByMs := m.SplitFiltersByStoreNode(filters)
|
||||
for communityID, filtersForMs := range filtersByMs {
|
||||
ms := m.getActiveMailserver(communityID)
|
||||
if ms == nil {
|
||||
peerID := m.getCommunityStorenode(communityID)
|
||||
if peerID == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
peerID, err := ms.PeerID()
|
||||
if err != nil {
|
||||
m.logger.Error("could not obtain the peerID")
|
||||
return
|
||||
}
|
||||
m.transport.SetCriteriaForMissingMessageVerification(peerID, filtersForMs)
|
||||
}
|
||||
}
|
||||
|
@ -444,7 +407,7 @@ func getPrioritizedBatches() []int {
|
|||
return []int{1, 5, 10}
|
||||
}
|
||||
|
||||
func (m *Messenger) syncFiltersFrom(ms mailservers.Mailserver, filters []*transport.Filter, lastRequest uint32) (*MessengerResponse, error) {
|
||||
func (m *Messenger) syncFiltersFrom(peerID peer.ID, filters []*transport.Filter, lastRequest uint32) (*MessengerResponse, error) {
|
||||
canSync, err := m.canSyncWithStoreNodes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -464,7 +427,7 @@ func (m *Messenger) syncFiltersFrom(ms mailservers.Mailserver, filters []*transp
|
|||
topicsData[fmt.Sprintf("%s-%s", topic.PubsubTopic, topic.ContentTopic)] = topic
|
||||
}
|
||||
|
||||
batches := make(map[string]map[int]MailserverBatch)
|
||||
batches := make(map[string]map[int]types.MailserverBatch)
|
||||
|
||||
to := m.calculateMailserverTo()
|
||||
var syncedTopics []mailservers.MailserverTopic
|
||||
|
@ -502,7 +465,7 @@ func (m *Messenger) syncFiltersFrom(ms mailservers.Mailserver, filters []*transp
|
|||
|
||||
for pubsubTopic, contentTopics := range contentTopicsPerPubsubTopic {
|
||||
if _, ok := batches[pubsubTopic]; !ok {
|
||||
batches[pubsubTopic] = make(map[int]MailserverBatch)
|
||||
batches[pubsubTopic] = make(map[int]types.MailserverBatch)
|
||||
}
|
||||
|
||||
for _, filter := range contentTopics {
|
||||
|
@ -561,7 +524,7 @@ func (m *Messenger) syncFiltersFrom(ms mailservers.Mailserver, filters []*transp
|
|||
return nil, err
|
||||
}
|
||||
}
|
||||
batch = MailserverBatch{From: from, To: to}
|
||||
batch = types.MailserverBatch{From: time.Unix(int64(from), 0), To: to}
|
||||
}
|
||||
|
||||
batch.ChatIDs = append(batch.ChatIDs, chatID)
|
||||
|
@ -570,7 +533,7 @@ func (m *Messenger) syncFiltersFrom(ms mailservers.Mailserver, filters []*transp
|
|||
batches[pubsubTopic][batchID] = batch
|
||||
|
||||
// Set last request to the new `to`
|
||||
topicData.LastRequest = int(to)
|
||||
topicData.LastRequest = int(to.Unix())
|
||||
syncedTopics = append(syncedTopics, topicData)
|
||||
}
|
||||
}
|
||||
|
@ -579,7 +542,7 @@ func (m *Messenger) syncFiltersFrom(ms mailservers.Mailserver, filters []*transp
|
|||
m.config.messengerSignalsHandler.HistoryRequestStarted(len(batches))
|
||||
}
|
||||
|
||||
var batches24h []MailserverBatch
|
||||
var batches24h []types.MailserverBatch
|
||||
for pubsubTopic := range batches {
|
||||
batchKeys := make([]int, 0, len(batches[pubsubTopic]))
|
||||
for k := range batches[pubsubTopic] {
|
||||
|
@ -594,7 +557,7 @@ func (m *Messenger) syncFiltersFrom(ms mailservers.Mailserver, filters []*transp
|
|||
for _, k := range keysToIterate {
|
||||
batch := batches[pubsubTopic][k]
|
||||
|
||||
dayBatch := MailserverBatch{
|
||||
dayBatch := types.MailserverBatch{
|
||||
To: batch.To,
|
||||
Cursor: batch.Cursor,
|
||||
PubsubTopic: batch.PubsubTopic,
|
||||
|
@ -602,8 +565,8 @@ func (m *Messenger) syncFiltersFrom(ms mailservers.Mailserver, filters []*transp
|
|||
ChatIDs: batch.ChatIDs,
|
||||
}
|
||||
|
||||
from := batch.To - uint32(oneDayDuration.Seconds())
|
||||
if from > batch.From {
|
||||
from := batch.To.Add(-oneDayDuration)
|
||||
if from.After(batch.From) {
|
||||
dayBatch.From = from
|
||||
batches24h = append(batches24h, dayBatch)
|
||||
|
||||
|
@ -624,7 +587,7 @@ func (m *Messenger) syncFiltersFrom(ms mailservers.Mailserver, filters []*transp
|
|||
}
|
||||
|
||||
for _, batch := range batches24h {
|
||||
err := m.processMailserverBatch(ms, batch)
|
||||
err := m.processMailserverBatch(peerID, batch)
|
||||
if err != nil {
|
||||
m.logger.Error("error syncing topics", zap.Error(err))
|
||||
return nil, err
|
||||
|
@ -649,15 +612,15 @@ func (m *Messenger) syncFiltersFrom(ms mailservers.Mailserver, filters []*transp
|
|||
if !ok || !chat.Active || chat.Timeline() || chat.ProfileUpdates() {
|
||||
continue
|
||||
}
|
||||
gap, err := m.calculateGapForChat(chat, batch.From)
|
||||
gap, err := m.calculateGapForChat(chat, uint32(batch.From.Unix()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if chat.SyncedFrom == 0 || chat.SyncedFrom > batch.From {
|
||||
chat.SyncedFrom = batch.From
|
||||
if chat.SyncedFrom == 0 || chat.SyncedFrom > uint32(batch.From.Unix()) {
|
||||
chat.SyncedFrom = uint32(batch.From.Unix())
|
||||
}
|
||||
|
||||
chat.SyncedTo = to
|
||||
chat.SyncedTo = uint32(to.Unix())
|
||||
|
||||
err = m.persistence.SetSyncTimestamps(chat.SyncedFrom, chat.SyncedTo, chat.ID)
|
||||
if err != nil {
|
||||
|
@ -682,8 +645,8 @@ func (m *Messenger) syncFiltersFrom(ms mailservers.Mailserver, filters []*transp
|
|||
return response, nil
|
||||
}
|
||||
|
||||
func (m *Messenger) syncFilters(ms mailservers.Mailserver, filters []*transport.Filter) (*MessengerResponse, error) {
|
||||
return m.syncFiltersFrom(ms, filters, 0)
|
||||
func (m *Messenger) syncFilters(peerID peer.ID, filters []*transport.Filter) (*MessengerResponse, error) {
|
||||
return m.syncFiltersFrom(peerID, filters, 0)
|
||||
}
|
||||
|
||||
func (m *Messenger) calculateGapForChat(chat *Chat, from uint32) (*common.Message, error) {
|
||||
|
@ -722,187 +685,6 @@ func (m *Messenger) calculateGapForChat(chat *Chat, from uint32) (*common.Messag
|
|||
return message, m.persistence.SaveMessages([]*common.Message{message})
|
||||
}
|
||||
|
||||
type work struct {
|
||||
pubsubTopic string
|
||||
contentTopics []types.TopicType
|
||||
cursor types.StoreRequestCursor
|
||||
limit uint32
|
||||
}
|
||||
|
||||
type messageRequester interface {
|
||||
SendMessagesRequestForTopics(
|
||||
ctx context.Context,
|
||||
peerID peer.ID,
|
||||
from, to uint32,
|
||||
previousStoreCursor types.StoreRequestCursor,
|
||||
pubsubTopic string,
|
||||
contentTopics []types.TopicType,
|
||||
limit uint32,
|
||||
waitForResponse bool,
|
||||
processEnvelopes bool,
|
||||
) (cursor types.StoreRequestCursor, envelopesCount int, err error)
|
||||
}
|
||||
|
||||
func processMailserverBatch(
|
||||
ctx context.Context,
|
||||
messageRequester messageRequester,
|
||||
batch MailserverBatch,
|
||||
storenodeID peer.ID,
|
||||
logger *zap.Logger,
|
||||
pageLimit uint32,
|
||||
shouldProcessNextPage func(int) (bool, uint32),
|
||||
processEnvelopes bool,
|
||||
) error {
|
||||
|
||||
var topicStrings []string
|
||||
for _, t := range batch.Topics {
|
||||
topicStrings = append(topicStrings, t.String())
|
||||
}
|
||||
logger = logger.With(zap.String("batch hash", batch.Hash()))
|
||||
logger.Info("syncing topic",
|
||||
zap.Any("chatIDs", batch.ChatIDs),
|
||||
zap.String("fromString", time.Unix(int64(batch.From), 0).Format(time.RFC3339)),
|
||||
zap.String("toString", time.Unix(int64(batch.To), 0).Format(time.RFC3339)),
|
||||
zap.Any("topic", topicStrings),
|
||||
zap.Int64("from", int64(batch.From)),
|
||||
zap.Int64("to", int64(batch.To)))
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
workWg := sync.WaitGroup{}
|
||||
workCh := make(chan work, 1000) // each batch item is split in 10 topics bunch and sent to this channel
|
||||
workCompleteCh := make(chan struct{}) // once all batch items are processed, this channel is triggered
|
||||
semaphore := make(chan int, 3) // limit the number of concurrent queries
|
||||
errCh := make(chan error)
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
// Producer
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer gocommon.LogOnPanic()
|
||||
defer func() {
|
||||
logger.Debug("mailserver batch producer complete")
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
allWorks := int(math.Ceil(float64(len(batch.Topics)) / float64(maxTopicsPerRequest)))
|
||||
workWg.Add(allWorks)
|
||||
|
||||
for i := 0; i < len(batch.Topics); i += maxTopicsPerRequest {
|
||||
j := i + maxTopicsPerRequest
|
||||
if j > len(batch.Topics) {
|
||||
j = len(batch.Topics)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
logger.Debug("processBatch producer - context done")
|
||||
return
|
||||
default:
|
||||
logger.Debug("processBatch producer - creating work")
|
||||
workCh <- work{
|
||||
pubsubTopic: batch.PubsubTopic,
|
||||
contentTopics: batch.Topics[i:j],
|
||||
limit: pageLimit,
|
||||
}
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer gocommon.LogOnPanic()
|
||||
workWg.Wait()
|
||||
workCompleteCh <- struct{}{}
|
||||
}()
|
||||
|
||||
logger.Debug("processBatch producer complete")
|
||||
}()
|
||||
|
||||
var result error
|
||||
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
logger.Debug("processBatch cleanup - context done")
|
||||
result = ctx.Err()
|
||||
if errors.Is(result, context.Canceled) {
|
||||
result = nil
|
||||
}
|
||||
break loop
|
||||
case w, ok := <-workCh:
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
logger.Debug("processBatch - received work")
|
||||
semaphore <- 1
|
||||
go func(w work) { // Consumer
|
||||
defer gocommon.LogOnPanic()
|
||||
defer func() {
|
||||
workWg.Done()
|
||||
<-semaphore
|
||||
}()
|
||||
|
||||
queryCtx, queryCancel := context.WithTimeout(ctx, mailserverRequestTimeout)
|
||||
cursor, envelopesCount, err := messageRequester.SendMessagesRequestForTopics(queryCtx, storenodeID, batch.From, batch.To, w.cursor, w.pubsubTopic, w.contentTopics, w.limit, true, processEnvelopes)
|
||||
queryCancel()
|
||||
|
||||
if err != nil {
|
||||
logger.Debug("failed to send request", zap.Error(err))
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
|
||||
processNextPage := true
|
||||
nextPageLimit := pageLimit
|
||||
|
||||
if shouldProcessNextPage != nil {
|
||||
processNextPage, nextPageLimit = shouldProcessNextPage(envelopesCount)
|
||||
}
|
||||
|
||||
if !processNextPage {
|
||||
return
|
||||
}
|
||||
|
||||
// Check the cursor after calling `shouldProcessNextPage`.
|
||||
// The app might use process the fetched envelopes in the callback for own needs.
|
||||
if cursor == nil {
|
||||
return
|
||||
}
|
||||
|
||||
logger.Debug("processBatch producer - creating work (cursor)")
|
||||
|
||||
workWg.Add(1)
|
||||
workCh <- work{
|
||||
pubsubTopic: w.pubsubTopic,
|
||||
contentTopics: w.contentTopics,
|
||||
cursor: cursor,
|
||||
limit: nextPageLimit,
|
||||
}
|
||||
}(w)
|
||||
case err := <-errCh:
|
||||
logger.Debug("processBatch - received error", zap.Error(err))
|
||||
cancel() // Kill go routines
|
||||
return err
|
||||
case <-workCompleteCh:
|
||||
logger.Debug("processBatch - all jobs complete")
|
||||
cancel() // Kill go routines
|
||||
}
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
// NOTE(camellos): Disabling for now, not critical and I'd rather take a bit more time
|
||||
// to test it
|
||||
//logger.Info("waiting until message processed")
|
||||
//m.waitUntilP2PMessagesProcessed()
|
||||
|
||||
logger.Info("synced topic", zap.NamedError("hasError", result))
|
||||
return result
|
||||
}
|
||||
|
||||
func (m *Messenger) canSyncWithStoreNodes() (bool, error) {
|
||||
if m.featureFlags.StoreNodesDisabled {
|
||||
return false, nil
|
||||
|
@ -918,7 +700,7 @@ func (m *Messenger) DisableStoreNodes() {
|
|||
m.featureFlags.StoreNodesDisabled = true
|
||||
}
|
||||
|
||||
func (m *Messenger) processMailserverBatch(ms mailservers.Mailserver, batch MailserverBatch) error {
|
||||
func (m *Messenger) processMailserverBatch(peerID peer.ID, batch types.MailserverBatch) error {
|
||||
canSync, err := m.canSyncWithStoreNodes()
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -927,15 +709,10 @@ func (m *Messenger) processMailserverBatch(ms mailservers.Mailserver, batch Mail
|
|||
return nil
|
||||
}
|
||||
|
||||
mailserverID, err := ms.PeerID()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logger := m.logger.With(zap.String("mailserverID", ms.ID))
|
||||
return processMailserverBatch(m.ctx, m.transport, batch, mailserverID, logger, defaultStoreNodeRequestPageSize, nil, false)
|
||||
return m.transport.ProcessMailserverBatch(m.ctx, batch, peerID, defaultStoreNodeRequestPageSize, nil, false)
|
||||
}
|
||||
|
||||
func (m *Messenger) processMailserverBatchWithOptions(ms mailservers.Mailserver, batch MailserverBatch, pageLimit uint32, shouldProcessNextPage func(int) (bool, uint32), processEnvelopes bool) error {
|
||||
func (m *Messenger) processMailserverBatchWithOptions(peerID peer.ID, batch types.MailserverBatch, pageLimit uint64, shouldProcessNextPage func(int) (bool, uint64), processEnvelopes bool) error {
|
||||
canSync, err := m.canSyncWithStoreNodes()
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -944,27 +721,7 @@ func (m *Messenger) processMailserverBatchWithOptions(ms mailservers.Mailserver,
|
|||
return nil
|
||||
}
|
||||
|
||||
mailserverID, err := ms.PeerID()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logger := m.logger.With(zap.String("mailserverID", ms.ID))
|
||||
return processMailserverBatch(m.ctx, m.transport, batch, mailserverID, logger, pageLimit, shouldProcessNextPage, processEnvelopes)
|
||||
}
|
||||
|
||||
type MailserverBatch struct {
|
||||
From uint32
|
||||
To uint32
|
||||
Cursor string
|
||||
PubsubTopic string
|
||||
Topics []types.TopicType
|
||||
ChatIDs []string
|
||||
}
|
||||
|
||||
func (mb *MailserverBatch) Hash() string {
|
||||
data := fmt.Sprintf("%d%d%s%s%v%v", mb.From, mb.To, mb.Cursor, mb.PubsubTopic, mb.Topics, mb.ChatIDs)
|
||||
hash := sha256.Sum256([]byte(data))
|
||||
return hex.EncodeToString(hash[:4])
|
||||
return m.transport.ProcessMailserverBatch(m.ctx, batch, peerID, pageLimit, shouldProcessNextPage, processEnvelopes)
|
||||
}
|
||||
|
||||
func (m *Messenger) SyncChatFromSyncedFrom(chatID string) (uint32, error) {
|
||||
|
@ -973,9 +730,9 @@ func (m *Messenger) SyncChatFromSyncedFrom(chatID string) (uint32, error) {
|
|||
return 0, ErrChatNotFound
|
||||
}
|
||||
|
||||
ms := m.getActiveMailserver(chat.CommunityID)
|
||||
peerID := m.getCommunityStorenode(chat.CommunityID)
|
||||
var from uint32
|
||||
_, err := m.performMailserverRequest(ms, func(ms mailservers.Mailserver) (*MessengerResponse, error) {
|
||||
_, err := m.performStorenodeTask(func() (*MessengerResponse, error) {
|
||||
canSync, err := m.canSyncWithStoreNodes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -994,10 +751,10 @@ func (m *Messenger) SyncChatFromSyncedFrom(chatID string) (uint32, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
batch := MailserverBatch{
|
||||
batch := types.MailserverBatch{
|
||||
ChatIDs: []string{chatID},
|
||||
To: chat.SyncedFrom,
|
||||
From: chat.SyncedFrom - defaultSyncPeriod,
|
||||
To: time.Unix(int64(chat.SyncedFrom), 0),
|
||||
From: time.Unix(int64(chat.SyncedFrom-defaultSyncPeriod), 0),
|
||||
PubsubTopic: pubsubTopic,
|
||||
Topics: topics,
|
||||
}
|
||||
|
@ -1005,7 +762,7 @@ func (m *Messenger) SyncChatFromSyncedFrom(chatID string) (uint32, error) {
|
|||
m.config.messengerSignalsHandler.HistoryRequestStarted(1)
|
||||
}
|
||||
|
||||
err = m.processMailserverBatch(ms, batch)
|
||||
err = m.processMailserverBatch(peerID, batch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -1013,16 +770,16 @@ func (m *Messenger) SyncChatFromSyncedFrom(chatID string) (uint32, error) {
|
|||
if m.config.messengerSignalsHandler != nil {
|
||||
m.config.messengerSignalsHandler.HistoryRequestCompleted()
|
||||
}
|
||||
if chat.SyncedFrom == 0 || chat.SyncedFrom > batch.From {
|
||||
chat.SyncedFrom = batch.From
|
||||
if chat.SyncedFrom == 0 || chat.SyncedFrom > uint32(batch.From.Unix()) {
|
||||
chat.SyncedFrom = uint32(batch.From.Unix())
|
||||
}
|
||||
|
||||
m.logger.Debug("setting sync timestamps", zap.Int64("from", int64(batch.From)), zap.Int64("to", int64(chat.SyncedTo)), zap.String("chatID", chatID))
|
||||
m.logger.Debug("setting sync timestamps", zap.Int64("from", batch.From.Unix()), zap.Int64("to", int64(chat.SyncedTo)), zap.String("chatID", chatID))
|
||||
|
||||
err = m.persistence.SetSyncTimestamps(batch.From, chat.SyncedTo, chat.ID)
|
||||
from = batch.From
|
||||
err = m.persistence.SetSyncTimestamps(uint32(batch.From.Unix()), chat.SyncedTo, chat.ID)
|
||||
from = uint32(batch.From.Unix())
|
||||
return nil, err
|
||||
})
|
||||
}, history.WithPeerID(peerID))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
@ -1062,10 +819,10 @@ func (m *Messenger) FillGaps(chatID string, messageIDs []string) error {
|
|||
}
|
||||
}
|
||||
|
||||
batch := MailserverBatch{
|
||||
batch := types.MailserverBatch{
|
||||
ChatIDs: []string{chatID},
|
||||
To: highestTo,
|
||||
From: lowestFrom,
|
||||
To: time.Unix(int64(highestTo), 0),
|
||||
From: time.Unix(int64(lowestFrom), 0),
|
||||
PubsubTopic: pubsubTopic,
|
||||
Topics: topics,
|
||||
}
|
||||
|
@ -1074,8 +831,8 @@ func (m *Messenger) FillGaps(chatID string, messageIDs []string) error {
|
|||
m.config.messengerSignalsHandler.HistoryRequestStarted(1)
|
||||
}
|
||||
|
||||
ms := m.getActiveMailserver(chat.CommunityID)
|
||||
err = m.processMailserverBatch(*ms, batch)
|
||||
peerID := m.getCommunityStorenode(chat.CommunityID)
|
||||
err = m.processMailserverBatch(peerID, batch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1087,39 +844,18 @@ func (m *Messenger) FillGaps(chatID string, messageIDs []string) error {
|
|||
return m.persistence.DeleteMessages(messageIDs)
|
||||
}
|
||||
|
||||
func (m *Messenger) waitUntilP2PMessagesProcessed() { // nolint: unused
|
||||
|
||||
ticker := time.NewTicker(50 * time.Millisecond)
|
||||
|
||||
for { //nolint: gosimple
|
||||
select {
|
||||
case <-ticker.C:
|
||||
if !m.transport.ProcessingP2PMessages() {
|
||||
ticker.Stop()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Messenger) LoadFilters(filters []*transport.Filter) ([]*transport.Filter, error) {
|
||||
return m.transport.LoadFilters(filters)
|
||||
}
|
||||
|
||||
func (m *Messenger) ToggleUseMailservers(value bool) error {
|
||||
m.mailserverCycle.Lock()
|
||||
defer m.mailserverCycle.Unlock()
|
||||
|
||||
err := m.settings.SetUseMailservers(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m.disconnectActiveMailserver(backoffByUserAction)
|
||||
if value {
|
||||
m.cycleMailservers()
|
||||
return nil
|
||||
}
|
||||
m.transport.DisconnectActiveStorenode(m.ctx, backoffByUserAction, value)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1129,8 +865,8 @@ func (m *Messenger) SetPinnedMailservers(mailservers map[string]string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
m.disconnectActiveMailserver(backoffByUserAction)
|
||||
m.cycleMailservers()
|
||||
m.transport.DisconnectActiveStorenode(m.ctx, backoffByUserAction, true)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1162,8 +898,8 @@ func (m *Messenger) fetchMessages(chatID string, duration time.Duration) (uint32
|
|||
return 0, ErrChatNotFound
|
||||
}
|
||||
|
||||
ms := m.getActiveMailserver(chat.CommunityID)
|
||||
_, err := m.performMailserverRequest(ms, func(ms mailservers.Mailserver) (*MessengerResponse, error) {
|
||||
peerID := m.getCommunityStorenode(chat.CommunityID)
|
||||
_, err := m.performStorenodeTask(func() (*MessengerResponse, error) {
|
||||
canSync, err := m.canSyncWithStoreNodes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -1172,13 +908,13 @@ func (m *Messenger) fetchMessages(chatID string, duration time.Duration) (uint32
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
m.logger.Debug("fetching messages", zap.String("chatID", chatID), zap.String("mailserver", ms.Name))
|
||||
m.logger.Debug("fetching messages", zap.String("chatID", chatID), zap.Stringer("peerID", peerID))
|
||||
pubsubTopic, topics, err := m.topicsForChat(chatID)
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
batch := MailserverBatch{
|
||||
batch := types.MailserverBatch{
|
||||
ChatIDs: []string{chatID},
|
||||
From: from,
|
||||
To: to,
|
||||
|
@ -1189,7 +925,7 @@ func (m *Messenger) fetchMessages(chatID string, duration time.Duration) (uint32
|
|||
m.config.messengerSignalsHandler.HistoryRequestStarted(1)
|
||||
}
|
||||
|
||||
err = m.processMailserverBatch(ms, batch)
|
||||
err = m.processMailserverBatch(peerID, batch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -1197,19 +933,19 @@ func (m *Messenger) fetchMessages(chatID string, duration time.Duration) (uint32
|
|||
if m.config.messengerSignalsHandler != nil {
|
||||
m.config.messengerSignalsHandler.HistoryRequestCompleted()
|
||||
}
|
||||
if chat.SyncedFrom == 0 || chat.SyncedFrom > batch.From {
|
||||
chat.SyncedFrom = batch.From
|
||||
if chat.SyncedFrom == 0 || chat.SyncedFrom > uint32(batch.From.Unix()) {
|
||||
chat.SyncedFrom = uint32(batch.From.Unix())
|
||||
}
|
||||
|
||||
m.logger.Debug("setting sync timestamps", zap.Int64("from", int64(batch.From)), zap.Int64("to", int64(chat.SyncedTo)), zap.String("chatID", chatID))
|
||||
m.logger.Debug("setting sync timestamps", zap.Int64("from", batch.From.Unix()), zap.Int64("to", int64(chat.SyncedTo)), zap.String("chatID", chatID))
|
||||
|
||||
err = m.persistence.SetSyncTimestamps(batch.From, chat.SyncedTo, chat.ID)
|
||||
err = m.persistence.SetSyncTimestamps(uint32(batch.From.Unix()), chat.SyncedTo, chat.ID)
|
||||
from = batch.From
|
||||
return nil, err
|
||||
})
|
||||
}, history.WithPeerID(peerID))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return from, nil
|
||||
return uint32(from.Unix()), nil
|
||||
}
|
||||
|
|
|
@ -1,178 +1,18 @@
|
|||
package protocol
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"math"
|
||||
"math/big"
|
||||
"net"
|
||||
"runtime"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/waku-org/go-waku/waku/v2/utils"
|
||||
|
||||
"github.com/status-im/status-go/common"
|
||||
gocommon "github.com/status-im/status-go/common"
|
||||
"github.com/status-im/status-go/params"
|
||||
"github.com/status-im/status-go/protocol/storenodes"
|
||||
"github.com/status-im/status-go/services/mailservers"
|
||||
"github.com/status-im/status-go/signal"
|
||||
)
|
||||
|
||||
const defaultBackoff = 10 * time.Second
|
||||
const graylistBackoff = 3 * time.Minute
|
||||
const backoffByUserAction = 0
|
||||
const isAndroidEmulator = runtime.GOOS == "android" && runtime.GOARCH == "amd64"
|
||||
const findNearestMailServer = !isAndroidEmulator
|
||||
const overrideDNS = runtime.GOOS == "android" || runtime.GOOS == "ios"
|
||||
const bootstrapDNS = "8.8.8.8:53"
|
||||
|
||||
type byRTTMsAndCanConnectBefore []SortedMailserver
|
||||
|
||||
func (s byRTTMsAndCanConnectBefore) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func (s byRTTMsAndCanConnectBefore) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
func (s byRTTMsAndCanConnectBefore) Less(i, j int) bool {
|
||||
// Slightly inaccurate since the sorting is time-sensitive, but it does not matter much
|
||||
now := time.Now()
|
||||
if s[i].CanConnectAfter.Before(now) && s[j].CanConnectAfter.Before(now) {
|
||||
return s[i].RTT < s[j].RTT
|
||||
}
|
||||
return s[i].CanConnectAfter.Before(s[j].CanConnectAfter)
|
||||
}
|
||||
|
||||
func (m *Messenger) StartMailserverCycle(mailservers []mailservers.Mailserver) error {
|
||||
if m.transport.WakuVersion() != 2 {
|
||||
m.logger.Warn("not starting mailserver cycle: requires wakuv2")
|
||||
return nil
|
||||
}
|
||||
|
||||
m.mailserverCycle.allMailservers = mailservers
|
||||
|
||||
if len(mailservers) == 0 {
|
||||
m.logger.Warn("not starting mailserver cycle: empty mailservers list")
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, storenode := range mailservers {
|
||||
|
||||
peerInfo, err := storenode.PeerInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, addr := range utils.EncapsulatePeerID(peerInfo.ID, peerInfo.Addrs...) {
|
||||
_, err := m.transport.AddStorePeer(addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
go m.verifyStorenodeStatus()
|
||||
|
||||
m.logger.Debug("starting mailserver cycle",
|
||||
zap.Uint("WakuVersion", m.transport.WakuVersion()),
|
||||
zap.Any("mailservers", mailservers),
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Messenger) DisconnectActiveMailserver() {
|
||||
m.mailserverCycle.Lock()
|
||||
defer m.mailserverCycle.Unlock()
|
||||
m.disconnectActiveMailserver(graylistBackoff)
|
||||
}
|
||||
|
||||
func (m *Messenger) disconnectMailserver(backoffDuration time.Duration) error {
|
||||
if m.mailserverCycle.activeMailserver == nil {
|
||||
m.logger.Info("no active mailserver")
|
||||
return nil
|
||||
}
|
||||
m.logger.Info("disconnecting active mailserver", zap.String("nodeID", m.mailserverCycle.activeMailserver.ID))
|
||||
m.mailPeersMutex.Lock()
|
||||
pInfo, ok := m.mailserverCycle.peers[m.mailserverCycle.activeMailserver.ID]
|
||||
if ok {
|
||||
pInfo.status = disconnected
|
||||
|
||||
pInfo.canConnectAfter = time.Now().Add(backoffDuration)
|
||||
m.mailserverCycle.peers[m.mailserverCycle.activeMailserver.ID] = pInfo
|
||||
} else {
|
||||
m.mailserverCycle.peers[m.mailserverCycle.activeMailserver.ID] = peerStatus{
|
||||
status: disconnected,
|
||||
mailserver: *m.mailserverCycle.activeMailserver,
|
||||
canConnectAfter: time.Now().Add(backoffDuration),
|
||||
}
|
||||
}
|
||||
m.mailPeersMutex.Unlock()
|
||||
|
||||
m.mailserverCycle.activeMailserver = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Messenger) disconnectActiveMailserver(backoffDuration time.Duration) {
|
||||
err := m.disconnectMailserver(backoffDuration)
|
||||
if err != nil {
|
||||
m.logger.Error("failed to disconnect mailserver", zap.Error(err))
|
||||
}
|
||||
signal.SendMailserverChanged(nil)
|
||||
}
|
||||
|
||||
func (m *Messenger) cycleMailservers() {
|
||||
m.logger.Info("Automatically switching mailserver")
|
||||
|
||||
if m.mailserverCycle.activeMailserver != nil {
|
||||
m.disconnectActiveMailserver(graylistBackoff)
|
||||
}
|
||||
|
||||
useMailserver, err := m.settings.CanUseMailservers()
|
||||
if err != nil {
|
||||
m.logger.Error("failed to get use mailservers", zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
if !useMailserver {
|
||||
m.logger.Info("Skipping mailserver search due to useMailserver being false")
|
||||
return
|
||||
}
|
||||
|
||||
err = m.findNewMailserver()
|
||||
if err != nil {
|
||||
m.logger.Error("Error getting new mailserver", zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
func poolSize(fleetSize int) int {
|
||||
return int(math.Ceil(float64(fleetSize) / 4))
|
||||
}
|
||||
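A quick sanity check of the sizing above (purely illustrative):

// poolSize(6) == 2 and poolSize(3) == 1, i.e. ceil(fleetSize / 4)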
|
||||
func (m *Messenger) getFleet() (string, error) {
|
||||
var fleet string
|
||||
dbFleet, err := m.settings.GetFleet()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if dbFleet != "" {
|
||||
fleet = dbFleet
|
||||
} else if m.config.clusterConfig.Fleet != "" {
|
||||
fleet = m.config.clusterConfig.Fleet
|
||||
} else {
|
||||
fleet = params.FleetStatusProd
|
||||
}
|
||||
return fleet, nil
|
||||
}
|
||||
|
||||
func (m *Messenger) allMailservers() ([]mailservers.Mailserver, error) {
|
||||
func (m *Messenger) AllMailservers() ([]mailservers.Mailserver, error) {
|
||||
// Get configured fleet
|
||||
fleet, err := m.getFleet()
|
||||
if err != nil {
|
||||
|
@ -199,221 +39,46 @@ func (m *Messenger) allMailservers() ([]mailservers.Mailserver, error) {
|
|||
return allMailservers, nil
|
||||
}
|
||||
|
||||
type SortedMailserver struct {
|
||||
Mailserver mailservers.Mailserver
|
||||
RTT time.Duration
|
||||
CanConnectAfter time.Time
|
||||
}
|
||||
|
||||
func (m *Messenger) getAvailableMailserversSortedByRTT(allMailservers []mailservers.Mailserver) []mailservers.Mailserver {
|
||||
// TODO: this can be replaced by peer selector once code is moved to go-waku api
|
||||
availableMailservers := make(map[string]time.Duration)
|
||||
availableMailserversMutex := sync.Mutex{}
|
||||
availableMailserversWg := sync.WaitGroup{}
|
||||
for _, mailserver := range allMailservers {
|
||||
availableMailserversWg.Add(1)
|
||||
go func(mailserver mailservers.Mailserver) {
|
||||
defer gocommon.LogOnPanic()
|
||||
defer availableMailserversWg.Done()
|
||||
|
||||
peerID, err := mailserver.PeerID()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(m.ctx, 4*time.Second)
|
||||
defer cancel()
|
||||
|
||||
rtt, err := m.transport.PingPeer(ctx, peerID)
|
||||
if err == nil { // pinging mailservers might fail, but we don't care
|
||||
availableMailserversMutex.Lock()
|
||||
availableMailservers[mailserver.ID] = rtt
|
||||
availableMailserversMutex.Unlock()
|
||||
}
|
||||
}(mailserver)
|
||||
}
|
||||
availableMailserversWg.Wait()
|
||||
|
||||
if len(availableMailservers) == 0 {
|
||||
m.logger.Warn("No mailservers available") // Do nothing...
|
||||
func (m *Messenger) setupStorenodes(storenodes []mailservers.Mailserver) error {
|
||||
if m.transport.WakuVersion() != 2 {
|
||||
return nil
|
||||
}
|
||||
|
||||
mailserversByID := make(map[string]mailservers.Mailserver)
|
||||
for idx := range allMailservers {
|
||||
mailserversByID[allMailservers[idx].ID] = allMailservers[idx]
|
||||
}
|
||||
var sortedMailservers []SortedMailserver
|
||||
for mailserverID, rtt := range availableMailservers {
|
||||
ms := mailserversByID[mailserverID]
|
||||
sortedMailserver := SortedMailserver{
|
||||
Mailserver: ms,
|
||||
RTT: rtt,
|
||||
}
|
||||
m.mailPeersMutex.Lock()
|
||||
pInfo, ok := m.mailserverCycle.peers[ms.ID]
|
||||
m.mailPeersMutex.Unlock()
|
||||
if ok {
|
||||
if time.Now().Before(pInfo.canConnectAfter) {
|
||||
continue // We can't connect to this node yet
|
||||
}
|
||||
}
|
||||
sortedMailservers = append(sortedMailservers, sortedMailserver)
|
||||
}
|
||||
sort.Sort(byRTTMsAndCanConnectBefore(sortedMailservers))
|
||||
for _, storenode := range storenodes {
|
||||
|
||||
result := make([]mailservers.Mailserver, len(sortedMailservers))
|
||||
for i, s := range sortedMailservers {
|
||||
result[i] = s.Mailserver
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func (m *Messenger) findNewMailserver() error {
|
||||
// we have to override DNS manually because of https://github.com/status-im/status-mobile/issues/19581
|
||||
if overrideDNS {
|
||||
var dialer net.Dialer
|
||||
net.DefaultResolver = &net.Resolver{
|
||||
PreferGo: false,
|
||||
Dial: func(context context.Context, _, _ string) (net.Conn, error) {
|
||||
conn, err := dialer.DialContext(context, "udp", bootstrapDNS)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pinnedMailserver, err := m.getPinnedMailserver()
|
||||
if err != nil {
|
||||
m.logger.Error("Could not obtain the pinned mailserver", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
if pinnedMailserver != nil {
|
||||
return m.connectToMailserver(*pinnedMailserver)
|
||||
}
|
||||
|
||||
m.logger.Info("Finding a new mailserver...")
|
||||
|
||||
allMailservers := m.mailserverCycle.allMailservers
|
||||
|
||||
// TODO: remove this check once sockets are stable on x86_64 emulators
|
||||
if findNearestMailServer {
|
||||
allMailservers = m.getAvailableMailserversSortedByRTT(allMailservers)
|
||||
}
|
||||
|
||||
// Picks a random mailserver amongst the ones with the lowest latency
|
||||
// The pool size is 1/4 of the mailservers that were pinged successfully
|
||||
pSize := poolSize(len(allMailservers) - 1)
|
||||
if pSize <= 0 {
|
||||
pSize = len(allMailservers)
|
||||
if pSize <= 0 {
|
||||
m.logger.Warn("No storenodes available") // Do nothing...
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
r, err := rand.Int(rand.Reader, big.NewInt(int64(pSize)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ms := allMailservers[r.Int64()]
|
||||
return m.connectToMailserver(ms)
|
||||
}
|
||||
|
||||
func (m *Messenger) mailserverStatus(mailserverID string) connStatus {
|
||||
m.mailPeersMutex.RLock()
|
||||
defer m.mailPeersMutex.RUnlock()
|
||||
peer, ok := m.mailserverCycle.peers[mailserverID]
|
||||
if !ok {
|
||||
return disconnected
|
||||
}
|
||||
return peer.status
|
||||
}
|
||||
|
||||
func (m *Messenger) connectToMailserver(ms mailservers.Mailserver) error {
|
||||
|
||||
m.logger.Info("connecting to mailserver", zap.String("mailserverID", ms.ID))
|
||||
|
||||
m.mailserverCycle.activeMailserver = &ms
|
||||
signal.SendMailserverChanged(m.mailserverCycle.activeMailserver)
|
||||
|
||||
mailserverStatus := m.mailserverStatus(ms.ID)
|
||||
if mailserverStatus != connected {
|
||||
m.mailPeersMutex.Lock()
|
||||
m.mailserverCycle.peers[ms.ID] = peerStatus{
|
||||
status: connected,
|
||||
lastConnectionAttempt: time.Now(),
|
||||
canConnectAfter: time.Now().Add(defaultBackoff),
|
||||
mailserver: ms,
|
||||
}
|
||||
m.mailPeersMutex.Unlock()
|
||||
|
||||
m.mailserverCycle.activeMailserver.FailedRequests = 0
|
||||
peerID, err := m.mailserverCycle.activeMailserver.PeerID()
|
||||
peerInfo, err := storenode.PeerInfo()
|
||||
if err != nil {
|
||||
m.logger.Error("could not decode the peer id of mailserver", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
m.logger.Info("mailserver available", zap.String("mailserverID", m.mailserverCycle.activeMailserver.ID))
|
||||
m.mailserverCycle.availabilitySubscriptions.EmitMailserverAvailable()
|
||||
signal.SendMailserverAvailable(m.mailserverCycle.activeMailserver)
|
||||
|
||||
m.transport.SetStorePeerID(peerID)
|
||||
|
||||
// Query mailserver
|
||||
m.asyncRequestAllHistoricMessages()
|
||||
for _, addr := range utils.EncapsulatePeerID(peerInfo.ID, peerInfo.Addrs...) {
|
||||
_, err := m.transport.AddStorePeer(addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getActiveMailserver returns the active mailserver. If a communityID is present, it returns the mailserver
|
||||
// set up for that community, if any; otherwise it returns the global mailserver
|
||||
func (m *Messenger) getActiveMailserver(communityID ...string) *mailservers.Mailserver {
|
||||
if len(communityID) == 0 || communityID[0] == "" {
|
||||
return m.mailserverCycle.activeMailserver
|
||||
}
|
||||
ms, err := m.communityStorenodes.GetStorenodeByCommunityID(communityID[0])
|
||||
func (m *Messenger) getFleet() (string, error) {
|
||||
var fleet string
|
||||
dbFleet, err := m.settings.GetFleet()
|
||||
if err != nil {
|
||||
if !errors.Is(err, storenodes.ErrNotFound) {
|
||||
m.logger.Error("getting storenode for community, using global", zap.String("communityID", communityID[0]), zap.Error(err))
|
||||
}
|
||||
// if there is no specific mailserver for the community, fall back to the regular mailserverCycle one
|
||||
return m.mailserverCycle.activeMailserver
|
||||
return "", err
|
||||
}
|
||||
return &ms
|
||||
}
|
||||
|
||||
func (m *Messenger) getActiveMailserverID(communityID ...string) string {
|
||||
ms := m.getActiveMailserver(communityID...)
|
||||
if ms == nil {
|
||||
return ""
|
||||
if dbFleet != "" {
|
||||
fleet = dbFleet
|
||||
} else if m.config.clusterConfig.Fleet != "" {
|
||||
fleet = m.config.clusterConfig.Fleet
|
||||
} else {
|
||||
fleet = params.FleetStatusProd
|
||||
}
|
||||
return ms.ID
|
||||
}
|
||||
|
||||
func (m *Messenger) isMailserverAvailable(mailserverID string) bool {
|
||||
return m.mailserverStatus(mailserverID) == connected
|
||||
}
|
||||
|
||||
func (m *Messenger) penalizeMailserver(id string) {
|
||||
m.mailPeersMutex.Lock()
|
||||
defer m.mailPeersMutex.Unlock()
|
||||
pInfo, ok := m.mailserverCycle.peers[id]
|
||||
if !ok {
|
||||
pInfo.status = disconnected
|
||||
}
|
||||
|
||||
pInfo.canConnectAfter = time.Now().Add(graylistBackoff)
|
||||
m.mailserverCycle.peers[id] = pInfo
|
||||
return fleet, nil
|
||||
}
|
||||
|
||||
func (m *Messenger) asyncRequestAllHistoricMessages() {
|
||||
if !m.config.codeControlFlags.AutoRequestHistoricMessages {
|
||||
if !m.config.codeControlFlags.AutoRequestHistoricMessages || m.transport.WakuVersion() == 1 {
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -428,128 +93,119 @@ func (m *Messenger) asyncRequestAllHistoricMessages() {
|
|||
}()
|
||||
}
|
||||
|
||||
func (m *Messenger) verifyStorenodeStatus() {
|
||||
defer common.LogOnPanic()
|
||||
ticker := time.NewTicker(1 * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
err := m.disconnectStorenodeIfRequired()
|
||||
if err != nil {
|
||||
m.logger.Error("failed to handle mailserver cycle event", zap.Error(err))
|
||||
continue
|
||||
}
|
||||
|
||||
case <-m.quit:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Messenger) getPinnedMailserver() (*mailservers.Mailserver, error) {
|
||||
func (m *Messenger) GetPinnedStorenode() (peer.ID, error) {
|
||||
fleet, err := m.getFleet()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return "", err
|
||||
}
|
||||
|
||||
pinnedMailservers, err := m.settings.GetPinnedMailservers()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return "", err
|
||||
}
|
||||
|
||||
pinnedMailserver, ok := pinnedMailservers[fleet]
|
||||
if !ok {
|
||||
return nil, nil
|
||||
return "", nil
|
||||
}
|
||||
|
||||
fleetMailservers := mailservers.DefaultMailservers()
|
||||
|
||||
for _, c := range fleetMailservers {
|
||||
if c.Fleet == fleet && c.ID == pinnedMailserver {
|
||||
return &c, nil
|
||||
return c.PeerID()
|
||||
}
|
||||
}
|
||||
|
||||
if m.mailserversDatabase != nil {
|
||||
customMailservers, err := m.mailserversDatabase.Mailservers()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return "", err
|
||||
}
|
||||
|
||||
for _, c := range customMailservers {
|
||||
if c.Fleet == fleet && c.ID == pinnedMailserver {
|
||||
return &c, nil
|
||||
return c.PeerID()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (m *Messenger) disconnectStorenodeIfRequired() error {
|
||||
m.logger.Debug("wakuV2 storenode status verification")
|
||||
|
||||
if m.mailserverCycle.activeMailserver == nil {
|
||||
// No active storenode, find a new one
|
||||
m.cycleMailservers()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check whether we want to disconnect the active storenode
|
||||
if m.mailserverCycle.activeMailserver.FailedRequests >= mailserverMaxFailedRequests {
|
||||
m.penalizeMailserver(m.mailserverCycle.activeMailserver.ID)
|
||||
signal.SendMailserverNotWorking()
|
||||
m.logger.Info("too many failed requests", zap.String("storenode", m.mailserverCycle.activeMailserver.ID))
|
||||
m.mailserverCycle.activeMailserver.FailedRequests = 0
|
||||
return m.connectToNewMailserverAndWait()
|
||||
}
|
||||
|
||||
return nil
|
||||
func (m *Messenger) UseStorenodes() (bool, error) {
|
||||
return m.settings.CanUseMailservers()
|
||||
}
|
||||
|
||||
func (m *Messenger) waitForAvailableStoreNode(timeout time.Duration) bool {
|
||||
// Add 1 second to the timeout, because the mailserver cycle has a 1-second ticker, which doesn't tick on start.
|
||||
// This can be improved after merging https://github.com/status-im/status-go/pull/4380.
|
||||
// NOTE: https://stackoverflow.com/questions/32705582/how-to-get-time-tick-to-tick-immediately
|
||||
timeout += time.Second
|
||||
func (m *Messenger) Storenodes() ([]peer.ID, error) {
|
||||
mailservers, err := m.AllMailservers()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
finish := make(chan struct{})
|
||||
cancel := make(chan struct{})
|
||||
var result []peer.ID
|
||||
for _, m := range mailservers {
|
||||
peerID, err := m.PeerID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result = append(result, peerID)
|
||||
}
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
return result, nil
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer gocommon.LogOnPanic()
|
||||
defer func() {
|
||||
wg.Done()
|
||||
}()
|
||||
for !m.isMailserverAvailable(m.getActiveMailserverID()) {
|
||||
select {
|
||||
case <-m.mailserverCycle.availabilitySubscriptions.Subscribe():
|
||||
case <-cancel:
|
||||
return
|
||||
func (m *Messenger) checkForStorenodeCycleSignals() {
|
||||
defer gocommon.LogOnPanic()
|
||||
|
||||
if m.transport.WakuVersion() != 2 {
|
||||
return
|
||||
}
|
||||
|
||||
changed := m.transport.OnStorenodeChanged()
|
||||
notWorking := m.transport.OnStorenodeNotWorking()
|
||||
available := m.transport.OnStorenodeAvailable()
|
||||
|
||||
allMailservers, err := m.AllMailservers()
|
||||
if err != nil {
|
||||
m.logger.Error("Could not retrieve mailserver list", zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
mailserverMap := make(map[peer.ID]mailservers.Mailserver)
|
||||
for _, ms := range allMailservers {
|
||||
peerID, err := ms.PeerID()
|
||||
if err != nil {
|
||||
m.logger.Error("could not retrieve peerID", zap.Error(err))
|
||||
return
|
||||
}
|
||||
mailserverMap[peerID] = ms
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-m.ctx.Done():
|
||||
return
|
||||
case <-notWorking:
|
||||
signal.SendMailserverNotWorking()
|
||||
|
||||
case activeMailserver := <-changed:
|
||||
if activeMailserver != "" {
|
||||
ms, ok := mailserverMap[activeMailserver]
|
||||
if ok {
|
||||
signal.SendMailserverChanged(&ms)
|
||||
}
|
||||
} else {
|
||||
signal.SendMailserverChanged(nil)
|
||||
}
|
||||
case activeMailserver := <-available:
|
||||
if activeMailserver != "" {
|
||||
ms, ok := mailserverMap[activeMailserver]
|
||||
if ok {
|
||||
signal.SendMailserverAvailable(&ms)
|
||||
}
|
||||
m.asyncRequestAllHistoricMessages()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
defer gocommon.LogOnPanic()
|
||||
defer func() {
|
||||
close(finish)
|
||||
}()
|
||||
wg.Wait()
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-finish:
|
||||
case <-time.After(timeout):
|
||||
close(cancel)
|
||||
case <-m.ctx.Done():
|
||||
close(cancel)
|
||||
}
|
||||
|
||||
return m.isMailserverAvailable(m.getActiveMailserverID())
|
||||
}
|
||||
|
|
|
@ -1,167 +0,0 @@
|
|||
package protocol
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"math/big"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/status-im/status-go/eth-node/types"
|
||||
"github.com/status-im/status-go/protocol/tt"
|
||||
)
|
||||
|
||||
type queryResponse struct {
|
||||
topics []types.TopicType
|
||||
err error // Indicates if this response will simulate an error returned by SendMessagesRequestForTopics
|
||||
cursor []byte
|
||||
}
|
||||
|
||||
type mockTransport struct {
|
||||
queryResponses map[string]queryResponse
|
||||
}
|
||||
|
||||
func newMockTransport() *mockTransport {
|
||||
return &mockTransport{
|
||||
queryResponses: make(map[string]queryResponse),
|
||||
}
|
||||
}
|
||||
|
||||
func getInitialResponseKey(topics []types.TopicType) string {
|
||||
return hex.EncodeToString(append([]byte("start"), topics[0][:]...))
|
||||
}
|
||||
|
||||
func (t *mockTransport) SendMessagesRequestForTopics(
|
||||
ctx context.Context,
|
||||
peerID peer.ID,
|
||||
from, to uint32,
|
||||
prevCursor types.StoreRequestCursor,
|
||||
pubsubTopic string,
|
||||
contentTopics []types.TopicType,
|
||||
limit uint32,
|
||||
waitForResponse bool,
|
||||
processEnvelopes bool,
|
||||
) (cursor types.StoreRequestCursor, envelopesCount int, err error) {
|
||||
var response queryResponse
|
||||
if prevCursor == nil {
|
||||
initialResponse := getInitialResponseKey(contentTopics)
|
||||
response = t.queryResponses[initialResponse]
|
||||
} else {
|
||||
response = t.queryResponses[hex.EncodeToString(prevCursor)]
|
||||
}
|
||||
return response.cursor, 0, response.err
|
||||
}
|
||||
|
||||
func (t *mockTransport) Populate(topics []types.TopicType, responses int, includeRandomError bool) error {
|
||||
if responses <= 0 || len(topics) == 0 {
|
||||
return errors.New("invalid input parameters")
|
||||
}
|
||||
|
||||
var topicBatches [][]types.TopicType
|
||||
|
||||
for i := 0; i < len(topics); i += maxTopicsPerRequest {
|
||||
// Split the batch into subbatches of at most maxTopicsPerRequest (10) content topics
|
||||
j := i + maxTopicsPerRequest
|
||||
if j > len(topics) {
|
||||
j = len(topics)
|
||||
}
|
||||
topicBatches = append(topicBatches, topics[i:j])
|
||||
}
|
||||
|
||||
randomErrIdx, err := rand.Int(rand.Reader, big.NewInt(int64(len(topicBatches))))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
randomErrIdxInt := int(randomErrIdx.Int64())
|
||||
|
||||
for i, topicBatch := range topicBatches {
|
||||
// Setup initial response
|
||||
initialResponseKey := getInitialResponseKey(topicBatch)
|
||||
t.queryResponses[initialResponseKey] = queryResponse{
|
||||
topics: topicBatch,
|
||||
err: nil,
|
||||
}
|
||||
|
||||
prevKey := initialResponseKey
|
||||
for x := 0; x < responses-1; x++ {
|
||||
newResponseCursor := []byte(uuid.New().String())
|
||||
newResponseKey := hex.EncodeToString(newResponseCursor)
|
||||
|
||||
var err error
|
||||
if includeRandomError && i == randomErrIdxInt && x == responses-2 { // Include an error in the last request
|
||||
err = errors.New("random error")
|
||||
}
|
||||
|
||||
t.queryResponses[newResponseKey] = queryResponse{
|
||||
topics: topicBatch,
|
||||
err: err,
|
||||
}
|
||||
|
||||
// Update the previous response cursor to point to the new response
|
||||
prevResponse := t.queryResponses[prevKey]
|
||||
prevResponse.cursor = newResponseCursor
|
||||
t.queryResponses[prevKey] = prevResponse
|
||||
|
||||
prevKey = newResponseKey
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestProcessMailserverBatchHappyPath(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
logger := tt.MustCreateTestLogger()
|
||||
|
||||
mailserverID, err := peer.Decode("16Uiu2HAkw3x97MbbZSWHbdF5bob45vcZvPPK4s4Mjyv2mxyB9GS3")
|
||||
require.NoError(t, err)
|
||||
topics := []types.TopicType{}
|
||||
for i := 0; i < 22; i++ {
|
||||
topics = append(topics, types.BytesToTopic([]byte{0, 0, 0, byte(i)}))
|
||||
}
|
||||
|
||||
testTransport := newMockTransport()
|
||||
err = testTransport.Populate(topics, 10, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
testBatch := MailserverBatch{
|
||||
Topics: topics,
|
||||
}
|
||||
|
||||
err = processMailserverBatch(ctx, testTransport, testBatch, mailserverID, logger, defaultStoreNodeRequestPageSize, nil, false)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestProcessMailserverBatchFailure(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
logger := tt.MustCreateTestLogger()
|
||||
|
||||
mailserverID, err := peer.Decode("16Uiu2HAkw3x97MbbZSWHbdF5bob45vcZvPPK4s4Mjyv2mxyB9GS3")
|
||||
require.NoError(t, err)
|
||||
topics := []types.TopicType{}
|
||||
for i := 0; i < 5; i++ {
|
||||
topics = append(topics, types.BytesToTopic([]byte{0, 0, 0, byte(i)}))
|
||||
}
|
||||
|
||||
testTransport := newMockTransport()
|
||||
err = testTransport.Populate(topics, 4, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
testBatch := MailserverBatch{
|
||||
Topics: topics,
|
||||
}
|
||||
|
||||
err = processMailserverBatch(ctx, testTransport, testBatch, mailserverID, logger, defaultStoreNodeRequestPageSize, nil, false)
|
||||
require.Error(t, err)
|
||||
}
|
|
@ -1,6 +1,7 @@
|
|||
package protocol
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
@ -10,13 +11,13 @@ import (
|
|||
gocommon "github.com/status-im/status-go/common"
|
||||
"github.com/status-im/status-go/eth-node/crypto"
|
||||
"github.com/status-im/status-go/protocol/common/shard"
|
||||
"github.com/waku-org/go-waku/waku/v2/api/history"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/status-im/status-go/eth-node/types"
|
||||
"github.com/status-im/status-go/protocol/communities"
|
||||
"github.com/status-im/status-go/protocol/transport"
|
||||
"github.com/status-im/status-go/services/mailservers"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -57,7 +58,7 @@ type StoreNodeRequestManager struct {
|
|||
// activeRequestsLock should be locked each time activeRequests is being accessed or changed.
|
||||
activeRequestsLock sync.RWMutex
|
||||
|
||||
onPerformingBatch func(MailserverBatch)
|
||||
onPerformingBatch func(types.MailserverBatch)
|
||||
}
|
||||
|
||||
func NewStoreNodeRequestManager(m *Messenger) *StoreNodeRequestManager {
|
||||
|
@ -75,7 +76,7 @@ func NewStoreNodeRequestManager(m *Messenger) *StoreNodeRequestManager {
|
|||
// the function will also wait for the store node response and return the fetched community.
|
||||
// Automatically waits for an available store node.
|
||||
// When a `nil` community and a `nil` error are returned, it means that the community wasn't found at the store node.
|
||||
func (m *StoreNodeRequestManager) FetchCommunity(community communities.CommunityShard, opts []StoreNodeRequestOption) (*communities.Community, StoreNodeRequestStats, error) {
|
||||
func (m *StoreNodeRequestManager) FetchCommunity(ctx context.Context, community communities.CommunityShard, opts []StoreNodeRequestOption) (*communities.Community, StoreNodeRequestStats, error) {
|
||||
cfg := buildStoreNodeRequestConfig(opts)
|
||||
|
||||
m.logger.Info("requesting community from store node",
|
||||
|
@ -83,7 +84,7 @@ func (m *StoreNodeRequestManager) FetchCommunity(community communities.Community
|
|||
zap.Any("config", cfg))
|
||||
|
||||
requestCommunity := func(communityID string, shard *shard.Shard) (*communities.Community, StoreNodeRequestStats, error) {
|
||||
channel, err := m.subscribeToRequest(storeNodeCommunityRequest, communityID, shard, cfg)
|
||||
channel, err := m.subscribeToRequest(ctx, storeNodeCommunityRequest, communityID, shard, cfg)
|
||||
if err != nil {
|
||||
return nil, StoreNodeRequestStats{}, fmt.Errorf("failed to create a request for community: %w", err)
|
||||
}
|
||||
|
@ -100,7 +101,7 @@ func (m *StoreNodeRequestManager) FetchCommunity(community communities.Community
|
|||
communityShard := community.Shard
|
||||
if communityShard == nil {
|
||||
id := transport.CommunityShardInfoTopic(community.CommunityID)
|
||||
fetchedShard, err := m.subscribeToRequest(storeNodeShardRequest, id, shard.DefaultNonProtectedShard(), cfg)
|
||||
fetchedShard, err := m.subscribeToRequest(ctx, storeNodeShardRequest, id, shard.DefaultNonProtectedShard(), cfg)
|
||||
if err != nil {
|
||||
return nil, StoreNodeRequestStats{}, fmt.Errorf("failed to create a shard info request: %w", err)
|
||||
}
|
||||
|
@ -134,7 +135,7 @@ func (m *StoreNodeRequestManager) FetchCommunity(community communities.Community
|
|||
// those content topics is spammed with too many envelopes, then on each iteration we will have to fetch all
|
||||
// of this spam first to get the envelopes in other content topics. To avoid this we keep independent requests
|
||||
// for each content topic.
|
||||
func (m *StoreNodeRequestManager) FetchCommunities(communities []communities.CommunityShard, opts []StoreNodeRequestOption) error {
|
||||
func (m *StoreNodeRequestManager) FetchCommunities(ctx context.Context, communities []communities.CommunityShard, opts []StoreNodeRequestOption) error {
|
||||
m.logger.Info("requesting communities from store node", zap.Any("communities", communities))
|
||||
|
||||
// when fetching multiple communities we don't wait for the response
|
||||
|
@ -143,7 +144,7 @@ func (m *StoreNodeRequestManager) FetchCommunities(communities []communities.Com
|
|||
var outErr error
|
||||
|
||||
for _, community := range communities {
|
||||
_, _, err := m.FetchCommunity(community, opts)
|
||||
_, _, err := m.FetchCommunity(ctx, community, opts)
|
||||
if err != nil {
|
||||
outErr = fmt.Errorf("%sfailed to create a request for community %s: %w", outErr, community.CommunityID, err)
|
||||
}
|
||||
|
@ -154,7 +155,7 @@ func (m *StoreNodeRequestManager) FetchCommunities(communities []communities.Com
|
|||
|
||||
// FetchContact - similar to FetchCommunity
|
||||
// If a `nil` contact and a `nil` error are returned, it means that the contact wasn't found at the store node.
|
||||
func (m *StoreNodeRequestManager) FetchContact(contactID string, opts []StoreNodeRequestOption) (*Contact, StoreNodeRequestStats, error) {
|
||||
func (m *StoreNodeRequestManager) FetchContact(ctx context.Context, contactID string, opts []StoreNodeRequestOption) (*Contact, StoreNodeRequestStats, error) {
|
||||
|
||||
cfg := buildStoreNodeRequestConfig(opts)
|
||||
|
||||
|
@ -162,7 +163,7 @@ func (m *StoreNodeRequestManager) FetchContact(contactID string, opts []StoreNod
|
|||
zap.Any("contactID", contactID),
|
||||
zap.Any("config", cfg))
|
||||
|
||||
channel, err := m.subscribeToRequest(storeNodeContactRequest, contactID, nil, cfg)
|
||||
channel, err := m.subscribeToRequest(ctx, storeNodeContactRequest, contactID, nil, cfg)
|
||||
if err != nil {
|
||||
return nil, StoreNodeRequestStats{}, fmt.Errorf("failed to create a request for community: %w", err)
|
||||
}
|
||||
|
@ -178,7 +179,7 @@ func (m *StoreNodeRequestManager) FetchContact(contactID string, opts []StoreNod
|
|||
// subscribeToRequest checks if a request for a given community/contact is already in progress, creates and installs
|
||||
// a new one if not found, and returns a subscription to the result of the found/started request.
|
||||
// The subscription can then be used to get the result of the request, this could be either a community/contact or an error.
|
||||
func (m *StoreNodeRequestManager) subscribeToRequest(requestType storeNodeRequestType, dataID string, shard *shard.Shard, cfg StoreNodeRequestConfig) (storeNodeResponseSubscription, error) {
|
||||
func (m *StoreNodeRequestManager) subscribeToRequest(ctx context.Context, requestType storeNodeRequestType, dataID string, shard *shard.Shard, cfg StoreNodeRequestConfig) (storeNodeResponseSubscription, error) {
|
||||
// It's important to unlock only after getting the subscription channel.
|
||||
// We also lock `activeRequestsLock` while finalizing the requests. This ensures that the subscription
|
||||
// created in this function will get the result even if the request proceeds faster than this function ends.
|
||||
|
@ -206,7 +207,7 @@ func (m *StoreNodeRequestManager) subscribeToRequest(requestType storeNodeReques
|
|||
return nil, fmt.Errorf("failed to create community filter: %w", err)
|
||||
}
|
||||
|
||||
request = m.newStoreNodeRequest()
|
||||
request = m.newStoreNodeRequest(ctx)
|
||||
request.config = cfg
|
||||
request.pubsubTopic = filter.PubsubTopic
|
||||
request.requestID = requestID
|
||||
|
@ -223,9 +224,10 @@ func (m *StoreNodeRequestManager) subscribeToRequest(requestType storeNodeReques
|
|||
}
|
||||
|
||||
// newStoreNodeRequest creates a new storeNodeRequest struct
|
||||
func (m *StoreNodeRequestManager) newStoreNodeRequest() *storeNodeRequest {
|
||||
func (m *StoreNodeRequestManager) newStoreNodeRequest(ctx context.Context) *storeNodeRequest {
|
||||
return &storeNodeRequest{
|
||||
manager: m,
|
||||
ctx: ctx,
|
||||
subscriptions: make([]storeNodeResponseSubscription, 0),
|
||||
}
|
||||
}
|
||||
|
@ -306,6 +308,7 @@ const (
|
|||
// For a valid storeNodeRequest to be performed, the user must set all the struct fields and call the start method.
|
||||
type storeNodeRequest struct {
|
||||
requestID storeNodeRequestID
|
||||
ctx context.Context
|
||||
|
||||
// request parameters
|
||||
pubsubTopic string
|
||||
|
@ -374,7 +377,7 @@ func (r *storeNodeRequest) finalize() {
|
|||
}
|
||||
}
|
||||
|
||||
func (r *storeNodeRequest) shouldFetchNextPage(envelopesCount int) (bool, uint32) {
|
||||
func (r *storeNodeRequest) shouldFetchNextPage(envelopesCount int) (bool, uint64) {
|
||||
logger := r.manager.logger.With(
|
||||
zap.Any("requestID", r.requestID),
|
||||
zap.Int("envelopesCount", envelopesCount))
|
||||
|
@ -524,13 +527,15 @@ func (r *storeNodeRequest) routine() {
|
|||
communityID := r.requestID.getCommunityID()
|
||||
|
||||
if r.requestID.RequestType != storeNodeCommunityRequest || !r.manager.messenger.communityStorenodes.HasStorenodeSetup(communityID) {
|
||||
if !r.manager.messenger.waitForAvailableStoreNode(storeNodeAvailableTimeout) {
|
||||
ctx, cancel := context.WithTimeout(r.ctx, storeNodeAvailableTimeout)
|
||||
defer cancel()
|
||||
if !r.manager.messenger.transport.WaitForAvailableStoreNode(ctx) {
|
||||
r.result.err = fmt.Errorf("store node is not available")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
storeNode := r.manager.messenger.getActiveMailserver(communityID)
|
||||
storeNode := r.manager.messenger.getCommunityStorenode(communityID)
|
||||
|
||||
// Check if community already exists locally and get Clock.
|
||||
if r.requestID.RequestType == storeNodeCommunityRequest {
|
||||
|
@ -543,8 +548,8 @@ func (r *storeNodeRequest) routine() {
|
|||
// Start store node request
|
||||
from, to := r.manager.messenger.calculateMailserverTimeBounds(oneMonthDuration)
|
||||
|
||||
_, err := r.manager.messenger.performMailserverRequest(storeNode, func(ms mailservers.Mailserver) (*MessengerResponse, error) {
|
||||
batch := MailserverBatch{
|
||||
_, err := r.manager.messenger.performStorenodeTask(func() (*MessengerResponse, error) {
|
||||
batch := types.MailserverBatch{
|
||||
From: from,
|
||||
To: to,
|
||||
PubsubTopic: r.pubsubTopic,
|
||||
|
@ -555,8 +560,8 @@ func (r *storeNodeRequest) routine() {
|
|||
r.manager.onPerformingBatch(batch)
|
||||
}
|
||||
|
||||
return nil, r.manager.messenger.processMailserverBatchWithOptions(ms, batch, r.config.InitialPageSize, r.shouldFetchNextPage, true)
|
||||
})
|
||||
return nil, r.manager.messenger.processMailserverBatchWithOptions(storeNode, batch, r.config.InitialPageSize, r.shouldFetchNextPage, true)
|
||||
}, history.WithPeerID(storeNode))
|
||||
|
||||
r.result.err = err
|
||||
}
|
||||
|
|
|
@ -3,8 +3,8 @@ package protocol
|
|||
type StoreNodeRequestConfig struct {
|
||||
WaitForResponse bool
|
||||
StopWhenDataFound bool
|
||||
InitialPageSize uint32
|
||||
FurtherPageSize uint32
|
||||
InitialPageSize uint64
|
||||
FurtherPageSize uint64
|
||||
}
|
||||
|
||||
type StoreNodeRequestOption func(*StoreNodeRequestConfig)
|
||||
|
@ -40,13 +40,13 @@ func WithStopWhenDataFound(stopWhenDataFound bool) StoreNodeRequestOption {
|
|||
}
|
||||
}
|
||||
|
||||
func WithInitialPageSize(initialPageSize uint32) StoreNodeRequestOption {
|
||||
func WithInitialPageSize(initialPageSize uint64) StoreNodeRequestOption {
|
||||
return func(c *StoreNodeRequestConfig) {
|
||||
c.InitialPageSize = initialPageSize
|
||||
}
|
||||
}
|
||||
|
||||
func WithFurtherPageSize(furtherPageSize uint32) StoreNodeRequestOption {
|
||||
func WithFurtherPageSize(furtherPageSize uint64) StoreNodeRequestOption {
|
||||
return func(c *StoreNodeRequestConfig) {
|
||||
c.FurtherPageSize = furtherPageSize
|
||||
}
|
||||
|
|
|
@ -149,7 +149,7 @@ func (s *MessengerStoreNodeCommunitySuite) newMessenger(name string, storenodeAd
|
|||
}
|
||||
|
||||
func (s *MessengerStoreNodeCommunitySuite) createCommunityWithChat(m *Messenger) (*communities.Community, *Chat) {
|
||||
WaitForAvailableStoreNode(&s.Suite, m, 500*time.Millisecond)
|
||||
WaitForAvailableStoreNode(&s.Suite, m, context.TODO())
|
||||
|
||||
storeNodeSubscription := s.setupStoreNodeEnvelopesWatcher(nil)
|
||||
|
||||
|
@ -197,7 +197,7 @@ func (s *MessengerStoreNodeCommunitySuite) fetchCommunity(m *Messenger, communit
|
|||
WithWaitForResponseOption(true),
|
||||
}
|
||||
|
||||
fetchedCommunity, stats, err := m.storeNodeRequestsManager.FetchCommunity(communityShard, options)
|
||||
fetchedCommunity, stats, err := m.storeNodeRequestsManager.FetchCommunity(context.TODO(), communityShard, options)
|
||||
|
||||
s.Require().NoError(err)
|
||||
s.requireCommunitiesEqual(fetchedCommunity, expectedCommunity)
|
||||
|
@ -351,10 +351,10 @@ func (s *MessengerStoreNodeCommunitySuite) TestToggleUseMailservers() {
|
|||
// Enable use of mailservers
|
||||
err := s.owner.ToggleUseMailservers(true)
|
||||
s.Require().NoError(err)
|
||||
s.Require().NotNil(s.owner.mailserverCycle.activeMailserver)
|
||||
s.Require().NotNil(s.owner.transport.GetActiveStorenode())
|
||||
|
||||
// Disable use of mailservers
|
||||
err = s.owner.ToggleUseMailservers(false)
|
||||
s.Require().NoError(err)
|
||||
s.Require().Nil(s.owner.mailserverCycle.activeMailserver)
|
||||
s.Require().Nil(s.owner.transport.GetActiveStorenode())
|
||||
}
|
||||
|
|
|
@ -235,7 +235,7 @@ func (s *MessengerStoreNodeRequestSuite) newMessenger(shh types.Waku, logger *za
|
|||
}
|
||||
|
||||
func (s *MessengerStoreNodeRequestSuite) createCommunity(m *Messenger) *communities.Community {
|
||||
s.waitForAvailableStoreNode(m)
|
||||
s.WaitForAvailableStoreNode(m)
|
||||
|
||||
storeNodeSubscription := s.setupStoreNodeEnvelopesWatcher(nil)
|
||||
|
||||
|
@ -290,7 +290,7 @@ func (s *MessengerStoreNodeRequestSuite) fetchCommunity(m *Messenger, communityS
|
|||
WithWaitForResponseOption(true),
|
||||
}
|
||||
|
||||
fetchedCommunity, stats, err := m.storeNodeRequestsManager.FetchCommunity(communityShard, options)
|
||||
fetchedCommunity, stats, err := m.storeNodeRequestsManager.FetchCommunity(context.TODO(), communityShard, options)
|
||||
|
||||
s.Require().NoError(err)
|
||||
s.requireCommunitiesEqual(fetchedCommunity, expectedCommunity)
|
||||
|
@ -309,8 +309,10 @@ func (s *MessengerStoreNodeRequestSuite) fetchProfile(m *Messenger, contactID st
|
|||
}
|
||||
}
|
||||
|
||||
func (s *MessengerStoreNodeRequestSuite) waitForAvailableStoreNode(messenger *Messenger) {
|
||||
WaitForAvailableStoreNode(&s.Suite, messenger, storeNodeConnectTimeout)
|
||||
func (s *MessengerStoreNodeRequestSuite) WaitForAvailableStoreNode(messenger *Messenger) {
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), storeNodeConnectTimeout)
|
||||
defer cancel()
|
||||
WaitForAvailableStoreNode(&s.Suite, messenger, ctx)
|
||||
}
|
||||
|
||||
func (s *MessengerStoreNodeRequestSuite) setupEnvelopesWatcher(wakuNode *waku2.Waku, topic *wakuV2common.TopicType, cb func(envelope *wakuV2common.ReceivedMessage)) {
|
||||
|
@ -419,11 +421,11 @@ func (s *MessengerStoreNodeRequestSuite) TestSimultaneousCommunityInfoRequests()
|
|||
community := s.createCommunity(s.owner)
|
||||
|
||||
storeNodeRequestsCount := 0
|
||||
s.bob.storeNodeRequestsManager.onPerformingBatch = func(batch MailserverBatch) {
|
||||
s.bob.storeNodeRequestsManager.onPerformingBatch = func(batch types.MailserverBatch) {
|
||||
storeNodeRequestsCount++
|
||||
}
|
||||
|
||||
s.waitForAvailableStoreNode(s.bob)
|
||||
s.WaitForAvailableStoreNode(s.bob)
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
|
||||
|
@ -453,7 +455,7 @@ func (s *MessengerStoreNodeRequestSuite) TestRequestNonExistentCommunity() {
|
|||
|
||||
s.createBob()
|
||||
|
||||
s.waitForAvailableStoreNode(s.bob)
|
||||
s.WaitForAvailableStoreNode(s.bob)
|
||||
fetchedCommunity, err := s.bob.FetchCommunity(&request)
|
||||
|
||||
s.Require().NoError(err)
|
||||
|
@ -722,7 +724,7 @@ func (s *MessengerStoreNodeRequestSuite) TestRequestShardAndCommunityInfo() {
|
|||
|
||||
s.waitForEnvelopes(storeNodeSubscription, 1)
|
||||
|
||||
s.waitForAvailableStoreNode(s.bob)
|
||||
s.WaitForAvailableStoreNode(s.bob)
|
||||
|
||||
communityShard := community.CommunityShard()
|
||||
|
||||
|
@ -806,7 +808,7 @@ func (s *MessengerStoreNodeRequestSuite) TestRequestCommunityEnvelopesOrder() {
|
|||
}
|
||||
|
||||
// Fetch the community
|
||||
fetchedCommunity, _, err := s.bob.storeNodeRequestsManager.FetchCommunity(community.CommunityShard(), options)
|
||||
fetchedCommunity, _, err := s.bob.storeNodeRequestsManager.FetchCommunity(context.TODO(), community.CommunityShard(), options)
|
||||
s.Require().NoError(err)
|
||||
s.requireCommunitiesEqual(fetchedCommunity, community)
|
||||
|
||||
|
@ -1160,7 +1162,7 @@ func (s *MessengerStoreNodeRequestSuite) TestFetchRealCommunity() {
|
|||
}
|
||||
storeNodeRequestOptions = append(storeNodeRequestOptions, exampleToRun.CustomOptions...)
|
||||
|
||||
fetchedCommunity, stats, err := user.storeNodeRequestsManager.FetchCommunity(communityAddress, storeNodeRequestOptions)
|
||||
fetchedCommunity, stats, err := user.storeNodeRequestsManager.FetchCommunity(context.TODO(), communityAddress, storeNodeRequestOptions)
|
||||
|
||||
result.EnvelopesCount = stats.FetchedEnvelopesCount
|
||||
result.FetchedCommunity = fetchedCommunity
|
||||
|
@ -1195,7 +1197,7 @@ func (s *MessengerStoreNodeRequestSuite) TestFetchingCommunityWithOwnerToken() {
|
|||
s.createOwner()
|
||||
s.createBob()
|
||||
|
||||
s.waitForAvailableStoreNode(s.owner)
|
||||
s.WaitForAvailableStoreNode(s.owner)
|
||||
community := s.createCommunity(s.owner)
|
||||
|
||||
// owner mints owner token
|
||||
|
@ -1228,7 +1230,7 @@ func (s *MessengerStoreNodeRequestSuite) TestFetchingCommunityWithOwnerToken() {
|
|||
s.Require().NoError(err)
|
||||
s.Require().Len(community.TokenPermissions(), 1)
|
||||
|
||||
s.waitForAvailableStoreNode(s.bob)
|
||||
s.WaitForAvailableStoreNode(s.bob)
|
||||
|
||||
s.fetchCommunity(s.bob, community.CommunityShard(), community)
|
||||
}
|
||||
|
|
|
@ -364,8 +364,10 @@ func SetIdentityImagesAndWaitForChange(s *suite.Suite, messenger *Messenger, tim
|
|||
s.Require().True(ok)
|
||||
}
|
||||
|
||||
func WaitForAvailableStoreNode(s *suite.Suite, m *Messenger, timeout time.Duration) {
|
||||
available := m.waitForAvailableStoreNode(timeout)
|
||||
func WaitForAvailableStoreNode(s *suite.Suite, m *Messenger, ctx context.Context) {
|
||||
ctx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
|
||||
defer cancel()
|
||||
available := m.transport.WaitForAvailableStoreNode(ctx)
|
||||
s.Require().True(available)
|
||||
}
|
||||
|
||||
|
|
|
@ -6,6 +6,10 @@ import (
|
|||
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
|
||||
"github.com/waku-org/go-waku/waku/v2/utils"
|
||||
|
||||
"github.com/status-im/status-go/eth-node/types"
|
||||
"github.com/status-im/status-go/services/mailservers"
|
||||
)
|
||||
|
@ -51,13 +55,14 @@ func (m *CommunityStorenodes) GetStorenodeByCommunityID(communityID string) (mai
|
|||
return toMailserver(msData.storenodes[0]), nil
|
||||
}
|
||||
|
||||
func (m *CommunityStorenodes) IsCommunityStoreNode(id string) bool {
|
||||
func (m *CommunityStorenodes) IsCommunityStoreNode(peerID peer.ID) bool {
|
||||
m.storenodesByCommunityIDMutex.RLock()
|
||||
defer m.storenodesByCommunityIDMutex.RUnlock()
|
||||
|
||||
for _, data := range m.storenodesByCommunityID {
|
||||
for _, snode := range data.storenodes {
|
||||
if snode.StorenodeID == id {
|
||||
commStorenodeID, err := utils.GetPeerID(snode.Address)
|
||||
if err == nil && commStorenodeID == peerID {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
|
|
@ -4,17 +4,17 @@ import (
|
|||
"context"
|
||||
"crypto/ecdsa"
|
||||
"database/sql"
|
||||
"encoding/hex"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"github.com/pkg/errors"
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/exp/maps"
|
||||
|
||||
"github.com/waku-org/go-waku/waku/v2/api/history"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
gocommon "github.com/status-im/status-go/common"
|
||||
|
@ -462,89 +462,6 @@ func (t *Transport) Peers() types.PeerStats {
|
|||
return t.waku.Peers()
|
||||
}
|
||||
|
||||
func (t *Transport) createMessagesRequest(
|
||||
ctx context.Context,
|
||||
peerID peer.ID,
|
||||
from, to uint32,
|
||||
previousStoreCursor types.StoreRequestCursor,
|
||||
pubsubTopic string,
|
||||
contentTopics []types.TopicType,
|
||||
limit uint32,
|
||||
waitForResponse bool,
|
||||
processEnvelopes bool,
|
||||
) (storeCursor types.StoreRequestCursor, envelopesCount int, err error) {
|
||||
r := createMessagesRequest(from, to, nil, previousStoreCursor, pubsubTopic, contentTopics, limit)
|
||||
|
||||
if waitForResponse {
|
||||
resultCh := make(chan struct {
|
||||
storeCursor types.StoreRequestCursor
|
||||
envelopesCount int
|
||||
err error
|
||||
})
|
||||
|
||||
go func() {
|
||||
defer gocommon.LogOnPanic()
|
||||
storeCursor, envelopesCount, err = t.waku.RequestStoreMessages(ctx, peerID, r, processEnvelopes)
|
||||
resultCh <- struct {
|
||||
storeCursor types.StoreRequestCursor
|
||||
envelopesCount int
|
||||
err error
|
||||
}{storeCursor, envelopesCount, err}
|
||||
}()
|
||||
|
||||
select {
|
||||
case result := <-resultCh:
|
||||
return result.storeCursor, result.envelopesCount, result.err
|
||||
case <-ctx.Done():
|
||||
return nil, 0, ctx.Err()
|
||||
}
|
||||
} else {
|
||||
go func() {
|
||||
defer gocommon.LogOnPanic()
|
||||
_, _, err = t.waku.RequestStoreMessages(ctx, peerID, r, false)
|
||||
if err != nil {
|
||||
t.logger.Error("failed to request store messages", zap.Error(err))
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (t *Transport) SendMessagesRequestForTopics(
|
||||
ctx context.Context,
|
||||
peerID peer.ID,
|
||||
from, to uint32,
|
||||
prevCursor types.StoreRequestCursor,
|
||||
pubsubTopic string,
|
||||
contentTopics []types.TopicType,
|
||||
limit uint32,
|
||||
waitForResponse bool,
|
||||
processEnvelopes bool,
|
||||
) (cursor types.StoreRequestCursor, envelopesCount int, err error) {
|
||||
return t.createMessagesRequest(ctx, peerID, from, to, prevCursor, pubsubTopic, contentTopics, limit, waitForResponse, processEnvelopes)
|
||||
}
|
||||
|
||||
func createMessagesRequest(from, to uint32, cursor []byte, storeCursor types.StoreRequestCursor, pubsubTopic string, topics []types.TopicType, limit uint32) types.MessagesRequest {
|
||||
aUUID := uuid.New()
|
||||
// uuid is 16 bytes, converted to hex it's 32 bytes as expected by types.MessagesRequest
|
||||
id := []byte(hex.EncodeToString(aUUID[:]))
|
||||
var topicBytes [][]byte
|
||||
for idx := range topics {
|
||||
topicBytes = append(topicBytes, topics[idx][:])
|
||||
}
|
||||
return types.MessagesRequest{
|
||||
ID: id,
|
||||
From: from,
|
||||
To: to,
|
||||
Limit: limit,
|
||||
Cursor: cursor,
|
||||
PubsubTopic: pubsubTopic,
|
||||
ContentTopics: topicBytes,
|
||||
StoreCursor: storeCursor,
|
||||
}
|
||||
}
|
||||
|
||||
// ConfirmMessagesProcessed marks the messages as processed in the cache so
|
||||
// they won't be passed to the next layer anymore
|
||||
func (t *Transport) ConfirmMessagesProcessed(ids []string, timestamp uint64) error {
|
||||
|
@ -635,10 +552,6 @@ func (t *Transport) ConnectionChanged(state connection.State) {
|
|||
t.waku.ConnectionChanged(state)
|
||||
}
|
||||
|
||||
func (t *Transport) PingPeer(ctx context.Context, peerID peer.ID) (time.Duration, error) {
|
||||
return t.waku.PingPeer(ctx, peerID)
|
||||
}
|
||||
|
||||
// Subscribe to a pubsub topic, passing an optional public key if the pubsub topic is protected
|
||||
func (t *Transport) SubscribeToPubsubTopic(topic string, optPublicKey *ecdsa.PublicKey) error {
|
||||
if t.waku.Version() == 2 {
|
||||
|
@ -685,10 +598,6 @@ func (t *Transport) ConfirmMessageDelivered(messageID string) {
|
|||
t.waku.ConfirmMessageDelivered(commHashes)
|
||||
}
|
||||
|
||||
func (t *Transport) SetStorePeerID(peerID peer.ID) {
|
||||
t.waku.SetStorePeerID(peerID)
|
||||
}
|
||||
|
||||
func (t *Transport) SetCriteriaForMissingMessageVerification(peerID peer.ID, filters []*Filter) {
|
||||
if t.waku.Version() != 2 {
|
||||
return
|
||||
|
@ -721,3 +630,52 @@ func (t *Transport) SetCriteriaForMissingMessageVerification(peerID peer.ID, fil
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Transport) GetActiveStorenode() peer.ID {
|
||||
return t.waku.GetActiveStorenode()
|
||||
}
|
||||
|
||||
func (t *Transport) DisconnectActiveStorenode(ctx context.Context, backoffReason time.Duration, shouldCycle bool) {
|
||||
t.waku.DisconnectActiveStorenode(ctx, backoffReason, shouldCycle)
|
||||
}
|
||||
|
||||
func (t *Transport) OnStorenodeChanged() <-chan peer.ID {
|
||||
return t.waku.OnStorenodeChanged()
|
||||
}
|
||||
|
||||
func (t *Transport) OnStorenodeNotWorking() <-chan struct{} {
|
||||
return t.waku.OnStorenodeNotWorking()
|
||||
}
|
||||
|
||||
func (t *Transport) OnStorenodeAvailable() <-chan peer.ID {
|
||||
return t.waku.OnStorenodeAvailable()
|
||||
}
|
||||
|
||||
func (t *Transport) WaitForAvailableStoreNode(ctx context.Context) bool {
|
||||
return t.waku.WaitForAvailableStoreNode(ctx)
|
||||
}
|
||||
|
||||
func (t *Transport) IsStorenodeAvailable(peerID peer.ID) bool {
|
||||
return t.waku.IsStorenodeAvailable(peerID)
|
||||
}
|
||||
|
||||
func (t *Transport) PerformStorenodeTask(fn func() error, opts ...history.StorenodeTaskOption) error {
|
||||
return t.waku.PerformStorenodeTask(fn, opts...)
|
||||
}
|
||||
|
||||
func (t *Transport) ProcessMailserverBatch(
|
||||
ctx context.Context,
|
||||
batch types.MailserverBatch,
|
||||
storenodeID peer.ID,
|
||||
pageLimit uint64,
|
||||
shouldProcessNextPage func(int) (bool, uint64),
|
||||
processEnvelopes bool,
|
||||
) error {
|
||||
return t.waku.ProcessMailserverBatch(ctx, batch, storenodeID, pageLimit, shouldProcessNextPage, processEnvelopes)
|
||||
}
|
||||
|
||||
func (t *Transport) SetStorenodeConfigProvider(c history.StorenodeConfigProvider) {
|
||||
if t.WakuVersion() == 2 {
|
||||
t.waku.SetStorenodeConfigProvider(c)
|
||||
}
|
||||
}
|
||||
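Since this commit also gives Messenger UseStorenodes, GetPinnedStorenode and Storenodes with matching signatures, the messenger itself can act as the provider. A plausible wiring sketch, not shown in this diff:

// Hypothetical wiring at messenger start-up: Messenger satisfies
// history.StorenodeConfigProvider, so it can be handed to the transport.
m.transport.SetStorenodeConfigProvider(m)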
|
|
|
@ -1386,10 +1386,6 @@ func (api *PublicAPI) RequestAllHistoricMessagesWithRetries(forceFetchingBackup
|
|||
return api.service.messenger.RequestAllHistoricMessages(forceFetchingBackup, true)
|
||||
}
|
||||
|
||||
func (api *PublicAPI) DisconnectActiveMailserver() {
|
||||
api.service.messenger.DisconnectActiveMailserver()
|
||||
}
|
||||
|
||||
// Echo is a method for testing purposes.
|
||||
func (api *PublicAPI) Echo(ctx context.Context, message string) (string, error) {
|
||||
return message, nil
|
||||
|
|
|
@ -74,6 +74,14 @@ func (t timestamp) String() string {
|
|||
return time.Unix(0, int64(t)).Format(time.RFC3339)
|
||||
}
|
||||
|
||||
func Timep(key string, time *int64) zapcore.Field {
|
||||
if time == nil {
|
||||
return zap.String(key, "-")
|
||||
} else {
|
||||
return Time(key, *time)
|
||||
}
|
||||
}
|
||||
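A short usage sketch of the new helper; the logger, key and pointer here are illustrative:

// Logs "-" when the pointer is nil, otherwise the formatted timestamp.
logger.Debug("envelope stored", Timep("receivedAt", msg.ReceivedAt))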
|
||||
func Epoch(key string, time time.Time) zap.Field {
|
||||
return zap.String(key, fmt.Sprintf("%d", time.UnixNano()))
|
||||
}
|
||||
|
|
|
@ -0,0 +1,17 @@
|
|||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/store"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/store/pb"
|
||||
)
|
||||
|
||||
type StoreRequestResult interface {
|
||||
Cursor() []byte
|
||||
IsComplete() bool
|
||||
PeerID() peer.ID
|
||||
Next(ctx context.Context, opts ...store.RequestOption) error // TODO: see how to decouple store.RequestOption
|
||||
Messages() []*pb.WakuMessageKeyValue
|
||||
}
|
|
@ -0,0 +1,536 @@
|
|||
package history
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"net"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/p2p/protocol/ping"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/store"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const defaultBackoff = 10 * time.Second
|
||||
const graylistBackoff = 3 * time.Minute
|
||||
const storenodeVerificationInterval = time.Second
|
||||
const storenodeMaxFailedRequests uint = 2
|
||||
const minStorenodesToChooseFrom = 3
|
||||
const isAndroidEmulator = runtime.GOOS == "android" && runtime.GOARCH == "amd64"
|
||||
const findNearestMailServer = !isAndroidEmulator
|
||||
const overrideDNS = runtime.GOOS == "android" || runtime.GOOS == "ios"
|
||||
const bootstrapDNS = "8.8.8.8:53"
|
||||
|
||||
type connStatus int
|
||||
|
||||
const (
|
||||
disconnected connStatus = iota + 1
|
||||
connected
|
||||
)
|
||||
|
||||
type peerStatus struct {
|
||||
status connStatus
|
||||
canConnectAfter time.Time
|
||||
lastConnectionAttempt time.Time
|
||||
}
|
||||
|
||||
type StorenodeConfigProvider interface {
|
||||
UseStorenodes() (bool, error)
|
||||
GetPinnedStorenode() (peer.ID, error)
|
||||
Storenodes() ([]peer.ID, error)
|
||||
}
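A hedged sketch of a trivial provider satisfying this interface, e.g. for tests against a fixed fleet (the type and field names are illustrative, not part of this change):

package example

import "github.com/libp2p/go-libp2p/core/peer"

// staticProvider serves a fixed storenode list; an empty pinned ID means "no pinned storenode".
type staticProvider struct {
	nodes  []peer.ID
	pinned peer.ID
}

func (p staticProvider) UseStorenodes() (bool, error)         { return true, nil }
func (p staticProvider) GetPinnedStorenode() (peer.ID, error) { return p.pinned, nil }
func (p staticProvider) Storenodes() ([]peer.ID, error)       { return p.nodes, nil }

Such a value would be handed to SetStorenodeConfigProvider before Cycle or findNewStorenode can select anything.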
|
||||
|
||||
type StorenodeCycle struct {
|
||||
sync.RWMutex
|
||||
|
||||
logger *zap.Logger
|
||||
|
||||
host host.Host
|
||||
|
||||
storenodeConfigProvider StorenodeConfigProvider
|
||||
|
||||
StorenodeAvailableOneshotEmitter *OneShotEmitter[struct{}]
|
||||
StorenodeChangedEmitter *Emitter[peer.ID]
|
||||
StorenodeNotWorkingEmitter *Emitter[struct{}]
|
||||
StorenodeAvailableEmitter *Emitter[peer.ID]
|
||||
|
||||
failedRequests map[peer.ID]uint
|
||||
|
||||
peersMutex sync.RWMutex
|
||||
activeStorenode peer.ID
|
||||
peers map[peer.ID]peerStatus
|
||||
}
|
||||
|
||||
func NewStorenodeCycle(logger *zap.Logger) *StorenodeCycle {
|
||||
return &StorenodeCycle{
|
||||
StorenodeAvailableOneshotEmitter: NewOneshotEmitter[struct{}](),
|
||||
StorenodeChangedEmitter: NewEmitter[peer.ID](),
|
||||
StorenodeNotWorkingEmitter: NewEmitter[struct{}](),
|
||||
StorenodeAvailableEmitter: NewEmitter[peer.ID](),
|
||||
logger: logger.Named("storenode-cycle"),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *StorenodeCycle) Start(ctx context.Context, h host.Host) {
|
||||
m.logger.Debug("starting storenode cycle")
|
||||
m.host = h
|
||||
m.failedRequests = make(map[peer.ID]uint)
|
||||
m.peers = make(map[peer.ID]peerStatus)
|
||||
|
||||
go m.verifyStorenodeStatus(ctx)
|
||||
}
|
||||
|
||||
func (m *StorenodeCycle) DisconnectActiveStorenode(backoff time.Duration) {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
m.disconnectActiveStorenode(backoff)
|
||||
}
|
||||
|
||||
func (m *StorenodeCycle) connectToNewStorenodeAndWait(ctx context.Context) error {
|
||||
// Handle pinned storenodes
|
||||
m.logger.Info("disconnecting storenode")
|
||||
pinnedStorenode, err := m.storenodeConfigProvider.GetPinnedStorenode()
|
||||
if err != nil {
|
||||
m.logger.Error("could not obtain the pinned storenode", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
// If no pinned storenode, no need to disconnect and wait for it to be available
|
||||
if pinnedStorenode == "" {
|
||||
m.disconnectActiveStorenode(graylistBackoff)
|
||||
}
|
||||
|
||||
return m.findNewStorenode(ctx)
|
||||
}
|
||||
|
||||
func (m *StorenodeCycle) disconnectStorenode(backoffDuration time.Duration) error {
|
||||
if m.activeStorenode == "" {
|
||||
m.logger.Info("no active storenode")
|
||||
return nil
|
||||
}
|
||||
|
||||
m.logger.Info("disconnecting active storenode", zap.Stringer("peerID", m.activeStorenode))
|
||||
|
||||
m.peersMutex.Lock()
|
||||
pInfo, ok := m.peers[m.activeStorenode]
|
||||
if ok {
|
||||
pInfo.status = disconnected
|
||||
pInfo.canConnectAfter = time.Now().Add(backoffDuration)
|
||||
m.peers[m.activeStorenode] = pInfo
|
||||
} else {
|
||||
m.peers[m.activeStorenode] = peerStatus{
|
||||
status: disconnected,
|
||||
canConnectAfter: time.Now().Add(backoffDuration),
|
||||
}
|
||||
}
|
||||
m.peersMutex.Unlock()
|
||||
|
||||
m.activeStorenode = ""
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *StorenodeCycle) disconnectActiveStorenode(backoffDuration time.Duration) {
|
||||
err := m.disconnectStorenode(backoffDuration)
|
||||
if err != nil {
|
||||
m.logger.Error("failed to disconnect storenode", zap.Error(err))
|
||||
}
|
||||
|
||||
m.StorenodeChangedEmitter.Emit("")
|
||||
}
|
||||
|
||||
func (m *StorenodeCycle) Cycle(ctx context.Context) {
|
||||
if m.storenodeConfigProvider == nil {
|
||||
m.logger.Debug("storenodeConfigProvider not yet setup")
|
||||
return
|
||||
}
|
||||
|
||||
m.logger.Info("Automatically switching storenode")
|
||||
|
||||
if m.activeStorenode != "" {
|
||||
m.disconnectActiveStorenode(graylistBackoff)
|
||||
}
|
||||
|
||||
useStorenode, err := m.storenodeConfigProvider.UseStorenodes()
|
||||
if err != nil {
|
||||
m.logger.Error("failed to get use storenodes", zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
if !useStorenode {
|
||||
m.logger.Info("Skipping storenode search due to useStorenode being false")
|
||||
return
|
||||
}
|
||||
|
||||
err = m.findNewStorenode(ctx)
|
||||
if err != nil {
|
||||
m.logger.Error("Error getting new storenode", zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
func poolSize(fleetSize int) int {
|
||||
return int(math.Ceil(float64(fleetSize) / 4))
|
||||
}
|
||||
|
||||
func (m *StorenodeCycle) getAvailableStorenodesSortedByRTT(ctx context.Context, allStorenodes []peer.ID) []peer.ID {
|
||||
availableStorenodes := make(map[peer.ID]time.Duration)
|
||||
availableStorenodesMutex := sync.Mutex{}
|
||||
availableStorenodesWg := sync.WaitGroup{}
|
||||
for _, storenode := range allStorenodes {
|
||||
availableStorenodesWg.Add(1)
|
||||
go func(peerID peer.ID) {
|
||||
defer availableStorenodesWg.Done()
|
||||
ctx, cancel := context.WithTimeout(ctx, 4*time.Second)
|
||||
defer cancel()
|
||||
|
||||
rtt, err := m.pingPeer(ctx, peerID)
|
||||
if err == nil { // pinging storenodes might fail, but we don't care
|
||||
availableStorenodesMutex.Lock()
|
||||
availableStorenodes[peerID] = rtt
|
||||
availableStorenodesMutex.Unlock()
|
||||
}
|
||||
}(storenode)
|
||||
}
|
||||
availableStorenodesWg.Wait()
|
||||
|
||||
if len(availableStorenodes) == 0 {
|
||||
m.logger.Warn("No storenodes available") // Do nothing..
|
||||
return nil
|
||||
}
|
||||
|
||||
var sortedStorenodes []SortedStorenode
|
||||
for storenodeID, rtt := range availableStorenodes {
|
||||
sortedStorenode := SortedStorenode{
|
||||
Storenode: storenodeID,
|
||||
RTT: rtt,
|
||||
}
|
||||
m.peersMutex.Lock()
|
||||
pInfo, ok := m.peers[storenodeID]
|
||||
m.peersMutex.Unlock()
|
||||
if ok && time.Now().Before(pInfo.canConnectAfter) {
|
||||
continue // We can't connect to this node yet
|
||||
}
|
||||
sortedStorenodes = append(sortedStorenodes, sortedStorenode)
|
||||
}
|
||||
sort.Sort(byRTTMsAndCanConnectBefore(sortedStorenodes))
|
||||
|
||||
result := make([]peer.ID, len(sortedStorenodes))
|
||||
for i, s := range sortedStorenodes {
|
||||
result[i] = s.Storenode
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func (m *StorenodeCycle) pingPeer(ctx context.Context, peerID peer.ID) (time.Duration, error) {
|
||||
pingResultCh := ping.Ping(ctx, m.host, peerID)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return 0, ctx.Err()
|
||||
case r := <-pingResultCh:
|
||||
if r.Error != nil {
|
||||
return 0, r.Error
|
||||
}
|
||||
return r.RTT, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (m *StorenodeCycle) findNewStorenode(ctx context.Context) error {
|
||||
// we have to override DNS manually because of https://github.com/status-im/status-mobile/issues/19581
|
||||
if overrideDNS {
|
||||
var dialer net.Dialer
|
||||
net.DefaultResolver = &net.Resolver{
|
||||
PreferGo: false,
|
||||
Dial: func(context context.Context, _, _ string) (net.Conn, error) {
|
||||
conn, err := dialer.DialContext(context, "udp", bootstrapDNS)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pinnedStorenode, err := m.storenodeConfigProvider.GetPinnedStorenode()
|
||||
if err != nil {
|
||||
m.logger.Error("Could not obtain the pinned storenode", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
if pinnedStorenode != "" {
|
||||
return m.setActiveStorenode(pinnedStorenode)
|
||||
}
|
||||
|
||||
m.logger.Info("Finding a new storenode..")
|
||||
|
||||
allStorenodes, err := m.storenodeConfigProvider.Storenodes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO: remove this check once sockets are stable on x86_64 emulators
|
||||
if findNearestMailServer {
|
||||
allStorenodes = m.getAvailableStorenodesSortedByRTT(ctx, allStorenodes)
|
||||
}
|
||||
|
||||
// Picks a random storenode among the ones with the lowest latency
// The pool size is 1/4 of the storenodes that were pinged successfully
// If the pool size is less than `minStorenodesToChooseFrom`, it will
// pick a storenode from all the available storenodes
|
||||
pSize := poolSize(len(allStorenodes) - 1)
|
||||
if pSize <= minStorenodesToChooseFrom {
|
||||
pSize = len(allStorenodes)
|
||||
if pSize <= 0 {
|
||||
m.logger.Warn("No storenodes available") // Do nothing..
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
r, err := rand.Int(rand.Reader, big.NewInt(int64(pSize)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ms := allStorenodes[r.Int64()]
|
||||
return m.setActiveStorenode(ms)
|
||||
}
|
||||
|
||||
func (m *StorenodeCycle) storenodeStatus(peerID peer.ID) connStatus {
|
||||
m.peersMutex.RLock()
|
||||
defer m.peersMutex.RUnlock()
|
||||
|
||||
peer, ok := m.peers[peerID]
|
||||
if !ok {
|
||||
return disconnected
|
||||
}
|
||||
return peer.status
|
||||
}
|
||||
|
||||
func (m *StorenodeCycle) setActiveStorenode(peerID peer.ID) error {
|
||||
m.activeStorenode = peerID
|
||||
|
||||
m.StorenodeChangedEmitter.Emit(m.activeStorenode)
|
||||
|
||||
storenodeStatus := m.storenodeStatus(peerID)
|
||||
if storenodeStatus != connected {
|
||||
m.peersMutex.Lock()
|
||||
m.peers[peerID] = peerStatus{
|
||||
status: connected,
|
||||
lastConnectionAttempt: time.Now(),
|
||||
canConnectAfter: time.Now().Add(defaultBackoff),
|
||||
}
|
||||
m.peersMutex.Unlock()
|
||||
|
||||
m.failedRequests[peerID] = 0
|
||||
m.logger.Info("storenode available", zap.Stringer("peerID", m.activeStorenode))
|
||||
|
||||
m.StorenodeAvailableOneshotEmitter.Emit(struct{}{}) // Maybe can be refactored away?
|
||||
m.StorenodeAvailableEmitter.Emit(m.activeStorenode)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *StorenodeCycle) GetActiveStorenode() peer.ID {
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
|
||||
return m.activeStorenode
|
||||
}
|
||||
|
||||
func (m *StorenodeCycle) IsStorenodeAvailable(peerID peer.ID) bool {
|
||||
return m.storenodeStatus(peerID) == connected
|
||||
}
|
||||
|
||||
func (m *StorenodeCycle) penalizeStorenode(id peer.ID) {
|
||||
m.peersMutex.Lock()
|
||||
defer m.peersMutex.Unlock()
|
||||
pInfo, ok := m.peers[id]
|
||||
if !ok {
|
||||
pInfo.status = disconnected
|
||||
}
|
||||
|
||||
pInfo.canConnectAfter = time.Now().Add(graylistBackoff)
|
||||
m.peers[id] = pInfo
|
||||
}
|
||||
|
||||
func (m *StorenodeCycle) verifyStorenodeStatus(ctx context.Context) {
|
||||
ticker := time.NewTicker(storenodeVerificationInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
err := m.disconnectStorenodeIfRequired(ctx)
|
||||
if err != nil {
|
||||
m.logger.Error("failed to handle storenode cycle event", zap.Error(err))
|
||||
continue
|
||||
}
|
||||
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *StorenodeCycle) disconnectStorenodeIfRequired(ctx context.Context) error {
|
||||
m.logger.Debug("wakuV2 storenode status verification")
|
||||
|
||||
if m.activeStorenode == "" {
|
||||
// No active storenode, find a new one
|
||||
m.Cycle(ctx)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check whether we want to disconnect the active storenode
|
||||
if m.failedRequests[m.activeStorenode] >= storenodeMaxFailedRequests {
|
||||
m.penalizeStorenode(m.activeStorenode)
|
||||
m.StorenodeNotWorkingEmitter.Emit(struct{}{})
|
||||
|
||||
m.logger.Info("too many failed requests", zap.Stringer("storenode", m.activeStorenode))
|
||||
m.failedRequests[m.activeStorenode] = 0
|
||||
return m.connectToNewStorenodeAndWait(ctx)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *StorenodeCycle) SetStorenodeConfigProvider(provider StorenodeConfigProvider) {
|
||||
m.storenodeConfigProvider = provider
|
||||
}
|
||||
|
||||
func (m *StorenodeCycle) WaitForAvailableStoreNode(ctx context.Context) bool {
|
||||
// Note: Add 1 second to the timeout, because the storenode cycle has a 1-second ticker, which doesn't tick on start.
// This can be improved after merging https://github.com/status-im/status-go/pull/4380.
// NOTE: https://stackoverflow.com/questions/32705582/how-to-get-time-tick-to-tick-immediately
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for !m.IsStorenodeAvailable(m.activeStorenode) {
|
||||
select {
|
||||
case <-m.StorenodeAvailableOneshotEmitter.Subscribe():
|
||||
case <-ctx.Done():
|
||||
if errors.Is(ctx.Err(), context.Canceled) {
|
||||
return
|
||||
}
|
||||
|
||||
// Wait for an additional second, but handle cancellation
|
||||
select {
|
||||
case <-time.After(1 * time.Second):
|
||||
case <-ctx.Done(): // context was cancelled
|
||||
}
|
||||
|
||||
return
|
||||
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-waitForWaitGroup(&wg):
|
||||
case <-ctx.Done():
|
||||
// Wait for an additional second, but handle cancellation
|
||||
select {
|
||||
case <-time.After(1 * time.Second):
|
||||
case <-ctx.Done(): // context was cancelled
|
||||
}
|
||||
}
|
||||
|
||||
return m.IsStorenodeAvailable(m.activeStorenode)
|
||||
}
|
||||
|
||||
func waitForWaitGroup(wg *sync.WaitGroup) <-chan struct{} {
|
||||
ch := make(chan struct{})
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(ch)
|
||||
}()
|
||||
return ch
|
||||
}
|
||||
|
||||
type storenodeTaskParameters struct {
|
||||
customPeerID peer.ID
|
||||
}
|
||||
|
||||
type StorenodeTaskOption func(*storenodeTaskParameters)
|
||||
|
||||
func WithPeerID(peerID peer.ID) StorenodeTaskOption {
|
||||
return func(stp *storenodeTaskParameters) {
|
||||
stp.customPeerID = peerID
|
||||
}
|
||||
}
|
||||
|
||||
func (m *StorenodeCycle) PerformStorenodeTask(fn func() error, options ...StorenodeTaskOption) error {
|
||||
params := storenodeTaskParameters{}
|
||||
for _, opt := range options {
|
||||
opt(¶ms)
|
||||
}
|
||||
|
||||
peerID := params.customPeerID
|
||||
if peerID == "" {
|
||||
peerID = m.GetActiveStorenode()
|
||||
}
|
||||
|
||||
if peerID == "" {
|
||||
return errors.New("storenode not available")
|
||||
}
|
||||
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
|
||||
var tries uint = 0
|
||||
for tries < storenodeMaxFailedRequests {
|
||||
if params.customPeerID == "" && m.storenodeStatus(peerID) != connected {
|
||||
return errors.New("storenode not available")
|
||||
}
|
||||
m.logger.Info("trying performing history requests", zap.Uint("try", tries), zap.Stringer("peerID", peerID))
|
||||
|
||||
// Perform request
|
||||
err := fn()
|
||||
if err == nil {
|
||||
// Reset failed requests
|
||||
m.logger.Debug("history request performed successfully", zap.Stringer("peerID", peerID))
|
||||
m.failedRequests[peerID] = 0
|
||||
return nil
|
||||
}
|
||||
|
||||
m.logger.Error("failed to perform history request",
|
||||
zap.Stringer("peerID", peerID),
|
||||
zap.Uint("tries", tries),
|
||||
zap.Error(err),
|
||||
)
|
||||
|
||||
tries++
|
||||
|
||||
if storeErr, ok := err.(*store.StoreError); ok {
|
||||
if storeErr.Code == http.StatusTooManyRequests {
|
||||
m.disconnectActiveStorenode(defaultBackoff)
|
||||
return fmt.Errorf("ratelimited at storenode %s: %w", peerID, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Increment failed requests
|
||||
m.failedRequests[peerID]++
|
||||
|
||||
// Change storenode
|
||||
if m.failedRequests[peerID] >= storenodeMaxFailedRequests {
|
||||
return errors.New("too many failed requests")
|
||||
}
|
||||
// Wait a couple of seconds so we don't spam the storenode
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
}
|
||||
return errors.New("failed to perform history request")
|
||||
}
|
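A hedged usage sketch of PerformStorenodeTask; cycle, logger, queryOnce, and pinnedPeer are assumptions for illustration, not identifiers from this diff:

// queryOnce would issue a single store request against cycle.GetActiveStorenode().
queryOnce := func() error { return nil }

// Retry against the active storenode, letting the cycle count failures and rotate nodes.
if err := cycle.PerformStorenodeTask(queryOnce); err != nil {
	logger.Error("history request failed on every attempt", zap.Error(err))
}

// Or force a specific storenode instead of the active one.
_ = cycle.PerformStorenodeTask(queryOnce, history.WithPeerID(pinnedPeer))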
vendor/github.com/waku-org/go-waku/waku/v2/api/history/emitters.go (generated, vendored, new file, 48 lines)
@ -0,0 +1,48 @@
|
|||
package history
|
||||
|
||||
import "sync"
|
||||
|
||||
type Emitter[T any] struct {
|
||||
sync.Mutex
|
||||
subscriptions []chan T
|
||||
}
|
||||
|
||||
func NewEmitter[T any]() *Emitter[T] {
|
||||
return &Emitter[T]{}
|
||||
}
|
||||
|
||||
func (s *Emitter[T]) Subscribe() <-chan T {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
c := make(chan T)
|
||||
s.subscriptions = append(s.subscriptions, c)
|
||||
return c
|
||||
}
|
||||
|
||||
func (s *Emitter[T]) Emit(value T) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
for _, sub := range s.subscriptions {
|
||||
sub <- value
|
||||
}
|
||||
}
|
||||
|
||||
type OneShotEmitter[T any] struct {
|
||||
Emitter[T]
|
||||
}
|
||||
|
||||
func NewOneshotEmitter[T any]() *OneShotEmitter[T] {
|
||||
return &OneShotEmitter[T]{}
|
||||
}
|
||||
|
||||
func (s *OneShotEmitter[T]) Emit(value T) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
for _, subs := range s.subscriptions {
|
||||
subs <- value
|
||||
close(subs)
|
||||
}
|
||||
s.subscriptions = nil
|
||||
}
|
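A hedged consumption sketch for these emitters; the subscription channels are unbuffered, so Emit blocks until every subscriber receives. cycle and logger are assumed to exist:

changed := cycle.StorenodeChangedEmitter.Subscribe()
go func() {
	// An empty peer.ID is emitted when the active storenode is disconnected.
	for id := range changed {
		logger.Info("active storenode changed", zap.Stringer("peerID", id))
	}
}()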
vendor/github.com/waku-org/go-waku/waku/v2/api/history/history.go (generated, vendored, new file, 296 lines)
@ -0,0 +1,296 @@
|
|||
package history
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/waku-org/go-waku/logging"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/store"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const maxTopicsPerRequest int = 10
|
||||
const mailserverRequestTimeout = 30 * time.Second
|
||||
|
||||
type work struct {
|
||||
criteria store.FilterCriteria
|
||||
cursor []byte
|
||||
limit uint64
|
||||
}
|
||||
|
||||
type HistoryRetriever struct {
|
||||
store Store
|
||||
logger *zap.Logger
|
||||
historyProcessor HistoryProcessor
|
||||
}
|
||||
|
||||
type HistoryProcessor interface {
|
||||
OnEnvelope(env *protocol.Envelope, processEnvelopes bool) error
|
||||
OnRequestFailed(requestID []byte, peerID peer.ID, err error)
|
||||
}
|
||||
|
||||
type Store interface {
|
||||
Query(ctx context.Context, criteria store.FilterCriteria, opts ...store.RequestOption) (store.Result, error)
|
||||
}
|
||||
|
||||
func NewHistoryRetriever(store Store, historyProcessor HistoryProcessor, logger *zap.Logger) *HistoryRetriever {
|
||||
return &HistoryRetriever{
|
||||
store: store,
|
||||
logger: logger.Named("history-retriever"),
|
||||
historyProcessor: historyProcessor,
|
||||
}
|
||||
}
|
||||
|
||||
func (hr *HistoryRetriever) Query(
|
||||
ctx context.Context,
|
||||
criteria store.FilterCriteria,
|
||||
storenodeID peer.ID,
|
||||
pageLimit uint64,
|
||||
shouldProcessNextPage func(int) (bool, uint64),
|
||||
processEnvelopes bool,
|
||||
) error {
|
||||
logger := hr.logger.With(
|
||||
logging.Timep("fromString", criteria.TimeStart),
|
||||
logging.Timep("toString", criteria.TimeEnd),
|
||||
zap.String("pubsubTopic", criteria.PubsubTopic),
|
||||
zap.Strings("contentTopics", criteria.ContentTopicsList()),
|
||||
zap.Int64p("from", criteria.TimeStart),
|
||||
zap.Int64p("to", criteria.TimeEnd),
|
||||
)
|
||||
|
||||
logger.Info("syncing")
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
workWg := sync.WaitGroup{}
|
||||
workCh := make(chan work, 1000) // each batch item is split into bunches of up to 10 content topics and sent to this channel
|
||||
workCompleteCh := make(chan struct{}) // once all batch items are processed, this channel is triggered
|
||||
semaphore := make(chan struct{}, 3) // limit the number of concurrent queries
|
||||
errCh := make(chan error)
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
// TODO: refactor this by extracting the consumer into a separate go routine.
|
||||
|
||||
// Producer
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer func() {
|
||||
logger.Debug("mailserver batch producer complete")
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
contentTopicList := criteria.ContentTopics.ToList()
|
||||
|
||||
// TODO: split into 24h batches
|
||||
|
||||
allWorks := int(math.Ceil(float64(len(contentTopicList)) / float64(maxTopicsPerRequest)))
|
||||
workWg.Add(allWorks)
|
||||
|
||||
for i := 0; i < len(contentTopicList); i += maxTopicsPerRequest {
|
||||
j := i + maxTopicsPerRequest
|
||||
if j > len(contentTopicList) {
|
||||
j = len(contentTopicList)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
logger.Debug("processBatch producer - context done")
|
||||
return
|
||||
default:
|
||||
logger.Debug("processBatch producer - creating work")
|
||||
workCh <- work{
|
||||
criteria: store.FilterCriteria{
|
||||
ContentFilter: protocol.NewContentFilter(criteria.PubsubTopic, contentTopicList[i:j]...),
|
||||
TimeStart: criteria.TimeStart,
|
||||
TimeEnd: criteria.TimeEnd,
|
||||
},
|
||||
limit: pageLimit,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
go func() {
|
||||
workWg.Wait()
|
||||
workCompleteCh <- struct{}{}
|
||||
}()
|
||||
|
||||
logger.Debug("processBatch producer complete")
|
||||
}()
|
||||
|
||||
var result error
|
||||
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
logger.Debug("processBatch cleanup - context done")
|
||||
result = ctx.Err()
|
||||
if errors.Is(result, context.Canceled) {
|
||||
result = nil
|
||||
}
|
||||
break loop
|
||||
case w, ok := <-workCh:
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
// continue...
|
||||
}
|
||||
|
||||
logger.Debug("processBatch - received work")
|
||||
|
||||
semaphore <- struct{}{}
|
||||
go func(w work) { // Consumer
|
||||
defer func() {
|
||||
workWg.Done()
|
||||
<-semaphore
|
||||
}()
|
||||
|
||||
queryCtx, queryCancel := context.WithTimeout(ctx, mailserverRequestTimeout)
|
||||
cursor, envelopesCount, err := hr.createMessagesRequest(queryCtx, storenodeID, w.criteria, w.cursor, w.limit, true, processEnvelopes, logger)
|
||||
queryCancel()
|
||||
|
||||
if err != nil {
|
||||
logger.Debug("failed to send request", zap.Error(err))
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
|
||||
processNextPage := true
|
||||
nextPageLimit := pageLimit
|
||||
if shouldProcessNextPage != nil {
|
||||
processNextPage, nextPageLimit = shouldProcessNextPage(envelopesCount)
|
||||
}
|
||||
|
||||
if !processNextPage {
|
||||
return
|
||||
}
|
||||
|
||||
// Check the cursor after calling `shouldProcessNextPage`.
// The app might process the fetched envelopes in the callback for its own needs.
|
||||
if cursor == nil {
|
||||
return
|
||||
}
|
||||
|
||||
logger.Debug("processBatch producer - creating work (cursor)")
|
||||
|
||||
workWg.Add(1)
|
||||
workCh <- work{
|
||||
criteria: w.criteria,
|
||||
cursor: cursor,
|
||||
limit: nextPageLimit,
|
||||
}
|
||||
}(w)
|
||||
case err := <-errCh:
|
||||
logger.Debug("processBatch - received error", zap.Error(err))
|
||||
cancel() // Kill go routines
|
||||
return err
|
||||
case <-workCompleteCh:
|
||||
logger.Debug("processBatch - all jobs complete")
|
||||
cancel() // Kill go routines
|
||||
}
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
logger.Info("synced topic", zap.NamedError("hasError", result))
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func (hr *HistoryRetriever) createMessagesRequest(
|
||||
ctx context.Context,
|
||||
peerID peer.ID,
|
||||
criteria store.FilterCriteria,
|
||||
cursor []byte,
|
||||
limit uint64,
|
||||
waitForResponse bool,
|
||||
processEnvelopes bool,
|
||||
logger *zap.Logger,
|
||||
) (storeCursor []byte, envelopesCount int, err error) {
|
||||
if waitForResponse {
|
||||
resultCh := make(chan struct {
|
||||
storeCursor []byte
|
||||
envelopesCount int
|
||||
err error
|
||||
})
|
||||
|
||||
go func() {
|
||||
storeCursor, envelopesCount, err = hr.requestStoreMessages(ctx, peerID, criteria, cursor, limit, processEnvelopes)
|
||||
resultCh <- struct {
|
||||
storeCursor []byte
|
||||
envelopesCount int
|
||||
err error
|
||||
}{storeCursor, envelopesCount, err}
|
||||
}()
|
||||
|
||||
select {
|
||||
case result := <-resultCh:
|
||||
return result.storeCursor, result.envelopesCount, result.err
|
||||
case <-ctx.Done():
|
||||
return nil, 0, ctx.Err()
|
||||
}
|
||||
} else {
|
||||
go func() {
|
||||
_, _, err = hr.requestStoreMessages(ctx, peerID, criteria, cursor, limit, false)
|
||||
if err != nil {
|
||||
logger.Error("failed to request store messages", zap.Error(err))
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (hr *HistoryRetriever) requestStoreMessages(ctx context.Context, peerID peer.ID, criteria store.FilterCriteria, cursor []byte, limit uint64, processEnvelopes bool) ([]byte, int, error) {
|
||||
requestID := protocol.GenerateRequestID()
|
||||
logger := hr.logger.With(zap.String("requestID", hexutil.Encode(requestID)), zap.Stringer("peerID", peerID))
|
||||
|
||||
opts := []store.RequestOption{
|
||||
store.WithPaging(false, limit),
|
||||
store.WithRequestID(requestID),
|
||||
store.WithPeer(peerID),
|
||||
store.WithCursor(cursor)}
|
||||
|
||||
logger.Debug("store.query",
|
||||
logging.Timep("startTime", criteria.TimeStart),
|
||||
logging.Timep("endTime", criteria.TimeEnd),
|
||||
zap.Strings("contentTopics", criteria.ContentTopics.ToList()),
|
||||
zap.String("pubsubTopic", criteria.PubsubTopic),
|
||||
zap.String("cursor", hexutil.Encode(cursor)),
|
||||
)
|
||||
|
||||
queryStart := time.Now()
|
||||
result, err := hr.store.Query(ctx, criteria, opts...)
|
||||
queryDuration := time.Since(queryStart)
|
||||
if err != nil {
|
||||
logger.Error("error querying storenode", zap.Error(err))
|
||||
|
||||
hr.historyProcessor.OnRequestFailed(requestID, peerID, err)
|
||||
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
messages := result.Messages()
|
||||
envelopesCount := len(messages)
|
||||
logger.Debug("store.query response", zap.Duration("queryDuration", queryDuration), zap.Int("numMessages", envelopesCount), zap.Bool("hasCursor", result.IsComplete() && result.Cursor() != nil))
|
||||
for _, mkv := range messages {
|
||||
envelope := protocol.NewEnvelope(mkv.Message, mkv.Message.GetTimestamp(), mkv.GetPubsubTopic())
|
||||
err := hr.historyProcessor.OnEnvelope(envelope, processEnvelopes)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
}
|
||||
return result.Cursor(), envelopesCount, nil
|
||||
}
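A hedged wiring sketch for the retriever above (status-go's real HistoryProcessor adapter appears later in this diff); wakuNode, myProcessor, and the criteria values are assumptions:

retriever := history.NewHistoryRetriever(wakuNode.Store(), myProcessor, logger)

criteria := store.FilterCriteria{
	ContentFilter: protocol.NewContentFilter(pubsubTopic, contentTopics...),
	TimeStart:     proto.Int64(from.UnixNano()),
	TimeEnd:       proto.Int64(to.UnixNano()),
}

// Pull pages of up to 100 envelopes from the given storenode until the query is exhausted.
err := retriever.Query(ctx, criteria, storenodeID, 100, nil, true)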
|
|
@ -0,0 +1,32 @@
|
|||
package history
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
type SortedStorenode struct {
|
||||
Storenode peer.ID
|
||||
RTT time.Duration
|
||||
CanConnectAfter time.Time
|
||||
}
|
||||
|
||||
type byRTTMsAndCanConnectBefore []SortedStorenode
|
||||
|
||||
func (s byRTTMsAndCanConnectBefore) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func (s byRTTMsAndCanConnectBefore) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
func (s byRTTMsAndCanConnectBefore) Less(i, j int) bool {
|
||||
// Slightly inaccurate, since the sorting is time-sensitive, but it does not matter much here
|
||||
now := time.Now()
|
||||
if s[i].CanConnectAfter.Before(now) && s[j].CanConnectAfter.Before(now) {
|
||||
return s[i].RTT < s[j].RTT
|
||||
}
|
||||
return s[i].CanConnectAfter.Before(s[j].CanConnectAfter)
|
||||
}
|
vendor/github.com/waku-org/go-waku/waku/v2/api/missing/default_requestor.go (generated, vendored, new file, 33 lines)
@ -0,0 +1,33 @@
|
|||
package missing
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/waku-org/go-waku/waku/v2/api/common"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/store"
|
||||
)
|
||||
|
||||
func NewDefaultStorenodeRequestor(store *store.WakuStore) StorenodeRequestor {
|
||||
return &defaultStorenodeRequestor{
|
||||
store: store,
|
||||
}
|
||||
}
|
||||
|
||||
type defaultStorenodeRequestor struct {
|
||||
store *store.WakuStore
|
||||
}
|
||||
|
||||
func (d *defaultStorenodeRequestor) GetMessagesByHash(ctx context.Context, peerID peer.ID, pageSize uint64, messageHashes []pb.MessageHash) (common.StoreRequestResult, error) {
|
||||
return d.store.QueryByHash(ctx, messageHashes, store.WithPeer(peerID), store.WithPaging(false, pageSize))
|
||||
}
|
||||
|
||||
func (d *defaultStorenodeRequestor) QueryWithCriteria(ctx context.Context, peerID peer.ID, pageSize uint64, pubsubTopic string, contentTopics []string, from *int64, to *int64) (common.StoreRequestResult, error) {
|
||||
return d.store.Query(ctx, store.FilterCriteria{
|
||||
ContentFilter: protocol.NewContentFilter(pubsubTopic, contentTopics...),
|
||||
TimeStart: from,
|
||||
TimeEnd: to,
|
||||
}, store.WithPeer(peerID), store.WithPaging(false, pageSize), store.IncludeData(false))
|
||||
}
|
|
@ -11,9 +11,9 @@ import (
|
|||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/waku-org/go-waku/logging"
|
||||
"github.com/waku-org/go-waku/waku/v2/api/common"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/store"
|
||||
"github.com/waku-org/go-waku/waku/v2/timesource"
|
||||
"github.com/waku-org/go-waku/waku/v2/utils"
|
||||
"go.uber.org/zap"
|
||||
|
@ -22,6 +22,7 @@ import (
|
|||
|
||||
const maxContentTopicsPerRequest = 10
|
||||
const maxMsgHashesPerRequest = 50
|
||||
const messageFetchPageSize = 100
|
||||
|
||||
// MessageTracker should keep track of messages it has seen before and
|
||||
// provide a way to determine whether a message exists or not. This
|
||||
|
@ -30,25 +31,30 @@ type MessageTracker interface {
|
|||
MessageExists(pb.MessageHash) (bool, error)
|
||||
}
|
||||
|
||||
type StorenodeRequestor interface {
|
||||
GetMessagesByHash(ctx context.Context, peerID peer.ID, pageSize uint64, messageHashes []pb.MessageHash) (common.StoreRequestResult, error)
|
||||
QueryWithCriteria(ctx context.Context, peerID peer.ID, pageSize uint64, pubsubTopic string, contentTopics []string, from *int64, to *int64) (common.StoreRequestResult, error)
|
||||
}
|
||||
|
||||
// MissingMessageVerifier is used to periodically retrieve, from store nodes, missing messages that match some specific criteria
|
||||
type MissingMessageVerifier struct {
|
||||
ctx context.Context
|
||||
params missingMessageVerifierParams
|
||||
|
||||
messageTracker MessageTracker
|
||||
storenodeRequestor StorenodeRequestor
|
||||
messageTracker MessageTracker
|
||||
|
||||
criteriaInterest map[string]criteriaInterest // Track message verification requests and the last time each pubsub topic was verified for missing messages
|
||||
criteriaInterestMu sync.RWMutex
|
||||
|
||||
C <-chan *protocol.Envelope
|
||||
|
||||
store *store.WakuStore
|
||||
timesource timesource.Timesource
|
||||
logger *zap.Logger
|
||||
}
|
||||
|
||||
// NewMissingMessageVerifier creates an instance of a MissingMessageVerifier
|
||||
func NewMissingMessageVerifier(store *store.WakuStore, messageTracker MessageTracker, timesource timesource.Timesource, logger *zap.Logger, options ...MissingMessageVerifierOption) *MissingMessageVerifier {
|
||||
func NewMissingMessageVerifier(storenodeRequester StorenodeRequestor, messageTracker MessageTracker, timesource timesource.Timesource, logger *zap.Logger, options ...MissingMessageVerifierOption) *MissingMessageVerifier {
|
||||
options = append(defaultMissingMessagesVerifierOptions, options...)
|
||||
params := missingMessageVerifierParams{}
|
||||
for _, opt := range options {
|
||||
|
@ -56,11 +62,11 @@ func NewMissingMessageVerifier(store *store.WakuStore, messageTracker MessageTra
|
|||
}
|
||||
|
||||
return &MissingMessageVerifier{
|
||||
store: store,
|
||||
timesource: timesource,
|
||||
messageTracker: messageTracker,
|
||||
logger: logger.Named("missing-msg-verifier"),
|
||||
params: params,
|
||||
storenodeRequestor: storenodeRequester,
|
||||
timesource: timesource,
|
||||
messageTracker: messageTracker,
|
||||
logger: logger.Named("missing-msg-verifier"),
|
||||
params: params,
|
||||
}
|
||||
}
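A hedged construction sketch for the decoupled verifier; wakuNode and messageTracker are assumptions for illustration:

requestor := missing.NewDefaultStorenodeRequestor(wakuNode.Store())
verifier := missing.NewMissingMessageVerifier(requestor, messageTracker, wakuNode.Timesource(), logger)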
|
||||
|
||||
|
@ -178,7 +184,7 @@ func (m *MissingMessageVerifier) fetchHistory(c chan<- *protocol.Envelope, inter
|
|||
}
|
||||
}
|
||||
|
||||
func (m *MissingMessageVerifier) storeQueryWithRetry(ctx context.Context, queryFunc func(ctx context.Context) (*store.Result, error), logger *zap.Logger, logMsg string) (*store.Result, error) {
|
||||
func (m *MissingMessageVerifier) storeQueryWithRetry(ctx context.Context, queryFunc func(ctx context.Context) (common.StoreRequestResult, error), logger *zap.Logger, logMsg string) (common.StoreRequestResult, error) {
|
||||
retry := true
|
||||
count := 1
|
||||
for retry && count <= m.params.maxAttemptsToRetrieveHistory {
|
||||
|
@ -212,12 +218,16 @@ func (m *MissingMessageVerifier) fetchMessagesBatch(c chan<- *protocol.Envelope,
|
|||
logging.Epoch("to", now),
|
||||
)
|
||||
|
||||
result, err := m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (*store.Result, error) {
|
||||
return m.store.Query(ctx, store.FilterCriteria{
|
||||
ContentFilter: protocol.NewContentFilter(interest.contentFilter.PubsubTopic, contentTopics[batchFrom:batchTo]...),
|
||||
TimeStart: proto.Int64(interest.lastChecked.Add(-m.params.delay).UnixNano()),
|
||||
TimeEnd: proto.Int64(now.Add(-m.params.delay).UnixNano()),
|
||||
}, store.WithPeer(interest.peerID), store.WithPaging(false, 100), store.IncludeData(false))
|
||||
result, err := m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (common.StoreRequestResult, error) {
|
||||
return m.storenodeRequestor.QueryWithCriteria(
|
||||
ctx,
|
||||
interest.peerID,
|
||||
messageFetchPageSize,
|
||||
interest.contentFilter.PubsubTopic,
|
||||
contentTopics[batchFrom:batchTo],
|
||||
proto.Int64(interest.lastChecked.Add(-m.params.delay).UnixNano()),
|
||||
proto.Int64(now.Add(-m.params.delay).UnixNano()),
|
||||
)
|
||||
}, logger, "retrieving history to check for missing messages")
|
||||
if err != nil {
|
||||
if !errors.Is(err, context.Canceled) {
|
||||
|
@ -243,7 +253,7 @@ func (m *MissingMessageVerifier) fetchMessagesBatch(c chan<- *protocol.Envelope,
|
|||
missingHashes = append(missingHashes, hash)
|
||||
}
|
||||
|
||||
result, err = m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (*store.Result, error) {
|
||||
result, err = m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (common.StoreRequestResult, error) {
|
||||
if err = result.Next(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -282,10 +292,10 @@ func (m *MissingMessageVerifier) fetchMessagesBatch(c chan<- *protocol.Envelope,
|
|||
defer utils.LogOnPanic()
|
||||
defer wg.Wait()
|
||||
|
||||
result, err := m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (*store.Result, error) {
|
||||
result, err := m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (common.StoreRequestResult, error) {
|
||||
queryCtx, cancel := context.WithTimeout(ctx, m.params.storeQueryTimeout)
|
||||
defer cancel()
|
||||
return m.store.QueryByHash(queryCtx, messageHashes, store.WithPeer(interest.peerID), store.WithPaging(false, maxMsgHashesPerRequest))
|
||||
return m.storenodeRequestor.GetMessagesByHash(queryCtx, interest.peerID, maxMsgHashesPerRequest, messageHashes)
|
||||
}, logger, "retrieving missing messages")
|
||||
if err != nil {
|
||||
if !errors.Is(err, context.Canceled) {
|
||||
|
@ -303,7 +313,7 @@ func (m *MissingMessageVerifier) fetchMessagesBatch(c chan<- *protocol.Envelope,
|
|||
}
|
||||
}
|
||||
|
||||
result, err = m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (*store.Result, error) {
|
||||
result, err = m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (common.StoreRequestResult, error) {
|
||||
if err = result.Next(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
vendor/github.com/waku-org/go-waku/waku/v2/api/publish/default_publisher.go (generated, vendored, new file, 50 lines)
@ -0,0 +1,50 @@
|
|||
package publish
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/lightpush"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
|
||||
)
|
||||
|
||||
var ErrRelayNotAvailable = errors.New("relay is not available")
|
||||
var ErrLightpushNotAvailable = errors.New("lightpush is not available")
|
||||
|
||||
func NewDefaultPublisher(lightpush *lightpush.WakuLightPush, relay *relay.WakuRelay) Publisher {
|
||||
return &defaultPublisher{
|
||||
lightpush: lightpush,
|
||||
relay: relay,
|
||||
}
|
||||
}
|
||||
|
||||
type defaultPublisher struct {
|
||||
lightpush *lightpush.WakuLightPush
|
||||
relay *relay.WakuRelay
|
||||
}
|
||||
|
||||
func (d *defaultPublisher) RelayListPeers(pubsubTopic string) ([]peer.ID, error) {
|
||||
if d.relay == nil {
|
||||
return nil, ErrRelayNotAvailable
|
||||
}
|
||||
|
||||
return d.relay.PubSub().ListPeers(pubsubTopic), nil
|
||||
}
|
||||
|
||||
func (d *defaultPublisher) RelayPublish(ctx context.Context, message *pb.WakuMessage, pubsubTopic string) (pb.MessageHash, error) {
|
||||
if d.relay == nil {
|
||||
return pb.MessageHash{}, ErrRelayNotAvailable
|
||||
}
|
||||
|
||||
return d.relay.Publish(ctx, message, relay.WithPubSubTopic(pubsubTopic))
|
||||
}
|
||||
|
||||
func (d *defaultPublisher) LightpushPublish(ctx context.Context, message *pb.WakuMessage, pubsubTopic string, maxPeers int) (pb.MessageHash, error) {
|
||||
if d.lightpush == nil {
|
||||
return pb.MessageHash{}, ErrLightpushNotAvailable
|
||||
}
|
||||
|
||||
return d.lightpush.Publish(ctx, message, lightpush.WithPubSubTopic(pubsubTopic), lightpush.WithMaxPeers(maxPeers))
|
||||
}
|
vendor/github.com/waku-org/go-waku/waku/v2/api/publish/default_verifier.go (generated, vendored, new file, 39 lines)
@ -0,0 +1,39 @@
|
|||
package publish
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/store"
|
||||
)
|
||||
|
||||
func NewDefaultStorenodeMessageVerifier(store *store.WakuStore) StorenodeMessageVerifier {
|
||||
return &defaultStorenodeMessageVerifier{
|
||||
store: store,
|
||||
}
|
||||
}
|
||||
|
||||
type defaultStorenodeMessageVerifier struct {
|
||||
store *store.WakuStore
|
||||
}
|
||||
|
||||
func (d *defaultStorenodeMessageVerifier) MessageHashesExist(ctx context.Context, requestID []byte, peerID peer.ID, pageSize uint64, messageHashes []pb.MessageHash) ([]pb.MessageHash, error) {
|
||||
var opts []store.RequestOption
|
||||
opts = append(opts, store.WithRequestID(requestID))
|
||||
opts = append(opts, store.WithPeer(peerID))
|
||||
opts = append(opts, store.WithPaging(false, pageSize))
|
||||
opts = append(opts, store.IncludeData(false))
|
||||
|
||||
response, err := d.store.QueryByHash(ctx, messageHashes, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := make([]pb.MessageHash, len(response.Messages()))
|
||||
for i, msg := range response.Messages() {
|
||||
result[i] = msg.WakuMessageHash()
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
|
@ -10,9 +10,9 @@ import (
|
|||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
apicommon "github.com/waku-org/go-waku/waku/v2/api/common"
|
||||
"github.com/waku-org/go-waku/waku/v2/api/history"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/store"
|
||||
"github.com/waku-org/go-waku/waku/v2/timesource"
|
||||
"github.com/waku-org/go-waku/waku/v2/utils"
|
||||
"go.uber.org/zap"
|
||||
|
@ -29,7 +29,11 @@ type ISentCheck interface {
|
|||
Start()
|
||||
Add(topic string, messageID common.Hash, sentTime uint32)
|
||||
DeleteByMessageIDs(messageIDs []common.Hash)
|
||||
SetStorePeerID(peerID peer.ID)
|
||||
}
|
||||
|
||||
type StorenodeMessageVerifier interface {
|
||||
// MessageHashesExist returns, from a list of message hashes, the subset that the store node reports as existing
|
||||
MessageHashesExist(ctx context.Context, requestID []byte, peerID peer.ID, pageSize uint64, messageHashes []pb.MessageHash) ([]pb.MessageHash, error)
|
||||
}
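A hedged sketch of how the sent-check is now assembled from these interfaces; node (the go-waku node), w (status-go's Waku), storedCh, and expiredCh are assumptions:

verifier := publish.NewDefaultStorenodeMessageVerifier(node.Store())
check := publish.NewMessageSentCheck(ctx, verifier, w.StorenodeCycle, node.Timesource(), storedCh, expiredCh, logger)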
|
||||
|
||||
// MessageSentCheck tracks the outgoing messages and checks them against the store node
|
||||
|
@ -38,11 +42,11 @@ type ISentCheck interface {
|
|||
type MessageSentCheck struct {
|
||||
messageIDs map[string]map[common.Hash]uint32
|
||||
messageIDsMu sync.RWMutex
|
||||
storePeerID peer.ID
|
||||
messageStoredChan chan common.Hash
|
||||
messageExpiredChan chan common.Hash
|
||||
ctx context.Context
|
||||
store *store.WakuStore
|
||||
messageVerifier StorenodeMessageVerifier
|
||||
storenodeCycle *history.StorenodeCycle
|
||||
timesource timesource.Timesource
|
||||
logger *zap.Logger
|
||||
maxHashQueryLength uint64
|
||||
|
@ -53,14 +57,15 @@ type MessageSentCheck struct {
|
|||
}
|
||||
|
||||
// NewMessageSentCheck creates a new instance of MessageSentCheck with default parameters
|
||||
func NewMessageSentCheck(ctx context.Context, store *store.WakuStore, timesource timesource.Timesource, msgStoredChan chan common.Hash, msgExpiredChan chan common.Hash, logger *zap.Logger) *MessageSentCheck {
|
||||
func NewMessageSentCheck(ctx context.Context, messageVerifier StorenodeMessageVerifier, cycle *history.StorenodeCycle, timesource timesource.Timesource, msgStoredChan chan common.Hash, msgExpiredChan chan common.Hash, logger *zap.Logger) *MessageSentCheck {
|
||||
return &MessageSentCheck{
|
||||
messageIDs: make(map[string]map[common.Hash]uint32),
|
||||
messageIDsMu: sync.RWMutex{},
|
||||
messageStoredChan: msgStoredChan,
|
||||
messageExpiredChan: msgExpiredChan,
|
||||
ctx: ctx,
|
||||
store: store,
|
||||
messageVerifier: messageVerifier,
|
||||
storenodeCycle: cycle,
|
||||
timesource: timesource,
|
||||
logger: logger,
|
||||
maxHashQueryLength: DefaultMaxHashQueryLength,
|
||||
|
@ -139,11 +144,6 @@ func (m *MessageSentCheck) DeleteByMessageIDs(messageIDs []common.Hash) {
|
|||
}
|
||||
}
|
||||
|
||||
// SetStorePeerID sets the peer id of store node
|
||||
func (m *MessageSentCheck) SetStorePeerID(peerID peer.ID) {
|
||||
m.storePeerID = peerID
|
||||
}
|
||||
|
||||
// Start checks if the tracked outgoing messages are stored periodically
|
||||
func (m *MessageSentCheck) Start() {
|
||||
defer utils.LogOnPanic()
|
||||
|
@ -211,18 +211,13 @@ func (m *MessageSentCheck) Start() {
|
|||
}
|
||||
|
||||
func (m *MessageSentCheck) messageHashBasedQuery(ctx context.Context, hashes []common.Hash, relayTime []uint32, pubsubTopic string) []common.Hash {
|
||||
selectedPeer := m.storePeerID
|
||||
selectedPeer := m.storenodeCycle.GetActiveStorenode()
|
||||
if selectedPeer == "" {
|
||||
m.logger.Error("no store peer id available", zap.String("pubsubTopic", pubsubTopic))
|
||||
return []common.Hash{}
|
||||
}
|
||||
|
||||
var opts []store.RequestOption
|
||||
requestID := protocol.GenerateRequestID()
|
||||
opts = append(opts, store.WithRequestID(requestID))
|
||||
opts = append(opts, store.WithPeer(selectedPeer))
|
||||
opts = append(opts, store.WithPaging(false, m.maxHashQueryLength))
|
||||
opts = append(opts, store.IncludeData(false))
|
||||
|
||||
messageHashes := make([]pb.MessageHash, len(hashes))
|
||||
for i, hash := range hashes {
|
||||
|
@ -233,20 +228,20 @@ func (m *MessageSentCheck) messageHashBasedQuery(ctx context.Context, hashes []c
|
|||
|
||||
queryCtx, cancel := context.WithTimeout(ctx, m.storeQueryTimeout)
|
||||
defer cancel()
|
||||
result, err := m.store.QueryByHash(queryCtx, messageHashes, opts...)
|
||||
result, err := m.messageVerifier.MessageHashesExist(queryCtx, requestID, selectedPeer, m.maxHashQueryLength, messageHashes)
|
||||
if err != nil {
|
||||
m.logger.Error("store.queryByHash failed", zap.String("requestID", hexutil.Encode(requestID)), zap.Stringer("peerID", selectedPeer), zap.Error(err))
|
||||
return []common.Hash{}
|
||||
}
|
||||
|
||||
m.logger.Debug("store.queryByHash result", zap.String("requestID", hexutil.Encode(requestID)), zap.Int("messages", len(result.Messages())))
|
||||
m.logger.Debug("store.queryByHash result", zap.String("requestID", hexutil.Encode(requestID)), zap.Int("messages", len(result)))
|
||||
|
||||
var ackHashes []common.Hash
|
||||
var missedHashes []common.Hash
|
||||
for i, hash := range hashes {
|
||||
found := false
|
||||
for _, msg := range result.Messages() {
|
||||
if bytes.Equal(msg.GetMessageHash(), hash.Bytes()) {
|
||||
for _, msgHash := range result {
|
||||
if bytes.Equal(msgHash.Bytes(), hash.Bytes()) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
|
|
|
@ -8,8 +8,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/lightpush"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
@ -37,10 +36,20 @@ func (pm PublishMethod) String() string {
|
|||
}
|
||||
}
|
||||
|
||||
type Publisher interface {
|
||||
// RelayListPeers returns the list of peers for a pubsub topic
|
||||
RelayListPeers(pubsubTopic string) ([]peer.ID, error)
|
||||
|
||||
// RelayPublish publishes a message via WakuRelay
|
||||
RelayPublish(ctx context.Context, message *pb.WakuMessage, pubsubTopic string) (pb.MessageHash, error)
|
||||
|
||||
// LightpushPublish publishes a message via WakuLightPush
|
||||
LightpushPublish(ctx context.Context, message *pb.WakuMessage, pubsubTopic string, maxPeers int) (pb.MessageHash, error)
|
||||
}
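A hedged sketch of building a MessageSender with the default publisher, which keeps the previous lightpush/relay behaviour; node and logger are assumptions:

pub := publish.NewDefaultPublisher(node.Lightpush(), node.Relay())
sender, err := publish.NewMessageSender(publish.LightPush, pub, logger)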
|
||||
|
||||
type MessageSender struct {
|
||||
publishMethod PublishMethod
|
||||
lightPush *lightpush.WakuLightPush
|
||||
relay *relay.WakuRelay
|
||||
publisher Publisher
|
||||
messageSentCheck ISentCheck
|
||||
rateLimiter *PublishRateLimiter
|
||||
logger *zap.Logger
|
||||
|
@ -65,14 +74,13 @@ func (r *Request) WithPublishMethod(publishMethod PublishMethod) *Request {
|
|||
return r
|
||||
}
|
||||
|
||||
func NewMessageSender(publishMethod PublishMethod, lightPush *lightpush.WakuLightPush, relay *relay.WakuRelay, logger *zap.Logger) (*MessageSender, error) {
|
||||
func NewMessageSender(publishMethod PublishMethod, publisher Publisher, logger *zap.Logger) (*MessageSender, error) {
|
||||
if publishMethod == UnknownMethod {
|
||||
return nil, errors.New("publish method is required")
|
||||
}
|
||||
return &MessageSender{
|
||||
publishMethod: publishMethod,
|
||||
lightPush: lightPush,
|
||||
relay: relay,
|
||||
publisher: publisher,
|
||||
rateLimiter: NewPublishRateLimiter(DefaultPublishingLimiterRate, DefaultPublishingLimitBurst),
|
||||
logger: logger,
|
||||
}, nil
|
||||
|
@ -109,26 +117,23 @@ func (ms *MessageSender) Send(req *Request) error {
|
|||
|
||||
switch publishMethod {
|
||||
case LightPush:
|
||||
if ms.lightPush == nil {
|
||||
return errors.New("lightpush is not available")
|
||||
}
|
||||
logger.Info("publishing message via lightpush")
|
||||
_, err := ms.lightPush.Publish(
|
||||
_, err := ms.publisher.LightpushPublish(
|
||||
req.ctx,
|
||||
req.envelope.Message(),
|
||||
lightpush.WithPubSubTopic(req.envelope.PubsubTopic()),
|
||||
lightpush.WithMaxPeers(DefaultPeersToPublishForLightpush),
|
||||
req.envelope.PubsubTopic(),
|
||||
DefaultPeersToPublishForLightpush,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case Relay:
|
||||
if ms.relay == nil {
|
||||
return errors.New("relay is not available")
|
||||
peers, err := ms.publisher.RelayListPeers(req.envelope.PubsubTopic())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
peerCnt := len(ms.relay.PubSub().ListPeers(req.envelope.PubsubTopic()))
|
||||
logger.Info("publishing message via relay", zap.Int("peerCnt", peerCnt))
|
||||
_, err := ms.relay.Publish(req.ctx, req.envelope.Message(), relay.WithPubSubTopic(req.envelope.PubsubTopic()))
|
||||
logger.Info("publishing message via relay", zap.Int("peerCnt", len(peers)))
|
||||
_, err = ms.publisher.RelayPublish(req.ctx, req.envelope.Message(), req.envelope.PubsubTopic())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -162,9 +167,3 @@ func (ms *MessageSender) MessagesDelivered(messageIDs []common.Hash) {
|
|||
ms.messageSentCheck.DeleteByMessageIDs(messageIDs)
|
||||
}
|
||||
}
|
||||
|
||||
func (ms *MessageSender) SetStorePeerID(peerID peer.ID) {
|
||||
if ms.messageSentCheck != nil {
|
||||
ms.messageSentCheck.SetStorePeerID(peerID)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -50,8 +50,8 @@ type StoreError struct {
|
|||
}
|
||||
|
||||
// NewStoreError creates a new instance of StoreError
|
||||
func NewStoreError(code int, message string) StoreError {
|
||||
return StoreError{
|
||||
func NewStoreError(code int, message string) *StoreError {
|
||||
return &StoreError{
|
||||
Code: code,
|
||||
Message: message,
|
||||
}
|
||||
|
@ -99,7 +99,7 @@ func (s *WakuStore) SetHost(h host.Host) {
|
|||
// Request is used to send a store query. This function requires understanding how to prepare a store query
|
||||
// and most of the time you can use `Query`, `QueryByHash` and `Exists` instead, as they provide
|
||||
// a simpler API
|
||||
func (s *WakuStore) Request(ctx context.Context, criteria Criteria, opts ...RequestOption) (*Result, error) {
|
||||
func (s *WakuStore) Request(ctx context.Context, criteria Criteria, opts ...RequestOption) (Result, error) {
|
||||
params := new(Parameters)
|
||||
|
||||
optList := DefaultOptions()
|
||||
|
@ -182,7 +182,7 @@ func (s *WakuStore) Request(ctx context.Context, criteria Criteria, opts ...Requ
|
|||
return nil, err
|
||||
}
|
||||
|
||||
result := &Result{
|
||||
result := &resultImpl{
|
||||
store: s,
|
||||
messages: response.Messages,
|
||||
storeRequest: storeRequest,
|
||||
|
@ -195,12 +195,12 @@ func (s *WakuStore) Request(ctx context.Context, criteria Criteria, opts ...Requ
|
|||
}
|
||||
|
||||
// Query retrieves all the messages that match a criteria. Use the options to indicate whether to return the message themselves or not.
|
||||
func (s *WakuStore) Query(ctx context.Context, criteria FilterCriteria, opts ...RequestOption) (*Result, error) {
|
||||
func (s *WakuStore) Query(ctx context.Context, criteria FilterCriteria, opts ...RequestOption) (Result, error) {
|
||||
return s.Request(ctx, criteria, opts...)
|
||||
}
|
||||
|
||||
// Query retrieves all the messages with specific message hashes
|
||||
func (s *WakuStore) QueryByHash(ctx context.Context, messageHashes []wpb.MessageHash, opts ...RequestOption) (*Result, error) {
|
||||
func (s *WakuStore) QueryByHash(ctx context.Context, messageHashes []wpb.MessageHash, opts ...RequestOption) (Result, error) {
|
||||
return s.Request(ctx, MessageHashCriteria{messageHashes}, opts...)
|
||||
}
|
||||
|
||||
|
@ -214,17 +214,17 @@ func (s *WakuStore) Exists(ctx context.Context, messageHash wpb.MessageHash, opt
|
|||
return false, err
|
||||
}
|
||||
|
||||
return len(result.messages) != 0, nil
|
||||
return len(result.Messages()) != 0, nil
|
||||
}
|
||||
|
||||
func (s *WakuStore) next(ctx context.Context, r *Result, opts ...RequestOption) (*Result, error) {
|
||||
func (s *WakuStore) next(ctx context.Context, r Result, opts ...RequestOption) (*resultImpl, error) {
|
||||
if r.IsComplete() {
|
||||
return &Result{
|
||||
return &resultImpl{
|
||||
store: s,
|
||||
messages: nil,
|
||||
cursor: nil,
|
||||
storeRequest: r.storeRequest,
|
||||
storeResponse: r.storeResponse,
|
||||
storeRequest: r.Query(),
|
||||
storeResponse: r.Response(),
|
||||
peerID: r.PeerID(),
|
||||
}, nil
|
||||
}
|
||||
|
@ -240,7 +240,7 @@ func (s *WakuStore) next(ctx context.Context, r *Result, opts ...RequestOption)
|
|||
}
|
||||
}
|
||||
|
||||
storeRequest := proto.Clone(r.storeRequest).(*pb.StoreQueryRequest)
|
||||
storeRequest := proto.Clone(r.Query()).(*pb.StoreQueryRequest)
|
||||
storeRequest.RequestId = hex.EncodeToString(protocol.GenerateRequestID())
|
||||
storeRequest.PaginationCursor = r.Cursor()
|
||||
|
||||
|
@ -249,7 +249,7 @@ func (s *WakuStore) next(ctx context.Context, r *Result, opts ...RequestOption)
|
|||
return nil, err
|
||||
}
|
||||
|
||||
result := &Result{
|
||||
result := &resultImpl{
|
||||
store: s,
|
||||
messages: response.Messages,
|
||||
storeRequest: storeRequest,
|
||||
|
@ -263,7 +263,7 @@ func (s *WakuStore) next(ctx context.Context, r *Result, opts ...RequestOption)
|
|||
}
|
||||
|
||||
func (s *WakuStore) queryFrom(ctx context.Context, storeRequest *pb.StoreQueryRequest, params *Parameters) (*pb.StoreQueryResponse, error) {
|
||||
logger := s.log.With(logging.HostID("peer", params.selectedPeer), zap.String("requestId", hex.EncodeToString([]byte(storeRequest.RequestId))))
|
||||
logger := s.log.With(logging.HostID("peer", params.selectedPeer), zap.String("requestId", storeRequest.RequestId))
|
||||
|
||||
logger.Debug("sending store request")
|
||||
|
||||
|
@ -317,7 +317,7 @@ func (s *WakuStore) queryFrom(ctx context.Context, storeRequest *pb.StoreQueryRe
|
|||
|
||||
if storeResponse.GetStatusCode() != ok {
|
||||
err := NewStoreError(int(storeResponse.GetStatusCode()), storeResponse.GetStatusDesc())
|
||||
return nil, &err
|
||||
return nil, err
|
||||
}
|
||||
return storeResponse, nil
|
||||
}
|
||||
|
|
|
@ -22,6 +22,10 @@ type Parameters struct {
|
|||
skipRatelimit bool
|
||||
}
|
||||
|
||||
func (p *Parameters) Cursor() []byte {
|
||||
return p.cursor
|
||||
}
|
||||
|
||||
type RequestOption func(*Parameters) error
|
||||
|
||||
// WithPeer is an option used to specify the peerID to request the message history.
|
||||
|
|
|
@ -8,7 +8,17 @@ import (
|
|||
)
|
||||
|
||||
// Result represents a valid response from a store node
|
||||
type Result struct {
|
||||
type Result interface {
|
||||
Cursor() []byte
|
||||
IsComplete() bool
|
||||
PeerID() peer.ID
|
||||
Query() *pb.StoreQueryRequest
|
||||
Response() *pb.StoreQueryResponse
|
||||
Next(ctx context.Context, opts ...RequestOption) error
|
||||
Messages() []*pb.WakuMessageKeyValue
|
||||
}
|
||||
|
||||
type resultImpl struct {
|
||||
done bool
|
||||
|
||||
messages []*pb.WakuMessageKeyValue
|
||||
|
@ -19,27 +29,27 @@ type Result struct {
|
|||
peerID peer.ID
|
||||
}
|
||||
|
||||
func (r *Result) Cursor() []byte {
|
||||
func (r *resultImpl) Cursor() []byte {
|
||||
return r.cursor
|
||||
}
|
||||
|
||||
func (r *Result) IsComplete() bool {
|
||||
func (r *resultImpl) IsComplete() bool {
|
||||
return r.done
|
||||
}
|
||||
|
||||
func (r *Result) PeerID() peer.ID {
|
||||
func (r *resultImpl) PeerID() peer.ID {
|
||||
return r.peerID
|
||||
}
|
||||
|
||||
func (r *Result) Query() *pb.StoreQueryRequest {
|
||||
func (r *resultImpl) Query() *pb.StoreQueryRequest {
|
||||
return r.storeRequest
|
||||
}
|
||||
|
||||
func (r *Result) Response() *pb.StoreQueryResponse {
|
||||
func (r *resultImpl) Response() *pb.StoreQueryResponse {
|
||||
return r.storeResponse
|
||||
}
|
||||
|
||||
func (r *Result) Next(ctx context.Context, opts ...RequestOption) error {
|
||||
func (r *resultImpl) Next(ctx context.Context, opts ...RequestOption) error {
|
||||
if r.cursor == nil {
|
||||
r.done = true
|
||||
r.messages = nil
|
||||
|
@ -57,6 +67,6 @@ func (r *Result) Next(ctx context.Context, opts ...RequestOption) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (r *Result) Messages() []*pb.WakuMessageKeyValue {
|
||||
func (r *resultImpl) Messages() []*pb.WakuMessageKeyValue {
|
||||
return r.messages
|
||||
}
|
||||
|
|
|
@ -1044,13 +1044,14 @@ github.com/waku-org/go-discover/discover/v5wire
github.com/waku-org/go-libp2p-rendezvous
github.com/waku-org/go-libp2p-rendezvous/db
github.com/waku-org/go-libp2p-rendezvous/pb
# github.com/waku-org/go-waku v0.8.1-0.20241004054019-0ed94ce0b1cb
# github.com/waku-org/go-waku v0.8.1-0.20241021202955-3c4e40c729a0
## explicit; go 1.21
github.com/waku-org/go-waku/logging
github.com/waku-org/go-waku/tests
github.com/waku-org/go-waku/waku/persistence
github.com/waku-org/go-waku/waku/v2/api/common
github.com/waku-org/go-waku/waku/v2/api/filter
github.com/waku-org/go-waku/waku/v2/api/history
github.com/waku-org/go-waku/waku/v2/api/missing
github.com/waku-org/go-waku/waku/v2/api/publish
github.com/waku-org/go-waku/waku/v2/discv5
@ -0,0 +1,25 @@
package wakuv2

import (
	"github.com/libp2p/go-libp2p/core/peer"

	"github.com/status-im/status-go/wakuv2/common"
	"github.com/waku-org/go-waku/waku/v2/api/history"
	"github.com/waku-org/go-waku/waku/v2/protocol"
)

type HistoryProcessorWrapper struct {
	waku *Waku
}

func NewHistoryProcessorWrapper(waku *Waku) history.HistoryProcessor {
	return &HistoryProcessorWrapper{waku}
}

func (hr *HistoryProcessorWrapper) OnEnvelope(env *protocol.Envelope, processEnvelopes bool) error {
	return hr.waku.OnNewEnvelopes(env, common.StoreMessageType, processEnvelopes)
}

func (hr *HistoryProcessorWrapper) OnRequestFailed(requestID []byte, peerID peer.ID, err error) {
	hr.waku.onHistoricMessagesRequestFailed(requestID, peerID, err)
}
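Note: for reference, a minimal stand-in for the history.HistoryProcessor contract that HistoryProcessorWrapper satisfies (handy for tests). It assumes the interface consists of exactly the two methods seen above, as this change suggests; the type name is illustrative and not part of this diff.

package example // sketch only, not part of this change

import (
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/waku-org/go-waku/waku/v2/api/history"
	"github.com/waku-org/go-waku/waku/v2/protocol"
)

// noopHistoryProcessor discards everything; it only illustrates the contract.
type noopHistoryProcessor struct{}

func (noopHistoryProcessor) OnEnvelope(env *protocol.Envelope, processEnvelopes bool) error {
	// Envelopes fetched from a storenode would be handled here.
	return nil
}

func (noopHistoryProcessor) OnRequestFailed(requestID []byte, peerID peer.ID, err error) {
	// Failed history requests (keyed by request ID and storenode) land here.
}

// Compile-time check that the sketch satisfies the interface.
var _ history.HistoryProcessor = noopHistoryProcessor{}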
@ -54,9 +54,9 @@ import (
	"github.com/libp2p/go-libp2p"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/metrics"
	"github.com/libp2p/go-libp2p/p2p/protocol/ping"

	filterapi "github.com/waku-org/go-waku/waku/v2/api/filter"
	"github.com/waku-org/go-waku/waku/v2/api/history"
	"github.com/waku-org/go-waku/waku/v2/api/missing"
	"github.com/waku-org/go-waku/waku/v2/api/publish"
	"github.com/waku-org/go-waku/waku/v2/dnsdisc"

@ -171,6 +171,9 @@ type Waku struct {
	onlineChecker *onlinechecker.DefaultOnlineChecker
	state         connection.State

	StorenodeCycle   *history.StorenodeCycle
	HistoryRetriever *history.HistoryRetriever

	logger *zap.Logger

	// NTP Synced timesource

@ -359,6 +362,7 @@ func New(nodeKey *ecdsa.PrivateKey, fleet string, cfg *Config, logger *zap.Logge
	}

	waku.options = opts

	waku.logger.Info("setup the go-waku node successfully")

	return waku, nil
@ -1037,61 +1041,6 @@ func (w *Waku) ConfirmMessageDelivered(hashes []gethcommon.Hash) {
	}
}

func (w *Waku) SetStorePeerID(peerID peer.ID) {
	w.messageSender.SetStorePeerID(peerID)
}

func (w *Waku) Query(ctx context.Context, peerID peer.ID, query store.FilterCriteria, cursor []byte, opts []store.RequestOption, processEnvelopes bool) ([]byte, int, error) {
	requestID := protocol.GenerateRequestID()

	opts = append(opts,
		store.WithRequestID(requestID),
		store.WithPeer(peerID),
		store.WithCursor(cursor))

	logger := w.logger.With(zap.String("requestID", hexutil.Encode(requestID)), zap.Stringer("peerID", peerID))

	logger.Debug("store.query",
		logutils.WakuMessageTimestamp("startTime", query.TimeStart),
		logutils.WakuMessageTimestamp("endTime", query.TimeEnd),
		zap.Strings("contentTopics", query.ContentTopics.ToList()),
		zap.String("pubsubTopic", query.PubsubTopic),
		zap.String("cursor", hexutil.Encode(cursor)),
	)

	queryStart := time.Now()
	result, err := w.node.Store().Query(ctx, query, opts...)
	queryDuration := time.Since(queryStart)
	if err != nil {
		logger.Error("error querying storenode", zap.Error(err))

		if w.onHistoricMessagesRequestFailed != nil {
			w.onHistoricMessagesRequestFailed(requestID, peerID, err)
		}
		return nil, 0, err
	}

	messages := result.Messages()
	envelopesCount := len(messages)
	w.logger.Debug("store.query response", zap.Duration("queryDuration", queryDuration), zap.Int("numMessages", envelopesCount), zap.Bool("hasCursor", result.IsComplete() && result.Cursor() != nil))
	for _, mkv := range messages {
		msg := mkv.Message

		// Temporarily setting RateLimitProof to nil so it matches the WakuMessage protobuffer we are sending
		// See https://github.com/vacp2p/rfc/issues/563
		mkv.Message.RateLimitProof = nil

		envelope := protocol.NewEnvelope(msg, msg.GetTimestamp(), query.PubsubTopic)

		err = w.OnNewEnvelopes(envelope, common.StoreMessageType, processEnvelopes)
		if err != nil {
			return nil, 0, err
		}
	}

	return result.Cursor(), envelopesCount, nil
}

// OnNewEnvelope is an interface from Waku FilterManager API that gets invoked when any new message is received by Filter.
func (w *Waku) OnNewEnvelope(env *protocol.Envelope) error {
	return w.OnNewEnvelopes(env, common.RelayedMessageType, false)
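Note: with the (*Waku).Query and SetStorePeerID helpers removed, history retrieval is driven by the storenode cycle and history retriever wired up in Start() below, and the updated tests query the store client directly. A rough in-package sketch of that direct path, mirroring what the deleted helper did; the method name is illustrative and not part of this change.

// Sketch (wakuv2 package): a one-shot store query replacing the removed helper.
func (w *Waku) queryStoreOnce(ctx context.Context, criteria store.FilterCriteria, peerID peer.ID, cursor []byte, processEnvelopes bool) ([]byte, int, error) {
	result, err := w.node.Store().Query(ctx, criteria,
		store.WithPeer(peerID),
		store.WithCursor(cursor), // nil starts from the first page
	)
	if err != nil {
		return nil, 0, err
	}
	for _, mkv := range result.Messages() {
		// Hand each stored message back to the application, as the removed
		// code did, via OnNewEnvelopes with common.StoreMessageType.
		envelope := protocol.NewEnvelope(mkv.Message, mkv.Message.GetTimestamp(), criteria.PubsubTopic)
		if err := w.OnNewEnvelopes(envelope, common.StoreMessageType, processEnvelopes); err != nil {
			return nil, 0, err
		}
	}
	return result.Cursor(), len(result.Messages()), nil
}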
@ -1115,6 +1064,11 @@ func (w *Waku) Start() error {
		return fmt.Errorf("failed to start go-waku node: %v", err)
	}

	w.StorenodeCycle = history.NewStorenodeCycle(w.logger)
	w.HistoryRetriever = history.NewHistoryRetriever(w.node.Store(), NewHistoryProcessorWrapper(w), w.logger)

	w.StorenodeCycle.Start(w.ctx, w.node.Host())

	w.logger.Info("WakuV2 PeerID", zap.Stringer("id", w.node.Host().ID()))

	w.discoverAndConnectPeers()
@ -1191,7 +1145,7 @@ func (w *Waku) Start() error {

	if w.cfg.EnableMissingMessageVerification {
		w.missingMsgVerifier = missing.NewMissingMessageVerifier(
			w.node.Store(),
			missing.NewDefaultStorenodeRequestor(w.node.Store()),
			w,
			w.node.Timesource(),
			w.logger)
@ -1339,7 +1293,7 @@ func (w *Waku) startMessageSender() error {
		publishMethod = publish.LightPush
	}

	sender, err := publish.NewMessageSender(publishMethod, w.node.Lightpush(), w.node.Relay(), w.logger)
	sender, err := publish.NewMessageSender(publishMethod, publish.NewDefaultPublisher(w.node.Lightpush(), w.node.Relay()), w.logger)
	if err != nil {
		w.logger.Error("failed to create message sender", zap.Error(err))
		return err
@ -1348,7 +1302,7 @@ func (w *Waku) startMessageSender() error {
	if w.cfg.EnableStoreConfirmationForMessagesSent {
		msgStoredChan := make(chan gethcommon.Hash, 1000)
		msgExpiredChan := make(chan gethcommon.Hash, 1000)
		messageSentCheck := publish.NewMessageSentCheck(w.ctx, w.node.Store(), w.node.Timesource(), msgStoredChan, msgExpiredChan, w.logger)
		messageSentCheck := publish.NewMessageSentCheck(w.ctx, publish.NewDefaultStorenodeMessageVerifier(w.node.Store()), w.StorenodeCycle, w.node.Timesource(), msgStoredChan, msgExpiredChan, w.logger)
		sender.WithMessageSentCheck(messageSentCheck)

		w.wg.Add(1)
@ -1967,19 +1921,6 @@ func (w *Waku) PeerID() peer.ID {
	return w.node.Host().ID()
}

func (w *Waku) PingPeer(ctx context.Context, peerID peer.ID) (time.Duration, error) {
	pingResultCh := ping.Ping(ctx, w.node.Host(), peerID)
	select {
	case <-ctx.Done():
		return 0, ctx.Err()
	case r := <-pingResultCh:
		if r.Error != nil {
			return 0, r.Error
		}
		return r.RTT, nil
	}
}

func (w *Waku) Peerstore() peerstore.Peerstore {
	return w.node.Host().Peerstore()
}
@ -280,19 +280,16 @@ func TestBasicWakuV2(t *testing.T) {
		b.InitialInterval = 500 * time.Millisecond
	}
	err = tt.RetryWithBackOff(func() error {
		_, envelopeCount, err := w.Query(
		result, err := w.node.Store().Query(
			context.Background(),
			storeNode.PeerID,
			store.FilterCriteria{
				ContentFilter: protocol.NewContentFilter(config.DefaultShardPubsubTopic, contentTopic.ContentTopic()),
				TimeStart:     proto.Int64((timestampInSeconds - int64(marginInSeconds)) * int64(time.Second)),
				TimeEnd:       proto.Int64((timestampInSeconds + int64(marginInSeconds)) * int64(time.Second)),
			},
			nil,
			nil,
			false,
			store.WithPeer(storeNode.PeerID),
		)
		if err != nil || envelopeCount == 0 {
		if err != nil || len(result.Messages()) == 0 {
			// in case of failure extend timestamp margin up to 40secs
			if marginInSeconds < 40 {
				marginInSeconds += 5
@ -586,20 +583,17 @@ func TestWakuV2Store(t *testing.T) {
	timestampInSeconds := msgTimestamp / int64(time.Second)
	marginInSeconds := 5
	// Query the second node's store for the message
	_, envelopeCount, err := w1.Query(
	result, err := w1.node.Store().Query(
		context.Background(),
		w2.node.Host().ID(),
		store.FilterCriteria{
			TimeStart:     proto.Int64((timestampInSeconds - int64(marginInSeconds)) * int64(time.Second)),
			TimeEnd:       proto.Int64((timestampInSeconds + int64(marginInSeconds)) * int64(time.Second)),
			ContentFilter: protocol.NewContentFilter(config1.DefaultShardPubsubTopic, contentTopic.ContentTopic()),
		},
		nil,
		nil,
		false,
		store.WithPeer(w2.node.Host().ID()),
	)
	require.NoError(t, err)
	require.True(t, envelopeCount > 0, "no messages received from store node")
	require.True(t, len(result.Messages()) > 0, "no messages received from store node")
}

func waitForPeerConnection(t *testing.T, peerID peer.ID, peerCh chan peer.IDSlice) {