refactor: store

This commit is contained in:
Richard Ramos 2022-11-25 16:54:11 -04:00 committed by RichΛrd
parent 05e33105c4
commit ff8c3009c7
21 changed files with 929 additions and 908 deletions

11
.gitignore vendored
View File

@ -1,10 +1,15 @@
*.db
*.db-shm
*.db-wal
nodekey nodekey
rlnCredentials.txt rlnCredentials.txt
*.log *.log
# sqlite db
*.db
*.db-shm
*.db-wal
*.sqlite3
*.sqlite3-shm
*.sqlite3-wal
# Binaries for programs and plugins # Binaries for programs and plugins
*.exe *.exe
*.exe~ *.exe~

View File

@ -130,12 +130,6 @@ func main() {
Usage: "Interval of time for pinging peers to keep the connection alive.", Usage: "Interval of time for pinging peers to keep the connection alive.",
Destination: &options.KeepAlive, Destination: &options.KeepAlive,
}, },
&cli.BoolFlag{
Name: "use-db",
Aliases: []string{"sqlite-store"},
Usage: "Use SQLiteDB to persist information",
Destination: &options.UseDB,
},
&cli.BoolFlag{ &cli.BoolFlag{
Name: "persist-peers", Name: "persist-peers",
Usage: "Enable peer persistence", Usage: "Enable peer persistence",
@ -148,13 +142,6 @@ func main() {
Value: "any", Value: "any",
Destination: &options.NAT, // TODO: accept none,any,upnp,extaddr Destination: &options.NAT, // TODO: accept none,any,upnp,extaddr
}, },
&cli.PathFlag{
Name: "db-path",
Aliases: []string{"dbpath"},
Value: "./store.db",
Usage: "Path to DB file",
Destination: &options.DBPath,
},
&cli.StringFlag{ &cli.StringFlag{
Name: "advertise-address", Name: "advertise-address",
Usage: "External address to advertise to other nodes (overrides --address and --ws-address flags)", Usage: "External address to advertise to other nodes (overrides --address and --ws-address flags)",
@ -217,28 +204,6 @@ func main() {
Usage: "Minimum number of peers to publish to Relay", Usage: "Minimum number of peers to publish to Relay",
Destination: &options.Relay.MinRelayPeersToPublish, Destination: &options.Relay.MinRelayPeersToPublish,
}, },
&cli.BoolFlag{
Name: "store",
Usage: "Enable store protocol to persist messages",
Destination: &options.Store.Enable,
},
&cli.BoolFlag{
Name: "resume",
Usage: "Fix the gaps in message history",
Destination: &options.Store.ShouldResume,
},
&cli.DurationFlag{
Name: "store-duration",
Value: time.Hour * 24 * 30,
Usage: "maximum number of seconds before a message is removed from the store",
Destination: &options.Store.RetentionTime,
},
&cli.IntFlag{
Name: "store-capacity",
Value: 50000,
Usage: "maximum number of messages to store",
Destination: &options.Store.RetentionMaxMessages,
},
&cli.GenericFlag{ &cli.GenericFlag{
Name: "storenode", Name: "storenode",
Usage: "Multiaddr of a peer that supports store protocol. Option may be repeated", Usage: "Multiaddr of a peer that supports store protocol. Option may be repeated",
@ -246,6 +211,36 @@ func main() {
Values: &options.Store.Nodes, Values: &options.Store.Nodes,
}, },
}, },
&cli.BoolFlag{
Name: "store",
Usage: "Enable store protocol to persist messages",
Destination: &options.Store.Enable,
},
&cli.StringFlag{
Name: "store-message-db-url",
Usage: "The database connection URL for persistent storage. (Set empty to use in memory db)",
Value: "sqlite://store.sqlite3",
Destination: &options.Store.DatabaseURL,
},
&cli.DurationFlag{
Name: "store-message-retention-time",
Value: time.Hour * 24 * 2,
Usage: "maximum number of seconds before a message is removed from the store. Set to 0 to disable it",
Destination: &options.Store.RetentionTime,
},
&cli.IntFlag{
Name: "store-message-retention-capacity",
Value: 0,
Usage: "maximum number of messages to store. Set to 0 to disable it",
Destination: &options.Store.RetentionMaxMessages,
},
&cli.GenericFlag{
Name: "store-resume-peer",
Usage: "Peer multiaddress to resume the message store at boot. Option may be repeated",
Value: &cliutils.MultiaddrSlice{
Values: &options.Store.ResumeNodes,
},
},
&cli.BoolFlag{ &cli.BoolFlag{
Name: "swap", Name: "swap",
Usage: "Enable swap protocol", Usage: "Enable swap protocol",

View File

@ -51,6 +51,7 @@ func NewMetricsServer(address string, port int, log *zap.Logger) *Server {
// Register the views // Register the views
if err := view.Register( if err := view.Register(
metrics.MessageView, metrics.MessageView,
metrics.StoreMessagesView,
metrics.FilterSubscriptionsView, metrics.FilterSubscriptionsView,
metrics.StoreErrorTypesView, metrics.StoreErrorTypesView,
metrics.LightpushErrorTypesView, metrics.LightpushErrorTypesView,

View File

@ -11,6 +11,8 @@ import (
"net" "net"
"os" "os"
"os/signal" "os/signal"
"regexp"
"strings"
"syscall" "syscall"
"time" "time"
@ -76,6 +78,14 @@ func freePort() (int, error) {
return port, nil return port, nil
} }
func validateDBUrl(val string) error {
matched, err := regexp.Match(`^[\w\+]+:\/\/[\w\/\\\.\:\@]+$`, []byte(val))
if !matched || err != nil {
return errors.New("invalid 'db url' option format")
}
return nil
}
const dialTimeout = 7 * time.Second const dialTimeout = 7 * time.Second
// Execute starts a go-waku node with settings determined by the Options parameter // Execute starts a go-waku node with settings determined by the Options parameter
@ -100,20 +110,33 @@ func Execute(options Options) {
failOnErr(err, "deriving peer ID from private key") failOnErr(err, "deriving peer ID from private key")
logger := utils.Logger().With(logging.HostID("node", id)) logger := utils.Logger().With(logging.HostID("node", id))
if options.DBPath == "" && options.UseDB { var db *sql.DB
failOnErr(errors.New("dbpath can't be null"), "") if options.Store.Enable {
dbURL := ""
if options.Store.DatabaseURL != "" {
err := validateDBUrl(options.Store.DatabaseURL)
failOnErr(err, "connecting to the db")
dbURL = options.Store.DatabaseURL
} else {
// In memoryDB
dbURL = "sqlite://:memory:"
} }
var db *sql.DB // TODO: this should be refactored to use any kind of DB, not just sqlite
if options.UseDB { // Extract to separate module
db, err = sqlite.NewDB(options.DBPath)
failOnErr(err, "Could not connect to DB") dbURLParts := strings.Split(dbURL, "://")
logger.Debug("using database: ", zap.String("path", options.DBPath)) dbEngine := dbURLParts[0]
dbParams := dbURLParts[1]
switch dbEngine {
case "sqlite":
db, err = sqlite.NewDB(dbParams)
failOnErr(err, "Could not connect to DB")
logger.Info("using database: ", zap.String("path", dbParams))
default:
failOnErr(errors.New("unknown database engine"), fmt.Sprintf("%s is not supported by go-waku", dbEngine))
}
} else {
db, err = sqlite.NewDB(":memory:")
failOnErr(err, "Could not create in-memory DB")
logger.Debug("using in-memory database")
} }
ctx := context.Background() ctx := context.Background()
@ -175,8 +198,7 @@ func Execute(options Options) {
return return
} }
if options.UseDB { if options.Store.Enable && options.PersistPeers {
if options.PersistPeers {
// Create persistent peerstore // Create persistent peerstore
queries, err := sqlite.NewQueries("peerstore", db) queries, err := sqlite.NewQueries("peerstore", db)
failOnErr(err, "Peerstore") failOnErr(err, "Peerstore")
@ -188,7 +210,6 @@ func Execute(options Options) {
libp2pOpts = append(libp2pOpts, libp2p.Peerstore(peerStore)) libp2pOpts = append(libp2pOpts, libp2p.Peerstore(peerStore))
} }
}
nodeOpts = append(nodeOpts, node.WithLibP2POptions(libp2pOpts...)) nodeOpts = append(nodeOpts, node.WithLibP2POptions(libp2pOpts...))
@ -202,8 +223,8 @@ func Execute(options Options) {
nodeOpts = append(nodeOpts, node.WithWakuFilter(!options.Filter.DisableFullNode, filter.WithTimeout(options.Filter.Timeout))) nodeOpts = append(nodeOpts, node.WithWakuFilter(!options.Filter.DisableFullNode, filter.WithTimeout(options.Filter.Timeout)))
} }
nodeOpts = append(nodeOpts, node.WithWakuStore(options.Store.Enable, options.Store.ShouldResume)) nodeOpts = append(nodeOpts, node.WithWakuStore(options.Store.Enable, options.Store.ResumeNodes))
if options.Store.Enable && options.UseDB { if options.Store.Enable {
dbStore, err := persistence.NewDBStore(logger, persistence.WithDB(db), persistence.WithRetentionPolicy(options.Store.RetentionMaxMessages, options.Store.RetentionTime)) dbStore, err := persistence.NewDBStore(logger, persistence.WithDB(db), persistence.WithRetentionPolicy(options.Store.RetentionMaxMessages, options.Store.RetentionTime))
failOnErr(err, "DBStore") failOnErr(err, "DBStore")
nodeOpts = append(nodeOpts, node.WithMessageProvider(dbStore)) nodeOpts = append(nodeOpts, node.WithMessageProvider(dbStore))
@ -288,7 +309,7 @@ func Execute(options Options) {
logger.Error("adding peer exchange peer", logging.MultiAddrs("node", *options.PeerExchange.Node), zap.Error(err)) logger.Error("adding peer exchange peer", logging.MultiAddrs("node", *options.PeerExchange.Node), zap.Error(err))
} else { } else {
desiredOutDegree := 6 // TODO: obtain this from gossipsub D desiredOutDegree := 6 // TODO: obtain this from gossipsub D
if err = wakuNode.PeerExchange().Request(ctx, desiredOutDegree, peer_exchange.WithPeer(*peerId)); err != nil { if err = wakuNode.PeerExchange().Request(ctx, desiredOutDegree, peer_exchange.WithPeer(peerId)); err != nil {
logger.Error("requesting peers via peer exchange", zap.Error(err)) logger.Error("requesting peers via peer exchange", zap.Error(err))
} }
} }
@ -373,7 +394,7 @@ func Execute(options Options) {
failOnErr(err, "MetricsClose") failOnErr(err, "MetricsClose")
} }
if options.UseDB { if options.Store.Enable {
err = db.Close() err = db.Close()
failOnErr(err, "DBClose") failOnErr(err, "DBClose")
} }

View File

@ -68,9 +68,10 @@ type LightpushOptions struct {
// node and provide message history to nodes that ask for it. // node and provide message history to nodes that ask for it.
type StoreOptions struct { type StoreOptions struct {
Enable bool Enable bool
ShouldResume bool DatabaseURL string
RetentionTime time.Duration RetentionTime time.Duration
RetentionMaxMessages int RetentionMaxMessages int
ResumeNodes []multiaddr.Multiaddr
Nodes []multiaddr.Multiaddr Nodes []multiaddr.Multiaddr
} }
@ -150,8 +151,6 @@ type Options struct {
Overwrite bool Overwrite bool
StaticNodes []multiaddr.Multiaddr StaticNodes []multiaddr.Multiaddr
KeepAlive time.Duration KeepAlive time.Duration
UseDB bool
DBPath string
AdvertiseAddress string AdvertiseAddress string
ShowAddresses bool ShowAddresses bool
LogLevel string LogLevel string

View File

@ -152,13 +152,13 @@ func NewDBStore(log *zap.Logger, options ...DBOption) (*DBStore, error) {
} }
result.wg.Add(1) result.wg.Add(1)
go result.checkForOlderRecords(10 * time.Second) // is 10s okay? go result.checkForOlderRecords(60 * time.Second)
return result, nil return result, nil
} }
func (d *DBStore) cleanOlderRecords() error { func (d *DBStore) cleanOlderRecords() error {
d.log.Debug("Cleaning older records...") d.log.Info("Cleaning older records...")
// Delete older messages // Delete older messages
if d.maxDuration > 0 { if d.maxDuration > 0 {
@ -184,6 +184,8 @@ func (d *DBStore) cleanOlderRecords() error {
d.log.Debug("deleting excess records from the DB", zap.Duration("duration", elapsed)) d.log.Debug("deleting excess records from the DB", zap.Duration("duration", elapsed))
} }
d.log.Info("Older records removed")
return nil return nil
} }
@ -257,19 +259,6 @@ func (d *DBStore) Query(query *pb.HistoryQuery) (*pb.Index, []StoredMessage, err
parameters = append(parameters, query.PubsubTopic) parameters = append(parameters, query.PubsubTopic)
} }
if query.StartTime != 0 {
conditions = append(conditions, "id >= ?")
startTimeDBKey := NewDBKey(uint64(query.StartTime), "", []byte{})
parameters = append(parameters, startTimeDBKey.Bytes())
}
if query.EndTime != 0 {
conditions = append(conditions, "id <= ?")
endTimeDBKey := NewDBKey(uint64(query.EndTime), "", []byte{})
parameters = append(parameters, endTimeDBKey.Bytes())
}
if len(query.ContentFilters) != 0 { if len(query.ContentFilters) != 0 {
var ctPlaceHolder []string var ctPlaceHolder []string
for _, ct := range query.ContentFilters { for _, ct := range query.ContentFilters {
@ -281,7 +270,9 @@ func (d *DBStore) Query(query *pb.HistoryQuery) (*pb.Index, []StoredMessage, err
conditions = append(conditions, "contentTopic IN ("+strings.Join(ctPlaceHolder, ", ")+")") conditions = append(conditions, "contentTopic IN ("+strings.Join(ctPlaceHolder, ", ")+")")
} }
usesCursor := false
if query.PagingInfo.Cursor != nil { if query.PagingInfo.Cursor != nil {
usesCursor = true
var exists bool var exists bool
cursorDBKey := NewDBKey(uint64(query.PagingInfo.Cursor.SenderTime), query.PagingInfo.Cursor.PubsubTopic, query.PagingInfo.Cursor.Digest) cursorDBKey := NewDBKey(uint64(query.PagingInfo.Cursor.SenderTime), query.PagingInfo.Cursor.PubsubTopic, query.PagingInfo.Cursor.Digest)
@ -306,6 +297,23 @@ func (d *DBStore) Query(query *pb.HistoryQuery) (*pb.Index, []StoredMessage, err
} }
} }
if query.StartTime != 0 {
if !usesCursor || query.PagingInfo.Direction == pb.PagingInfo_BACKWARD {
conditions = append(conditions, "id >= ?")
startTimeDBKey := NewDBKey(uint64(query.StartTime), "", []byte{})
parameters = append(parameters, startTimeDBKey.Bytes())
}
}
if query.EndTime != 0 {
if !usesCursor || query.PagingInfo.Direction == pb.PagingInfo_FORWARD {
conditions = append(conditions, "id <= ?")
endTimeDBKey := NewDBKey(uint64(query.EndTime), "", []byte{})
parameters = append(parameters, endTimeDBKey.Bytes())
}
}
conditionStr := "" conditionStr := ""
if len(conditions) != 0 { if len(conditions) != 0 {
conditionStr = "WHERE " + strings.Join(conditions, " AND ") conditionStr = "WHERE " + strings.Join(conditions, " AND ")
@ -342,7 +350,7 @@ func (d *DBStore) Query(query *pb.HistoryQuery) (*pb.Index, []StoredMessage, err
} }
defer rows.Close() defer rows.Close()
cursor := &pb.Index{} var cursor *pb.Index
if len(result) != 0 { if len(result) != 0 {
if len(result) > int(query.PagingInfo.PageSize) { if len(result) > int(query.PagingInfo.PageSize) {
result = result[0:query.PagingInfo.PageSize] result = result[0:query.PagingInfo.PageSize]

View File

@ -19,6 +19,7 @@ var (
StoreMessages = stats.Int64("store_messages", "Number of historical messages", stats.UnitDimensionless) StoreMessages = stats.Int64("store_messages", "Number of historical messages", stats.UnitDimensionless)
FilterSubscriptions = stats.Int64("filter_subscriptions", "Number of filter subscriptions", stats.UnitDimensionless) FilterSubscriptions = stats.Int64("filter_subscriptions", "Number of filter subscriptions", stats.UnitDimensionless)
StoreErrors = stats.Int64("errors", "Number of errors in store protocol", stats.UnitDimensionless) StoreErrors = stats.Int64("errors", "Number of errors in store protocol", stats.UnitDimensionless)
StoreQueries = stats.Int64("store_queries", "Number of store queries", stats.UnitDimensionless)
LightpushErrors = stats.Int64("errors", "Number of errors in lightpush protocol", stats.UnitDimensionless) LightpushErrors = stats.Int64("errors", "Number of errors in lightpush protocol", stats.UnitDimensionless)
PeerExchangeError = stats.Int64("errors", "Number of errors in peer exchange protocol", stats.UnitDimensionless) PeerExchangeError = stats.Int64("errors", "Number of errors in peer exchange protocol", stats.UnitDimensionless)
) )
@ -48,6 +49,12 @@ var (
Description: "The number of the messages received", Description: "The number of the messages received",
Aggregation: view.Count(), Aggregation: view.Count(),
} }
StoreQueriesView = &view.View{
Name: "gowaku_store_queries",
Measure: StoreQueries,
Description: "The number of the store queries received",
Aggregation: view.Count(),
}
StoreMessagesView = &view.View{ StoreMessagesView = &view.View{
Name: "gowaku_store_messages", Name: "gowaku_store_messages",
Measure: StoreMessages, Measure: StoreMessages,
@ -102,6 +109,10 @@ func RecordMessage(ctx context.Context, tagType string, len int) {
} }
} }
func RecordStoreQuery(ctx context.Context) {
stats.Record(ctx, StoreQueries.M(1))
}
func RecordStoreError(ctx context.Context, tagType string) { func RecordStoreError(ctx context.Context, tagType string) {
if err := stats.RecordWithTags(ctx, []tag.Mutator{tag.Insert(ErrorType, tagType)}, StoreErrors.M(1)); err != nil { if err := stats.RecordWithTags(ctx, []tag.Mutator{tag.Insert(ErrorType, tagType)}, StoreErrors.M(1)); err != nil {
utils.Logger().Error("failed to record with tags", zap.Error(err)) utils.Logger().Error("failed to record with tags", zap.Error(err))

View File

@ -80,7 +80,7 @@ func TestConnectionStatusChanges(t *testing.T) {
node3, err := New(ctx, node3, err := New(ctx,
WithHostAddress(hostAddr3), WithHostAddress(hostAddr3),
WithWakuRelay(), WithWakuRelay(),
WithWakuStore(false, false), WithWakuStore(false, nil),
WithMessageProvider(dbStore), WithMessageProvider(dbStore),
) )
require.NoError(t, err) require.NoError(t, err)

View File

@ -502,38 +502,28 @@ func (w *WakuNode) mountPeerExchange() error {
func (w *WakuNode) startStore() { func (w *WakuNode) startStore() {
w.store.Start(w.ctx) w.store.Start(w.ctx)
if w.opts.shouldResume { if len(w.opts.resumeNodes) != 0 {
// TODO: extract this to a function and run it when you go offline // TODO: extract this to a function and run it when you go offline
// TODO: determine if a store is listening to a topic // TODO: determine if a store is listening to a topic
var peerIDs []peer.ID
for _, n := range w.opts.resumeNodes {
pID, err := w.AddPeer(n, string(store.StoreID_v20beta4))
if err != nil {
w.log.Warn("adding peer to peerstore", logging.MultiAddrs("peer", n), zap.Error(err))
}
peerIDs = append(peerIDs, pID)
}
w.wg.Add(1) w.wg.Add(1)
go func() { go func() {
defer w.wg.Done() defer w.wg.Done()
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for {
peerVerif:
for {
select {
case <-w.quit:
return
case <-ticker.C:
_, err := utils.SelectPeer(w.host, string(store.StoreID_v20beta4), nil, w.log)
if err == nil {
break peerVerif
}
}
}
ctxWithTimeout, ctxCancel := context.WithTimeout(w.ctx, 20*time.Second) ctxWithTimeout, ctxCancel := context.WithTimeout(w.ctx, 20*time.Second)
defer ctxCancel() defer ctxCancel()
if _, err := w.store.Resume(ctxWithTimeout, string(relay.DefaultWakuTopic), nil); err != nil { if _, err := w.store.Resume(ctxWithTimeout, string(relay.DefaultWakuTopic), peerIDs); err != nil {
w.log.Info("Retrying in 10s...") w.log.Error("Could not resume history", zap.Error(err))
time.Sleep(10 * time.Second) time.Sleep(10 * time.Second)
} else {
break
}
} }
}() }()
} }
@ -551,13 +541,13 @@ func (w *WakuNode) addPeer(info *peer.AddrInfo, protocols ...string) error {
} }
// AddPeer is used to add a peer and the protocols it support to the node peerstore // AddPeer is used to add a peer and the protocols it support to the node peerstore
func (w *WakuNode) AddPeer(address ma.Multiaddr, protocols ...string) (*peer.ID, error) { func (w *WakuNode) AddPeer(address ma.Multiaddr, protocols ...string) (peer.ID, error) {
info, err := peer.AddrInfoFromP2pAddr(address) info, err := peer.AddrInfoFromP2pAddr(address)
if err != nil { if err != nil {
return nil, err return "", err
} }
return &info.ID, w.addPeer(info, protocols...) return info.ID, w.addPeer(info, protocols...)
} }
// DialPeerWithMultiAddress is used to connect to a peer using a multiaddress // DialPeerWithMultiAddress is used to connect to a peer using a multiaddress

View File

@ -100,7 +100,7 @@ func Test5000(t *testing.T) {
go func() { go func() {
defer wg.Done() defer wg.Done()
ticker := time.NewTimer(20 * time.Second) ticker := time.NewTimer(30 * time.Second)
defer ticker.Stop() defer ticker.Stop()
for { for {
@ -118,7 +118,7 @@ func Test5000(t *testing.T) {
go func() { go func() {
defer wg.Done() defer wg.Done()
ticker := time.NewTimer(20 * time.Second) ticker := time.NewTimer(30 * time.Second)
defer ticker.Stop() defer ticker.Stop()
for { for {
@ -177,7 +177,7 @@ func TestDecoupledStoreFromRelay(t *testing.T) {
wakuNode2, err := New(ctx, wakuNode2, err := New(ctx,
WithHostAddress(hostAddr2), WithHostAddress(hostAddr2),
WithWakuFilter(false), WithWakuFilter(false),
WithWakuStore(true, false), WithWakuStore(true, nil),
WithMessageProvider(dbStore), WithMessageProvider(dbStore),
) )
require.NoError(t, err) require.NoError(t, err)

View File

@ -63,8 +63,8 @@ type WakuNodeParameters struct {
enableStore bool enableStore bool
enableSwap bool enableSwap bool
shouldResume bool
storeMsgs bool storeMsgs bool
resumeNodes []multiaddr.Multiaddr
messageProvider store.MessageProvider messageProvider store.MessageProvider
swapMode int swapMode int
@ -300,11 +300,11 @@ func WithWakuFilter(fullNode bool, filterOpts ...filter.Option) WakuNodeOption {
// WithWakuStore enables the Waku V2 Store protocol and if the messages should // WithWakuStore enables the Waku V2 Store protocol and if the messages should
// be stored or not in a message provider // be stored or not in a message provider
func WithWakuStore(shouldStoreMessages bool, shouldResume bool) WakuNodeOption { func WithWakuStore(shouldStoreMessages bool, resumeNodes []multiaddr.Multiaddr) WakuNodeOption {
return func(params *WakuNodeParameters) error { return func(params *WakuNodeParameters) error {
params.enableStore = true params.enableStore = true
params.storeMsgs = shouldStoreMessages params.storeMsgs = shouldStoreMessages
params.shouldResume = shouldResume params.resumeNodes = resumeNodes
return nil return nil
} }
} }

View File

@ -42,7 +42,7 @@ func TestWakuOptions(t *testing.T) {
WithWakuRelay(), WithWakuRelay(),
WithWakuFilter(true), WithWakuFilter(true),
WithDiscoveryV5(123, nil, false), WithDiscoveryV5(123, nil, false),
WithWakuStore(true, true), WithWakuStore(true, nil),
WithMessageProvider(&persistence.DBStore{}), WithMessageProvider(&persistence.DBStore{}),
WithLightPush(), WithLightPush(),
WithKeepAlive(time.Hour), WithKeepAlive(time.Hour),

View File

@ -150,12 +150,6 @@ func TestResumeWithoutSpecifyingPeer(t *testing.T) {
err = host2.Peerstore().AddProtocols(host1.ID(), string(StoreID_v20beta4)) err = host2.Peerstore().AddProtocols(host1.ID(), string(StoreID_v20beta4))
require.NoError(t, err) require.NoError(t, err)
msgCount, err := s2.Resume(ctx, "test", []peer.ID{}) _, err = s2.Resume(ctx, "test", []peer.ID{})
require.Error(t, err)
require.NoError(t, err)
require.Equal(t, 1, msgCount)
allMsgs, err := s2.msgProvider.GetAll()
require.NoError(t, err)
require.Len(t, allMsgs, 1)
} }

View File

@ -1,739 +0,0 @@
package store
import (
"context"
"encoding/hex"
"errors"
"math"
"sync"
"time"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
libp2pProtocol "github.com/libp2p/go-libp2p/core/protocol"
"github.com/libp2p/go-msgio/protoio"
"go.uber.org/zap"
"github.com/waku-org/go-waku/logging"
"github.com/waku-org/go-waku/waku/persistence"
"github.com/waku-org/go-waku/waku/v2/metrics"
"github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
"github.com/waku-org/go-waku/waku/v2/protocol/swap"
"github.com/waku-org/go-waku/waku/v2/utils"
)
// StoreID_v20beta4 is the current Waku Store protocol identifier
const StoreID_v20beta4 = libp2pProtocol.ID("/vac/waku/store/2.0.0-beta4")
// MaxPageSize is the maximum number of waku messages to return per page
const MaxPageSize = 100
// MaxContentFilters is the maximum number of allowed content filters in a query
const MaxContentFilters = 10
// MaxTimeVariance is the maximum duration in the future allowed for a message timestamp
const MaxTimeVariance = time.Duration(20) * time.Second
// Sentinel errors returned by the store protocol. Callers compare against
// these values to distinguish failure modes.
var (
// ErrMaxContentFilters is returned when the number of content topics in the query
// exceeds the limit
ErrMaxContentFilters = errors.New("exceeds the maximum number of content filters allowed")
// ErrNoPeersAvailable is returned when there are no store peers in the peer store
// that could be used to retrieve message history
ErrNoPeersAvailable = errors.New("no suitable remote peers")
// ErrInvalidId is returned when no RequestID is given
ErrInvalidId = errors.New("invalid request id")
// ErrFailedToResumeHistory is returned when the node attempted to retrieve historic
// messages to fill its own message history but for some reason it failed
ErrFailedToResumeHistory = errors.New("failed to resume the history")
// ErrFailedQuery is emitted when the query fails to return results
ErrFailedQuery = errors.New("failed to resolve the query")
// ErrFutureMessage is returned when a message's sender timestamp is more than
// MaxTimeVariance ahead of the local receiver time.
ErrFutureMessage = errors.New("message timestamp in the future")
)
// findMessages normalizes the paging information of a history query, enforces
// the content-filter limit, and resolves the query against the given message
// provider. It returns the page of matching messages together with the paging
// info a client needs to request the next page.
func findMessages(query *pb.HistoryQuery, msgProvider MessageProvider) ([]*pb.WakuMessage, *pb.PagingInfo, error) {
	// Default to forward pagination when the client supplied none.
	if query.PagingInfo == nil {
		query.PagingInfo = &pb.PagingInfo{Direction: pb.PagingInfo_FORWARD}
	}

	// Clamp the page size to the protocol maximum (0 means "use the maximum").
	if query.PagingInfo.PageSize == 0 || query.PagingInfo.PageSize > uint64(MaxPageSize) {
		query.PagingInfo.PageSize = MaxPageSize
	}

	if len(query.ContentFilters) > MaxContentFilters {
		return nil, nil, ErrMaxContentFilters
	}

	cursor, stored, err := msgProvider.Query(query)
	if err != nil {
		return nil, nil, err
	}

	// An empty result needs no pagination: echo the request's cursor back.
	if len(stored) == 0 {
		return nil, &pb.PagingInfo{PageSize: 0, Cursor: query.PagingInfo.Cursor, Direction: query.PagingInfo.Direction}, nil
	}

	// Report the effective page size, never more than what was returned.
	pageSize := query.PagingInfo.PageSize
	if pageSize > uint64(len(stored)) {
		pageSize = uint64(len(stored))
	}
	pagingInfo := &pb.PagingInfo{PageSize: pageSize, Cursor: cursor, Direction: query.PagingInfo.Direction}

	messages := make([]*pb.WakuMessage, 0, len(stored))
	for _, sm := range stored {
		messages = append(messages, sm.Message)
	}

	return messages, pagingInfo, nil
}
// FindMessages executes a history query against the store's message provider
// and wraps the outcome in a HistoryResponse. Provider errors are not
// propagated to the caller: an invalid cursor is reported through the
// response's Error field, while any other failure is only logged.
func (store *WakuStore) FindMessages(query *pb.HistoryQuery) *pb.HistoryResponse {
	response := &pb.HistoryResponse{}

	messages, pagingInfo, err := findMessages(query, store.msgProvider)
	switch {
	case err == nil:
		// nothing to report
	case err == persistence.ErrInvalidCursor:
		response.Error = pb.HistoryResponse_INVALID_CURSOR
	default:
		// TODO: return error in pb.HistoryResponse
		store.log.Error("obtaining messages from db", zap.Error(err))
	}

	response.Messages = messages
	response.PagingInfo = pagingInfo
	return response
}
// MessageProvider is the storage backend used by WakuStore to persist and
// retrieve historical messages.
type MessageProvider interface {
// GetAll returns every stored message.
GetAll() ([]persistence.StoredMessage, error)
// Query returns the messages matching a history query, plus a cursor for pagination.
Query(query *pb.HistoryQuery) (*pb.Index, []persistence.StoredMessage, error)
// Put persists a single message envelope.
Put(env *protocol.Envelope) error
// MostRecentTimestamp returns the timestamp of the newest stored message.
MostRecentTimestamp() (int64, error)
// Stop releases the provider's resources.
Stop()
// Count returns the number of stored messages.
Count() (int, error)
}
// Query is the user-facing description of a history lookup: a pubsub topic,
// optional content topics, and an optional [StartTime, EndTime] window.
type Query struct {
Topic string
ContentTopics []string
StartTime int64
EndTime int64
}
// Result represents a valid response from a store node
type Result struct {
Messages []*pb.WakuMessage
query *pb.HistoryQuery
cursor *pb.Index
peerId peer.ID
}
// Cursor returns the pagination cursor to fetch the next page of results.
func (r *Result) Cursor() *pb.Index {
return r.cursor
}
// IsComplete reports whether there are no further pages (empty cursor digest).
func (r *Result) IsComplete() bool {
return len(r.cursor.Digest) == 0
}
// PeerID returns the peer the result was obtained from.
func (r *Result) PeerID() peer.ID {
return r.peerId
}
// Query returns the history query this result answers.
func (r *Result) Query() *pb.HistoryQuery {
return r.query
}
// WakuStore implements the Waku Store protocol: it persists messages received
// on MsgC via a MessageProvider and serves history queries from remote peers.
type WakuStore struct {
ctx context.Context
// MsgC receives envelopes to be persisted; drained by storeIncomingMessages.
MsgC chan *protocol.Envelope
wg *sync.WaitGroup
log *zap.Logger
started bool
quit chan struct{}
msgProvider MessageProvider
h host.Host
swap *swap.WakuSwap
}
// criteriaFN is a predicate used by Find to select a message from query results.
type criteriaFN = func(msg *pb.WakuMessage) (bool, error)
// Store is the public interface of the store protocol implementation.
type Store interface {
Start(ctx context.Context)
Query(ctx context.Context, query Query, opts ...HistoryRequestOption) (*Result, error)
Find(ctx context.Context, query Query, cb criteriaFN, opts ...HistoryRequestOption) (*pb.WakuMessage, error)
Next(ctx context.Context, r *Result) (*Result, error)
Resume(ctx context.Context, pubsubTopic string, peerList []peer.ID) (int, error)
MessageChannel() chan *protocol.Envelope
Stop()
}
// NewWakuStore creates a WakuStore using a specific MessageProvider for
// storing the messages. The store is inert until Start is called.
func NewWakuStore(host host.Host, swap *swap.WakuSwap, p MessageProvider, log *zap.Logger) *WakuStore {
	return &WakuStore{
		msgProvider: p,
		h:           host,
		swap:        swap,
		wg:          &sync.WaitGroup{},
		log:         log.Named("store"),
		quit:        make(chan struct{}),
	}
}
// SetMessageProvider allows switching the message provider used with a WakuStore
// Note: not synchronized; intended to be called before Start.
func (store *WakuStore) SetMessageProvider(p MessageProvider) {
store.msgProvider = p
}
// Start initializes the WakuStore by enabling the protocol and fetching records from a message provider
// It is idempotent: a second call returns immediately. Without a message
// provider the protocol is announced but nothing is registered or persisted.
func (store *WakuStore) Start(ctx context.Context) {
if store.started {
return
}
if store.msgProvider == nil {
store.log.Info("Store protocol started (no message provider)")
return
}
store.started = true
store.ctx = ctx
// Buffered so bursts of incoming envelopes do not block publishers.
store.MsgC = make(chan *protocol.Envelope, 1024)
// Register the stream handler only after internal state is initialized,
// since incoming requests will use msgProvider via onRequest.
store.h.SetStreamHandlerMatch(StoreID_v20beta4, protocol.PrefixTextMatch(string(StoreID_v20beta4)), store.onRequest)
// wg.Add(2) matches the two goroutines launched below.
store.wg.Add(2)
go store.storeIncomingMessages(ctx)
go store.updateMetrics(ctx)
store.log.Info("Store protocol started")
}
// storeMessage persists a single envelope via the message provider. Messages
// stamped too far in the future are rejected so they cannot "jump" to the
// front of the queue, and ephemeral messages are silently skipped.
func (store *WakuStore) storeMessage(env *protocol.Envelope) error {
	index := env.Index()
	if index.SenderTime-index.ReceiverTime > int64(MaxTimeVariance) {
		return ErrFutureMessage
	}

	// Ephemeral messages are not meant to be persisted.
	if env.Message().Ephemeral {
		return nil
	}

	if err := store.msgProvider.Put(env); err != nil {
		store.log.Error("storing message", zap.Error(err))
		metrics.RecordStoreError(store.ctx, "store_failure")
		return err
	}

	return nil
}
// storeIncomingMessages drains MsgC, persisting each envelope until the
// channel is closed. Errors are deliberately discarded here: storeMessage
// already logs and records metrics for failures.
func (store *WakuStore) storeIncomingMessages(ctx context.Context) {
defer store.wg.Done()
for envelope := range store.MsgC {
_ = store.storeMessage(envelope)
}
}
// updateMetrics periodically records the number of messages held by the
// provider, until the store's quit channel is closed.
func (store *WakuStore) updateMetrics(ctx context.Context) {
	defer store.wg.Done()

	ticker := time.NewTicker(3 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-store.quit:
			return
		case <-ticker.C:
			count, err := store.msgProvider.Count()
			if err != nil {
				store.log.Error("updating store metrics", zap.Error(err))
				continue
			}
			metrics.RecordMessage(store.ctx, "stored", count)
		}
	}
}
// onRequest handles an incoming store query stream: it decodes a HistoryRPC
// request, resolves it against the local message provider, and writes the
// response back on the same stream. The stream is reset when the response
// cannot be written so the peer does not wait on a half-finished exchange.
func (store *WakuStore) onRequest(s network.Stream) {
	defer s.Close()

	logger := store.log.With(logging.HostID("peer", s.Conn().RemotePeer()))

	reader := protoio.NewDelimitedReader(s, math.MaxInt32)
	writer := protoio.NewDelimitedWriter(s)

	request := &pb.HistoryRPC{}
	if err := reader.ReadMsg(request); err != nil {
		logger.Error("reading request", zap.Error(err))
		metrics.RecordStoreError(store.ctx, "decodeRPCFailure")
		return
	}

	logger = logger.With(zap.String("id", request.RequestId))
	if query := request.Query; query != nil {
		logger = logger.With(logging.Filters(query.GetContentFilters()))
	}
	logger.Info("received query")

	// The response echoes the request id so the peer can correlate it.
	response := &pb.HistoryRPC{
		RequestId: request.RequestId,
		Response:  store.FindMessages(request.Query),
	}

	logger = logger.With(zap.Int("messages", len(response.Response.Messages)))
	if err := writer.WriteMsg(response); err != nil {
		logger.Error("writing response", zap.Error(err), logging.PagingInfo(response.Response.PagingInfo))
		_ = s.Reset()
		return
	}
	logger.Info("response sent")
}
// HistoryRequestParameters collects the per-request settings built up by
// HistoryRequestOption values before a history query is sent.
type HistoryRequestParameters struct {
selectedPeer peer.ID
requestId []byte
cursor *pb.Index
pageSize uint64
asc bool
s *WakuStore
}
// HistoryRequestOption mutates HistoryRequestParameters; options are applied
// in order, so later options override earlier ones.
type HistoryRequestOption func(*HistoryRequestParameters)
// WithPeer is an option used to specify the peerID to request the message history
func WithPeer(p peer.ID) HistoryRequestOption {
return func(params *HistoryRequestParameters) {
params.selectedPeer = p
}
}
// WithAutomaticPeerSelection is an option used to randomly select a peer from the peer store
// to request the message history. If a list of specific peers is passed, the peer will be chosen
// from that list assuming it supports the chosen protocol, otherwise it will chose a peer
// from the node peerstore
// On selection failure the error is only logged and selectedPeer is left unset.
func WithAutomaticPeerSelection(fromThesePeers ...peer.ID) HistoryRequestOption {
return func(params *HistoryRequestParameters) {
p, err := utils.SelectPeer(params.s.h, string(StoreID_v20beta4), fromThesePeers, params.s.log)
if err == nil {
params.selectedPeer = *p
} else {
params.s.log.Info("selecting peer", zap.Error(err))
}
}
}
// WithFastestPeerSelection is an option used to select a peer from the peer store
// with the lowest ping. If a list of specific peers is passed, the peer will be chosen
// from that list assuming it supports the chosen protocol, otherwise it will chose a peer
// from the node peerstore
// On selection failure the error is only logged and selectedPeer is left unset.
func WithFastestPeerSelection(ctx context.Context, fromThesePeers ...peer.ID) HistoryRequestOption {
return func(params *HistoryRequestParameters) {
p, err := utils.SelectPeerWithLowestRTT(ctx, params.s.h, string(StoreID_v20beta4), fromThesePeers, params.s.log)
if err == nil {
params.selectedPeer = *p
} else {
params.s.log.Info("selecting peer", zap.Error(err))
}
}
}
// WithRequestId is an option that assigns a specific request id to the
// history request instead of generating a random one.
func WithRequestId(requestId []byte) HistoryRequestOption {
	return func(params *HistoryRequestParameters) { params.requestId = requestId }
}
// WithAutomaticRequestId is an option that generates a random request id for
// the history request.
func WithAutomaticRequestId() HistoryRequestOption {
	return func(params *HistoryRequestParameters) { params.requestId = protocol.GenerateRequestId() }
}
// WithCursor is an option that resumes a paginated history query from the
// given index.
func WithCursor(c *pb.Index) HistoryRequestOption {
	return func(params *HistoryRequestParameters) { params.cursor = c }
}
// WithPaging is an option used to specify the order and maximum number of
// records to return.
func WithPaging(asc bool, pageSize uint64) HistoryRequestOption {
	return func(params *HistoryRequestParameters) {
		// The two assignments are independent; order does not matter.
		params.pageSize = pageSize
		params.asc = asc
	}
}
// DefaultOptions returns the default settings used when querying a store
// node: a random request id, automatic peer selection, and forward paging
// with the maximum page size. Caller-supplied options are applied afterwards
// and therefore override these.
func DefaultOptions() []HistoryRequestOption {
	opts := make([]HistoryRequestOption, 0, 3)
	opts = append(opts, WithAutomaticRequestId())
	opts = append(opts, WithAutomaticPeerSelection())
	opts = append(opts, WithPaging(true, MaxPageSize))
	return opts
}
// queryFrom sends the history query q to selectedPeer over a fresh store
// stream and returns the decoded response. requestId is hex-encoded into the
// RPC envelope. A missing Response/PagingInfo in the reply is normalized to
// empty structs so callers can dereference them without nil checks.
func (store *WakuStore) queryFrom(ctx context.Context, q *pb.HistoryQuery, selectedPeer peer.ID, requestId []byte) (*pb.HistoryResponse, error) {
	logger := store.log.With(logging.HostID("peer", selectedPeer))
	logger.Info("querying message history")
	// We connect first so dns4 addresses are resolved (NewStream does not do it)
	err := store.h.Connect(ctx, store.h.Peerstore().PeerInfo(selectedPeer))
	if err != nil {
		logger.Error("connecting to peer", zap.Error(err))
		return nil, err
	}
	connOpt, err := store.h.NewStream(ctx, selectedPeer, StoreID_v20beta4)
	if err != nil {
		logger.Error("creating stream to peer", zap.Error(err))
		return nil, err
	}
	// NOTE(review): defers run LIFO, so Reset() executes before Close() and the
	// stream is always aborted, even on success — confirm this is intentional.
	defer connOpt.Close()
	defer func() {
		_ = connOpt.Reset()
	}()
	historyRequest := &pb.HistoryRPC{Query: q, RequestId: hex.EncodeToString(requestId)}
	writer := protoio.NewDelimitedWriter(connOpt)
	reader := protoio.NewDelimitedReader(connOpt, math.MaxInt32)
	err = writer.WriteMsg(historyRequest)
	if err != nil {
		logger.Error("writing request", zap.Error(err))
		return nil, err
	}
	historyResponseRPC := &pb.HistoryRPC{}
	err = reader.ReadMsg(historyResponseRPC)
	if err != nil {
		logger.Error("reading response", zap.Error(err))
		// NOTE(review): this records against store.ctx while the metric below
		// uses the caller's ctx — verify which context is intended.
		metrics.RecordStoreError(store.ctx, "decodeRPCFailure")
		return nil, err
	}
	// Normalize optional fields so downstream code (Query/Next) can read
	// Response.PagingInfo.Cursor safely.
	if historyResponseRPC.Response == nil {
		historyResponseRPC.Response = new(pb.HistoryResponse)
	}
	if historyResponseRPC.Response.PagingInfo == nil {
		historyResponseRPC.Response.PagingInfo = new(pb.PagingInfo)
		historyResponseRPC.Response.PagingInfo.Cursor = new(pb.Index)
	}
	metrics.RecordMessage(ctx, "retrieved", len(historyResponseRPC.Response.Messages))
	return historyResponseRPC.Response, nil
}
// Query sends a history query to a store node and returns the first page of
// results as a *Result, whose cursor can be passed to Next to fetch further
// pages. Defaults (random request id, automatic peer selection, forward
// paging) are applied first, so explicit opts override them.
func (store *WakuStore) Query(ctx context.Context, query Query, opts ...HistoryRequestOption) (*Result, error) {
	q := &pb.HistoryQuery{
		PubsubTopic: query.Topic,
		ContentFilters: []*pb.ContentFilter{},
		StartTime: query.StartTime,
		EndTime: query.EndTime,
		PagingInfo: &pb.PagingInfo{},
	}
	// Translate the content topics into protobuf content filters.
	for _, cf := range query.ContentTopics {
		q.ContentFilters = append(q.ContentFilters, &pb.ContentFilter{ContentTopic: cf})
	}
	if len(q.ContentFilters) > MaxContentFilters {
		return nil, ErrMaxContentFilters
	}
	params := new(HistoryRequestParameters)
	params.s = store
	// Apply defaults first so caller-supplied options can override them.
	optList := DefaultOptions()
	optList = append(optList, opts...)
	for _, opt := range optList {
		opt(params)
	}
	if params.selectedPeer == "" {
		return nil, ErrNoPeersAvailable
	}
	if len(params.requestId) == 0 {
		return nil, ErrInvalidId
	}
	if params.cursor != nil {
		q.PagingInfo.Cursor = params.cursor
	}
	if params.asc {
		q.PagingInfo.Direction = pb.PagingInfo_FORWARD
	} else {
		q.PagingInfo.Direction = pb.PagingInfo_BACKWARD
	}
	// Clamp the page size to the protocol maximum.
	pageSize := params.pageSize
	if pageSize == 0 || pageSize > uint64(MaxPageSize) {
		pageSize = MaxPageSize
	}
	q.PagingInfo.PageSize = pageSize
	response, err := store.queryFrom(ctx, q, params.selectedPeer, params.requestId)
	if err != nil {
		return nil, err
	}
	if response.Error == pb.HistoryResponse_INVALID_CURSOR {
		return nil, errors.New("invalid cursor")
	}
	// Log the hashes of the retrieved messages for traceability.
	var messageIDs [][]byte
	for _, m := range response.Messages {
		messageID, _, _ := m.Hash()
		messageIDs = append(messageIDs, messageID)
	}
	store.log.Info("waku.store retrieved", logging.HexArray("hashes", messageIDs))
	result := &Result{
		Messages: response.Messages,
		query: q,
		cursor: response.PagingInfo.Cursor,
		peerId: params.selectedPeer,
	}
	return result, nil
}
// Find the first message that matches a criteria. criteriaCB is a function that will be invoked for each message and returns true if the message matches the criteria
// Pages are fetched on demand via Next; returns (nil, nil) if the result set
// is exhausted without a match.
func (store *WakuStore) Find(ctx context.Context, query Query, cb criteriaFN, opts ...HistoryRequestOption) (*pb.WakuMessage, error) {
	if cb == nil {
		return nil, errors.New("callback can't be null")
	}
	result, err := store.Query(ctx, query, opts...)
	if err != nil {
		return nil, err
	}
	for {
		// Scan the current page for a match.
		for _, m := range result.Messages {
			found, err := cb(m)
			if err != nil {
				return nil, err
			}
			if found {
				return m, nil
			}
		}
		if result.IsComplete() {
			break
		}
		// Fetch the next page and keep scanning.
		result, err = store.Next(ctx, result)
		if err != nil {
			return nil, err
		}
	}
	return nil, nil
}
// Next is used with to retrieve the next page of rows from a query response.
// If no more records are found, the result will not contain any messages.
// This function is useful for iterating over results without having to manually
// specify the cursor and pagination order and max number of results
func (store *WakuStore) Next(ctx context.Context, r *Result) (*Result, error) {
	// A completed result yields an empty page rather than an error.
	if r.IsComplete() {
		return &Result{
			Messages: []*pb.WakuMessage{},
			// NOTE(review): this returns a non-nil empty cursor; if
			// Result.IsComplete tests cursor == nil, this terminal result would
			// not report complete — confirm the terminal-cursor convention.
			cursor: &pb.Index{},
			query: r.query,
			peerId: r.PeerID(),
		}, nil
	}
	// Re-issue the previous query unchanged, replacing only the cursor with
	// the one returned by the last page.
	q := &pb.HistoryQuery{
		PubsubTopic: r.Query().PubsubTopic,
		ContentFilters: r.Query().ContentFilters,
		StartTime: r.Query().StartTime,
		EndTime: r.Query().EndTime,
		PagingInfo: &pb.PagingInfo{
			PageSize: r.Query().PagingInfo.PageSize,
			Direction: r.Query().PagingInfo.Direction,
			Cursor: &pb.Index{
				Digest: r.Cursor().Digest,
				ReceiverTime: r.Cursor().ReceiverTime,
				SenderTime: r.Cursor().SenderTime,
				PubsubTopic: r.Cursor().PubsubTopic,
			},
		},
	}
	response, err := store.queryFrom(ctx, q, r.PeerID(), protocol.GenerateRequestId())
	if err != nil {
		return nil, err
	}
	if response.Error == pb.HistoryResponse_INVALID_CURSOR {
		return nil, errors.New("invalid cursor")
	}
	return &Result{
		Messages: response.Messages,
		cursor: response.PagingInfo.Cursor,
		query: q,
		peerId: r.PeerID(),
	}, nil
}
// queryLoop sends the query to every peer in candidateList concurrently and
// aggregates the messages from all successful responses. It returns an error
// only if every request fails.
//
// BUGFIX: the per-peer closure was invoked synchronously (missing the `go`
// keyword), so the WaitGroup and buffered result channel — machinery that only
// makes sense for concurrent fan-out — were running the queries one by one.
func (store *WakuStore) queryLoop(ctx context.Context, query *pb.HistoryQuery, candidateList []peer.ID) ([]*pb.WakuMessage, error) {
	queryWg := sync.WaitGroup{}
	queryWg.Add(len(candidateList))

	// Buffered to len(candidateList) so no goroutine blocks on send.
	resultChan := make(chan *pb.HistoryResponse, len(candidateList))

	for _, peer := range candidateList {
		peer := peer // capture the loop variable (pre-Go-1.22 semantics)
		go func() {
			defer queryWg.Done()
			result, err := store.queryFrom(ctx, query, peer, protocol.GenerateRequestId())
			if err == nil {
				resultChan <- result
				return
			}
			store.log.Error("resuming history", logging.HostID("peer", peer), zap.Error(err))
		}()
	}

	queryWg.Wait()
	close(resultChan)

	// Concatenate the pages from every peer that answered successfully.
	var messages []*pb.WakuMessage
	hasResults := false
	for result := range resultChan {
		hasResults = true
		messages = append(messages, result.Messages...)
	}

	if hasResults {
		return messages, nil
	}

	return nil, ErrFailedQuery
}
// findLastSeen returns the timestamp of the most recent message known to the
// underlying message provider.
func (store *WakuStore) findLastSeen() (int64, error) {
	timestamp, err := store.msgProvider.MostRecentTimestamp()
	return timestamp, err
}
// max returns the larger of the two int64 values.
func max(x, y int64) int64 {
	if y > x {
		return y
	}
	return x
}
// Resume retrieves the history of waku messages published on the default waku pubsub topic since the last time the waku store node has been online
// messages are stored in the store node's messages field and in the message db
// the offline time window is measured as the difference between the current time and the timestamp of the most recent persisted waku message
// an offset of 20 second is added to the time window to count for nodes asynchrony
// the history is fetched from one of the peers persisted in the waku store node's peer manager unit
// peerList indicates the list of peers to query from. The history is fetched from the first available peer in this list. Such candidates should be found through a discovery method (to be developed).
// if no peerList is passed, one of the peers in the underlying peer manager unit of the store protocol is picked randomly to fetch the history from. The history gets fetched successfully if the dialed peer has been online during the queried time window.
// the resume proc returns the number of retrieved messages if no error occurs, otherwise returns the error string
func (store *WakuStore) Resume(ctx context.Context, pubsubTopic string, peerList []peer.ID) (int, error) {
	if !store.started {
		return 0, errors.New("can't resume: store has not started")
	}

	currentTime := utils.GetUnixEpoch()
	lastSeenTime, err := store.findLastSeen()
	if err != nil {
		return 0, err
	}

	// Widen the window by 20 seconds on each side to account for clock
	// asynchrony between nodes. Timestamps here are nanosecond epochs
	// (utils.GetUnixEpoch), so the offset must be 20s in nanoseconds.
	// BUGFIX: was int64(20 * time.Nanosecond), i.e. a 20ns offset instead of
	// the documented 20 seconds.
	offset := int64(20 * time.Second)
	currentTime = currentTime + offset
	lastSeenTime = max(lastSeenTime-offset, 0) // clamp at epoch zero

	rpc := &pb.HistoryQuery{
		PubsubTopic: pubsubTopic,
		StartTime: lastSeenTime,
		EndTime: currentTime,
		PagingInfo: &pb.PagingInfo{
			PageSize: 0,
			Direction: pb.PagingInfo_BACKWARD,
		},
	}

	// If the caller supplied no candidates, pick one store peer at random.
	if len(peerList) == 0 {
		p, err := utils.SelectPeer(store.h, string(StoreID_v20beta4), nil, store.log)
		if err != nil {
			store.log.Info("selecting peer", zap.Error(err))
			return -1, ErrNoPeersAvailable
		}

		peerList = append(peerList, *p)
	}

	messages, err := store.queryLoop(ctx, rpc, peerList)
	if err != nil {
		store.log.Error("resuming history", zap.Error(err))
		return -1, ErrFailedToResumeHistory
	}

	// Persist the fetched messages; count only those stored successfully.
	msgCount := 0
	for _, msg := range messages {
		if err = store.storeMessage(protocol.NewEnvelope(msg, utils.GetUnixEpoch(), pubsubTopic)); err == nil {
			msgCount++
		}
	}

	store.log.Info("retrieved messages since the last online time", zap.Int("messages", len(messages)))

	return msgCount, nil
}
// MessageChannel returns the channel on which the store receives envelopes
// to be persisted.
func (store *WakuStore) MessageChannel() chan *protocol.Envelope {
	return store.MsgC
}
// TODO: queryWithAccounting
// Stop closes the store message channel and removes the protocol stream handler
func (store *WakuStore) Stop() {
	store.started = false

	if store.MsgC != nil {
		// NOTE(review): closing MsgC while a producer goroutine may still send
		// into it would panic — confirm producers are stopped before Stop runs.
		close(store.MsgC)
	}

	if store.msgProvider != nil {
		// Signal the provider-consuming goroutine to exit.
		// NOTE(review): quit is created unbuffered (see NewWakuStore), so this
		// send blocks until received — it will hang if the receiver already
		// exited; confirm.
		store.quit <- struct{}{}
	}

	if store.h != nil {
		store.h.RemoveStreamHandler(StoreID_v20beta4)
	}

	// Wait for all background goroutines tracked by wg to finish.
	store.wg.Wait()
}

View File

@ -0,0 +1,348 @@
package store
import (
"context"
"encoding/hex"
"errors"
"math"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-msgio/protoio"
"go.uber.org/zap"
"github.com/waku-org/go-waku/logging"
"github.com/waku-org/go-waku/waku/v2/metrics"
"github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
"github.com/waku-org/go-waku/waku/v2/utils"
)
// Query describes the criteria for retrieving historical messages: a pubsub
// topic, optional content topics, and a time window.
type Query struct {
	Topic string // pubsub topic to query
	ContentTopics []string // content topics to filter by (at most MaxContentFilters)
	StartTime int64 // start of the time window (epoch value; see utils.GetUnixEpoch)
	EndTime int64 // end of the time window
}
// Result represents a valid response from a store node
type Result struct {
	Messages []*pb.WakuMessage // messages in the current page
	query *pb.HistoryQuery // query that produced this page (reused by Next)
	cursor *pb.Index // cursor to the next page; nil when exhausted
	peerId peer.ID // store node the query was sent to
}
// Cursor returns the pagination cursor pointing at the next page of results;
// nil once the result set is exhausted (see IsComplete).
func (r *Result) Cursor() *pb.Index {
	return r.cursor
}
// IsComplete reports whether there are no further pages to fetch.
func (r *Result) IsComplete() bool {
	return r.cursor == nil
}
// PeerID returns the peer this result's query was sent to; Next reuses it for
// subsequent pages.
func (r *Result) PeerID() peer.ID {
	return r.peerId
}
// Query returns the protobuf history query this result was produced from.
func (r *Result) Query() *pb.HistoryQuery {
	return r.query
}
type criteriaFN = func(msg *pb.WakuMessage) (bool, error)
type HistoryRequestParameters struct {
selectedPeer peer.ID
requestId []byte
cursor *pb.Index
pageSize uint64
asc bool
s *WakuStore
}
type HistoryRequestOption func(*HistoryRequestParameters)
// WithPeer is an option used to specify the peerID to request the message history
func WithPeer(p peer.ID) HistoryRequestOption {
return func(params *HistoryRequestParameters) {
params.selectedPeer = p
}
}
// WithAutomaticPeerSelection is an option used to randomly select a peer from the peer store
// to request the message history. If a list of specific peers is passed, the peer will be chosen
// from that list assuming it supports the chosen protocol, otherwise it will chose a peer
// from the node peerstore
func WithAutomaticPeerSelection(fromThesePeers ...peer.ID) HistoryRequestOption {
return func(params *HistoryRequestParameters) {
p, err := utils.SelectPeer(params.s.h, string(StoreID_v20beta4), fromThesePeers, params.s.log)
if err == nil {
params.selectedPeer = *p
} else {
params.s.log.Info("selecting peer", zap.Error(err))
}
}
}
// WithFastestPeerSelection is an option used to select a peer from the peer store
// with the lowest ping. If a list of specific peers is passed, the peer will be chosen
// from that list assuming it supports the chosen protocol, otherwise it will chose a peer
// from the node peerstore
func WithFastestPeerSelection(ctx context.Context, fromThesePeers ...peer.ID) HistoryRequestOption {
return func(params *HistoryRequestParameters) {
p, err := utils.SelectPeerWithLowestRTT(ctx, params.s.h, string(StoreID_v20beta4), fromThesePeers, params.s.log)
if err == nil {
params.selectedPeer = *p
} else {
params.s.log.Info("selecting peer", zap.Error(err))
}
}
}
// WithRequestId is an option that assigns a specific request id to the
// history request instead of generating a random one.
func WithRequestId(requestId []byte) HistoryRequestOption {
	return func(params *HistoryRequestParameters) {
		params.requestId = requestId
	}
}
// WithAutomaticRequestId is an option that generates a random request id for
// the history request.
func WithAutomaticRequestId() HistoryRequestOption {
	return func(params *HistoryRequestParameters) {
		params.requestId = protocol.GenerateRequestId()
	}
}
// WithCursor is an option that resumes a paginated history query from the
// given index.
func WithCursor(c *pb.Index) HistoryRequestOption {
	return func(params *HistoryRequestParameters) {
		params.cursor = c
	}
}
// WithPaging is an option used to specify the order and maximum number of records to return
func WithPaging(asc bool, pageSize uint64) HistoryRequestOption {
return func(params *HistoryRequestParameters) {
params.asc = asc
params.pageSize = pageSize
}
}
// Default options to be used when querying a store node for results
func DefaultOptions() []HistoryRequestOption {
return []HistoryRequestOption{
WithAutomaticRequestId(),
WithAutomaticPeerSelection(),
WithPaging(true, MaxPageSize),
}
}
// queryFrom sends the history query q to selectedPeer over a fresh store
// stream and returns the decoded response. requestId is hex-encoded into the
// RPC envelope. An RPC with no Response payload yields ErrEmptyResponse.
func (store *WakuStore) queryFrom(ctx context.Context, q *pb.HistoryQuery, selectedPeer peer.ID, requestId []byte) (*pb.HistoryResponse, error) {
	logger := store.log.With(logging.HostID("peer", selectedPeer))
	logger.Info("querying message history")
	// We connect first so dns4 addresses are resolved (NewStream does not do it)
	err := store.h.Connect(ctx, store.h.Peerstore().PeerInfo(selectedPeer))
	if err != nil {
		logger.Error("connecting to peer", zap.Error(err))
		return nil, err
	}
	connOpt, err := store.h.NewStream(ctx, selectedPeer, StoreID_v20beta4)
	if err != nil {
		logger.Error("creating stream to peer", zap.Error(err))
		return nil, err
	}
	// NOTE(review): defers run LIFO, so Reset() executes before Close() and the
	// stream is always aborted, even on success — confirm this is intentional.
	defer connOpt.Close()
	defer func() {
		_ = connOpt.Reset()
	}()
	historyRequest := &pb.HistoryRPC{Query: q, RequestId: hex.EncodeToString(requestId)}
	writer := protoio.NewDelimitedWriter(connOpt)
	reader := protoio.NewDelimitedReader(connOpt, math.MaxInt32)
	err = writer.WriteMsg(historyRequest)
	if err != nil {
		logger.Error("writing request", zap.Error(err))
		return nil, err
	}
	historyResponseRPC := &pb.HistoryRPC{}
	err = reader.ReadMsg(historyResponseRPC)
	if err != nil {
		logger.Error("reading response", zap.Error(err))
		// NOTE(review): this records against store.ctx while the metric below
		// uses the caller's ctx — verify which context is intended.
		metrics.RecordStoreError(store.ctx, "decodeRPCFailure")
		return nil, err
	}
	// Unlike earlier revisions, a missing Response is now an error; note that
	// Response.PagingInfo is NOT normalized here — callers must handle nil.
	if historyResponseRPC.Response == nil {
		logger.Error("empty response")
		metrics.RecordStoreError(store.ctx, "emptyRpcResponseFailure")
		return nil, ErrEmptyResponse
	}
	metrics.RecordMessage(ctx, "retrieved", len(historyResponseRPC.Response.Messages))
	return historyResponseRPC.Response, nil
}
func (store *WakuStore) Query(ctx context.Context, query Query, opts ...HistoryRequestOption) (*Result, error) {
q := &pb.HistoryQuery{
PubsubTopic: query.Topic,
ContentFilters: []*pb.ContentFilter{},
StartTime: query.StartTime,
EndTime: query.EndTime,
PagingInfo: &pb.PagingInfo{},
}
for _, cf := range query.ContentTopics {
q.ContentFilters = append(q.ContentFilters, &pb.ContentFilter{ContentTopic: cf})
}
if len(q.ContentFilters) > MaxContentFilters {
return nil, ErrMaxContentFilters
}
params := new(HistoryRequestParameters)
params.s = store
optList := DefaultOptions()
optList = append(optList, opts...)
for _, opt := range optList {
opt(params)
}
if params.selectedPeer == "" {
return nil, ErrNoPeersAvailable
}
if len(params.requestId) == 0 {
return nil, ErrInvalidId
}
if params.cursor != nil {
q.PagingInfo.Cursor = params.cursor
}
if params.asc {
q.PagingInfo.Direction = pb.PagingInfo_FORWARD
} else {
q.PagingInfo.Direction = pb.PagingInfo_BACKWARD
}
pageSize := params.pageSize
if pageSize == 0 || pageSize > uint64(MaxPageSize) {
pageSize = MaxPageSize
}
q.PagingInfo.PageSize = pageSize
response, err := store.queryFrom(ctx, q, params.selectedPeer, params.requestId)
if err != nil {
return nil, err
}
if response.Error == pb.HistoryResponse_INVALID_CURSOR {
return nil, errors.New("invalid cursor")
}
var messageIDs [][]byte
for _, m := range response.Messages {
messageID, _, _ := m.Hash()
messageIDs = append(messageIDs, messageID)
}
store.log.Info("waku.store retrieved", logging.HexArray("hashes", messageIDs))
result := &Result{
Messages: response.Messages,
query: q,
cursor: response.PagingInfo.Cursor,
peerId: params.selectedPeer,
}
return result, nil
}
// Find the first message that matches a criteria. criteriaCB is a function that will be invoked for each message and returns true if the message matches the criteria
func (store *WakuStore) Find(ctx context.Context, query Query, cb criteriaFN, opts ...HistoryRequestOption) (*pb.WakuMessage, error) {
if cb == nil {
return nil, errors.New("callback can't be null")
}
result, err := store.Query(ctx, query, opts...)
if err != nil {
return nil, err
}
for {
for _, m := range result.Messages {
found, err := cb(m)
if err != nil {
return nil, err
}
if found {
return m, nil
}
}
if result.IsComplete() {
break
}
result, err = store.Next(ctx, result)
if err != nil {
return nil, err
}
}
return nil, nil
}
// Next is used with to retrieve the next page of rows from a query response.
// If no more records are found, the result will not contain any messages.
// This function is useful for iterating over results without having to manually
// specify the cursor and pagination order and max number of results
func (store *WakuStore) Next(ctx context.Context, r *Result) (*Result, error) {
	// A completed result yields an empty page (nil cursor keeps IsComplete true).
	if r.IsComplete() {
		return &Result{
			Messages: []*pb.WakuMessage{},
			cursor: nil,
			query: r.query,
			peerId: r.PeerID(),
		}, nil
	}
	// Re-issue the previous query unchanged, replacing only the cursor with
	// the one returned by the last page.
	q := &pb.HistoryQuery{
		PubsubTopic: r.Query().PubsubTopic,
		ContentFilters: r.Query().ContentFilters,
		StartTime: r.Query().StartTime,
		EndTime: r.Query().EndTime,
		PagingInfo: &pb.PagingInfo{
			PageSize: r.Query().PagingInfo.PageSize,
			Direction: r.Query().PagingInfo.Direction,
			Cursor: &pb.Index{
				Digest: r.Cursor().Digest,
				ReceiverTime: r.Cursor().ReceiverTime,
				SenderTime: r.Cursor().SenderTime,
				PubsubTopic: r.Cursor().PubsubTopic,
			},
		},
	}
	response, err := store.queryFrom(ctx, q, r.PeerID(), protocol.GenerateRequestId())
	if err != nil {
		return nil, err
	}
	if response.Error == pb.HistoryResponse_INVALID_CURSOR {
		return nil, errors.New("invalid cursor")
	}
	// NOTE(review): queryFrom no longer normalizes Response.PagingInfo; a
	// response with nil PagingInfo would panic on the dereference below —
	// confirm store nodes always populate it.
	return &Result{
		Messages: response.Messages,
		cursor: response.PagingInfo.Cursor,
		query: q,
		peerId: r.PeerID(),
	}, nil
}

View File

@ -0,0 +1,74 @@
package store
import (
"context"
"errors"
"sync"
"github.com/libp2p/go-libp2p/core/host"
libp2pProtocol "github.com/libp2p/go-libp2p/core/protocol"
"github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/protocol/swap"
"go.uber.org/zap"
)
// StoreID_v20beta4 is the current Waku Store protocol identifier
const StoreID_v20beta4 = libp2pProtocol.ID("/vac/waku/store/2.0.0-beta4")
// MaxPageSize is the maximum number of waku messages to return per page
const MaxPageSize = 100
// MaxContentFilters is the maximum number of allowed content filters in a query
const MaxContentFilters = 10
var (
	// ErrMaxContentFilters is returned when the number of content topics in the query
	// exceeds the limit
	ErrMaxContentFilters = errors.New("exceeds the maximum number of content filters allowed")
	// ErrNoPeersAvailable is returned when there are no store peers in the peer store
	// that could be used to retrieve message history
	ErrNoPeersAvailable = errors.New("no suitable remote peers")
	// ErrInvalidId is returned when no RequestID is given
	ErrInvalidId = errors.New("invalid request id")
	// ErrFailedToResumeHistory is returned when the node attempted to retrieve historic
	// messages to fill its own message history but for some reason it failed
	ErrFailedToResumeHistory = errors.New("failed to resume the history")
	// ErrFailedQuery is emitted when the query fails to return results
	ErrFailedQuery = errors.New("failed to resolve the query")
	// ErrFutureMessage is returned when a message's timestamp lies too far in
	// the future to be accepted for storage.
	ErrFutureMessage = errors.New("message timestamp in the future")
	// ErrEmptyResponse is returned when a store node replies to a history
	// query with an RPC carrying no response payload.
	ErrEmptyResponse = errors.New("empty store response")
)
// WakuStore implements the waku store protocol: it persists incoming message
// envelopes through a MessageProvider and serves history queries to peers.
type WakuStore struct {
	ctx context.Context // long-lived context — presumably set when the store starts; confirm
	MsgC chan *protocol.Envelope // channel of envelopes to be persisted
	wg *sync.WaitGroup // tracks background goroutines for shutdown
	log *zap.Logger
	started bool // true while the store is running
	quit chan struct{} // receives a signal to stop background work
	msgProvider MessageProvider // backing persistence layer
	h host.Host // libp2p host used for streams and peer selection
	swap *swap.WakuSwap
}
// NewWakuStore creates a WakuStore using a specific MessageProvider for storing the messages
// NOTE(review): ctx, MsgC and started are not initialized here — presumably
// assigned when the protocol is started; confirm against the Start method.
func NewWakuStore(host host.Host, swap *swap.WakuSwap, p MessageProvider, log *zap.Logger) *WakuStore {
	wakuStore := new(WakuStore)
	wakuStore.msgProvider = p
	wakuStore.h = host
	wakuStore.swap = swap
	wakuStore.wg = &sync.WaitGroup{}
	wakuStore.log = log.Named("store")
	wakuStore.quit = make(chan struct{}) // unbuffered: Stop's send blocks until received
	return wakuStore
}

View File

@ -67,8 +67,6 @@ func TestForwardPagination(t *testing.T) {
require.Len(t, messages, 2) require.Len(t, messages, 2)
require.Equal(t, []*pb.WakuMessage{msgList[4].Message(), msgList[5].Message()}, messages) require.Equal(t, []*pb.WakuMessage{msgList[4].Message(), msgList[5].Message()}, messages)
require.Equal(t, msgList[5].Index(), newPagingInfo.Cursor) require.Equal(t, msgList[5].Index(), newPagingInfo.Cursor)
require.Equal(t, pagingInfo.Direction, newPagingInfo.Direction)
require.Equal(t, pagingInfo.PageSize, newPagingInfo.PageSize)
// test for an initial pagination request with an empty cursor // test for an initial pagination request with an empty cursor
pagingInfo = &pb.PagingInfo{PageSize: 2, Direction: pb.PagingInfo_FORWARD} pagingInfo = &pb.PagingInfo{PageSize: 2, Direction: pb.PagingInfo_FORWARD}
@ -77,8 +75,6 @@ func TestForwardPagination(t *testing.T) {
require.Len(t, messages, 2) require.Len(t, messages, 2)
require.Equal(t, []*pb.WakuMessage{msgList[0].Message(), msgList[1].Message()}, messages) require.Equal(t, []*pb.WakuMessage{msgList[0].Message(), msgList[1].Message()}, messages)
require.Equal(t, msgList[1].Index(), newPagingInfo.Cursor) require.Equal(t, msgList[1].Index(), newPagingInfo.Cursor)
require.Equal(t, pagingInfo.Direction, newPagingInfo.Direction)
require.Equal(t, pagingInfo.PageSize, newPagingInfo.PageSize)
// test for an initial pagination request with an empty cursor to fetch the entire history // test for an initial pagination request with an empty cursor to fetch the entire history
pagingInfo = &pb.PagingInfo{PageSize: 13, Direction: pb.PagingInfo_FORWARD} pagingInfo = &pb.PagingInfo{PageSize: 13, Direction: pb.PagingInfo_FORWARD}
@ -86,18 +82,14 @@ func TestForwardPagination(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
require.Len(t, messages, 10) require.Len(t, messages, 10)
require.Equal(t, msgList[9].Message(), messages[9]) require.Equal(t, msgList[9].Message(), messages[9])
require.Equal(t, &pb.Index{}, newPagingInfo.Cursor) require.Nil(t, newPagingInfo.Cursor)
require.Equal(t, pagingInfo.Direction, newPagingInfo.Direction)
require.Equal(t, uint64(10), newPagingInfo.PageSize)
// test for an empty msgList // test for an empty msgList
pagingInfo = &pb.PagingInfo{PageSize: 2, Direction: pb.PagingInfo_FORWARD} pagingInfo = &pb.PagingInfo{PageSize: 2, Direction: pb.PagingInfo_FORWARD}
messages, newPagingInfo, err = findMessages(&pb.HistoryQuery{PagingInfo: pagingInfo}, MemoryDB(t)) messages, newPagingInfo, err = findMessages(&pb.HistoryQuery{PagingInfo: pagingInfo}, MemoryDB(t))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, messages, 0) require.Len(t, messages, 0)
require.Equal(t, pagingInfo.Cursor, newPagingInfo.Cursor) require.Nil(t, newPagingInfo.Cursor)
require.Equal(t, pagingInfo.Direction, newPagingInfo.Direction)
require.Equal(t, uint64(0), newPagingInfo.PageSize)
// test for a page size larger than the remaining messages // test for a page size larger than the remaining messages
pagingInfo = &pb.PagingInfo{PageSize: 10, Cursor: msgList[3].Index(), Direction: pb.PagingInfo_FORWARD} pagingInfo = &pb.PagingInfo{PageSize: 10, Cursor: msgList[3].Index(), Direction: pb.PagingInfo_FORWARD}
@ -105,26 +97,21 @@ func TestForwardPagination(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
require.Len(t, messages, 6) require.Len(t, messages, 6)
require.Equal(t, []*pb.WakuMessage{msgList[4].Message(), msgList[5].Message(), msgList[6].Message(), msgList[7].Message(), msgList[8].Message(), msgList[9].Message()}, messages) require.Equal(t, []*pb.WakuMessage{msgList[4].Message(), msgList[5].Message(), msgList[6].Message(), msgList[7].Message(), msgList[8].Message(), msgList[9].Message()}, messages)
require.Equal(t, &pb.Index{}, newPagingInfo.Cursor) require.Nil(t, newPagingInfo.Cursor)
require.Equal(t, pagingInfo.Direction, newPagingInfo.Direction)
require.Equal(t, uint64(6), newPagingInfo.PageSize)
// test for a page size larger than the maximum allowed page size // test for a page size larger than the maximum allowed page size
pagingInfo = &pb.PagingInfo{PageSize: MaxPageSize + 1, Cursor: msgList[3].Index(), Direction: pb.PagingInfo_FORWARD} pagingInfo = &pb.PagingInfo{PageSize: MaxPageSize + 1, Cursor: msgList[3].Index(), Direction: pb.PagingInfo_FORWARD}
messages, newPagingInfo, err = findMessages(&pb.HistoryQuery{PagingInfo: pagingInfo}, db) messages, newPagingInfo, err = findMessages(&pb.HistoryQuery{PagingInfo: pagingInfo}, db)
require.NoError(t, err) require.NoError(t, err)
require.True(t, len(messages) <= MaxPageSize) require.True(t, len(messages) <= MaxPageSize)
require.Equal(t, pagingInfo.Direction, newPagingInfo.Direction) require.Nil(t, newPagingInfo.Cursor)
require.True(t, newPagingInfo.PageSize <= MaxPageSize)
// test for a cursor pointing to the end of the message list // test for a cursor pointing to the end of the message list
pagingInfo = &pb.PagingInfo{PageSize: 10, Cursor: msgList[9].Index(), Direction: pb.PagingInfo_FORWARD} pagingInfo = &pb.PagingInfo{PageSize: 10, Cursor: msgList[9].Index(), Direction: pb.PagingInfo_FORWARD}
messages, newPagingInfo, err = findMessages(&pb.HistoryQuery{PagingInfo: pagingInfo}, db) messages, newPagingInfo, err = findMessages(&pb.HistoryQuery{PagingInfo: pagingInfo}, db)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, messages, 0) require.Len(t, messages, 0)
require.Equal(t, msgList[9].Index(), newPagingInfo.Cursor) require.Nil(t, newPagingInfo.Cursor)
require.Equal(t, pagingInfo.Direction, newPagingInfo.Direction)
require.Equal(t, uint64(0), newPagingInfo.PageSize)
// test for an invalid cursor // test for an invalid cursor
invalidIndex := protocol.NewEnvelope(&pb.WakuMessage{Payload: []byte{255, 255, 255}}, utils.GetUnixEpoch(), "test").Index() invalidIndex := protocol.NewEnvelope(&pb.WakuMessage{Payload: []byte{255, 255, 255}}, utils.GetUnixEpoch(), "test").Index()
@ -141,9 +128,7 @@ func TestForwardPagination(t *testing.T) {
messages, newPagingInfo, err = findMessages(&pb.HistoryQuery{PagingInfo: pagingInfo}, singleItemDB) messages, newPagingInfo, err = findMessages(&pb.HistoryQuery{PagingInfo: pagingInfo}, singleItemDB)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, messages, 1) require.Len(t, messages, 1)
require.Equal(t, &pb.Index{}, newPagingInfo.Cursor) require.Nil(t, newPagingInfo.Cursor)
require.Equal(t, pagingInfo.Direction, newPagingInfo.Direction)
require.Equal(t, uint64(1), newPagingInfo.PageSize)
} }
func TestBackwardPagination(t *testing.T) { func TestBackwardPagination(t *testing.T) {
@ -162,8 +147,6 @@ func TestBackwardPagination(t *testing.T) {
require.Equal(t, []*pb.WakuMessage{msgList[1].Message(), msgList[2].Message()}, messages) require.Equal(t, []*pb.WakuMessage{msgList[1].Message(), msgList[2].Message()}, messages)
require.Equal(t, msgList[1].Index(), newPagingInfo.Cursor) require.Equal(t, msgList[1].Index(), newPagingInfo.Cursor)
require.Equal(t, pagingInfo.Direction, newPagingInfo.Direction)
require.Equal(t, pagingInfo.PageSize, newPagingInfo.PageSize)
// test for an initial pagination request with an empty cursor // test for an initial pagination request with an empty cursor
pagingInfo = &pb.PagingInfo{PageSize: 2, Direction: pb.PagingInfo_BACKWARD} pagingInfo = &pb.PagingInfo{PageSize: 2, Direction: pb.PagingInfo_BACKWARD}
@ -172,8 +155,6 @@ func TestBackwardPagination(t *testing.T) {
require.Len(t, messages, 2) require.Len(t, messages, 2)
require.Equal(t, []*pb.WakuMessage{msgList[8].Message(), msgList[9].Message()}, messages) require.Equal(t, []*pb.WakuMessage{msgList[8].Message(), msgList[9].Message()}, messages)
require.Equal(t, msgList[8].Index(), newPagingInfo.Cursor) require.Equal(t, msgList[8].Index(), newPagingInfo.Cursor)
require.Equal(t, pagingInfo.Direction, newPagingInfo.Direction)
require.Equal(t, pagingInfo.PageSize, newPagingInfo.PageSize)
// test for an initial pagination request with an empty cursor to fetch the entire history // test for an initial pagination request with an empty cursor to fetch the entire history
pagingInfo = &pb.PagingInfo{PageSize: 13, Direction: pb.PagingInfo_BACKWARD} pagingInfo = &pb.PagingInfo{PageSize: 13, Direction: pb.PagingInfo_BACKWARD}
@ -182,9 +163,7 @@ func TestBackwardPagination(t *testing.T) {
require.Len(t, messages, 10) require.Len(t, messages, 10)
require.Equal(t, msgList[0].Message(), messages[0]) require.Equal(t, msgList[0].Message(), messages[0])
require.Equal(t, msgList[9].Message(), messages[9]) require.Equal(t, msgList[9].Message(), messages[9])
require.Equal(t, &pb.Index{}, newPagingInfo.Cursor) require.Nil(t, newPagingInfo.Cursor)
require.Equal(t, pagingInfo.Direction, newPagingInfo.Direction)
require.Equal(t, uint64(10), newPagingInfo.PageSize)
// test for an empty msgList // test for an empty msgList
pagingInfo = &pb.PagingInfo{PageSize: 2, Direction: pb.PagingInfo_BACKWARD} pagingInfo = &pb.PagingInfo{PageSize: 2, Direction: pb.PagingInfo_BACKWARD}
@ -201,9 +180,7 @@ func TestBackwardPagination(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
require.Len(t, messages, 3) require.Len(t, messages, 3)
require.Equal(t, []*pb.WakuMessage{msgList[0].Message(), msgList[1].Message(), msgList[2].Message()}, messages) require.Equal(t, []*pb.WakuMessage{msgList[0].Message(), msgList[1].Message(), msgList[2].Message()}, messages)
require.Equal(t, &pb.Index{}, newPagingInfo.Cursor) require.Nil(t, newPagingInfo.Cursor)
require.Equal(t, pagingInfo.Direction, newPagingInfo.Direction)
require.Equal(t, uint64(3), newPagingInfo.PageSize)
// test for a page size larger than the maximum allowed page size // test for a page size larger than the maximum allowed page size
pagingInfo = &pb.PagingInfo{PageSize: MaxPageSize + 1, Cursor: msgList[3].Index(), Direction: pb.PagingInfo_BACKWARD} pagingInfo = &pb.PagingInfo{PageSize: MaxPageSize + 1, Cursor: msgList[3].Index(), Direction: pb.PagingInfo_BACKWARD}
@ -218,9 +195,7 @@ func TestBackwardPagination(t *testing.T) {
messages, newPagingInfo, err = findMessages(&pb.HistoryQuery{PagingInfo: pagingInfo}, db) messages, newPagingInfo, err = findMessages(&pb.HistoryQuery{PagingInfo: pagingInfo}, db)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, messages, 0) require.Len(t, messages, 0)
require.Equal(t, msgList[0].Index(), newPagingInfo.Cursor) require.Nil(t, newPagingInfo.Cursor)
require.Equal(t, pagingInfo.Direction, newPagingInfo.Direction)
require.Equal(t, uint64(0), newPagingInfo.PageSize)
// test for an invalid cursor // test for an invalid cursor
invalidIndex := protocol.NewEnvelope(&pb.WakuMessage{Payload: []byte{255, 255, 255}}, utils.GetUnixEpoch(), "test").Index() invalidIndex := protocol.NewEnvelope(&pb.WakuMessage{Payload: []byte{255, 255, 255}}, utils.GetUnixEpoch(), "test").Index()
@ -237,7 +212,5 @@ func TestBackwardPagination(t *testing.T) {
messages, newPagingInfo, err = findMessages(&pb.HistoryQuery{PagingInfo: pagingInfo}, singleItemDB) messages, newPagingInfo, err = findMessages(&pb.HistoryQuery{PagingInfo: pagingInfo}, singleItemDB)
require.NoError(t, err) require.NoError(t, err)
require.Len(t, messages, 1) require.Len(t, messages, 1)
require.Equal(t, &pb.Index{}, newPagingInfo.Cursor) require.Nil(t, newPagingInfo.Cursor)
require.Equal(t, pagingInfo.Direction, newPagingInfo.Direction)
require.Equal(t, uint64(1), newPagingInfo.PageSize)
} }

View File

@ -0,0 +1,341 @@
package store
import (
"context"
"errors"
"math"
"sync"
"time"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-msgio/protoio"
"go.uber.org/zap"
"github.com/waku-org/go-waku/logging"
"github.com/waku-org/go-waku/waku/persistence"
"github.com/waku-org/go-waku/waku/v2/metrics"
"github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
"github.com/waku-org/go-waku/waku/v2/utils"
)
// MaxTimeVariance is the maximum duration in the future allowed for a
// message timestamp; envelopes whose sender time exceeds the receiver time
// by more than this are rejected by storeMessage (see ErrFutureMessage).
const MaxTimeVariance = time.Duration(20) * time.Second
// findMessages runs a history query against the given message provider,
// applying the protocol's paging defaults and limits, and returns the
// matching messages together with the paging info for the next page.
func findMessages(query *pb.HistoryQuery, msgProvider MessageProvider) ([]*pb.WakuMessage, *pb.PagingInfo, error) {
	if query.PagingInfo == nil {
		query.PagingInfo = &pb.PagingInfo{Direction: pb.PagingInfo_FORWARD}
	}

	// Clamp the page size: zero means "use the default", which is also the
	// protocol maximum.
	if pageSize := query.PagingInfo.PageSize; pageSize == 0 || pageSize > uint64(MaxPageSize) {
		query.PagingInfo.PageSize = MaxPageSize
	}

	if len(query.ContentFilters) > MaxContentFilters {
		return nil, nil, ErrMaxContentFilters
	}

	cursor, queryResult, err := msgProvider.Query(query)
	if err != nil {
		return nil, nil, err
	}

	// An empty result set needs no pagination.
	if len(queryResult) == 0 {
		return nil, &pb.PagingInfo{Cursor: nil}, nil
	}

	resultMessages := make([]*pb.WakuMessage, 0, len(queryResult))
	for _, stored := range queryResult {
		resultMessages = append(resultMessages, stored.Message)
	}

	return resultMessages, &pb.PagingInfo{Cursor: cursor}, nil
}
// FindMessages retrieves from the store the messages matching a history
// query and wraps them in a HistoryResponse, mapping known errors to the
// corresponding protobuf error code.
func (store *WakuStore) FindMessages(query *pb.HistoryQuery) *pb.HistoryResponse {
	messages, newPagingInfo, err := findMessages(query, store.msgProvider)

	result := new(pb.HistoryResponse)
	switch {
	case err == nil:
		// nothing to report
	case err == persistence.ErrInvalidCursor:
		result.Error = pb.HistoryResponse_INVALID_CURSOR
	default:
		// TODO: return error in pb.HistoryResponse
		store.log.Error("obtaining messages from db", zap.Error(err))
	}

	result.Messages = messages
	result.PagingInfo = newPagingInfo
	return result
}
// MessageProvider is the persistence backend used by WakuStore to store
// and retrieve historical messages.
type MessageProvider interface {
	// GetAll returns every message held by the provider.
	GetAll() ([]persistence.StoredMessage, error)
	// Query returns the messages matching a history query along with a
	// cursor for the next page (nil when there are no further pages).
	Query(query *pb.HistoryQuery) (*pb.Index, []persistence.StoredMessage, error)
	// Put persists an envelope.
	Put(env *protocol.Envelope) error
	// MostRecentTimestamp returns the timestamp of the newest stored message.
	MostRecentTimestamp() (int64, error)
	// Stop releases any resources held by the provider.
	Stop()
	// Count returns the number of stored messages.
	Count() (int, error)
}
// Store is the interface implemented by the Waku store protocol: serving
// history queries, querying remote peers, and resuming missed history.
type Store interface {
	// Start enables the protocol and its background workers.
	Start(ctx context.Context)
	// Query retrieves a page of historical messages matching the criteria.
	Query(ctx context.Context, query Query, opts ...HistoryRequestOption) (*Result, error)
	// Find returns the first historical message accepted by the callback cb.
	Find(ctx context.Context, query Query, cb criteriaFN, opts ...HistoryRequestOption) (*pb.WakuMessage, error)
	// Next retrieves the page following a previously obtained Result.
	Next(ctx context.Context, r *Result) (*Result, error)
	// Resume fetches the message history missed while the node was offline.
	Resume(ctx context.Context, pubsubTopic string, peerList []peer.ID) (int, error)
	// MessageChannel returns the channel used to submit envelopes for storage.
	MessageChannel() chan *protocol.Envelope
	// Stop disables the protocol and releases its resources.
	Stop()
}
// SetMessageProvider allows switching the message provider used with a WakuStore
// NOTE(review): the assignment is not synchronized; presumably this is only
// called before Start — confirm against callers.
func (store *WakuStore) SetMessageProvider(p MessageProvider) {
	store.msgProvider = p
}
// Start initializes the WakuStore by enabling the protocol and fetching records from a message provider
func (store *WakuStore) Start(ctx context.Context) {
	// Idempotent: a second call is a no-op.
	if store.started {
		return
	}

	// Without a message provider there is nothing to persist or serve, so
	// the stream handler and background workers are not set up at all.
	if store.msgProvider == nil {
		store.log.Info("Store protocol started (no message provider)")
		return
	}

	store.started = true
	store.ctx = ctx
	// Buffered channel so message delivery is not blocked by persistence.
	store.MsgC = make(chan *protocol.Envelope, 1024)

	store.h.SetStreamHandlerMatch(StoreID_v20beta4, protocol.PrefixTextMatch(string(StoreID_v20beta4)), store.onRequest)

	// Two workers: one persisting incoming messages, one updating metrics.
	store.wg.Add(2)
	go store.storeIncomingMessages(ctx)
	go store.updateMetrics(ctx)

	store.log.Info("Store protocol started")
}
// storeMessage persists an envelope via the message provider. Envelopes
// whose sender timestamp is too far ahead of the receive time are rejected
// so they cannot "jump" to the front of the queue, and ephemeral messages
// are skipped without error.
func (store *WakuStore) storeMessage(env *protocol.Envelope) error {
	index := env.Index()
	if index.SenderTime-index.ReceiverTime > int64(MaxTimeVariance) {
		return ErrFutureMessage
	}

	// Ephemeral messages are not meant to be persisted.
	if env.Message().Ephemeral {
		return nil
	}

	if err := store.msgProvider.Put(env); err != nil {
		store.log.Error("storing message", zap.Error(err))
		metrics.RecordStoreError(store.ctx, "store_failure")
		return err
	}

	return nil
}
// storeIncomingMessages consumes envelopes from the store's message channel
// and persists each one; it returns when MsgC is closed (see Stop).
// NOTE(review): each envelope is persisted in its own goroutine, so the
// number of concurrent writes is unbounded and write ordering is not
// guaranteed — confirm this is acceptable for the message provider in use.
func (store *WakuStore) storeIncomingMessages(ctx context.Context) {
	defer store.wg.Done()
	for envelope := range store.MsgC {
		go func(env *protocol.Envelope) {
			// Errors are already logged inside storeMessage.
			_ = store.storeMessage(env)
		}(envelope)
	}
}
// updateMetrics periodically records the number of stored messages until
// the store is stopped.
// NOTE(review): the ctx parameter is currently unused; shutdown is signaled
// exclusively through store.quit (sent by Stop) — confirm whether ctx
// cancellation should also terminate this loop.
func (store *WakuStore) updateMetrics(ctx context.Context) {
	ticker := time.NewTicker(3 * time.Second)
	defer ticker.Stop()
	defer store.wg.Done()

	for {
		select {
		case <-ticker.C:
			msgCount, err := store.msgProvider.Count()
			if err != nil {
				store.log.Error("updating store metrics", zap.Error(err))
			} else {
				metrics.RecordMessage(store.ctx, "stored", msgCount)
			}
		case <-store.quit:
			return
		}
	}
}
// onRequest handles an incoming store query over a libp2p stream: it decodes
// the HistoryRPC request, executes the query against the message provider,
// and writes back the response. The stream is reset on write failure.
func (store *WakuStore) onRequest(s network.Stream) {
	defer s.Close()
	logger := store.log.With(logging.HostID("peer", s.Conn().RemotePeer()))
	historyRPCRequest := &pb.HistoryRPC{}

	writer := protoio.NewDelimitedWriter(s)
	reader := protoio.NewDelimitedReader(s, math.MaxInt32)

	err := reader.ReadMsg(historyRPCRequest)
	if err != nil {
		logger.Error("reading request", zap.Error(err))
		metrics.RecordStoreError(store.ctx, "decodeRPCFailure")
		return
	}
	logger = logger.With(zap.String("id", historyRPCRequest.RequestId))
	if query := historyRPCRequest.Query; query != nil {
		logger = logger.With(logging.Filters(query.GetContentFilters()))
	} else {
		// BUG FIX: this branch previously logged `zap.Error(err)`, but err is
		// always nil here (ReadMsg succeeded above). The actual problem is a
		// request with no query, so report that instead.
		logger.Error("empty query rpc")
		metrics.RecordStoreError(store.ctx, "emptyRpcQueryFailure")
		return
	}
	logger.Info("received history query")
	metrics.RecordStoreQuery(store.ctx)

	historyResponseRPC := &pb.HistoryRPC{}
	historyResponseRPC.RequestId = historyRPCRequest.RequestId
	historyResponseRPC.Response = store.FindMessages(historyRPCRequest.Query)
	logger = logger.With(zap.Int("messages", len(historyResponseRPC.Response.Messages)))
	err = writer.WriteMsg(historyResponseRPC)
	if err != nil {
		logger.Error("writing response", zap.Error(err), logging.PagingInfo(historyResponseRPC.Response.PagingInfo))
		// Reset so the peer sees a failed stream rather than a truncated reply.
		_ = s.Reset()
	} else {
		logger.Info("response sent")
	}
}
// MessageChannel returns the channel on which envelopes to be persisted by
// the store are submitted. It is nil until Start has been called with a
// message provider configured.
func (store *WakuStore) MessageChannel() chan *protocol.Envelope {
	return store.MsgC
}
// TODO: queryWithAccounting

// Stop closes the store message channel and removes the protocol stream handler
func (store *WakuStore) Stop() {
	store.started = false

	// Closing MsgC terminates the storeIncomingMessages loop.
	if store.MsgC != nil {
		close(store.MsgC)
	}

	// Signal updateMetrics to exit.
	// NOTE(review): this unbuffered send assumes updateMetrics is running;
	// if a provider is set but Start was never called, it would block
	// forever — confirm against callers.
	if store.msgProvider != nil {
		store.quit <- struct{}{}
	}

	if store.h != nil {
		store.h.RemoveStreamHandler(StoreID_v20beta4)
	}

	// Wait for both background workers started in Start to finish.
	store.wg.Wait()
}
// queryLoop sends the history query to every peer in candidateList
// concurrently and aggregates the successful responses. It returns the
// retrieved messages, or ErrFailedQuery if every request failed.
func (store *WakuStore) queryLoop(ctx context.Context, query *pb.HistoryQuery, candidateList []peer.ID) ([]*pb.WakuMessage, error) {
	queryWg := sync.WaitGroup{}
	queryWg.Add(len(candidateList))

	// Buffered so each worker can deliver its result without blocking.
	resultChan := make(chan *pb.HistoryResponse, len(candidateList))

	for _, p := range candidateList {
		// BUG FIX: the closure was previously invoked synchronously (no `go`),
		// which defeated the WaitGroup/buffered-channel machinery and made the
		// queries run sequentially. It is now dispatched as a goroutine, with
		// the peer passed as an argument; the loop variable is also renamed so
		// it no longer shadows the imported `peer` package.
		go func(p peer.ID) {
			defer queryWg.Done()
			result, err := store.queryFrom(ctx, query, p, protocol.GenerateRequestId())
			if err == nil {
				resultChan <- result
				return
			}
			store.log.Error("resuming history", logging.HostID("peer", p), zap.Error(err))
		}(p)
	}

	queryWg.Wait()
	close(resultChan)

	var messages []*pb.WakuMessage
	hasResults := false
	for result := range resultChan {
		hasResults = true
		messages = append(messages, result.Messages...)
	}

	if hasResults {
		return messages, nil
	}

	return nil, ErrFailedQuery
}
// findLastSeen returns the timestamp of the most recent message persisted
// by the message provider.
func (store *WakuStore) findLastSeen() (int64, error) {
	return store.msgProvider.MostRecentTimestamp()
}
// max returns the larger of two int64 values.
func max(x, y int64) int64 {
	if y > x {
		return y
	}
	return x
}
// Resume retrieves the history of waku messages published on the default waku pubsub topic since the last time the waku store node has been online
// messages are stored in the store node's messages field and in the message db
// the offline time window is measured as the difference between the current time and the timestamp of the most recent persisted waku message
// an offset of 20 second is added to the time window to count for nodes asynchrony
// the history is fetched from one of the peers persisted in the waku store node's peer manager unit
// peerList indicates the list of peers to query from. The history is fetched from the first available peer in this list. Such candidates should be found through a discovery method (to be developed).
// if no peerList is passed, one of the peers in the underlying peer manager unit of the store protocol is picked randomly to fetch the history from. The history gets fetched successfully if the dialed peer has been online during the queried time window.
// the resume proc returns the number of retrieved messages if no error occurs, otherwise returns the error string
func (store *WakuStore) Resume(ctx context.Context, pubsubTopic string, peerList []peer.ID) (int, error) {
	if !store.started {
		return 0, errors.New("can't resume: store has not started")
	}

	// Fail fast before touching the provider if there is nobody to query.
	if len(peerList) == 0 {
		return -1, ErrNoPeersAvailable
	}

	currentTime := utils.GetUnixEpoch()
	lastSeenTime, err := store.findLastSeen()
	if err != nil {
		return 0, err
	}

	// Widen the window by 20 seconds on both ends to account for node clock
	// asynchrony, as documented above.
	// BUG FIX: the offset was previously computed as int64(20*time.Nanosecond)
	// (i.e. 20ns), contradicting the documented 20-second offset. Timestamps
	// here appear to be unix-epoch nanoseconds (they are compared directly
	// against time.Duration values elsewhere in this file) — confirm against
	// utils.GetUnixEpoch.
	offset := int64(20 * time.Second)
	currentTime = currentTime + offset
	lastSeenTime = max(lastSeenTime-offset, 0)

	rpc := &pb.HistoryQuery{
		PubsubTopic: pubsubTopic,
		StartTime:   lastSeenTime,
		EndTime:     currentTime,
		PagingInfo: &pb.PagingInfo{
			PageSize:  0,
			Direction: pb.PagingInfo_BACKWARD,
		},
	}

	messages, err := store.queryLoop(ctx, rpc, peerList)
	if err != nil {
		store.log.Error("resuming history", zap.Error(err))
		return -1, ErrFailedToResumeHistory
	}

	// Count only the messages that were successfully persisted.
	msgCount := 0
	for _, msg := range messages {
		if err = store.storeMessage(protocol.NewEnvelope(msg, utils.GetUnixEpoch(), pubsubTopic)); err == nil {
			msgCount++
		}
	}

	store.log.Info("retrieved messages since the last online time", zap.Int("messages", len(messages)))

	return msgCount, nil
}

View File

@ -10,7 +10,7 @@ import (
) )
func TestWakuRest(t *testing.T) { func TestWakuRest(t *testing.T) {
options := node.WithWakuStore(false, false) options := node.WithWakuStore(false, nil)
n, err := node.New(context.Background(), options) n, err := node.New(context.Background(), options)
require.NoError(t, err) require.NoError(t, err)

View File

@ -10,7 +10,7 @@ import (
) )
func makeStoreService(t *testing.T) *StoreService { func makeStoreService(t *testing.T) *StoreService {
options := node.WithWakuStore(false, false) options := node.WithWakuStore(false, nil)
n, err := node.New(context.Background(), options) n, err := node.New(context.Background(), options)
require.NoError(t, err) require.NoError(t, err)
err = n.Start() err = n.Start()

View File

@ -10,7 +10,7 @@ import (
) )
func TestWakuRpc(t *testing.T) { func TestWakuRpc(t *testing.T) {
options := node.WithWakuStore(false, false) options := node.WithWakuStore(false, nil)
n, err := node.New(context.Background(), options) n, err := node.New(context.Background(), options)
require.NoError(t, err) require.NoError(t, err)