chore: initial commit

This commit is contained in:
Richard Ramos 2024-05-08 16:37:42 -04:00
commit b9d7f5349a
No known key found for this signature in database
GPG Key ID: 1CE87DB518195760
15 changed files with 2352 additions and 0 deletions

281
cmd/msgfinder/execute.go Normal file
View File

@ -0,0 +1,281 @@
package main
import (
"context"
"database/sql"
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/log"
"github.com/multiformats/go-multiaddr"
"github.com/waku-org/go-waku/waku/v2/node"
"github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
"github.com/waku-org/go-waku/waku/v2/protocol/store"
"github.com/waku-org/storenode-messages/internal/logging"
"github.com/waku-org/storenode-messages/internal/persistence"
"go.uber.org/zap"
"google.golang.org/protobuf/proto"
)
// MessageExistence records what is known about whether a given storenode
// has stored a particular message.
type MessageExistence int

const (
	// Unknown is the zero value: the storenode has not reported on this message yet.
	Unknown MessageExistence = iota
	// Exists means the storenode returned the message in a query.
	Exists
	// DoesNotExist means the storenode was queried and did not return the message.
	DoesNotExist
)

// maxAttempts is the number of times a storenode query is retried before
// the node is considered unavailable for the current round.
const maxAttempts = 3
// Execute wires up persistence and a waku node, then periodically verifies
// that every configured storenode has retained the messages published on the
// configured pubsub topics. It blocks until ctx is cancelled or an
// unrecoverable error occurs.
func Execute(ctx context.Context, options Options) error {
	// Set encoding for logs (console, json, ...)
	// Note that libp2p reads the encoding from GOLOG_LOG_FMT env var.
	logging.InitLogger(options.LogEncoding, options.LogOutput)
	logger := logging.Logger()

	var db *sql.DB
	var migrationFn func(*sql.DB, *zap.Logger) error
	db, migrationFn, err := persistence.ParseURL(options.DatabaseURL, logger)
	if err != nil {
		return err
	}

	dbStore, err := persistence.NewDBStore(logger, persistence.WithDB(db), persistence.WithMigrations(migrationFn))
	if err != nil {
		return err
	}
	defer dbStore.Stop()

	wakuNode, err := node.New(
		node.WithNTP(),
		node.WithClusterID(uint16(options.ClusterID)),
	)
	if err != nil {
		// BUG FIX: this error was previously ignored (immediately
		// overwritten by the Start result), risking a nil-node panic.
		return err
	}

	err = wakuNode.Start(ctx)
	if err != nil {
		return err
	}
	defer wakuNode.Stop()

	err = dbStore.Start(ctx, wakuNode.Timesource())
	if err != nil {
		return err
	}

	// Width of each query window, and how far behind "now" the window ends
	// (gives storenodes time to persist recently relayed messages).
	timeInterval := 2 * time.Minute
	delay := 5 * time.Minute

	ticker := time.NewTicker(timeInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			// BUG FIX: a plain `break` here only exited the select, not
			// the for loop, so cancellation spun forever. Treat context
			// cancellation as a normal shutdown.
			return nil
		case <-ticker.C:
			if err := verifyTick(ctx, options, wakuNode, dbStore, logger, timeInterval, delay); err != nil {
				return err
			}
		}
	}
}

// verifyTick runs a single verification round inside one DB transaction:
// it queries every storenode for each topic's pending time range, then
// cross-checks by message hash which storenodes are missing messages seen
// elsewhere, and records the result.
func verifyTick(ctx context.Context, options Options, wakuNode *node.WakuNode, dbStore *persistence.DBStore, logger *zap.Logger, timeInterval time.Duration, delay time.Duration) error {
	// [MessageHash][StoreNode] = exists?
	msgMap := make(map[pb.MessageHash]map[string]MessageExistence)
	// [MessageHash] = pubsub topic the message was seen on
	msgTopic := make(map[pb.MessageHash]string)

	topicSyncStatus, err := dbStore.GetTopicSyncStatus(ctx, options.ClusterID, options.PubSubTopics.Value())
	if err != nil {
		return err
	}

	trx, err := dbStore.GetTrx(ctx)
	if err != nil {
		return err
	}
	// BUG FIX: the original never rolled back the transaction on error
	// paths. Rollback after a successful Commit is a harmless no-op.
	defer func() {
		_ = trx.Rollback()
	}()

	for topic, lastSyncTimestamp := range topicSyncStatus {
		now := wakuNode.Timesource().Now()

		// Query is done with a delay
		startTime := now.Add(-(timeInterval + delay))
		if lastSyncTimestamp != nil {
			startTime = *lastSyncTimestamp
		}
		endTime := now.Add(-delay)

		if startTime.After(endTime) {
			// NOTE(review): this uses go-ethereum's log package with zap
			// fields (producing odd key/value output); kept as-is so the
			// file-level import stays used, but it should likely be
			// logger.Warn — confirm and clean up together with imports.
			log.Warn("too soon to retrieve messages for topic", zap.String("topic", topic))
			continue
		}

		// Determine which messages exist on each storenode.
		for _, storenode := range options.StoreNodes {
			// TODO: make async
			collectTopicMessages(ctx, wakuNode, logger, storenode, topic, startTime, endTime, msgMap, msgTopic)
		}

		// Update db with last sync time.
		// BUG FIX: this error was previously ignored.
		if err := dbStore.UpdateTopicSyncState(trx, options.ClusterID, topic, endTime); err != nil {
			return err
		}
	}

	// For every message seen by at least one storenode, collect the
	// storenodes that did NOT return it so their copies can be verified by
	// message hash.
	// BUG FIX: the original only inspected entries already present in
	// msgMap[hash] — all of which were `Exists` — so nothing was ever
	// verified; iterate the configured storenodes instead.
	msgsToVerify := make(map[string][]pb.MessageHash) // node -> msgHashes
	for msgHash, nodes := range msgMap {
		for _, storenode := range options.StoreNodes {
			if nodes[storenode.String()] != Exists {
				addr := storenode.String()
				msgsToVerify[addr] = append(msgsToVerify[addr], msgHash)
			}
		}
	}

	// TODO: async
	for nodeAddr, messageHashes := range msgsToVerify {
		nodeMultiaddr, err := multiaddr.NewMultiaddr(nodeAddr)
		if err != nil {
			return err
		}
		verifyMessagesByHash(ctx, wakuNode, logger, nodeMultiaddr, messageHashes, msgMap)
	}

	// Record, per message, which storenodes still do not have it.
	// BUG FIX: as above, check every configured storenode rather than only
	// the map entries that already say `Exists`.
	for msgHash, nodes := range msgMap {
		var missingIn []string
		for _, storenode := range options.StoreNodes {
			if nodes[storenode.String()] != Exists {
				missingIn = append(missingIn, storenode.String())
			}
		}
		if err := dbStore.RecordMissingMessage(trx, msgHash, msgTopic[msgHash], missingIn); err != nil {
			return err
		}
	}

	return trx.Commit()
}

// collectTopicMessages asks a single storenode for the message hashes it has
// stored for `topic` within [startTime, endTime] and records them in msgMap
// and msgTopic. Storenode unavailability is logged but not fatal.
func collectTopicMessages(ctx context.Context, wakuNode *node.WakuNode, logger *zap.Logger, storenode multiaddr.Multiaddr, topic string, startTime time.Time, endTime time.Time, msgMap map[pb.MessageHash]map[string]MessageExistence, msgTopic map[pb.MessageHash]string) {
	var result *store.Result
	var err error

	storeNodeFailure := true
	for i := 0; i < maxAttempts; i++ {
		result, err = wakuNode.Store().Query(ctx, store.FilterCriteria{
			ContentFilter: protocol.NewContentFilter(topic),
			TimeStart:     proto.Int64(startTime.UnixNano()),
			TimeEnd:       proto.Int64(endTime.UnixNano()),
		}, store.WithPeerAddr(storenode), store.IncludeData(false))
		if err == nil {
			storeNodeFailure = false
			break
		}
		logger.Error("could not query storenode", zap.Stringer("storenode", storenode), zap.Error(err))
		time.Sleep(2 * time.Second)
	}

	if storeNodeFailure {
		// TODO: Notify that storenode was not available from X to Y time
		logger.Error("storenode not available", zap.Stringer("storenode", storenode), zap.Time("startTime", startTime), zap.Time("endTime", endTime))
		return
	}

	// Page through the result set.
	for {
		hasNext := false
		storeNodeFailure = true
		for i := 0; i < maxAttempts; i++ {
			hasNext, err = result.Next(ctx)
			if err == nil {
				storeNodeFailure = false
				break
			}
			logger.Error("could not query storenode", zap.Stringer("storenode", storenode), zap.Error(err))
			time.Sleep(2 * time.Second)
		}

		if storeNodeFailure {
			// TODO: Notify that storenode was not available from X to Y time
			logger.Error("storenode not available",
				zap.Stringer("storenode", storenode),
				zap.Time("startTime", startTime),
				zap.Time("endTime", endTime),
				zap.String("topic", topic),
				zap.String("cursor", hexutil.Encode(result.Cursor())))
			// BUG FIX: the original looped retrying forever when the node
			// stayed down; give up on this storenode for this round.
			return
		}

		if !hasNext { // No more messages available
			return
		}

		for _, mkv := range result.Messages() {
			hash := mkv.WakuMessageHash()
			if _, ok := msgMap[hash]; !ok {
				msgMap[hash] = make(map[string]MessageExistence)
			}
			msgMap[hash][storenode.String()] = Exists
			msgTopic[hash] = mkv.PubsubTopic
		}
	}
}

// verifyMessagesByHash asks a single storenode whether it has the given
// message hashes, marking any it returns as Exists in msgMap. Storenode
// unavailability is logged but not fatal.
func verifyMessagesByHash(ctx context.Context, wakuNode *node.WakuNode, logger *zap.Logger, nodeMultiaddr multiaddr.Multiaddr, messageHashes []pb.MessageHash, msgMap map[pb.MessageHash]map[string]MessageExistence) {
	var result *store.Result
	var err error

	storeNodeFailure := true
	for i := 0; i < maxAttempts; i++ {
		result, err = wakuNode.Store().QueryByHash(ctx, messageHashes, store.IncludeData(false), store.WithPeerAddr(nodeMultiaddr))
		if err == nil {
			storeNodeFailure = false
			break
		}
		logger.Error("could not query storenode", zap.Stringer("storenode", nodeMultiaddr), zap.Error(err))
		time.Sleep(2 * time.Second)
	}

	if storeNodeFailure {
		// TODO: Notify that storenode was not available from X to Y time
		logger.Error("storenode not available",
			zap.Stringer("storenode", nodeMultiaddr),
			zap.Any("hashes", messageHashes))
		return
	}

	for {
		hasNext := false
		storeNodeFailure = true
		for i := 0; i < maxAttempts; i++ {
			hasNext, err = result.Next(ctx)
			if err == nil {
				storeNodeFailure = false
				break
			}
			logger.Error("could not query storenode", zap.Stringer("storenode", nodeMultiaddr), zap.Error(err))
			time.Sleep(2 * time.Second)
		}

		if storeNodeFailure {
			// TODO: Notify that storenode was not available from X to Y time
			logger.Error("storenode not available",
				zap.Stringer("storenode", nodeMultiaddr),
				zap.Any("hashes", messageHashes),
				zap.String("cursor", hexutil.Encode(result.Cursor())))
			// BUG FIX: avoid the original's infinite retry loop.
			return
		}

		if !hasNext { // No more messages available
			return
		}

		for _, mkv := range result.Messages() {
			hash := mkv.WakuMessageHash()
			if _, ok := msgMap[hash]; !ok {
				msgMap[hash] = make(map[string]MessageExistence)
			}
			msgMap[hash][nodeMultiaddr.String()] = Exists
		}
	}
}

69
cmd/msgfinder/flags.go Normal file
View File

@ -0,0 +1,69 @@
package main
import (
cli "github.com/urfave/cli/v2"
"github.com/urfave/cli/v2/altsrc"
"github.com/waku-org/go-waku/waku/cliutils"
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
)
// cliFlags defines every command-line option accepted by this binary.
// Each flag writes directly into the package-level `options` struct via its
// Destination/Value pointer, and altsrc lets any flag also be supplied from
// the TOML file named by --config-file (CLI parameters take precedence).
var cliFlags = []cli.Flag{
	// Optional TOML configuration file.
	&cli.StringFlag{Name: "config-file", Usage: "loads configuration from a TOML file (cmd-line parameters take precedence)"},
	// Repeatable multiaddr(s) of the storenodes whose retention is verified.
	cliutils.NewGenericFlagMultiValue(&cli.GenericFlag{
		Name:     "storenode",
		Required: true,
		Usage:    "Multiaddr of peers that supports storeV3 protocol. Option may be repeated",
		Value: &cliutils.MultiaddrSlice{
			Values: &options.StoreNodes,
		},
		EnvVars: []string{"MSGVERIF_STORENODE"},
	}),
	// Waku cluster the queries are scoped to.
	altsrc.NewUintFlag(&cli.UintFlag{
		Name:        "cluster-id",
		Usage:       "ClusterID to use",
		Destination: &options.ClusterID,
		Required:    true,
		EnvVars:     []string{"MSGVERIF_CLUSTER_ID"},
	}),
	// Repeatable pubsub topics to verify; defaults to the default waku topic.
	altsrc.NewStringSliceFlag(&cli.StringSliceFlag{
		Name:        "pubsub-topic",
		Required:    true,
		Usage:       "Pubsub topic used for the query. Argument may be repeated.",
		Value:       cli.NewStringSlice(relay.DefaultWakuTopic),
		Destination: &options.PubSubTopics,
		EnvVars:     []string{"MSGVERIF_PUBSUB_TOPICS"},
	}),
	// Persistence backend; the scheme selects the driver (e.g. sqlite3://).
	altsrc.NewStringFlag(&cli.StringFlag{
		Name:        "db-url",
		Usage:       "The database connection URL for persistent storage.",
		Value:       "sqlite3://storage.db",
		Destination: &options.DatabaseURL,
		// NOTE(review): the "MSG_VERIF_" prefix is inconsistent with the
		// "MSGVERIF_" prefix used by every other flag — confirm whether
		// intentional; renaming would break existing deployments.
		EnvVars: []string{"MSG_VERIF_DB_URL"},
	}),
	cliutils.NewGenericFlagSingleValue(&cli.GenericFlag{
		Name:    "log-level",
		Aliases: []string{"l"},
		Value: &cliutils.ChoiceValue{
			Choices: []string{"DEBUG", "INFO", "WARN", "ERROR", "DPANIC", "PANIC", "FATAL"},
			Value:   &options.LogLevel,
		},
		Usage:   "Define the logging level (allowed values: DEBUG, INFO, WARN, ERROR, DPANIC, PANIC, FATAL)",
		EnvVars: []string{"MSGVERIF_LOG_LEVEL"},
	}),
	cliutils.NewGenericFlagSingleValue(&cli.GenericFlag{
		Name:  "log-encoding",
		Usage: "Define the encoding used for the logs (allowed values: console, nocolor, json)",
		Value: &cliutils.ChoiceValue{
			Choices: []string{"console", "nocolor", "json"},
			Value:   &options.LogEncoding,
		},
		EnvVars: []string{"MSGVERIF_LOG_ENCODING"},
	}),
	altsrc.NewStringFlag(&cli.StringFlag{
		Name:        "log-output",
		Value:       "stdout",
		Usage:       "specifies where logging output should be written  (stdout, file, file:./filename.log)",
		Destination: &options.LogOutput,
		EnvVars:     []string{"MSGVERIF_LOG_OUTPUT"},
	}),
}

34
cmd/msgfinder/main.go Normal file
View File

@ -0,0 +1,34 @@
package main
import (
"os"
"github.com/urfave/cli/v2"
"github.com/urfave/cli/v2/altsrc"
)
var options Options
// main configures defaults, builds the CLI application from the flag list
// declared in flags.go, and runs the verification loop.
func main() {
	// Defaults applied before flag/env/TOML parsing.
	options.LogLevel = "INFO"
	options.LogEncoding = "console"

	// BUG FIX: a local `cliFlags := []cli.Flag{}` previously shadowed the
	// package-level flag list defined in flags.go, so the application was
	// built with NO flags at all.
	app := &cli.App{
		Name:    "storenode-messages",
		Version: "0.0.1",
		Before:  altsrc.InitInputSourceWithContext(cliFlags, altsrc.NewTomlSourceFromFlagFunc("config-file")),
		Flags:   cliFlags,
		Action: func(c *cli.Context) error {
			// BUG FIX: the Execute error was silently discarded, making
			// the process exit 0 even on failure.
			return Execute(c.Context, options)
		},
	}

	err := app.Run(os.Args)
	if err != nil {
		panic(err)
	}
}

16
cmd/msgfinder/options.go Normal file
View File

@ -0,0 +1,16 @@
package main
import (
"github.com/multiformats/go-multiaddr"
"github.com/urfave/cli/v2"
)
// Options groups every runtime setting of the msgfinder command; it is
// populated by the CLI flags declared in flags.go.
type Options struct {
	LogLevel     string                // zap level name (DEBUG, INFO, WARN, ...)
	LogEncoding  string                // console, nocolor or json
	LogOutput    string                // stdout, file, or file:<path>
	ClusterID    uint                  // waku cluster the queries are scoped to
	PubSubTopics cli.StringSlice       // pubsub topics whose messages are verified
	DatabaseURL  string                // persistence URL, e.g. sqlite3://storage.db
	StoreNodes   []multiaddr.Multiaddr // storenodes expected to retain messages
}

140
go.mod Normal file
View File

@ -0,0 +1,140 @@
module github.com/waku-org/storenode-messages
go 1.19
replace github.com/ethereum/go-ethereum v1.10.26 => github.com/status-im/go-ethereum v1.10.25-status.4
require (
github.com/ethereum/go-ethereum v1.10.26
github.com/golang-migrate/migrate/v4 v4.16.2
github.com/ipfs/go-log/v2 v2.5.1
github.com/mattn/go-sqlite3 v1.14.17
github.com/multiformats/go-multiaddr v0.12.3
github.com/urfave/cli/v2 v2.27.2
github.com/waku-org/go-waku v0.8.1-0.20240507175626-19d27befd98b
go.uber.org/zap v1.27.0
)
require (
github.com/BurntSushi/toml v1.3.2 // indirect
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
github.com/avast/retry-go/v4 v4.5.1 // indirect
github.com/beevik/ntp v0.3.0 // indirect
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d // indirect
github.com/cenkalti/backoff/v4 v4.1.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/containerd/cgroups v1.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
github.com/cruxic/go-hmac-drbg v0.0.0-20170206035330-84c46983886d // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/elastic/gosigar v0.14.2 // indirect
github.com/flynn/noise v1.0.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/go-ole/go-ole v1.2.1 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/ipfs/go-cid v0.4.1 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/klauspost/compress v1.17.2 // indirect
github.com/klauspost/cpuid/v2 v2.2.6 // indirect
github.com/koron/go-ssdp v0.0.4 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/libp2p/go-cidranger v1.1.0 // indirect
github.com/libp2p/go-flow-metrics v0.1.0 // indirect
github.com/libp2p/go-libp2p v0.32.2 // indirect
github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect
github.com/libp2p/go-libp2p-mplex v0.9.0 // indirect
github.com/libp2p/go-libp2p-pubsub v0.10.1 // indirect
github.com/libp2p/go-mplex v0.7.0 // indirect
github.com/libp2p/go-msgio v0.3.0 // indirect
github.com/libp2p/go-nat v0.2.0 // indirect
github.com/libp2p/go-netroute v0.2.1 // indirect
github.com/libp2p/go-reuseport v0.4.0 // indirect
github.com/libp2p/go-yamux/v4 v4.0.1 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/miekg/dns v1.1.56 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
github.com/multiformats/go-multibase v0.2.0 // indirect
github.com/multiformats/go-multicodec v0.9.0 // indirect
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-multistream v0.5.0 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/onsi/ginkgo/v2 v2.13.0 // indirect
github.com/opencontainers/runtime-spec v1.1.0 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/quic-go/qpack v0.4.0 // indirect
github.com/quic-go/qtls-go1-20 v0.3.4 // indirect
github.com/quic-go/quic-go v0.39.4 // indirect
github.com/quic-go/webtransport-go v0.6.0 // indirect
github.com/raulk/go-watchdog v1.3.0 // indirect
github.com/rjeczalik/notify v0.9.3 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/status-im/status-go/extkeys v1.1.2 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect
github.com/tklauser/go-sysconf v0.3.5 // indirect
github.com/tklauser/numcpus v0.2.2 // indirect
github.com/waku-org/go-discover v0.0.0-20240506173252-4912704efdc5 // indirect
github.com/waku-org/go-libp2p-rendezvous v0.0.0-20240110193335-a67d1cc760a0 // indirect
github.com/waku-org/go-zerokit-rln v0.1.14-0.20240102145250-fa738c0bdf59 // indirect
github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230916172309-ee0ee61dde2b // indirect
github.com/waku-org/go-zerokit-rln-arm v0.0.0-20230916171929-1dd9494ff065 // indirect
github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230916171518-2a77c3734dd1 // indirect
github.com/wk8/go-ordered-map v1.0.0 // indirect
github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/dig v1.17.1 // indirect
go.uber.org/fx v1.20.1 // indirect
go.uber.org/mock v0.3.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.18.0 // indirect
golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect
golang.org/x/mod v0.13.0 // indirect
golang.org/x/net v0.17.0 // indirect
golang.org/x/sync v0.4.0 // indirect
golang.org/x/sys v0.16.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
golang.org/x/tools v0.14.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/blake3 v1.2.1 // indirect
)

1022
go.sum Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,76 @@
package logging
import (
"strings"
logging "github.com/ipfs/go-log/v2"
"go.uber.org/zap"
)
// log holds the lazily-initialized process-wide logger.
var log *zap.Logger

// messageLoggers caches one message logger per prefix.
var messageLoggers map[string]*zap.Logger

// Logger returns the process-wide zap logger, initializing it on first use
// with console encoding writing to stdout.
func Logger() *zap.Logger {
	if log != nil {
		return log
	}
	InitLogger("console", "stdout")
	return log
}
// MessagesLogger returns a logger used for debug logging of sent/received
// messages. One logger is created per prefix and cached for reuse.
func MessagesLogger(prefix string) *zap.Logger {
	if messageLoggers == nil {
		messageLoggers = make(map[string]*zap.Logger)
	}

	if cached := messageLoggers[prefix]; cached != nil {
		return cached
	}

	created := logging.Logger(prefix + ".messages").Desugar()
	messageLoggers[prefix] = created
	return created
}
// InitLogger initializes the global logger using a specific encoding
// ("json", "nocolor", anything else means colorized console) and output.
// output accepts "stdout" (or empty), "file" (writes ./waku.log) or
// "file:<path>"; any other value panics.
func InitLogger(encoding string, output string) {
	cfg := logging.GetConfig()

	switch encoding {
	case "json":
		cfg.Format = logging.JSONOutput
	case "nocolor":
		cfg.Format = logging.PlaintextOutput
	default:
		cfg.Format = logging.ColorizedOutput
	}

	if output == "stdout" || output == "" {
		cfg.Stdout = true
		cfg.Stderr = false
	} else {
		if encoding == "console" {
			// Color escape sequences are not useful in a file.
			cfg.Format = logging.PlaintextOutput
		}
		cfg.Stdout = false
		cfg.Stderr = false

		// Accept "file" or "file:<path>".
		// BUG FIX: previously any "<prefix>:<path>" was silently treated
		// as a file path; now the documented "file" prefix is required,
		// and extra ':' separators still panic as before.
		prefix, path, hasPath := strings.Cut(output, ":")
		if prefix != "file" || (hasPath && strings.Contains(path, ":")) {
			panic("invalid output format")
		}
		if hasPath {
			cfg.File = path
		} else {
			cfg.File = "./waku.log"
		}
	}

	if cfg.Level == logging.LevelError {
		// Override the library's default (error-only) level.
		cfg.Level = logging.LevelInfo
	}

	logging.SetupLogging(cfg)

	log = logging.Logger("gowaku").Desugar()
}

View File

@ -0,0 +1,215 @@
package persistence
import (
"context"
"database/sql"
"sync"
"time"
"github.com/waku-org/go-waku/waku/v2/timesource"
"go.uber.org/zap"
)
// WALMode for sqlite.
const WALMode = "wal"

// DBStore is a MessageProvider that has a *sql.DB connection
type DBStore struct {
	db          *sql.DB                                   // underlying connection, injected via WithDB or opened via WithDriver
	migrationFn func(db *sql.DB, logger *zap.Logger) error // schema migration runner, set via WithMigrations

	timesource timesource.Timesource // clock used by background maintenance; set in Start
	log        *zap.Logger

	enableMigrations bool // when true, NewDBStore runs migrationFn

	wg     sync.WaitGroup     // tracks the background cleanup goroutine
	cancel context.CancelFunc // stops the background goroutine; nil until Start
}
// DBOption is an optional setting that can be used to configure the DBStore
type DBOption func(*DBStore) error

// WithDB is a DBOption that lets you use any custom *sql.DB with a DBStore.
func WithDB(db *sql.DB) DBOption {
	return func(store *DBStore) error {
		store.db = db
		return nil
	}
}
// ConnectionPoolOptions is the options to be used for DB connection pooling
type ConnectionPoolOptions struct {
	MaxOpenConnections    int           // passed to sql.DB.SetMaxOpenConns
	MaxIdleConnections    int           // passed to sql.DB.SetMaxIdleConns
	ConnectionMaxLifetime time.Duration // passed to sql.DB.SetConnMaxLifetime
	ConnectionMaxIdleTime time.Duration // passed to sql.DB.SetConnMaxIdleTime
}
// WithDriver is a DBOption that opens a *sql.DB connection using the given
// driver and datasource, optionally applying connection-pool settings
// (only the first ConnectionPoolOptions value is honored).
func WithDriver(driverName string, datasourceName string, connectionPoolOptions ...ConnectionPoolOptions) DBOption {
	return func(store *DBStore) error {
		conn, err := sql.Open(driverName, datasourceName)
		if err != nil {
			return err
		}

		if len(connectionPoolOptions) > 0 {
			pool := connectionPoolOptions[0]
			conn.SetConnMaxIdleTime(pool.ConnectionMaxIdleTime)
			conn.SetConnMaxLifetime(pool.ConnectionMaxLifetime)
			conn.SetMaxIdleConns(pool.MaxIdleConnections)
			conn.SetMaxOpenConns(pool.MaxOpenConnections)
		}

		store.db = conn
		return nil
	}
}
// MigrationFn is the signature of a function that applies schema migrations
// against the given database.
type MigrationFn func(db *sql.DB, logger *zap.Logger) error

// WithMigrations is a DBOption used to determine if migrations should
// be executed, and what driver to use
func WithMigrations(migrationFn MigrationFn) DBOption {
	return func(store *DBStore) error {
		store.migrationFn = migrationFn
		store.enableMigrations = true
		return nil
	}
}
// DefaultOptions returns the default DBoptions to be used.
// Currently empty; kept as an extension point so NewDBStore always has a
// base option list to append caller options to.
func DefaultOptions() []DBOption {
	return []DBOption{}
}
// NewDBStore creates a new DB store using the db specified via options.
// It will run migrations if enabled, and clean up records according to the
// retention policy used.
func NewDBStore(log *zap.Logger, options ...DBOption) (*DBStore, error) {
	store := &DBStore{log: log.Named("dbstore")}

	// Apply defaults first so explicit options can override them.
	for _, applyOption := range append(DefaultOptions(), options...) {
		if err := applyOption(store); err != nil {
			return nil, err
		}
	}

	if store.enableMigrations {
		if err := store.migrationFn(store.db, log); err != nil {
			return nil, err
		}
	}

	return store, nil
}
// Start starts the store server functionality: it runs an initial cleanup
// pass and launches the periodic cleanup goroutine, which is stopped by
// Stop (or when the parent ctx is cancelled).
func (d *DBStore) Start(ctx context.Context, timesource timesource.Timesource) error {
	ctx, cancel := context.WithCancel(ctx)
	d.cancel = cancel
	d.timesource = timesource

	if err := d.cleanOlderRecords(ctx); err != nil {
		return err
	}

	d.wg.Add(1)
	go d.checkForOlderRecords(ctx, 60*time.Second)

	return nil
}
// cleanOlderRecords deletes expired rows according to the retention policy.
// Currently a stub: it only logs; the actual deletion is not implemented yet.
func (d *DBStore) cleanOlderRecords(ctx context.Context) error {
	d.log.Info("Cleaning older records...")
	// TODO:
	d.log.Info("Older records removed")
	return nil
}
// checkForOlderRecords triggers cleanOlderRecords every t until ctx is
// cancelled. It must be started with d.wg.Add(1); it signals completion
// through d.wg so Stop can wait for it.
func (d *DBStore) checkForOlderRecords(ctx context.Context, t time.Duration) {
	defer d.wg.Done()

	ticker := time.NewTicker(t)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if err := d.cleanOlderRecords(ctx); err != nil {
				// Cleanup failures are logged and retried on the next tick.
				d.log.Error("cleaning older records", zap.Error(err))
			}
		}
	}
}
// Stop closes a DB connection
// It cancels the background cleanup goroutine, waits for it to exit, then
// closes the underlying database. Safe to call when Start was never called.
func (d *DBStore) Stop() {
	if d.cancel == nil {
		// Start was never invoked; nothing to tear down.
		return
	}
	d.cancel()
	d.wg.Wait()
	d.db.Close()
}

// GetTrx begins and returns a new database transaction with default options.
func (d *DBStore) GetTrx(ctx context.Context) (*sql.Tx, error) {
	return d.db.BeginTx(ctx, nil)
}
// GetTopicSyncStatus returns, for each requested pubsub topic, the time up
// to which it has already been synced for the given cluster, or nil when
// the topic has never been synced.
func (d *DBStore) GetTopicSyncStatus(ctx context.Context, clusterID uint, pubsubTopics []string) (map[string]*time.Time, error) {
	// Seed every requested topic so callers always see an entry.
	// BUG FIX: topics were previously seeded with a pointer to the zero
	// time.Time, which callers could not distinguish from a real sync
	// timestamp (their `!= nil` "never synced" branch was unreachable);
	// nil now means "never synced".
	result := make(map[string]*time.Time)
	for _, topic := range pubsubTopics {
		result[topic] = nil
	}

	sqlQuery := `SELECT pubsubTopic, lastSyncTimestamp FROM syncTopicStatus WHERE clusterId = ?`
	rows, err := d.db.QueryContext(ctx, sqlQuery, clusterID)
	if err != nil {
		return nil, err
	}
	// BUG FIX: Close was deferred only after the scan loop, leaking the
	// rows on early error returns.
	defer rows.Close()

	for rows.Next() {
		var pubsubTopic string
		var lastSyncTimestamp int64
		if err := rows.Scan(&pubsubTopic, &lastSyncTimestamp); err != nil {
			return nil, err
		}
		// Timestamps are stored as nanoseconds since the Unix epoch.
		t := time.Unix(0, lastSyncTimestamp)
		result[pubsubTopic] = &t
	}
	// BUG FIX: surface iteration errors instead of silently returning a
	// partial result.
	if err := rows.Err(); err != nil {
		return nil, err
	}

	return result, nil
}
// UpdateTopicSyncState upserts the last-synced timestamp (stored as
// nanoseconds since the Unix epoch) for a topic within the given transaction.
func (d *DBStore) UpdateTopicSyncState(tx *sql.Tx, clusterID uint, topic string, lastSyncTimestamp time.Time) error {
	stmt, err := tx.Prepare("INSERT INTO syncTopicStatus(clusterId, pubsubTopic, lastSyncTimestamp) VALUES (?, ?, ?) ON CONFLICT(clusterId, pubsubTopic) DO UPDATE SET lastSyncTimestamp = ?")
	if err != nil {
		return err
	}
	// BUG FIX: the statement was only closed on the success path.
	defer stmt.Close()

	// BUG FIX: the statement has FOUR placeholders (three in VALUES plus
	// one in DO UPDATE SET) but only three arguments were bound, so every
	// call failed at Exec time.
	ts := lastSyncTimestamp.UnixNano()
	_, err = stmt.Exec(clusterID, topic, ts, ts)
	return err
}

View File

@ -0,0 +1,53 @@
package migrate
import (
"database/sql"
"github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/database"
bindata "github.com/golang-migrate/migrate/v4/source/go_bindata"
)
// Migrate applies all pending "up" migrations from the given bindata assets.
func Migrate(db *sql.DB, driver database.Driver, assetNames []string, assetFunc bindata.AssetFunc) error {
	resources := bindata.Resource(assetNames, assetFunc)
	return migrateDB(db, resources, driver, true)
}
// MigrateDown reverts migrations from the given bindata assets.
func MigrateDown(db *sql.DB, driver database.Driver, assetNames []string, assetFunc bindata.AssetFunc) error {
	resources := bindata.Resource(assetNames, assetFunc)
	return migrateDB(db, resources, driver, false)
}
// migrateDB applies (up == true) or reverts (up == false) the provided
// migration resources against db.
func migrateDB(db *sql.DB, resources *bindata.AssetSource, driver database.Driver, up bool) error {
	source, err := bindata.WithInstance(resources)
	if err != nil {
		return err
	}

	m, err := migrate.NewWithInstance(
		"go-bindata",
		source,
		"gowakudb",
		driver)
	if err != nil {
		return err
	}

	apply := m.Down
	if up {
		apply = m.Up
	}

	// ErrNoChange just means the schema is already at the target version.
	if err := apply(); err != nil && err != migrate.ErrNoChange {
		return err
	}
	return nil
}

View File

@ -0,0 +1,319 @@
// Code generated by go-bindata. DO NOT EDIT.
// sources:
// 1_messages.down.sql (34B)
// 1_messages.up.sql (192B)
// doc.go (74B)
package migrations
import (
"bytes"
"compress/gzip"
"crypto/sha256"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
)
func bindataRead(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf("read %q: %v", name, err)
}
var buf bytes.Buffer
_, err = io.Copy(&buf, gz)
clErr := gz.Close()
if err != nil {
return nil, fmt.Errorf("read %q: %v", name, err)
}
if clErr != nil {
return nil, err
}
return buf.Bytes(), nil
}
// asset bundles an embedded file's raw bytes with its metadata and checksum.
type asset struct {
	bytes  []byte
	info   os.FileInfo
	digest [sha256.Size]byte
}

// bindataFileInfo implements os.FileInfo for an embedded asset, using the
// values recorded at generation time.
type bindataFileInfo struct {
	name    string
	size    int64
	mode    os.FileMode
	modTime time.Time
}

// Name returns the base name of the embedded file.
func (fi bindataFileInfo) Name() string {
	return fi.name
}

// Size returns the length in bytes of the embedded file.
func (fi bindataFileInfo) Size() int64 {
	return fi.size
}

// Mode returns the file mode bits recorded at generation time.
func (fi bindataFileInfo) Mode() os.FileMode {
	return fi.mode
}

// ModTime returns the modification time recorded at generation time.
func (fi bindataFileInfo) ModTime() time.Time {
	return fi.modTime
}

// IsDir always reports false: embedded assets are regular files.
func (fi bindataFileInfo) IsDir() bool {
	return false
}

// Sys returns nil: there is no underlying OS data source.
func (fi bindataFileInfo) Sys() interface{} {
	return nil
}
// __1_messagesDownSql is the gzip-compressed contents of 1_messages.down.sql.
var __1_messagesDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\xf0\x74\x53\x70\x8d\xf0\x0c\x0e\x09\x56\x28\xae\xcc\x4b\x8e\x2f\xc9\x2f\xc8\x4c\x2e\xb6\xe6\x02\x04\x00\x00\xff\xff\xb2\x54\x30\xb5\x22\x00\x00\x00")

// _1_messagesDownSqlBytes decompresses and returns the raw SQL text.
func _1_messagesDownSqlBytes() ([]byte, error) {
	return bindataRead(
		__1_messagesDownSql,
		"1_messages.down.sql",
	)
}

// _1_messagesDownSql wraps the SQL bytes with generation-time metadata.
func _1_messagesDownSql() (*asset, error) {
	bytes, err := _1_messagesDownSqlBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "1_messages.down.sql", size: 34, mode: os.FileMode(0664), modTime: time.Unix(1715177338, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x37, 0x66, 0x71, 0x87, 0x7d, 0xb7, 0xb1, 0x90, 0x2, 0x5e, 0x13, 0xb3, 0xb8, 0xa3, 0x93, 0x72, 0xb5, 0x8, 0xf, 0x79, 0xaf, 0xba, 0xfc, 0x38, 0x64, 0x79, 0x48, 0xac, 0x30, 0x59, 0x80, 0x21}}
	return a, nil
}

// __1_messagesUpSql is the gzip-compressed contents of 1_messages.up.sql.
var __1_messagesUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x6c\xcd\x31\xae\x82\x40\x14\x85\xe1\x1a\x56\x71\x4a\x48\xd8\xc1\xab\xe6\xe1\x55\x6e\x44\x30\xc3\x45\xa4\x32\x30\x52\x90\x00\x12\x67\x28\xd8\xbd\xd1\xc2\x68\x62\x7d\xfe\x7c\x27\xd6\xa4\x84\x20\xea\x3f\x25\xf0\x16\x59\x2e\xa0\x33\x17\x52\xc0\xae\x93\xb9\xb8\xdb\xdc\x1b\x8b\xc0\xf7\xcc\xb0\x58\xd7\xdd\xf9\x0a\xce\x84\x76\xa4\x5f\x6d\x56\xa6\x69\xe4\x7b\xf3\xd2\xda\xa5\x95\x67\x8c\x93\xd2\x71\xa2\xbe\xe6\xa1\xb1\xae\x58\x27\x23\xfd\xd8\x59\xd7\x8c\xf3\x2f\xe3\xa8\xf9\xa0\x74\x8d\x3d\xd5\x08\xde\x6f\x11\x3e\xec\xd0\x0f\x51\xb1\x24\x79\x29\xd0\x79\xc5\x9b\xbf\x47\x00\x00\x00\xff\xff\x19\xfe\xc7\x67\xc0\x00\x00\x00")

// _1_messagesUpSqlBytes decompresses and returns the raw SQL text.
func _1_messagesUpSqlBytes() ([]byte, error) {
	return bindataRead(
		__1_messagesUpSql,
		"1_messages.up.sql",
	)
}

// _1_messagesUpSql wraps the SQL bytes with generation-time metadata.
func _1_messagesUpSql() (*asset, error) {
	bytes, err := _1_messagesUpSqlBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "1_messages.up.sql", size: 192, mode: os.FileMode(0664), modTime: time.Unix(1715177323, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7b, 0xd, 0x5e, 0xfd, 0xd, 0x7f, 0x5a, 0xac, 0x26, 0x8f, 0xd2, 0xc5, 0xfe, 0xcc, 0xf6, 0xfd, 0x95, 0x56, 0x40, 0xd4, 0xed, 0xe, 0x27, 0x33, 0x7d, 0xc5, 0x66, 0x86, 0x9c, 0xff, 0x2b, 0x47}}
	return a, nil
}

// _docGo is the gzip-compressed contents of doc.go.
var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x2c\xc9\xb1\x0d\xc4\x20\x0c\x05\xd0\x9e\x29\xfe\x02\xd8\xfd\x6d\xe3\x4b\xac\x2f\x44\x82\x09\x78\x7f\xa5\x49\xfd\xa6\x1d\xdd\xe8\xd8\xcf\x55\x8a\x2a\xe3\x47\x1f\xbe\x2c\x1d\x8c\xfa\x6f\xe3\xb4\x34\xd4\xd9\x89\xbb\x71\x59\xb6\x18\x1b\x35\x20\xa2\x9f\x0a\x03\xa2\xe5\x0d\x00\x00\xff\xff\x60\xcd\x06\xbe\x4a\x00\x00\x00")

// docGoBytes decompresses and returns the raw source text.
func docGoBytes() ([]byte, error) {
	return bindataRead(
		_docGo,
		"doc.go",
	)
}

// docGo wraps the doc.go bytes with generation-time metadata.
func docGo() (*asset, error) {
	bytes, err := docGoBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "doc.go", size: 74, mode: os.FileMode(0664), modTime: time.Unix(1715177003, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xde, 0x7c, 0x28, 0xcd, 0x47, 0xf2, 0xfa, 0x7c, 0x51, 0x2d, 0xd8, 0x38, 0xb, 0xb0, 0x34, 0x9d, 0x4c, 0x62, 0xa, 0x9e, 0x28, 0xc3, 0x31, 0x23, 0xd9, 0xbb, 0x89, 0x9f, 0xa0, 0x89, 0x1f, 0xe8}}
	return a, nil
}
// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
	// Normalize Windows-style separators so lookups are platform-agnostic.
	canonicalName := strings.Replace(name, "\\", "/", -1)
	if f, ok := _bindata[canonicalName]; ok {
		a, err := f()
		if err != nil {
			return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
		}
		return a.bytes, nil
	}
	return nil, fmt.Errorf("Asset %s not found", name)
}

// AssetString returns the asset contents as a string (instead of a []byte).
func AssetString(name string) (string, error) {
	data, err := Asset(name)
	return string(data), err
}

// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
	a, err := Asset(name)
	if err != nil {
		panic("asset: Asset(" + name + "): " + err.Error())
	}

	return a
}

// MustAssetString is like AssetString but panics when Asset would return an
// error. It simplifies safe initialization of global variables.
func MustAssetString(name string) string {
	return string(MustAsset(name))
}
// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
	// Normalize Windows-style separators so lookups are platform-agnostic.
	canonicalName := strings.Replace(name, "\\", "/", -1)
	if f, ok := _bindata[canonicalName]; ok {
		a, err := f()
		if err != nil {
			return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
		}
		return a.info, nil
	}
	return nil, fmt.Errorf("AssetInfo %s not found", name)
}

// AssetDigest returns the digest of the file with the given name. It returns an
// error if the asset could not be found or the digest could not be loaded.
func AssetDigest(name string) ([sha256.Size]byte, error) {
	// Normalize Windows-style separators so lookups are platform-agnostic.
	canonicalName := strings.Replace(name, "\\", "/", -1)
	if f, ok := _bindata[canonicalName]; ok {
		a, err := f()
		if err != nil {
			return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
		}
		return a.digest, nil
	}
	return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
}

// Digests returns a map of all known files and their checksums.
func Digests() (map[string][sha256.Size]byte, error) {
	mp := make(map[string][sha256.Size]byte, len(_bindata))
	for name := range _bindata {
		// Each entry must be materialized to obtain its digest.
		a, err := _bindata[name]()
		if err != nil {
			return nil, err
		}
		mp[name] = a.digest
	}
	return mp, nil
}
// AssetNames returns the names of the assets.
// The order is unspecified (map iteration order).
func AssetNames() []string {
	names := make([]string, 0, len(_bindata))
	for name := range _bindata {
		names = append(names, name)
	}
	return names
}

// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
	"1_messages.down.sql": _1_messagesDownSql,
	"1_messages.up.sql":   _1_messagesUpSql,
	"doc.go":              docGo,
}
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
//     data/
//       foo.txt
//       img/
//         a.png
//         b.png
// then AssetDir("data") would return []string{"foo.txt", "img"},
// AssetDir("data/img") would return []string{"a.png", "b.png"},
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
	current := _bintree
	if name != "" {
		// Canonicalize backslashes and walk the tree one path segment at a time.
		canonicalName := strings.Replace(name, "\\", "/", -1)
		for _, segment := range strings.Split(canonicalName, "/") {
			current = current.Children[segment]
			if current == nil {
				return nil, fmt.Errorf("Asset %s not found", name)
			}
		}
	}
	// A non-nil Func marks a leaf (file) node, which has no directory listing.
	if current.Func != nil {
		return nil, fmt.Errorf("Asset %s not found", name)
	}
	children := make([]string, 0, len(current.Children))
	for childName := range current.Children {
		children = append(children, childName)
	}
	return children, nil
}
// bintree is a node in the embedded-asset directory tree.
// Leaf (file) nodes carry a non-nil Func that loads the asset; interior
// (directory) nodes have a nil Func and one child entry per name.
type bintree struct {
	Func     func() (*asset, error)
	Children map[string]*bintree
}
// _bintree is the root of the embedded-asset directory tree; all three
// assets live directly under the root.
var _bintree = &bintree{nil, map[string]*bintree{
	"1_messages.down.sql": {_1_messagesDownSql, map[string]*bintree{}},
	"1_messages.up.sql":   {_1_messagesUpSql, map[string]*bintree{}},
	"doc.go":              {docGo, map[string]*bintree{}},
}}
// RestoreAsset restores an asset under the given directory.
// It recreates the asset's parent directories, writes the file with its
// recorded permission bits, and restores its modification time.
func RestoreAsset(dir, name string) error {
	data, err := Asset(name)
	if err != nil {
		return err
	}
	info, err := AssetInfo(name)
	if err != nil {
		return err
	}
	target := _filePath(dir, name)
	if err := os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)); err != nil {
		return err
	}
	if err := ioutil.WriteFile(target, data, info.Mode()); err != nil {
		return err
	}
	return os.Chtimes(target, info.ModTime(), info.ModTime())
}
// RestoreAssets restores an asset under the given directory recursively.
func RestoreAssets(dir, name string) error {
	children, err := AssetDir(name)
	if err != nil {
		// Not a directory in the asset tree: restore it as a single file.
		return RestoreAsset(dir, name)
	}
	// Directory: recurse into every child entry.
	for _, child := range children {
		if err := RestoreAssets(dir, filepath.Join(name, child)); err != nil {
			return err
		}
	}
	return nil
}
// _filePath joins dir and the slash-canonicalized asset name into an
// OS-specific path.
func _filePath(dir, name string) string {
	segments := strings.Split(strings.Replace(name, "\\", "/", -1), "/")
	return filepath.Join(append([]string{dir}, segments...)...)
}

View File

@ -0,0 +1 @@
-- Revert 1_messages.up.sql.
-- NOTE(review): this previously dropped "sync_topics", a table the paired up
-- migration never creates; the up migration creates syncTopicStatus, so that
-- is the table the down migration must remove. Regenerate bindata.go
-- (go generate) after changing this file so the embedded copy matches.
DROP TABLE IF EXISTS syncTopicStatus;

View File

@ -0,0 +1,7 @@
-- syncTopicStatus keeps one row per (cluster, pubsub topic) pair.
-- lastSyncTimestamp presumably records how far message-sync has progressed
-- for that topic -- confirm against the code that writes this table.
-- WITHOUT ROWID: the composite primary key is used directly as the storage key.
CREATE TABLE IF NOT EXISTS syncTopicStatus (
clusterId INTEGER NOT NULL,
pubsubTopic VARCHAR NOT NULL,
lastSyncTimestamp INTEGER NOT NULL,
PRIMARY KEY (clusterId, pubsubTopic)
) WITHOUT ROWID;

View File

@ -0,0 +1,3 @@
// Package sql holds the raw SQL migration scripts that get embedded into
// ../bindata.go (package migrations) by go-bindata.
package sql

// Regenerate ../bindata.go from the files in this directory with `go generate`.
//go:generate go-bindata -pkg migrations -o ../bindata.go ./

View File

@ -0,0 +1,58 @@
package sqlite
import (
"database/sql"
"strings"
"github.com/golang-migrate/migrate/v4/database"
"github.com/golang-migrate/migrate/v4/database/sqlite3"
_ "github.com/mattn/go-sqlite3" // Blank import to register the sqlite3 driver
"github.com/waku-org/storenode-messages/internal/persistence/migrate"
"github.com/waku-org/storenode-messages/internal/persistence/sqlite/migrations"
"go.uber.org/zap"
)
// addSqliteURLDefaults appends default connection parameters to a sqlite3
// DSN unless the caller already set them:
//   - _journal=WAL:  write-ahead logging journal mode
//   - _timeout=5000: wait up to 5000ms on a locked database
//
// The previous version always inserted a bare "?" and then appended "&...",
// producing malformed DSNs such as "db.sqlite?&_journal=WAL"; the separator
// is now tracked so the first added parameter is attached with "?".
func addSqliteURLDefaults(dburl string) string {
	sep := "?"
	if strings.Contains(dburl, "?") {
		sep = "&"
	}
	if !strings.Contains(dburl, "_journal=") {
		dburl += sep + "_journal=WAL"
		sep = "&"
	}
	if !strings.Contains(dburl, "_timeout=") {
		dburl += sep + "_timeout=5000"
	}
	return dburl
}
// NewDB creates a sqlite3 DB in the specified path
// The DSN is augmented with default parameters via addSqliteURLDefaults.
// The logger parameter is currently unused.
func NewDB(dburl string, logger *zap.Logger) (*sql.DB, error) {
	database, err := sql.Open("sqlite3", addSqliteURLDefaults(dburl))
	if err != nil {
		return nil, err
	}
	// Disable concurrent access as not supported by the driver
	database.SetMaxOpenConns(1)
	return database, nil
}
// migrationDriver wraps db in a golang-migrate sqlite3 database driver,
// using the driver's default table for migration bookkeeping.
func migrationDriver(db *sql.DB) (database.Driver, error) {
	return sqlite3.WithInstance(db, &sqlite3.Config{
		MigrationsTable: sqlite3.DefaultMigrationsTable,
	})
}
// Migrations is the function used for DB migration with sqlite driver
// It applies the embedded SQL migrations to db. The logger parameter is
// currently unused but kept for signature compatibility with ParseURL.
func Migrations(db *sql.DB, logger *zap.Logger) error {
	// Renamed the local so it no longer shadows the migrationDriver helper.
	driver, err := migrationDriver(db)
	if err != nil {
		return err
	}
	return migrate.Migrate(db, driver, migrations.AssetNames(), migrations.Asset)
}

View File

@ -0,0 +1,58 @@
package persistence
import (
"database/sql"
"errors"
"regexp"
"strings"
"github.com/waku-org/storenode-messages/internal/persistence/sqlite"
"go.uber.org/zap"
)
// validateDBUrl checks that val looks like "<engine>://<path-or-host>[?params]"
// and returns an error when it does not.
func validateDBUrl(val string) error {
	ok, err := regexp.MatchString(`^[\w\+]+:\/\/[\w\/\\\.\:\@]+\?{0,1}.*$`, val)
	if err != nil || !ok {
		return errors.New("invalid db url option format")
	}
	return nil
}
// ParseURL will return a database connection, and migration function that should be used depending on a database connection string
//
// An empty databaseURL selects an ephemeral in-memory sqlite database.
// Otherwise the URL must have the form "<engine>://<params>"; only the
// "sqlite3" engine is currently supported.
func ParseURL(databaseURL string, logger *zap.Logger) (*sql.DB, func(*sql.DB, *zap.Logger) error, error) {
	logger = logger.Named("db-setup")

	dbURL := databaseURL
	if dbURL == "" {
		// In memoryDB
		dbURL = "sqlite3://:memory:"
	} else if err := validateDBUrl(dbURL); err != nil {
		return nil, nil, err
	}

	// The previous version indexed Split(dbURL, "://")[1] unconditionally,
	// which panics on a URL without "://"; Cut lets us fail gracefully.
	dbEngine, dbParams, found := strings.Cut(dbURL, "://")
	if !found {
		return nil, nil, errors.New("invalid db url option format")
	}

	switch dbEngine {
	case "sqlite3":
		db, err := sqlite.NewDB(dbParams, logger)
		if err != nil {
			return nil, nil, err
		}
		return db, sqlite.Migrations, nil
	default:
		return nil, nil, errors.New("unsupported database engine")
	}
}