refactor_: remove mailservers code

parent 05e3a35bf7
commit 04833f559f
@@ -124,51 +124,50 @@ func randomNodeConfig() *params.NodeConfig {
	privK, _ := crypto.GenerateKey()

	return &params.NodeConfig{
		NetworkID: uint64(int64(randomInt(math.MaxInt64))),
		DataDir: randomString(),
		KeyStoreDir: randomString(),
		NodeKey: randomString(),
		NoDiscovery: randomBool(),
		Rendezvous: randomBool(),
		ListenAddr: randomString(),
		AdvertiseAddr: randomString(),
		Name: randomString(),
		Version: randomString(),
		APIModules: randomString(),
		TLSEnabled: randomBool(),
		MaxPeers: randomInt(math.MaxInt64),
		MaxPendingPeers: randomInt(math.MaxInt64),
		EnableStatusService: randomBool(),
		BridgeConfig: params.BridgeConfig{Enabled: randomBool()},
		WalletConfig: params.WalletConfig{Enabled: randomBool()},
		LocalNotificationsConfig: params.LocalNotificationsConfig{Enabled: randomBool()},
		BrowsersConfig: params.BrowsersConfig{Enabled: randomBool()},
		PermissionsConfig: params.PermissionsConfig{Enabled: randomBool()},
		MailserversConfig: params.MailserversConfig{Enabled: randomBool()},
		Web3ProviderConfig: params.Web3ProviderConfig{Enabled: randomBool()},
		ConnectorConfig: params.ConnectorConfig{Enabled: randomBool()},
		SwarmConfig: params.SwarmConfig{Enabled: randomBool()},
		MailServerRegistryAddress: randomString(),
		HTTPEnabled: randomBool(),
		HTTPHost: randomString(),
		HTTPPort: randomInt(math.MaxInt64),
		HTTPVirtualHosts: randomStringSlice(),
		HTTPCors: randomStringSlice(),
		WSEnabled: false, // NOTE: leaving ws fields idle since we are moving away from storing the whole config
		WSHost: "",
		WSPort: 0,
		IPCEnabled: randomBool(),
		IPCFile: randomString(),
		LogEnabled: randomBool(),
		LogMobileSystem: randomBool(),
		LogDir: randomString(),
		LogFile: randomString(),
		LogLevel: randomString(),
		LogMaxBackups: randomInt(math.MaxInt64),
		LogMaxSize: randomInt(math.MaxInt64),
		LogCompressRotated: randomBool(),
		LogToStderr: randomBool(),
		UpstreamConfig: params.UpstreamRPCConfig{Enabled: randomBool(), URL: randomString()},
		NetworkID: uint64(int64(randomInt(math.MaxInt64))),
		DataDir: randomString(),
		KeyStoreDir: randomString(),
		NodeKey: randomString(),
		NoDiscovery: randomBool(),
		Rendezvous: randomBool(),
		ListenAddr: randomString(),
		AdvertiseAddr: randomString(),
		Name: randomString(),
		Version: randomString(),
		APIModules: randomString(),
		TLSEnabled: randomBool(),
		MaxPeers: randomInt(math.MaxInt64),
		MaxPendingPeers: randomInt(math.MaxInt64),
		EnableStatusService: randomBool(),
		BridgeConfig: params.BridgeConfig{Enabled: randomBool()},
		WalletConfig: params.WalletConfig{Enabled: randomBool()},
		LocalNotificationsConfig: params.LocalNotificationsConfig{Enabled: randomBool()},
		BrowsersConfig: params.BrowsersConfig{Enabled: randomBool()},
		PermissionsConfig: params.PermissionsConfig{Enabled: randomBool()},
		MailserversConfig: params.MailserversConfig{Enabled: randomBool()},
		Web3ProviderConfig: params.Web3ProviderConfig{Enabled: randomBool()},
		ConnectorConfig: params.ConnectorConfig{Enabled: randomBool()},
		SwarmConfig: params.SwarmConfig{Enabled: randomBool()},
		HTTPEnabled: randomBool(),
		HTTPHost: randomString(),
		HTTPPort: randomInt(math.MaxInt64),
		HTTPVirtualHosts: randomStringSlice(),
		HTTPCors: randomStringSlice(),
		WSEnabled: false, // NOTE: leaving ws fields idle since we are moving away from storing the whole config
		WSHost: "",
		WSPort: 0,
		IPCEnabled: randomBool(),
		IPCFile: randomString(),
		LogEnabled: randomBool(),
		LogMobileSystem: randomBool(),
		LogDir: randomString(),
		LogFile: randomString(),
		LogLevel: randomString(),
		LogMaxBackups: randomInt(math.MaxInt64),
		LogMaxSize: randomInt(math.MaxInt64),
		LogCompressRotated: randomBool(),
		LogToStderr: randomBool(),
		UpstreamConfig: params.UpstreamRPCConfig{Enabled: randomBool(), URL: randomString()},
		ClusterConfig: params.ClusterConfig{
			Enabled: randomBool(),
			Fleet: randomString(),
@@ -227,23 +226,13 @@ func randomNodeConfig() *params.NodeConfig {
			AutoUpdate: randomBool(),
		},
		WakuConfig: params.WakuConfig{
			Enabled: randomBool(),
			LightClient: randomBool(),
			FullNode: randomBool(),
			EnableMailServer: randomBool(),
			DataDir: randomString(),
			MinimumPoW: randomFloat(math.MaxInt64),
			MailServerPassword: randomString(),
			MailServerRateLimit: randomInt(math.MaxInt64),
			MailServerDataRetention: randomInt(math.MaxInt64),
			TTL: randomInt(math.MaxInt64),
			MaxMessageSize: uint32(randomInt(math.MaxInt64)),
			DatabaseConfig: params.DatabaseConfig{
				PGConfig: params.PGConfig{
					Enabled: randomBool(),
					URI: randomString(),
				},
			},
			Enabled: randomBool(),
			LightClient: randomBool(),
			FullNode: randomBool(),
			DataDir: randomString(),
			MinimumPoW: randomFloat(math.MaxInt64),
			TTL: randomInt(math.MaxInt64),
			MaxMessageSize: uint32(randomInt(math.MaxInt64)),
			EnableRateLimiter: randomBool(),
			PacketRateLimitIP: int64(randomInt(math.MaxInt64)),
			PacketRateLimitPeerID: int64(randomInt(math.MaxInt64)),
@@ -54,10 +54,9 @@ var (
	torrentClientPort = flag.Int("torrent-client-port", 9025, "Port for BitTorrent protocol connections")
	version = flag.Bool("version", false, "Print version and dump configuration")

	dataDir = flag.String("dir", getDefaultDataDir(), "Directory used by node to store data")
	register = flag.Bool("register", false, "Register and make the node discoverable by other nodes")
	mailserver = flag.Bool("mailserver", false, "Enable Mail Server with default configuration")
	networkID = flag.Int(
	dataDir = flag.String("dir", getDefaultDataDir(), "Directory used by node to store data")
	register = flag.Bool("register", false, "Register and make the node discoverable by other nodes")
	networkID = flag.Int(
		"network-id",
		params.GoerliNetworkID,
		fmt.Sprintf(
@@ -118,9 +117,6 @@ func main() {
	}

	opts := []params.Option{params.WithFleet(*fleet)}
	if *mailserver {
		opts = append(opts, params.WithMailserver())
	}

	config, err := params.NewNodeConfigWithDefaultsAndFiles(
		*dataDir,
@@ -140,9 +136,7 @@ func main() {
		config.ListenAddr = *listenAddr
	}

	if *register && *mailserver {
		config.RegisterTopics = append(config.RegisterTopics, params.MailServerDiscv5Topic)
	} else if *register {
	if *register {
		config.RegisterTopics = append(config.RegisterTopics, params.WhisperDiscv5Topic)
	}
@@ -251,7 +245,6 @@ func main() {
		identity,
		gethbridge.NewNodeBridge(backend.StatusNode().GethNode(), backend.StatusNode().WakuService(), backend.StatusNode().WakuV2Service()),
		installationID.String(),
		nil,
		config.Version,
		options...,
	)
@@ -324,7 +317,6 @@ func main() {
		identity,
		gethbridge.NewNodeBridge(backend.StatusNode().GethNode(), backend.StatusNode().WakuService(), backend.StatusNode().WakuV2Service()),
		installationID.String(),
		nil,
		config.Version,
		options...,
	)
@@ -1,8 +0,0 @@
{
  "WhisperConfig": {
    "Enabled": true,
    "EnableNTPSync": true,
    "EnableMailServer": true,
    "MailServerPassword": "status-offline-inbox"
  }
}
@@ -2,7 +2,6 @@ package gethbridge

import (
	"github.com/status-im/status-go/eth-node/types"
	"github.com/status-im/status-go/waku"

	wakucommon "github.com/status-im/status-go/waku/common"
	wakuv2common "github.com/status-im/status-go/wakuv2/common"
@@ -21,8 +20,6 @@ func NewWakuEnvelopeEventWrapper(envelopeEvent *wakucommon.EnvelopeEvent) *types
	for index := range data {
		wrappedData[index] = *NewWakuEnvelopeErrorWrapper(&data[index])
	}
	case *waku.MailServerResponse:
		wrappedData = NewWakuMailServerResponseWrapper(data)
	}
	return &types.EnvelopeEvent{
		Event: types.EventType(envelopeEvent.Event),
@@ -1,19 +0,0 @@
package gethbridge

import (
	"github.com/status-im/status-go/eth-node/types"
	"github.com/status-im/status-go/waku"
)

// NewWakuMailServerResponseWrapper returns a types.MailServerResponse object that mimics Geth's MailServerResponse
func NewWakuMailServerResponseWrapper(mailServerResponse *waku.MailServerResponse) *types.MailServerResponse {
	if mailServerResponse == nil {
		panic("mailServerResponse should not be nil")
	}

	return &types.MailServerResponse{
		LastEnvelopeHash: types.Hash(mailServerResponse.LastEnvelopeHash),
		Cursor: mailServerResponse.Cursor,
		Error: mailServerResponse.Error,
	}
}
@@ -53,32 +53,3 @@ func (r *MessagesRequest) SetDefaults(now time.Time) {
		}
	}
}

// MailServerResponse is the response payload sent by the mailserver.
type MailServerResponse struct {
	LastEnvelopeHash Hash
	Cursor []byte
	Error error
}

// SyncMailRequest contains details which envelopes should be synced
// between Mail Servers.
type SyncMailRequest struct {
	// Lower is a lower bound of time range for which messages are requested.
	Lower uint32
	// Upper is an upper bound of time range for which messages are requested.
	Upper uint32
	// Bloom is a bloom filter to filter envelopes.
	Bloom []byte
	// Limit is the max number of envelopes to return.
	Limit uint32
	// Cursor is used for pagination of the results.
	Cursor []byte
}

// SyncEventResponse is a response from the Mail Server
// from which the peer received envelopes.
type SyncEventResponse struct {
	Cursor []byte
	Error string
}
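Editor's note: to make the removed request type concrete, here is a hedged sketch (not part of the commit) of how a sync request would be filled in inside the waku package; the `bloom` placeholder stands in for a real topic bloom filter, and the 64-byte size mirrors waku's bloom filter size.

```go
// Illustrative only: ask a peer Mail Server for up to 1000 envelopes
// from the last 24 hours.
bloom := make([]byte, 64) // placeholder; real requests derive this from topics
req := SyncMailRequest{
	Lower: uint32(time.Now().Add(-24 * time.Hour).Unix()), // from, unix seconds
	Upper: uint32(time.Now().Unix()),                      // to, unix seconds
	Bloom: bloom,
	Limit: 1000, // max envelopes per response page
}
// The request would then be passed to Whisper.SyncMessages(peerID, req),
// the interface method removed in the next hunk.
_ = req
```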
@@ -43,7 +43,4 @@ type Whisper interface {
	GetFilter(id string) Filter
	Unsubscribe(id string) error
	UnsubscribeMany(ids []string) error

	// SyncMessages can be sent between two Mail Servers and syncs envelopes between them.
	SyncMessages(peerID []byte, req SyncMailRequest) error
}
@@ -1,43 +0,0 @@
MailServer
==========

This document is meant to collect various information about our MailServer implementation.

## Syncing between mail servers

It might happen that one mail server falls behind another for various reasons, such as the machine being down for a few minutes.

There is an option to fix such a mail server:
1. SSH to the machine where the broken mail server runs,
2. Add a mail server from which you want to sync:
```
# sudo might not be needed in your setup
$ echo '{"jsonrpc":"2.0","method":"admin_addPeer", "params": ["enode://c42f368a23fa98ee546fd247220759062323249ef657d26d357a777443aec04db1b29a3a22ef3e7c548e18493ddaf51a31b0aed6079bd6ebe5ae838fcfaf3a49@206.189.243.162:30504"], "id":1}' | \
  sudo socat -d -d - UNIX-CONNECT:/docker/statusd-mail/data/geth.ipc
```
3. Mark it as a trusted peer:
```
# sudo might not be needed in your setup
$ echo '{"jsonrpc":"2.0","method":"shh_markTrustedPeer", "params": ["enode://c42f368a23fa98ee546fd247220759062323249ef657d26d357a777443aec04db1b29a3a22ef3e7c548e18493ddaf51a31b0aed6079bd6ebe5ae838fcfaf3a49@206.189.243.162:30504"], "id":1}' | \
  sudo socat -d -d - UNIX-CONNECT:/docker/statusd-mail/data/geth.ipc
```
4. Finally, trigger the sync command:
```
# sudo might not be needed in your setup
$ echo '{"jsonrpc":"2.0","method":"shhext_syncMessages","params":[{"mailServerPeer":"enode://c42f368a23fa98ee546fd247220759062323249ef657d26d357a777443aec04db1b29a3a22ef3e7c548e18493ddaf51a31b0aed6079bd6ebe5ae838fcfaf3a49@206.189.243.162:30504", "to": 1550479953, "from": 1550393583, "limit": 1000}],"id":1}' | \
  sudo socat -d -d - UNIX-CONNECT:/docker/statusd-mail/data/geth.ipc
```

You can add `"followCursor": true` if you want it to automatically download messages until the cursor is empty, meaning all data was synced.

### Debugging

To verify that your mail server received any responses, watch the logs for entries like this:
```
INFO [02-18|09:08:54.257] received sync response count=217 final=false err= cursor=[]
```

And it should finish with:
```
INFO [02-18|09:08:54.431] received sync response count=0 final=true err= cursor=[]
```
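Editor's note: the socat calls above can also be issued from Go. A minimal sketch (not part of the removed docs) using go-ethereum's `rpc` client against the same IPC endpoint and enode placeholders used in the examples:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Connect over the same IPC endpoint the socat examples use.
	client, err := rpc.Dial("/docker/statusd-mail/data/geth.ipc")
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// Same parameters as the shhext_syncMessages example above.
	var resp interface{}
	err = client.Call(&resp, "shhext_syncMessages", map[string]interface{}{
		"mailServerPeer": "enode://c42f368a23fa98ee546fd247220759062323249ef657d26d357a777443aec04db1b29a3a22ef3e7c548e18493ddaf51a31b0aed6079bd6ebe5ae838fcfaf3a49@206.189.243.162:30504",
		"from":           1550393583,
		"to":             1550479953,
		"limit":          1000,
	})
	fmt.Println(resp, err)
}
```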
@@ -1,85 +0,0 @@
package mailserver

import (
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/log"
)

const (
	dbCleanerBatchSize = 1000
	dbCleanerPeriod = time.Hour
)

// dbCleaner removes old messages from a db.
type dbCleaner struct {
	sync.RWMutex

	db DB
	batchSize int
	retention time.Duration

	period time.Duration
	cancel chan struct{}
}

// newDBCleaner returns a new cleaner for db.
func newDBCleaner(db DB, retention time.Duration) *dbCleaner {
	return &dbCleaner{
		db: db,
		retention: retention,

		batchSize: dbCleanerBatchSize,
		period: dbCleanerPeriod,
	}
}

// Start starts a loop that cleans up old messages.
func (c *dbCleaner) Start() {
	log.Info("Starting cleaning envelopes", "period", c.period, "retention", c.retention)

	cancel := make(chan struct{})

	c.Lock()
	c.cancel = cancel
	c.Unlock()

	go c.schedule(c.period, cancel)
}

// Stop stops the cleaning loop.
func (c *dbCleaner) Stop() {
	c.Lock()
	defer c.Unlock()

	if c.cancel == nil {
		return
	}
	close(c.cancel)
	c.cancel = nil
}

func (c *dbCleaner) schedule(period time.Duration, cancel <-chan struct{}) {
	t := time.NewTicker(period)
	defer t.Stop()

	for {
		select {
		case <-t.C:
			count, err := c.PruneEntriesOlderThan(time.Now().Add(-c.retention))
			if err != nil {
				log.Error("failed to prune data", "err", err)
			}
			log.Info("Pruned some messages successfully", "count", count)
		case <-cancel:
			return
		}
	}
}

// PruneEntriesOlderThan removes messages older than the given time
// and returns how many have been removed.
func (c *dbCleaner) PruneEntriesOlderThan(t time.Time) (int, error) {
	return c.db.Prune(t, c.batchSize)
}
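Editor's note: for context, the mailserver wired this cleaner up roughly as in the sketch below (mirroring `setupCleaner` in mailserver.go later in this commit); `db` is assumed to be a value satisfying the package's `DB` interface.

```go
// DataRetention is configured in days; the cleaner prunes anything
// older than the retention window once per period (hourly by default).
cleaner := newDBCleaner(db, time.Duration(30)*time.Hour*24) // e.g. keep 30 days
cleaner.Start()
defer cleaner.Stop()
```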
@@ -1,160 +0,0 @@
package mailserver

import (
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/storage"

	"github.com/ethereum/go-ethereum/rlp"

	"github.com/status-im/status-go/eth-node/types"
	waku "github.com/status-im/status-go/waku/common"
)

func TestCleaner(t *testing.T) {
	now := time.Now()
	server := setupTestServer(t)
	defer server.Close()
	cleaner := newDBCleaner(server.ms.db, time.Hour)

	archiveEnvelope(t, now.Add(-10*time.Second), server)
	archiveEnvelope(t, now.Add(-3*time.Second), server)
	archiveEnvelope(t, now.Add(-1*time.Second), server)

	testMessagesCount(t, 3, server)

	testPrune(t, now.Add(-5*time.Second), 1, cleaner)
	testPrune(t, now.Add(-2*time.Second), 1, cleaner)
	testPrune(t, now, 1, cleaner)

	testMessagesCount(t, 0, server)
}

func TestCleanerSchedule(t *testing.T) {
	now := time.Now()
	server := setupTestServer(t)
	defer server.Close()

	cleaner := newDBCleaner(server.ms.db, time.Hour)
	cleaner.period = time.Millisecond * 10
	cleaner.Start()
	defer cleaner.Stop()

	archiveEnvelope(t, now.Add(-3*time.Hour), server)
	archiveEnvelope(t, now.Add(-2*time.Hour), server)
	archiveEnvelope(t, now.Add(-1*time.Minute), server)

	time.Sleep(time.Millisecond * 50)

	testMessagesCount(t, 1, server)
}

func benchmarkCleanerPrune(b *testing.B, messages int, batchSize int) {
	t := &testing.T{}
	now := time.Now()
	sentTime := now.Add(-10 * time.Second)
	server := setupTestServer(t)
	defer server.Close()

	cleaner := newDBCleaner(server.ms.db, time.Hour)
	cleaner.batchSize = batchSize

	for i := 0; i < messages; i++ {
		archiveEnvelope(t, sentTime, server)
	}

	for i := 0; i < b.N; i++ {
		testPrune(t, now, 0, cleaner)
	}
}

func BenchmarkCleanerPruneM100_000_B100_000(b *testing.B) {
	benchmarkCleanerPrune(b, 100000, 100000)
}

func BenchmarkCleanerPruneM100_000_B10_000(b *testing.B) {
	benchmarkCleanerPrune(b, 100000, 10000)
}

func BenchmarkCleanerPruneM100_000_B1000(b *testing.B) {
	benchmarkCleanerPrune(b, 100000, 1000)
}

func BenchmarkCleanerPruneM100_000_B100(b *testing.B) {
	benchmarkCleanerPrune(b, 100000, 100)
}

func setupTestServer(t *testing.T) *WakuMailServer {
	var s WakuMailServer
	db, _ := leveldb.Open(storage.NewMemStorage(), nil)

	s.ms = &mailServer{
		db: &LevelDB{
			ldb: db,
			done: make(chan struct{}),
		},
		adapter: &wakuAdapter{},
	}
	s.minRequestPoW = powRequirement
	return &s
}

func archiveEnvelope(t *testing.T, sentTime time.Time, server *WakuMailServer) *waku.Envelope {
	env, err := generateEnvelope(sentTime)
	require.NoError(t, err)
	server.Archive(env)

	return env
}

func testPrune(t *testing.T, u time.Time, expected int, c *dbCleaner) {
	n, err := c.PruneEntriesOlderThan(u)
	require.NoError(t, err)
	require.Equal(t, expected, n)
}

func testMessagesCount(t *testing.T, expected int, s *WakuMailServer) {
	count := countMessages(t, s.ms.db)
	require.Equal(t, expected, count, fmt.Sprintf("expected %d messages, got: %d", expected, count))
}

func countMessages(t *testing.T, db DB) int {
	var (
		count int
		zero types.Hash
		emptyTopic types.TopicType
	)

	now := time.Now()
	kl := NewDBKey(uint32(0), emptyTopic, zero)
	ku := NewDBKey(uint32(now.Unix()), emptyTopic, zero)

	query := CursorQuery{
		start: kl.raw,
		end: ku.raw,
	}

	i, _ := db.BuildIterator(query)
	defer func() { _ = i.Release() }()

	for i.Next() {
		var env waku.Envelope
		value, err := i.GetEnvelopeByBloomFilter(query.bloom)
		if err != nil {
			t.Fatal(err)
		}

		err = rlp.DecodeBytes(value, &env)
		if err != nil {
			t.Fatal(err)
		}

		count++
	}

	return count
}
@@ -1,53 +0,0 @@
package mailserver

import (
	"encoding/binary"
	"errors"

	"github.com/status-im/status-go/eth-node/types"
)

const (
	// DBKeyLength is a size of the envelope key.
	DBKeyLength = types.HashLength + timestampLength + types.TopicLength
	CursorLength = types.HashLength + timestampLength
)

var (
	// ErrInvalidByteSize is returned when DBKey can't be created
	// from a byte slice because it has invalid length.
	ErrInvalidByteSize = errors.New("byte slice has invalid length")
)

// DBKey is a key to be stored in a db.
type DBKey struct {
	raw []byte
}

// Bytes returns a bytes representation of the DBKey.
func (k *DBKey) Bytes() []byte {
	return k.raw
}

func (k *DBKey) Topic() types.TopicType {
	return types.BytesToTopic(k.raw[timestampLength+types.HashLength:])
}

func (k *DBKey) EnvelopeHash() types.Hash {
	return types.BytesToHash(k.raw[timestampLength : types.HashLength+timestampLength])
}

func (k *DBKey) Cursor() []byte {
	// We don't use the whole cursor for backward compatibility (also it's not needed)
	return k.raw[:CursorLength]
}

// NewDBKey creates a new DBKey with the given values.
func NewDBKey(timestamp uint32, topic types.TopicType, h types.Hash) *DBKey {
	var k DBKey
	k.raw = make([]byte, DBKeyLength)
	binary.BigEndian.PutUint32(k.raw, timestamp)
	copy(k.raw[timestampLength:], h[:])
	copy(k.raw[timestampLength+types.HashLength:], topic[:])
	return &k
}
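Editor's note: the big-endian timestamp prefix is what makes a key-range scan over the db return envelopes in chronological order, since LevelDB compares keys byte-wise. A standalone illustration (not part of the package):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	earlier := make([]byte, 4)
	later := make([]byte, 4)
	binary.BigEndian.PutUint32(earlier, 1550393583)
	binary.BigEndian.PutUint32(later, 1550479953)
	// Big-endian encoding preserves numeric order under byte-wise
	// comparison, so keys with earlier timestamps sort first.
	fmt.Println(bytes.Compare(earlier, later) < 0) // prints: true
}
```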
@@ -1,20 +0,0 @@
package mailserver

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/status-im/status-go/eth-node/types"
)

func TestNewDBKey(t *testing.T) {
	topic := types.BytesToTopic([]byte{0x01, 0x02, 0x03, 0x04})

	hash := types.BytesToHash([]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x30, 0x31, 0x32})
	dbKey := NewDBKey(0xabcdef12, topic, hash)
	expected := []byte{0xab, 0xcd, 0xef, 0x12, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x30, 0x31, 0x32, 0x01, 0x02, 0x03, 0x04}
	require.Equal(t, expected, dbKey.Bytes())
	require.Equal(t, topic, dbKey.Topic())
	require.Equal(t, hash, dbKey.EnvelopeHash())
}
@@ -1,88 +0,0 @@
package mailserver

import (
	"sync"
	"time"
)

type rateLimiter struct {
	sync.RWMutex

	lifespan time.Duration // duration of the limit
	db map[string]time.Time

	period time.Duration
	cancel chan struct{}
}

func newRateLimiter(duration time.Duration) *rateLimiter {
	return &rateLimiter{
		lifespan: duration,
		db: make(map[string]time.Time),
		period: time.Second,
	}
}

func (l *rateLimiter) Start() {
	cancel := make(chan struct{})

	l.Lock()
	l.cancel = cancel
	l.Unlock()

	go l.cleanUp(l.period, cancel)
}

func (l *rateLimiter) Stop() {
	l.Lock()
	defer l.Unlock()

	if l.cancel == nil {
		return
	}
	close(l.cancel)
	l.cancel = nil
}

func (l *rateLimiter) Add(id string) {
	l.Lock()
	l.db[id] = time.Now()
	l.Unlock()
}

func (l *rateLimiter) IsAllowed(id string) bool {
	l.RLock()
	defer l.RUnlock()

	if lastRequestTime, ok := l.db[id]; ok {
		return lastRequestTime.Add(l.lifespan).Before(time.Now())
	}

	return true
}

func (l *rateLimiter) cleanUp(period time.Duration, cancel <-chan struct{}) {
	t := time.NewTicker(period)
	defer t.Stop()

	for {
		select {
		case <-t.C:
			l.deleteExpired()
		case <-cancel:
			return
		}
	}
}

func (l *rateLimiter) deleteExpired() {
	l.Lock()
	defer l.Unlock()

	now := time.Now()
	for id, lastRequestTime := range l.db {
		if lastRequestTime.Add(l.lifespan).Before(now) {
			delete(l.db, id)
		}
	}
}
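Editor's note: the limiter admits at most one request per `lifespan` per id; `exceedsPeerRequests` in mailserver.go below uses it exactly as in this sketch (the peer id string is a placeholder):

```go
l := newRateLimiter(5 * time.Second)
l.Start()
defer l.Stop()

peerID := "peer-id" // placeholder
if l.IsAllowed(peerID) {
	l.Add(peerID) // record this request's timestamp
	// ... serve the request ...
}
```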
@@ -1,109 +0,0 @@
package mailserver

import (
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestIsAllowed(t *testing.T) {
	peerID := "peerID"
	testCases := []struct {
		t time.Duration
		shouldBeAllowed bool
		db func() map[string]time.Time
		info string
	}{
		{
			t: 5 * time.Millisecond,
			shouldBeAllowed: true,
			db: func() map[string]time.Time {
				return make(map[string]time.Time)
			},
			info: "Expecting limiter.isAllowed to allow with an empty db",
		},
		{
			t: 5 * time.Millisecond,
			shouldBeAllowed: true,
			db: func() map[string]time.Time {
				db := make(map[string]time.Time)
				db[peerID] = time.Now().Add(time.Duration(-10) * time.Millisecond)
				return db
			},
			info: "Expecting limiter.isAllowed to allow with an expired peer on its db",
		},
		{
			t: 5 * time.Millisecond,
			shouldBeAllowed: false,
			db: func() map[string]time.Time {
				db := make(map[string]time.Time)
				db[peerID] = time.Now().Add(time.Duration(-1) * time.Millisecond)
				return db
			},
			info: "Expecting limiter.isAllowed to not allow with a non expired peer on its db",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.info, func(*testing.T) {
			l := newRateLimiter(tc.t)
			l.db = tc.db()
			assert.Equal(t, tc.shouldBeAllowed, l.IsAllowed(peerID))
		})
	}
}

func TestRemoveExpiredRateLimits(t *testing.T) {
	peer := "peer"
	l := newRateLimiter(time.Duration(5) * time.Second)
	for i := 0; i < 10; i++ {
		peerID := fmt.Sprintf("%s%d", peer, i)
		l.db[peerID] = time.Now().Add(time.Duration(i*(-2)) * time.Second)
	}

	l.deleteExpired()
	assert.Equal(t, 3, len(l.db))

	for i := 0; i < 3; i++ {
		peerID := fmt.Sprintf("%s%d", peer, i)
		_, ok := l.db[peerID]
		assert.True(t, ok, fmt.Sprintf("Non expired peer '%s' should exist, but it doesn't", peerID))
	}
	for i := 3; i < 10; i++ {
		peerID := fmt.Sprintf("%s%d", peer, i)
		_, ok := l.db[peerID]
		assert.False(t, ok, fmt.Sprintf("Expired peer '%s' should not exist, but it does", peerID))
	}
}

func TestCleaningUpExpiredRateLimits(t *testing.T) {
	l := newRateLimiter(5 * time.Second)
	l.period = time.Millisecond * 10
	l.Start()
	defer l.Stop()

	l.db["peer01"] = time.Now().Add(-1 * time.Second)
	l.db["peer02"] = time.Now().Add(-2 * time.Second)
	l.db["peer03"] = time.Now().Add(-10 * time.Second)

	time.Sleep(time.Millisecond * 20)

	_, ok := l.db["peer01"]
	assert.True(t, ok)
	_, ok = l.db["peer02"]
	assert.True(t, ok)
	_, ok = l.db["peer03"]
	assert.False(t, ok)
}

func TestAddingLimts(t *testing.T) {
	peerID := "peerAdding"
	l := newRateLimiter(time.Duration(5) * time.Second)
	pre := time.Now()
	l.Add(peerID)
	post := time.Now()
	assert.True(t, l.db[peerID].After(pre))
	assert.True(t, l.db[peerID].Before(post))
}
@@ -1,948 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package mailserver

import (
	"crypto/ecdsa"
	"encoding/binary"
	"errors"
	"fmt"
	"math/rand"
	"sync"
	"time"

	prom "github.com/prometheus/client_golang/prometheus"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
	gethbridge "github.com/status-im/status-go/eth-node/bridge/geth"
	"github.com/status-im/status-go/eth-node/crypto"
	"github.com/status-im/status-go/eth-node/types"
	"github.com/status-im/status-go/params"
	"github.com/status-im/status-go/waku"
	wakucommon "github.com/status-im/status-go/waku/common"
)

const (
	maxQueryRange = 24 * time.Hour
	maxQueryLimit = 1000
	// When we default the upper limit, we want to extend the range a bit
	// to accommodate envelopes with slightly higher timestamps, in seconds
	whisperTTLSafeThreshold = 60
)

var (
	errDirectoryNotProvided = errors.New("data directory not provided")
	errDecryptionMethodNotProvided = errors.New("decryption method is not provided")
)

const (
	timestampLength = 4
	requestLimitLength = 4
	requestTimeRangeLength = timestampLength * 2
	processRequestTimeout = time.Minute
)

type Config struct {
	// DataDir points to a directory where mailserver's data is stored.
	DataDir string
	// Password is used to create a symmetric key to decrypt requests.
	Password string
	// AsymKey is an asymmetric key to decrypt requests.
	AsymKey string
	// MinimumPoW is a minimum PoW for requests.
	MinimumPoW float64
	// RateLimit is a maximum number of requests per second from a peer.
	RateLimit int
	// DataRetention specifies a number of days an envelope should be stored for.
	DataRetention int
	PostgresEnabled bool
	PostgresURI string
}

// --------------
// WakuMailServer
// --------------

type WakuMailServer struct {
	ms *mailServer
	shh *waku.Waku
	minRequestPoW float64

	symFilter *wakucommon.Filter
	asymFilter *wakucommon.Filter
}

func (s *WakuMailServer) Init(waku *waku.Waku, cfg *params.WakuConfig) error {
	s.shh = waku
	s.minRequestPoW = cfg.MinimumPoW

	config := Config{
		DataDir: cfg.DataDir,
		Password: cfg.MailServerPassword,
		MinimumPoW: cfg.MinimumPoW,
		DataRetention: cfg.MailServerDataRetention,
		RateLimit: cfg.MailServerRateLimit,
		PostgresEnabled: cfg.DatabaseConfig.PGConfig.Enabled,
		PostgresURI: cfg.DatabaseConfig.PGConfig.URI,
	}
	var err error
	s.ms, err = newMailServer(
		config,
		&wakuAdapter{},
		&wakuService{Waku: waku},
	)
	if err != nil {
		return err
	}

	if err := s.setupDecryptor(config.Password, config.AsymKey); err != nil {
		return err
	}

	return nil
}

func (s *WakuMailServer) Close() {
	s.ms.Close()
}

func (s *WakuMailServer) Archive(env *wakucommon.Envelope) {
	s.ms.Archive(gethbridge.NewWakuEnvelope(env))
}

func (s *WakuMailServer) Deliver(peerID []byte, req wakucommon.MessagesRequest) {
	s.ms.DeliverMail(types.BytesToHash(peerID), types.BytesToHash(req.ID), MessagesRequestPayload{
		Lower: req.From,
		Upper: req.To,
		Bloom: req.Bloom,
		Topics: req.Topics,
		Limit: req.Limit,
		Cursor: req.Cursor,
		Batch: true,
	})
}

// DEPRECATED; use Deliver instead
func (s *WakuMailServer) DeliverMail(peerID []byte, req *wakucommon.Envelope) {
	payload, err := s.decodeRequest(peerID, req)
	if err != nil {
		deliveryFailuresCounter.WithLabelValues("validation").Inc()
		log.Error(
			"[mailserver:DeliverMail] request failed validation",
			"peerID", types.BytesToHash(peerID),
			"requestID", req.Hash().String(),
			"err", err,
		)
		s.ms.sendHistoricMessageErrorResponse(types.BytesToHash(peerID), types.Hash(req.Hash()), err)
		return
	}

	s.ms.DeliverMail(types.BytesToHash(peerID), types.Hash(req.Hash()), payload)
}

// bloomFromReceivedMessage extracts the bloom filter
// used in a given received p2p request message.
func (s *WakuMailServer) bloomFromReceivedMessage(msg *wakucommon.ReceivedMessage) ([]byte, error) {
	payloadSize := len(msg.Payload)

	if payloadSize < 8 {
		return nil, errors.New("Undersized p2p request")
	} else if payloadSize == 8 {
		return wakucommon.MakeFullNodeBloom(), nil
	} else if payloadSize < 8+wakucommon.BloomFilterSize {
		return nil, errors.New("Undersized bloom filter in p2p request")
	}

	return msg.Payload[8 : 8+wakucommon.BloomFilterSize], nil
}

func (s *WakuMailServer) decompositeRequest(peerID []byte, request *wakucommon.Envelope) (MessagesRequestPayload, error) {
	var (
		payload MessagesRequestPayload
		err error
	)

	if s.minRequestPoW > 0.0 && request.PoW() < s.minRequestPoW {
		return payload, fmt.Errorf("PoW() is too low")
	}

	decrypted := s.openEnvelope(request)
	if decrypted == nil {
		return payload, fmt.Errorf("failed to decrypt p2p request")
	}

	if err := checkMsgSignature(decrypted.Src, peerID); err != nil {
		return payload, err
	}

	payload.Bloom, err = s.bloomFromReceivedMessage(decrypted)
	if err != nil {
		return payload, err
	}

	payload.Lower = binary.BigEndian.Uint32(decrypted.Payload[:4])
	payload.Upper = binary.BigEndian.Uint32(decrypted.Payload[4:8])

	if payload.Upper < payload.Lower {
		err := fmt.Errorf("query range is invalid: from > to (%d > %d)", payload.Lower, payload.Upper)
		return payload, err
	}

	lowerTime := time.Unix(int64(payload.Lower), 0)
	upperTime := time.Unix(int64(payload.Upper), 0)
	if upperTime.Sub(lowerTime) > maxQueryRange {
		err := fmt.Errorf("query range too big for peer %s", string(peerID))
		return payload, err
	}

	if len(decrypted.Payload) >= requestTimeRangeLength+wakucommon.BloomFilterSize+requestLimitLength {
		payload.Limit = binary.BigEndian.Uint32(decrypted.Payload[requestTimeRangeLength+wakucommon.BloomFilterSize:])
	}

	if len(decrypted.Payload) == requestTimeRangeLength+wakucommon.BloomFilterSize+requestLimitLength+DBKeyLength {
		payload.Cursor = decrypted.Payload[requestTimeRangeLength+wakucommon.BloomFilterSize+requestLimitLength:]
	}

	return payload, nil
}

func (s *WakuMailServer) setupDecryptor(password, asymKey string) error {
	s.symFilter = nil
	s.asymFilter = nil

	if password != "" {
		keyID, err := s.shh.AddSymKeyFromPassword(password)
		if err != nil {
			return fmt.Errorf("create symmetric key: %v", err)
		}

		symKey, err := s.shh.GetSymKey(keyID)
		if err != nil {
			return fmt.Errorf("save symmetric key: %v", err)
		}

		s.symFilter = &wakucommon.Filter{KeySym: symKey}
	}

	if asymKey != "" {
		keyAsym, err := crypto.HexToECDSA(asymKey)
		if err != nil {
			return err
		}
		s.asymFilter = &wakucommon.Filter{KeyAsym: keyAsym}
	}

	return nil
}

// openEnvelope tries to decrypt an envelope, first based on the asymmetric key (if
// provided) and second on the symmetric key (if provided)
func (s *WakuMailServer) openEnvelope(request *wakucommon.Envelope) *wakucommon.ReceivedMessage {
	if s.asymFilter != nil {
		if d := request.Open(s.asymFilter); d != nil {
			return d
		}
	}
	if s.symFilter != nil {
		if d := request.Open(s.symFilter); d != nil {
			return d
		}
	}
	return nil
}

func (s *WakuMailServer) decodeRequest(peerID []byte, request *wakucommon.Envelope) (MessagesRequestPayload, error) {
	var payload MessagesRequestPayload

	if s.minRequestPoW > 0.0 && request.PoW() < s.minRequestPoW {
		return payload, errors.New("PoW too low")
	}

	decrypted := s.openEnvelope(request)
	if decrypted == nil {
		log.Warn("Failed to decrypt p2p request")
		return payload, errors.New("failed to decrypt p2p request")
	}

	if err := checkMsgSignature(decrypted.Src, peerID); err != nil {
		log.Warn("Check message signature failed", "err", err.Error())
		return payload, fmt.Errorf("check message signature failed: %v", err)
	}

	if err := rlp.DecodeBytes(decrypted.Payload, &payload); err != nil {
		return payload, fmt.Errorf("failed to decode data: %v", err)
	}

	if payload.Upper == 0 {
		payload.Upper = uint32(time.Now().Unix() + whisperTTLSafeThreshold)
	}

	if payload.Upper < payload.Lower {
		log.Error("Query range is invalid: lower > upper", "lower", payload.Lower, "upper", payload.Upper)
		return payload, errors.New("query range is invalid: lower > upper")
	}

	return payload, nil
}

// -------
// adapter
// -------

type adapter interface {
	CreateRequestFailedPayload(reqID types.Hash, err error) []byte
	CreateRequestCompletedPayload(reqID, lastEnvelopeHash types.Hash, cursor []byte) []byte
	CreateSyncResponse(envelopes []types.Envelope, cursor []byte, final bool, err string) interface{}
	CreateRawSyncResponse(envelopes []rlp.RawValue, cursor []byte, final bool, err string) interface{}
}

// -----------
// wakuAdapter
// -----------

type wakuAdapter struct{}

var _ adapter = (*wakuAdapter)(nil)

func (wakuAdapter) CreateRequestFailedPayload(reqID types.Hash, err error) []byte {
	return waku.CreateMailServerRequestFailedPayload(common.Hash(reqID), err)
}

func (wakuAdapter) CreateRequestCompletedPayload(reqID, lastEnvelopeHash types.Hash, cursor []byte) []byte {
	return waku.CreateMailServerRequestCompletedPayload(common.Hash(reqID), common.Hash(lastEnvelopeHash), cursor)
}

func (wakuAdapter) CreateSyncResponse(_ []types.Envelope, _ []byte, _ bool, _ string) interface{} {
	return nil
}

func (wakuAdapter) CreateRawSyncResponse(_ []rlp.RawValue, _ []byte, _ bool, _ string) interface{} {
	return nil
}

// -------
// service
// -------

type service interface {
	SendHistoricMessageResponse(peerID []byte, payload []byte) error
	SendRawP2PDirect(peerID []byte, envelopes ...rlp.RawValue) error
	MaxMessageSize() uint32
	SendRawSyncResponse(peerID []byte, data interface{}) error // optional
	SendSyncResponse(peerID []byte, data interface{}) error // optional
}

// -----------
// wakuService
// -----------

type wakuService struct {
	*waku.Waku
}

func (s *wakuService) SendRawSyncResponse(peerID []byte, data interface{}) error {
	return errors.New("syncing mailservers is not supported by Waku")
}

func (s *wakuService) SendSyncResponse(peerID []byte, data interface{}) error {
	return errors.New("syncing mailservers is not supported by Waku")
}

// ----------
// mailServer
// ----------

type mailServer struct {
	adapter adapter
	service service
	db DB
	cleaner *dbCleaner // removes old envelopes
	muRateLimiter sync.RWMutex
	rateLimiter *rateLimiter
}

func newMailServer(cfg Config, adapter adapter, service service) (*mailServer, error) {
	if len(cfg.DataDir) == 0 {
		return nil, errDirectoryNotProvided
	}

	// TODO: move out
	if len(cfg.Password) == 0 && len(cfg.AsymKey) == 0 {
		return nil, errDecryptionMethodNotProvided
	}

	s := mailServer{
		adapter: adapter,
		service: service,
	}

	if cfg.RateLimit > 0 {
		s.setupRateLimiter(time.Duration(cfg.RateLimit) * time.Second)
	}

	// Open database in the last step in order not to init with error
	// and leave the database open by accident.
	if cfg.PostgresEnabled {
		log.Info("Connecting to postgres database")
		database, err := NewPostgresDB(cfg.PostgresURI)
		if err != nil {
			return nil, fmt.Errorf("open DB: %s", err)
		}
		s.db = database
		log.Info("Connected to postgres database")
	} else {
		// Defaults to LevelDB
		database, err := NewLevelDB(cfg.DataDir)
		if err != nil {
			return nil, fmt.Errorf("open DB: %s", err)
		}
		s.db = database
	}

	if cfg.DataRetention > 0 {
		// MailServerDataRetention is a number of days.
		s.setupCleaner(time.Duration(cfg.DataRetention) * time.Hour * 24)
	}

	return &s, nil
}

// setupRateLimiter creates and starts a rate limiter with an automated
// limit db cleanup.
func (s *mailServer) setupRateLimiter(limit time.Duration) {
	s.rateLimiter = newRateLimiter(limit)
	s.rateLimiter.Start()
}

func (s *mailServer) setupCleaner(retention time.Duration) {
	s.cleaner = newDBCleaner(s.db, retention)
	s.cleaner.Start()
}

func (s *mailServer) Archive(env types.Envelope) {
	err := s.db.SaveEnvelope(env)
	if err != nil {
		log.Error("Could not save envelope", "hash", env.Hash().String())
	}
}

func (s *mailServer) DeliverMail(peerID, reqID types.Hash, req MessagesRequestPayload) {
	timer := prom.NewTimer(mailDeliveryDuration)
	defer timer.ObserveDuration()

	deliveryAttemptsCounter.Inc()
	log.Info(
		"[mailserver:DeliverMail] delivering mail",
		"peerID", peerID.String(),
		"requestID", reqID.String(),
	)

	req.SetDefaults()

	log.Info(
		"[mailserver:DeliverMail] processing request",
		"peerID", peerID.String(),
		"requestID", reqID.String(),
		"lower", req.Lower,
		"upper", req.Upper,
		"bloom", req.Bloom,
		"topics", req.Topics,
		"limit", req.Limit,
		"cursor", req.Cursor,
		"batch", req.Batch,
	)

	if err := req.Validate(); err != nil {
		syncFailuresCounter.WithLabelValues("req_invalid").Inc()
		log.Error(
			"[mailserver:DeliverMail] request invalid",
			"peerID", peerID.String(),
			"requestID", reqID.String(),
			"err", err,
		)
		s.sendHistoricMessageErrorResponse(peerID, reqID, fmt.Errorf("request is invalid: %v", err))
		return
	}

	if s.exceedsPeerRequests(peerID) {
		deliveryFailuresCounter.WithLabelValues("peer_req_limit").Inc()
		log.Error(
			"[mailserver:DeliverMail] peer exceeded the limit",
			"peerID", peerID.String(),
			"requestID", reqID.String(),
		)
		s.sendHistoricMessageErrorResponse(peerID, reqID, fmt.Errorf("rate limit exceeded"))
		return
	}

	if req.Batch {
		requestsBatchedCounter.Inc()
	}

	iter, err := s.createIterator(req)
	if err != nil {
		log.Error(
			"[mailserver:DeliverMail] request failed",
			"peerID", peerID.String(),
			"requestID", reqID.String(),
			"err", err,
		)
		return
	}
	defer func() { _ = iter.Release() }()

	bundles := make(chan []rlp.RawValue, 5)
	errCh := make(chan error)
	cancelProcessing := make(chan struct{})

	go func() {
		counter := 0
		for bundle := range bundles {
			if err := s.sendRawEnvelopes(peerID, bundle, req.Batch); err != nil {
				close(cancelProcessing)
				errCh <- err
				break
			}
			counter++
		}
		close(errCh)
		log.Info(
			"[mailserver:DeliverMail] finished sending bundles",
			"peerID", peerID,
			"requestID", reqID.String(),
			"counter", counter,
		)
	}()

	nextPageCursor, lastEnvelopeHash := s.processRequestInBundles(
		iter,
		req.Bloom,
		req.Topics,
		int(req.Limit),
		processRequestTimeout,
		reqID.String(),
		bundles,
		cancelProcessing,
	)

	// Wait for the goroutine to finish the work. It may return an error.
	if err := <-errCh; err != nil {
		deliveryFailuresCounter.WithLabelValues("process").Inc()
		log.Error(
			"[mailserver:DeliverMail] error while processing",
			"err", err,
			"peerID", peerID,
			"requestID", reqID,
		)
		s.sendHistoricMessageErrorResponse(peerID, reqID, err)
		return
	}

	// Processing of the request could be finished earlier due to iterator error.
	if err := iter.Error(); err != nil {
		deliveryFailuresCounter.WithLabelValues("iterator").Inc()
		log.Error(
			"[mailserver:DeliverMail] iterator failed",
			"err", err,
			"peerID", peerID,
			"requestID", reqID,
		)
		s.sendHistoricMessageErrorResponse(peerID, reqID, err)
		return
	}

	log.Info(
		"[mailserver:DeliverMail] sending historic message response",
		"peerID", peerID,
		"requestID", reqID,
		"last", lastEnvelopeHash,
		"next", nextPageCursor,
	)

	s.sendHistoricMessageResponse(peerID, reqID, lastEnvelopeHash, nextPageCursor)
}

func (s *mailServer) SyncMail(peerID types.Hash, req MessagesRequestPayload) error {
	log.Info("Started syncing envelopes", "peer", peerID.String(), "req", req)

	requestID := fmt.Sprintf("%d-%d", time.Now().UnixNano(), rand.Intn(1000)) // nolint: gosec

	syncAttemptsCounter.Inc()

	// Check rate limiting for a requesting peer.
	if s.exceedsPeerRequests(peerID) {
		syncFailuresCounter.WithLabelValues("req_per_sec_limit").Inc()
		log.Error("Peer exceeded requests per second limit", "peerID", peerID.String())
		return fmt.Errorf("requests per second limit exceeded")
	}

	req.SetDefaults()

	if err := req.Validate(); err != nil {
		syncFailuresCounter.WithLabelValues("req_invalid").Inc()
		return fmt.Errorf("request is invalid: %v", err)
	}

	iter, err := s.createIterator(req)
	if err != nil {
		syncFailuresCounter.WithLabelValues("iterator").Inc()
		return err
	}
	defer func() { _ = iter.Release() }()

	bundles := make(chan []rlp.RawValue, 5)
	errCh := make(chan error)
	cancelProcessing := make(chan struct{})

	go func() {
		for bundle := range bundles {
			resp := s.adapter.CreateRawSyncResponse(bundle, nil, false, "")
			if err := s.service.SendRawSyncResponse(peerID.Bytes(), resp); err != nil {
				close(cancelProcessing)
				errCh <- fmt.Errorf("failed to send sync response: %v", err)
				break
			}
		}
		close(errCh)
	}()

	nextCursor, _ := s.processRequestInBundles(
		iter,
		req.Bloom,
		req.Topics,
		int(req.Limit),
		processRequestTimeout,
		requestID,
		bundles,
		cancelProcessing,
	)

	// Wait for the goroutine to finish the work. It may return an error.
	if err := <-errCh; err != nil {
		syncFailuresCounter.WithLabelValues("routine").Inc()
		_ = s.service.SendSyncResponse(
			peerID.Bytes(),
			s.adapter.CreateSyncResponse(nil, nil, false, "failed to send a response"),
		)
		return err
	}

	// Processing of the request could be finished earlier due to iterator error.
	if err := iter.Error(); err != nil {
		syncFailuresCounter.WithLabelValues("iterator").Inc()
		_ = s.service.SendSyncResponse(
			peerID.Bytes(),
			s.adapter.CreateSyncResponse(nil, nil, false, "failed to process all envelopes"),
		)
		return fmt.Errorf("LevelDB iterator failed: %v", err)
	}

	log.Info("Finished syncing envelopes", "peer", peerID.String())

	err = s.service.SendSyncResponse(
		peerID.Bytes(),
		s.adapter.CreateSyncResponse(nil, nextCursor, true, ""),
	)
	if err != nil {
		syncFailuresCounter.WithLabelValues("response_send").Inc()
		return fmt.Errorf("failed to send the final sync response: %v", err)
	}

	return nil
}

// Close the mailserver and its associated db connection.
func (s *mailServer) Close() {
	if s.db != nil {
		if err := s.db.Close(); err != nil {
			log.Error("closing database failed", "err", err)
		}
	}
	if s.rateLimiter != nil {
		s.rateLimiter.Stop()
	}
	if s.cleaner != nil {
		s.cleaner.Stop()
	}
}

func (s *mailServer) exceedsPeerRequests(peerID types.Hash) bool {
	s.muRateLimiter.RLock()
	defer s.muRateLimiter.RUnlock()

	if s.rateLimiter == nil {
		return false
	}

	if s.rateLimiter.IsAllowed(peerID.String()) {
		s.rateLimiter.Add(peerID.String())
		return false
	}

	log.Info("peerID exceeded the number of requests per second", "peerID", peerID.String())
	return true
}

func (s *mailServer) createIterator(req MessagesRequestPayload) (Iterator, error) {
	var (
		emptyHash types.Hash
		emptyTopic types.TopicType
		ku, kl *DBKey
	)

	ku = NewDBKey(req.Upper+1, emptyTopic, emptyHash)
	kl = NewDBKey(req.Lower, emptyTopic, emptyHash)

	query := CursorQuery{
		start: kl.Bytes(),
		end: ku.Bytes(),
		cursor: req.Cursor,
		topics: req.Topics,
		bloom: req.Bloom,
		limit: req.Limit,
	}
	return s.db.BuildIterator(query)
}
|
||||
|
||||
func (s *mailServer) processRequestInBundles(
|
||||
iter Iterator,
|
||||
bloom []byte,
|
||||
topics [][]byte,
|
||||
limit int,
|
||||
timeout time.Duration,
|
||||
requestID string,
|
||||
output chan<- []rlp.RawValue,
|
||||
cancel <-chan struct{},
|
||||
) ([]byte, types.Hash) {
|
||||
timer := prom.NewTimer(requestsInBundlesDuration)
|
||||
defer timer.ObserveDuration()
|
||||
|
||||
var (
|
||||
bundle []rlp.RawValue
|
||||
bundleSize uint32
|
||||
batches [][]rlp.RawValue
|
||||
processedEnvelopes int
|
||||
processedEnvelopesSize int64
|
||||
nextCursor []byte
|
||||
lastEnvelopeHash types.Hash
|
||||
)
|
||||
|
||||
log.Info(
|
||||
"[mailserver:processRequestInBundles] processing request",
|
||||
"requestID", requestID,
|
||||
"limit", limit,
|
||||
)
|
||||
|
||||
var topicsMap map[types.TopicType]bool
|
||||
|
||||
if len(topics) != 0 {
|
||||
topicsMap = make(map[types.TopicType]bool)
|
||||
for _, t := range topics {
|
||||
topicsMap[types.BytesToTopic(t)] = true
|
||||
}
|
||||
}
|
||||
|
||||
// We iterate over the envelopes.
|
||||
// We collect envelopes in batches.
|
||||
// If there still room and we haven't reached the limit
|
||||
// append and continue.
|
||||
// Otherwise publish what you have so far, reset the bundle to the
|
||||
// current envelope, and leave if we hit the limit
|
||||
for iter.Next() {
|
||||
var rawValue []byte
|
||||
var err error
|
||||
if len(topicsMap) != 0 {
|
||||
			rawValue, err = iter.GetEnvelopeByTopicsMap(topicsMap)
		} else if len(bloom) != 0 {
			rawValue, err = iter.GetEnvelopeByBloomFilter(bloom)
		} else {
			err = errors.New("either topics or bloom must be specified")
		}
		if err != nil {
			log.Error(
				"[mailserver:processRequestInBundles] failed to get envelope from iterator",
				"err", err,
				"requestID", requestID,
			)
			continue
		}

		if rawValue == nil {
			continue
		}

		key, err := iter.DBKey()
		if err != nil {
			log.Error(
				"[mailserver:processRequestInBundles] failed getting key",
				"requestID", requestID,
			)
			break
		}

		// TODO(adam): this is invalid code. If the limit is 1000,
		// it will only send 999 items and send a cursor.
		lastEnvelopeHash = key.EnvelopeHash()
		processedEnvelopes++
		envelopeSize := uint32(len(rawValue))
		limitReached := processedEnvelopes >= limit
		newSize := bundleSize + envelopeSize

		// If we still have some room for messages, add and continue
		if !limitReached && newSize < s.service.MaxMessageSize() {
			bundle = append(bundle, rawValue)
			bundleSize = newSize
			continue
		}

		// Publish if anything is in the bundle (there should always be
		// something unless limit = 1)
		if len(bundle) != 0 {
			batches = append(batches, bundle)
			processedEnvelopesSize += int64(bundleSize)
		}

		// Reset the bundle with the current envelope
		bundle = []rlp.RawValue{rawValue}
		bundleSize = envelopeSize

		// Leave if we reached the limit
		if limitReached {
			nextCursor = key.Cursor()
			break
		}
	}

	if len(bundle) > 0 {
		batches = append(batches, bundle)
		processedEnvelopesSize += int64(bundleSize)
	}

	log.Info(
		"[mailserver:processRequestInBundles] publishing envelopes",
		"requestID", requestID,
		"batchesCount", len(batches),
		"envelopeCount", processedEnvelopes,
		"processedEnvelopesSize", processedEnvelopesSize,
		"cursor", nextCursor,
	)

	// Publish
batchLoop:
	for _, batch := range batches {
		select {
		case output <- batch:
			// It might happen that while producing the batches,
			// the connection with the peer goes down and
			// the consumer of the `output` channel exits prematurely.
			// In such a case, we should stop pushing batches and exit.
		case <-cancel:
			log.Info(
				"[mailserver:processRequestInBundles] failed to push all batches",
				"requestID", requestID,
			)
			break batchLoop
		case <-time.After(timeout):
			log.Error(
				"[mailserver:processRequestInBundles] timed out pushing a batch",
				"requestID", requestID,
			)
			break batchLoop
		}
	}

	envelopesCounter.Inc()
	sentEnvelopeBatchSizeMeter.Observe(float64(processedEnvelopesSize))

	log.Info(
		"[mailserver:processRequestInBundles] envelopes published",
		"requestID", requestID,
	)
	close(output)

	return nextCursor, lastEnvelopeHash
}

func (s *mailServer) sendRawEnvelopes(peerID types.Hash, envelopes []rlp.RawValue, batch bool) error {
	timer := prom.NewTimer(sendRawEnvelopeDuration)
	defer timer.ObserveDuration()

	if batch {
		return s.service.SendRawP2PDirect(peerID.Bytes(), envelopes...)
	}

	for _, env := range envelopes {
		if err := s.service.SendRawP2PDirect(peerID.Bytes(), env); err != nil {
			return err
		}
	}

	return nil
}

func (s *mailServer) sendHistoricMessageResponse(peerID, reqID, lastEnvelopeHash types.Hash, cursor []byte) {
	payload := s.adapter.CreateRequestCompletedPayload(reqID, lastEnvelopeHash, cursor)
	err := s.service.SendHistoricMessageResponse(peerID.Bytes(), payload)
	if err != nil {
		deliveryFailuresCounter.WithLabelValues("historic_msg_resp").Inc()
		log.Error(
			"[mailserver:DeliverMail] error sending historic message response",
			"err", err,
			"peerID", peerID,
			"requestID", reqID,
		)
	}
}

func (s *mailServer) sendHistoricMessageErrorResponse(peerID, reqID types.Hash, errorToReport error) {
	payload := s.adapter.CreateRequestFailedPayload(reqID, errorToReport)
	err := s.service.SendHistoricMessageResponse(peerID.Bytes(), payload)
	// If we can't report an error, something is probably wrong with the p2p connection,
	// so we just write a log entry to document this sad fact.
	if err != nil {
		log.Error("Error while reporting error response", "err", err, "peerID", peerID.String())
	}
}

func extractBloomFromEncodedEnvelope(rawValue rlp.RawValue) ([]byte, error) {
	var envelope wakucommon.Envelope
	decodeErr := rlp.DecodeBytes(rawValue, &envelope)
	if decodeErr != nil {
		return nil, decodeErr
	}
	return envelope.Bloom(), nil
}

// checkMsgSignature returns an error in case the message is not correctly signed.
func checkMsgSignature(reqSrc *ecdsa.PublicKey, id []byte) error {
	src := crypto.FromECDSAPub(reqSrc)
	if len(src)-len(id) == 1 {
		src = src[1:]
	}

	// if you want to check the signature, you can do it here. e.g.:
	// if !bytes.Equal(peerID, src) {
	if src == nil {
		return errors.New("wrong signature of p2p request")
	}

	return nil
}
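The loop above packs raw envelopes into batches bounded by both a count limit and the peer's maximum message size. A minimal sketch of the same packing pattern in isolation (packBatches and its parameters are illustrative names, not part of the removed code; the real loop also tracks a cursor, which this sketch omits):

package main

import "fmt"

// packBatches groups items into batches so that no batch exceeds maxBytes
// and at most limit items are taken overall. It mirrors the shape of the
// loop above: accumulate while there is room, flush the current bundle
// when the next item would overflow, and stop at the count limit.
func packBatches(items [][]byte, maxBytes, limit int) [][][]byte {
	var (
		batches [][][]byte
		bundle  [][]byte
		size    int
	)
	for i, item := range items {
		if i >= limit {
			break
		}
		if size+len(item) < maxBytes {
			bundle = append(bundle, item)
			size += len(item)
			continue
		}
		if len(bundle) != 0 {
			batches = append(batches, bundle)
		}
		bundle = [][]byte{item}
		size = len(item)
	}
	if len(bundle) > 0 {
		batches = append(batches, bundle)
	}
	return batches
}

func main() {
	items := [][]byte{make([]byte, 300), make([]byte, 300), make([]byte, 300)}
	fmt.Println(len(packBatches(items, 512, 10))) // 3: no two items fit in one 512-byte batch
}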
@@ -1,42 +0,0 @@
package mailserver

import (
	"time"

	"github.com/status-im/status-go/eth-node/types"
)

// check the real envelope count every this many seconds
const envelopeCountCheckInterval = 60

// DB is an interface that abstracts interactions with the database
// so that the mailserver is agnostic to the underlying storage technology
type DB interface {
	Close() error
	// SaveEnvelope stores an envelope
	SaveEnvelope(types.Envelope) error
	// GetEnvelope returns an rlp encoded envelope from the datastore
	GetEnvelope(*DBKey) ([]byte, error)
	// Prune removes envelopes older than time
	Prune(time.Time, int) (int, error)
	// BuildIterator returns an iterator over envelopes
	BuildIterator(query CursorQuery) (Iterator, error)
}

type Iterator interface {
	Next() bool
	DBKey() (*DBKey, error)
	Release() error
	Error() error
	GetEnvelopeByBloomFilter(bloom []byte) ([]byte, error)
	GetEnvelopeByTopicsMap(topics map[types.TopicType]bool) ([]byte, error)
}

type CursorQuery struct {
	start  []byte
	end    []byte
	cursor []byte
	limit  uint32
	bloom  []byte
	topics [][]byte
}
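These two interfaces are what let the mailserver swap LevelDB and PostgreSQL backends without touching request handling. A sketch of how a caller drives them, valid for any implementation (countByTopic is a hypothetical helper; it assumes this package's types):

// countByTopic walks an iterator produced by any DB implementation and
// counts envelopes matching the given topics, releasing the iterator
// and surfacing any iteration error when done.
func countByTopic(db DB, query CursorQuery, topics map[types.TopicType]bool) (int, error) {
	iter, err := db.BuildIterator(query)
	if err != nil {
		return 0, err
	}
	defer func() { _ = iter.Release() }()

	count := 0
	for iter.Next() {
		rawValue, err := iter.GetEnvelopeByTopicsMap(topics)
		if err != nil {
			return count, err
		}
		if rawValue != nil { // nil means the envelope did not match
			count++
		}
	}
	return count, iter.Error()
}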
@@ -1,242 +0,0 @@
package mailserver

import (
	"fmt"
	"time"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/util"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"

	"github.com/status-im/status-go/eth-node/types"
	waku "github.com/status-im/status-go/waku/common"
)

type LevelDB struct {
	// We can't embed as there are some state problems with go-routines
	ldb  *leveldb.DB
	name string
	done chan struct{}
}

type LevelDBIterator struct {
	iterator.Iterator
}

func (i *LevelDBIterator) DBKey() (*DBKey, error) {
	return &DBKey{
		raw: i.Key(),
	}, nil
}

func (i *LevelDBIterator) GetEnvelopeByTopicsMap(topics map[types.TopicType]bool) ([]byte, error) {
	rawValue := make([]byte, len(i.Value()))
	copy(rawValue, i.Value())

	key, err := i.DBKey()
	if err != nil {
		return nil, err
	}

	if !topics[key.Topic()] {
		return nil, nil
	}

	return rawValue, nil
}

func (i *LevelDBIterator) GetEnvelopeByBloomFilter(bloom []byte) ([]byte, error) {
	var envelopeBloom []byte
	rawValue := make([]byte, len(i.Value()))
	copy(rawValue, i.Value())

	key, err := i.DBKey()
	if err != nil {
		return nil, err
	}

	if len(key.Bytes()) != DBKeyLength {
		var err error
		envelopeBloom, err = extractBloomFromEncodedEnvelope(rawValue)
		if err != nil {
			return nil, err
		}
	} else {
		envelopeBloom = types.TopicToBloom(key.Topic())
	}
	if !types.BloomFilterMatch(bloom, envelopeBloom) {
		return nil, nil
	}
	return rawValue, nil
}

func (i *LevelDBIterator) Release() error {
	i.Iterator.Release()
	return nil
}

func NewLevelDB(dataDir string) (*LevelDB, error) {
	// Open opens an existing leveldb database
	db, err := leveldb.OpenFile(dataDir, nil)
	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
		log.Info("database is corrupted, trying to recover", "path", dataDir)
		db, err = leveldb.RecoverFile(dataDir, nil)
	}

	instance := LevelDB{
		ldb:  db,
		name: dataDir, // name is used for metrics labels
		done: make(chan struct{}),
	}

	// initialize the metric value
	instance.updateArchivedEnvelopesCount()
	// checking the count on every insert is inefficient, so do it periodically instead
	go func() {
		for {
			select {
			case <-instance.done:
				return
			case <-time.After(time.Second * envelopeCountCheckInterval):
				instance.updateArchivedEnvelopesCount()
			}
		}
	}()
	return &instance, err
}

// GetEnvelope gets an envelope by its key
func (db *LevelDB) GetEnvelope(key *DBKey) ([]byte, error) {
	defer recoverLevelDBPanics("GetEnvelope")
	return db.ldb.Get(key.Bytes(), nil)
}

func (db *LevelDB) updateArchivedEnvelopesCount() {
	if count, err := db.envelopesCount(); err != nil {
		log.Warn("db query for envelopes count failed", "err", err)
	} else {
		archivedEnvelopesGauge.WithLabelValues(db.name).Set(float64(count))
	}
}

// BuildIterator returns an iterator given a start/end and a cursor
func (db *LevelDB) BuildIterator(query CursorQuery) (Iterator, error) {
	defer recoverLevelDBPanics("BuildIterator")

	i := db.ldb.NewIterator(&util.Range{Start: query.start, Limit: query.end}, nil)

	envelopeQueriesCounter.WithLabelValues("unknown", "unknown").Inc()
	// seek to the end as we want to return envelopes in a descending order
	if len(query.cursor) == CursorLength {
		i.Seek(query.cursor)
	}
	return &LevelDBIterator{i}, nil
}

// Prune removes envelopes older than time
func (db *LevelDB) Prune(t time.Time, batchSize int) (int, error) {
	defer recoverLevelDBPanics("Prune")

	var zero types.Hash
	var emptyTopic types.TopicType
	kl := NewDBKey(0, emptyTopic, zero)
	ku := NewDBKey(uint32(t.Unix()), emptyTopic, zero)
	query := CursorQuery{
		start: kl.Bytes(),
		end:   ku.Bytes(),
	}
	i, err := db.BuildIterator(query)
	if err != nil {
		return 0, err
	}
	defer func() { _ = i.Release() }()

	batch := leveldb.Batch{}
	removed := 0

	for i.Next() {
		dbKey, err := i.DBKey()
		if err != nil {
			return 0, err
		}

		batch.Delete(dbKey.Bytes())

		if batch.Len() == batchSize {
			if err := db.ldb.Write(&batch, nil); err != nil {
				return removed, err
			}

			removed = removed + batch.Len()
			batch.Reset()
		}
	}

	if batch.Len() > 0 {
		if err := db.ldb.Write(&batch, nil); err != nil {
			return removed, err
		}

		removed = removed + batch.Len()
	}

	return removed, nil
}

func (db *LevelDB) envelopesCount() (int, error) {
	defer recoverLevelDBPanics("envelopesCount")
	iterator, err := db.BuildIterator(CursorQuery{})
	if err != nil {
		return 0, err
	}
	// LevelDB does not have an API for getting a count
	var count int
	for iterator.Next() {
		count++
	}
	return count, nil
}

// SaveEnvelope stores an envelope in leveldb and increments the metrics
func (db *LevelDB) SaveEnvelope(env types.Envelope) error {
	defer recoverLevelDBPanics("SaveEnvelope")

	key := NewDBKey(env.Expiry()-env.TTL(), env.Topic(), env.Hash())
	rawEnvelope, err := rlp.EncodeToBytes(env.Unwrap())
	if err != nil {
		log.Error(fmt.Sprintf("rlp.EncodeToBytes failed: %s", err))
		archivedErrorsCounter.WithLabelValues(db.name).Inc()
		return err
	}

	if err = db.ldb.Put(key.Bytes(), rawEnvelope, nil); err != nil {
		log.Error(fmt.Sprintf("Writing to DB failed: %s", err))
		archivedErrorsCounter.WithLabelValues(db.name).Inc()
	}
	archivedEnvelopesGauge.WithLabelValues(db.name).Inc()
	archivedEnvelopeSizeMeter.WithLabelValues(db.name).Observe(
		float64(waku.EnvelopeHeaderLength + env.Size()))
	return err
}

func (db *LevelDB) Close() error {
	select {
	case <-db.done:
	default:
		close(db.done)
	}
	return db.ldb.Close()
}

func recoverLevelDBPanics(calledMethodName string) {
	// Recover from possible goleveldb panics
	if r := recover(); r != nil {
		if errString, ok := r.(string); ok {
			log.Error(fmt.Sprintf("recovered from panic in %s: %s", calledMethodName, errString))
		}
	}
}
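Both Prune and BuildIterator rely on DBKey beginning with a big-endian timestamp, so that lexicographic key order equals chronological order and a time window becomes a plain key range. A self-contained sketch of why the byte order matters (plain encoding/binary, independent of the removed NewDBKey helper):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// Big-endian encoding preserves numeric order under bytewise
	// comparison, which is what LevelDB range scans rely on.
	earlier := make([]byte, 4)
	later := make([]byte, 4)
	binary.BigEndian.PutUint32(earlier, 1_600_000_000)
	binary.BigEndian.PutUint32(later, 1_600_000_001)
	fmt.Println(bytes.Compare(earlier, later)) // -1: the older key sorts first
}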
@@ -1,47 +0,0 @@
package mailserver

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/ethereum/go-ethereum/rlp"
	"github.com/status-im/status-go/eth-node/types"
	waku "github.com/status-im/status-go/waku/common"
)

func TestLevelDB_BuildIteratorWithTopic(t *testing.T) {
	topic := []byte{0x01, 0x02, 0x03, 0x04}

	db, err := NewLevelDB(t.TempDir())
	require.NoError(t, err)

	envelope, err := newTestEnvelope(topic)
	require.NoError(t, err)
	err = db.SaveEnvelope(envelope)
	require.NoError(t, err)

	iter, err := db.BuildIterator(CursorQuery{
		start:  NewDBKey(uint32(time.Now().Add(-time.Hour).Unix()), types.BytesToTopic(topic), types.Hash{}).Bytes(),
		end:    NewDBKey(uint32(time.Now().Add(time.Second).Unix()), types.BytesToTopic(topic), types.Hash{}).Bytes(),
		topics: [][]byte{topic},
		limit:  10,
	})
	topicsMap := make(map[types.TopicType]bool)
	topicsMap[types.BytesToTopic(topic)] = true
	require.NoError(t, err)
	hasNext := iter.Next()
	require.True(t, hasNext)
	rawValue, err := iter.GetEnvelopeByTopicsMap(topicsMap)
	require.NoError(t, err)
	require.NotEmpty(t, rawValue)
	var receivedEnvelope waku.Envelope
	err = rlp.DecodeBytes(rawValue, &receivedEnvelope)
	require.NoError(t, err)
	require.EqualValues(t, waku.BytesToTopic(topic), receivedEnvelope.Topic)

	err = iter.Release()
	require.NoError(t, err)
	require.NoError(t, iter.Error())
}
@@ -1,309 +0,0 @@
package mailserver

import (
	"database/sql"
	"errors"
	"fmt"
	"time"

	"github.com/lib/pq"

	// Import postgres driver
	_ "github.com/lib/pq"
	"github.com/status-im/migrate/v4"
	"github.com/status-im/migrate/v4/database/postgres"
	bindata "github.com/status-im/migrate/v4/source/go_bindata"

	"github.com/status-im/status-go/mailserver/migrations"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"

	"github.com/status-im/status-go/eth-node/types"
	waku "github.com/status-im/status-go/waku/common"
)

type PostgresDB struct {
	db   *sql.DB
	name string
	done chan struct{}
}

func NewPostgresDB(uri string) (*PostgresDB, error) {
	db, err := sql.Open("postgres", uri)
	if err != nil {
		return nil, err
	}

	instance := &PostgresDB{
		db:   db,
		done: make(chan struct{}),
	}
	if err := instance.setup(); err != nil {
		return nil, err
	}

	// name is used for metrics labels
	if name, err := instance.getDBName(uri); err == nil {
		instance.name = name
	}

	// initialize the metric value
	instance.updateArchivedEnvelopesCount()
	// checking the count on every insert is inefficient, so do it periodically instead
	go func() {
		for {
			select {
			case <-instance.done:
				return
			case <-time.After(time.Second * envelopeCountCheckInterval):
				instance.updateArchivedEnvelopesCount()
			}
		}
	}()
	return instance, nil
}

type postgresIterator struct {
	*sql.Rows
}

func (i *PostgresDB) getDBName(uri string) (string, error) {
	query := "SELECT current_database()"
	var dbName string
	return dbName, i.db.QueryRow(query).Scan(&dbName)
}

func (i *PostgresDB) envelopesCount() (int, error) {
	query := "SELECT count(*) FROM envelopes"
	var count int
	return count, i.db.QueryRow(query).Scan(&count)
}

func (i *PostgresDB) updateArchivedEnvelopesCount() {
	if count, err := i.envelopesCount(); err != nil {
		log.Warn("db query for envelopes count failed", "err", err)
	} else {
		archivedEnvelopesGauge.WithLabelValues(i.name).Set(float64(count))
	}
}

func (i *postgresIterator) DBKey() (*DBKey, error) {
	var value []byte
	var id []byte
	if err := i.Scan(&id, &value); err != nil {
		return nil, err
	}
	return &DBKey{raw: id}, nil
}

func (i *postgresIterator) Error() error {
	return i.Err()
}

func (i *postgresIterator) Release() error {
	return i.Close()
}

func (i *postgresIterator) GetEnvelopeByBloomFilter(bloom []byte) ([]byte, error) {
	var value []byte
	var id []byte
	if err := i.Scan(&id, &value); err != nil {
		return nil, err
	}

	return value, nil
}

func (i *postgresIterator) GetEnvelopeByTopicsMap(topics map[types.TopicType]bool) ([]byte, error) {
	var value []byte
	var id []byte
	if err := i.Scan(&id, &value); err != nil {
		return nil, err
	}

	return value, nil
}

func (i *PostgresDB) BuildIterator(query CursorQuery) (Iterator, error) {
	var args []interface{}

	stmtString := "SELECT id, data FROM envelopes"

	var historyRange string
	if len(query.cursor) > 0 {
		args = append(args, query.start, query.cursor)
		// If we have a cursor, we don't want to include that envelope in the result set
		stmtString += " " + "WHERE id >= $1 AND id < $2"
		historyRange = "partial" //nolint: goconst
	} else {
		args = append(args, query.start, query.end)
		stmtString += " " + "WHERE id >= $1 AND id <= $2"
		historyRange = "full" //nolint: goconst
	}

	var filterRange string
	if len(query.topics) > 0 {
		args = append(args, pq.Array(query.topics))
		stmtString += " " + "AND topic = any($3)"
		filterRange = "partial" //nolint: goconst
	} else {
		stmtString += " " + fmt.Sprintf("AND bloom & b'%s'::bit(512) = bloom", toBitString(query.bloom))
		filterRange = "full" //nolint: goconst
	}

	// The positional argument for the limit depends on whether the query filters
	// by topics or by bloom filter: the topic list is passed as a query argument,
	// while the bloom filter is inlined into the statement itself.
	args = append(args, query.limit)
	stmtString += " " + fmt.Sprintf("ORDER BY ID DESC LIMIT $%d", len(args))

	stmt, err := i.db.Prepare(stmtString)
	if err != nil {
		return nil, err
	}

	envelopeQueriesCounter.WithLabelValues(filterRange, historyRange).Inc()
	rows, err := stmt.Query(args...)
	if err != nil {
		return nil, err
	}

	return &postgresIterator{rows}, nil
}

func (i *PostgresDB) setup() error {
	resources := bindata.Resource(
		migrations.AssetNames(),
		migrations.Asset,
	)

	source, err := bindata.WithInstance(resources)
	if err != nil {
		return err
	}

	driver, err := postgres.WithInstance(i.db, &postgres.Config{})
	if err != nil {
		return err
	}

	m, err := migrate.NewWithInstance(
		"go-bindata",
		source,
		"postgres",
		driver)
	if err != nil {
		return err
	}

	if err = m.Up(); err != migrate.ErrNoChange {
		return err
	}

	return nil
}

func (i *PostgresDB) Close() error {
	select {
	case <-i.done:
	default:
		close(i.done)
	}
	return i.db.Close()
}

func (i *PostgresDB) GetEnvelope(key *DBKey) ([]byte, error) {
	statement := `SELECT data FROM envelopes WHERE id = $1`

	stmt, err := i.db.Prepare(statement)
	if err != nil {
		return nil, err
	}
	defer stmt.Close()

	var envelope []byte

	if err = stmt.QueryRow(key.Bytes()).Scan(&envelope); err != nil {
		return nil, err
	}

	return envelope, nil
}

func (i *PostgresDB) Prune(t time.Time, batch int) (int, error) {
	var zero types.Hash
	var emptyTopic types.TopicType
	kl := NewDBKey(0, emptyTopic, zero)
	ku := NewDBKey(uint32(t.Unix()), emptyTopic, zero)
	statement := "DELETE FROM envelopes WHERE id BETWEEN $1 AND $2"

	stmt, err := i.db.Prepare(statement)
	if err != nil {
		return 0, err
	}
	defer stmt.Close()

	result, err := stmt.Exec(kl.Bytes(), ku.Bytes())
	if err != nil {
		return 0, err
	}
	rows, err := result.RowsAffected()
	if err != nil {
		return 0, err
	}
	return int(rows), nil
}

func (i *PostgresDB) SaveEnvelope(env types.Envelope) error {
	topic := env.Topic()
	key := NewDBKey(env.Expiry()-env.TTL(), topic, env.Hash())
	rawEnvelope, err := rlp.EncodeToBytes(env.Unwrap())
	if err != nil {
		log.Error(fmt.Sprintf("rlp.EncodeToBytes failed: %s", err))
		archivedErrorsCounter.WithLabelValues(i.name).Inc()
		return err
	}
	if rawEnvelope == nil {
		archivedErrorsCounter.WithLabelValues(i.name).Inc()
		return errors.New("failed to encode envelope to bytes")
	}

	statement := "INSERT INTO envelopes (id, data, topic, bloom) VALUES ($1, $2, $3, B'"
	statement += toBitString(env.Bloom())
	statement += "'::bit(512)) ON CONFLICT (id) DO NOTHING;"
	stmt, err := i.db.Prepare(statement)
	if err != nil {
		return err
	}
	defer stmt.Close()

	_, err = stmt.Exec(
		key.Bytes(),
		rawEnvelope,
		topicToByte(topic),
	)

	if err != nil {
		archivedErrorsCounter.WithLabelValues(i.name).Inc()
		return err
	}

	archivedEnvelopesGauge.WithLabelValues(i.name).Inc()
	archivedEnvelopeSizeMeter.WithLabelValues(i.name).Observe(
		float64(waku.EnvelopeHeaderLength + env.Size()))

	return nil
}

func topicToByte(t types.TopicType) []byte {
	return []byte{t[0], t[1], t[2], t[3]}
}

func toBitString(bloom []byte) string {
	val := ""
	for _, n := range bloom {
		val += fmt.Sprintf("%08b", n)
	}
	return val
}
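The bloom branch of BuildIterator inlines the filter as a bit string and relies on `bloom & b'...'::bit(512) = bloom`, i.e. every bit set in the stored envelope bloom must also be set in the requested filter. The same predicate written out in Go (a sketch; in the LevelDB path, types.BloomFilterMatch plays this role):

package main

import "fmt"

// bloomSubset reports whether every bit set in envelopeBloom is also set
// in filter, which is exactly the rule the Postgres query expresses as
// `bloom & filter = bloom`.
func bloomSubset(filter, envelopeBloom []byte) bool {
	if len(filter) != len(envelopeBloom) {
		return false
	}
	for i := range envelopeBloom {
		if filter[i]&envelopeBloom[i] != envelopeBloom[i] {
			return false
		}
	}
	return true
}

func main() {
	filter := []byte{0b10110000}
	matching := []byte{0b00100000}    // subset of filter
	nonMatching := []byte{0b01000000} // bit not present in filter
	fmt.Println(bloomSubset(filter, matching), bloomSubset(filter, nonMatching)) // true false
}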
@@ -1,129 +0,0 @@
// To run these tests, you must run a PostgreSQL database.
//
// Using Docker:
//   docker run -e POSTGRES_HOST_AUTH_METHOD=trust -d -p 5432:5432 postgres:9.6-alpine
//

package mailserver

import (
	"testing"
	"time"

	"github.com/stretchr/testify/suite"

	"github.com/ethereum/go-ethereum/rlp"

	gethbridge "github.com/status-im/status-go/eth-node/bridge/geth"
	"github.com/status-im/status-go/eth-node/crypto"
	"github.com/status-im/status-go/eth-node/types"
	"github.com/status-im/status-go/postgres"
	waku "github.com/status-im/status-go/waku/common"
)

func TestMailServerPostgresDBSuite(t *testing.T) {
	suite.Run(t, new(MailServerPostgresDBSuite))
}

type MailServerPostgresDBSuite struct {
	suite.Suite
}

func (s *MailServerPostgresDBSuite) SetupSuite() {
	// ResetDefaultTestPostgresDB is required to completely reset the Postgres DB
	err := postgres.ResetDefaultTestPostgresDB()
	s.NoError(err)
}

func (s *MailServerPostgresDBSuite) TestPostgresDB_BuildIteratorWithBloomFilter() {
	topic := []byte{0xaa, 0xbb, 0xcc, 0xdd}

	db, err := NewPostgresDB(postgres.DefaultTestURI)
	s.NoError(err)
	defer db.Close()

	envelope, err := newTestEnvelope(topic)
	s.NoError(err)
	err = db.SaveEnvelope(envelope)
	s.NoError(err)

	iter, err := db.BuildIterator(CursorQuery{
		start: NewDBKey(uint32(time.Now().Add(-time.Hour).Unix()), types.BytesToTopic(topic), types.Hash{}).Bytes(),
		end:   NewDBKey(uint32(time.Now().Add(time.Second).Unix()), types.BytesToTopic(topic), types.Hash{}).Bytes(),
		bloom: types.TopicToBloom(types.BytesToTopic(topic)),
		limit: 10,
	})
	s.NoError(err)
	hasNext := iter.Next()
	s.True(hasNext)
	rawValue, err := iter.GetEnvelopeByBloomFilter(nil)
	s.NoError(err)
	s.NotEmpty(rawValue)
	var receivedEnvelope waku.Envelope
	err = rlp.DecodeBytes(rawValue, &receivedEnvelope)
	s.NoError(err)
	s.EqualValues(waku.BytesToTopic(topic), receivedEnvelope.Topic)

	err = iter.Release()
	s.NoError(err)
	s.NoError(iter.Error())
}

func (s *MailServerPostgresDBSuite) TestPostgresDB_BuildIteratorWithTopic() {
	topic := []byte{0x01, 0x02, 0x03, 0x04}

	db, err := NewPostgresDB(postgres.DefaultTestURI)
	s.NoError(err)
	defer db.Close()

	envelope, err := newTestEnvelope(topic)
	s.NoError(err)
	err = db.SaveEnvelope(envelope)
	s.NoError(err)

	iter, err := db.BuildIterator(CursorQuery{
		start:  NewDBKey(uint32(time.Now().Add(-time.Hour).Unix()), types.BytesToTopic(topic), types.Hash{}).Bytes(),
		end:    NewDBKey(uint32(time.Now().Add(time.Second).Unix()), types.BytesToTopic(topic), types.Hash{}).Bytes(),
		topics: [][]byte{topic},
		limit:  10,
	})
	s.NoError(err)
	hasNext := iter.Next()
	s.True(hasNext)
	rawValue, err := iter.GetEnvelopeByBloomFilter(nil)
	s.NoError(err)
	s.NotEmpty(rawValue)
	var receivedEnvelope waku.Envelope
	err = rlp.DecodeBytes(rawValue, &receivedEnvelope)
	s.NoError(err)
	s.EqualValues(waku.BytesToTopic(topic), receivedEnvelope.Topic)

	err = iter.Release()
	s.NoError(err)
	s.NoError(iter.Error())
}

func newTestEnvelope(topic []byte) (types.Envelope, error) {
	privateKey, err := crypto.GenerateKey()
	if err != nil {
		return nil, err
	}
	params := waku.MessageParams{
		TTL:      10,
		PoW:      2.0,
		Payload:  []byte("hello world"),
		WorkTime: 1,
		Topic:    waku.BytesToTopic(topic),
		Dst:      &privateKey.PublicKey,
	}
	message, err := waku.NewSentMessage(&params)
	if err != nil {
		return nil, err
	}
	now := time.Now()
	envelope, err := message.Wrap(&params, now)
	if err != nil {
		return nil, err
	}
	return gethbridge.NewWakuEnvelope(envelope), nil
}
@@ -1,655 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package mailserver

import (
	"crypto/ecdsa"
	"encoding/binary"
	"errors"
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/suite"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"

	"github.com/status-im/status-go/eth-node/types"
	"github.com/status-im/status-go/params"
	waku "github.com/status-im/status-go/waku"
	wakucommon "github.com/status-im/status-go/waku/common"
)

const powRequirement = 0.00001

var keyID string
var seed = time.Now().Unix()
var testPayload = []byte("test payload")

type ServerTestParams struct {
	topic types.TopicType
	birth uint32
	low   uint32
	upp   uint32
	limit uint32
	key   *ecdsa.PrivateKey
}

func TestMailserverSuite(t *testing.T) {
	suite.Run(t, new(MailserverSuite))
}

type MailserverSuite struct {
	suite.Suite
	server  *WakuMailServer
	shh     *waku.Waku
	config  *params.WakuConfig
	dataDir string
}

func (s *MailserverSuite) SetupTest() {
	s.server = &WakuMailServer{}
	s.shh = waku.New(&waku.DefaultConfig, nil)
	s.shh.RegisterMailServer(s.server)

	tmpDir := s.T().TempDir()
	s.dataDir = tmpDir

	s.config = &params.WakuConfig{
		DataDir:            tmpDir,
		MailServerPassword: "testpassword",
	}
}

func (s *MailserverSuite) TestInit() {
	testCases := []struct {
		config        params.WakuConfig
		expectedError error
		info          string
	}{
		{
			config:        params.WakuConfig{DataDir: ""},
			expectedError: errDirectoryNotProvided,
			info:          "config with empty DataDir",
		},
		{
			config: params.WakuConfig{
				DataDir:            s.config.DataDir,
				MailServerPassword: "pwd",
			},
			expectedError: nil,
			info:          "config with correct DataDir and Password",
		},
		{
			config: params.WakuConfig{
				DataDir:             s.config.DataDir,
				MailServerPassword:  "pwd",
				MailServerRateLimit: 5,
			},
			expectedError: nil,
			info:          "config with rate limit",
		},
	}

	for _, testCase := range testCases {
		// to satisfy gosec: G601 checks
		tc := testCase
		s.T().Run(tc.info, func(*testing.T) {
			mailServer := &WakuMailServer{}
			shh := waku.New(&waku.DefaultConfig, nil)
			shh.RegisterMailServer(mailServer)

			err := mailServer.Init(shh, &tc.config)
			s.Require().Equal(tc.expectedError, err)
			if err == nil {
				defer mailServer.Close()
			}

			// db should be open only if there was no error
			if tc.expectedError == nil {
				s.NotNil(mailServer.ms.db)
			} else {
				s.Nil(mailServer.ms)
			}

			if tc.config.MailServerRateLimit > 0 {
				s.NotNil(mailServer.ms.rateLimiter)
			}
		})
	}
}

func (s *MailserverSuite) TestArchive() {
	config := *s.config

	err := s.server.Init(s.shh, &config)
	s.Require().NoError(err)
	defer s.server.Close()

	env, err := generateEnvelope(time.Now())
	s.NoError(err)
	rawEnvelope, err := rlp.EncodeToBytes(env)
	s.NoError(err)

	s.server.Archive(env)
	key := NewDBKey(env.Expiry-env.TTL, types.TopicType(env.Topic), types.Hash(env.Hash()))
	archivedEnvelope, err := s.server.ms.db.GetEnvelope(key)
	s.NoError(err)

	s.Equal(rawEnvelope, archivedEnvelope)
}

func (s *MailserverSuite) TestManageLimits() {
	err := s.server.Init(s.shh, s.config)
	s.NoError(err)
	s.server.ms.rateLimiter = newRateLimiter(time.Duration(5) * time.Millisecond)
	s.False(s.server.ms.exceedsPeerRequests(types.BytesToHash([]byte("peerID"))))
	s.Equal(1, len(s.server.ms.rateLimiter.db))
	firstSaved := s.server.ms.rateLimiter.db["peerID"]

	// a second call while the limit is not yet reached does not store a new limit
	s.True(s.server.ms.exceedsPeerRequests(types.BytesToHash([]byte("peerID"))))
	s.Equal(1, len(s.server.ms.rateLimiter.db))
	s.Equal(firstSaved, s.server.ms.rateLimiter.db["peerID"])
}

func (s *MailserverSuite) TestDBKey() {
	var h types.Hash
	var emptyTopic types.TopicType
	i := uint32(time.Now().Unix())
	k := NewDBKey(i, emptyTopic, h)
	s.Equal(len(k.Bytes()), DBKeyLength, "wrong DB key length")
	s.Equal(byte(i%0x100), k.Bytes()[3], "raw representation should be big endian")
	s.Equal(byte(i/0x1000000), k.Bytes()[0], "big endian expected")
}

func (s *MailserverSuite) TestRequestPaginationLimit() {
	s.setupServer(s.server)
	defer s.server.Close()

	var (
		sentEnvelopes  []*wakucommon.Envelope
		sentHashes     []common.Hash
		receivedHashes []common.Hash
		archiveKeys    []string
	)

	now := time.Now()
	count := uint32(10)

	for i := count; i > 0; i-- {
		sentTime := now.Add(time.Duration(-i) * time.Second)
		env, err := generateEnvelope(sentTime)
		s.NoError(err)
		s.server.Archive(env)
		key := NewDBKey(env.Expiry-env.TTL, types.TopicType(env.Topic), types.Hash(env.Hash()))
		archiveKeys = append(archiveKeys, fmt.Sprintf("%x", key.Cursor()))
		sentEnvelopes = append(sentEnvelopes, env)
		sentHashes = append(sentHashes, env.Hash())
	}

	reqLimit := uint32(6)
	peerID, request, err := s.prepareRequest(sentEnvelopes, reqLimit)
	s.NoError(err)
	payload, err := s.server.decompositeRequest(peerID, request)
	s.NoError(err)
	s.Nil(payload.Cursor)
	s.Equal(reqLimit, payload.Limit)

	receivedHashes, cursor, _ := processRequestAndCollectHashes(s.server, payload)

	// 10 envelopes sent
	s.Equal(count, uint32(len(sentEnvelopes)))
	// 6 envelopes received
	s.Len(receivedHashes, int(payload.Limit))
	// the 6 envelopes received should be in forward order
	s.Equal(sentHashes[:payload.Limit], receivedHashes)
	// cursor should be the key of the last envelope of the last page
	s.Equal(archiveKeys[payload.Limit-1], fmt.Sprintf("%x", cursor))

	// second page
	payload.Cursor = cursor
	receivedHashes, cursor, _ = processRequestAndCollectHashes(s.server, payload)

	// 4 envelopes received
	s.Equal(int(count-payload.Limit), len(receivedHashes))
	// cursor is nil because there are no more pages
	s.Nil(cursor)
}

func (s *MailserverSuite) TestMailServer() {
	s.setupServer(s.server)
	defer s.server.Close()

	env, err := generateEnvelope(time.Now())
	s.NoError(err)

	s.server.Archive(env)

	testCases := []struct {
		params *ServerTestParams
		expect bool
		isOK   bool
		info   string
	}{
		{
			params: s.defaultServerParams(env),
			expect: true,
			isOK:   true,
			info:   "Processing a request where from and to are equal to an existing register, should provide results",
		},
		{
			params: func() *ServerTestParams {
				params := s.defaultServerParams(env)
				params.low = params.birth + 1
				params.upp = params.birth + 1

				return params
			}(),
			expect: false,
			isOK:   true,
			info:   "Processing a request where from and to are greater than any existing register, should not provide results",
		},
		{
			params: func() *ServerTestParams {
				params := s.defaultServerParams(env)
				params.upp = params.birth + 1
				params.topic[0] = 0xFF

				return params
			}(),
			expect: false,
			isOK:   true,
			info:   "Processing a request where to is greater than any existing register and with a specific topic, should not provide results",
		},
		{
			params: func() *ServerTestParams {
				params := s.defaultServerParams(env)
				params.low = params.birth
				params.upp = params.birth - 1

				return params
			}(),
			isOK: false,
			info: "Processing a request where to is lower than from should fail",
		},
		{
			params: func() *ServerTestParams {
				params := s.defaultServerParams(env)
				params.low = 0
				params.upp = params.birth + 24

				return params
			}(),
			isOK: false,
			info: "Processing a request where the difference between from and to is > 24 should fail",
		},
	}
	for _, testCase := range testCases {
		// to satisfy gosec: G601 checks
		tc := testCase
		s.T().Run(tc.info, func(*testing.T) {
			request := s.createRequest(tc.params)
			src := crypto.FromECDSAPub(&tc.params.key.PublicKey)
			payload, err := s.server.decompositeRequest(src, request)
			s.Equal(tc.isOK, err == nil)
			if err == nil {
				s.Equal(tc.params.low, payload.Lower)
				s.Equal(tc.params.upp, payload.Upper)
				s.Equal(tc.params.limit, payload.Limit)
				s.Equal(types.TopicToBloom(tc.params.topic), payload.Bloom)
				s.Equal(tc.expect, s.messageExists(env, tc.params.low, tc.params.upp, payload.Bloom, tc.params.limit))

				src[0]++
				_, err = s.server.decompositeRequest(src, request)
				s.True(err == nil)
			}
		})
	}
}

func (s *MailserverSuite) TestDecodeRequest() {
	s.setupServer(s.server)
	defer s.server.Close()

	payload := MessagesRequestPayload{
		Lower:  50,
		Upper:  100,
		Bloom:  []byte{0x01},
		Topics: [][]byte{},
		Limit:  10,
		Cursor: []byte{},
		Batch:  true,
	}
	data, err := rlp.EncodeToBytes(payload)
	s.Require().NoError(err)

	id, err := s.shh.NewKeyPair()
	s.Require().NoError(err)
	srcKey, err := s.shh.GetPrivateKey(id)
	s.Require().NoError(err)

	env := s.createEnvelope(types.TopicType{0x01}, data, srcKey)

	decodedPayload, err := s.server.decodeRequest(nil, env)
	s.Require().NoError(err)
	s.Equal(payload, decodedPayload)
}

func (s *MailserverSuite) TestDecodeRequestNoUpper() {
	s.setupServer(s.server)
	defer s.server.Close()

	payload := MessagesRequestPayload{
		Lower:  50,
		Bloom:  []byte{0x01},
		Limit:  10,
		Cursor: []byte{},
		Batch:  true,
	}
	data, err := rlp.EncodeToBytes(payload)
	s.Require().NoError(err)

	id, err := s.shh.NewKeyPair()
	s.Require().NoError(err)
	srcKey, err := s.shh.GetPrivateKey(id)
	s.Require().NoError(err)

	env := s.createEnvelope(types.TopicType{0x01}, data, srcKey)

	decodedPayload, err := s.server.decodeRequest(nil, env)
	s.Require().NoError(err)
	s.NotEqual(0, decodedPayload.Upper)
}

func (s *MailserverSuite) TestProcessRequestDeadlockHandling() {
	s.setupServer(s.server)
	defer s.server.Close()

	var archivedEnvelopes []*wakucommon.Envelope

	now := time.Now()
	count := uint32(10)

	// Archive some envelopes.
	for i := count; i > 0; i-- {
		sentTime := now.Add(time.Duration(-i) * time.Second)
		env, err := generateEnvelope(sentTime)
		s.NoError(err)
		s.server.Archive(env)
		archivedEnvelopes = append(archivedEnvelopes, env)
	}

	// Prepare a request.
	peerID, request, err := s.prepareRequest(archivedEnvelopes, 5)
	s.NoError(err)
	payload, err := s.server.decompositeRequest(peerID, request)
	s.NoError(err)

	testCases := []struct {
		Name    string
		Timeout time.Duration
		Verify  func(
			Iterator,
			time.Duration, // processRequestInBundles timeout
			chan []rlp.RawValue,
		)
	}{
		{
			Name:    "finish processing using `done` channel",
			Timeout: time.Second * 5,
			Verify: func(
				iter Iterator,
				timeout time.Duration,
				bundles chan []rlp.RawValue,
			) {
				done := make(chan struct{})
				processFinished := make(chan struct{})

				go func() {
					s.server.ms.processRequestInBundles(iter, payload.Bloom, payload.Topics, int(payload.Limit), timeout, "req-01", bundles, done)
					close(processFinished)
				}()
				go close(done)

				select {
				case <-processFinished:
				case <-time.After(time.Second):
					s.FailNow("waiting for processing to finish timed out")
				}
			},
		},
		{
			Name:    "finish processing due to timeout",
			Timeout: time.Second,
			Verify: func(
				iter Iterator,
				timeout time.Duration,
				bundles chan []rlp.RawValue,
			) {
				done := make(chan struct{}) // won't be closed because we test the timeout of `processRequestInBundles()`
				processFinished := make(chan struct{})

				go func() {
					s.server.ms.processRequestInBundles(iter, payload.Bloom, payload.Topics, int(payload.Limit), time.Second, "req-01", bundles, done)
					close(processFinished)
				}()

				select {
				case <-processFinished:
				case <-time.After(time.Second * 5):
					s.FailNow("waiting for processing to finish timed out")
				}
			},
		},
	}

	for _, tc := range testCases {
		s.T().Run(tc.Name, func(t *testing.T) {
			iter, err := s.server.ms.createIterator(payload)
			s.Require().NoError(err)

			defer func() { _ = iter.Release() }()

			// Nothing reads from this unbuffered channel, which simulates a situation
			// when the connection between a peer and the mail server is dropped.
			bundles := make(chan []rlp.RawValue)

			tc.Verify(iter, tc.Timeout, bundles)
		})
	}
}

func (s *MailserverSuite) messageExists(envelope *wakucommon.Envelope, low, upp uint32, bloom []byte, limit uint32) bool {
	receivedHashes, _, _ := processRequestAndCollectHashes(s.server, MessagesRequestPayload{
		Lower: low,
		Upper: upp,
		Bloom: bloom,
		Limit: limit,
	})
	for _, hash := range receivedHashes {
		if hash == envelope.Hash() {
			return true
		}
	}
	return false
}

func (s *MailserverSuite) setupServer(server *WakuMailServer) {
	const password = "password_for_this_test"

	s.shh = waku.New(&waku.DefaultConfig, nil)
	s.shh.RegisterMailServer(server)

	err := server.Init(s.shh, &params.WakuConfig{
		DataDir:            s.dataDir,
		MailServerPassword: password,
		MinimumPoW:         powRequirement,
	})
	if err != nil {
		s.T().Fatal(err)
	}

	keyID, err = s.shh.AddSymKeyFromPassword(password)
	if err != nil {
		s.T().Fatalf("failed to create symmetric key for mail request: %s", err)
	}
}

func (s *MailserverSuite) prepareRequest(envelopes []*wakucommon.Envelope, limit uint32) (
	[]byte, *wakucommon.Envelope, error,
) {
	if len(envelopes) == 0 {
		return nil, nil, errors.New("envelopes is empty")
	}

	now := time.Now()

	params := s.defaultServerParams(envelopes[0])
	params.low = uint32(now.Add(time.Duration(-len(envelopes)) * time.Second).Unix())
	params.upp = uint32(now.Unix())
	params.limit = limit

	request := s.createRequest(params)
	peerID := crypto.FromECDSAPub(&params.key.PublicKey)

	return peerID, request, nil
}

func (s *MailserverSuite) defaultServerParams(env *wakucommon.Envelope) *ServerTestParams {
	id, err := s.shh.NewKeyPair()
	if err != nil {
		s.T().Fatalf("failed to generate new key pair with seed %d: %s.", seed, err)
	}
	testPeerID, err := s.shh.GetPrivateKey(id)
	if err != nil {
		s.T().Fatalf("failed to retrieve new key pair with seed %d: %s.", seed, err)
	}
	birth := env.Expiry - env.TTL

	return &ServerTestParams{
		topic: types.TopicType(env.Topic),
		birth: birth,
		low:   birth - 1,
		upp:   birth + 1,
		limit: 0,
		key:   testPeerID,
	}
}

func (s *MailserverSuite) createRequest(p *ServerTestParams) *wakucommon.Envelope {
	bloom := types.TopicToBloom(p.topic)
	data := make([]byte, 8)
	binary.BigEndian.PutUint32(data, p.low)
	binary.BigEndian.PutUint32(data[4:], p.upp)
	data = append(data, bloom...)

	if p.limit != 0 {
		limitData := make([]byte, 4)
		binary.BigEndian.PutUint32(limitData, p.limit)
		data = append(data, limitData...)
	}

	return s.createEnvelope(p.topic, data, p.key)
}

func (s *MailserverSuite) createEnvelope(topic types.TopicType, data []byte, srcKey *ecdsa.PrivateKey) *wakucommon.Envelope {
	key, err := s.shh.GetSymKey(keyID)
	if err != nil {
		s.T().Fatalf("failed to retrieve sym key with seed %d: %s.", seed, err)
	}

	params := &wakucommon.MessageParams{
		KeySym:   key,
		Topic:    wakucommon.TopicType(topic),
		Payload:  data,
		PoW:      powRequirement * 2,
		WorkTime: 2,
		Src:      srcKey,
	}

	msg, err := wakucommon.NewSentMessage(params)
	if err != nil {
		s.T().Fatalf("failed to create new message with seed %d: %s.", seed, err)
	}

	env, err := msg.Wrap(params, time.Now())
	if err != nil {
		s.T().Fatalf("failed to wrap with seed %d: %s.", seed, err)
	}
	return env
}

func generateEnvelopeWithKeys(sentTime time.Time, keySym []byte, keyAsym *ecdsa.PublicKey) (*wakucommon.Envelope, error) {
	params := &wakucommon.MessageParams{
		Topic:    wakucommon.TopicType{0x1F, 0x7E, 0xA1, 0x7F},
		Payload:  testPayload,
		PoW:      powRequirement,
		WorkTime: 2,
	}

	if len(keySym) > 0 {
		params.KeySym = keySym
	} else if keyAsym != nil {
		params.Dst = keyAsym
	}

	msg, err := wakucommon.NewSentMessage(params)
	if err != nil {
		return nil, fmt.Errorf("failed to create new message with seed %d: %s", seed, err)
	}
	env, err := msg.Wrap(params, sentTime)
	if err != nil {
		return nil, fmt.Errorf("failed to wrap with seed %d: %s", seed, err)
	}

	return env, nil
}

func generateEnvelope(sentTime time.Time) (*wakucommon.Envelope, error) {
	h := crypto.Keccak256Hash([]byte("test sample data"))
	return generateEnvelopeWithKeys(sentTime, h[:], nil)
}

func processRequestAndCollectHashes(server *WakuMailServer, payload MessagesRequestPayload) ([]common.Hash, []byte, types.Hash) {
	iter, _ := server.ms.createIterator(payload)
	defer func() { _ = iter.Release() }()
	bundles := make(chan []rlp.RawValue, 10)
	done := make(chan struct{})

	var hashes []common.Hash
	go func() {
		for bundle := range bundles {
			for _, rawEnvelope := range bundle {
				var env *wakucommon.Envelope
				if err := rlp.DecodeBytes(rawEnvelope, &env); err != nil {
					panic(err)
				}
				hashes = append(hashes, env.Hash())
			}
		}
		close(done)
	}()

	cursor, lastHash := server.ms.processRequestInBundles(iter, payload.Bloom, payload.Topics, int(payload.Limit), time.Minute, "req-01", bundles, done)

	<-done

	return hashes, cursor, lastHash
}
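createRequest above encodes the legacy, non-RLP request payload as two big-endian uint32 timestamps, a bloom filter, and an optional big-endian uint32 limit. A decoding sketch of that layout (legacyRequest and the 64-byte bloom length are assumptions consistent with the bit(512) columns elsewhere in this diff):

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

const bloomLength = 64 // 512 bits, matching the bit(512) bloom columns

// legacyRequest mirrors the byte layout produced by createRequest above:
// [lower:4][upper:4][bloom:64][limit:4, optional], all integers big-endian.
type legacyRequest struct {
	Lower, Upper uint32
	Bloom        []byte
	Limit        uint32
}

func parseLegacyRequest(data []byte) (legacyRequest, error) {
	var r legacyRequest
	if len(data) < 8+bloomLength {
		return r, errors.New("payload too short")
	}
	r.Lower = binary.BigEndian.Uint32(data[:4])
	r.Upper = binary.BigEndian.Uint32(data[4:8])
	r.Bloom = data[8 : 8+bloomLength]
	if rest := data[8+bloomLength:]; len(rest) >= 4 {
		r.Limit = binary.BigEndian.Uint32(rest[:4])
	}
	return r, nil
}

func main() {
	payload := make([]byte, 8+bloomLength+4)
	binary.BigEndian.PutUint32(payload, 100)
	binary.BigEndian.PutUint32(payload[4:], 200)
	binary.BigEndian.PutUint32(payload[8+bloomLength:], 6)
	r, _ := parseLegacyRequest(payload)
	fmt.Println(r.Lower, r.Upper, r.Limit) // 100 200 6
}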
@@ -1,84 +0,0 @@
package mailserver

import prom "github.com/prometheus/client_golang/prometheus"

// By default the /metrics endpoint is not available.
// It is exposed only if the -metrics flag is set.

var (
	envelopesCounter = prom.NewCounter(prom.CounterOpts{
		Name: "mailserver_envelopes_total",
		Help: "Number of envelopes processed.",
	})
	deliveryFailuresCounter = prom.NewCounterVec(prom.CounterOpts{
		Name: "mailserver_delivery_failures_total",
		Help: "Number of requests that failed processing.",
	}, []string{"type"})
	deliveryAttemptsCounter = prom.NewCounter(prom.CounterOpts{
		Name: "mailserver_delivery_attempts_total",
		Help: "Number of Whisper envelopes processed.",
	})
	requestsBatchedCounter = prom.NewCounter(prom.CounterOpts{
		Name: "mailserver_requests_batched_total",
		Help: "Number of processed batched requests.",
	})
	requestsInBundlesDuration = prom.NewHistogram(prom.HistogramOpts{
		Name: "mailserver_requests_bundle_process_duration_seconds",
		Help: "The time it took to process message bundles.",
	})
	syncFailuresCounter = prom.NewCounterVec(prom.CounterOpts{
		Name: "mailserver_sync_failures_total",
		Help: "Number of failures processing sync requests.",
	}, []string{"type"})
	syncAttemptsCounter = prom.NewCounter(prom.CounterOpts{
		Name: "mailserver_sync_attempts_total",
		Help: "Number of attempts at processing sync requests.",
	})
	sendRawEnvelopeDuration = prom.NewHistogram(prom.HistogramOpts{
		Name: "mailserver_send_raw_envelope_duration_seconds",
		Help: "The time it took to send a Whisper envelope.",
	})
	sentEnvelopeBatchSizeMeter = prom.NewHistogram(prom.HistogramOpts{
		Name:    "mailserver_sent_envelope_batch_size_bytes",
		Help:    "Size of processed Whisper envelopes in bytes.",
		Buckets: prom.ExponentialBuckets(1024, 4, 10),
	})
	mailDeliveryDuration = prom.NewHistogram(prom.HistogramOpts{
		Name: "mailserver_delivery_duration_seconds",
		Help: "Time it takes to deliver messages to a Whisper peer.",
	})
	archivedErrorsCounter = prom.NewCounterVec(prom.CounterOpts{
		Name: "mailserver_archived_envelopes_failures_total",
		Help: "Number of failures storing a Whisper envelope.",
	}, []string{"db"})
	archivedEnvelopesGauge = prom.NewGaugeVec(prom.GaugeOpts{
		Name: "mailserver_archived_envelopes_total",
		Help: "Number of envelopes saved in the DB.",
	}, []string{"db"})
	archivedEnvelopeSizeMeter = prom.NewHistogramVec(prom.HistogramOpts{
		Name:    "mailserver_archived_envelope_size_bytes",
		Help:    "Size of envelopes saved.",
		Buckets: prom.ExponentialBuckets(1024, 2, 11),
	}, []string{"db"})
	envelopeQueriesCounter = prom.NewCounterVec(prom.CounterOpts{
		Name: "mailserver_envelope_queries_total",
		Help: "Number of queries for envelopes in the DB.",
	}, []string{"filter", "history"})
)

func init() {
	prom.MustRegister(envelopesCounter)
	prom.MustRegister(deliveryFailuresCounter)
	prom.MustRegister(deliveryAttemptsCounter)
	prom.MustRegister(requestsBatchedCounter)
	prom.MustRegister(requestsInBundlesDuration)
	prom.MustRegister(syncFailuresCounter)
	prom.MustRegister(syncAttemptsCounter)
	prom.MustRegister(sendRawEnvelopeDuration)
	prom.MustRegister(sentEnvelopeBatchSizeMeter)
	prom.MustRegister(mailDeliveryDuration)
	prom.MustRegister(archivedErrorsCounter)
	prom.MustRegister(archivedEnvelopesGauge)
	prom.MustRegister(archivedEnvelopeSizeMeter)
	prom.MustRegister(envelopeQueriesCounter)
}
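All of these collectors go through prom.MustRegister, i.e. the default Prometheus registry, so exposing them only requires mounting the standard handler. A minimal sketch (the :9090 address is arbitrary; as the comment above notes, status-go actually gates the endpoint behind the -metrics flag):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// promhttp.Handler serves every metric registered via
	// prom.MustRegister, including the mailserver collectors above.
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":9090", nil)
}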
@@ -1,319 +0,0 @@
// Code generated by go-bindata. DO NOT EDIT.
// sources:
// 1557732988_initialize_db.down.sql (72B)
// 1557732988_initialize_db.up.sql (278B)
// static.go (198B)

package migrations

import (
	"bytes"
	"compress/gzip"
	"crypto/sha256"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"time"
)

func bindataRead(data []byte, name string) ([]byte, error) {
	gz, err := gzip.NewReader(bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("read %q: %v", name, err)
	}

	var buf bytes.Buffer
	_, err = io.Copy(&buf, gz)
	clErr := gz.Close()

	if err != nil {
		return nil, fmt.Errorf("read %q: %v", name, err)
	}
	if clErr != nil {
		return nil, err
	}

	return buf.Bytes(), nil
}

type asset struct {
	bytes  []byte
	info   os.FileInfo
	digest [sha256.Size]byte
}

type bindataFileInfo struct {
	name    string
	size    int64
	mode    os.FileMode
	modTime time.Time
}

func (fi bindataFileInfo) Name() string {
	return fi.name
}
func (fi bindataFileInfo) Size() int64 {
	return fi.size
}
func (fi bindataFileInfo) Mode() os.FileMode {
	return fi.mode
}
func (fi bindataFileInfo) ModTime() time.Time {
	return fi.modTime
}
func (fi bindataFileInfo) IsDir() bool {
	return false
}
func (fi bindataFileInfo) Sys() interface{} {
	return nil
}

var __1557732988_initialize_dbDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x09\xf2\x0f\x50\xf0\xf4\x73\x71\x8d\x50\xc8\x4c\x89\x4f\xca\xc9\xcf\xcf\x8d\xcf\x4c\xa9\xb0\xe6\x42\x95\x28\xc9\x2f\xc8\x4c\x46\x92\x08\x71\x74\xf2\x71\x55\x48\xcd\x2b\x4b\xcd\xc9\x2f\x48\x2d\xb6\xe6\x02\x04\x00\x00\xff\xff\x6b\x93\xaa\x08\x48\x00\x00\x00")

func _1557732988_initialize_dbDownSqlBytes() ([]byte, error) {
	return bindataRead(
		__1557732988_initialize_dbDownSql,
		"1557732988_initialize_db.down.sql",
	)
}

func _1557732988_initialize_dbDownSql() (*asset, error) {
	bytes, err := _1557732988_initialize_dbDownSqlBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "1557732988_initialize_db.down.sql", size: 72, mode: os.FileMode(0664), modTime: time.Unix(1700000000, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x77, 0x40, 0x78, 0xb7, 0x71, 0x3c, 0x20, 0x3b, 0xc9, 0xb, 0x2f, 0x49, 0xe4, 0xff, 0x1c, 0x84, 0x54, 0xa1, 0x30, 0xe3, 0x90, 0xf8, 0x73, 0xda, 0xb0, 0x2a, 0xea, 0x8e, 0xf1, 0x82, 0xe7, 0xd2}}
	return a, nil
}

var __1557732988_initialize_dbUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x0e\x72\x75\x0c\x71\x55\x08\x71\x74\xf2\x71\x55\x48\xcd\x2b\x4b\xcd\xc9\x2f\x48\x2d\x56\xd0\xc8\x4c\x51\x70\x8a\x0c\x71\x75\x54\xf0\xf3\x0f\x51\xf0\x0b\xf5\xf1\x51\x08\xf5\xf3\x0c\x0c\x75\xd5\x51\x48\x49\x2c\x49\x44\x93\xd3\x51\x28\xc9\x2f\xc8\x4c\xc6\x10\x4d\xca\xc9\xcf\xcf\x55\x70\xf2\x0c\xd1\x30\x35\x34\xd2\x84\x4b\x68\x5a\x73\x71\x41\xed\xf5\xf4\x73\x71\x8d\x50\xc8\x4c\x89\x07\x2b\x8d\xcf\x4c\xa9\x50\xf0\xf7\x43\x73\x87\x8b\x6b\xb0\x33\xd4\x2c\x4d\x6b\x0c\x8d\x60\x9b\xf1\x69\x04\x2b\x40\xd7\x88\x5d\x97\x06\x4c\x2d\x20\x00\x00\xff\xff\x0b\x7d\x91\x3e\x16\x01\x00\x00")

func _1557732988_initialize_dbUpSqlBytes() ([]byte, error) {
	return bindataRead(
		__1557732988_initialize_dbUpSql,
		"1557732988_initialize_db.up.sql",
	)
}

func _1557732988_initialize_dbUpSql() (*asset, error) {
	bytes, err := _1557732988_initialize_dbUpSqlBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "1557732988_initialize_db.up.sql", size: 278, mode: os.FileMode(0664), modTime: time.Unix(1700000000, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf5, 0x85, 0x41, 0x7a, 0xba, 0x4f, 0xa3, 0x43, 0xc0, 0x63, 0xfa, 0x2c, 0xd1, 0xc5, 0xbb, 0x20, 0xa0, 0x64, 0xa8, 0x3b, 0x65, 0x82, 0xa2, 0x14, 0x28, 0x18, 0x7c, 0x8b, 0x3a, 0x7a, 0xfd, 0xe0}}
	return a, nil
}

var _staticGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x54\xcc\x31\x4a\x06\x41\x0c\xc5\xf1\x7e\x4e\xf1\x4a\x05\x67\xa2\x95\x20\xd8\x8b\x28\x08\x7a\x81\xec\x6e\xc8\x17\xd6\x99\x2c\x99\xe8\xf9\x6d\x56\xc4\xd7\x3d\xfe\xf0\x23\xc2\x1b\xaf\x3b\xab\x60\x26\xa7\xad\x90\xbe\xc8\x36\x7f\xdf\xd5\xf3\xfb\x0d\x9e\x3e\x5e\x5f\xae\x11\x32\xfd\x2b\x56\x99\x08\xd3\x4b\xc2\x46\x3a\xf2\x22\x58\x6c\x70\x98\xcc\x72\xfc\x93\x4a\x21\x52\x7f\x50\x19\x12\x9c\x02\xf5\xba\xd8\xd8\x38\x19\xb5\xfb\x96\xd6\xe5\xf1\xee\xfe\xf6\x1c\xea\xb1\x2b\xba\x69\x70\x9a\x8f\x89\xea\x68\x8d\x5a\xa3\xce\xf6\x39\x25\xbe\x25\xe8\x2f\xd3\x49\x35\x75\xb4\xf2\x13\x00\x00\xff\xff\x9a\xab\xca\x11\xc6\x00\x00\x00")

func staticGoBytes() ([]byte, error) {
	return bindataRead(
		_staticGo,
		"static.go",
	)
}

func staticGo() (*asset, error) {
	bytes, err := staticGoBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "static.go", size: 198, mode: os.FileMode(0664), modTime: time.Unix(1700000000, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb, 0xd2, 0xfd, 0xbf, 0xe5, 0xff, 0xcb, 0x54, 0xec, 0x41, 0x23, 0x7b, 0xc0, 0xeb, 0x55, 0xb8, 0x69, 0xd7, 0x57, 0xf1, 0x83, 0x13, 0x88, 0x55, 0xd9, 0x73, 0xdc, 0x93, 0xee, 0x23, 0xe3, 0xe9}}
	return a, nil
}

// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
	canonicalName := strings.Replace(name, "\\", "/", -1)
	if f, ok := _bindata[canonicalName]; ok {
		a, err := f()
		if err != nil {
			return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
		}
		return a.bytes, nil
	}
	return nil, fmt.Errorf("Asset %s not found", name)
}

// AssetString returns the asset contents as a string (instead of a []byte).
func AssetString(name string) (string, error) {
	data, err := Asset(name)
	return string(data), err
}

// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
	a, err := Asset(name)
	if err != nil {
		panic("asset: Asset(" + name + "): " + err.Error())
	}

	return a
}

// MustAssetString is like AssetString but panics when Asset would return an
// error. It simplifies safe initialization of global variables.
func MustAssetString(name string) string {
	return string(MustAsset(name))
}

// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
	canonicalName := strings.Replace(name, "\\", "/", -1)
	if f, ok := _bindata[canonicalName]; ok {
		a, err := f()
		if err != nil {
			return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
		}
		return a.info, nil
	}
	return nil, fmt.Errorf("AssetInfo %s not found", name)
}

// AssetDigest returns the digest of the file with the given name. It returns an
// error if the asset could not be found or the digest could not be loaded.
func AssetDigest(name string) ([sha256.Size]byte, error) {
	canonicalName := strings.Replace(name, "\\", "/", -1)
	if f, ok := _bindata[canonicalName]; ok {
		a, err := f()
		if err != nil {
			return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
		}
		return a.digest, nil
	}
	return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
}

// Digests returns a map of all known files and their checksums.
func Digests() (map[string][sha256.Size]byte, error) {
	mp := make(map[string][sha256.Size]byte, len(_bindata))
	for name := range _bindata {
		a, err := _bindata[name]()
		if err != nil {
			return nil, err
		}
		mp[name] = a.digest
	}
	return mp, nil
}

// AssetNames returns the names of the assets.
func AssetNames() []string {
	names := make([]string, 0, len(_bindata))
	for name := range _bindata {
		names = append(names, name)
	}
	return names
}

// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
	"1557732988_initialize_db.down.sql": _1557732988_initialize_dbDownSql,

	"1557732988_initialize_db.up.sql": _1557732988_initialize_dbUpSql,

	"static.go": staticGo,
}

// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
//   data/
//     foo.txt
//     img/
//       a.png
//       b.png
// then AssetDir("data") would return []string{"foo.txt", "img"},
// AssetDir("data/img") would return []string{"a.png", "b.png"},
|
||||
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
|
||||
// AssetDir("") will return []string{"data"}.
|
||||
func AssetDir(name string) ([]string, error) {
|
||||
node := _bintree
|
||||
if len(name) != 0 {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
pathList := strings.Split(canonicalName, "/")
|
||||
for _, p := range pathList {
|
||||
node = node.Children[p]
|
||||
if node == nil {
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if node.Func != nil {
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
rv := make([]string, 0, len(node.Children))
|
||||
for childName := range node.Children {
|
||||
rv = append(rv, childName)
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
type bintree struct {
|
||||
Func func() (*asset, error)
|
||||
Children map[string]*bintree
|
||||
}
|
||||
|
||||
var _bintree = &bintree{nil, map[string]*bintree{
|
||||
"1557732988_initialize_db.down.sql": &bintree{_1557732988_initialize_dbDownSql, map[string]*bintree{}},
|
||||
"1557732988_initialize_db.up.sql": &bintree{_1557732988_initialize_dbUpSql, map[string]*bintree{}},
|
||||
"static.go": &bintree{staticGo, map[string]*bintree{}},
|
||||
}}
|
||||
|
||||
// RestoreAsset restores an asset under the given directory.
|
||||
func RestoreAsset(dir, name string) error {
|
||||
data, err := Asset(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
info, err := AssetInfo(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
|
||||
}
|
||||
|
||||
// RestoreAssets restores an asset under the given directory recursively.
|
||||
func RestoreAssets(dir, name string) error {
|
||||
children, err := AssetDir(name)
|
||||
// File
|
||||
if err != nil {
|
||||
return RestoreAsset(dir, name)
|
||||
}
|
||||
// Dir
|
||||
for _, child := range children {
|
||||
err = RestoreAssets(dir, filepath.Join(name, child))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _filePath(dir, name string) string {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
|
||||
}
|
|
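The generated accessors above follow the usual go-bindata contract: Asset and AssetInfo return errors, the Must* variants panic, and Digests/AssetNames enumerate the table. A minimal usage sketch, assuming it compiles in the same package as the generated file; printAssets is a hypothetical helper, not part of this commit:

func printAssets() error {
    // Digests walks _bindata and returns each asset's SHA-256 checksum.
    digests, err := Digests()
    if err != nil {
        return err
    }
    for name, digest := range digests {
        fmt.Printf("%s: %x\n", name, digest)
    }
    // MustAsset panics on unknown names; Asset returns an error instead.
    upSQL := MustAsset("1557732988_initialize_db.up.sql")
    fmt.Println(string(upSQL))
    return nil
}
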
@ -1,51 +0,0 @@
package mailserver

import (
    "errors"
    "time"
)

const (
    maxMessagesRequestPayloadLimit = 1000
)

// MessagesRequestPayload is a payload sent to the Mail Server.
type MessagesRequestPayload struct {
    // Lower is a lower bound of time range for which messages are requested.
    Lower uint32
    // Upper is an upper bound of time range for which messages are requested.
    Upper uint32
    // Bloom is a bloom filter to filter envelopes.
    Bloom []byte
    // Topics is a list of topics to filter envelopes.
    Topics [][]byte
    // Limit is the max number of envelopes to return.
    Limit uint32
    // Cursor is used for pagination of the results.
    Cursor []byte
    // Batch set to true indicates that the client supports batched responses.
    Batch bool
}

func (r *MessagesRequestPayload) SetDefaults() {
    if r.Limit == 0 {
        r.Limit = maxQueryLimit
    }

    if r.Upper == 0 {
        r.Upper = uint32(time.Now().Unix() + whisperTTLSafeThreshold)
    }
}

func (r MessagesRequestPayload) Validate() error {
    if r.Upper < r.Lower {
        return errors.New("query range is invalid: lower > upper")
    }
    if len(r.Bloom) == 0 && len(r.Topics) == 0 {
        return errors.New("bloom filter and topics is empty")
    }
    if r.Limit > maxMessagesRequestPayloadLimit {
        return errors.New("limit exceeds the maximum allowed value")
    }
    return nil
}

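For context, a sketch of how a payload like this was prepared before being RLP-encoded for a Mail Server; buildPayload is a hypothetical caller, and maxQueryLimit and whisperTTLSafeThreshold are defined elsewhere in this (now removed) package:

func buildPayload(lower, upper uint32, topics [][]byte) (MessagesRequestPayload, error) {
    payload := MessagesRequestPayload{
        Lower:  lower,
        Upper:  upper,
        Topics: topics,
        Batch:  true, // advertise batched-response support
    }
    payload.SetDefaults() // fills Limit and Upper when left at zero
    if err := payload.Validate(); err != nil {
        return MessagesRequestPayload{}, err
    }
    return payload, nil
}
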
@ -432,7 +432,6 @@ func (n *StatusNode) startDiscovery() error {
    options := peers.NewDefaultOptions()
    // TODO(dshulyak) consider adding a flag to define this behaviour
    options.AllowStop = len(n.config.RegisterTopics) == 0
    options.TrustedMailServers = parseNodesToNodeID(n.config.ClusterConfig.TrustedMailServers)

    n.peerPool = peers.NewPeerPool(
        n.discovery,

@ -26,7 +26,6 @@ import (
    "github.com/status-im/status-go/eth-node/crypto"
    "github.com/status-im/status-go/eth-node/types"
    "github.com/status-im/status-go/logutils"
    "github.com/status-im/status-go/mailserver"
    "github.com/status-im/status-go/multiaccounts/accounts"
    "github.com/status-im/status-go/multiaccounts/settings"
    "github.com/status-im/status-go/params"

@ -297,13 +296,6 @@ func (b *StatusNode) wakuService(wakuCfg *params.WakuConfig, clusterCfg *params.
        w.SetTimeSource(timesource.Now)
    }

    // enable mail service
    if wakuCfg.EnableMailServer {
        if err := registerWakuMailServer(w, wakuCfg); err != nil {
            return nil, fmt.Errorf("failed to register WakuMailServer: %v", err)
        }
    }

    if wakuCfg.LightClient {
        emptyBloomFilter := make([]byte, 64)
        if err := w.SetBloomFilter(emptyBloomFilter); err != nil {

@ -400,10 +392,7 @@ func setSettingsNotifier(db *accounts.Database, feed *event.Feed) {
}

func wakuRateLimiter(wakuCfg *params.WakuConfig, clusterCfg *params.ClusterConfig) *wakucommon.PeerRateLimiter {
    enodes := append(
        parseNodes(clusterCfg.StaticNodes),
        parseNodes(clusterCfg.TrustedMailServers)...,
    )
    enodes := parseNodes(clusterCfg.StaticNodes)
    var (
        ips     []string
        peerIDs []enode.ID

@ -606,13 +595,6 @@ func (b *StatusNode) peerService() *peer.Service {
    return b.peerSrvc
}

func registerWakuMailServer(wakuService *waku.Waku, config *params.WakuConfig) (err error) {
    var mailServer mailserver.WakuMailServer
    wakuService.RegisterMailServer(&mailServer)

    return mailServer.Init(wakuService, config)
}

func appendIf(condition bool, services []common.StatusService, service common.StatusService) []common.StatusService {
    if !condition {
        return services

@ -13,7 +13,6 @@ import (

const StaticNodes = "static"
const BootNodes = "boot"
const TrustedMailServers = "trusted_mailserver"
const PushNotificationsServers = "pushnotification"
const RendezvousNodes = "rendezvous"
const DiscV5BootstrapNodes = "discV5boot"

@ -39,12 +38,12 @@ func insertNodeConfig(tx *sql.Tx, c *params.NodeConfig) error {
        max_peers, max_pending_peers, enable_status_service, enable_ntp_sync,
        bridge_enabled, wallet_enabled, local_notifications_enabled,
        browser_enabled, permissions_enabled, mailservers_enabled,
        swarm_enabled, mailserver_registry_address, web3provider_enabled, connector_enabled,
        swarm_enabled, web3provider_enabled, connector_enabled,
        synthetic_id
    ) VALUES (
        ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,
        ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,
        ?, ?, ?, ?, ?, ?, 'id'
        ?, ?, ?, ?, ?, 'id'
    )`,
        c.NetworkID, c.DataDir, c.KeyStoreDir, c.NodeKey, c.NoDiscovery, c.Rendezvous,
        c.ListenAddr, c.AdvertiseAddr, c.Name, c.Version, c.APIModules,

@ -52,7 +51,7 @@ func insertNodeConfig(tx *sql.Tx, c *params.NodeConfig) error {
        c.EnableStatusService, true,
        c.BridgeConfig.Enabled, c.WalletConfig.Enabled, c.LocalNotificationsConfig.Enabled,
        c.BrowsersConfig.Enabled, c.PermissionsConfig.Enabled, c.MailserversConfig.Enabled,
        c.SwarmConfig.Enabled, c.MailServerRegistryAddress, c.Web3ProviderConfig.Enabled,
        c.SwarmConfig.Enabled, c.Web3ProviderConfig.Enabled,
        c.ConnectorConfig.Enabled,
    )
    return err

@ -264,12 +263,12 @@ func insertWakuV2ConfigPostMigration(tx *sql.Tx, c *params.NodeConfig) error {
func insertWakuV1Config(tx *sql.Tx, c *params.NodeConfig) error {
    _, err := tx.Exec(`
        INSERT OR REPLACE INTO waku_config (
            enabled, light_client, full_node, enable_mailserver, data_dir, minimum_pow, mailserver_password, mailserver_rate_limit, mailserver_data_retention,
            enabled, light_client, full_node, data_dir, minimum_pow,
            ttl, max_message_size, enable_rate_limiter, packet_rate_limit_ip, packet_rate_limit_peer_id, bytes_rate_limit_ip, bytes_rate_limit_peer_id,
            rate_limit_tolerance, bloom_filter_mode, enable_confirmations, synthetic_id
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 'id')`,
        c.WakuConfig.Enabled, c.WakuConfig.LightClient, c.WakuConfig.FullNode, c.WakuConfig.EnableMailServer, c.WakuConfig.DataDir, c.WakuConfig.MinimumPoW,
        c.WakuConfig.MailServerPassword, c.WakuConfig.MailServerRateLimit, c.WakuConfig.MailServerDataRetention, c.WakuConfig.TTL, c.WakuConfig.MaxMessageSize,
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 'id')`,
        c.WakuConfig.Enabled, c.WakuConfig.LightClient, c.WakuConfig.FullNode, c.WakuConfig.DataDir, c.WakuConfig.MinimumPoW,
        c.WakuConfig.TTL, c.WakuConfig.MaxMessageSize,
        c.WakuConfig.EnableRateLimiter, c.WakuConfig.PacketRateLimitIP, c.WakuConfig.PacketRateLimitPeerID, c.WakuConfig.BytesRateLimitIP, c.WakuConfig.BytesRateLimitPeerID,
        c.WakuConfig.RateLimitTolerance, c.WakuConfig.BloomFilterMode, c.WakuConfig.EnableConfirmations,
    )

@ -277,10 +276,6 @@ func insertWakuV1Config(tx *sql.Tx, c *params.NodeConfig) error {
        return err
    }

    if _, err := tx.Exec(`INSERT OR REPLACE INTO waku_config_db_pg (enabled, uri, synthetic_id) VALUES (?, ?, 'id')`, c.WakuConfig.DatabaseConfig.PGConfig.Enabled, c.WakuConfig.DatabaseConfig.PGConfig.URI); err != nil {
        return err
    }

    if _, err := tx.Exec(`DELETE FROM waku_softblacklisted_peers WHERE synthetic_id = 'id'`); err != nil {
        return err
    }

@ -311,7 +306,6 @@ func insertClusterConfigNodes(tx *sql.Tx, c *params.NodeConfig) error {
    nodeMap := make(map[string][]string)
    nodeMap[StaticNodes] = c.ClusterConfig.StaticNodes
    nodeMap[BootNodes] = c.ClusterConfig.BootNodes
    nodeMap[TrustedMailServers] = c.ClusterConfig.TrustedMailServers
    nodeMap[PushNotificationsServers] = c.ClusterConfig.PushNotificationsServers
    nodeMap[RendezvousNodes] = c.ClusterConfig.RendezvousNodes
    nodeMap[DiscV5BootstrapNodes] = c.ClusterConfig.DiscV5BootstrapNodes

@ -445,14 +439,14 @@ func loadNodeConfig(tx *sql.Tx) (*params.NodeConfig, error) {
        listen_addr, advertise_addr, name, version, api_modules, tls_enabled, max_peers, max_pending_peers,
        enable_status_service, bridge_enabled, wallet_enabled, local_notifications_enabled,
        browser_enabled, permissions_enabled, mailservers_enabled, swarm_enabled,
        mailserver_registry_address, web3provider_enabled, connector_enabled FROM node_config
        web3provider_enabled, connector_enabled FROM node_config
        WHERE synthetic_id = 'id'
    `).Scan(
        &nodecfg.NetworkID, &nodecfg.DataDir, &nodecfg.KeyStoreDir, &nodecfg.NodeKey, &nodecfg.NoDiscovery, &nodecfg.Rendezvous,
        &nodecfg.ListenAddr, &nodecfg.AdvertiseAddr, &nodecfg.Name, &nodecfg.Version, &nodecfg.APIModules, &nodecfg.TLSEnabled, &nodecfg.MaxPeers, &nodecfg.MaxPendingPeers,
        &nodecfg.EnableStatusService, &nodecfg.BridgeConfig.Enabled, &nodecfg.WalletConfig.Enabled, &nodecfg.LocalNotificationsConfig.Enabled,
        &nodecfg.BrowsersConfig.Enabled, &nodecfg.PermissionsConfig.Enabled, &nodecfg.MailserversConfig.Enabled, &nodecfg.SwarmConfig.Enabled,
        &nodecfg.MailServerRegistryAddress, &nodecfg.Web3ProviderConfig.Enabled, &nodecfg.ConnectorConfig.Enabled,
        &nodecfg.Web3ProviderConfig.Enabled, &nodecfg.ConnectorConfig.Enabled,
    )
    if err != nil && err != sql.ErrNoRows {
        return nil, err

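The fixed synthetic_id = 'id' in these queries is what keeps node_config a single-row table: every INSERT OR REPLACE targets the same key, so writes behave as an upsert and reads never need ORDER BY or LIMIT. A self-contained sketch of the same pattern against a hypothetical example_config table (database/sql with SQLite semantics assumed):

func saveValue(tx *sql.Tx, value string) error {
    // The constant synthetic_id makes this an upsert of the single row.
    _, err := tx.Exec(`INSERT OR REPLACE INTO example_config (value, synthetic_id) VALUES (?, 'id')`, value)
    return err
}

func loadValue(tx *sql.Tx) (string, error) {
    var value string
    err := tx.QueryRow(`SELECT value FROM example_config WHERE synthetic_id = 'id'`).Scan(&value)
    if err == sql.ErrNoRows {
        return "", nil // nothing stored yet
    }
    return value, err
}
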
@ -535,7 +529,6 @@ func loadNodeConfig(tx *sql.Tx) (*params.NodeConfig, error) {
    nodeMap := make(map[string]*[]string)
    nodeMap[StaticNodes] = &nodecfg.ClusterConfig.StaticNodes
    nodeMap[BootNodes] = &nodecfg.ClusterConfig.BootNodes
    nodeMap[TrustedMailServers] = &nodecfg.ClusterConfig.TrustedMailServers
    nodeMap[PushNotificationsServers] = &nodecfg.ClusterConfig.PushNotificationsServers
    nodeMap[RendezvousNodes] = &nodecfg.ClusterConfig.RendezvousNodes
    nodeMap[WakuNodes] = &nodecfg.ClusterConfig.WakuNodes

@ -709,13 +702,13 @@ func loadNodeConfig(tx *sql.Tx) (*params.NodeConfig, error) {
    }

    err = tx.QueryRow(`
        SELECT enabled, light_client, full_node, enable_mailserver, data_dir, minimum_pow, mailserver_password, mailserver_rate_limit, mailserver_data_retention,
        SELECT enabled, light_client, full_node, data_dir, minimum_pow,
        ttl, max_message_size, enable_rate_limiter, packet_rate_limit_ip, packet_rate_limit_peer_id, bytes_rate_limit_ip, bytes_rate_limit_peer_id,
        rate_limit_tolerance, bloom_filter_mode, enable_confirmations
        FROM waku_config WHERE synthetic_id = 'id'
    `).Scan(
        &nodecfg.WakuConfig.Enabled, &nodecfg.WakuConfig.LightClient, &nodecfg.WakuConfig.FullNode, &nodecfg.WakuConfig.EnableMailServer, &nodecfg.WakuConfig.DataDir, &nodecfg.WakuConfig.MinimumPoW,
        &nodecfg.WakuConfig.MailServerPassword, &nodecfg.WakuConfig.MailServerRateLimit, &nodecfg.WakuConfig.MailServerDataRetention, &nodecfg.WakuConfig.TTL, &nodecfg.WakuConfig.MaxMessageSize,
        &nodecfg.WakuConfig.Enabled, &nodecfg.WakuConfig.LightClient, &nodecfg.WakuConfig.FullNode, &nodecfg.WakuConfig.DataDir, &nodecfg.WakuConfig.MinimumPoW,
        &nodecfg.WakuConfig.TTL, &nodecfg.WakuConfig.MaxMessageSize,
        &nodecfg.WakuConfig.EnableRateLimiter, &nodecfg.WakuConfig.PacketRateLimitIP, &nodecfg.WakuConfig.PacketRateLimitPeerID, &nodecfg.WakuConfig.BytesRateLimitIP, &nodecfg.WakuConfig.BytesRateLimitPeerID,
        &nodecfg.WakuConfig.RateLimitTolerance, &nodecfg.WakuConfig.BloomFilterMode, &nodecfg.WakuConfig.EnableConfirmations,
    )

@ -723,11 +716,6 @@ func loadNodeConfig(tx *sql.Tx) (*params.NodeConfig, error) {
        return nil, err
    }

    err = tx.QueryRow("SELECT enabled, uri FROM waku_config_db_pg WHERE synthetic_id = 'id'").Scan(&nodecfg.WakuConfig.DatabaseConfig.PGConfig.Enabled, &nodecfg.WakuConfig.DatabaseConfig.PGConfig.URI)
    if err != nil && err != sql.ErrNoRows {
        return nil, err
    }

    rows, err = tx.Query(`SELECT peer_id FROM waku_softblacklisted_peers WHERE synthetic_id = 'id' ORDER BY peer_id ASC`)
    if err != nil && err != sql.ErrNoRows {
        return nil, err

@ -14,7 +14,6 @@ const (
type Cluster struct {
    StaticNodes     []string `json:"staticnodes"`
    BootNodes       []string `json:"bootnodes"`
    MailServers     []string `json:"mailservers"` // list of trusted mail servers
    RendezvousNodes []string `json:"rendezvousnodes"`
}

@ -80,9 +80,6 @@ type WakuConfig struct {
    // FullNode should be true if waku should always act as a full node
    FullNode bool

    // EnableMailServer is the mode in which the node is capable of delivering expired messages on demand
    EnableMailServer bool

    // DataDir is the file system folder Waku should use for any data storage needs.
    // For instance, MailServer will use this directory to store its data.
    DataDir string

@ -91,16 +88,6 @@ type WakuConfig struct {
    // We enforce a minimum as a bland spam prevention mechanism.
    MinimumPoW float64

    // MailServerPassword for symmetric encryption of waku message history requests.
    // (if no account file selected, then this password is used for symmetric encryption).
    MailServerPassword string

    // MailServerRateLimit minimum time between queries to mail server per peer.
    MailServerRateLimit int

    // MailServerDataRetention is a number of days data should be stored by MailServer.
    MailServerDataRetention int

    // TTL time to live for messages, in seconds
    TTL int

@ -108,9 +95,6 @@ type WakuConfig struct {
    // not only the size of envelopes sent in that packet.
    MaxMessageSize uint32

    // DatabaseConfig is configuration for which data store we use.
    DatabaseConfig DatabaseConfig

    // EnableRateLimiter set to true enables IP and peer ID rate limiting.
    EnableRateLimiter bool

@ -258,9 +242,6 @@ type ClusterConfig struct {
    // Deprecated: won't be used at all in wakuv2
    BootNodes []string

    // TrustedMailServers is a list of verified and trusted Mail Server nodes.
    TrustedMailServers []string

    // PushNotificationsServers is a list of default push notification servers.
    PushNotificationsServers []string

@ -526,9 +507,6 @@ type NodeConfig struct {
    // discoverable peers with the discovery limits.
    RequireTopics map[discv5.Topic]Limits `json:"RequireTopics"`

    // MailServerRegistryAddress is the MailServerRegistry contract address
    MailServerRegistryAddress string

    // PushNotificationServerConfig is the config for the push notification server
    PushNotificationServerConfig PushNotificationServerConfig `json:"PushNotificationServerConfig"`

@ -769,13 +747,6 @@ func WithLES() Option {
    }
}

// WithMailserver enables MailServer.
func WithMailserver() Option {
    return func(c *NodeConfig) error {
        return loadConfigFromAsset("../config/cli/mailserver-enabled.json", c)
    }
}

func WithDiscV5BootstrapNodes(nodes []string) Option {
    return func(c *NodeConfig) error {
        c.ClusterConfig.DiscV5BootstrapNodes = nodes

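Option here is the usual functional-option shape, func(*NodeConfig) error, so configuration helpers like the ones above compose as a variadic list. A minimal sketch of how such options are applied; newConfig is a hypothetical stand-in for status-go's real constructors:

func newConfig(opts ...Option) (*NodeConfig, error) {
    c := &NodeConfig{}
    for _, opt := range opts {
        // Each option mutates the config and may fail, e.g. when an
        // embedded JSON asset cannot be loaded.
        if err := opt(c); err != nil {
            return nil, err
        }
    }
    return c, nil
}
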
@ -1079,14 +1050,6 @@ func (c *NodeConfig) Validate() error {
        return fmt.Errorf("both Waku and WakuV2 are enabled and use the same data dir")
    }

    // Waku's data directory must be relative to the main data directory
    // if EnableMailServer is true.
    if c.WakuConfig.Enabled && c.WakuConfig.EnableMailServer {
        if !strings.HasPrefix(c.WakuConfig.DataDir, c.DataDir) {
            return fmt.Errorf("WakuConfig.DataDir must start with DataDir fragment")
        }
    }

    if !c.NoDiscovery && len(c.ClusterConfig.BootNodes) == 0 {
        // No point in running discovery if we don't have bootnodes.
        // In case we do have bootnodes, NoDiscovery should be true.

@ -23,7 +23,6 @@ func TestNewNodeConfigWithDefaults(t *testing.T) {
        params.GoerliNetworkID,
        params.WithFleet(params.FleetProd),
        params.WithLES(),
        params.WithMailserver(),
    )
    require.NoError(t, err)
    assert.Equal(t, "/some/data/path", c.DataDir)

@ -31,8 +30,6 @@ func TestNewNodeConfigWithDefaults(t *testing.T) {
    // assert Whisper
    assert.Equal(t, true, c.WakuConfig.Enabled)
    assert.Equal(t, "/some/data/path/waku", c.WakuConfig.DataDir)
    // assert MailServer
    assert.Equal(t, false, c.WakuConfig.EnableMailServer)
    // assert cluster
    assert.Equal(t, false, c.NoDiscovery)
    assert.Equal(t, params.FleetProd, c.ClusterConfig.Fleet)

@ -55,8 +55,7 @@ var sendEnodeDiscovered = signal.SendEnodeDiscovered
// ConfirmAdded calls base TopicPool ConfirmAdded method and sends a signal
// confirming the enode has been discovered.
func (t *cacheOnlyTopicPool) ConfirmAdded(server *p2p.Server, nodeID enode.ID) {
    trusted := t.verifier.VerifyNode(context.TODO(), nodeID)
    if trusted {
    if t.verifier == nil || t.verifier.VerifyNode(context.TODO(), nodeID) {
        // add to cache only if trusted
        t.TopicPool.ConfirmAdded(server, nodeID)
        sendEnodeDiscovered(nodeID.String(), string(t.topic))

@ -14,7 +14,6 @@ import (

    "github.com/status-im/status-go/discovery"
    "github.com/status-im/status-go/params"
    "github.com/status-im/status-go/peers/verifier"
    "github.com/status-im/status-go/signal"
)

@ -55,8 +54,6 @@ type Options struct {
    // TopicStopSearchDelay time stopSearch will be waiting for max cached peers to be
    // filled before really stopping the search.
    TopicStopSearchDelay time.Duration
    // TrustedMailServers is a list of trusted nodes.
    TrustedMailServers []enode.ID
}

// NewDefaultOptions returns a struct with default Options.

@ -155,11 +152,7 @@ func (p *PeerPool) Start(server *p2p.Server) error {
        var topicPool TopicPoolInterface
        t := newTopicPool(p.discovery, topic, limits, p.opts.SlowSync, p.opts.FastSync, p.cache)
        if topic == MailServerDiscoveryTopic {
            v, err := p.initVerifier()
            if err != nil {
                return err
            }
            topicPool = newCacheOnlyTopicPool(t, v)
            topicPool = newCacheOnlyTopicPool(t, nil)
        } else {
            topicPool = t
        }

@ -175,10 +168,6 @@ func (p *PeerPool) Start(server *p2p.Server) error {
    return nil
}

func (p *PeerPool) initVerifier() (v Verifier, err error) {
    return verifier.NewLocalVerifier(p.opts.TrustedMailServers), nil
}

func (p *PeerPool) startDiscovery() error {
    if p.discovery.Running() {
        return nil

@ -128,7 +128,7 @@ func (s *PeerPoolSimulationSuite) TestPeerPoolCacheEthV5() {
    config := map[discv5.Topic]params.Limits{
        topic: params.NewLimits(1, 1),
    }
    peerPoolOpts := &Options{100 * time.Millisecond, 100 * time.Millisecond, 0, true, 100 * time.Millisecond, nil}
    peerPoolOpts := &Options{100 * time.Millisecond, 100 * time.Millisecond, 0, true, 100 * time.Millisecond}
    cache, err := newInMemoryCache()
    s.Require().NoError(err)
    peerPool := NewPeerPool(s.discovery[1], config, cache, peerPoolOpts)

@ -177,7 +177,7 @@ func TestPeerPoolMaxPeersOverflow(t *testing.T) {
    defer func() { assert.NoError(t, discovery.Stop()) }()
    require.True(t, discovery.Running())

    poolOpts := &Options{DefaultFastSync, DefaultSlowSync, 0, true, 100 * time.Millisecond, nil}
    poolOpts := &Options{DefaultFastSync, DefaultSlowSync, 0, true, 100 * time.Millisecond}
    pool := NewPeerPool(discovery, nil, nil, poolOpts)
    require.NoError(t, pool.Start(peer))
    require.Equal(t, signal.EventDiscoveryStarted, <-signals)

@ -230,7 +230,7 @@ func TestPeerPoolDiscV5Timeout(t *testing.T) {
    require.True(t, discovery.Running())

    // start PeerPool
    poolOpts := &Options{DefaultFastSync, DefaultSlowSync, time.Millisecond * 100, true, 100 * time.Millisecond, nil}
    poolOpts := &Options{DefaultFastSync, DefaultSlowSync, time.Millisecond * 100, true, 100 * time.Millisecond}
    pool := NewPeerPool(discovery, nil, nil, poolOpts)
    require.NoError(t, pool.Start(server))
    require.Equal(t, signal.EventDiscoveryStarted, <-signals)

@ -277,7 +277,7 @@ func TestPeerPoolNotAllowedStopping(t *testing.T) {
    require.True(t, discovery.Running())

    // start PeerPool
    poolOpts := &Options{DefaultFastSync, DefaultSlowSync, time.Millisecond * 100, false, 100 * time.Millisecond, nil}
    poolOpts := &Options{DefaultFastSync, DefaultSlowSync, time.Millisecond * 100, false, 100 * time.Millisecond}
    pool := NewPeerPool(discovery, nil, nil, poolOpts)
    require.NoError(t, pool.Start(server))

@ -294,7 +294,7 @@ func (s *PeerPoolSimulationSuite) TestUpdateTopicLimits() {
    config := map[discv5.Topic]params.Limits{
        topic: params.NewLimits(1, 1),
    }
    peerPoolOpts := &Options{100 * time.Millisecond, 100 * time.Millisecond, 0, true, 100 * time.Millisecond, nil}
    peerPoolOpts := &Options{100 * time.Millisecond, 100 * time.Millisecond, 0, true, 100 * time.Millisecond}
    cache, err := newInMemoryCache()
    s.Require().NoError(err)
    peerPool := NewPeerPool(s.discovery[1], config, cache, peerPoolOpts)

@ -374,7 +374,6 @@ func (s *PeerPoolSimulationSuite) TestMailServerPeersDiscovery() {
        0,
        true,
        100 * time.Millisecond,
        []enode.ID{s.peers[0].Self().ID()},
    }
    peerPool := NewPeerPool(s.discovery[1], config, cache, peerPoolOpts)
    s.Require().NoError(peerPool.Start(s.peers[1]))

@ -64,7 +64,6 @@ import (
    "github.com/status-im/status-go/server"
    "github.com/status-im/status-go/services/browsers"
    ensservice "github.com/status-im/status-go/services/ens"
    "github.com/status-im/status-go/services/ext/mailservers"
    localnotifications "github.com/status-im/status-go/services/local-notifications"
    mailserversDB "github.com/status-im/status-go/services/mailservers"
    "github.com/status-im/status-go/services/wallet"

@ -98,12 +97,9 @@ var messageCacheIntervalMs uint64 = 1000 * 60 * 60 * 48
// layers.
// It needs to expose an interface to manage installations
// because installations are managed by the user.
// Similarly, it needs to expose an interface to manage
// mailservers because they can also be managed by the user.
type Messenger struct {
    node        types.Node
    server      *p2p.Server
    peerStore   *mailservers.PeerStore
    config      *config
    identity    *ecdsa.PrivateKey
    persistence *sqlitePersistence

@ -291,24 +287,11 @@ func (interceptor EnvelopeEventsInterceptor) EnvelopeExpired(identifiers [][]byt
    interceptor.EnvelopeEventsHandler.EnvelopeExpired(identifiers, err)
}

// MailServerRequestCompleted triggered when the mailserver sends a message to notify that the request has been completed
func (interceptor EnvelopeEventsInterceptor) MailServerRequestCompleted(requestID types.Hash, lastEnvelopeHash types.Hash, cursor []byte, err error) {
    //we don't track mailserver requests in Messenger, so just redirect to handler
    interceptor.EnvelopeEventsHandler.MailServerRequestCompleted(requestID, lastEnvelopeHash, cursor, err)
}

// MailServerRequestExpired triggered when the mailserver request expires
func (interceptor EnvelopeEventsInterceptor) MailServerRequestExpired(hash types.Hash) {
    //we don't track mailserver requests in Messenger, so just redirect to handler
    interceptor.EnvelopeEventsHandler.MailServerRequestExpired(hash)
}

func NewMessenger(
    nodeName string,
    identity *ecdsa.PrivateKey,
    node types.Node,
    installationID string,
    peerStore *mailservers.PeerStore,
    version string,
    opts ...Option,
) (*Messenger, error) {

@ -594,7 +577,6 @@ func NewMessenger(
        peersyncing:           peersyncing.New(peersyncing.Config{Database: database, Timesource: transp}),
        peersyncingOffers:     make(map[string]uint64),
        peersyncingRequests:   make(map[string]uint64),
        peerStore:             peerStore,
        mvdsStatusChangeEvent: make(chan datasyncnode.PeerStatusChangeEvent, 5),
        verificationDatabase:  verification.NewPersistence(database),
        mailserverCycle: mailserverCycle{

@ -91,7 +91,6 @@ func newTestMessenger(waku types.Waku, config testMessengerConfig) (*Messenger,
        config.privateKey,
        &testNode{shh: waku},
        uuid.New().String(),
        nil,
        "testVersion",
        options...,
    )

|
@ -35,16 +35,6 @@ func (h EnvelopeSignalHandlerMock) EnvelopeExpired(identifiers [][]byte, err err
    signal.SendEnvelopeExpired(identifiers, err)
}

// MailServerRequestCompleted triggered when the mailserver sends a message to notify that the request has been completed
func (h EnvelopeSignalHandlerMock) MailServerRequestCompleted(requestID types.Hash, lastEnvelopeHash types.Hash, cursor []byte, err error) {
    signal.SendMailServerRequestCompleted(requestID, lastEnvelopeHash, cursor, err)
}

// MailServerRequestExpired triggered when the mailserver request expires
func (h EnvelopeSignalHandlerMock) MailServerRequestExpired(hash types.Hash) {
    signal.SendMailServerRequestExpired(hash)
}

type EnvelopeEventsInterceptorMock struct {
    EnvelopeEventsInterceptor

@ -127,7 +117,6 @@ func (s *MessengerMessagesTrackingSuite) newMessenger(waku types.Waku, logger *z
        EnvelopeEventsHandler:            EnvelopeSignalHandlerMock{},
        MaxAttempts:                      1,
        AwaitOnlyMailServerConfirmations: false,
        IsMailserver:                     func(peer types.EnodeID) bool { return false },
        Logger:                           s.logger,
    }

@ -28,7 +28,6 @@ type EnvelopesMonitorConfig struct {
    EnvelopeEventsHandler            EnvelopeEventsHandler
    MaxAttempts                      int
    AwaitOnlyMailServerConfirmations bool
    IsMailserver                     func(types.EnodeID) bool
    Logger                           *zap.Logger
}

@ -36,8 +35,6 @@ type EnvelopesMonitorConfig struct {
type EnvelopeEventsHandler interface {
    EnvelopeSent([][]byte)
    EnvelopeExpired([][]byte, error)
    MailServerRequestCompleted(types.Hash, types.Hash, []byte, error)
    MailServerRequestExpired(types.Hash)
}

// NewEnvelopesMonitor returns a pointer to an instance of the EnvelopesMonitor.

@ -59,7 +56,6 @@ func NewEnvelopesMonitor(w types.Waku, config EnvelopesMonitorConfig) *Envelopes
        handler:                          config.EnvelopeEventsHandler,
        awaitOnlyMailServerConfirmations: config.AwaitOnlyMailServerConfirmations,
        maxAttempts:                      config.MaxAttempts,
        isMailserver:                     config.IsMailserver,
        logger:                           logger.With(zap.Namespace("EnvelopesMonitor")),

        // key is envelope hash (event.Hash)

@ -200,42 +196,46 @@ func (m *EnvelopesMonitor) handleEvent(event types.EnvelopeEvent) {

func (m *EnvelopesMonitor) handleEventEnvelopeSent(event types.EnvelopeEvent) {
    // Mailserver confirmations for WakuV2 are disabled
    if (m.w == nil || m.w.Version() < 2) && m.awaitOnlyMailServerConfirmations {
        if !m.isMailserver(event.Peer) {
    // Perhaps code might be reused?

    /*
        if (m.w == nil || m.w.Version() < 2) && m.awaitOnlyMailServerConfirmations {
            if !m.isMailserver(event.Peer) {
                return
            }
        }

        m.mu.Lock()
        defer m.mu.Unlock()

        confirmationExpected := event.Batch != (types.Hash{})

        envelope, ok := m.envelopes[event.Hash]

        // If confirmations are not expected, we keep track of the envelope
        // being sent
        if !ok && !confirmationExpected {
            m.envelopes[event.Hash] = &monitoredEnvelope{envelopeHashID: event.Hash, state: EnvelopeSent}
            return
        }
    }

    m.mu.Lock()
    defer m.mu.Unlock()

    confirmationExpected := event.Batch != (types.Hash{})

    envelope, ok := m.envelopes[event.Hash]

    // If confirmations are not expected, we keep track of the envelope
    // being sent
    if !ok && !confirmationExpected {
        m.envelopes[event.Hash] = &monitoredEnvelope{envelopeHashID: event.Hash, state: EnvelopeSent}
        return
    }

    // if message was already confirmed - skip it
    if envelope.state == EnvelopeSent {
        return
    }
    m.logger.Debug("envelope is sent", zap.String("hash", event.Hash.String()), zap.String("peer", event.Peer.String()))
    if confirmationExpected {
        if _, ok := m.batches[event.Batch]; !ok {
            m.batches[event.Batch] = map[types.Hash]struct{}{}
        // if message was already confirmed - skip it
        if envelope.state == EnvelopeSent {
            return
        }
        m.batches[event.Batch][event.Hash] = struct{}{}
        m.logger.Debug("waiting for a confirmation", zap.String("batch", event.Batch.String()))
    } else {
        m.logger.Debug("confirmation not expected, marking as sent")
        envelope.state = EnvelopeSent
        m.processMessageIDs(envelope.messageIDs)
    }
        m.logger.Debug("envelope is sent", zap.String("hash", event.Hash.String()), zap.String("peer", event.Peer.String()))
        if confirmationExpected {
            if _, ok := m.batches[event.Batch]; !ok {
                m.batches[event.Batch] = map[types.Hash]struct{}{}
            }
            m.batches[event.Batch][event.Hash] = struct{}{}
            m.logger.Debug("waiting for a confirmation", zap.String("batch", event.Batch.String()))
        } else {
            m.logger.Debug("confirmation not expected, marking as sent")
            envelope.state = EnvelopeSent
            m.processMessageIDs(envelope.messageIDs)
        }
    */
}

func (m *EnvelopesMonitor) handleAcknowledgedBatch(event types.EnvelopeEvent) {

@ -55,7 +55,6 @@ func (s *EnvelopesMonitorSuite) SetupTest() {
            EnvelopeEventsHandler:            s.eventsHandlerMock,
            MaxAttempts:                      6,
            AwaitOnlyMailServerConfirmations: false,
            IsMailserver:                     func(types.EnodeID) bool { return false },
            Logger:                           zap.NewNop(),
        },
    )

@ -3,9 +3,7 @@ package ext
import (
    "context"
    "crypto/ecdsa"
    "encoding/hex"
    "errors"
    "fmt"
    "math/big"
    "time"

@ -20,14 +18,12 @@ import (
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/rlp"

    ethcommon "github.com/ethereum/go-ethereum/common"

    "github.com/status-im/status-go/eth-node/crypto"
    "github.com/status-im/status-go/eth-node/types"
    "github.com/status-im/status-go/images"
    "github.com/status-im/status-go/mailserver"
    multiaccountscommon "github.com/status-im/status-go/multiaccounts/common"
    "github.com/status-im/status-go/multiaccounts/settings"
    "github.com/status-im/status-go/protocol"

@ -44,12 +40,6 @@ import (
    "github.com/status-im/status-go/protocol/transport"
    "github.com/status-im/status-go/protocol/urls"
    "github.com/status-im/status-go/protocol/verification"
    "github.com/status-im/status-go/services/ext/mailservers"
)

const (
    // defaultRequestTimeout is the default request timeout in seconds
    defaultRequestTimeout = 10
)

var (

@ -69,73 +59,11 @@ var (
// PAYLOADS
// -----

// MessagesRequest is a RequestMessages() request payload.
type MessagesRequest struct {
    // MailServerPeer is MailServer's enode address.
    MailServerPeer string `json:"mailServerPeer"`

    // From is a lower bound of time range (optional).
    // Default is 24 hours back from now.
    From uint32 `json:"from"`

    // To is an upper bound of time range (optional).
    // Default is now.
    To uint32 `json:"to"`

    // Limit determines the number of messages sent by the mail server
    // for the current paginated request
    Limit uint32 `json:"limit"`

    // Cursor is used as starting point for paginated requests
    Cursor string `json:"cursor"`

    // StoreCursor is used as starting point for WAKUV2 paginatedRequests
    StoreCursor *StoreRequestCursor `json:"storeCursor"`

    // Topic is a regular Whisper topic.
    // DEPRECATED
    Topic types.TopicType `json:"topic"`

    // Topics is a list of Whisper topics.
    Topics []types.TopicType `json:"topics"`

    // SymKeyID is an ID of a symmetric key to authenticate to MailServer.
    // It's derived from MailServer password.
    SymKeyID string `json:"symKeyID"`

    // Timeout is the time to live of the request specified in seconds.
    // Default is 10 seconds
    Timeout time.Duration `json:"timeout"`

    // Force ensures that requests will bypass enforced delay.
    Force bool `json:"force"`
}

type StoreRequestCursor struct {
    Digest       []byte  `json:"digest"`
    ReceivedTime float64 `json:"receivedTime"`
}

func (r *MessagesRequest) SetDefaults(now time.Time) {
    // set From and To defaults
    if r.To == 0 {
        r.To = uint32(now.UTC().Unix())
    }

    if r.From == 0 {
        oneDay := uint32(86400) // -24 hours
        if r.To < oneDay {
            r.From = 0
        } else {
            r.From = r.To - oneDay
        }
    }

    if r.Timeout == 0 {
        r.Timeout = defaultRequestTimeout
    }
}

// MessagesResponse is a response for requestMessages2 method.
type MessagesResponse struct {
    // Cursor from the response can be used to retrieve more messages

@ -153,17 +81,15 @@ type MessagesResponse struct {

// PublicAPI extends whisper public API.
type PublicAPI struct {
    service  *Service
    eventSub mailservers.EnvelopeEventSubscriber
    log      log.Logger
    service *Service
    log     log.Logger
}

// NewPublicAPI returns instance of the public API.
func NewPublicAPI(s *Service, eventSub mailservers.EnvelopeEventSubscriber) *PublicAPI {
func NewPublicAPI(s *Service) *PublicAPI {
    return &PublicAPI{
        service:  s,
        eventSub: eventSub,
        log:      log.New("package", "status-go/services/sshext.PublicAPI"),
        service: s,
        log:     log.New("package", "status-go/services/sshext.PublicAPI"),
    }
}

@ -175,33 +101,6 @@ type RetryConfig struct {
    MaxRetries int
}

func WaitForExpiredOrCompleted(requestID types.Hash, events chan types.EnvelopeEvent, timeout time.Duration) (*types.MailServerResponse, error) {
    expired := fmt.Errorf("request %x expired", requestID)
    after := time.NewTimer(timeout)
    defer after.Stop()
    for {
        var ev types.EnvelopeEvent
        select {
        case ev = <-events:
        case <-after.C:
            return nil, expired
        }
        if ev.Hash != requestID {
            continue
        }
        switch ev.Event {
        case types.EventMailServerRequestCompleted:
            data, ok := ev.Data.(*types.MailServerResponse)
            if ok {
                return data, nil
            }
            return nil, errors.New("invalid event data type")
        case types.EventMailServerRequestExpired:
            return nil, expired
        }
    }
}

type Author struct {
    PublicKey types.HexBytes `json:"publicKey"`
    Alias     string         `json:"alias"`

@ -1862,57 +1761,6 @@ func (api *PublicAPI) DeleteCommunityMemberMessages(request *requests.DeleteComm
    return api.service.messenger.DeleteCommunityMemberMessages(request)
}

// -----
// HELPER
// -----

// MakeMessagesRequestPayload makes a specific payload for MailServer
// to request historic messages.
// DEPRECATED
func MakeMessagesRequestPayload(r MessagesRequest) ([]byte, error) {
    cursor, err := hex.DecodeString(r.Cursor)
    if err != nil {
        return nil, fmt.Errorf("invalid cursor: %v", err)
    }

    if len(cursor) > 0 && len(cursor) != mailserver.CursorLength {
        return nil, fmt.Errorf("invalid cursor size: expected %d but got %d", mailserver.CursorLength, len(cursor))
    }

    payload := mailserver.MessagesRequestPayload{
        Lower: r.From,
        Upper: r.To,
        // We need to pass bloom filter for
        // backward compatibility
        Bloom:  createBloomFilter(r),
        Topics: topicsToByteArray(r.Topics),
        Limit:  r.Limit,
        Cursor: cursor,
        // Client must tell the MailServer if it supports batch responses.
        // This can be removed in the future.
        Batch: true,
    }

    return rlp.EncodeToBytes(payload)
}

func topicsToByteArray(topics []types.TopicType) [][]byte {

    var response [][]byte
    for idx := range topics {
        response = append(response, topics[idx][:])
    }

    return response
}

func createBloomFilter(r MessagesRequest) []byte {
    if len(r.Topics) > 0 {
        return topicsToBloom(r.Topics...)
    }
    return types.TopicToBloom(r.Topic)
}

func topicsToBloom(topics ...types.TopicType) []byte {
    i := new(big.Int)
    for _, topic := range topics {

@ -1,102 +1,13 @@
package ext

import (
    "encoding/hex"
    "fmt"
    "testing"
    "time"

    "github.com/status-im/status-go/eth-node/types"

    "github.com/status-im/status-go/mailserver"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestMessagesRequest_setDefaults(t *testing.T) {
    daysAgo := func(now time.Time, days int) uint32 {
        return uint32(now.UTC().Add(-24 * time.Hour * time.Duration(days)).Unix())
    }

    tnow := time.Now()
    now := uint32(tnow.UTC().Unix())
    yesterday := daysAgo(tnow, 1)

    scenarios := []struct {
        given    *MessagesRequest
        expected *MessagesRequest
    }{
        {
            &MessagesRequest{From: 0, To: 0},
            &MessagesRequest{From: yesterday, To: now, Timeout: defaultRequestTimeout},
        },
        {
            &MessagesRequest{From: 1, To: 0},
            &MessagesRequest{From: uint32(1), To: now, Timeout: defaultRequestTimeout},
        },
        {
            &MessagesRequest{From: 0, To: yesterday},
            &MessagesRequest{From: daysAgo(tnow, 2), To: yesterday, Timeout: defaultRequestTimeout},
        },
        // 100 - 1 day would be invalid, so we set From to 0
        {
            &MessagesRequest{From: 0, To: 100},
            &MessagesRequest{From: 0, To: 100, Timeout: defaultRequestTimeout},
        },
        // set Timeout
        {
            &MessagesRequest{From: 0, To: 0, Timeout: 100},
            &MessagesRequest{From: yesterday, To: now, Timeout: 100},
        },
    }

    for i, s := range scenarios {
        t.Run(fmt.Sprintf("Scenario %d", i), func(t *testing.T) {
            s.given.SetDefaults(tnow)
            require.Equal(t, s.expected, s.given)
        })
    }
}

func TestMakeMessagesRequestPayload(t *testing.T) {
    var emptyTopic types.TopicType
    testCases := []struct {
        Name string
        Req  MessagesRequest
        Err  string
    }{
        {
            Name: "empty cursor",
            Req:  MessagesRequest{Cursor: ""},
            Err:  "",
        },
        {
            Name: "invalid cursor size",
            Req:  MessagesRequest{Cursor: hex.EncodeToString([]byte{0x01, 0x02, 0x03})},
            Err:  fmt.Sprintf("invalid cursor size: expected %d but got 3", mailserver.CursorLength),
        },
        {
            Name: "valid cursor",
            Req: MessagesRequest{
                Cursor: hex.EncodeToString(mailserver.NewDBKey(123, emptyTopic, types.Hash{}).Cursor()),
            },
            Err: "",
        },
    }

    for _, tc := range testCases {
        t.Run(tc.Name, func(t *testing.T) {
            _, err := MakeMessagesRequestPayload(tc.Req)
            if tc.Err == "" {
                require.NoError(t, err)
            } else {
                require.EqualError(t, err, tc.Err)
            }
        })
    }
}

func TestTopicsToBloom(t *testing.T) {
    t1 := stringToTopic("t1")
    b1 := types.TopicToBloom(t1)

@ -121,36 +32,6 @@ func TestTopicsToBloom(t *testing.T) {
    assert.True(t, types.BloomFilterMatch(reqBloom, b3))
}

func TestCreateBloomFilter(t *testing.T) {
    t1 := stringToTopic("t1")
    t2 := stringToTopic("t2")

    req := MessagesRequest{Topic: t1}
    bloom := createBloomFilter(req)
    assert.Equal(t, topicsToBloom(t1), bloom)

    req = MessagesRequest{Topics: []types.TopicType{t1, t2}}
    bloom = createBloomFilter(req)
    assert.Equal(t, topicsToBloom(t1, t2), bloom)
}

func stringToTopic(s string) types.TopicType {
    return types.BytesToTopic([]byte(s))
}

func TestExpiredOrCompleted(t *testing.T) {
    timeout := time.Millisecond
    events := make(chan types.EnvelopeEvent)
    errors := make(chan error, 1)
    hash := types.Hash{1}
    go func() {
        _, err := WaitForExpiredOrCompleted(hash, events, timeout)
        errors <- err
    }()
    select {
    case <-time.After(time.Second):
        require.FailNow(t, "timed out waiting for waitForExpiredOrCompleted to complete")
    case err := <-errors:
        require.EqualError(t, err, fmt.Sprintf("request %x expired", hash))
    }
}

@ -1,136 +0,0 @@
package ext

import (
    "sync"

    "github.com/ethereum/go-ethereum/log"

    "github.com/status-im/status-go/eth-node/types"
    "github.com/status-im/status-go/services/ext/mailservers"
)

// EnvelopeState in local tracker
type EnvelopeState int

const (
    // NotRegistered is returned if the requested hash wasn't registered in the tracker.
    NotRegistered EnvelopeState = -1
    // MailServerRequestSent is set when p2p request is sent to the mailserver
    MailServerRequestSent
)

// MailRequestMonitor is responsible for monitoring history requests to mailservers.
type MailRequestMonitor struct {
    eventSub mailservers.EnvelopeEventSubscriber
    handler  EnvelopeEventsHandler

    mu    sync.Mutex
    cache map[types.Hash]EnvelopeState

    requestsRegistry *RequestsRegistry

    wg   sync.WaitGroup
    quit chan struct{}
}

func NewMailRequestMonitor(eventSub mailservers.EnvelopeEventSubscriber, h EnvelopeEventsHandler, reg *RequestsRegistry) *MailRequestMonitor {
    return &MailRequestMonitor{
        eventSub:         eventSub,
        handler:          h,
        cache:            make(map[types.Hash]EnvelopeState),
        requestsRegistry: reg,
    }
}

// Start processing events.
func (m *MailRequestMonitor) Start() {
    m.quit = make(chan struct{})
    m.wg.Add(1)
    go func() {
        m.handleEnvelopeEvents()
        m.wg.Done()
    }()
}

// Stop processing events.
func (m *MailRequestMonitor) Stop() {
    close(m.quit)
    m.wg.Wait()
}

func (m *MailRequestMonitor) GetState(hash types.Hash) EnvelopeState {
    m.mu.Lock()
    defer m.mu.Unlock()
    state, exist := m.cache[hash]
    if !exist {
        return NotRegistered
    }
    return state
}

// handleEnvelopeEvents processes whisper envelope events
func (m *MailRequestMonitor) handleEnvelopeEvents() {
    events := make(chan types.EnvelopeEvent, 100) // must be buffered to prevent blocking whisper
    sub := m.eventSub.SubscribeEnvelopeEvents(events)
    defer sub.Unsubscribe()
    for {
        select {
        case <-m.quit:
            return
        case event := <-events:
            m.handleEvent(event)
        }
    }
}

// handleEvent based on type of the event either triggers
// confirmation handler or removes hash from MailRequestMonitor
func (m *MailRequestMonitor) handleEvent(event types.EnvelopeEvent) {
    handlers := map[types.EventType]func(types.EnvelopeEvent){
        types.EventMailServerRequestSent:      m.handleRequestSent,
        types.EventMailServerRequestCompleted: m.handleEventMailServerRequestCompleted,
        types.EventMailServerRequestExpired:   m.handleEventMailServerRequestExpired,
    }

    if handler, ok := handlers[event.Event]; ok {
        handler(event)
    }
}

func (m *MailRequestMonitor) handleRequestSent(event types.EnvelopeEvent) {
    m.mu.Lock()
    defer m.mu.Unlock()
    m.cache[event.Hash] = MailServerRequestSent
}

func (m *MailRequestMonitor) handleEventMailServerRequestCompleted(event types.EnvelopeEvent) {
    m.mu.Lock()
    defer m.mu.Unlock()
    m.requestsRegistry.Unregister(event.Hash)
    state, ok := m.cache[event.Hash]
    if !ok || state != MailServerRequestSent {
        return
    }
    log.Debug("mailserver response received", "hash", event.Hash)
    delete(m.cache, event.Hash)
    if m.handler != nil {
        if resp, ok := event.Data.(*types.MailServerResponse); ok {
            m.handler.MailServerRequestCompleted(event.Hash, resp.LastEnvelopeHash, resp.Cursor, resp.Error)
        }
    }
}

func (m *MailRequestMonitor) handleEventMailServerRequestExpired(event types.EnvelopeEvent) {
    m.mu.Lock()
    defer m.mu.Unlock()
    m.requestsRegistry.Unregister(event.Hash)
    state, ok := m.cache[event.Hash]
    if !ok || state != MailServerRequestSent {
        return
    }
    log.Debug("mailserver response expired", "hash", event.Hash)
    delete(m.cache, event.Hash)
    if m.handler != nil {
        m.handler.MailServerRequestExpired(event.Hash)
    }
}

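Before its removal, the monitor above was driven entirely by envelope events: Start subscribes, handleEvent routes by event type, and GetState answers queries from the cache. A hypothetical wiring sketch under those assumptions (runMonitor is not part of the original file):

func runMonitor(sub mailservers.EnvelopeEventSubscriber, h EnvelopeEventsHandler) {
    monitor := NewMailRequestMonitor(sub, h, NewRequestsRegistry(0))
    monitor.Start()
    defer monitor.Stop()

    // State is NotRegistered until an EventMailServerRequestSent arrives
    // for this hash, and is cleared again on completion or expiry.
    if monitor.GetState(types.Hash{0x01}) == MailServerRequestSent {
        // a history request is still in flight
    }
}
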
@ -1,85 +0,0 @@
package ext

import (
    "errors"
    "testing"
    "time"

    "github.com/stretchr/testify/suite"

    "github.com/status-im/status-go/eth-node/types"
)

var (
    testHash = types.Hash{0x01}
)

func TestMailRequestMonitorSuite(t *testing.T) {
    suite.Run(t, new(MailRequestMonitorSuite))
}

type MailRequestMonitorSuite struct {
    suite.Suite

    monitor *MailRequestMonitor
}

func (s *MailRequestMonitorSuite) SetupTest() {
    s.monitor = &MailRequestMonitor{
        cache:            map[types.Hash]EnvelopeState{},
        requestsRegistry: NewRequestsRegistry(0),
    }
}

func (s *MailRequestMonitorSuite) TestRequestCompleted() {
    mock := NewHandlerMock(1)
    s.monitor.handler = mock
    s.monitor.cache[testHash] = MailServerRequestSent
    s.monitor.handleEvent(types.EnvelopeEvent{
        Event: types.EventMailServerRequestCompleted,
        Hash:  testHash,
        Data:  &types.MailServerResponse{},
    })
    select {
    case requestID := <-mock.requestsCompleted:
        s.Equal(testHash, requestID)
        s.NotContains(s.monitor.cache, testHash)
    case <-time.After(10 * time.Second):
        s.Fail("timed out while waiting for a request to be completed")
    }
}

func (s *MailRequestMonitorSuite) TestRequestFailed() {
    mock := NewHandlerMock(1)
    s.monitor.handler = mock
    s.monitor.cache[testHash] = MailServerRequestSent
    s.monitor.handleEvent(types.EnvelopeEvent{
        Event: types.EventMailServerRequestCompleted,
        Hash:  testHash,
        Data:  &types.MailServerResponse{Error: errors.New("test error")},
    })
    select {
    case requestID := <-mock.requestsFailed:
        s.Equal(testHash, requestID)
        s.NotContains(s.monitor.cache, testHash)
    case <-time.After(10 * time.Second):
        s.Fail("timed out while waiting for a request to be failed")
    }
}

func (s *MailRequestMonitorSuite) TestRequestExpiration() {
    mock := NewHandlerMock(1)
    s.monitor.handler = mock
    s.monitor.cache[testHash] = MailServerRequestSent
    s.monitor.handleEvent(types.EnvelopeEvent{
        Event: types.EventMailServerRequestExpired,
        Hash:  testHash,
    })
    select {
    case requestID := <-mock.requestsExpired:
        s.Equal(testHash, requestID)
        s.NotContains(s.monitor.cache, testHash)
    case <-time.After(10 * time.Second):
        s.Fail("timed out while waiting for request expiration")
    }
}

@ -1,145 +0,0 @@
package mailservers

import (
    "encoding/json"
    "time"

    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/iterator"
    "github.com/syndtr/goleveldb/leveldb/util"

    "github.com/ethereum/go-ethereum/p2p/enode"

    "github.com/status-im/status-go/db"
    "github.com/status-im/status-go/eth-node/types"
)

// NewPeerRecord returns an instance of the peer record.
func NewPeerRecord(node *enode.Node) PeerRecord {
    return PeerRecord{node: node}
}

// PeerRecord is the data associated with each peer that is stored on disk.
// PeerRecord is stored with an enode as a key in leveldb, and the body marshalled as JSON.
type PeerRecord struct {
    node *enode.Node

    // last time it was used.
    LastUsed time.Time
}

// Encode encodes PeerRecords to bytes.
func (r PeerRecord) Encode() ([]byte, error) {
    return json.Marshal(r)
}

// ID returns the enode identity of the node.
func (r PeerRecord) ID() enode.ID {
    return r.node.ID()
}

// Node returns a pointer to the original object.
// enode.Node doesn't allow modification on the object.
func (r PeerRecord) Node() *enode.Node {
    return r.node
}

// EncodeKey returns bytes that should be used as a key in persistent storage.
func (r PeerRecord) EncodeKey() ([]byte, error) {
    return r.Node().MarshalText()
}

// NewCache returns a pointer to a Cache instance.
func NewCache(db *leveldb.DB) *Cache {
    return &Cache{db: db}
}

// Cache is a wrapper for on-disk operations with leveldb.
type Cache struct {
    db *leveldb.DB
}

// Replace deletes old and adds new records in the persistent cache.
func (c *Cache) Replace(nodes []*enode.Node) error {
    batch := new(leveldb.Batch)
    iter := createPeersIterator(c.db)
    defer iter.Release()
    newNodes := nodesToMap(nodes)
    for iter.Next() {
        record, err := unmarshalKeyValue(keyWithoutPrefix(iter.Key()), iter.Value())
        if err != nil {
            return err
        }
        if _, exist := newNodes[types.EnodeID(record.ID())]; exist {
            delete(newNodes, types.EnodeID(record.ID()))
        } else {
            batch.Delete(iter.Key())
        }
    }
    for _, n := range newNodes {
        enodeKey, err := n.MarshalText()
        if err != nil {
            return err
        }
        // nil is stored as the value because a fresh record has no state associated with it yet.
        batch.Put(db.Key(db.MailserversCache, enodeKey), nil)
    }
    return c.db.Write(batch, nil)
}

// LoadAll loads all records from the persistent database.
func (c *Cache) LoadAll() (rst []PeerRecord, err error) {
    iter := createPeersIterator(c.db)
    for iter.Next() {
        record, err := unmarshalKeyValue(keyWithoutPrefix(iter.Key()), iter.Value())
        if err != nil {
            return nil, err
        }
        rst = append(rst, record)
    }
    return rst, nil
}

// UpdateRecord updates a single record.
func (c *Cache) UpdateRecord(record PeerRecord) error {
    enodeKey, err := record.EncodeKey()
    if err != nil {
        return err
    }
    value, err := record.Encode()
    if err != nil {
        return err
    }
    return c.db.Put(db.Key(db.MailserversCache, enodeKey), value, nil)
}

func unmarshalKeyValue(key, value []byte) (record PeerRecord, err error) {
    enodeKey := key
    node := new(enode.Node)
    err = node.UnmarshalText(enodeKey)
    if err != nil {
        return record, err
    }
    record = PeerRecord{node: node}
    if len(value) != 0 {
        err = json.Unmarshal(value, &record)
    }
    return record, err
}

func nodesToMap(nodes []*enode.Node) map[types.EnodeID]*enode.Node {
    rst := map[types.EnodeID]*enode.Node{}
    for _, n := range nodes {
        rst[types.EnodeID(n.ID())] = n
    }
    return rst
}

func createPeersIterator(level *leveldb.DB) iterator.Iterator {
    return level.NewIterator(util.BytesPrefix([]byte{byte(db.MailserversCache)}), nil)
}

// keyWithoutPrefix removes the first byte from the key.
func keyWithoutPrefix(key []byte) []byte {
    return key[1:]
}
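For orientation, a minimal usage sketch (not from the original tree) of the cache above, using the same in-memory leveldb backend the tests below rely on; node is a placeholder *enode.Node:

    ldb, err := leveldb.Open(storage.NewMemStorage(), nil) // "github.com/syndtr/goleveldb/leveldb/storage"
    if err != nil {
        return err
    }
    cache := NewCache(ldb)
    if err := cache.Replace([]*enode.Node{node}); err != nil { // persist the selection
        return err
    }
    records, err := cache.LoadAll() // one PeerRecord, zero LastUsed until UpdateRecord is called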
@ -1,81 +0,0 @@
package mailservers

import (
    "fmt"
    "testing"
    "time"

    "github.com/stretchr/testify/require"
    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/storage"

    "github.com/ethereum/go-ethereum/p2p/enode"
)

func newInMemCache(t *testing.T) *Cache {
    db, err := leveldb.Open(storage.NewMemStorage(), nil)
    require.NoError(t, err)
    return NewCache(db)
}

func containsNode(nodes []*enode.Node, node *enode.Node) error {
    for _, n := range nodes {
        if n.ID() == node.ID() {
            return nil
        }
    }
    return fmt.Errorf("nodes %+s do not contain %s", nodes, node)
}

func TestReplaceRecords(t *testing.T) {
    nodesNumber := 3
    cache := newInMemCache(t)
    nodes := make([]*enode.Node, nodesNumber)
    // First round is a sanity check that records were written.
    fillWithRandomNodes(t, nodes)
    require.NoError(t, cache.Replace(nodes))
    records, err := cache.LoadAll()
    require.NoError(t, err)
    require.Len(t, records, nodesNumber)
    for i := range records {
        require.NoError(t, containsNode(nodes, records[i].Node()))
    }
    // Replace all nodes and verify that length is the same and loaded records are found.
    fillWithRandomNodes(t, nodes)
    require.NoError(t, cache.Replace(nodes))
    records, err = cache.LoadAll()
    require.NoError(t, err)
    require.Len(t, records, nodesNumber)
    for i := range records {
        require.NoError(t, containsNode(nodes, records[i].Node()))
    }
}

func TestUsedRecord(t *testing.T) {
    cache := newInMemCache(t)
    node, err := RandomNode()
    require.NoError(t, err)
    record := PeerRecord{node: node}
    require.NoError(t, cache.UpdateRecord(record))
    record.LastUsed = time.Now()
    require.NoError(t, cache.UpdateRecord(record))
    records, err := cache.LoadAll()
    require.NoError(t, err)
    require.Len(t, records, 1)
    require.True(t, record.LastUsed.Equal(records[0].LastUsed))
}

func TestTimestampPreservedOnReplace(t *testing.T) {
    cache := newInMemCache(t)
    node, err := RandomNode()
    require.NoError(t, err)
    record := PeerRecord{node: node, LastUsed: time.Now()}
    require.NoError(t, cache.UpdateRecord(record))
    require.NoError(t, cache.Replace([]*enode.Node{node}))
    records, err := cache.LoadAll()
    require.NoError(t, err)
    require.Len(t, records, 1)
    require.Equal(t, node.ID(), records[0].Node().ID())
    require.False(t, records[0].LastUsed.IsZero(), "timestamp should be preserved and not equal to zero")
}
@ -1,271 +0,0 @@
package mailservers

import (
    "sync"
    "time"

    "github.com/ethereum/go-ethereum/event"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/p2p"
    "github.com/ethereum/go-ethereum/p2p/enode"

    "github.com/status-im/status-go/eth-node/types"
)

const (
    peerEventsBuffer    = 10 // sufficient buffer to avoid blocking a p2p feed.
    whisperEventsBuffer = 20 // sufficient buffer to avoid blocking an eventSub envelopes feed.
)

// PeerAdderRemover is an interface for adding or removing peers.
type PeerAdderRemover interface {
    AddPeer(node *enode.Node)
    RemovePeer(node *enode.Node)
}

// PeerEventsSubscriber is an interface to subscribe for p2p.PeerEvent's.
type PeerEventsSubscriber interface {
    SubscribeEvents(chan *p2p.PeerEvent) event.Subscription
}

// EnvelopeEventSubscriber is an interface to subscribe for types.EnvelopeEvent's.
type EnvelopeEventSubscriber interface {
    SubscribeEnvelopeEvents(chan<- types.EnvelopeEvent) types.Subscription
}

type p2pServer interface {
    PeerAdderRemover
    PeerEventsSubscriber
}

// NewConnectionManager creates an instance of ConnectionManager.
func NewConnectionManager(server p2pServer, eventSub EnvelopeEventSubscriber, target, maxFailures int, timeout time.Duration) *ConnectionManager {
    return &ConnectionManager{
        server:           server,
        eventSub:         eventSub,
        connectedTarget:  target,
        maxFailures:      maxFailures,
        notifications:    make(chan []*enode.Node),
        timeoutWaitAdded: timeout,
    }
}

// ConnectionManager keeps the target number of peers connected.
type ConnectionManager struct {
    wg   sync.WaitGroup
    quit chan struct{}

    server   p2pServer
    eventSub EnvelopeEventSubscriber

    notifications    chan []*enode.Node
    connectedTarget  int
    timeoutWaitAdded time.Duration
    maxFailures      int
}

// Notify sends a non-blocking notification about new nodes.
func (ps *ConnectionManager) Notify(nodes []*enode.Node) {
    ps.wg.Add(1)
    go func() {
        select {
        case ps.notifications <- nodes:
        case <-ps.quit:
        }
        ps.wg.Done()
    }()
}

// Start subscribes to a p2p server and handles new peers and state updates for those peers.
func (ps *ConnectionManager) Start() {
    ps.quit = make(chan struct{})
    ps.wg.Add(1)
    go func() {
        state := newInternalState(ps.server, ps.connectedTarget, ps.timeoutWaitAdded)
        events := make(chan *p2p.PeerEvent, peerEventsBuffer)
        sub := ps.server.SubscribeEvents(events)
        whisperEvents := make(chan types.EnvelopeEvent, whisperEventsBuffer)
        whisperSub := ps.eventSub.SubscribeEnvelopeEvents(whisperEvents)
        requests := map[types.Hash]struct{}{}
        failuresPerServer := map[types.EnodeID]int{}

        defer sub.Unsubscribe()
        defer whisperSub.Unsubscribe()
        defer ps.wg.Done()
        for {
            select {
            case <-ps.quit:
                return
            case err := <-sub.Err():
                log.Error("retry after error subscribing to p2p events", "error", err)
                return
            case err := <-whisperSub.Err():
                log.Error("retry after error subscribing to eventSub events", "error", err)
                return
            case newNodes := <-ps.notifications:
                state.processReplacement(newNodes, events)
            case ev := <-events:
                processPeerEvent(state, ev)
            case ev := <-whisperEvents:
                // TODO treat failed requests the same way as expired
                switch ev.Event {
                case types.EventMailServerRequestSent:
                    requests[ev.Hash] = struct{}{}
                case types.EventMailServerRequestCompleted:
                    // reset failures count on first success
                    failuresPerServer[ev.Peer] = 0
                    delete(requests, ev.Hash)
                case types.EventMailServerRequestExpired:
                    _, exist := requests[ev.Hash]
                    if !exist {
                        continue
                    }
                    failuresPerServer[ev.Peer]++
                    log.Debug("request to a mail server expired, disconnect a peer", "address", ev.Peer)
                    if failuresPerServer[ev.Peer] >= ps.maxFailures {
                        state.nodeDisconnected(ev.Peer)
                    }
                }
            }
        }
    }()
}

// Stop gracefully closes all background goroutines and waits until they finish.
func (ps *ConnectionManager) Stop() {
    if ps.quit == nil {
        return
    }
    select {
    case <-ps.quit:
        return
    default:
    }
    close(ps.quit)
    ps.wg.Wait()
    ps.quit = nil
}

func (state *internalState) processReplacement(newNodes []*enode.Node, events <-chan *p2p.PeerEvent) {
    replacement := map[types.EnodeID]*enode.Node{}
    for _, n := range newNodes {
        replacement[types.EnodeID(n.ID())] = n
    }
    state.replaceNodes(replacement)
    if state.ReachedTarget() {
        log.Debug("already connected with required target", "target", state.target)
        return
    }
    if state.timeout != 0 {
        log.Debug("waiting defined timeout to establish connections",
            "timeout", state.timeout, "target", state.target)
        timer := time.NewTimer(state.timeout)
        waitForConnections(state, timer.C, events)
        timer.Stop()
    }
}

func newInternalState(srv PeerAdderRemover, target int, timeout time.Duration) *internalState {
    return &internalState{
        options:      options{target: target, timeout: timeout},
        srv:          srv,
        connected:    map[types.EnodeID]struct{}{},
        currentNodes: map[types.EnodeID]*enode.Node{},
    }
}

type options struct {
    target  int
    timeout time.Duration
}

type internalState struct {
    options

    srv PeerAdderRemover

    connected    map[types.EnodeID]struct{}
    currentNodes map[types.EnodeID]*enode.Node
}

func (state *internalState) ReachedTarget() bool {
    return len(state.connected) >= state.target
}

func (state *internalState) replaceNodes(new map[types.EnodeID]*enode.Node) {
    for nid, n := range state.currentNodes {
        if _, exist := new[nid]; !exist {
            delete(state.connected, nid)
            state.srv.RemovePeer(n)
        }
    }
    if !state.ReachedTarget() {
        for _, n := range new {
            state.srv.AddPeer(n)
        }
    }
    state.currentNodes = new
}

func (state *internalState) nodeAdded(peer types.EnodeID) {
    n, exist := state.currentNodes[peer]
    if !exist {
        return
    }
    if state.ReachedTarget() {
        state.srv.RemovePeer(n)
    } else {
        state.connected[types.EnodeID(n.ID())] = struct{}{}
    }
}

func (state *internalState) nodeDisconnected(peer types.EnodeID) {
    n, exist := state.currentNodes[peer] // unrelated event
    if !exist {
        return
    }
    _, exist = state.connected[peer] // check if already disconnected
    if !exist {
        return
    }
    if len(state.currentNodes) == 1 { // keep node connected if we don't have another choice
        return
    }
    state.srv.RemovePeer(n) // remove peer permanently, otherwise p2p.Server will try to reconnect
    delete(state.connected, peer)
    if !state.ReachedTarget() { // try to connect with any other selected (but not connected) node
        for nid, n := range state.currentNodes {
            _, exist := state.connected[nid]
            if exist || peer == nid {
                continue
            }
            state.srv.AddPeer(n)
        }
    }
}

func processPeerEvent(state *internalState, ev *p2p.PeerEvent) {
    switch ev.Type {
    case p2p.PeerEventTypeAdd:
        log.Debug("connected to a mailserver", "address", ev.Peer)
        state.nodeAdded(types.EnodeID(ev.Peer))
    case p2p.PeerEventTypeDrop:
        log.Debug("mailserver disconnected", "address", ev.Peer)
        state.nodeDisconnected(types.EnodeID(ev.Peer))
    }
}

func waitForConnections(state *internalState, timeout <-chan time.Time, events <-chan *p2p.PeerEvent) {
    for {
        select {
        case ev := <-events:
            processPeerEvent(state, ev)
            if state.ReachedTarget() {
                return
            }
        case <-timeout:
            return
        }
    }
}
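A rough wiring sketch (not from the tree), assuming a running *p2p.Server srv (which satisfies p2pServer), a Waku service w exposing SubscribeEnvelopeEvents, and a slice selected of mailserver nodes:

    // Keep 2 mailservers connected; disconnect one after 3 expired requests;
    // wait up to 10 seconds for replacements to connect.
    cm := NewConnectionManager(srv, w, 2, 3, 10*time.Second)
    cm.Start()
    defer cm.Stop()
    cm.Notify(selected) // hand over the currently selected mailservers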
@ -1,400 +0,0 @@
package mailservers

import (
    "fmt"
    "sync"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    "github.com/ethereum/go-ethereum/event"
    "github.com/ethereum/go-ethereum/p2p"
    "github.com/ethereum/go-ethereum/p2p/enode"

    "github.com/status-im/status-go/eth-node/types"
    "github.com/status-im/status-go/t/utils"
)

type fakePeerEvents struct {
    mu    sync.Mutex
    nodes map[types.EnodeID]struct{}
    input chan *p2p.PeerEvent
}

func (f *fakePeerEvents) Nodes() []types.EnodeID {
    f.mu.Lock()
    rst := make([]types.EnodeID, 0, len(f.nodes))
    for n := range f.nodes {
        rst = append(rst, n)
    }
    f.mu.Unlock()
    return rst
}

func (f *fakePeerEvents) AddPeer(node *enode.Node) {
    f.mu.Lock()
    f.nodes[types.EnodeID(node.ID())] = struct{}{}
    f.mu.Unlock()
    if f.input == nil {
        return
    }
    f.input <- &p2p.PeerEvent{
        Peer: node.ID(),
        Type: p2p.PeerEventTypeAdd,
    }
}

func (f *fakePeerEvents) RemovePeer(node *enode.Node) {
    f.mu.Lock()
    delete(f.nodes, types.EnodeID(node.ID()))
    f.mu.Unlock()
    if f.input == nil {
        return
    }
    f.input <- &p2p.PeerEvent{
        Peer: node.ID(),
        Type: p2p.PeerEventTypeDrop,
    }
}

func newFakePeerAdderRemover() *fakePeerEvents {
    return &fakePeerEvents{nodes: map[types.EnodeID]struct{}{}}
}

func (f *fakePeerEvents) SubscribeEvents(output chan *p2p.PeerEvent) event.Subscription {
    return event.NewSubscription(func(quit <-chan struct{}) error {
        for {
            select {
            case <-quit:
                return nil
            case ev := <-f.input:
                // will block the same way as in any feed
                output <- ev
            }
        }
    })
}

func newFakeServer() *fakePeerEvents {
    srv := newFakePeerAdderRemover()
    srv.input = make(chan *p2p.PeerEvent, 20)
    return srv
}

type fakeEnvelopeEvents struct {
    input chan types.EnvelopeEvent
}

func (f *fakeEnvelopeEvents) SubscribeEnvelopeEvents(output chan<- types.EnvelopeEvent) types.Subscription {
    return event.NewSubscription(func(quit <-chan struct{}) error {
        for {
            select {
            case <-quit:
                return nil
            case ev := <-f.input:
                // will block the same way as in any feed
                output <- ev
            }
        }
    })
}

func newFakeEnvelopesEvents() *fakeEnvelopeEvents {
    return &fakeEnvelopeEvents{
        input: make(chan types.EnvelopeEvent),
    }
}

func fillWithRandomNodes(t *testing.T, nodes []*enode.Node) {
    var err error
    for i := range nodes {
        nodes[i], err = RandomNode()
        require.NoError(t, err)
    }
}

func getMapWithRandomNodes(t *testing.T, n int) map[types.EnodeID]*enode.Node {
    nodes := make([]*enode.Node, n)
    fillWithRandomNodes(t, nodes)
    return nodesToMap(nodes)
}

func mergeOldIntoNew(old, new map[types.EnodeID]*enode.Node) {
    for n := range old {
        new[n] = old[n]
    }
}

func TestReplaceNodes(t *testing.T) {
    type testCase struct {
        description string
        old         map[types.EnodeID]*enode.Node
        new         map[types.EnodeID]*enode.Node
        target      int
    }
    for _, tc := range []testCase{
        {
            "InitialReplace",
            getMapWithRandomNodes(t, 0),
            getMapWithRandomNodes(t, 3),
            2,
        },
        {
            "FullReplace",
            getMapWithRandomNodes(t, 3),
            getMapWithRandomNodes(t, 3),
            2,
        },
    } {
        t.Run(tc.description, func(t *testing.T) {
            peers := newFakePeerAdderRemover()
            state := newInternalState(peers, tc.target, 0)
            state.replaceNodes(tc.old)
            require.Len(t, peers.nodes, len(tc.old))
            for n := range peers.nodes {
                require.Contains(t, tc.old, n)
            }
            state.replaceNodes(tc.new)
            require.Len(t, peers.nodes, len(tc.new))
            for n := range peers.nodes {
                require.Contains(t, tc.new, n)
            }
        })
    }
}

func TestPartialReplaceNodesBelowTarget(t *testing.T) {
    peers := newFakePeerAdderRemover()
    old := getMapWithRandomNodes(t, 1)
    new := getMapWithRandomNodes(t, 2)
    state := newInternalState(peers, 2, 0)
    state.replaceNodes(old)
    mergeOldIntoNew(old, new)
    state.replaceNodes(new)
    require.Len(t, peers.nodes, len(new))
}

func TestPartialReplaceNodesAboveTarget(t *testing.T) {
    peers := newFakePeerAdderRemover()
    initial, err := RandomNode()
    require.NoError(t, err)
    old := nodesToMap([]*enode.Node{initial})
    new := getMapWithRandomNodes(t, 2)
    state := newInternalState(peers, 1, 0)
    state.replaceNodes(old)
    state.nodeAdded(types.EnodeID(initial.ID()))
    mergeOldIntoNew(old, new)
    state.replaceNodes(new)
    require.Len(t, peers.nodes, 1)
}

func TestConnectionManagerAddDrop(t *testing.T) {
    server := newFakeServer()
    whisper := newFakeEnvelopesEvents()
    target := 1
    connmanager := NewConnectionManager(server, whisper, target, 1, 0)
    connmanager.Start()
    defer connmanager.Stop()
    nodes := []*enode.Node{}
    for _, n := range getMapWithRandomNodes(t, 3) {
        nodes = append(nodes, n)
    }
    // Send 3 random nodes to the connection manager.
    connmanager.Notify(nodes)
    var initial enode.ID
    // Wait till the connection manager establishes a connection with 1 peer.
    require.NoError(t, utils.Eventually(func() error {
        nodes := server.Nodes()
        if len(nodes) != target {
            return fmt.Errorf("unexpected number of connected servers: %d", len(nodes))
        }
        initial = enode.ID(nodes[0])
        return nil
    }, time.Second, 100*time.Millisecond))
    // Send an event that the peer was dropped.
    select {
    case server.input <- &p2p.PeerEvent{Peer: initial, Type: p2p.PeerEventTypeDrop}:
    case <-time.After(time.Second):
        require.FailNow(t, "can't send a drop event")
    }
    // The connection manager should establish a connection with any other peer from the initial list.
    require.NoError(t, utils.Eventually(func() error {
        nodes := server.Nodes()
        if len(nodes) != target {
            return fmt.Errorf("unexpected number of connected servers: %d", len(nodes))
        }
        if enode.ID(nodes[0]) == initial {
            return fmt.Errorf("connected node wasn't changed from %s", initial)
        }
        return nil
    }, time.Second, 100*time.Millisecond))
}

func TestConnectionManagerReplace(t *testing.T) {
    server := newFakeServer()
    whisper := newFakeEnvelopesEvents()
    target := 1
    connmanager := NewConnectionManager(server, whisper, target, 1, 0)
    connmanager.Start()
    defer connmanager.Stop()
    nodes := []*enode.Node{}
    for _, n := range getMapWithRandomNodes(t, 3) {
        nodes = append(nodes, n)
    }
    // Send a single node to the connection manager.
    connmanager.Notify(nodes[:1])
    // Wait until this node gets connected.
    require.NoError(t, utils.Eventually(func() error {
        connected := server.Nodes()
        if len(connected) != target {
            return fmt.Errorf("unexpected number of connected servers: %d", len(connected))
        }
        if types.EnodeID(nodes[0].ID()) != connected[0] {
            return fmt.Errorf("connected with a wrong peer. expected %s, got %s", nodes[0].ID(), connected[0])
        }
        return nil
    }, time.Second, 100*time.Millisecond))
    // Replace the previously sent node with 2 different nodes.
    connmanager.Notify(nodes[1:])
    // Wait until the connection manager replaces the node connected in the first round.
    require.NoError(t, utils.Eventually(func() error {
        connected := server.Nodes()
        if len(connected) != target {
            return fmt.Errorf("unexpected number of connected servers: %d", len(connected))
        }
        switch enode.ID(connected[0]) {
        case nodes[1].ID():
        case nodes[2].ID():
        default:
            return fmt.Errorf("connected with unexpected peer. got %s, expected %+v", connected[0], nodes[1:])
        }
        return nil
    }, time.Second, 100*time.Millisecond))
}

func setupTestConnectionAfterExpiry(t *testing.T, server *fakePeerEvents, whisperMock *fakeEnvelopeEvents, target, maxFailures int, hash types.Hash) (*ConnectionManager, types.EnodeID) {
    connmanager := NewConnectionManager(server, whisperMock, target, maxFailures, 0)
    connmanager.Start()
    nodes := []*enode.Node{}
    for _, n := range getMapWithRandomNodes(t, 2) {
        nodes = append(nodes, n)
    }
    // Send two random nodes to the connection manager.
    connmanager.Notify(nodes)
    var initial types.EnodeID
    // Wait until the connection manager establishes a connection with one node.
    require.NoError(t, utils.Eventually(func() error {
        nodes := server.Nodes()
        if len(nodes) != target {
            return fmt.Errorf("unexpected number of connected servers: %d", len(nodes))
        }
        initial = nodes[0]
        return nil
    }, time.Second, 100*time.Millisecond))
    // Send an event that a history request for the connected peer was sent.
    select {
    case whisperMock.input <- types.EnvelopeEvent{
        Event: types.EventMailServerRequestSent, Peer: initial, Hash: hash}:
    case <-time.After(time.Second):
        require.FailNow(t, "can't send a 'sent' event")
    }
    return connmanager, initial
}

func TestConnectionChangedAfterExpiry(t *testing.T) {
    server := newFakeServer()
    whisperMock := newFakeEnvelopesEvents()
    target := 1
    maxFailures := 1
    hash := types.Hash{1}
    connmanager, initial := setupTestConnectionAfterExpiry(t, server, whisperMock, target, maxFailures, hash)
    defer connmanager.Stop()

    // And eventually expired.
    select {
    case whisperMock.input <- types.EnvelopeEvent{
        Event: types.EventMailServerRequestExpired, Peer: initial, Hash: hash}:
    case <-time.After(time.Second):
        require.FailNow(t, "can't send an 'expiry' event")
    }
    require.NoError(t, utils.Eventually(func() error {
        nodes := server.Nodes()
        if len(nodes) != target {
            return fmt.Errorf("unexpected number of connected servers: %d", len(nodes))
        }
        if nodes[0] == initial {
            return fmt.Errorf("connected node wasn't changed from %s", initial)
        }
        return nil
    }, time.Second, 100*time.Millisecond))
}

func TestConnectionChangedAfterSecondExpiry(t *testing.T) {
    server := newFakeServer()
    whisperMock := newFakeEnvelopesEvents()
    target := 1
    maxFailures := 2
    hash := types.Hash{1}
    connmanager, initial := setupTestConnectionAfterExpiry(t, server, whisperMock, target, maxFailures, hash)
    defer connmanager.Stop()

    // The first expiry event is sent. Nothing should happen.
    select {
    case whisperMock.input <- types.EnvelopeEvent{
        Event: types.EventMailServerRequestExpired, Peer: initial, Hash: hash}:
    case <-time.After(time.Second):
        require.FailNow(t, "can't send an 'expiry' event")
    }

    // We use 'Eventually' in place of 'Consistently': it keeps retrying for the whole timeout while an error is returned.
    require.EqualError(t, utils.Eventually(func() error {
        nodes := server.Nodes()
        if len(nodes) != target {
            return fmt.Errorf("unexpected number of connected servers: %d", len(nodes))
        }
        if nodes[0] == initial {
            return fmt.Errorf("connected node wasn't changed from %s", initial)
        }
        return nil
    }, time.Second, 100*time.Millisecond), fmt.Sprintf("connected node wasn't changed from %s", initial))

    // Second expiry event.
    select {
    case whisperMock.input <- types.EnvelopeEvent{
        Event: types.EventMailServerRequestExpired, Peer: initial, Hash: hash}:
    case <-time.After(time.Second):
        require.FailNow(t, "can't send an 'expiry' event")
    }
    require.NoError(t, utils.Eventually(func() error {
        nodes := server.Nodes()
        if len(nodes) != target {
            return fmt.Errorf("unexpected number of connected servers: %d", len(nodes))
        }
        if nodes[0] == initial {
            return fmt.Errorf("connected node wasn't changed from %s", initial)
        }
        return nil
    }, time.Second, 100*time.Millisecond))
}

func TestProcessReplacementWaitsForConnections(t *testing.T) {
    srv := newFakePeerAdderRemover()
    target := 1
    timeout := time.Second
    nodes := make([]*enode.Node, 2)
    fillWithRandomNodes(t, nodes)
    events := make(chan *p2p.PeerEvent)
    state := newInternalState(srv, target, timeout)
    state.currentNodes = nodesToMap(nodes)
    go func() {
        select {
        case events <- &p2p.PeerEvent{Peer: nodes[0].ID(), Type: p2p.PeerEventTypeAdd}:
        case <-time.After(time.Second):
            assert.FailNow(t, "can't send an add event")
        }
    }()
    state.processReplacement(nodes, events)
    require.Len(t, state.connected, 1)
}
@ -1,85 +0,0 @@
package mailservers

import (
    "sync"
    "time"

    "github.com/ethereum/go-ethereum/log"

    "github.com/status-im/status-go/eth-node/types"
)

// NewLastUsedConnectionMonitor returns a pointer to an instance of LastUsedConnectionMonitor.
func NewLastUsedConnectionMonitor(ps *PeerStore, cache *Cache, eventSub EnvelopeEventSubscriber) *LastUsedConnectionMonitor {
    return &LastUsedConnectionMonitor{
        ps:       ps,
        cache:    cache,
        eventSub: eventSub,
    }
}

// LastUsedConnectionMonitor watches relevant events and reflects them in the cache.
type LastUsedConnectionMonitor struct {
    ps    *PeerStore
    cache *Cache

    eventSub EnvelopeEventSubscriber

    quit chan struct{}
    wg   sync.WaitGroup
}

// Start spins a separate goroutine to watch connections.
func (mon *LastUsedConnectionMonitor) Start() {
    mon.quit = make(chan struct{})
    mon.wg.Add(1)
    go func() {
        events := make(chan types.EnvelopeEvent, whisperEventsBuffer)
        sub := mon.eventSub.SubscribeEnvelopeEvents(events)
        defer sub.Unsubscribe()
        defer mon.wg.Done()
        for {
            select {
            case <-mon.quit:
                return
            case err := <-sub.Err():
                log.Error("retry after error subscribing to eventSub events", "error", err)
                return
            case ev := <-events:
                node := mon.ps.Get(ev.Peer)
                if node == nil {
                    continue
                }
                if ev.Event == types.EventMailServerRequestCompleted {
                    err := mon.updateRecord(ev.Peer)
                    if err != nil {
                        log.Error("unable to update storage", "peer", ev.Peer, "error", err)
                    }
                }
            }
        }
    }()
}

func (mon *LastUsedConnectionMonitor) updateRecord(nodeID types.EnodeID) error {
    node := mon.ps.Get(nodeID)
    if node == nil {
        return nil
    }
    return mon.cache.UpdateRecord(PeerRecord{node: node, LastUsed: time.Now()})
}

// Stop closes the channel to signal a quit and waits until all goroutines are stopped.
func (mon *LastUsedConnectionMonitor) Stop() {
    if mon.quit == nil {
        return
    }
    select {
    case <-mon.quit:
        return
    default:
    }
    close(mon.quit)
    mon.wg.Wait()
    mon.quit = nil
}
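A wiring sketch, assuming store, cache, and an envelope-event source w constructed as elsewhere in this package:

    mon := NewLastUsedConnectionMonitor(store, cache, w)
    mon.Start()
    defer mon.Stop()
    // From now on, every EventMailServerRequestCompleted from a peer present in
    // the store refreshes that peer's LastUsed timestamp in the persistent cache.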
@ -1,81 +0,0 @@
package mailservers

import (
    "fmt"
    "sort"
    "testing"
    "time"

    "github.com/stretchr/testify/require"

    "github.com/ethereum/go-ethereum/p2p/enode"

    "github.com/status-im/status-go/eth-node/types"
    "github.com/status-im/status-go/t/utils"
)

func TestUsedConnectionPersisted(t *testing.T) {
    nodes := make([]*enode.Node, 2)
    fillWithRandomNodes(t, nodes)

    cache := newInMemCache(t)
    store := NewPeerStore(cache)
    require.NoError(t, store.Update(nodes))
    whisperMock := newFakeEnvelopesEvents()
    monitor := NewLastUsedConnectionMonitor(store, cache, whisperMock)
    monitor.Start()

    // Send a confirmation that we received history from one of the peers.
    select {
    case whisperMock.input <- types.EnvelopeEvent{
        Event: types.EventMailServerRequestCompleted, Peer: types.EnodeID(nodes[0].ID())}:
    case <-time.After(time.Second):
        require.FailNow(t, "can't send a 'completed' event")
    }

    // Wait until the records are updated in the cache.
    require.NoError(t, utils.Eventually(func() error {
        records, err := cache.LoadAll()
        if err != nil {
            return err
        }
        if lth := len(records); lth != 2 {
            return fmt.Errorf("unexpected length of all records stored in the cache. expected %d got %d", 2, lth)
        }
        var used bool
        for _, r := range records {
            if r.Node().ID() == nodes[0].ID() {
                used = !r.LastUsed.IsZero()
            }
        }
        if !used {
            return fmt.Errorf("record %s is not marked as used", types.EnodeID(nodes[0].ID()))
        }
        return nil
    }, time.Second, 100*time.Millisecond))

    // Use a different peer; the first one becomes the less recently used of the two.
    select {
    case whisperMock.input <- types.EnvelopeEvent{
        Event: types.EventMailServerRequestCompleted, Peer: types.EnodeID(nodes[1].ID())}:
    case <-time.After(time.Second):
        require.FailNow(t, "can't send a 'completed' event")
    }

    require.NoError(t, utils.Eventually(func() error {
        records, err := cache.LoadAll()
        if err != nil {
            return err
        }
        if lth := len(records); lth != 2 {
            return fmt.Errorf("unexpected length of all records stored in the cache. expected %d got %d", 2, lth)
        }
        sort.Slice(records, func(i, j int) bool {
            return records[i].LastUsed.After(records[j].LastUsed)
        })
        if records[0].Node().ID() != nodes[1].ID() {
            return fmt.Errorf("record wasn't updated after previous event")
        }
        return nil
    }, time.Second, 100*time.Millisecond))
}
@ -1,63 +0,0 @@
package mailservers

import (
    "errors"
    "sync"

    "github.com/ethereum/go-ethereum/p2p"
    "github.com/ethereum/go-ethereum/p2p/enode"

    "github.com/status-im/status-go/eth-node/types"
)

var (
    // ErrNoConnected is returned when no mail servers are connected.
    ErrNoConnected = errors.New("no connected mail servers")
)

// PeersProvider is an interface for requesting a list of peers.
type PeersProvider interface {
    Peers() []*p2p.Peer
}

// NewPeerStore returns an instance of PeerStore.
func NewPeerStore(cache *Cache) *PeerStore {
    return &PeerStore{
        nodes: map[types.EnodeID]*enode.Node{},
        cache: cache,
    }
}

// PeerStore stores a list of selected mail servers and keeps N of them connected.
type PeerStore struct {
    mu    sync.RWMutex
    nodes map[types.EnodeID]*enode.Node

    cache *Cache
}

// Exist confirms that a peer was added to the store.
func (ps *PeerStore) Exist(nodeID types.EnodeID) bool {
    ps.mu.RLock()
    defer ps.mu.RUnlock()
    _, exist := ps.nodes[nodeID]
    return exist
}

// Get returns an instance of the node with the requested ID or nil if the ID is not found.
func (ps *PeerStore) Get(nodeID types.EnodeID) *enode.Node {
    ps.mu.RLock()
    defer ps.mu.RUnlock()
    return ps.nodes[nodeID]
}

// Update updates peers locally.
func (ps *PeerStore) Update(nodes []*enode.Node) error {
    ps.mu.Lock()
    ps.nodes = map[types.EnodeID]*enode.Node{}
    for _, n := range nodes {
        ps.nodes[types.EnodeID(n.ID())] = n
    }
    ps.mu.Unlock()
    return ps.cache.Replace(nodes)
}
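A minimal usage sketch, assuming cache is a *Cache and nodes a non-empty []*enode.Node:

    store := NewPeerStore(cache)
    if err := store.Update(nodes); err != nil { // also persists the selection via cache.Replace
        return err
    }
    id := types.EnodeID(nodes[0].ID())
    if store.Exist(id) {
        n := store.Get(id) // the original *enode.Node; nil for unknown IDs
        _ = n
    }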
@ -1,59 +0,0 @@
package mailservers

import (
    "testing"

    "github.com/stretchr/testify/require"

    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/p2p"
    "github.com/ethereum/go-ethereum/p2p/enode"

    "github.com/status-im/status-go/eth-node/types"
)

func RandomNode() (*enode.Node, error) {
    pkey, err := crypto.GenerateKey()
    if err != nil {
        return nil, err
    }
    return enode.NewV4(&pkey.PublicKey, nil, 0, 0), nil
}

func TestUpdateResetsInternalStorage(t *testing.T) {
    store := NewPeerStore(newInMemCache(t))
    r1, err := RandomNode()
    require.NoError(t, err)
    r2, err := RandomNode()
    require.NoError(t, err)
    require.NoError(t, store.Update([]*enode.Node{r1, r2}))
    require.True(t, store.Exist(types.EnodeID(r1.ID())))
    require.True(t, store.Exist(types.EnodeID(r2.ID())))
    require.NoError(t, store.Update([]*enode.Node{r2}))
    require.False(t, store.Exist(types.EnodeID(r1.ID())))
    require.True(t, store.Exist(types.EnodeID(r2.ID())))
}

func TestGetNodeByID(t *testing.T) {
    store := NewPeerStore(newInMemCache(t))
    r1, err := RandomNode()
    require.NoError(t, err)
    require.NoError(t, store.Update([]*enode.Node{r1}))
    require.Equal(t, r1, store.Get(types.EnodeID(r1.ID())))
    require.Nil(t, store.Get(types.EnodeID{1}))
}

type fakePeerProvider struct {
    peers []*p2p.Peer
}

func (f fakePeerProvider) Peers() []*p2p.Peer {
    return f.peers
}

func TestNoConnected(t *testing.T) {
    provider := fakePeerProvider{}
    store := NewPeerStore(newInMemCache(t))
    _, err := GetFirstConnected(provider, store)
    require.EqualError(t, ErrNoConnected, err.Error())
}
@ -1,54 +0,0 @@
package mailservers

import (
    "sort"

    "github.com/ethereum/go-ethereum/p2p/enode"

    "github.com/status-im/status-go/eth-node/types"
)

// GetFirstConnected returns the first connected peer that is also added to the peer store.
// Returns ErrNoConnected if none of the connected peers are in the peer store.
func GetFirstConnected(provider PeersProvider, store *PeerStore) (*enode.Node, error) {
    peers := provider.Peers()
    for _, p := range peers {
        if store.Exist(types.EnodeID(p.ID())) {
            return p.Node(), nil
        }
    }
    return nil, ErrNoConnected
}

// NodesNotifee is an interface to be notified when new nodes are received.
type NodesNotifee interface {
    Notify([]*enode.Node)
}

// EnsureUsedRecordsAddedFirst checks if any nodes were marked as connected before the app went offline.
func EnsureUsedRecordsAddedFirst(ps *PeerStore, conn NodesNotifee) error {
    records, err := ps.cache.LoadAll()
    if err != nil {
        return err
    }
    if len(records) == 0 {
        return nil
    }
    sort.Slice(records, func(i, j int) bool {
        return records[i].LastUsed.After(records[j].LastUsed)
    })
    all := recordsToNodes(records)
    if !records[0].LastUsed.IsZero() {
        conn.Notify(all[:1])
    }
    conn.Notify(all)
    return nil
}

func recordsToNodes(records []PeerRecord) []*enode.Node {
    nodes := make([]*enode.Node, len(records))
    for i := range records {
        nodes[i] = records[i].Node()
    }
    return nodes
}
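To make the double Notify above concrete: with three stored records where only one has a non-zero LastUsed, the notifee sees two calls, which gives the previously used server a head start (TestEnsureNewRecordsAddedFirst below asserts exactly this):

    err := EnsureUsedRecordsAddedFirst(store, connManager) // store, connManager assumed
    // connManager first receives Notify(all[:1]) with the most recently used node,
    // then Notify(all) with every stored record, sorted by LastUsed descending.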
@ -1,58 +0,0 @@
package mailservers

import (
    "testing"
    "time"

    "github.com/stretchr/testify/require"

    "github.com/ethereum/go-ethereum/p2p"
    "github.com/ethereum/go-ethereum/p2p/enode"

    "github.com/status-im/status-go/eth-node/types"
)

func TestGetFirstConnected(t *testing.T) {
    numPeers := 3
    nodes := make([]*enode.Node, numPeers)
    peers := make([]*p2p.Peer, numPeers)
    nodesMap := getMapWithRandomNodes(t, numPeers)
    i := 0
    for _, node := range nodesMap {
        nodes[i] = node
        peers[i] = p2p.NewPeer(node.ID(), node.ID().String(), nil)
        i++
    }
    store := NewPeerStore(newInMemCache(t))
    provider := fakePeerProvider{peers}
    _, err := GetFirstConnected(provider, store)
    require.EqualError(t, ErrNoConnected, err.Error())
    require.NoError(t, store.Update(nodes))
    node, err := GetFirstConnected(provider, store)
    require.NoError(t, err)
    require.Contains(t, nodesMap, types.EnodeID(node.ID()))
}

type trackingNodeNotifee struct {
    calls [][]*enode.Node
}

func (t *trackingNodeNotifee) Notify(nodes []*enode.Node) {
    t.calls = append(t.calls, nodes)
}

func TestEnsureNewRecordsAddedFirst(t *testing.T) {
    notifee := new(trackingNodeNotifee)
    store := NewPeerStore(newInMemCache(t))
    nodes := make([]*enode.Node, 3)
    fillWithRandomNodes(t, nodes)
    require.NoError(t, store.Update(nodes))
    record := NewPeerRecord(nodes[0])
    record.LastUsed = time.Now()
    require.NoError(t, store.cache.UpdateRecord(record))
    require.NoError(t, EnsureUsedRecordsAddedFirst(store, notifee))
    require.Len(t, notifee.calls, 2)
    require.Len(t, notifee.calls[0], 1)
    require.Equal(t, nodes[0].ID(), notifee.calls[0][0].ID())
    require.Len(t, notifee.calls[1], 3)
}
@ -49,7 +49,6 @@ import (
    "github.com/status-im/status-go/server"
    "github.com/status-im/status-go/services/browsers"
    "github.com/status-im/status-go/services/communitytokens"
    "github.com/status-im/status-go/services/ext/mailservers"
    mailserversDB "github.com/status-im/status-go/services/mailservers"
    "github.com/status-im/status-go/services/wallet"
    "github.com/status-im/status-go/services/wallet/collectibles"
@ -65,8 +64,6 @@ const providerID = "community"
type EnvelopeEventsHandler interface {
    EnvelopeSent([][]byte)
    EnvelopeExpired([][]byte, error)
    MailServerRequestCompleted(types.Hash, types.Hash, []byte, error)
    MailServerRequestExpired(types.Hash)
}

// Service is a service that provides some additional API to whisper-based protocols like Whisper or Waku.
@ -78,9 +75,7 @@ type Service struct {
    n               types.Node
    rpcClient       *rpc.Client
    config          params.NodeConfig
    mailMonitor     *MailRequestMonitor
    server          *p2p.Server
    peerStore       *mailservers.PeerStore
    accountsDB      *accounts.Database
    multiAccountsDB *multiaccounts.Database
    account         *multiaccounts.Account
@ -94,18 +89,12 @@ func New(
    n types.Node,
    rpcClient *rpc.Client,
    ldb *leveldb.DB,
    mailMonitor *MailRequestMonitor,
    eventSub mailservers.EnvelopeEventSubscriber,
) *Service {
    cache := mailservers.NewCache(ldb)
    peerStore := mailservers.NewPeerStore(cache)
    return &Service{
        storage:     db.NewLevelDBStorage(ldb),
        n:           n,
        rpcClient:   rpcClient,
        config:      config,
        mailMonitor: mailMonitor,
        peerStore:   peerStore,
        storage:   db.NewLevelDBStorage(ldb),
        n:         n,
        rpcClient: rpcClient,
        config:    config,
    }
}
@ -117,9 +106,6 @@ func (s *Service) NodeID() *ecdsa.PrivateKey {
}

func (s *Service) GetPeer(rawURL string) (*enode.Node, error) {
    if len(rawURL) == 0 {
        return mailservers.GetFirstConnected(s.server, s.peerStore)
    }
    return enode.ParseV4(rawURL)
}
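A behavior sketch for GetPeer, assuming a *Service s; the enode URL is a made-up placeholder:

    node, err := s.GetPeer("") // empty URL: first connected mailserver, or ErrNoConnected
    node, err = s.GetPeer("enode://<node-id-hex>@127.0.0.1:30303") // non-empty: parsed directly, no connectivity check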
@ -150,11 +136,8 @@ func (s *Service) InitProtocol(nodeName string, identity *ecdsa.PrivateKey, appD
    envelopesMonitorConfig := &transport.EnvelopesMonitorConfig{
        MaxAttempts:                      s.config.ShhextConfig.MaxMessageDeliveryAttempts,
        AwaitOnlyMailServerConfirmations: s.config.ShhextConfig.MailServerConfirmations,
        IsMailserver: func(peer types.EnodeID) bool {
            return s.peerStore.Exist(peer)
        },
        EnvelopeEventsHandler: EnvelopeSignalHandler{},
        Logger:                logger,
        EnvelopeEventsHandler: EnvelopeSignalHandler{},
        Logger:                logger,
    }
    s.accountsDB, err = accounts.NewDB(appDb)
    if err != nil {
@ -173,7 +156,6 @@ func (s *Service) InitProtocol(nodeName string, identity *ecdsa.PrivateKey, appD
    identity,
    s.n,
    s.config.ShhextConfig.InstallationID,
    s.peerStore,
    params.Version,
    options...,
)
@ -22,16 +22,6 @@ func (h EnvelopeSignalHandler) EnvelopeExpired(identifiers [][]byte, err error)
    signal.SendEnvelopeExpired(identifiers, err)
}

// MailServerRequestCompleted is triggered when the mailserver sends a message to notify that the request has been completed
func (h EnvelopeSignalHandler) MailServerRequestCompleted(requestID types.Hash, lastEnvelopeHash types.Hash, cursor []byte, err error) {
    signal.SendMailServerRequestCompleted(requestID, lastEnvelopeHash, cursor, err)
}

// MailServerRequestExpired is triggered when the mailserver request expires
func (h EnvelopeSignalHandler) MailServerRequestExpired(hash types.Hash) {
    signal.SendMailServerRequestExpired(hash)
}

// PublisherSignalHandler sends signals on protocol events
type PublisherSignalHandler struct{}
@ -17,7 +17,7 @@ type PublicAPI struct {
// NewPublicAPI returns an instance of the public API.
func NewPublicAPI(s *Service) *PublicAPI {
    return &PublicAPI{
        PublicAPI: ext.NewPublicAPI(s.Service, s.w),
        PublicAPI: ext.NewPublicAPI(s.Service),
        service:   s,
        publicAPI: s.w.PublicWakuAPI(),
        log:       log.New("package", "status-go/services/wakuext.PublicAPI"),
@ -21,14 +21,8 @@ func New(config params.NodeConfig, n types.Node, rpcClient *rpc.Client, handler
    if err != nil {
        panic(err)
    }
    delay := ext.DefaultRequestsDelay
    if config.ShhextConfig.RequestsDelay != 0 {
        delay = config.ShhextConfig.RequestsDelay
    }
    requestsRegistry := ext.NewRequestsRegistry(delay)
    mailMonitor := ext.NewMailRequestMonitor(w, handler, requestsRegistry)
    return &Service{
        Service: ext.New(config, n, rpcClient, ldb, mailMonitor, w),
        Service: ext.New(config, n, rpcClient, ldb),
        w:       w,
    }
}
@ -17,7 +17,7 @@ type PublicAPI struct {
// NewPublicAPI returns an instance of the public API.
func NewPublicAPI(s *Service) *PublicAPI {
    return &PublicAPI{
        PublicAPI: ext.NewPublicAPI(s.Service, s.w),
        PublicAPI: ext.NewPublicAPI(s.Service),
        service:   s,
        publicAPI: s.w.PublicWakuAPI(),
        log:       log.New("package", "status-go/services/wakuext.PublicAPI"),
@ -20,14 +20,8 @@ func New(config params.NodeConfig, n types.Node, rpcClient *rpc.Client, handler
    if err != nil {
        panic(err)
    }
    delay := ext.DefaultRequestsDelay
    if config.ShhextConfig.RequestsDelay != 0 {
        delay = config.ShhextConfig.RequestsDelay
    }
    requestsRegistry := ext.NewRequestsRegistry(delay)
    mailMonitor := ext.NewMailRequestMonitor(w, handler, requestsRegistry)
    return &Service{
        Service: ext.New(config, n, rpcClient, ldb, mailMonitor, w),
        Service: ext.New(config, n, rpcClient, ldb),
        w:       w,
    }
}
@ -20,12 +20,6 @@ const (
    // to any peer
    EventEnvelopeExpired = "envelope.expired"

    // EventMailServerRequestCompleted is triggered when whisper receives a message ack from the mailserver
    EventMailServerRequestCompleted = "mailserver.request.completed"

    // EventMailServerRequestExpired is triggered when the request TTL ends
    EventMailServerRequestExpired = "mailserver.request.expired"

    // EventEnodeDiscovered is triggered when an enode has been discovered.
    EventEnodeDiscovered = "enode.discovered"
@ -165,26 +159,6 @@ func SendUpdateAvailable(available bool, latestVersion string, url string) {
    send(EventUpdateAvailable, UpdateAvailableSignal{Available: available, Version: latestVersion, URL: url})
}

// SendMailServerRequestCompleted is triggered when a mail server response has been received
func SendMailServerRequestCompleted(requestID types.Hash, lastEnvelopeHash types.Hash, cursor []byte, err error) {
    errorMsg := ""
    if err != nil {
        errorMsg = err.Error()
    }
    sig := MailServerResponseSignal{
        RequestID:        requestID,
        LastEnvelopeHash: lastEnvelopeHash,
        Cursor:           hex.EncodeToString(cursor),
        ErrorMsg:         errorMsg,
    }
    send(EventMailServerRequestCompleted, sig)
}

// SendMailServerRequestExpired is triggered when a mail server request expires
func SendMailServerRequestExpired(hash types.Hash) {
    send(EventMailServerRequestExpired, EnvelopeSignal{Hash: hash})
}

// EnodeDiscoveredSignal includes enode address and topic
type EnodeDiscoveredSignal struct {
    Enode string `json:"enode"`
@ -43,23 +43,6 @@ const (

    // EventEnvelopeAvailable fires when an envelope is available for filters
    EventEnvelopeAvailable EventType = "envelope.available"

    // EventMailServerRequestSent fires when such a request is sent.
    EventMailServerRequestSent EventType = "mailserver.request.sent"

    // EventMailServerRequestCompleted fires after the mailserver sends all the requested messages
    EventMailServerRequestCompleted EventType = "mailserver.request.completed"

    // EventMailServerRequestExpired fires after the mailserver request TTL ends.
    // This event is independent and concurrent to EventMailServerRequestCompleted.
    // A request should be considered expired only if the expiry event was received first.
    EventMailServerRequestExpired EventType = "mailserver.request.expired"

    // EventMailServerEnvelopeArchived fires after an envelope has been archived
    EventMailServerEnvelopeArchived EventType = "mailserver.envelope.archived"

    // EventMailServerSyncFinished fires when the sync of messages is finished.
    EventMailServerSyncFinished EventType = "mailserver.sync.finished"
)

// EnvelopeEvent represents an envelope event.
@ -31,8 +31,6 @@ type Peer interface {
    // SetRWWriter sets the socket to read/write
    SetRWWriter(p2p.MsgReadWriter)

    RequestHistoricMessages(*Envelope) error
    SendHistoricMessageResponse([]byte) error
    SendP2PMessages([]*Envelope) error
    SendRawP2PDirect([]rlp.RawValue) error
@ -61,8 +59,6 @@ type WakuHost interface {
    MaxMessageSize() uint32
    // LightClientMode returns whether the host is running in light client mode
    LightClientMode() bool
    // Mailserver returns whether the host is running a mailserver
    Mailserver() bool
    // LightClientModeConnectionRestricted indicates that connections to light clients are not allowed while in light client mode
    LightClientModeConnectionRestricted() bool
    // ConfirmationsEnabled returns true if message confirmations are enabled.
@ -92,14 +88,5 @@ type WakuHost interface {
    // OnMessagesResponse handles when the peer receives a message response
    // from a mailserver
    OnMessagesResponse(MessagesResponse, Peer) error
    // OnMessagesRequest handles when the peer receives a message request;
    // this only works if the peer is a mailserver
    OnMessagesRequest(MessagesRequest, Peer) error
    // OnDeprecatedMessagesRequest handles when the peer receives a message request
    // using the *Envelope format. Currently the only production client (status-mobile)
    // is exclusively using this one.
    OnDeprecatedMessagesRequest(*Envelope, Peer) error

    OnBatchAcknowledged(common.Hash, Peer) error
    OnP2PRequestCompleted([]byte, Peer) error
}
@ -1,179 +0,0 @@
|
|||
// Copyright 2019 The Waku Library Authors.
|
||||
//
|
||||
// The Waku library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The Waku library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty off
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the Waku library. If not, see <http://www.gnu.org/licenses/>.
|
||||
//
|
||||
// This software uses the go-ethereum library, which is licensed
|
||||
// under the GNU Lesser General Public Library, version 3 or any later.
|
||||
|
||||
package waku
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/status-im/status-go/waku/common"
|
||||
|
||||
gethcommon "github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
|
||||
const (
|
||||
mailServerFailedPayloadPrefix = "ERROR="
|
||||
cursorSize = 36
|
||||
)
|
||||
|
||||
// MailServer represents a mail server, capable of
|
||||
// archiving the old messages for subsequent delivery
|
||||
// to the peers. Any implementation must ensure that both
|
||||
// functions are thread-safe. Also, they must return ASAP.
|
||||
// DeliverMail should use p2pMessageCode for delivery,
|
||||
// in order to bypass the expiry checks.
|
||||
type MailServer interface {
|
||||
Archive(env *common.Envelope)
|
||||
DeliverMail(peerID []byte, request *common.Envelope) // DEPRECATED; use Deliver()
|
||||
Deliver(peerID []byte, request common.MessagesRequest)
|
||||
}
|
||||
|
||||
// MailServerResponse is the response payload sent by the mailserver.
|
||||
type MailServerResponse struct {
|
||||
LastEnvelopeHash gethcommon.Hash
|
||||
Cursor []byte
|
||||
Error error
|
||||
}
|
||||
|
||||
func invalidResponseSizeError(size int) error {
|
||||
return fmt.Errorf("unexpected payload size: %d", size)
|
||||
}
|
||||
|
||||
// CreateMailServerRequestCompletedPayload creates a payload representing
|
||||
// a successful request to mailserver
|
||||
func CreateMailServerRequestCompletedPayload(requestID, lastEnvelopeHash gethcommon.Hash, cursor []byte) []byte {
|
||||
payload := make([]byte, len(requestID))
|
||||
copy(payload, requestID[:])
|
||||
payload = append(payload, lastEnvelopeHash[:]...)
|
||||
payload = append(payload, cursor...)
|
||||
return payload
|
||||
}
|
||||
|
||||
// CreateMailServerRequestFailedPayload creates a payload representing
|
||||
// a failed request to a mailserver
|
||||
func CreateMailServerRequestFailedPayload(requestID gethcommon.Hash, err error) []byte {
|
||||
payload := []byte(mailServerFailedPayloadPrefix)
|
||||
payload = append(payload, requestID[:]...)
|
||||
payload = append(payload, []byte(err.Error())...)
|
||||
return payload
|
||||
}
|
||||

// CreateMailServerEvent returns EnvelopeEvent with correct data
// if payload corresponds to any of the known mailserver events:
// * request completed successfully
// * request failed
// If the payload is unknown/unparseable, it returns `nil`
func CreateMailServerEvent(nodeID enode.ID, payload []byte) (*common.EnvelopeEvent, error) {
	if len(payload) < gethcommon.HashLength {
		return nil, invalidResponseSizeError(len(payload))
	}

	event, err := tryCreateMailServerRequestFailedEvent(nodeID, payload)
	if err != nil {
		return nil, err
	} else if event != nil {
		return event, nil
	}

	return tryCreateMailServerRequestCompletedEvent(nodeID, payload)
}

func tryCreateMailServerRequestFailedEvent(nodeID enode.ID, payload []byte) (*common.EnvelopeEvent, error) {
	if len(payload) < gethcommon.HashLength+len(mailServerFailedPayloadPrefix) {
		return nil, nil
	}

	prefix, remainder := extractPrefix(payload, len(mailServerFailedPayloadPrefix))

	if !bytes.Equal(prefix, []byte(mailServerFailedPayloadPrefix)) {
		return nil, nil
	}

	var (
		requestID gethcommon.Hash
		errorMsg  string
	)

	requestID, remainder = extractHash(remainder)
	errorMsg = string(remainder)

	event := common.EnvelopeEvent{
		Peer:  nodeID,
		Hash:  requestID,
		Event: common.EventMailServerRequestCompleted,
		Data: &MailServerResponse{
			Error: errors.New(errorMsg),
		},
	}

	return &event, nil
}

func tryCreateMailServerRequestCompletedEvent(nodeID enode.ID, payload []byte) (*common.EnvelopeEvent, error) {
	// check if payload is
	// - requestID or
	// - requestID + lastEnvelopeHash or
	// - requestID + lastEnvelopeHash + cursor
	// requestID is the hash of the request envelope.
	// lastEnvelopeHash is the last envelope sent by the mail server
	// cursor is the db key, 36 bytes: 4 for the timestamp + 32 for the envelope hash.
	if len(payload) > gethcommon.HashLength*2+cursorSize {
		return nil, invalidResponseSizeError(len(payload))
	}

	var (
		requestID        gethcommon.Hash
		lastEnvelopeHash gethcommon.Hash
		cursor           []byte
	)

	requestID, remainder := extractHash(payload)

	if len(remainder) >= gethcommon.HashLength {
		lastEnvelopeHash, remainder = extractHash(remainder)
	}

	if len(remainder) >= cursorSize {
		cursor = remainder
	}

	event := common.EnvelopeEvent{
		Peer:  nodeID,
		Hash:  requestID,
		Event: common.EventMailServerRequestCompleted,
		Data: &MailServerResponse{
			LastEnvelopeHash: lastEnvelopeHash,
			Cursor:           cursor,
		},
	}

	return &event, nil
}

func extractHash(payload []byte) (gethcommon.Hash, []byte) {
	prefix, remainder := extractPrefix(payload, gethcommon.HashLength)
	return gethcommon.BytesToHash(prefix), remainder
}

func extractPrefix(payload []byte, size int) ([]byte, []byte) {
	return payload[:size], payload[size:]
}
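// Editor's note: an illustrative round trip through the helpers above,
// pairing the payload builder with the parser (identifiers reused from the
// sketch after CreateMailServerRequestFailedPayload; nodeID is any enode.ID):
//
//	payload := CreateMailServerRequestCompletedPayload(requestID, lastHash, cursor)
//	ev, err := CreateMailServerEvent(nodeID, payload)
//	// err == nil, ev.Event == common.EventMailServerRequestCompleted,
//	// and ev.Data.(*MailServerResponse).Cursor holds the cursor bytes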
@@ -4,7 +4,6 @@ import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"net"
@@ -227,16 +226,6 @@ func (p *Peer) handlePacket(packet p2p.Msg) error {
			p.logger.Warn("failed to decode direct message, peer will be disconnected", zap.String("peerID", types.EncodeHex(p.ID())), zap.Error(err))
			return err
		}
	case p2pRequestCode:
		if err := p.handleP2PRequestCode(packet); err != nil {
			p.logger.Warn("failed to decode p2p request message, peer will be disconnected", zap.String("peerID", types.EncodeHex(p.ID())), zap.Error(err))
			return err
		}
	case p2pRequestCompleteCode:
		if err := p.handleP2PRequestCompleteCode(packet); err != nil {
			p.logger.Warn("failed to decode p2p request complete message, peer will be disconnected", zap.String("peerID", types.EncodeHex(p.ID())), zap.Error(err))
			return err
		}
	default:
		// New message types might be implemented in future versions of Waku.
		// For forward compatibility, just ignore.
@@ -289,43 +278,6 @@ func (p *Peer) handleMessageResponseCode(packet p2p.Msg) error {
	return p.host.OnMessagesResponse(response, p)
}

func (p *Peer) handleP2PRequestCode(packet p2p.Msg) error {
	// Must be processed if mail server is implemented. Otherwise ignore.
	if !p.host.Mailserver() {
		return nil
	}

	// Read all data as we will try to decode it possibly twice.
	data, err := ioutil.ReadAll(packet.Payload)
	if err != nil {
		return fmt.Errorf("invalid p2p request messages: %v", err)
	}
	r := bytes.NewReader(data)
	packet.Payload = r

	var requestDeprecated common.Envelope
	errDepReq := packet.Decode(&requestDeprecated)
	if errDepReq == nil {
		return p.host.OnDeprecatedMessagesRequest(&requestDeprecated, p)
	}
	p.logger.Info("failed to decode p2p request message (deprecated)", zap.String("peerID", types.EncodeHex(p.ID())), zap.Error(errDepReq))

	// As we failed to decode the request, let's set the offset
	// to the beginning and try decode it again.
	if _, err := r.Seek(0, io.SeekStart); err != nil {
		return fmt.Errorf("invalid p2p request message: %v", err)
	}

	var request common.MessagesRequest
	errReq := packet.Decode(&request)
	if errReq == nil {
		return p.host.OnMessagesRequest(request, p)
	}
	p.logger.Info("failed to decode p2p request message", zap.String("peerID", types.EncodeHex(p.ID())), zap.Error(errReq))

	return errors.New("invalid p2p request message")
}
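// Editor's note: the function above uses a rewind-and-retry pattern to probe
// two wire formats against one payload. A self-contained sketch of the same
// idea with hypothetical oldFormat/newFormat values (not from this file):
//
//	data, _ := ioutil.ReadAll(payload)
//	r := bytes.NewReader(data)
//	if err := rlp.Decode(r, &oldFormat); err == nil {
//		return handleOld(oldFormat)
//	}
//	// the first attempt consumed the reader, so rewind before retrying
//	if _, err := r.Seek(0, io.SeekStart); err != nil {
//		return err
//	}
//	if err := rlp.Decode(r, &newFormat); err == nil {
//		return handleNew(newFormat)
//	}
//	return errors.New("unknown format")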

func (p *Peer) handleBatchAcknowledgeCode(packet p2p.Msg) error {
	var batchHash gethcommon.Hash
	if err := packet.Decode(&batchHash); err != nil {
@@ -368,18 +320,6 @@ func (p *Peer) handleP2PMessageCode(packet p2p.Msg) error {
	return p.host.OnNewP2PEnvelopes(envelopes)
}

func (p *Peer) handleP2PRequestCompleteCode(packet p2p.Msg) error {
	if !p.trusted {
		return nil
	}

	var payload []byte
	if err := packet.Decode(&payload); err != nil {
		return fmt.Errorf("invalid p2p request complete message: %v", err)
	}
	return p.host.OnP2PRequestCompleted(payload, p)
}

// sendConfirmation sends messageResponseCode and batchAcknowledgedCode messages.
func (p *Peer) sendConfirmation(data []byte, envelopeErrors []common.EnvelopeError) (err error) {
	batchHash := crypto.Keccak256Hash(data)
@@ -4,7 +4,6 @@ import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"net"
@@ -259,16 +258,6 @@ func (p *Peer) handlePacket(packet p2p.Msg) error {
			p.logger.Warn("failed to decode direct message, peer will be disconnected", zap.String("peerID", types.EncodeHex(p.ID())), zap.Error(err))
			return err
		}
	case p2pRequestCode:
		if err := p.handleP2PRequestCode(packet); err != nil {
			p.logger.Warn("failed to decode p2p request message, peer will be disconnected", zap.String("peerID", types.EncodeHex(p.ID())), zap.Error(err))
			return err
		}
	case p2pRequestCompleteCode:
		if err := p.handleP2PRequestCompleteCode(packet); err != nil {
			p.logger.Warn("failed to decode p2p request complete message, peer will be disconnected", zap.String("peerID", types.EncodeHex(p.ID())), zap.Error(err))
			return err
		}
	default:
		// New message types might be implemented in future versions of Waku.
		// For forward compatibility, just ignore.
@@ -321,43 +310,6 @@ func (p *Peer) handleMessageResponseCode(packet p2p.Msg) error {
	return p.host.OnMessagesResponse(response, p)
}

func (p *Peer) handleP2PRequestCode(packet p2p.Msg) error {
	// Must be processed if mail server is implemented. Otherwise ignore.
	if !p.host.Mailserver() {
		return nil
	}

	// Read all data as we will try to decode it possibly twice.
	data, err := ioutil.ReadAll(packet.Payload)
	if err != nil {
		return fmt.Errorf("invalid p2p request messages: %v", err)
	}
	r := bytes.NewReader(data)
	packet.Payload = r

	var requestDeprecated common.Envelope
	errDepReq := packet.Decode(&requestDeprecated)
	if errDepReq == nil {
		return p.host.OnDeprecatedMessagesRequest(&requestDeprecated, p)
	}
	p.logger.Info("failed to decode p2p request message (deprecated)", zap.String("peerID", types.EncodeHex(p.ID())), zap.Error(errDepReq))

	// As we failed to decode the request, let's set the offset
	// to the beginning and try decode it again.
	if _, err := r.Seek(0, io.SeekStart); err != nil {
		return fmt.Errorf("invalid p2p request message: %v", err)
	}

	var request common.MessagesRequest
	errReq := packet.Decode(&request)
	if errReq == nil {
		return p.host.OnMessagesRequest(request, p)
	}
	p.logger.Info("failed to decode p2p request message", zap.String("peerID", types.EncodeHex(p.ID())), zap.Error(errReq))

	return errors.New("invalid p2p request message")
}

func (p *Peer) handleBatchAcknowledgeCode(packet p2p.Msg) error {
	var batchHash gethcommon.Hash
	if err := packet.Decode(&batchHash); err != nil {
@@ -400,18 +352,6 @@ func (p *Peer) handleP2PMessageCode(packet p2p.Msg) error {
	return p.host.OnNewP2PEnvelopes(envelopes)
}

func (p *Peer) handleP2PRequestCompleteCode(packet p2p.Msg) error {
	if !p.trusted {
		return nil
	}

	var payload []byte
	if err := packet.Decode(&payload); err != nil {
		return fmt.Errorf("invalid p2p request complete message: %v", err)
	}
	return p.host.OnP2PRequestCompleted(payload, p)
}

// sendConfirmation sends messageResponseCode and batchAcknowledgedCode messages.
func (p *Peer) sendConfirmation(data []byte, envelopeErrors []common.EnvelopeError) (err error) {
	batchHash := crypto.Keccak256Hash(data)
48	waku/waku.go
@@ -102,8 +102,6 @@ type Waku struct {
	settings   settings     // Holds configuration settings that can be dynamically changed
	settingsMu sync.RWMutex // Mutex to sync the settings access

	mailServer MailServer

	rateLimiter *common.PeerRateLimiter

	envelopeFeed event.Feed
@@ -484,12 +482,6 @@ func (w *Waku) Protocols() []p2p.Protocol {
	return w.protocols
}

// RegisterMailServer registers MailServer interface.
// MailServer will process all the incoming messages with p2pRequestCode.
func (w *Waku) RegisterMailServer(server MailServer) {
	w.mailServer = server
}
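// Editor's note: a hypothetical wiring example; once a MailServer is
// registered, Mailserver() reports true and p2pRequestCode packets are
// routed to it (see handleP2PRequestCode above):
//
//	w := New(nil, nil)
//	w.RegisterMailServer(server) // server implements MailServer
//	_ = w.Start()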

// RegisterRateLimiter registers a rate limiter.
func (w *Waku) RegisterRateLimiter(r *common.PeerRateLimiter) {
	w.rateLimiter = r
@@ -640,14 +632,6 @@ func (w *Waku) AllowP2PMessagesFromPeer(peerID []byte) error {
	return nil
}

func (w *Waku) SendHistoricMessageResponse(peerID []byte, payload []byte) error {
	peer, err := w.getPeer(peerID)
	if err != nil {
		return err
	}
	return peer.SendHistoricMessageResponse(payload)
}

// SendP2PMessages sends a peer-to-peer message to a specific peer.
// It sends one or more envelopes in a single batch.
func (w *Waku) SendP2PMessages(peerID []byte, envelopes ...*common.Envelope) error {
@@ -1126,30 +1110,6 @@ func (w *Waku) OnNewP2PEnvelopes(envelopes []*common.Envelope) error {
	return nil
}

func (w *Waku) Mailserver() bool {
	return w.mailServer != nil
}

func (w *Waku) OnMessagesRequest(request common.MessagesRequest, p common.Peer) error {
	w.mailServer.Deliver(p.ID(), request)
	return nil
}

func (w *Waku) OnDeprecatedMessagesRequest(request *common.Envelope, p common.Peer) error {
	w.mailServer.DeliverMail(p.ID(), request)
	return nil
}

func (w *Waku) OnP2PRequestCompleted(payload []byte, p common.Peer) error {
	msEvent, err := CreateMailServerEvent(p.EnodeID(), payload)
	if err != nil {
		return fmt.Errorf("invalid p2p request complete payload: %v", err)
	}

	w.postP2P(*msEvent)
	return nil
}

func (w *Waku) OnMessagesResponse(response common.MessagesResponse, p common.Peer) error {
	w.envelopeFeed.Send(common.EnvelopeEvent{
		Batch: response.Hash,
@@ -1338,14 +1298,6 @@ func (w *Waku) addAndBridge(envelope *common.Envelope, isP2P bool, bridged bool)
	common.EnvelopesCachedCounter.WithLabelValues("miss").Inc()
	common.EnvelopesSizeMeter.Observe(float64(envelope.Size()))
	w.postEvent(envelope, isP2P) // notify the local node about the new message
	if w.mailServer != nil {
		w.mailServer.Archive(envelope)
		w.envelopeFeed.Send(common.EnvelopeEvent{
			Topic: envelope.Topic,
			Hash:  envelope.Hash(),
			Event: common.EventMailServerEnvelopeArchived,
		})
	}
	// Bridge only envelopes that are not p2p messages.
	// In particular, if a node is a lightweight node,
	// it should not bridge any envelopes.
@@ -323,23 +323,6 @@ func (s *WakuTestSuite) TestWakuTimeDesyncEnvelopeIgnored() {
	}
}

type MockMailserver struct {
	deliverMail func([]byte, *common.Envelope)
}

func (*MockMailserver) Archive(e *common.Envelope) {
}

func (*MockMailserver) Deliver(peerID []byte, r common.MessagesRequest) {
}

func (m *MockMailserver) DeliverMail(peerID []byte, e *common.Envelope) {
	if m.deliverMail != nil {
		m.deliverMail(peerID, e)
	}
}
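// Editor's note: a hypothetical use of the mock in a test, capturing the
// deprecated delivery path (not part of this commit):
//
//	delivered := make(chan []byte, 1)
//	w.RegisterMailServer(&MockMailserver{
//		deliverMail: func(peerID []byte, _ *common.Envelope) { delivered <- peerID },
//	})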

func (s *WakuTestSuite) TestRateLimiterIntegration() {
	conf := &Config{
		MinimumAcceptedPoW: 0,
@@ -370,61 +353,6 @@ func (s *WakuTestSuite) TestRateLimiterIntegration() {
	}
}

func (s *WakuTestSuite) TestMailserverCompletionEvent() {
	w1 := New(nil, nil)
	s.Require().NoError(w1.Start())
	defer func() { handleError(s.T(), w1.Stop()) }()

	rw1, rw2 := p2p.MsgPipe()
	errorc := make(chan error, 1)
	go func() {
		err := w1.HandlePeer(s.newPeer(w1, p2p.NewPeer(enode.ID{}, "1", []p2p.Cap{}), rw1, nil, s.stats), rw1)
		errorc <- err
	}()

	w2 := New(nil, nil)
	s.Require().NoError(w2.Start())
	defer func() { handleError(s.T(), w2.Stop()) }()

	peer2 := s.newPeer(w2, p2p.NewPeer(enode.ID{1}, "1", nil), rw2, nil, s.stats)
	peer2.SetPeerTrusted(true)

	events := make(chan common.EnvelopeEvent)
	sub := w1.SubscribeEnvelopeEvents(events)
	defer sub.Unsubscribe()

	envelopes := []*common.Envelope{{Data: []byte{1}}, {Data: []byte{2}}}
	s.Require().NoError(peer2.Start())
	// Set peer trusted; we know the peer has been added as the handshake was successful
	w1.getPeers()[0].SetPeerTrusted(true)

	s.Require().NoError(peer2.SendP2PMessages(envelopes))
	s.Require().NoError(peer2.SendHistoricMessageResponse(make([]byte, 100)))
	s.Require().NoError(rw2.Close())

	// Wait for all messages to be read
	err := <-errorc
	s.Require().EqualError(err, "p2p: read or write on closed message pipe")

	after := time.After(2 * time.Second)
	count := 0
	for {
		select {
		case <-after:
			s.Require().FailNow("timed out waiting for all events")
		case ev := <-events:
			switch ev.Event {
			case common.EventEnvelopeAvailable:
				count++
			case common.EventMailServerRequestCompleted:
				s.Require().Equal(count, len(envelopes),
					"all envelope available events must be received before the request is completed")
				return
			}
		}
	}
}

// two generic waku nodes handshake
func (s *WakuTestSuite) TestPeerHandshakeWithTwoFullNode() {
	rw1, rw2 := p2p.MsgPipe()