package shhext

import (
	"context"
	"crypto/ecdsa"
	"fmt"
	"os"
	"path/filepath"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/rpc"

	"github.com/status-im/status-go/db"
	"github.com/status-im/status-go/logutils"
	"github.com/status-im/status-go/params"
	"github.com/status-im/status-go/services/shhext/dedup"
	"github.com/status-im/status-go/services/shhext/mailservers"
	"github.com/status-im/status-go/signal"

	protocol "github.com/status-im/status-protocol-go"
	whisper "github.com/status-im/whisper/whisperv6"
	"github.com/syndtr/goleveldb/leveldb"
	"golang.org/x/crypto/sha3"
)

const (
	// defaultConnectionsTarget used in Service.Start if configured connection target is 0.
	defaultConnectionsTarget = 1
	// defaultTimeoutWaitAdded is a timeout to use to establish initial connections.
	defaultTimeoutWaitAdded = 5 * time.Second
)

// EnvelopeEventsHandler is used to handle envelope events and mail server request events.
type EnvelopeEventsHandler interface {
	EnvelopeSent(common.Hash)
	EnvelopeExpired(common.Hash, error)
	MailServerRequestCompleted(common.Hash, common.Hash, []byte, error)
	MailServerRequestExpired(common.Hash)
}

// Service is a service that provides some additional Whisper API.
type Service struct {
	messenger       *protocol.Messenger
	cancelMessenger chan struct{}

	storage          db.TransactionalStorage
	w                *whisper.Whisper
	config           params.ShhextConfig
	envelopesMonitor *EnvelopesMonitor
	mailMonitor      *MailRequestMonitor
	requestsRegistry *RequestsRegistry
	historyUpdates   *HistoryUpdateReactor
	server           *p2p.Server
	nodeID           *ecdsa.PrivateKey
	deduplicator     *dedup.Deduplicator
	peerStore        *mailservers.PeerStore
	cache            *mailservers.Cache
	connManager      *mailservers.ConnectionManager
	lastUsedMonitor  *mailservers.LastUsedConnectionMonitor
}

// Make sure that Service implements node.Service interface.
var _ node.Service = (*Service)(nil)

// New returns a new Service.
func New(w *whisper.Whisper, handler EnvelopeEventsHandler, ldb *leveldb.DB, config params.ShhextConfig) *Service {
	cache := mailservers.NewCache(ldb)
	ps := mailservers.NewPeerStore(cache)
	delay := defaultRequestsDelay
	if config.RequestsDelay != 0 {
		delay = config.RequestsDelay
	}
	requestsRegistry := NewRequestsRegistry(delay)
	historyUpdates := NewHistoryUpdateReactor()
	mailMonitor := &MailRequestMonitor{
		w:                w,
		handler:          handler,
		cache:            map[common.Hash]EnvelopeState{},
		requestsRegistry: requestsRegistry,
	}
	envelopesMonitor := NewEnvelopesMonitor(w, handler, config.MailServerConfirmations, ps, config.MaxMessageDeliveryAttempts)
	return &Service{
		storage:          db.NewLevelDBStorage(ldb),
		w:                w,
		config:           config,
		envelopesMonitor: envelopesMonitor,
		mailMonitor:      mailMonitor,
		requestsRegistry: requestsRegistry,
		historyUpdates:   historyUpdates,
		deduplicator:     dedup.NewDeduplicator(w, ldb),
		peerStore:        ps,
		cache:            cache,
	}
}

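// InitProtocolWithPassword initializes the messenger with a key derived from
// the given password: the hex-encoded SHA3-256 digest of the password is used
// as the database encryption key.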
func (s *Service) InitProtocolWithPassword(address string, password string) error {
	digest := sha3.Sum256([]byte(password))
	encKey := fmt.Sprintf("%x", digest)
	return s.initProtocol(address, encKey, password)
}

// InitProtocolWithEncyptionKey initializes the messenger using the given
// address and encryption key directly, without deriving it from a password.
func (s *Service) InitProtocolWithEncyptionKey(address string, encKey string) error {
	return s.initProtocol(address, encKey, "")
}

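// initProtocol migrates legacy protocol database files to the current layout,
// then creates and starts the protocol Messenger. It is a no-op when PFS is
// disabled in the configuration.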
func (s *Service) initProtocol(address, encKey, password string) error { // nolint: gocyclo
	if !s.config.PFSEnabled {
		return nil
	}

	dataDir := filepath.Clean(s.config.BackupDisabledDataDir)

	if err := os.MkdirAll(dataDir, os.ModePerm); err != nil {
		return err
	}
	v0Path := filepath.Join(dataDir, fmt.Sprintf("%x.db", address))
	v1Path := filepath.Join(dataDir, fmt.Sprintf("%s.db", s.config.InstallationID))
	v2Path := filepath.Join(dataDir, fmt.Sprintf("%s.v2.db", s.config.InstallationID))
	v3Path := filepath.Join(dataDir, fmt.Sprintf("%s.v3.db", s.config.InstallationID))
	v4Path := filepath.Join(dataDir, fmt.Sprintf("%s.v4.db", s.config.InstallationID))

	if password != "" {
		if err := migrateDBFile(v0Path, v1Path, "ON", password); err != nil {
			return err
		}

		if err := migrateDBFile(v1Path, v2Path, password, encKey); err != nil {
			// Remove the db files: they were created with a blank password
			// and never used, so there is no need to re-key in this case.
			os.Remove(v1Path)
			os.Remove(v2Path)
		}
	}

	if err := migrateDBKeyKdfIterations(v2Path, v3Path, encKey); err != nil {
		os.Remove(v2Path)
		os.Remove(v3Path)
	}

	// Fix iOS not encrypting the database.
	if err := encryptDatabase(v3Path, v4Path, encKey); err != nil {
		os.Remove(v3Path)
		os.Remove(v4Path)
	}

	// Desktop was passing a network-dependent directory, which meant that
	// when running on a testnet it would not access the right db. This moves
	// the db from the mainnet location to the root location.
	networkDependentPath := filepath.Join(dataDir, "ethereum", "mainnet_rpc", fmt.Sprintf("%s.v4.db", s.config.InstallationID))
	if _, err := os.Stat(networkDependentPath); err == nil {
		if err := os.Rename(networkDependentPath, v4Path); err != nil {
			return err
		}
	} else if !os.IsNotExist(err) {
		return err
	}

	// In one of the versions we split the database file into multiple ones.
	// Later we discovered that this really hurt performance, so we consolidated
	// it again, in a better way, keeping migrations in separate packages.
	sessionsDatabasePath := filepath.Join(dataDir, fmt.Sprintf("%s.sessions.v4.sql", s.config.InstallationID))
	sessionsStat, sessionsStatErr := os.Stat(sessionsDatabasePath)
	v4PathStat, v4PathStatErr := os.Stat(v4Path)

	if sessionsStatErr == nil && os.IsNotExist(v4PathStatErr) {
		// This is the clear case: the sessions.v4.sql file exists and v4Path does not.
		// The previous migration removed v4Path after successfully copying it into the sessions sql file.
		if err := os.Rename(sessionsDatabasePath, v4Path); err != nil {
			return err
		}
	} else if sessionsStatErr == nil && v4PathStatErr == nil {
		// Both files exist, so the migration to split databases probably failed.
		if sessionsStat.ModTime().After(v4PathStat.ModTime()) {
			// The sessions sql file is newer.
			if err := os.Rename(sessionsDatabasePath, v4Path); err != nil {
				return err
			}
		}
	}

	options, err := buildMessengerOptions(s.config, v4Path, encKey)
	if err != nil {
		return err
	}

	selectedKeyID := s.w.SelectedKeyPairID()
	identity, err := s.w.GetPrivateKey(selectedKeyID)
	if err != nil {
		return err
	}

	messenger, err := protocol.NewMessenger(
		identity,
		&server{server: s.server},
		s.w,
		s.config.InstallationID,
		options...,
	)
	if err != nil {
		return err
	}
	s.messenger = messenger
	// Start a loop that retrieves all messages and propagates them to status-react.
	s.cancelMessenger = make(chan struct{})
	go s.retrieveMessagesLoop(time.Second, s.cancelMessenger)

	return nil
}

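// retrieveMessagesLoop periodically retrieves raw messages from the messenger,
// deduplicates them and propagates them to status-react as a signal, until the
// cancel channel is closed.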
func (s *Service) retrieveMessagesLoop(tick time.Duration, cancel <-chan struct{}) {
	ticker := time.NewTicker(tick)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			chatWithMessages, err := s.messenger.RetrieveRawAll()
			if err != nil {
				log.Error("failed to retrieve raw messages", "err", err)
				continue
			}

			var signalMessages []*signal.Messages

			for chat, messages := range chatWithMessages {
				var retrievedMessages []*whisper.Message
				for _, message := range messages {
					whisperMessage := message.TransportMessage
					whisperMessage.Payload = message.DecryptedPayload
					retrievedMessages = append(retrievedMessages, whisperMessage)
				}

				signalMessage := &signal.Messages{
					Chat:     chat,
					Error:    nil, // TODO: what is it needed for?
					Messages: s.deduplicator.Deduplicate(retrievedMessages),
				}
				signalMessages = append(signalMessages, signalMessage)
			}

			log.Debug("retrieve messages loop", "messages", len(signalMessages))

			if len(signalMessages) == 0 {
				continue
			}

			PublisherSignalHandler{}.NewMessages(signalMessages)
		case <-cancel:
			return
		}
	}
}

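// ConfirmMessagesProcessed confirms the messages with the given IDs as
// processed by delegating to the messenger.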
func (s *Service) ConfirmMessagesProcessed(messageIDs [][]byte) error {
	return s.messenger.ConfirmMessagesProcessed(messageIDs)
}

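// EnableInstallation enables an installation for multi-device sync.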
func (s *Service) EnableInstallation(installationID string) error {
	return s.messenger.EnableInstallation(installationID)
}

// DisableInstallation disables an installation for multi-device sync.
func (s *Service) DisableInstallation(installationID string) error {
	return s.messenger.DisableInstallation(installationID)
}

// UpdateMailservers updates information about selected mail servers.
func (s *Service) UpdateMailservers(nodes []*enode.Node) error {
	if err := s.peerStore.Update(nodes); err != nil {
		return err
	}
	if s.connManager != nil {
		s.connManager.Notify(nodes)
	}
	return nil
}

// Protocols returns a new protocols list. In this case, there are none.
func (s *Service) Protocols() []p2p.Protocol {
	return []p2p.Protocol{}
}

// APIs returns a list of new APIs.
func (s *Service) APIs() []rpc.API {
	apis := []rpc.API{
		{
			Namespace: "shhext",
			Version:   "1.0",
			Service:   NewPublicAPI(s),
			Public:    true,
		},
	}
	return apis
}

// Start is run when a service is started; it is required by the `node.Service` interface.
// It starts the mail server connection manager and the envelope and mail
// request monitors, and records the p2p server and node key.
func (s *Service) Start(server *p2p.Server) error {
	if s.config.EnableConnectionManager {
		connectionsTarget := s.config.ConnectionTarget
		if connectionsTarget == 0 {
			connectionsTarget = defaultConnectionsTarget
		}
		maxFailures := s.config.MaxServerFailures
		// If not defined, change the server on the first expired event.
		if maxFailures == 0 {
			maxFailures = 1
		}
		s.connManager = mailservers.NewConnectionManager(server, s.w, connectionsTarget, maxFailures, defaultTimeoutWaitAdded)
		s.connManager.Start()
		if err := mailservers.EnsureUsedRecordsAddedFirst(s.peerStore, s.connManager); err != nil {
			return err
		}
	}
	if s.config.EnableLastUsedMonitor {
		s.lastUsedMonitor = mailservers.NewLastUsedConnectionMonitor(s.peerStore, s.cache, s.w)
		s.lastUsedMonitor.Start()
	}
	s.envelopesMonitor.Start()
	s.mailMonitor.Start()
	s.nodeID = server.PrivateKey
	s.server = server
	return nil
}

// Stop is run when a service is stopped.
func (s *Service) Stop() error {
	log.Info("Stopping shhext service")
	if s.config.EnableConnectionManager {
		s.connManager.Stop()
	}
	if s.config.EnableLastUsedMonitor {
		s.lastUsedMonitor.Stop()
	}
	s.requestsRegistry.Clear()
	s.envelopesMonitor.Stop()
	s.mailMonitor.Stop()

	if s.cancelMessenger != nil {
		select {
		case <-s.cancelMessenger:
			// channel already closed
		default:
			close(s.cancelMessenger)
			s.cancelMessenger = nil
		}
	}

	if s.messenger != nil {
		if err := s.messenger.Shutdown(); err != nil {
			return err
		}
	}

	return nil
}

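// syncMessages asks the mail server identified by mailServerID to sync
// messages and waits, up to a timeout, for the EventMailServerSyncFinished
// envelope event carrying the response.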
func (s *Service) syncMessages(ctx context.Context, mailServerID []byte, r whisper.SyncMailRequest) (resp whisper.SyncEventResponse, err error) {
	err = s.w.SyncMessages(mailServerID, r)
	if err != nil {
		return
	}

	// Wait for the response, which is received asynchronously as a p2p packet.
	// This packet handler will send an event which contains the response payload.
	events := make(chan whisper.EnvelopeEvent, 1024)
	sub := s.w.SubscribeEnvelopeEvents(events)
	defer sub.Unsubscribe()

	// Add an explicit timeout context, otherwise the request can hang
	// indefinitely if the sender does not specify one. The sender usually
	// issues the request through netcat or a similar tool, so it cannot
	// easily set a timeout itself.
	timeoutCtx, cancel := context.WithTimeout(ctx, time.Second*30)
	defer cancel()

	for {
		select {
		case event := <-events:
			if event.Event != whisper.EventMailServerSyncFinished {
				continue
			}

			log.Info("received EventMailServerSyncFinished event", "data", event.Data)

			var ok bool

			resp, ok = event.Data.(whisper.SyncEventResponse)
			if !ok {
				err = fmt.Errorf("did not understand the response event data")
				return
			}
			return
		case <-timeoutCtx.Done():
			err = timeoutCtx.Err()
			return
		}
	}
}

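// buildMessengerOptions assembles the status-protocol-go options (custom
// logger, database configuration and feature flags) from the shhext config.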
func buildMessengerOptions(config params.ShhextConfig, dbPath, dbKey string) ([]protocol.Option, error) {
	// Create a custom zap.Logger which will forward logs from status-protocol-go to the status-go logger.
	zapLogger, err := logutils.NewZapLoggerWithAdapter(logutils.Logger())
	if err != nil {
		return nil, err
	}

	options := []protocol.Option{
		protocol.WithCustomLogger(zapLogger),
		protocol.WithDatabaseConfig(dbPath, dbKey),
	}

	if !config.DisableGenericDiscoveryTopic {
		options = append(options, protocol.WithGenericDiscoveryTopicSupport())
	}

	if config.DataSyncEnabled {
		options = append(options, protocol.WithDatasync())
	}

	if config.SendV1Messages {
		options = append(options, protocol.WithSendV1Messages())
	}
	return options, nil
}

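// afterPost registers the posted envelope with the envelopes monitor and
// returns the message ID computed for the new message.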
func (s *Service) afterPost(hash []byte, newMessage whisper.NewMessage) hexutil.Bytes {
	s.envelopesMonitor.Add(common.BytesToHash(hash), newMessage)
	mID := messageID(newMessage)
	return mID[:]
}