From 79b8112f89e218e5d24b6c4400eeaf6d4e8ec6db Mon Sep 17 00:00:00 2001 From: Adam Babik Date: Mon, 20 Jan 2020 21:56:06 +0100 Subject: [PATCH] Split shhext into shhext and wakuext (#1803) --- cmd/node-canary/main.go | 3 +- node/get_status_node.go | 31 +- node/geth_node.go | 6 +- protocol/messenger.go | 7 +- services/{shhext => ext}/README.md | 0 services/ext/api.go | 461 ++++++++++ services/ext/api_test.go | 156 ++++ services/{shhext => ext}/context.go | 4 +- services/ext/handler_mock.go | 48 + services/{shhext => ext}/mailrequests.go | 22 +- services/{shhext => ext}/mailrequests_test.go | 8 +- services/{shhext => ext}/mailservers/cache.go | 0 .../{shhext => ext}/mailservers/cache_test.go | 0 .../mailservers/connmanager.go | 14 +- .../mailservers/connmanager_test.go | 0 .../mailservers/connmonitor.go | 14 +- .../mailservers/connmonitor_test.go | 0 .../{shhext => ext}/mailservers/peerstore.go | 0 .../mailservers/peerstore_test.go | 0 services/{shhext => ext}/mailservers/utils.go | 0 .../{shhext => ext}/mailservers/utils_test.go | 0 services/ext/node_mock.go | 36 + services/{shhext => ext}/requests.go | 6 +- services/{shhext => ext}/requests_test.go | 2 +- services/{shhext => ext}/rpc.go | 2 +- services/ext/service.go | 441 ++++++++++ services/{shhext => ext}/signal.go | 5 +- services/shhext/api.go | 167 ---- services/shhext/api_geth.go | 684 ++++----------- services/shhext/api_geth_test.go | 543 +++++++++--- services/shhext/context_geth.go | 14 - services/shhext/history.go | 19 - services/shhext/history_geth.go | 340 -------- services/shhext/history_geth_test.go | 360 -------- services/shhext/service.go | 424 +-------- services/shhext/service_nimbus.go | 390 +-------- services/shhext/service_test.go | 819 ------------------ services/shhext_wakuext_test.go | 69 ++ services/wakuext/api.go | 173 ++++ services/wakuext/api_test.go | 411 +++++++++ services/wakuext/service.go | 50 ++ signal/events_shhext.go | 3 - t/benchmarks/mailserver_test.go | 8 +- t/e2e/whisper/whisper_mailbox_test.go | 3 +- .../status-im/status-go/protocol/messenger.go | 7 +- .../status-im/status-go/whisper/v6/whisper.go | 4 - whisper/whisper.go | 4 - 47 files changed, 2517 insertions(+), 3241 deletions(-) rename services/{shhext => ext}/README.md (100%) create mode 100644 services/ext/api.go create mode 100644 services/ext/api_test.go rename services/{shhext => ext}/context.go (95%) create mode 100644 services/ext/handler_mock.go rename services/{shhext => ext}/mailrequests.go (85%) rename services/{shhext => ext}/mailrequests_test.go (95%) rename services/{shhext => ext}/mailservers/cache.go (100%) rename services/{shhext => ext}/mailservers/cache_test.go (100%) rename services/{shhext => ext}/mailservers/connmanager.go (94%) rename services/{shhext => ext}/mailservers/connmanager_test.go (100%) rename services/{shhext => ext}/mailservers/connmonitor.go (82%) rename services/{shhext => ext}/mailservers/connmonitor_test.go (100%) rename services/{shhext => ext}/mailservers/peerstore.go (100%) rename services/{shhext => ext}/mailservers/peerstore_test.go (100%) rename services/{shhext => ext}/mailservers/utils.go (100%) rename services/{shhext => ext}/mailservers/utils_test.go (100%) create mode 100644 services/ext/node_mock.go rename services/{shhext => ext}/requests.go (95%) rename services/{shhext => ext}/requests_test.go (99%) rename services/{shhext => ext}/rpc.go (99%) create mode 100644 services/ext/service.go rename services/{shhext => ext}/signal.go (92%) delete mode 100644 services/shhext/api.go delete mode 100644 
services/shhext/context_geth.go delete mode 100644 services/shhext/history.go delete mode 100644 services/shhext/history_geth.go delete mode 100644 services/shhext/history_geth_test.go delete mode 100644 services/shhext/service_test.go create mode 100644 services/shhext_wakuext_test.go create mode 100644 services/wakuext/api.go create mode 100644 services/wakuext/api_test.go create mode 100644 services/wakuext/service.go diff --git a/cmd/node-canary/main.go b/cmd/node-canary/main.go index fbd91425e..d0bdf3573 100644 --- a/cmd/node-canary/main.go +++ b/cmd/node-canary/main.go @@ -25,6 +25,7 @@ import ( "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/params" "github.com/status-im/status-go/rpc" + "github.com/status-im/status-go/services/ext" "github.com/status-im/status-go/services/shhext" "github.com/status-im/status-go/t/helpers" ) @@ -170,7 +171,7 @@ func verifyMailserverBehavior(mailserverNode *enode.Node) { // request messages from mailbox shhextAPI := shhext.NewPublicAPI(clientShhExtService) requestIDBytes, err := shhextAPI.RequestMessages(context.TODO(), - shhext.MessagesRequest{ + ext.MessagesRequest{ MailServerPeer: mailserverNode.String(), From: uint32(clientWhisperService.GetCurrentTime().Add(-time.Duration(*period) * time.Second).Unix()), Limit: 1, diff --git a/node/get_status_node.go b/node/get_status_node.go index a946a3aa0..e4c434d08 100644 --- a/node/get_status_node.go +++ b/node/get_status_node.go @@ -25,8 +25,6 @@ import ( "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" - "github.com/status-im/status-go/whisper/v6" - "github.com/status-im/status-go/db" "github.com/status-im/status-go/discovery" "github.com/status-im/status-go/params" @@ -37,7 +35,10 @@ import ( "github.com/status-im/status-go/services/permissions" "github.com/status-im/status-go/services/shhext" "github.com/status-im/status-go/services/status" + "github.com/status-im/status-go/services/wakuext" "github.com/status-im/status-go/services/wallet" + "github.com/status-im/status-go/waku" + "github.com/status-im/status-go/whisper/v6" ) // tickerResolution is the delta to check blockchain sync progress. @@ -585,6 +586,19 @@ func (n *StatusNode) WhisperService() (w *whisper.Whisper, err error) { return } +// WakuService exposes reference to Waku service running on top of the node +func (n *StatusNode) WakuService() (w *waku.Waku, err error) { + n.mu.RLock() + defer n.mu.RUnlock() + + err = n.gethService(&w) + if err == node.ErrServiceUnknown { + err = ErrServiceUnknown + } + + return +} + // ShhExtService exposes reference to shh extension service running on top of the node func (n *StatusNode) ShhExtService() (s *shhext.Service, err error) { n.mu.RLock() @@ -598,6 +612,19 @@ func (n *StatusNode) ShhExtService() (s *shhext.Service, err error) { return } +// WakuExtService exposes reference to waku extension service running on top of the node +func (n *StatusNode) WakuExtService() (s *wakuext.Service, err error) { + n.mu.RLock() + defer n.mu.RUnlock() + + err = n.gethService(&s) + if err == node.ErrServiceUnknown { + err = ErrServiceUnknown + } + + return +} + // WalletService returns wallet.Service instance if it was started.
func (n *StatusNode) WalletService() (s *wallet.Service, err error) { n.mu.RLock() diff --git a/node/geth_node.go b/node/geth_node.go index f334bf880..2066523d8 100644 --- a/node/geth_node.go +++ b/node/geth_node.go @@ -31,12 +31,14 @@ import ( "github.com/status-im/status-go/eth-node/crypto" "github.com/status-im/status-go/mailserver" "github.com/status-im/status-go/params" + "github.com/status-im/status-go/services/ext" "github.com/status-im/status-go/services/incentivisation" "github.com/status-im/status-go/services/nodebridge" "github.com/status-im/status-go/services/peer" "github.com/status-im/status-go/services/personal" "github.com/status-im/status-go/services/shhext" "github.com/status-im/status-go/services/status" + "github.com/status-im/status-go/services/wakuext" "github.com/status-im/status-go/static" "github.com/status-im/status-go/timesource" "github.com/status-im/status-go/waku" @@ -365,7 +367,7 @@ func activateShhService(stack *node.Node, config *params.NodeConfig, db *leveldb if err := ctx.Service(ðnode); err != nil { return nil, err } - return shhext.New(ethnode.Node, ctx, "shhext", shhext.EnvelopeSignalHandler{}, db, config.ShhextConfig), nil + return shhext.New(config.ShhextConfig, ethnode.Node, ctx, ext.EnvelopeSignalHandler{}, db), nil }) } @@ -389,7 +391,7 @@ func activateWakuService(stack *node.Node, config *params.NodeConfig, db *leveld if err := ctx.Service(ðnode); err != nil { return nil, err } - return shhext.New(ethnode.Node, ctx, "wakuext", shhext.EnvelopeSignalHandler{}, db, config.ShhextConfig), nil + return wakuext.New(config.ShhextConfig, ethnode.Node, ctx, ext.EnvelopeSignalHandler{}, db), nil }) } diff --git a/protocol/messenger.go b/protocol/messenger.go index 4baec271e..f0afa68e4 100644 --- a/protocol/messenger.go +++ b/protocol/messenger.go @@ -283,8 +283,7 @@ func NewMessenger( // Initialize transport layer. 
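+ // Prefer the Whisper transport when the node provides it; otherwise fall back to the Waku transport below.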
var transp transport.Transport - - if shh, err := node.GetWhisper(nil); err == nil { + if shh, err := node.GetWhisper(nil); err == nil && shh != nil { transp, err = shhtransp.NewWhisperServiceTransport( shh, identity, @@ -296,10 +295,10 @@ func NewMessenger( if err != nil { return nil, errors.Wrap(err, "failed to create WhisperServiceTransport") } - } else if err != nil { + } else { logger.Info("failed to find Whisper service; trying Waku", zap.Error(err)) waku, err := node.GetWaku(nil) - if err != nil { + if err != nil || waku == nil { return nil, errors.Wrap(err, "failed to find Whisper and Waku services") } transp, err = wakutransp.NewWakuServiceTransport( diff --git a/services/shhext/README.md b/services/ext/README.md similarity index 100% rename from services/shhext/README.md rename to services/ext/README.md diff --git a/services/ext/api.go b/services/ext/api.go new file mode 100644 index 000000000..80fd0be8e --- /dev/null +++ b/services/ext/api.go @@ -0,0 +1,461 @@ +package ext + +import ( + "context" + "encoding/hex" + "errors" + "fmt" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + + "github.com/status-im/status-go/eth-node/types" + enstypes "github.com/status-im/status-go/eth-node/types/ens" + "github.com/status-im/status-go/mailserver" + "github.com/status-im/status-go/params" + "github.com/status-im/status-go/protocol" + "github.com/status-im/status-go/protocol/encryption/multidevice" + "github.com/status-im/status-go/protocol/transport" + "github.com/status-im/status-go/services/ext/mailservers" +) + +const ( + // defaultRequestTimeout is the default request timeout in seconds + defaultRequestTimeout = 10 + + // ensContractAddress is the address of the ENS resolver + ensContractAddress = "0x314159265dd8dbb310642f98f50c066173c1259b" +) + +var ( + // ErrInvalidMailServerPeer is returned when it fails to parse enode from params. + ErrInvalidMailServerPeer = errors.New("invalid mailServerPeer value") + // ErrInvalidSymKeyID is returned when it fails to get a symmetric key. + ErrInvalidSymKeyID = errors.New("invalid symKeyID value") + // ErrInvalidPublicKey is returned when public key can't be extracted + // from MailServer's nodeID. + ErrInvalidPublicKey = errors.New("can't extract public key") + // ErrPFSNotEnabled is returned when an endpoint PFS only is called but + // PFS is disabled + ErrPFSNotEnabled = errors.New("pfs not enabled") +) + +// ----- +// PAYLOADS +// ----- + +// MessagesRequest is a RequestMessages() request payload. +type MessagesRequest struct { + // MailServerPeer is MailServer's enode address. + MailServerPeer string `json:"mailServerPeer"` + + // From is a lower bound of time range (optional). + // Default is 24 hours back from now. + From uint32 `json:"from"` + + // To is a upper bound of time range (optional). + // Default is now. + To uint32 `json:"to"` + + // Limit determines the number of messages sent by the mail server + // for the current paginated request + Limit uint32 `json:"limit"` + + // Cursor is used as starting point for paginated requests + Cursor string `json:"cursor"` + + // Topic is a regular Whisper topic. + // DEPRECATED + Topic types.TopicType `json:"topic"` + + // Topics is a list of Whisper topics. + Topics []types.TopicType `json:"topics"` + + // SymKeyID is an ID of a symmetric key to authenticate to MailServer. + // It's derived from MailServer password. + SymKeyID string `json:"symKeyID"` + + // Timeout is the time to live of the request specified in seconds. 
+ // Default is 10 seconds + Timeout time.Duration `json:"timeout"` + + // Force ensures that requests will bypass enforced delay. + Force bool `json:"force"` +} + +func (r *MessagesRequest) SetDefaults(now time.Time) { + // set From and To defaults + if r.To == 0 { + r.To = uint32(now.UTC().Unix()) + } + + if r.From == 0 { + oneDay := uint32(86400) // -24 hours + if r.To < oneDay { + r.From = 0 + } else { + r.From = r.To - oneDay + } + } + + if r.Timeout == 0 { + r.Timeout = defaultRequestTimeout + } +} + +// MessagesResponse is a response for requestMessages2 method. +type MessagesResponse struct { + // Cursor from the response can be used to retrieve more messages + // for the previous request. + Cursor string `json:"cursor"` + + // Error indicates that something wrong happened when sending messages + // to the requester. + Error error `json:"error"` +} + +// ----- +// PUBLIC API +// ----- + +// PublicAPI extends whisper public API. +type PublicAPI struct { + service *Service + eventSub mailservers.EnvelopeEventSubscriber + log log.Logger +} + +// NewPublicAPI returns instance of the public API. +func NewPublicAPI(s *Service, eventSub mailservers.EnvelopeEventSubscriber) *PublicAPI { + return &PublicAPI{ + service: s, + eventSub: eventSub, + log: log.New("package", "status-go/services/sshext.PublicAPI"), + } +} + +// RetryConfig specifies configuration for retries with timeout and max amount of retries. +type RetryConfig struct { + BaseTimeout time.Duration + // StepTimeout defines duration increase per each retry. + StepTimeout time.Duration + MaxRetries int +} + +func WaitForExpiredOrCompleted(requestID types.Hash, events chan types.EnvelopeEvent, timeout time.Duration) (*types.MailServerResponse, error) { + expired := fmt.Errorf("request %x expired", requestID) + after := time.NewTimer(timeout) + defer after.Stop() + for { + var ev types.EnvelopeEvent + select { + case ev = <-events: + case <-after.C: + return nil, expired + } + if ev.Hash != requestID { + continue + } + switch ev.Event { + case types.EventMailServerRequestCompleted: + data, ok := ev.Data.(*types.MailServerResponse) + if ok { + return data, nil + } + return nil, errors.New("invalid event data type") + case types.EventMailServerRequestExpired: + return nil, expired + } + } +} + +type Author struct { + PublicKey types.HexBytes `json:"publicKey"` + Alias string `json:"alias"` + Identicon string `json:"identicon"` +} + +type Metadata struct { + DedupID []byte `json:"dedupId"` + EncryptionID types.HexBytes `json:"encryptionId"` + MessageID types.HexBytes `json:"messageId"` + Author Author `json:"author"` +} + +// ConfirmMessagesProcessedByID is a method to confirm that messages was consumed by +// the client side. +// TODO: this is broken now as it requires dedup ID while a message hash should be used. +func (api *PublicAPI) ConfirmMessagesProcessedByID(messageConfirmations []*Metadata) error { + confirmationCount := len(messageConfirmations) + dedupIDs := make([][]byte, confirmationCount) + encryptionIDs := make([][]byte, confirmationCount) + for i, confirmation := range messageConfirmations { + dedupIDs[i] = confirmation.DedupID + encryptionIDs[i] = confirmation.EncryptionID + } + return api.service.ConfirmMessagesProcessed(encryptionIDs) +} + +// SendPublicMessage sends a public chat message to the underlying transport. +// Message's payload is a transit encoded message. +// It's important to call PublicAPI.afterSend() so that the client receives a signal +// with confirmation that the message left the device. 
+func (api *PublicAPI) SendPublicMessage(ctx context.Context, msg SendPublicMessageRPC) (types.HexBytes, error) { + chat := protocol.Chat{ + Name: msg.Chat, + } + return api.service.messenger.SendRaw(ctx, chat, msg.Payload) +} + +// SendDirectMessage sends a 1:1 chat message to the underlying transport +// Message's payload is a transit encoded message. +// It's important to call PublicAPI.afterSend() so that the client receives a signal +// with confirmation that the message left the device. +func (api *PublicAPI) SendDirectMessage(ctx context.Context, msg SendDirectMessageRPC) (types.HexBytes, error) { + chat := protocol.Chat{ + ChatType: protocol.ChatTypeOneToOne, + ID: types.EncodeHex(msg.PubKey), + } + + return api.service.messenger.SendRaw(ctx, chat, msg.Payload) +} + +func (api *PublicAPI) Join(chat protocol.Chat) error { + return api.service.messenger.Join(chat) +} + +func (api *PublicAPI) Leave(chat protocol.Chat) error { + return api.service.messenger.Leave(chat) +} + +func (api *PublicAPI) LeaveGroupChat(ctx Context, chatID string) (*protocol.MessengerResponse, error) { + return api.service.messenger.LeaveGroupChat(ctx, chatID) +} + +func (api *PublicAPI) CreateGroupChatWithMembers(ctx Context, name string, members []string) (*protocol.MessengerResponse, error) { + return api.service.messenger.CreateGroupChatWithMembers(ctx, name, members) +} + +func (api *PublicAPI) AddMembersToGroupChat(ctx Context, chatID string, members []string) (*protocol.MessengerResponse, error) { + return api.service.messenger.AddMembersToGroupChat(ctx, chatID, members) +} + +func (api *PublicAPI) RemoveMemberFromGroupChat(ctx Context, chatID string, member string) (*protocol.MessengerResponse, error) { + return api.service.messenger.RemoveMemberFromGroupChat(ctx, chatID, member) +} + +func (api *PublicAPI) AddAdminsToGroupChat(ctx Context, chatID string, members []string) (*protocol.MessengerResponse, error) { + return api.service.messenger.AddAdminsToGroupChat(ctx, chatID, members) +} + +func (api *PublicAPI) ConfirmJoiningGroup(ctx context.Context, chatID string) (*protocol.MessengerResponse, error) { + return api.service.messenger.ConfirmJoiningGroup(ctx, chatID) +} + +func (api *PublicAPI) LoadFilters(parent context.Context, chats []*transport.Filter) ([]*transport.Filter, error) { + return api.service.messenger.LoadFilters(chats) +} + +func (api *PublicAPI) SaveChat(parent context.Context, chat *protocol.Chat) error { + api.log.Info("saving chat", "chat", chat) + return api.service.messenger.SaveChat(chat) +} + +func (api *PublicAPI) Chats(parent context.Context) []*protocol.Chat { + return api.service.messenger.Chats() +} + +func (api *PublicAPI) DeleteChat(parent context.Context, chatID string) error { + return api.service.messenger.DeleteChat(chatID) +} + +func (api *PublicAPI) SaveContact(parent context.Context, contact *protocol.Contact) error { + return api.service.messenger.SaveContact(contact) +} + +func (api *PublicAPI) BlockContact(parent context.Context, contact *protocol.Contact) ([]*protocol.Chat, error) { + api.log.Info("blocking contact", "contact", contact.ID) + return api.service.messenger.BlockContact(contact) +} + +func (api *PublicAPI) Contacts(parent context.Context) []*protocol.Contact { + return api.service.messenger.Contacts() +} + +func (api *PublicAPI) RemoveFilters(parent context.Context, chats []*transport.Filter) error { + return api.service.messenger.RemoveFilters(chats) +} + +// EnableInstallation enables an installation for multi-device sync. 
+func (api *PublicAPI) EnableInstallation(installationID string) error { + return api.service.messenger.EnableInstallation(installationID) +} + +// DisableInstallation disables an installation for multi-device sync. +func (api *PublicAPI) DisableInstallation(installationID string) error { + return api.service.messenger.DisableInstallation(installationID) +} + +// GetOurInstallations returns all the installations available given an identity +func (api *PublicAPI) GetOurInstallations() []*multidevice.Installation { + return api.service.messenger.Installations() +} + +// SetInstallationMetadata sets the metadata for our own installation +func (api *PublicAPI) SetInstallationMetadata(installationID string, data *multidevice.InstallationMetadata) error { + return api.service.messenger.SetInstallationMetadata(installationID, data) +} + +// VerifyENSNames takes a list of ensdetails and returns whether they match the public key specified +func (api *PublicAPI) VerifyENSNames(details []enstypes.ENSDetails) (map[string]enstypes.ENSResponse, error) { + return api.service.messenger.VerifyENSNames(params.MainnetEthereumNetworkURL, ensContractAddress, details) +} + +type ApplicationMessagesResponse struct { + Messages []*protocol.Message `json:"messages"` + Cursor string `json:"cursor"` +} + +func (api *PublicAPI) ChatMessages(chatID, cursor string, limit int) (*ApplicationMessagesResponse, error) { + messages, cursor, err := api.service.messenger.MessageByChatID(chatID, cursor, limit) + if err != nil { + return nil, err + } + + return &ApplicationMessagesResponse{ + Messages: messages, + Cursor: cursor, + }, nil +} + +func (api *PublicAPI) DeleteMessage(id string) error { + return api.service.messenger.DeleteMessage(id) +} + +func (api *PublicAPI) DeleteMessagesByChatID(id string) error { + return api.service.messenger.DeleteMessagesByChatID(id) +} + +func (api *PublicAPI) MarkMessagesSeen(chatID string, ids []string) error { + return api.service.messenger.MarkMessagesSeen(chatID, ids) +} + +func (api *PublicAPI) UpdateMessageOutgoingStatus(id, newOutgoingStatus string) error { + return api.service.messenger.UpdateMessageOutgoingStatus(id, newOutgoingStatus) +} + +func (api *PublicAPI) SendChatMessage(ctx context.Context, message *protocol.Message) (*protocol.MessengerResponse, error) { + return api.service.messenger.SendChatMessage(ctx, message) +} + +func (api *PublicAPI) ReSendChatMessage(ctx context.Context, messageID string) error { + return api.service.messenger.ReSendChatMessage(ctx, messageID) +} + +func (api *PublicAPI) RequestTransaction(ctx context.Context, chatID, value, contract, address string) (*protocol.MessengerResponse, error) { + return api.service.messenger.RequestTransaction(ctx, chatID, value, contract, address) +} + +func (api *PublicAPI) RequestAddressForTransaction(ctx context.Context, chatID, from, value, contract string) (*protocol.MessengerResponse, error) { + return api.service.messenger.RequestAddressForTransaction(ctx, chatID, from, value, contract) +} + +func (api *PublicAPI) DeclineRequestAddressForTransaction(ctx context.Context, messageID string) (*protocol.MessengerResponse, error) { + return api.service.messenger.DeclineRequestAddressForTransaction(ctx, messageID) +} + +func (api *PublicAPI) DeclineRequestTransaction(ctx context.Context, messageID string) (*protocol.MessengerResponse, error) { + return api.service.messenger.DeclineRequestTransaction(ctx, messageID) +} + +func (api *PublicAPI) AcceptRequestAddressForTransaction(ctx context.Context, messageID, address 
string) (*protocol.MessengerResponse, error) { + return api.service.messenger.AcceptRequestAddressForTransaction(ctx, messageID, address) +} + +func (api *PublicAPI) SendTransaction(ctx context.Context, chatID, value, contract, transactionHash string, signature types.HexBytes) (*protocol.MessengerResponse, error) { + return api.service.messenger.SendTransaction(ctx, chatID, value, contract, transactionHash, signature) +} + +func (api *PublicAPI) AcceptRequestTransaction(ctx context.Context, transactionHash, messageID string, signature types.HexBytes) (*protocol.MessengerResponse, error) { + return api.service.messenger.AcceptRequestTransaction(ctx, transactionHash, messageID, signature) +} + +func (api *PublicAPI) SendContactUpdates(ctx context.Context, name, picture string) error { + return api.service.messenger.SendContactUpdates(ctx, name, picture) +} + +func (api *PublicAPI) SendContactUpdate(ctx context.Context, contactID, name, picture string) (*protocol.MessengerResponse, error) { + return api.service.messenger.SendContactUpdate(ctx, contactID, name, picture) +} + +func (api *PublicAPI) SendPairInstallation(ctx context.Context) (*protocol.MessengerResponse, error) { + return api.service.messenger.SendPairInstallation(ctx) +} + +func (api *PublicAPI) SyncDevices(ctx context.Context, name, picture string) error { + return api.service.messenger.SyncDevices(ctx, name, picture) +} + +// Echo is a method for testing purposes. +func (api *PublicAPI) Echo(ctx context.Context, message string) (string, error) { + return message, nil +} + +// ----- +// HELPER +// ----- + +// MakeMessagesRequestPayload makes a specific payload for MailServer +// to request historic messages. +// DEPRECATED +func MakeMessagesRequestPayload(r MessagesRequest) ([]byte, error) { + cursor, err := hex.DecodeString(r.Cursor) + if err != nil { + return nil, fmt.Errorf("invalid cursor: %v", err) + } + + if len(cursor) > 0 && len(cursor) != mailserver.CursorLength { + return nil, fmt.Errorf("invalid cursor size: expected %d but got %d", mailserver.CursorLength, len(cursor)) + } + + payload := mailserver.MessagesRequestPayload{ + Lower: r.From, + Upper: r.To, + Bloom: createBloomFilter(r), + Limit: r.Limit, + Cursor: cursor, + // Client must tell the MailServer if it supports batch responses. + // This can be removed in the future. + Batch: true, + } + + return rlp.EncodeToBytes(payload) +} + +func createBloomFilter(r MessagesRequest) []byte { + if len(r.Topics) > 0 { + return topicsToBloom(r.Topics...) + } + return types.TopicToBloom(r.Topic) +} + +func topicsToBloom(topics ...types.TopicType) []byte { + i := new(big.Int) + for _, topic := range topics { + bloom := types.TopicToBloom(topic) + i.Or(i, new(big.Int).SetBytes(bloom[:])) + } + + combined := make([]byte, types.BloomFilterSize) + data := i.Bytes() + copy(combined[types.BloomFilterSize-len(data):], data[:]) + + return combined +} + +// TopicsToBloom squashes all topics into a single bloom filter. +func TopicsToBloom(topics ...types.TopicType) []byte { + return topicsToBloom(topics...) 
+} diff --git a/services/ext/api_test.go b/services/ext/api_test.go new file mode 100644 index 000000000..6d46c2ca9 --- /dev/null +++ b/services/ext/api_test.go @@ -0,0 +1,156 @@ +package ext + +import ( + "encoding/hex" + "fmt" + "testing" + "time" + + "github.com/status-im/status-go/eth-node/types" + + "github.com/status-im/status-go/mailserver" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestMessagesRequest_setDefaults(t *testing.T) { + daysAgo := func(now time.Time, days int) uint32 { + return uint32(now.UTC().Add(-24 * time.Hour * time.Duration(days)).Unix()) + } + + tnow := time.Now() + now := uint32(tnow.UTC().Unix()) + yesterday := daysAgo(tnow, 1) + + scenarios := []struct { + given *MessagesRequest + expected *MessagesRequest + }{ + { + &MessagesRequest{From: 0, To: 0}, + &MessagesRequest{From: yesterday, To: now, Timeout: defaultRequestTimeout}, + }, + { + &MessagesRequest{From: 1, To: 0}, + &MessagesRequest{From: uint32(1), To: now, Timeout: defaultRequestTimeout}, + }, + { + &MessagesRequest{From: 0, To: yesterday}, + &MessagesRequest{From: daysAgo(tnow, 2), To: yesterday, Timeout: defaultRequestTimeout}, + }, + // 100 - 1 day would be invalid, so we set From to 0 + { + &MessagesRequest{From: 0, To: 100}, + &MessagesRequest{From: 0, To: 100, Timeout: defaultRequestTimeout}, + }, + // set Timeout + { + &MessagesRequest{From: 0, To: 0, Timeout: 100}, + &MessagesRequest{From: yesterday, To: now, Timeout: 100}, + }, + } + + for i, s := range scenarios { + t.Run(fmt.Sprintf("Scenario %d", i), func(t *testing.T) { + s.given.SetDefaults(tnow) + require.Equal(t, s.expected, s.given) + }) + } +} + +func TestMakeMessagesRequestPayload(t *testing.T) { + var emptyTopic types.TopicType + testCases := []struct { + Name string + Req MessagesRequest + Err string + }{ + { + Name: "empty cursor", + Req: MessagesRequest{Cursor: ""}, + Err: "", + }, + { + Name: "invalid cursor size", + Req: MessagesRequest{Cursor: hex.EncodeToString([]byte{0x01, 0x02, 0x03})}, + Err: fmt.Sprintf("invalid cursor size: expected %d but got 3", mailserver.CursorLength), + }, + { + Name: "valid cursor", + Req: MessagesRequest{ + Cursor: hex.EncodeToString(mailserver.NewDBKey(123, emptyTopic, types.Hash{}).Cursor()), + }, + Err: "", + }, + } + + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + _, err := MakeMessagesRequestPayload(tc.Req) + if tc.Err == "" { + require.NoError(t, err) + } else { + require.EqualError(t, err, tc.Err) + } + }) + } +} + +func TestTopicsToBloom(t *testing.T) { + t1 := stringToTopic("t1") + b1 := types.TopicToBloom(t1) + t2 := stringToTopic("t2") + b2 := types.TopicToBloom(t2) + t3 := stringToTopic("t3") + b3 := types.TopicToBloom(t3) + + reqBloom := topicsToBloom(t1) + assert.True(t, types.BloomFilterMatch(reqBloom, b1)) + assert.False(t, types.BloomFilterMatch(reqBloom, b2)) + assert.False(t, types.BloomFilterMatch(reqBloom, b3)) + + reqBloom = topicsToBloom(t1, t2) + assert.True(t, types.BloomFilterMatch(reqBloom, b1)) + assert.True(t, types.BloomFilterMatch(reqBloom, b2)) + assert.False(t, types.BloomFilterMatch(reqBloom, b3)) + + reqBloom = topicsToBloom(t1, t2, t3) + assert.True(t, types.BloomFilterMatch(reqBloom, b1)) + assert.True(t, types.BloomFilterMatch(reqBloom, b2)) + assert.True(t, types.BloomFilterMatch(reqBloom, b3)) +} + +func TestCreateBloomFilter(t *testing.T) { + t1 := stringToTopic("t1") + t2 := stringToTopic("t2") + + req := MessagesRequest{Topic: t1} + bloom := createBloomFilter(req) + assert.Equal(t, 
topicsToBloom(t1), bloom) + + req = MessagesRequest{Topics: []types.TopicType{t1, t2}} + bloom = createBloomFilter(req) + assert.Equal(t, topicsToBloom(t1, t2), bloom) +} + +func stringToTopic(s string) types.TopicType { + return types.BytesToTopic([]byte(s)) +} + +func TestExpiredOrCompleted(t *testing.T) { + timeout := time.Millisecond + events := make(chan types.EnvelopeEvent) + errors := make(chan error, 1) + hash := types.Hash{1} + go func() { + _, err := WaitForExpiredOrCompleted(hash, events, timeout) + errors <- err + }() + select { + case <-time.After(time.Second): + require.FailNow(t, "timed out waiting for waitForExpiredOrCompleted to complete") + case err := <-errors: + require.EqualError(t, err, fmt.Sprintf("request %x expired", hash)) + } +} diff --git a/services/shhext/context.go b/services/ext/context.go similarity index 95% rename from services/shhext/context.go rename to services/ext/context.go index 037857f68..358b197a3 100644 --- a/services/shhext/context.go +++ b/services/ext/context.go @@ -1,4 +1,4 @@ -package shhext +package ext import ( "context" @@ -7,7 +7,7 @@ import ( "github.com/status-im/status-go/db" ) -// ContextKey is a type used for keys in shhext Context. +// ContextKey is a type used for keys in ext Context. type ContextKey struct { Name string } diff --git a/services/ext/handler_mock.go b/services/ext/handler_mock.go new file mode 100644 index 000000000..4f8ef0907 --- /dev/null +++ b/services/ext/handler_mock.go @@ -0,0 +1,48 @@ +package ext + +import ( + "github.com/status-im/status-go/eth-node/types" +) + +type failureMessage struct { + IDs [][]byte + Error error +} + +func NewHandlerMock(buf int) HandlerMock { + return HandlerMock{ + confirmations: make(chan [][]byte, buf), + expirations: make(chan failureMessage, buf), + requestsCompleted: make(chan types.Hash, buf), + requestsExpired: make(chan types.Hash, buf), + requestsFailed: make(chan types.Hash, buf), + } +} + +type HandlerMock struct { + confirmations chan [][]byte + expirations chan failureMessage + requestsCompleted chan types.Hash + requestsExpired chan types.Hash + requestsFailed chan types.Hash +} + +func (t HandlerMock) EnvelopeSent(ids [][]byte) { + t.confirmations <- ids +} + +func (t HandlerMock) EnvelopeExpired(ids [][]byte, err error) { + t.expirations <- failureMessage{IDs: ids, Error: err} +} + +func (t HandlerMock) MailServerRequestCompleted(requestID types.Hash, lastEnvelopeHash types.Hash, cursor []byte, err error) { + if err == nil { + t.requestsCompleted <- requestID + } else { + t.requestsFailed <- requestID + } +} + +func (t HandlerMock) MailServerRequestExpired(hash types.Hash) { + t.requestsExpired <- hash +} diff --git a/services/shhext/mailrequests.go b/services/ext/mailrequests.go similarity index 85% rename from services/shhext/mailrequests.go rename to services/ext/mailrequests.go index c2a8819a9..39fc07354 100644 --- a/services/shhext/mailrequests.go +++ b/services/ext/mailrequests.go @@ -1,6 +1,6 @@ // +build !nimbus -package shhext +package ext import ( "sync" @@ -8,6 +8,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/services/ext/mailservers" ) // EnvelopeState in local tracker @@ -16,18 +17,14 @@ type EnvelopeState int const ( // NotRegistered returned if asked hash wasn't registered in the tracker. NotRegistered EnvelopeState = -1 - // EnvelopePosted is set when envelope was added to a local whisper queue. 
- EnvelopePosted EnvelopeState = iota - // EnvelopeSent is set when envelope is sent to atleast one peer. - EnvelopeSent // MailServerRequestSent is set when p2p request is sent to the mailserver MailServerRequestSent ) // MailRequestMonitor is responsible for monitoring history request to mailservers. type MailRequestMonitor struct { - w types.Whisper - handler EnvelopeEventsHandler + eventSub mailservers.EnvelopeEventSubscriber + handler EnvelopeEventsHandler mu sync.Mutex cache map[types.Hash]EnvelopeState @@ -38,6 +35,15 @@ type MailRequestMonitor struct { quit chan struct{} } +func NewMailRequestMonitor(eventSub mailservers.EnvelopeEventSubscriber, h EnvelopeEventsHandler, reg *RequestsRegistry) *MailRequestMonitor { + return &MailRequestMonitor{ + eventSub: eventSub, + handler: h, + cache: make(map[types.Hash]EnvelopeState), + requestsRegistry: reg, + } +} + // Start processing events. func (m *MailRequestMonitor) Start() { m.quit = make(chan struct{}) @@ -67,7 +73,7 @@ func (m *MailRequestMonitor) GetState(hash types.Hash) EnvelopeState { // handleEnvelopeEvents processes whisper envelope events func (m *MailRequestMonitor) handleEnvelopeEvents() { events := make(chan types.EnvelopeEvent, 100) // must be buffered to prevent blocking whisper - sub := m.w.SubscribeEnvelopeEvents(events) + sub := m.eventSub.SubscribeEnvelopeEvents(events) defer sub.Unsubscribe() for { select { diff --git a/services/shhext/mailrequests_test.go b/services/ext/mailrequests_test.go similarity index 95% rename from services/shhext/mailrequests_test.go rename to services/ext/mailrequests_test.go index b6901b860..2c7b5815f 100644 --- a/services/shhext/mailrequests_test.go +++ b/services/ext/mailrequests_test.go @@ -1,6 +1,6 @@ // +build !nimbus -package shhext +package ext import ( "errors" @@ -34,7 +34,7 @@ func (s *MailRequestMonitorSuite) SetupTest() { } func (s *MailRequestMonitorSuite) TestRequestCompleted() { - mock := newHandlerMock(1) + mock := NewHandlerMock(1) s.monitor.handler = mock s.monitor.cache[testHash] = MailServerRequestSent s.monitor.handleEvent(types.EnvelopeEvent{ @@ -52,7 +52,7 @@ func (s *MailRequestMonitorSuite) TestRequestCompleted() { } func (s *MailRequestMonitorSuite) TestRequestFailed() { - mock := newHandlerMock(1) + mock := NewHandlerMock(1) s.monitor.handler = mock s.monitor.cache[testHash] = MailServerRequestSent s.monitor.handleEvent(types.EnvelopeEvent{ @@ -70,7 +70,7 @@ func (s *MailRequestMonitorSuite) TestRequestFailed() { } func (s *MailRequestMonitorSuite) TestRequestExpiration() { - mock := newHandlerMock(1) + mock := NewHandlerMock(1) s.monitor.handler = mock s.monitor.cache[testHash] = MailServerRequestSent s.monitor.handleEvent(types.EnvelopeEvent{ diff --git a/services/shhext/mailservers/cache.go b/services/ext/mailservers/cache.go similarity index 100% rename from services/shhext/mailservers/cache.go rename to services/ext/mailservers/cache.go diff --git a/services/shhext/mailservers/cache_test.go b/services/ext/mailservers/cache_test.go similarity index 100% rename from services/shhext/mailservers/cache_test.go rename to services/ext/mailservers/cache_test.go diff --git a/services/shhext/mailservers/connmanager.go b/services/ext/mailservers/connmanager.go similarity index 94% rename from services/shhext/mailservers/connmanager.go rename to services/ext/mailservers/connmanager.go index 266fdfcaa..f4c5bf3fe 100644 --- a/services/shhext/mailservers/connmanager.go +++ b/services/ext/mailservers/connmanager.go @@ -14,7 +14,7 @@ import ( const ( peerEventsBuffer = 10 
// sufficient buffer to avoid blocking a p2p feed. - whisperEventsBuffer = 20 // sufficient buffer to avod blocking a whisper envelopes feed. + whisperEventsBuffer = 20 // sufficient buffer to avoid blocking an envelope events feed. ) // PeerAdderRemover is an interface for adding or removing peers. @@ -39,10 +39,10 @@ type p2pServer interface { } // NewConnectionManager creates an instance of ConnectionManager. -func NewConnectionManager(server p2pServer, whisper EnvelopeEventSubscriber, target, maxFailures int, timeout time.Duration) *ConnectionManager { +func NewConnectionManager(server p2pServer, eventSub EnvelopeEventSubscriber, target, maxFailures int, timeout time.Duration) *ConnectionManager { return &ConnectionManager{ server: server, - whisper: whisper, + eventSub: eventSub, connectedTarget: target, maxFailures: maxFailures, notifications: make(chan []*enode.Node), @@ -55,8 +55,8 @@ type ConnectionManager struct { wg sync.WaitGroup quit chan struct{} - server p2pServer - whisper EnvelopeEventSubscriber + server p2pServer + eventSub EnvelopeEventSubscriber notifications chan []*enode.Node connectedTarget int @@ -86,7 +86,7 @@ func (ps *ConnectionManager) Start() { events := make(chan *p2p.PeerEvent, peerEventsBuffer) sub := ps.server.SubscribeEvents(events) whisperEvents := make(chan types.EnvelopeEvent, whisperEventsBuffer) - whisperSub := ps.whisper.SubscribeEnvelopeEvents(whisperEvents) + whisperSub := ps.eventSub.SubscribeEnvelopeEvents(whisperEvents) requests := map[types.Hash]struct{}{} failuresPerServer := map[types.EnodeID]int{} @@ -101,7 +101,7 @@ func (ps *ConnectionManager) Start() { log.Error("retry after error subscribing to p2p events", "error", err) return case err := <-whisperSub.Err(): - log.Error("retry after error suscribing to whisper events", "error", err) + log.Error("retry after error suscribing to eventSub events", "error", err) return case newNodes := <-ps.notifications: state.processReplacement(newNodes, events) diff --git a/services/shhext/mailservers/connmanager_test.go b/services/ext/mailservers/connmanager_test.go similarity index 100% rename from services/shhext/mailservers/connmanager_test.go rename to services/ext/mailservers/connmanager_test.go diff --git a/services/shhext/mailservers/connmonitor.go b/services/ext/mailservers/connmonitor.go similarity index 82% rename from services/shhext/mailservers/connmonitor.go rename to services/ext/mailservers/connmonitor.go index d263f22fc..8d9bd6f40 100644 --- a/services/shhext/mailservers/connmonitor.go +++ b/services/ext/mailservers/connmonitor.go @@ -10,11 +10,11 @@ import ( ) // NewLastUsedConnectionMonitor returns pointer to the instance of LastUsedConnectionMonitor.
-func NewLastUsedConnectionMonitor(ps *PeerStore, cache *Cache, whisper EnvelopeEventSubscriber) *LastUsedConnectionMonitor { +func NewLastUsedConnectionMonitor(ps *PeerStore, cache *Cache, eventSub EnvelopeEventSubscriber) *LastUsedConnectionMonitor { return &LastUsedConnectionMonitor{ - ps: ps, - cache: cache, - whisper: whisper, + ps: ps, + cache: cache, + eventSub: eventSub, } } @@ -23,7 +23,7 @@ type LastUsedConnectionMonitor struct { ps *PeerStore cache *Cache - whisper EnvelopeEventSubscriber + eventSub EnvelopeEventSubscriber quit chan struct{} wg sync.WaitGroup @@ -35,7 +35,7 @@ func (mon *LastUsedConnectionMonitor) Start() { mon.wg.Add(1) go func() { events := make(chan types.EnvelopeEvent, whisperEventsBuffer) - sub := mon.whisper.SubscribeEnvelopeEvents(events) + sub := mon.eventSub.SubscribeEnvelopeEvents(events) defer sub.Unsubscribe() defer mon.wg.Done() for { @@ -43,7 +43,7 @@ func (mon *LastUsedConnectionMonitor) Start() { case <-mon.quit: return case err := <-sub.Err(): - log.Error("retry after error suscribing to whisper events", "error", err) + log.Error("retry after error suscribing to eventSub events", "error", err) return case ev := <-events: node := mon.ps.Get(ev.Peer) diff --git a/services/shhext/mailservers/connmonitor_test.go b/services/ext/mailservers/connmonitor_test.go similarity index 100% rename from services/shhext/mailservers/connmonitor_test.go rename to services/ext/mailservers/connmonitor_test.go diff --git a/services/shhext/mailservers/peerstore.go b/services/ext/mailservers/peerstore.go similarity index 100% rename from services/shhext/mailservers/peerstore.go rename to services/ext/mailservers/peerstore.go diff --git a/services/shhext/mailservers/peerstore_test.go b/services/ext/mailservers/peerstore_test.go similarity index 100% rename from services/shhext/mailservers/peerstore_test.go rename to services/ext/mailservers/peerstore_test.go diff --git a/services/shhext/mailservers/utils.go b/services/ext/mailservers/utils.go similarity index 100% rename from services/shhext/mailservers/utils.go rename to services/ext/mailservers/utils.go diff --git a/services/shhext/mailservers/utils_test.go b/services/ext/mailservers/utils_test.go similarity index 100% rename from services/shhext/mailservers/utils_test.go rename to services/ext/mailservers/utils_test.go diff --git a/services/ext/node_mock.go b/services/ext/node_mock.go new file mode 100644 index 000000000..2eecbef13 --- /dev/null +++ b/services/ext/node_mock.go @@ -0,0 +1,36 @@ +package ext + +import ( + "github.com/status-im/status-go/eth-node/types" + enstypes "github.com/status-im/status-go/eth-node/types/ens" + "go.uber.org/zap" +) + +type TestNodeWrapper struct { + whisper types.Whisper + waku types.Waku +} + +func NewTestNodeWrapper(whisper types.Whisper, waku types.Waku) *TestNodeWrapper { + return &TestNodeWrapper{whisper: whisper, waku: waku} +} + +func (w *TestNodeWrapper) NewENSVerifier(_ *zap.Logger) enstypes.ENSVerifier { + panic("not implemented") +} + +func (w *TestNodeWrapper) GetWhisper(_ interface{}) (types.Whisper, error) { + return w.whisper, nil +} + +func (w *TestNodeWrapper) GetWaku(_ interface{}) (types.Waku, error) { + return w.waku, nil +} + +func (w *TestNodeWrapper) AddPeer(url string) error { + panic("not implemented") +} + +func (w *TestNodeWrapper) RemovePeer(url string) error { + panic("not implemented") +} diff --git a/services/shhext/requests.go b/services/ext/requests.go similarity index 95% rename from services/shhext/requests.go rename to services/ext/requests.go 
index ac6596be4..f138325a5 100644 --- a/services/shhext/requests.go +++ b/services/ext/requests.go @@ -1,4 +1,4 @@ -package shhext +package ext import ( "fmt" @@ -10,8 +10,8 @@ import ( ) const ( - // defaultRequestsDelay will be used in RequestsRegistry if no other was provided. - defaultRequestsDelay = 3 * time.Second + // DefaultRequestsDelay will be used in RequestsRegistry if no other was provided. + DefaultRequestsDelay = 3 * time.Second ) type requestMeta struct { diff --git a/services/shhext/requests_test.go b/services/ext/requests_test.go similarity index 99% rename from services/shhext/requests_test.go rename to services/ext/requests_test.go index e6b876538..e0b6e0445 100644 --- a/services/shhext/requests_test.go +++ b/services/ext/requests_test.go @@ -1,4 +1,4 @@ -package shhext +package ext import ( "testing" diff --git a/services/shhext/rpc.go b/services/ext/rpc.go similarity index 99% rename from services/shhext/rpc.go rename to services/ext/rpc.go index 951b9f663..73eb97d8a 100644 --- a/services/shhext/rpc.go +++ b/services/ext/rpc.go @@ -1,7 +1,7 @@ // TODO: These types should be defined using protobuf, but protoc can only emit []byte instead of types.HexBytes, // which causes issues when marshaling to JSON on the react side. Let's do that once the chat protocol is moved to the go repo. -package shhext +package ext import ( "crypto/ecdsa" diff --git a/services/ext/service.go b/services/ext/service.go new file mode 100644 index 000000000..12b9bc51a --- /dev/null +++ b/services/ext/service.go @@ -0,0 +1,441 @@ +package ext + +import ( + "context" + "crypto/ecdsa" + "database/sql" + "math/big" + "os" + "path/filepath" + "time" + + "github.com/status-im/status-go/services/wallet" + + "github.com/syndtr/goleveldb/leveldb" + + "github.com/status-im/status-go/logutils" + + commongethtypes "github.com/ethereum/go-ethereum/common" + gethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/rpc" + + "github.com/status-im/status-go/db" + "github.com/status-im/status-go/multiaccounts/accounts" + "github.com/status-im/status-go/params" + "github.com/status-im/status-go/services/ext/mailservers" + "github.com/status-im/status-go/signal" + + "go.uber.org/zap" + + coretypes "github.com/status-im/status-go/eth-node/core/types" + "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/protocol" + "github.com/status-im/status-go/protocol/transport" +) + +const ( + // defaultConnectionsTarget used in Service.Start if configured connection target is 0. + defaultConnectionsTarget = 1 + // defaultTimeoutWaitAdded is a timeout to use to establish initial connections. + defaultTimeoutWaitAdded = 5 * time.Second +) + +// EnvelopeEventsHandler used for two different event types. +type EnvelopeEventsHandler interface { + EnvelopeSent([][]byte) + EnvelopeExpired([][]byte, error) + MailServerRequestCompleted(types.Hash, types.Hash, []byte, error) + MailServerRequestExpired(types.Hash) +} + +// Service is a service that provides some additional API to whisper-based protocols like Whisper or Waku. 
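+// It wires the protocol Messenger together with the mailserver peer store, the requests registry and the connection monitors, and is reused by the concrete shhext and wakuext services.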
+type Service struct { + messenger *protocol.Messenger + identity *ecdsa.PrivateKey + cancelMessenger chan struct{} + storage db.TransactionalStorage + n types.Node + config params.ShhextConfig + mailMonitor *MailRequestMonitor + requestsRegistry *RequestsRegistry + server *p2p.Server + eventSub mailservers.EnvelopeEventSubscriber + peerStore *mailservers.PeerStore + cache *mailservers.Cache + connManager *mailservers.ConnectionManager + lastUsedMonitor *mailservers.LastUsedConnectionMonitor + accountsDB *accounts.Database +} + +// Make sure that Service implements node.Service interface. +var _ node.Service = (*Service)(nil) + +func New( + config params.ShhextConfig, + n types.Node, + ldb *leveldb.DB, + mailMonitor *MailRequestMonitor, + reqRegistry *RequestsRegistry, + eventSub mailservers.EnvelopeEventSubscriber, +) *Service { + cache := mailservers.NewCache(ldb) + peerStore := mailservers.NewPeerStore(cache) + return &Service{ + storage: db.NewLevelDBStorage(ldb), + n: n, + config: config, + mailMonitor: mailMonitor, + requestsRegistry: reqRegistry, + peerStore: peerStore, + cache: mailservers.NewCache(ldb), + eventSub: eventSub, + } +} + +func (s *Service) NodeID() *ecdsa.PrivateKey { + if s.server == nil { + return nil + } + return s.server.PrivateKey +} + +func (s *Service) RequestsRegistry() *RequestsRegistry { + return s.requestsRegistry +} + +func (s *Service) GetPeer(rawURL string) (*enode.Node, error) { + if len(rawURL) == 0 { + return mailservers.GetFirstConnected(s.server, s.peerStore) + } + return enode.ParseV4(rawURL) +} + +func (s *Service) InitProtocol(identity *ecdsa.PrivateKey, db *sql.DB) error { // nolint: gocyclo + if !s.config.PFSEnabled { + return nil + } + + // If Messenger has been already set up, we need to shut it down + // before we init it again. Otherwise, it will lead to goroutines leakage + // due to not stopped filters. + if s.messenger != nil { + if err := s.messenger.Shutdown(); err != nil { + return err + } + } + + s.identity = identity + + dataDir := filepath.Clean(s.config.BackupDisabledDataDir) + + if err := os.MkdirAll(dataDir, os.ModePerm); err != nil { + return err + } + + // Create a custom zap.Logger which will forward logs from status-go/protocol to status-go logger. + zapLogger, err := logutils.NewZapLoggerWithAdapter(logutils.Logger()) + if err != nil { + return err + } + + envelopesMonitorConfig := &transport.EnvelopesMonitorConfig{ + MaxAttempts: s.config.MaxMessageDeliveryAttempts, + MailserverConfirmationsEnabled: s.config.MailServerConfirmations, + IsMailserver: func(peer types.EnodeID) bool { + return s.peerStore.Exist(peer) + }, + EnvelopeEventsHandler: EnvelopeSignalHandler{}, + Logger: zapLogger, + } + options := buildMessengerOptions(s.config, db, envelopesMonitorConfig, zapLogger) + + messenger, err := protocol.NewMessenger( + identity, + s.n, + s.config.InstallationID, + options..., + ) + if err != nil { + return err + } + s.accountsDB = accounts.NewDB(db) + s.messenger = messenger + // Start a loop that retrieves all messages and propagates them to status-react. 
+ s.cancelMessenger = make(chan struct{}) + go s.retrieveMessagesLoop(time.Second, s.cancelMessenger) + go s.verifyTransactionLoop(30*time.Second, s.cancelMessenger) + + return s.messenger.Init() +} + +func (s *Service) retrieveMessagesLoop(tick time.Duration, cancel <-chan struct{}) { + ticker := time.NewTicker(tick) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + response, err := s.messenger.RetrieveAll() + if err != nil { + log.Error("failed to retrieve raw messages", "err", err) + continue + } + if !response.IsEmpty() { + PublisherSignalHandler{}.NewMessages(response) + } + case <-cancel: + return + } + } +} + +type verifyTransactionClient struct { + chainID *big.Int + url string +} + +func (c *verifyTransactionClient) TransactionByHash(ctx context.Context, hash types.Hash) (coretypes.Message, coretypes.TransactionStatus, error) { + signer := gethtypes.NewEIP155Signer(c.chainID) + client, err := ethclient.Dial(c.url) + if err != nil { + return coretypes.Message{}, coretypes.TransactionStatusPending, err + } + + transaction, pending, err := client.TransactionByHash(ctx, commongethtypes.BytesToHash(hash.Bytes())) + if err != nil { + return coretypes.Message{}, coretypes.TransactionStatusPending, err + } + + message, err := transaction.AsMessage(signer) + if err != nil { + return coretypes.Message{}, coretypes.TransactionStatusPending, err + } + from := types.BytesToAddress(message.From().Bytes()) + to := types.BytesToAddress(message.To().Bytes()) + + if pending { + return coretypes.NewMessage( + from, + &to, + message.Nonce(), + message.Value(), + message.Gas(), + message.GasPrice(), + message.Data(), + message.CheckNonce(), + ), coretypes.TransactionStatusPending, nil + } + + receipt, err := client.TransactionReceipt(ctx, commongethtypes.BytesToHash(hash.Bytes())) + if err != nil { + return coretypes.Message{}, coretypes.TransactionStatusPending, err + } + + coremessage := coretypes.NewMessage( + from, + &to, + message.Nonce(), + message.Value(), + message.Gas(), + message.GasPrice(), + message.Data(), + message.CheckNonce(), + ) + + // Token transfer, check the logs + if len(coremessage.Data()) != 0 { + if wallet.IsTokenTransfer(receipt.Logs) { + return coremessage, coretypes.TransactionStatus(receipt.Status), nil + } + return coremessage, coretypes.TransactionStatusFailed, nil + } + + return coremessage, coretypes.TransactionStatus(receipt.Status), nil +} + +func (s *Service) verifyTransactionLoop(tick time.Duration, cancel <-chan struct{}) { + if s.config.VerifyTransactionURL == "" { + log.Warn("not starting transaction loop") + return + } + + ticker := time.NewTicker(tick) + defer ticker.Stop() + + ctx, cancelVerifyTransaction := context.WithCancel(context.Background()) + + for { + select { + case <-ticker.C: + accounts, err := s.accountsDB.GetAccounts() + if err != nil { + log.Error("failed to retrieve accounts", "err", err) + } + var wallets []types.Address + for _, account := range accounts { + if account.Wallet { + wallets = append(wallets, types.BytesToAddress(account.Address.Bytes())) + } + } + + response, err := s.messenger.ValidateTransactions(ctx, wallets) + if err != nil { + log.Error("failed to validate transactions", "err", err) + continue + } + if !response.IsEmpty() { + PublisherSignalHandler{}.NewMessages(response) + } + case <-cancel: + cancelVerifyTransaction() + return + } + } +} + +func (s *Service) ConfirmMessagesProcessed(messageIDs [][]byte) error { + return s.messenger.ConfirmMessagesProcessed(messageIDs) +} + +func (s *Service) 
EnableInstallation(installationID string) error { + return s.messenger.EnableInstallation(installationID) +} + +// DisableInstallation disables an installation for multi-device sync. +func (s *Service) DisableInstallation(installationID string) error { + return s.messenger.DisableInstallation(installationID) +} + +// UpdateMailservers updates information about selected mail servers. +func (s *Service) UpdateMailservers(nodes []*enode.Node) error { + if err := s.peerStore.Update(nodes); err != nil { + return err + } + if s.connManager != nil { + s.connManager.Notify(nodes) + } + return nil +} + +// Protocols returns a new protocols list. In this case, there are none. +func (s *Service) Protocols() []p2p.Protocol { + return []p2p.Protocol{} +} + +// APIs returns a list of new APIs. +func (s *Service) APIs() []rpc.API { + panic("this is abstract service, use shhext or wakuext implementation") +} + +// Start is run when a service is started. +// It starts the mail request monitor and, if enabled in the configuration, the connection manager and last-used connection monitor. +func (s *Service) Start(server *p2p.Server) error { + if s.config.EnableConnectionManager { + connectionsTarget := s.config.ConnectionTarget + if connectionsTarget == 0 { + connectionsTarget = defaultConnectionsTarget + } + maxFailures := s.config.MaxServerFailures + // if not defined change server on first expired event + if maxFailures == 0 { + maxFailures = 1 + } + s.connManager = mailservers.NewConnectionManager(server, s.eventSub, connectionsTarget, maxFailures, defaultTimeoutWaitAdded) + s.connManager.Start() + if err := mailservers.EnsureUsedRecordsAddedFirst(s.peerStore, s.connManager); err != nil { + return err + } + } + if s.config.EnableLastUsedMonitor { + s.lastUsedMonitor = mailservers.NewLastUsedConnectionMonitor(s.peerStore, s.cache, s.eventSub) + s.lastUsedMonitor.Start() + } + s.mailMonitor.Start() + s.server = server + return nil +} + +// Stop is run when a service is stopped.
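+// It stops the connection manager and monitors when they are enabled, stops the mail request monitor, clears the requests registry, and shuts down the Messenger.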
+func (s *Service) Stop() error { + log.Info("Stopping shhext service") + if s.config.EnableConnectionManager { + s.connManager.Stop() + } + if s.config.EnableLastUsedMonitor { + s.lastUsedMonitor.Stop() + } + s.requestsRegistry.Clear() + s.mailMonitor.Stop() + + if s.cancelMessenger != nil { + select { + case <-s.cancelMessenger: + // channel already closed + default: + close(s.cancelMessenger) + s.cancelMessenger = nil + } + } + + if s.messenger != nil { + if err := s.messenger.Shutdown(); err != nil { + return err + } + } + + return nil +} + +func onNegotiatedFilters(filters []*transport.Filter) { + var signalFilters []*signal.Filter + for _, filter := range filters { + + signalFilter := &signal.Filter{ + ChatID: filter.ChatID, + SymKeyID: filter.SymKeyID, + Listen: filter.Listen, + FilterID: filter.FilterID, + Identity: filter.Identity, + Topic: filter.Topic, + } + + signalFilters = append(signalFilters, signalFilter) + } + if len(filters) != 0 { + handler := PublisherSignalHandler{} + handler.FilterAdded(signalFilters) + } +} + +func buildMessengerOptions( + config params.ShhextConfig, + db *sql.DB, + envelopesMonitorConfig *transport.EnvelopesMonitorConfig, + logger *zap.Logger, +) []protocol.Option { + options := []protocol.Option{ + protocol.WithCustomLogger(logger), + protocol.WithDatabase(db), + protocol.WithEnvelopesMonitorConfig(envelopesMonitorConfig), + protocol.WithOnNegotiatedFilters(onNegotiatedFilters), + } + + if config.DataSyncEnabled { + options = append(options, protocol.WithDatasync()) + } + + if config.VerifyTransactionURL != "" { + client := &verifyTransactionClient{ + url: config.VerifyTransactionURL, + chainID: big.NewInt(config.VerifyTransactionChainID), + } + options = append(options, protocol.WithVerifyTransactionClient(client)) + } + + return options +} diff --git a/services/shhext/signal.go b/services/ext/signal.go similarity index 92% rename from services/shhext/signal.go rename to services/ext/signal.go index 944330c44..6c4fc7f36 100644 --- a/services/shhext/signal.go +++ b/services/ext/signal.go @@ -1,4 +1,4 @@ -package shhext +package ext import ( "github.com/status-im/status-go/eth-node/types" @@ -40,7 +40,8 @@ func (h PublisherSignalHandler) BundleAdded(identity string, installationID stri signal.SendBundleAdded(identity, installationID) } -func (h PublisherSignalHandler) WhisperFilterAdded(filters []*signal.Filter) { +func (h PublisherSignalHandler) FilterAdded(filters []*signal.Filter) { + // TODO(waku): change the name of the filter to generic one. signal.SendWhisperFilterAdded(filters) } diff --git a/services/shhext/api.go b/services/shhext/api.go deleted file mode 100644 index 645f8eea6..000000000 --- a/services/shhext/api.go +++ /dev/null @@ -1,167 +0,0 @@ -package shhext - -import ( - "errors" - "time" - - "github.com/status-im/status-go/eth-node/types" -) - -const ( - // defaultWorkTime is a work time reported in messages sent to MailServer nodes. - defaultWorkTime = 5 - // defaultRequestTimeout is the default request timeout in seconds - defaultRequestTimeout = 10 - - // ensContractAddress is the address of the ENS resolver - ensContractAddress = "0x314159265dd8dbb310642f98f50c066173c1259b" -) - -var ( - // ErrInvalidMailServerPeer is returned when it fails to parse enode from params. - ErrInvalidMailServerPeer = errors.New("invalid mailServerPeer value") - // ErrInvalidSymKeyID is returned when it fails to get a symmetric key. 
- ErrInvalidSymKeyID = errors.New("invalid symKeyID value") - // ErrInvalidPublicKey is returned when public key can't be extracted - // from MailServer's nodeID. - ErrInvalidPublicKey = errors.New("can't extract public key") - // ErrPFSNotEnabled is returned when an endpoint PFS only is called but - // PFS is disabled - ErrPFSNotEnabled = errors.New("pfs not enabled") -) - -// ----- -// PAYLOADS -// ----- - -// MessagesRequest is a RequestMessages() request payload. -type MessagesRequest struct { - // MailServerPeer is MailServer's enode address. - MailServerPeer string `json:"mailServerPeer"` - - // From is a lower bound of time range (optional). - // Default is 24 hours back from now. - From uint32 `json:"from"` - - // To is a upper bound of time range (optional). - // Default is now. - To uint32 `json:"to"` - - // Limit determines the number of messages sent by the mail server - // for the current paginated request - Limit uint32 `json:"limit"` - - // Cursor is used as starting point for paginated requests - Cursor string `json:"cursor"` - - // Topic is a regular Whisper topic. - // DEPRECATED - Topic types.TopicType `json:"topic"` - - // Topics is a list of Whisper topics. - Topics []types.TopicType `json:"topics"` - - // SymKeyID is an ID of a symmetric key to authenticate to MailServer. - // It's derived from MailServer password. - SymKeyID string `json:"symKeyID"` - - // Timeout is the time to live of the request specified in seconds. - // Default is 10 seconds - Timeout time.Duration `json:"timeout"` - - // Force ensures that requests will bypass enforced delay. - Force bool `json:"force"` -} - -func (r *MessagesRequest) setDefaults(now time.Time) { - // set From and To defaults - if r.To == 0 { - r.To = uint32(now.UTC().Unix()) - } - - if r.From == 0 { - oneDay := uint32(86400) // -24 hours - if r.To < oneDay { - r.From = 0 - } else { - r.From = r.To - oneDay - } - } - - if r.Timeout == 0 { - r.Timeout = defaultRequestTimeout - } -} - -// MessagesResponse is a response for shhext_requestMessages2 method. -type MessagesResponse struct { - // Cursor from the response can be used to retrieve more messages - // for the previous request. - Cursor string `json:"cursor"` - - // Error indicates that something wrong happened when sending messages - // to the requester. - Error error `json:"error"` -} - -// SyncMessagesRequest is a SyncMessages() request payload. -type SyncMessagesRequest struct { - // MailServerPeer is MailServer's enode address. - MailServerPeer string `json:"mailServerPeer"` - - // From is a lower bound of time range (optional). - // Default is 24 hours back from now. - From uint32 `json:"from"` - - // To is a upper bound of time range (optional). - // Default is now. - To uint32 `json:"to"` - - // Limit determines the number of messages sent by the mail server - // for the current paginated request - Limit uint32 `json:"limit"` - - // Cursor is used as starting point for paginated requests - Cursor string `json:"cursor"` - - // FollowCursor if true loads messages until cursor is empty. - FollowCursor bool `json:"followCursor"` - - // Topics is a list of Whisper topics. - // If empty, a full bloom filter will be used. - Topics []types.TopicType `json:"topics"` -} - -// InitiateHistoryRequestParams type for initiating history requests from a peer. 
-type InitiateHistoryRequestParams struct { - Peer string - SymKeyID string - Requests []TopicRequest - Force bool - Timeout time.Duration -} - -// SyncMessagesResponse is a response from the mail server -// to which SyncMessagesRequest was sent. -type SyncMessagesResponse struct { - // Cursor from the response can be used to retrieve more messages - // for the previous request. - Cursor string `json:"cursor"` - - // Error indicates that something wrong happened when sending messages - // to the requester. - Error string `json:"error"` -} - -type Author struct { - PublicKey types.HexBytes `json:"publicKey"` - Alias string `json:"alias"` - Identicon string `json:"identicon"` -} - -type Metadata struct { - DedupID []byte `json:"dedupId"` - EncryptionID types.HexBytes `json:"encryptionId"` - MessageID types.HexBytes `json:"messageId"` - Author Author `json:"author"` -} diff --git a/services/shhext/api_geth.go b/services/shhext/api_geth.go index 2318749e3..59687a041 100644 --- a/services/shhext/api_geth.go +++ b/services/shhext/api_geth.go @@ -6,34 +6,27 @@ import ( "context" "crypto/ecdsa" "encoding/hex" - "errors" "fmt" - "math/big" "time" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/rlp" - - "github.com/status-im/status-go/db" - "github.com/status-im/status-go/mailserver" - "github.com/status-im/status-go/services/shhext/mailservers" - "github.com/status-im/status-go/whisper/v6" gethbridge "github.com/status-im/status-go/eth-node/bridge/geth" "github.com/status-im/status-go/eth-node/types" - enstypes "github.com/status-im/status-go/eth-node/types/ens" - "github.com/status-im/status-go/protocol" - "github.com/status-im/status-go/protocol/encryption/multidevice" - "github.com/status-im/status-go/protocol/transport" + "github.com/status-im/status-go/services/ext" + "github.com/status-im/status-go/whisper/v6" ) -// ----- -// PUBLIC API -// ----- +const ( + // defaultWorkTime is a work time reported in messages sent to MailServer nodes. + defaultWorkTime = 5 +) // PublicAPI extends whisper public API. type PublicAPI struct { + *ext.PublicAPI + service *Service publicAPI types.PublicWhisperAPI log log.Logger @@ -42,32 +35,119 @@ type PublicAPI struct { // NewPublicAPI returns instance of the public API. func NewPublicAPI(s *Service) *PublicAPI { return &PublicAPI{ + PublicAPI: ext.NewPublicAPI(s.Service, s.w), service: s, publicAPI: s.w.PublicWhisperAPI(), log: log.New("package", "status-go/services/sshext.PublicAPI"), } } -func (api *PublicAPI) getPeer(rawurl string) (*enode.Node, error) { - if len(rawurl) == 0 { - return mailservers.GetFirstConnected(api.service.server, api.service.peerStore) +// makeEnvelop makes an envelop for a historic messages request. +// Symmetric key is used to authenticate to MailServer. +// PK is the current node ID. +// DEPRECATED +func makeEnvelop( + payload []byte, + symKey []byte, + publicKey *ecdsa.PublicKey, + nodeID *ecdsa.PrivateKey, + pow float64, + now time.Time, +) (types.Envelope, error) { + // TODO: replace with an types.Envelope creator passed to the API struct + params := whisper.MessageParams{ + PoW: pow, + Payload: payload, + WorkTime: defaultWorkTime, + Src: nodeID, } - return enode.ParseV4(rawurl) + // Either symKey or public key is required. + // This condition is verified in `message.Wrap()` method. 
+ if len(symKey) > 0 { + params.KeySym = symKey + } else if publicKey != nil { + params.Dst = publicKey + } + message, err := whisper.NewSentMessage(&params) + if err != nil { + return nil, err + } + envelope, err := message.Wrap(&params, now) + if err != nil { + return nil, err + } + return gethbridge.NewWhisperEnvelope(envelope), nil } -// RetryConfig specifies configuration for retries with timeout and max amount of retries. -type RetryConfig struct { - BaseTimeout time.Duration - // StepTimeout defines duration increase per each retry. - StepTimeout time.Duration - MaxRetries int +// RequestMessages sends a request for historic messages to a MailServer. +func (api *PublicAPI) RequestMessages(_ context.Context, r ext.MessagesRequest) (types.HexBytes, error) { + api.log.Info("RequestMessages", "request", r) + + now := api.service.w.GetCurrentTime() + r.SetDefaults(now) + + if r.From > r.To { + return nil, fmt.Errorf("Query range is invalid: from > to (%d > %d)", r.From, r.To) + } + + mailServerNode, err := api.service.GetPeer(r.MailServerPeer) + if err != nil { + return nil, fmt.Errorf("%v: %v", ext.ErrInvalidMailServerPeer, err) + } + + var ( + symKey []byte + publicKey *ecdsa.PublicKey + ) + + if r.SymKeyID != "" { + symKey, err = api.service.w.GetSymKey(r.SymKeyID) + if err != nil { + return nil, fmt.Errorf("%v: %v", ext.ErrInvalidSymKeyID, err) + } + } else { + publicKey = mailServerNode.Pubkey() + } + + payload, err := ext.MakeMessagesRequestPayload(r) + if err != nil { + return nil, err + } + + envelope, err := makeEnvelop( + payload, + symKey, + publicKey, + api.service.NodeID(), + api.service.w.MinPow(), + now, + ) + if err != nil { + return nil, err + } + hash := envelope.Hash() + + if !r.Force { + err = api.service.RequestsRegistry().Register(hash, r.Topics) + if err != nil { + return nil, err + } + } + + if err := api.service.w.RequestHistoricMessagesWithTimeout(mailServerNode.ID().Bytes(), envelope, r.Timeout*time.Second); err != nil { + if !r.Force { + api.service.RequestsRegistry().Unregister(hash) + } + return nil, err + } + + return hash[:], nil } // RequestMessagesSync repeats MessagesRequest using configuration in retry conf.
-func (api *PublicAPI) RequestMessagesSync(conf RetryConfig, r MessagesRequest) (MessagesResponse, error) { - var resp MessagesResponse +func (api *PublicAPI) RequestMessagesSync(conf ext.RetryConfig, r ext.MessagesRequest) (ext.MessagesResponse, error) { + var resp ext.MessagesResponse - shh := api.service.w events := make(chan types.EnvelopeEvent, 10) var ( requestID types.HexBytes @@ -75,7 +155,7 @@ func (api *PublicAPI) RequestMessagesSync(conf RetryConfig, r MessagesRequest) ( retries int ) for retries <= conf.MaxRetries { - sub := shh.SubscribeEnvelopeEvents(events) + sub := api.service.w.SubscribeEnvelopeEvents(events) r.Timeout = conf.BaseTimeout + conf.StepTimeout*time.Duration(retries) timeout := r.Timeout // FIXME this weird conversion is required because MessagesRequest expects seconds but defines time.Duration @@ -85,7 +165,7 @@ func (api *PublicAPI) RequestMessagesSync(conf RetryConfig, r MessagesRequest) ( sub.Unsubscribe() return resp, err } - mailServerResp, err := waitForExpiredOrCompleted(types.BytesToHash(requestID), events, timeout) + mailServerResp, err := ext.WaitForExpiredOrCompleted(types.BytesToHash(requestID), events, timeout) sub.Unsubscribe() if err == nil { resp.Cursor = hex.EncodeToString(mailServerResp.Cursor) @@ -98,96 +178,44 @@ func (api *PublicAPI) RequestMessagesSync(conf RetryConfig, r MessagesRequest) ( return resp, fmt.Errorf("failed to request messages after %d retries", retries) } -func waitForExpiredOrCompleted(requestID types.Hash, events chan types.EnvelopeEvent, timeout time.Duration) (*types.MailServerResponse, error) { - expired := fmt.Errorf("request %x expired", requestID) - after := time.NewTimer(timeout) - defer after.Stop() - for { - var ev types.EnvelopeEvent - select { - case ev = <-events: - case <-after.C: - return nil, expired - } - if ev.Hash != requestID { - continue - } - switch ev.Event { - case types.EventMailServerRequestCompleted: - data, ok := ev.Data.(*types.MailServerResponse) - if ok { - return data, nil - } - return nil, errors.New("invalid event data type") - case types.EventMailServerRequestExpired: - return nil, expired - } - } +// SyncMessagesRequest is a SyncMessages() request payload. +type SyncMessagesRequest struct { + // MailServerPeer is MailServer's enode address. + MailServerPeer string `json:"mailServerPeer"` + + // From is a lower bound of time range (optional). + // Default is 24 hours back from now. + From uint32 `json:"from"` + + // To is a upper bound of time range (optional). + // Default is now. + To uint32 `json:"to"` + + // Limit determines the number of messages sent by the mail server + // for the current paginated request + Limit uint32 `json:"limit"` + + // Cursor is used as starting point for paginated requests + Cursor string `json:"cursor"` + + // FollowCursor if true loads messages until cursor is empty. + FollowCursor bool `json:"followCursor"` + + // Topics is a list of Whisper topics. + // If empty, a full bloom filter will be used. + Topics []types.TopicType `json:"topics"` } -// RequestMessages sends a request for historic messages to a MailServer. -func (api *PublicAPI) RequestMessages(_ context.Context, r MessagesRequest) (types.HexBytes, error) { - api.log.Info("RequestMessages", "request", r) - shh := api.service.w - now := api.service.w.GetCurrentTime() - r.setDefaults(now) +// SyncMessagesResponse is a response from the mail server +// to which SyncMessagesRequest was sent. 
+type SyncMessagesResponse struct { + // Cursor from the response can be used to retrieve more messages + // for the previous request. + Cursor string `json:"cursor"` - if r.From > r.To { - return nil, fmt.Errorf("Query range is invalid: from > to (%d > %d)", r.From, r.To) - } - - mailServerNode, err := api.getPeer(r.MailServerPeer) - if err != nil { - return nil, fmt.Errorf("%v: %v", ErrInvalidMailServerPeer, err) - } - - var ( - symKey []byte - publicKey *ecdsa.PublicKey - ) - - if r.SymKeyID != "" { - symKey, err = shh.GetSymKey(r.SymKeyID) - if err != nil { - return nil, fmt.Errorf("%v: %v", ErrInvalidSymKeyID, err) - } - } else { - publicKey = mailServerNode.Pubkey() - } - - payload, err := makeMessagesRequestPayload(r) - if err != nil { - return nil, err - } - - envelope, err := makeEnvelop( - payload, - symKey, - publicKey, - api.service.nodeID, - shh.MinPow(), - now, - ) - if err != nil { - return nil, err - } - hash := envelope.Hash() - - if !r.Force { - err = api.service.requestsRegistry.Register(hash, r.Topics) - if err != nil { - return nil, err - } - } - - if err := shh.RequestHistoricMessagesWithTimeout(mailServerNode.ID().Bytes(), envelope, r.Timeout*time.Second); err != nil { - if !r.Force { - api.service.requestsRegistry.Unregister(hash) - } - return nil, err - } - - return hash[:], nil + // Error indicates that something wrong happened when sending messages + // to the requester. + Error string `json:"error"` } // createSyncMailRequest creates SyncMailRequest. It uses a full bloom filter @@ -195,7 +223,7 @@ func (api *PublicAPI) RequestMessages(_ context.Context, r MessagesRequest) (typ func createSyncMailRequest(r SyncMessagesRequest) (types.SyncMailRequest, error) { var bloom []byte if len(r.Topics) > 0 { - bloom = topicsToBloom(r.Topics...) + bloom = ext.TopicsToBloom(r.Topics...) } else { bloom = types.MakeFullNodeBloom() } @@ -242,7 +270,7 @@ func (api *PublicAPI) SyncMessages(ctx context.Context, r SyncMessagesRequest) ( for { log.Info("Sending a request to sync messages", "request", request) - resp, err := api.service.syncMessages(ctx, mailServerID, request) + resp, err := api.service.SyncMessages(ctx, mailServerID, request) if err != nil { return response, err } @@ -256,421 +284,3 @@ func (api *PublicAPI) SyncMessages(ctx context.Context, r SyncMessagesRequest) ( request.Cursor = resp.Cursor } } - -// ConfirmMessagesProcessedByID is a method to confirm that messages was consumed by -// the client side. -// TODO: this is broken now as it requires dedup ID while a message hash should be used. -func (api *PublicAPI) ConfirmMessagesProcessedByID(messageConfirmations []*Metadata) error { - confirmationCount := len(messageConfirmations) - dedupIDs := make([][]byte, confirmationCount) - encryptionIDs := make([][]byte, confirmationCount) - for i, confirmation := range messageConfirmations { - dedupIDs[i] = confirmation.DedupID - encryptionIDs[i] = confirmation.EncryptionID - } - return api.service.ConfirmMessagesProcessed(encryptionIDs) -} - -// Post is used to send one-to-one for those who did not enabled device-to-device sync, -// in other words don't use PFS-enabled messages. Otherwise, SendDirectMessage is used. -// It's important to call PublicAPI.afterSend() so that the client receives a signal -// with confirmation that the message left the device. 
-func (api *PublicAPI) Post(ctx context.Context, newMessage types.NewMessage) (types.HexBytes, error) { - return api.publicAPI.Post(ctx, newMessage) -} - -// SendPublicMessage sends a public chat message to the underlying transport. -// Message's payload is a transit encoded message. -// It's important to call PublicAPI.afterSend() so that the client receives a signal -// with confirmation that the message left the device. -func (api *PublicAPI) SendPublicMessage(ctx context.Context, msg SendPublicMessageRPC) (types.HexBytes, error) { - chat := protocol.Chat{ - Name: msg.Chat, - } - return api.service.messenger.SendRaw(ctx, chat, msg.Payload) -} - -// SendDirectMessage sends a 1:1 chat message to the underlying transport -// Message's payload is a transit encoded message. -// It's important to call PublicAPI.afterSend() so that the client receives a signal -// with confirmation that the message left the device. -func (api *PublicAPI) SendDirectMessage(ctx context.Context, msg SendDirectMessageRPC) (types.HexBytes, error) { - chat := protocol.Chat{ - ChatType: protocol.ChatTypeOneToOne, - ID: types.EncodeHex(msg.PubKey), - } - - return api.service.messenger.SendRaw(ctx, chat, msg.Payload) -} - -func (api *PublicAPI) Join(chat protocol.Chat) error { - return api.service.messenger.Join(chat) -} - -func (api *PublicAPI) Leave(chat protocol.Chat) error { - return api.service.messenger.Leave(chat) -} - -func (api *PublicAPI) LeaveGroupChat(ctx Context, chatID string) (*protocol.MessengerResponse, error) { - return api.service.messenger.LeaveGroupChat(ctx, chatID) -} - -func (api *PublicAPI) CreateGroupChatWithMembers(ctx Context, name string, members []string) (*protocol.MessengerResponse, error) { - return api.service.messenger.CreateGroupChatWithMembers(ctx, name, members) -} - -func (api *PublicAPI) AddMembersToGroupChat(ctx Context, chatID string, members []string) (*protocol.MessengerResponse, error) { - return api.service.messenger.AddMembersToGroupChat(ctx, chatID, members) -} - -func (api *PublicAPI) RemoveMemberFromGroupChat(ctx Context, chatID string, member string) (*protocol.MessengerResponse, error) { - return api.service.messenger.RemoveMemberFromGroupChat(ctx, chatID, member) -} - -func (api *PublicAPI) AddAdminsToGroupChat(ctx Context, chatID string, members []string) (*protocol.MessengerResponse, error) { - return api.service.messenger.AddAdminsToGroupChat(ctx, chatID, members) -} - -func (api *PublicAPI) ConfirmJoiningGroup(ctx context.Context, chatID string) (*protocol.MessengerResponse, error) { - return api.service.messenger.ConfirmJoiningGroup(ctx, chatID) -} - -func (api *PublicAPI) requestMessagesUsingPayload(request db.HistoryRequest, peer, symkeyID string, payload []byte, force bool, timeout time.Duration, topics []types.TopicType) (hash types.Hash, err error) { - shh := api.service.w - now := api.service.w.GetCurrentTime() - - mailServerNode, err := api.getPeer(peer) - if err != nil { - return hash, fmt.Errorf("%v: %v", ErrInvalidMailServerPeer, err) - } - - var ( - symKey []byte - publicKey *ecdsa.PublicKey - ) - - if symkeyID != "" { - symKey, err = shh.GetSymKey(symkeyID) - if err != nil { - return hash, fmt.Errorf("%v: %v", ErrInvalidSymKeyID, err) - } - } else { - publicKey = mailServerNode.Pubkey() - } - - envelope, err := makeEnvelop( - payload, - symKey, - publicKey, - api.service.nodeID, - shh.MinPow(), - now, - ) - if err != nil { - return hash, err - } - hash = envelope.Hash() - - err = request.Replace(hash) - if err != nil { - return hash, err - } - - if 
!force { - err = api.service.requestsRegistry.Register(hash, topics) - if err != nil { - return hash, err - } - } - - if err := shh.RequestHistoricMessagesWithTimeout(mailServerNode.ID().Bytes(), envelope, timeout); err != nil { - if !force { - api.service.requestsRegistry.Unregister(hash) - } - return hash, err - } - - return hash, nil - -} - -// InitiateHistoryRequests is a stateful API for initiating history request for each topic. -// Caller of this method needs to define only two parameters per each TopicRequest: -// - Topic -// - Duration in nanoseconds. Will be used to determine starting time for history request. -// After that status-go will guarantee that request for this topic and date will be performed. -func (api *PublicAPI) InitiateHistoryRequests(parent context.Context, request InitiateHistoryRequestParams) (rst []types.HexBytes, err error) { - tx := api.service.storage.NewTx() - defer func() { - if err == nil { - err = tx.Commit() - } - }() - ctx := NewContextFromService(parent, api.service, tx) - requests, err := api.service.historyUpdates.CreateRequests(ctx, request.Requests) - if err != nil { - return nil, err - } - var ( - payload []byte - hash types.Hash - ) - for i := range requests { - req := requests[i] - options := CreateTopicOptionsFromRequest(req) - bloom := options.ToBloomFilterOption() - payload, err = bloom.ToMessagesRequestPayload() - if err != nil { - return rst, err - } - hash, err = api.requestMessagesUsingPayload(req, request.Peer, request.SymKeyID, payload, request.Force, request.Timeout, options.Topics()) - if err != nil { - return rst, err - } - rst = append(rst, hash.Bytes()) - } - return rst, err -} - -// CompleteRequest client must mark request completed when all envelopes were processed. -func (api *PublicAPI) CompleteRequest(parent context.Context, hex string) (err error) { - tx := api.service.storage.NewTx() - ctx := NewContextFromService(parent, api.service, tx) - err = api.service.historyUpdates.UpdateFinishedRequest(ctx, types.HexToHash(hex)) - if err == nil { - return tx.Commit() - } - return err -} - -func (api *PublicAPI) LoadFilters(parent context.Context, chats []*transport.Filter) ([]*transport.Filter, error) { - return api.service.messenger.LoadFilters(chats) -} - -func (api *PublicAPI) SaveChat(parent context.Context, chat *protocol.Chat) error { - api.log.Info("saving chat", "chat", chat) - return api.service.messenger.SaveChat(chat) -} - -func (api *PublicAPI) Chats(parent context.Context) []*protocol.Chat { - return api.service.messenger.Chats() -} - -func (api *PublicAPI) DeleteChat(parent context.Context, chatID string) error { - return api.service.messenger.DeleteChat(chatID) -} - -func (api *PublicAPI) SaveContact(parent context.Context, contact *protocol.Contact) error { - return api.service.messenger.SaveContact(contact) -} - -func (api *PublicAPI) BlockContact(parent context.Context, contact *protocol.Contact) ([]*protocol.Chat, error) { - api.log.Info("blocking contact", "contact", contact.ID) - return api.service.messenger.BlockContact(contact) -} - -func (api *PublicAPI) Contacts(parent context.Context) []*protocol.Contact { - return api.service.messenger.Contacts() -} - -func (api *PublicAPI) RemoveFilters(parent context.Context, chats []*transport.Filter) error { - return api.service.messenger.RemoveFilters(chats) -} - -// EnableInstallation enables an installation for multi-device sync. 
-func (api *PublicAPI) EnableInstallation(installationID string) error { - return api.service.messenger.EnableInstallation(installationID) -} - -// DisableInstallation disables an installation for multi-device sync. -func (api *PublicAPI) DisableInstallation(installationID string) error { - return api.service.messenger.DisableInstallation(installationID) -} - -// GetOurInstallations returns all the installations available given an identity -func (api *PublicAPI) GetOurInstallations() []*multidevice.Installation { - return api.service.messenger.Installations() -} - -// SetInstallationMetadata sets the metadata for our own installation -func (api *PublicAPI) SetInstallationMetadata(installationID string, data *multidevice.InstallationMetadata) error { - return api.service.messenger.SetInstallationMetadata(installationID, data) -} - -// VerifyENSNames takes a list of ensdetails and returns whether they match the public key specified -func (api *PublicAPI) VerifyENSNames(details []enstypes.ENSDetails) (map[string]enstypes.ENSResponse, error) { - return api.service.messenger.VerifyENSNames(api.service.config.VerifyENSURL, ensContractAddress, details) -} - -type ApplicationMessagesResponse struct { - Messages []*protocol.Message `json:"messages"` - Cursor string `json:"cursor"` -} - -func (api *PublicAPI) ChatMessages(chatID, cursor string, limit int) (*ApplicationMessagesResponse, error) { - messages, cursor, err := api.service.messenger.MessageByChatID(chatID, cursor, limit) - if err != nil { - return nil, err - } - - return &ApplicationMessagesResponse{ - Messages: messages, - Cursor: cursor, - }, nil -} - -func (api *PublicAPI) DeleteMessage(id string) error { - return api.service.messenger.DeleteMessage(id) -} - -func (api *PublicAPI) DeleteMessagesByChatID(id string) error { - return api.service.messenger.DeleteMessagesByChatID(id) -} - -func (api *PublicAPI) MarkMessagesSeen(chatID string, ids []string) error { - return api.service.messenger.MarkMessagesSeen(chatID, ids) -} - -func (api *PublicAPI) UpdateMessageOutgoingStatus(id, newOutgoingStatus string) error { - return api.service.messenger.UpdateMessageOutgoingStatus(id, newOutgoingStatus) -} - -func (api *PublicAPI) SendChatMessage(ctx context.Context, message *protocol.Message) (*protocol.MessengerResponse, error) { - return api.service.messenger.SendChatMessage(ctx, message) -} - -func (api *PublicAPI) ReSendChatMessage(ctx context.Context, messageID string) error { - return api.service.messenger.ReSendChatMessage(ctx, messageID) -} - -func (api *PublicAPI) RequestTransaction(ctx context.Context, chatID, value, contract, address string) (*protocol.MessengerResponse, error) { - return api.service.messenger.RequestTransaction(ctx, chatID, value, contract, address) -} - -func (api *PublicAPI) RequestAddressForTransaction(ctx context.Context, chatID, from, value, contract string) (*protocol.MessengerResponse, error) { - return api.service.messenger.RequestAddressForTransaction(ctx, chatID, from, value, contract) -} - -func (api *PublicAPI) DeclineRequestAddressForTransaction(ctx context.Context, messageID string) (*protocol.MessengerResponse, error) { - return api.service.messenger.DeclineRequestAddressForTransaction(ctx, messageID) -} - -func (api *PublicAPI) DeclineRequestTransaction(ctx context.Context, messageID string) (*protocol.MessengerResponse, error) { - return api.service.messenger.DeclineRequestTransaction(ctx, messageID) -} - -func (api *PublicAPI) AcceptRequestAddressForTransaction(ctx context.Context, messageID, address 
string) (*protocol.MessengerResponse, error) { - return api.service.messenger.AcceptRequestAddressForTransaction(ctx, messageID, address) -} - -func (api *PublicAPI) SendTransaction(ctx context.Context, chatID, value, contract, transactionHash string, signature types.HexBytes) (*protocol.MessengerResponse, error) { - return api.service.messenger.SendTransaction(ctx, chatID, value, contract, transactionHash, signature) -} - -func (api *PublicAPI) AcceptRequestTransaction(ctx context.Context, transactionHash, messageID string, signature types.HexBytes) (*protocol.MessengerResponse, error) { - return api.service.messenger.AcceptRequestTransaction(ctx, transactionHash, messageID, signature) -} - -func (api *PublicAPI) SendContactUpdates(ctx context.Context, name, picture string) error { - return api.service.messenger.SendContactUpdates(ctx, name, picture) -} - -func (api *PublicAPI) SendContactUpdate(ctx context.Context, contactID, name, picture string) (*protocol.MessengerResponse, error) { - return api.service.messenger.SendContactUpdate(ctx, contactID, name, picture) -} - -func (api *PublicAPI) SendPairInstallation(ctx context.Context) (*protocol.MessengerResponse, error) { - return api.service.messenger.SendPairInstallation(ctx) -} - -func (api *PublicAPI) SyncDevices(ctx context.Context, name, picture string) error { - return api.service.messenger.SyncDevices(ctx, name, picture) -} - -// ----- -// HELPER -// ----- - -// makeEnvelop makes an envelop for a historic messages request. -// Symmetric key is used to authenticate to MailServer. -// PK is the current node ID. -func makeEnvelop( - payload []byte, - symKey []byte, - publicKey *ecdsa.PublicKey, - nodeID *ecdsa.PrivateKey, - pow float64, - now time.Time, -) (types.Envelope, error) { - // TODO: replace with an types.Envelope creator passed to the API struct - params := whisper.MessageParams{ - PoW: pow, - Payload: payload, - WorkTime: defaultWorkTime, - Src: nodeID, - } - // Either symKey or public key is required. - // This condition is verified in `message.Wrap()` method. - if len(symKey) > 0 { - params.KeySym = symKey - } else if publicKey != nil { - params.Dst = publicKey - } - message, err := whisper.NewSentMessage(&params) - if err != nil { - return nil, err - } - envelope, err := message.Wrap(&params, now) - if err != nil { - return nil, err - } - return gethbridge.NewWhisperEnvelope(envelope), nil -} - -// makeMessagesRequestPayload makes a specific payload for MailServer -// to request historic messages. -func makeMessagesRequestPayload(r MessagesRequest) ([]byte, error) { - cursor, err := hex.DecodeString(r.Cursor) - if err != nil { - return nil, fmt.Errorf("invalid cursor: %v", err) - } - - if len(cursor) > 0 && len(cursor) != mailserver.CursorLength { - return nil, fmt.Errorf("invalid cursor size: expected %d but got %d", mailserver.CursorLength, len(cursor)) - } - - payload := mailserver.MessagesRequestPayload{ - Lower: r.From, - Upper: r.To, - Bloom: createBloomFilter(r), - Limit: r.Limit, - Cursor: cursor, - // Client must tell the MailServer if it supports batch responses. - // This can be removed in the future. - Batch: true, - } - - return rlp.EncodeToBytes(payload) -} - -func createBloomFilter(r MessagesRequest) []byte { - if len(r.Topics) > 0 { - return topicsToBloom(r.Topics...)
- } - - return types.TopicToBloom(r.Topic) -} - -func topicsToBloom(topics ...types.TopicType) []byte { - i := new(big.Int) - for _, topic := range topics { - bloom := types.TopicToBloom(topic) - i.Or(i, new(big.Int).SetBytes(bloom[:])) - } - - combined := make([]byte, types.BloomFilterSize) - data := i.Bytes() - copy(combined[types.BloomFilterSize-len(data):], data[:]) - - return combined -} diff --git a/services/shhext/api_geth_test.go b/services/shhext/api_geth_test.go index 86e9c207c..02b9cf45a 100644 --- a/services/shhext/api_geth_test.go +++ b/services/shhext/api_geth_test.go @@ -1,146 +1,37 @@ -// +build !nimbus - package shhext import ( "context" "encoding/hex" "fmt" + "io/ioutil" + "math" + "net" + "os" + "strconv" "testing" "time" - "github.com/status-im/status-go/eth-node/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/status-im/status-go/mailserver" - - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/storage" + + gethbridge "github.com/status-im/status-go/eth-node/bridge/geth" + "github.com/status-im/status-go/eth-node/crypto" + "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/params" + "github.com/status-im/status-go/services/ext" + "github.com/status-im/status-go/sqlite" + "github.com/status-im/status-go/t/helpers" + "github.com/status-im/status-go/whisper/v6" ) -func TestMessagesRequest_setDefaults(t *testing.T) { - daysAgo := func(now time.Time, days int) uint32 { - return uint32(now.UTC().Add(-24 * time.Hour * time.Duration(days)).Unix()) - } - - tnow := time.Now() - now := uint32(tnow.UTC().Unix()) - yesterday := daysAgo(tnow, 1) - - scenarios := []struct { - given *MessagesRequest - expected *MessagesRequest - }{ - { - &MessagesRequest{From: 0, To: 0}, - &MessagesRequest{From: yesterday, To: now, Timeout: defaultRequestTimeout}, - }, - { - &MessagesRequest{From: 1, To: 0}, - &MessagesRequest{From: uint32(1), To: now, Timeout: defaultRequestTimeout}, - }, - { - &MessagesRequest{From: 0, To: yesterday}, - &MessagesRequest{From: daysAgo(tnow, 2), To: yesterday, Timeout: defaultRequestTimeout}, - }, - // 100 - 1 day would be invalid, so we set From to 0 - { - &MessagesRequest{From: 0, To: 100}, - &MessagesRequest{From: 0, To: 100, Timeout: defaultRequestTimeout}, - }, - // set Timeout - { - &MessagesRequest{From: 0, To: 0, Timeout: 100}, - &MessagesRequest{From: yesterday, To: now, Timeout: 100}, - }, - } - - for i, s := range scenarios { - t.Run(fmt.Sprintf("Scenario %d", i), func(t *testing.T) { - s.given.setDefaults(tnow) - require.Equal(t, s.expected, s.given) - }) - } -} - -func TestMakeMessagesRequestPayload(t *testing.T) { - var emptyTopic types.TopicType - testCases := []struct { - Name string - Req MessagesRequest - Err string - }{ - { - Name: "empty cursor", - Req: MessagesRequest{Cursor: ""}, - Err: "", - }, - { - Name: "invalid cursor size", - Req: MessagesRequest{Cursor: hex.EncodeToString([]byte{0x01, 0x02, 0x03})}, - Err: fmt.Sprintf("invalid cursor size: expected %d but got 3", mailserver.CursorLength), - }, - { - Name: "valid cursor", - Req: MessagesRequest{ - Cursor: hex.EncodeToString(mailserver.NewDBKey(123, emptyTopic, types.Hash{}).Cursor()), - }, - Err: "", - }, - } - - for _, tc := range testCases { - t.Run(tc.Name, func(t *testing.T) { 
- _, err := makeMessagesRequestPayload(tc.Req) - if tc.Err == "" { - require.NoError(t, err) - } else { - require.EqualError(t, err, tc.Err) - } - }) - } -} - -func TestTopicsToBloom(t *testing.T) { - t1 := stringToTopic("t1") - b1 := types.TopicToBloom(t1) - t2 := stringToTopic("t2") - b2 := types.TopicToBloom(t2) - t3 := stringToTopic("t3") - b3 := types.TopicToBloom(t3) - - reqBloom := topicsToBloom(t1) - assert.True(t, types.BloomFilterMatch(reqBloom, b1)) - assert.False(t, types.BloomFilterMatch(reqBloom, b2)) - assert.False(t, types.BloomFilterMatch(reqBloom, b3)) - - reqBloom = topicsToBloom(t1, t2) - assert.True(t, types.BloomFilterMatch(reqBloom, b1)) - assert.True(t, types.BloomFilterMatch(reqBloom, b2)) - assert.False(t, types.BloomFilterMatch(reqBloom, b3)) - - reqBloom = topicsToBloom(t1, t2, t3) - assert.True(t, types.BloomFilterMatch(reqBloom, b1)) - assert.True(t, types.BloomFilterMatch(reqBloom, b2)) - assert.True(t, types.BloomFilterMatch(reqBloom, b3)) -} - -func TestCreateBloomFilter(t *testing.T) { - t1 := stringToTopic("t1") - t2 := stringToTopic("t2") - - req := MessagesRequest{Topic: t1} - bloom := createBloomFilter(req) - assert.Equal(t, topicsToBloom(t1), bloom) - - req = MessagesRequest{Topics: []types.TopicType{t1, t2}} - bloom = createBloomFilter(req) - assert.Equal(t, topicsToBloom(t1, t2), bloom) -} - -func stringToTopic(s string) types.TopicType { - return types.BytesToTopic([]byte(s)) -} - func TestCreateSyncMailRequest(t *testing.T) { testCases := []struct { Name string @@ -223,19 +114,383 @@ func TestSyncMessagesErrors(t *testing.T) { } } -func TestExpiredOrCompleted(t *testing.T) { - timeout := time.Millisecond - events := make(chan types.EnvelopeEvent) - errors := make(chan error, 1) - hash := types.Hash{1} - go func() { - _, err := waitForExpiredOrCompleted(hash, events, timeout) - errors <- err - }() - select { - case <-time.After(time.Second): - require.FailNow(t, "timed out waiting for waitForExpiredOrCompleted to complete") - case err := <-errors: - require.EqualError(t, err, fmt.Sprintf("request %x expired", hash)) +func TestRequestMessagesErrors(t *testing.T) { + var err error + + shh := gethbridge.NewGethWhisperWrapper(whisper.New(nil)) + aNode, err := node.New(&node.Config{ + P2P: p2p.Config{ + MaxPeers: math.MaxInt32, + NoDiscovery: true, + }, + NoUSB: true, + }) // in-memory node as no data dir + require.NoError(t, err) + err = aNode.Register(func(*node.ServiceContext) (node.Service, error) { + return gethbridge.GetGethWhisperFrom(shh), nil + }) + require.NoError(t, err) + + err = aNode.Start() + require.NoError(t, err) + defer func() { require.NoError(t, aNode.Stop()) }() + + handler := ext.NewHandlerMock(1) + config := params.ShhextConfig{ + InstallationID: "1", + BackupDisabledDataDir: os.TempDir(), + PFSEnabled: true, } + nodeWrapper := ext.NewTestNodeWrapper(shh, nil) + service := New(config, nodeWrapper, nil, handler, nil) + api := NewPublicAPI(service) + + const ( + mailServerPeer = "enode://b7e65e1bedc2499ee6cbd806945af5e7df0e59e4070c96821570bd581473eade24a489f5ec95d060c0db118c879403ab88d827d3766978f28708989d35474f87@[::]:51920" + ) + + var hash []byte + + // invalid MailServer enode address + hash, err = api.RequestMessages(context.TODO(), ext.MessagesRequest{MailServerPeer: "invalid-address"}) + require.Nil(t, hash) + require.EqualError(t, err, "invalid mailServerPeer value: invalid URL scheme, want \"enode\"") + + // non-existent symmetric key + hash, err = api.RequestMessages(context.TODO(), ext.MessagesRequest{ + MailServerPeer: 
mailServerPeer, + SymKeyID: "invalid-sym-key-id", + }) + require.Nil(t, hash) + require.EqualError(t, err, "invalid symKeyID value: non-existent key ID") + + // with a symmetric key + symKeyID, symKeyErr := shh.AddSymKeyFromPassword("some-pass") + require.NoError(t, symKeyErr) + hash, err = api.RequestMessages(context.TODO(), ext.MessagesRequest{ + MailServerPeer: mailServerPeer, + SymKeyID: symKeyID, + }) + require.Nil(t, hash) + require.Contains(t, err.Error(), "Could not find peer with ID") + + // from is greater than to + hash, err = api.RequestMessages(context.TODO(), ext.MessagesRequest{ + From: 10, + To: 5, + }) + require.Nil(t, hash) + require.Contains(t, err.Error(), "Query range is invalid: from > to (10 > 5)") +} + +func TestInitProtocol(t *testing.T) { + directory, err := ioutil.TempDir("", "status-go-testing") + require.NoError(t, err) + + config := params.ShhextConfig{ + InstallationID: "2", + BackupDisabledDataDir: directory, + PFSEnabled: true, + MailServerConfirmations: true, + ConnectionTarget: 10, + } + db, err := leveldb.Open(storage.NewMemStorage(), nil) + require.NoError(t, err) + + shh := gethbridge.NewGethWhisperWrapper(whisper.New(nil)) + privateKey, err := crypto.GenerateKey() + require.NoError(t, err) + + nodeWrapper := ext.NewTestNodeWrapper(shh, nil) + service := New(config, nodeWrapper, nil, nil, db) + + tmpdir, err := ioutil.TempDir("", "test-shhext-service-init-protocol") + require.NoError(t, err) + + sqlDB, err := sqlite.OpenDB(fmt.Sprintf("%s/db.sql", tmpdir), "password") + require.NoError(t, err) + + err = service.InitProtocol(privateKey, sqlDB) + require.NoError(t, err) +} + +func TestShhExtSuite(t *testing.T) { + suite.Run(t, new(ShhExtSuite)) +} + +type ShhExtSuite struct { + suite.Suite + + dir string + nodes []*node.Node + whispers []types.Whisper + services []*Service +} + +func (s *ShhExtSuite) createAndAddNode() { + idx := len(s.nodes) + + // create a node + cfg := &node.Config{ + Name: strconv.Itoa(idx), + P2P: p2p.Config{ + MaxPeers: math.MaxInt32, + NoDiscovery: true, + ListenAddr: ":0", + }, + NoUSB: true, + } + stack, err := node.New(cfg) + s.NoError(err) + whisper := whisper.New(nil) + err = stack.Register(func(n *node.ServiceContext) (node.Service, error) { + return whisper, nil + }) + s.NoError(err) + + // set up protocol + config := params.ShhextConfig{ + InstallationID: strconv.Itoa(idx), + BackupDisabledDataDir: s.dir, + PFSEnabled: true, + MailServerConfirmations: true, + ConnectionTarget: 10, + } + db, err := leveldb.Open(storage.NewMemStorage(), nil) + s.Require().NoError(err) + nodeWrapper := ext.NewTestNodeWrapper(gethbridge.NewGethWhisperWrapper(whisper), nil) + service := New(config, nodeWrapper, nil, nil, db) + sqlDB, err := sqlite.OpenDB(fmt.Sprintf("%s/%d", s.dir, idx), "password") + s.Require().NoError(err) + privateKey, err := crypto.GenerateKey() + s.NoError(err) + err = service.InitProtocol(privateKey, sqlDB) + s.NoError(err) + err = stack.Register(func(n *node.ServiceContext) (node.Service, error) { + return service, nil + }) + s.NoError(err) + + // start the node + err = stack.Start() + s.Require().NoError(err) + + // store references + s.nodes = append(s.nodes, stack) + s.whispers = append(s.whispers, gethbridge.NewGethWhisperWrapper(whisper)) + s.services = append(s.services, service) +} + +func (s *ShhExtSuite) SetupTest() { + var err error + s.dir, err = ioutil.TempDir("", "status-go-testing") + s.Require().NoError(err) +} + +func (s *ShhExtSuite) TearDownTest() { + for _, n := range s.nodes { + s.NoError(n.Stop()) + 
} + s.nodes = nil + s.whispers = nil + s.services = nil +} + +func (s *ShhExtSuite) TestRequestMessagesSuccess() { + // two nodes needed: client and mailserver + s.createAndAddNode() + s.createAndAddNode() + + waitErr := helpers.WaitForPeerAsync(s.nodes[0].Server(), s.nodes[1].Server().Self().URLv4(), p2p.PeerEventTypeAdd, time.Second) + s.nodes[0].Server().AddPeer(s.nodes[1].Server().Self()) + s.Require().NoError(<-waitErr) + + api := NewPublicAPI(s.services[0]) + + _, err := api.RequestMessages(context.Background(), ext.MessagesRequest{ + MailServerPeer: s.nodes[1].Server().Self().URLv4(), + Topics: []types.TopicType{{1}}, + }) + s.NoError(err) +} + +func (s *ShhExtSuite) TestMultipleRequestMessagesWithoutForce() { + // two nodes needed: client and mailserver + s.createAndAddNode() + s.createAndAddNode() + + waitErr := helpers.WaitForPeerAsync(s.nodes[0].Server(), s.nodes[1].Server().Self().URLv4(), p2p.PeerEventTypeAdd, time.Second) + s.nodes[0].Server().AddPeer(s.nodes[1].Server().Self()) + s.Require().NoError(<-waitErr) + + api := NewPublicAPI(s.services[0]) + + _, err := api.RequestMessages(context.Background(), ext.MessagesRequest{ + MailServerPeer: s.nodes[1].Server().Self().URLv4(), + Topics: []types.TopicType{{1}}, + }) + s.NoError(err) + _, err = api.RequestMessages(context.Background(), ext.MessagesRequest{ + MailServerPeer: s.nodes[1].Server().Self().URLv4(), + Topics: []types.TopicType{{1}}, + }) + s.EqualError(err, "another request with the same topics was sent less than 3s ago. Please wait for a bit longer, or set `force` to true in request parameters") + _, err = api.RequestMessages(context.Background(), ext.MessagesRequest{ + MailServerPeer: s.nodes[1].Server().Self().URLv4(), + Topics: []types.TopicType{{2}}, + }) + s.NoError(err) +} + +func (s *ShhExtSuite) TestFailedRequestWithUnknownMailServerPeer() { + s.createAndAddNode() + + api := NewPublicAPI(s.services[0]) + + _, err := api.RequestMessages(context.Background(), ext.MessagesRequest{ + MailServerPeer: "enode://19872f94b1e776da3a13e25afa71b47dfa99e658afd6427ea8d6e03c22a99f13590205a8826443e95a37eee1d815fc433af7a8ca9a8d0df7943d1f55684045b7@0.0.0.0:30305", + Topics: []types.TopicType{{1}}, + }) + s.EqualError(err, "Could not find peer with ID: 10841e6db5c02fc331bf36a8d2a9137a1696d9d3b6b1f872f780e02aa8ec5bba") +} + +const ( + // internal whisper protocol codes + statusCode = 0 + p2pRequestCompleteCode = 125 +) + +type WhisperNodeMockSuite struct { + suite.Suite + + localWhisperAPI *whisper.PublicWhisperAPI + localAPI *PublicAPI + localNode *enode.Node + remoteRW *p2p.MsgPipeRW + + localService *Service +} + +func (s *WhisperNodeMockSuite) SetupTest() { + db, err := leveldb.Open(storage.NewMemStorage(), nil) + s.Require().NoError(err) + conf := &whisper.Config{ + MinimumAcceptedPOW: 0, + MaxMessageSize: 100 << 10, + } + w := whisper.New(conf) + s.Require().NoError(w.Start(nil)) + pkey, err := crypto.GenerateKey() + s.Require().NoError(err) + node := enode.NewV4(&pkey.PublicKey, net.ParseIP("127.0.0.1"), 1, 1) + peer := p2p.NewPeer(node.ID(), "1", []p2p.Cap{{"shh", 6}}) + rw1, rw2 := p2p.MsgPipe() + errorc := make(chan error, 1) + go func() { + err := w.HandlePeer(peer, rw2) + errorc <- err + }() + whisperWrapper := gethbridge.NewGethWhisperWrapper(w) + s.Require().NoError(p2p.ExpectMsg(rw1, statusCode, []interface{}{ + whisper.ProtocolVersion, + math.Float64bits(whisperWrapper.MinPow()), + whisperWrapper.BloomFilter(), + false, + true, + whisper.RateLimits{}, + })) + s.Require().NoError(p2p.SendItems( + rw1, + 
statusCode, + whisper.ProtocolVersion, + whisper.ProtocolVersion, + math.Float64bits(whisperWrapper.MinPow()), + whisperWrapper.BloomFilter(), + true, + true, + whisper.RateLimits{}, + )) + + nodeWrapper := ext.NewTestNodeWrapper(whisperWrapper, nil) + s.localService = New( + params.ShhextConfig{MailServerConfirmations: true, MaxMessageDeliveryAttempts: 3}, + nodeWrapper, + nil, + nil, + db, + ) + s.Require().NoError(s.localService.UpdateMailservers([]*enode.Node{node})) + + s.localWhisperAPI = whisper.NewPublicWhisperAPI(w) + s.localAPI = NewPublicAPI(s.localService) + s.localNode = node + s.remoteRW = rw1 +} + +func TestRequestMessagesSync(t *testing.T) { + suite.Run(t, new(RequestMessagesSyncSuite)) +} + +type RequestMessagesSyncSuite struct { + WhisperNodeMockSuite +} + +func (s *RequestMessagesSyncSuite) TestExpired() { + // intentionally discarding all requests, so that request will timeout + go func() { + msg, err := s.remoteRW.ReadMsg() + s.Require().NoError(err) + s.Require().NoError(msg.Discard()) + }() + _, err := s.localAPI.RequestMessagesSync( + ext.RetryConfig{ + BaseTimeout: time.Second, + }, + ext.MessagesRequest{ + MailServerPeer: s.localNode.String(), + }, + ) + s.Require().EqualError(err, "failed to request messages after 1 retries") +} + +func (s *RequestMessagesSyncSuite) testCompletedFromAttempt(target int) { + const cursorSize = 36 // taken from mailserver_response.go from whisper package + cursor := [cursorSize]byte{} + cursor[0] = 0x01 + + go func() { + attempt := 0 + for { + attempt++ + msg, err := s.remoteRW.ReadMsg() + s.Require().NoError(err) + if attempt < target { + s.Require().NoError(msg.Discard()) + continue + } + var e whisper.Envelope + s.Require().NoError(msg.Decode(&e)) + s.Require().NoError(p2p.Send(s.remoteRW, p2pRequestCompleteCode, whisper.CreateMailServerRequestCompletedPayload(e.Hash(), common.Hash{}, cursor[:]))) + } + }() + resp, err := s.localAPI.RequestMessagesSync( + ext.RetryConfig{ + BaseTimeout: time.Second, + MaxRetries: target, + }, + ext.MessagesRequest{ + MailServerPeer: s.localNode.String(), + Force: true, // force true is convenient here because timeout is less then default delay (3s) + }, + ) + s.Require().NoError(err) + s.Require().Equal(ext.MessagesResponse{Cursor: hex.EncodeToString(cursor[:])}, resp) +} + +func (s *RequestMessagesSyncSuite) TestCompletedFromFirstAttempt() { + s.testCompletedFromAttempt(1) +} + +func (s *RequestMessagesSyncSuite) TestCompletedFromSecondAttempt() { + s.testCompletedFromAttempt(2) } diff --git a/services/shhext/context_geth.go b/services/shhext/context_geth.go deleted file mode 100644 index e67c887ea..000000000 --- a/services/shhext/context_geth.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !nimbus - -package shhext - -import ( - "context" - - "github.com/status-im/status-go/db" -) - -// NewContextFromService creates new context instance using Service fileds directly and Storage. -func NewContextFromService(ctx context.Context, service *Service, storage db.Storage) Context { - return NewContext(ctx, service.w.GetCurrentTime, service.requestsRegistry, storage) -} diff --git a/services/shhext/history.go b/services/shhext/history.go deleted file mode 100644 index f28665385..000000000 --- a/services/shhext/history.go +++ /dev/null @@ -1,19 +0,0 @@ -package shhext - -import ( - "time" - - "github.com/status-im/status-go/eth-node/types" -) - -const ( - // WhisperTimeAllowance is needed to ensure that we won't miss envelopes that were - // delivered to mail server after we made a request. 
- WhisperTimeAllowance = 20 * time.Second -) - -// TopicRequest defines what user has to provide. -type TopicRequest struct { - Topic types.TopicType - Duration time.Duration -} diff --git a/services/shhext/history_geth.go b/services/shhext/history_geth.go deleted file mode 100644 index 0a21d8152..000000000 --- a/services/shhext/history_geth.go +++ /dev/null @@ -1,340 +0,0 @@ -// +build !nimbus - -package shhext - -import ( - "errors" - "fmt" - "sort" - "sync" - "time" - - "github.com/ethereum/go-ethereum/rlp" - - "github.com/status-im/status-go/db" - "github.com/status-im/status-go/eth-node/types" - "github.com/status-im/status-go/mailserver" -) - -// NewHistoryUpdateReactor creates HistoryUpdateReactor instance. -func NewHistoryUpdateReactor() *HistoryUpdateReactor { - return &HistoryUpdateReactor{} -} - -// HistoryUpdateReactor responsible for tracking progress for all history requests. -// It listens for 2 events: -// - when envelope from mail server is received we will update appropriate topic on disk -// - when confirmation for request completion is received - we will set last envelope timestamp as the last timestamp -// for all TopicLists in current request. -type HistoryUpdateReactor struct { - mu sync.Mutex -} - -// UpdateFinishedRequest removes successfully finished request and updates every topic -// attached to the request. -func (reactor *HistoryUpdateReactor) UpdateFinishedRequest(ctx Context, id types.Hash) error { - reactor.mu.Lock() - defer reactor.mu.Unlock() - req, err := ctx.HistoryStore().GetRequest(id) - if err != nil { - return err - } - for i := range req.Histories() { - th := &req.Histories()[i] - th.RequestID = types.Hash{} - th.Current = th.End - th.End = time.Time{} - if err := th.Save(); err != nil { - return err - } - } - return req.Delete() -} - -// UpdateTopicHistory updates Current timestamp for the TopicHistory with a given timestamp. -func (reactor *HistoryUpdateReactor) UpdateTopicHistory(ctx Context, topic types.TopicType, timestamp time.Time) error { - reactor.mu.Lock() - defer reactor.mu.Unlock() - histories, err := ctx.HistoryStore().GetHistoriesByTopic(topic) - if err != nil { - return err - } - if len(histories) == 0 { - return fmt.Errorf("no histories for topic 0x%x", topic) - } - for i := range histories { - th := &histories[i] - // this case could happen only iff envelopes were delivered out of order - // last envelope received, request completed, then others envelopes received - // request completed, last envelope received, and then all others envelopes received - if !th.Pending() { - continue - } - if timestamp.Before(th.End) && timestamp.After(th.Current) { - th.Current = timestamp - } - err := th.Save() - if err != nil { - return err - } - } - return nil -} - -// CreateRequests receives list of topic with desired timestamps and initiates both pending requests and requests -// that cover new topics. 
-func (reactor *HistoryUpdateReactor) CreateRequests(ctx Context, topicRequests []TopicRequest) ([]db.HistoryRequest, error) { - reactor.mu.Lock() - defer reactor.mu.Unlock() - seen := map[types.TopicType]struct{}{} - for i := range topicRequests { - if _, exist := seen[topicRequests[i].Topic]; exist { - return nil, errors.New("only one duration per topic is allowed") - } - seen[topicRequests[i].Topic] = struct{}{} - } - histories := map[types.TopicType]db.TopicHistory{} - for i := range topicRequests { - th, err := ctx.HistoryStore().GetHistory(topicRequests[i].Topic, topicRequests[i].Duration) - if err != nil { - return nil, err - } - histories[th.Topic] = th - } - requests, err := ctx.HistoryStore().GetAllRequests() - if err != nil { - return nil, err - } - filtered := []db.HistoryRequest{} - for i := range requests { - req := requests[i] - for _, th := range histories { - if th.Pending() { - delete(histories, th.Topic) - } - } - if !ctx.RequestRegistry().Has(req.ID) { - filtered = append(filtered, req) - } - } - adjusted, err := adjustRequestedHistories(ctx.HistoryStore(), mapToList(histories)) - if err != nil { - return nil, err - } - filtered = append(filtered, - GroupHistoriesByRequestTimespan(ctx.HistoryStore(), adjusted)...) - return RenewRequests(filtered, ctx.Time()), nil -} - -// for every history that is not included in any request check if there are other ranges with such topic in db -// if so check if they can be merged -// if not then adjust second part so that End of it will be equal to First of previous -func adjustRequestedHistories(store db.HistoryStore, histories []db.TopicHistory) ([]db.TopicHistory, error) { - adjusted := []db.TopicHistory{} - for i := range histories { - all, err := store.GetHistoriesByTopic(histories[i].Topic) - if err != nil { - return nil, err - } - th, err := adjustRequestedHistory(&histories[i], all...) - if err != nil { - return nil, err - } - if th != nil { - adjusted = append(adjusted, *th) - } - } - return adjusted, nil -} - -func adjustRequestedHistory(th *db.TopicHistory, others ...db.TopicHistory) (*db.TopicHistory, error) { - sort.Slice(others, func(i, j int) bool { - return others[i].Duration > others[j].Duration - }) - if len(others) == 1 && others[0].Duration == th.Duration { - return th, nil - } - for j := range others { - if others[j].Duration == th.Duration { - // skip instance with same duration - continue - } else if th.Duration > others[j].Duration { - if th.Current.Equal(others[j].First) { - // this condition will be reached when query for new index successfully finished - th.Current = others[j].Current - // FIXME next two db operations must be completed atomically - err := th.Save() - if err != nil { - return nil, err - } - err = others[j].Delete() - if err != nil { - return nil, err - } - } else if (others[j].First != time.Time{}) { - // select First timestamp with lowest value. 
if there are multiple indexes that cover such ranges: - // 6:00 - 7:00 Duration: 3h - // 7:00 - 8:00 2h - // 8:00 - 9:00 1h - // and client created new index with Duration 4h - // 4h index must have End value set to 6:00 - if (others[j].First.Before(th.End) || th.End == time.Time{}) { - th.End = others[j].First - } - } else { - // remove previous if it is covered by new one - // client created multiple indexes without any succsefully executed query - err := others[j].Delete() - if err != nil { - return nil, err - } - } - } else if th.Duration < others[j].Duration { - if !others[j].Pending() { - th = &others[j] - } else { - return nil, nil - } - } - } - return th, nil -} - -// RenewRequests re-sets current, first and end timestamps. -// Changes should not be persisted on disk in this method. -func RenewRequests(requests []db.HistoryRequest, now time.Time) []db.HistoryRequest { - zero := time.Time{} - for i := range requests { - req := requests[i] - histories := req.Histories() - for j := range histories { - history := &histories[j] - if history.Current == zero { - history.Current = now.Add(-(history.Duration)) - } - if history.First == zero { - history.First = history.Current - } - if history.End == zero { - history.End = now - } - } - } - return requests -} - -// CreateTopicOptionsFromRequest transforms histories attached to a single request to a simpler format - TopicOptions. -func CreateTopicOptionsFromRequest(req db.HistoryRequest) TopicOptions { - histories := req.Histories() - rst := make(TopicOptions, len(histories)) - for i := range histories { - history := histories[i] - rst[i] = TopicOption{ - Topic: history.Topic, - Range: Range{ - Start: uint64(history.Current.Add(-(WhisperTimeAllowance)).Unix()), - End: uint64(history.End.Unix()), - }, - } - } - return rst -} - -func mapToList(topics map[types.TopicType]db.TopicHistory) []db.TopicHistory { - rst := make([]db.TopicHistory, 0, len(topics)) - for key := range topics { - rst = append(rst, topics[key]) - } - return rst -} - -// GroupHistoriesByRequestTimespan creates requests from provided histories. -// Multiple histories will be included into the same request only if they share timespan. -func GroupHistoriesByRequestTimespan(store db.HistoryStore, histories []db.TopicHistory) []db.HistoryRequest { - requests := []db.HistoryRequest{} - for _, th := range histories { - var added bool - for i := range requests { - req := &requests[i] - histories := req.Histories() - if histories[0].SameRange(th) { - req.AddHistory(th) - added = true - } - } - if !added { - req := store.NewRequest() - req.AddHistory(th) - requests = append(requests, req) - } - } - return requests -} - -// Range of the request. -type Range struct { - Start uint64 - End uint64 -} - -// TopicOption request for a single topic. -type TopicOption struct { - Topic types.TopicType - Range Range -} - -// TopicOptions is a list of topic-based requsts. -type TopicOptions []TopicOption - -// ToBloomFilterOption creates bloom filter request from a list of topics. -func (options TopicOptions) ToBloomFilterOption() BloomFilterOption { - topics := make([]types.TopicType, len(options)) - var start, end uint64 - for i := range options { - opt := options[i] - topics[i] = opt.Topic - if opt.Range.Start > start { - start = opt.Range.Start - } - if opt.Range.End > end { - end = opt.Range.End - } - } - - return BloomFilterOption{ - Range: Range{Start: start, End: end}, - Filter: topicsToBloom(topics...), - } -} - -// Topics returns list of whisper TopicType attached to each TopicOption. 
-func (options TopicOptions) Topics() []types.TopicType { - rst := make([]types.TopicType, len(options)) - for i := range options { - rst[i] = options[i].Topic - } - return rst -} - -// BloomFilterOption is a request based on bloom filter. -type BloomFilterOption struct { - Range Range - Filter []byte -} - -// ToMessagesRequestPayload creates mailserver.MessagesRequestPayload and encodes it to bytes using rlp. -func (filter BloomFilterOption) ToMessagesRequestPayload() ([]byte, error) { - // TODO fix this conversion. - // we start from time.Duration which is int64, then convert to uint64 for rlp-serilizability - // why uint32 here? max uint32 is smaller than max int64 - payload := mailserver.MessagesRequestPayload{ - Lower: uint32(filter.Range.Start), - Upper: uint32(filter.Range.End), - Bloom: filter.Filter, - // Client must tell the MailServer if it supports batch responses. - // This can be removed in the future. - Batch: true, - Limit: 1000, - } - return rlp.EncodeToBytes(payload) -} diff --git a/services/shhext/history_geth_test.go b/services/shhext/history_geth_test.go deleted file mode 100644 index 27c6f6a42..000000000 --- a/services/shhext/history_geth_test.go +++ /dev/null @@ -1,360 +0,0 @@ -// +build !nimbus - -package shhext - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/ethereum/go-ethereum/rlp" - - "github.com/status-im/status-go/db" - "github.com/status-im/status-go/eth-node/types" - "github.com/status-im/status-go/mailserver" -) - -func newTestContext(t *testing.T) Context { - mdb, err := db.NewMemoryDB() - require.NoError(t, err) - return NewContext(context.Background(), time.Now, NewRequestsRegistry(0), db.NewLevelDBStorage(mdb)) -} - -func createInMemStore(t *testing.T) db.HistoryStore { - mdb, err := db.NewMemoryDB() - require.NoError(t, err) - return db.NewHistoryStore(db.NewLevelDBStorage(mdb)) -} - -func TestRenewRequest(t *testing.T) { - req := db.HistoryRequest{} - duration := time.Hour - req.AddHistory(db.TopicHistory{Duration: duration}) - - firstNow := time.Now() - RenewRequests([]db.HistoryRequest{req}, firstNow) - - initial := firstNow.Add(-duration).Unix() - - th := req.Histories()[0] - require.Equal(t, initial, th.Current.Unix()) - require.Equal(t, initial, th.First.Unix()) - require.Equal(t, firstNow.Unix(), th.End.Unix()) - - secondNow := time.Now() - RenewRequests([]db.HistoryRequest{req}, secondNow) - - require.Equal(t, initial, th.Current.Unix()) - require.Equal(t, initial, th.First.Unix()) - require.Equal(t, secondNow.Unix(), th.End.Unix()) -} - -func TestCreateTopicOptionsFromRequest(t *testing.T) { - req := db.HistoryRequest{} - topic := types.TopicType{1} - now := time.Now() - req.AddHistory(db.TopicHistory{Topic: topic, Current: now, End: now}) - options := CreateTopicOptionsFromRequest(req) - require.Len(t, options, len(req.Histories()), - "length must be equal to the number of topic histories attached to request") - require.Equal(t, topic, options[0].Topic) - require.Equal(t, uint64(now.Add(-WhisperTimeAllowance).Unix()), options[0].Range.Start, - "start of the range must be adjusted by the whisper time allowance") - require.Equal(t, uint64(now.Unix()), options[0].Range.End) -} - -func TestTopicOptionsToBloom(t *testing.T) { - options := TopicOptions{ - {Topic: types.TopicType{1}, Range: Range{Start: 1, End: 10}}, - {Topic: types.TopicType{2}, Range: Range{Start: 3, End: 12}}, - } - bloom := options.ToBloomFilterOption() - require.Equal(t, uint64(3), 
bloom.Range.Start, "Start must be the latest Start across all options") - require.Equal(t, uint64(12), bloom.Range.End, "End must be the latest End across all options") - require.Equal(t, topicsToBloom(options[0].Topic, options[1].Topic), bloom.Filter) -} - -func TestBloomFilterToMessageRequestPayload(t *testing.T) { - var ( - start uint32 = 10 - end uint32 = 20 - filter = []byte{1, 1, 1, 1} - message = mailserver.MessagesRequestPayload{ - Lower: start, - Upper: end, - Bloom: filter, - Batch: true, - Limit: 1000, - } - bloomOption = BloomFilterOption{ - Filter: filter, - Range: Range{ - Start: uint64(start), - End: uint64(end), - }, - } - ) - expected, err := rlp.EncodeToBytes(message) - require.NoError(t, err) - payload, err := bloomOption.ToMessagesRequestPayload() - require.NoError(t, err) - require.Equal(t, expected, payload) -} - -func TestCreateRequestsEmptyState(t *testing.T) { - ctx := newTestContext(t) - reactor := NewHistoryUpdateReactor() - requests, err := reactor.CreateRequests(ctx, []TopicRequest{ - {Topic: types.TopicType{1}, Duration: time.Hour}, - {Topic: types.TopicType{2}, Duration: time.Hour}, - {Topic: types.TopicType{3}, Duration: 10 * time.Hour}, - }) - require.NoError(t, err) - require.Len(t, requests, 2) - var ( - oneTopic, twoTopic db.HistoryRequest - ) - if len(requests[0].Histories()) == 1 { - oneTopic, twoTopic = requests[0], requests[1] - } else { - oneTopic, twoTopic = requests[1], requests[0] - } - require.Len(t, oneTopic.Histories(), 1) - require.Len(t, twoTopic.Histories(), 2) - -} - -func TestCreateRequestsWithExistingRequest(t *testing.T) { - ctx := newTestContext(t) - store := ctx.HistoryStore() - req := store.NewRequest() - req.ID = types.Hash{1} - th := store.NewHistory(types.TopicType{1}, time.Hour) - req.AddHistory(th) - require.NoError(t, req.Save()) - reactor := NewHistoryUpdateReactor() - requests, err := reactor.CreateRequests(ctx, []TopicRequest{ - {Topic: types.TopicType{1}, Duration: time.Hour}, - {Topic: types.TopicType{2}, Duration: time.Hour}, - {Topic: types.TopicType{3}, Duration: time.Hour}, - }) - require.NoError(t, err) - require.Len(t, requests, 2) - - var ( - oneTopic, twoTopic db.HistoryRequest - ) - if len(requests[0].Histories()) == 1 { - oneTopic, twoTopic = requests[0], requests[1] - } else { - oneTopic, twoTopic = requests[1], requests[0] - } - assert.Len(t, oneTopic.Histories(), 1) - assert.Len(t, twoTopic.Histories(), 2) -} - -func TestCreateMultiRequestsWithSameTopic(t *testing.T) { - ctx := newTestContext(t) - store := ctx.HistoryStore() - reactor := NewHistoryUpdateReactor() - topic := types.TopicType{1} - requests, err := reactor.CreateRequests(ctx, []TopicRequest{ - {Topic: topic, Duration: time.Hour}, - }) - require.NoError(t, err) - require.Len(t, requests, 1) - requests[0].ID = types.Hash{1} - require.NoError(t, requests[0].Save()) - - // duration changed. 
request wasn't finished - requests, err = reactor.CreateRequests(ctx, []TopicRequest{ - {Topic: topic, Duration: 10 * time.Hour}, - }) - require.NoError(t, err) - require.Len(t, requests, 2) - longest := 0 - for i := range requests { - r := &requests[i] - r.ID = types.Hash{byte(i)} - require.NoError(t, r.Save()) - require.Len(t, r.Histories(), 1) - if r.Histories()[0].Duration == 10*time.Hour { - longest = i - } - } - require.Equal(t, requests[longest].Histories()[0].End, requests[longest^1].Histories()[0].First) - - for _, r := range requests { - require.NoError(t, reactor.UpdateFinishedRequest(ctx, r.ID)) - } - requests, err = reactor.CreateRequests(ctx, []TopicRequest{ - {Topic: topic, Duration: 10 * time.Hour}, - }) - require.NoError(t, err) - require.Len(t, requests, 1) - - topics, err := store.GetHistoriesByTopic(topic) - require.NoError(t, err) - require.Len(t, topics, 1) - require.Equal(t, 10*time.Hour, topics[0].Duration) -} - -func TestRequestFinishedUpdate(t *testing.T) { - ctx := newTestContext(t) - store := ctx.HistoryStore() - req := store.NewRequest() - req.ID = types.Hash{1} - now := ctx.Time() - thOne := store.NewHistory(types.TopicType{1}, time.Hour) - thOne.End = now - thTwo := store.NewHistory(types.TopicType{2}, time.Hour) - thTwo.End = now - req.AddHistory(thOne) - req.AddHistory(thTwo) - require.NoError(t, req.Save()) - - reactor := NewHistoryUpdateReactor() - require.NoError(t, reactor.UpdateTopicHistory(ctx, thOne.Topic, now.Add(-time.Minute))) - require.NoError(t, reactor.UpdateFinishedRequest(ctx, req.ID)) - _, err := store.GetRequest(req.ID) - require.EqualError(t, err, "leveldb: not found") - - require.NoError(t, thOne.Load()) - require.NoError(t, thTwo.Load()) - require.Equal(t, now.Unix(), thOne.Current.Unix()) - require.Equal(t, now.Unix(), thTwo.Current.Unix()) -} - -func TestTopicHistoryUpdate(t *testing.T) { - ctx := newTestContext(t) - store := ctx.HistoryStore() - reqID := types.Hash{1} - request := store.NewRequest() - request.ID = reqID - now := time.Now() - require.NoError(t, request.Save()) - th := store.NewHistory(types.TopicType{1}, time.Hour) - th.RequestID = request.ID - th.End = now - require.NoError(t, th.Save()) - reactor := NewHistoryUpdateReactor() - timestamp := now.Add(-time.Minute) - - require.NoError(t, reactor.UpdateTopicHistory(ctx, th.Topic, timestamp)) - require.NoError(t, th.Load()) - require.Equal(t, timestamp.Unix(), th.Current.Unix()) - - require.NoError(t, reactor.UpdateTopicHistory(ctx, th.Topic, now)) - require.NoError(t, th.Load()) - require.Equal(t, timestamp.Unix(), th.Current.Unix()) -} - -func TestGroupHistoriesByRequestTimestamp(t *testing.T) { - requests := GroupHistoriesByRequestTimespan(createInMemStore(t), []db.TopicHistory{ - {Topic: types.TopicType{1}, Duration: time.Hour}, - {Topic: types.TopicType{2}, Duration: time.Hour}, - {Topic: types.TopicType{3}, Duration: 2 * time.Hour}, - {Topic: types.TopicType{4}, Duration: 2 * time.Hour}, - {Topic: types.TopicType{5}, Duration: 3 * time.Hour}, - {Topic: types.TopicType{6}, Duration: 3 * time.Hour}, - }) - require.Len(t, requests, 3) - for _, req := range requests { - require.Len(t, req.Histories(), 2) - } -} - -// initial creation of the history index. 
no other histories in store -func TestAdjustHistoryWithNoOtherHistories(t *testing.T) { - store := createInMemStore(t) - th := store.NewHistory(types.TopicType{1}, time.Hour) - adjusted, err := adjustRequestedHistories(store, []db.TopicHistory{th}) - require.NoError(t, err) - require.Len(t, adjusted, 1) - require.Equal(t, th.Topic, adjusted[0].Topic) -} - -// Duration for the history index with same topic was gradually incresed: -// {Duration: 1h} {Duration: 2h} {Duration: 3h} -// But actual request wasn't sent -// So when we receive {Duration: 4h} we can merge all of them into single index -// that covers all of them e.g. {Duration: 4h} -func TestAdjustHistoryWithExistingLowerRanges(t *testing.T) { - store := createInMemStore(t) - topic := types.TopicType{1} - histories := make([]db.TopicHistory, 3) - i := 0 - for i = range histories { - histories[i] = store.NewHistory(topic, time.Duration(i+1)*time.Hour) - require.NoError(t, histories[i].Save()) - } - i++ - th := store.NewHistory(topic, time.Duration(i+1)*time.Hour) - adjusted, err := adjustRequestedHistories(store, []db.TopicHistory{th}) - require.NoError(t, err) - require.Len(t, adjusted, 1) - require.Equal(t, th.Duration, adjusted[0].Duration) - - all, err := store.GetHistoriesByTopic(topic) - require.NoError(t, err) - require.Len(t, all, 1) - require.Equal(t, th.Duration, all[0].Duration) -} - -// Precondition is based on the previous test. We have same information in the database -// but now every history index request was successfully completed. And End timstamp is set to the First of the next index. -// So, we have: -// {First: now-1h, End: now} {First: now-2h, End: now-1h} {First: now-3h: End: now-2h} -// When we want to create new request with {Duration: 4h} -// We see that there is no reason to keep all indexes and we can squash them. 
-func TestAdjustHistoriesWithExistingCoveredLowerRanges(t *testing.T) { - store := createInMemStore(t) - topic := types.TopicType{1} - histories := make([]db.TopicHistory, 3) - i := 0 - now := time.Now() - for i = range histories { - duration := time.Duration(i+1) * time.Hour - prevduration := time.Duration(i) * time.Hour - histories[i] = store.NewHistory(topic, duration) - histories[i].First = now.Add(-duration) - histories[i].Current = now.Add(-prevduration) - require.NoError(t, histories[i].Save()) - } - i++ - th := store.NewHistory(topic, time.Duration(i+1)*time.Hour) - th.Current = now.Add(-time.Duration(i) * time.Hour) - adjusted, err := adjustRequestedHistories(store, []db.TopicHistory{th}) - require.NoError(t, err) - require.Len(t, adjusted, 1) - require.Equal(t, th.Duration, adjusted[0].Duration) -} - -func TestAdjustHistoryReplaceTopicWithHigherDuration(t *testing.T) { - store := createInMemStore(t) - topic := types.TopicType{1} - hour := store.NewHistory(topic, time.Hour) - require.NoError(t, hour.Save()) - minute := store.NewHistory(topic, time.Minute) - adjusted, err := adjustRequestedHistories(store, []db.TopicHistory{minute}) - require.NoError(t, err) - require.Len(t, adjusted, 1) - require.Equal(t, hour.Duration, adjusted[0].Duration) -} - -// if client requested lower duration than the one we have in the index already it will -// it will be discarded and we will use existing index -func TestAdjustHistoryRemoveTopicIfPendingWithHigherDuration(t *testing.T) { - store := createInMemStore(t) - topic := types.TopicType{1} - hour := store.NewHistory(topic, time.Hour) - hour.RequestID = types.Hash{1} - require.NoError(t, hour.Save()) - minute := store.NewHistory(topic, time.Minute) - adjusted, err := adjustRequestedHistories(store, []db.TopicHistory{minute}) - require.NoError(t, err) - require.Len(t, adjusted, 0) -} diff --git a/services/shhext/service.go b/services/shhext/service.go index 0449f8c9b..6c20a4138 100644 --- a/services/shhext/service.go +++ b/services/shhext/service.go @@ -4,335 +4,50 @@ package shhext import ( "context" - "crypto/ecdsa" - "database/sql" "fmt" - "math/big" - "os" - "path/filepath" "time" - "github.com/status-im/status-go/logutils" + "github.com/syndtr/goleveldb/leveldb" - commongethtypes "github.com/ethereum/go-ethereum/common" - gethtypes "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/node" - "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/rpc" - "github.com/status-im/status-go/db" - "github.com/status-im/status-go/multiaccounts/accounts" - "github.com/status-im/status-go/params" - "github.com/status-im/status-go/services/shhext/mailservers" - "github.com/status-im/status-go/services/wallet" - "github.com/status-im/status-go/signal" - - "github.com/syndtr/goleveldb/leveldb" - "go.uber.org/zap" - - coretypes "github.com/status-im/status-go/eth-node/core/types" "github.com/status-im/status-go/eth-node/types" - "github.com/status-im/status-go/protocol" - "github.com/status-im/status-go/protocol/transport" + "github.com/status-im/status-go/params" + "github.com/status-im/status-go/services/ext" ) -const ( - // defaultConnectionsTarget used in Service.Start if configured connection target is 0. - defaultConnectionsTarget = 1 - // defaultTimeoutWaitAdded is a timeout to use to establish initial connections. 
- defaultTimeoutWaitAdded = 5 * time.Second -) - -// EnvelopeEventsHandler used for two different event types. -type EnvelopeEventsHandler interface { - EnvelopeSent([][]byte) - EnvelopeExpired([][]byte, error) - MailServerRequestCompleted(types.Hash, types.Hash, []byte, error) - MailServerRequestExpired(types.Hash) -} - -// Service is a service that provides some additional Whisper API. type Service struct { - apiName string - messenger *protocol.Messenger - identity *ecdsa.PrivateKey - cancelMessenger chan struct{} - storage db.TransactionalStorage - n types.Node - w types.Whisper - config params.ShhextConfig - mailMonitor *MailRequestMonitor - requestsRegistry *RequestsRegistry - historyUpdates *HistoryUpdateReactor - server *p2p.Server - nodeID *ecdsa.PrivateKey - peerStore *mailservers.PeerStore - cache *mailservers.Cache - connManager *mailservers.ConnectionManager - lastUsedMonitor *mailservers.LastUsedConnectionMonitor - accountsDB *accounts.Database + *ext.Service + w types.Whisper } -// Make sure that Service implements node.Service interface. -var _ node.Service = (*Service)(nil) - -// New returns a new shhext Service. -func New(n types.Node, ctx interface{}, apiName string, handler EnvelopeEventsHandler, ldb *leveldb.DB, config params.ShhextConfig) *Service { +func New(config params.ShhextConfig, n types.Node, ctx interface{}, handler ext.EnvelopeEventsHandler, ldb *leveldb.DB) *Service { w, err := n.GetWhisper(ctx) if err != nil { panic(err) } - cache := mailservers.NewCache(ldb) - ps := mailservers.NewPeerStore(cache) - delay := defaultRequestsDelay + delay := ext.DefaultRequestsDelay if config.RequestsDelay != 0 { delay = config.RequestsDelay } - requestsRegistry := NewRequestsRegistry(delay) - historyUpdates := NewHistoryUpdateReactor() - mailMonitor := &MailRequestMonitor{ - w: w, - handler: handler, - cache: map[types.Hash]EnvelopeState{}, - requestsRegistry: requestsRegistry, - } + requestsRegistry := ext.NewRequestsRegistry(delay) + mailMonitor := ext.NewMailRequestMonitor(w, handler, requestsRegistry) return &Service{ - apiName: apiName, - storage: db.NewLevelDBStorage(ldb), - n: n, - w: w, - config: config, - mailMonitor: mailMonitor, - requestsRegistry: requestsRegistry, - historyUpdates: historyUpdates, - peerStore: ps, - cache: cache, + Service: ext.New(config, n, ldb, mailMonitor, requestsRegistry, w), + w: w, } } -func (s *Service) InitProtocol(identity *ecdsa.PrivateKey, db *sql.DB) error { // nolint: gocyclo - if !s.config.PFSEnabled { - return nil - } - - // If Messenger has been already set up, we need to shut it down - // before we init it again. Otherwise, it will lead to goroutines leakage - // due to not stopped filters. - if s.messenger != nil { - if err := s.messenger.Shutdown(); err != nil { - return err - } - } - - s.identity = identity - - dataDir := filepath.Clean(s.config.BackupDisabledDataDir) - - if err := os.MkdirAll(dataDir, os.ModePerm); err != nil { - return err - } - - // Create a custom zap.Logger which will forward logs from status-go/protocol to status-go logger. 
- zapLogger, err := logutils.NewZapLoggerWithAdapter(logutils.Logger()) - if err != nil { - return err - } - - envelopesMonitorConfig := &transport.EnvelopesMonitorConfig{ - MaxAttempts: s.config.MaxMessageDeliveryAttempts, - MailserverConfirmationsEnabled: s.config.MailServerConfirmations, - IsMailserver: func(peer types.EnodeID) bool { - return s.peerStore.Exist(peer) - }, - EnvelopeEventsHandler: EnvelopeSignalHandler{}, - Logger: zapLogger, - } - options := buildMessengerOptions(s.config, db, envelopesMonitorConfig, zapLogger) - - messenger, err := protocol.NewMessenger( - identity, - s.n, - s.config.InstallationID, - options..., - ) - if err != nil { - return err - } - s.accountsDB = accounts.NewDB(db) - s.messenger = messenger - // Start a loop that retrieves all messages and propagates them to status-react. - s.cancelMessenger = make(chan struct{}) - go s.retrieveMessagesLoop(time.Second, s.cancelMessenger) - go s.verifyTransactionLoop(30*time.Second, s.cancelMessenger) - - return s.messenger.Init() -} - -func (s *Service) retrieveMessagesLoop(tick time.Duration, cancel <-chan struct{}) { - ticker := time.NewTicker(tick) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - response, err := s.messenger.RetrieveAll() - if err != nil { - log.Error("failed to retrieve raw messages", "err", err) - continue - } - if !response.IsEmpty() { - PublisherSignalHandler{}.NewMessages(response) - } - case <-cancel: - return - } - } -} - -type verifyTransactionClient struct { - chainID *big.Int - url string -} - -func (c *verifyTransactionClient) TransactionByHash(ctx context.Context, hash types.Hash) (coretypes.Message, coretypes.TransactionStatus, error) { - signer := gethtypes.NewEIP155Signer(c.chainID) - client, err := ethclient.Dial(c.url) - if err != nil { - return coretypes.Message{}, coretypes.TransactionStatusPending, err - } - - transaction, pending, err := client.TransactionByHash(ctx, commongethtypes.BytesToHash(hash.Bytes())) - if err != nil { - return coretypes.Message{}, coretypes.TransactionStatusPending, err - } - - message, err := transaction.AsMessage(signer) - if err != nil { - return coretypes.Message{}, coretypes.TransactionStatusPending, err - } - from := types.BytesToAddress(message.From().Bytes()) - to := types.BytesToAddress(message.To().Bytes()) - - if pending { - return coretypes.NewMessage( - from, - &to, - message.Nonce(), - message.Value(), - message.Gas(), - message.GasPrice(), - message.Data(), - message.CheckNonce(), - ), coretypes.TransactionStatusPending, nil - } - - receipt, err := client.TransactionReceipt(ctx, commongethtypes.BytesToHash(hash.Bytes())) - if err != nil { - return coretypes.Message{}, coretypes.TransactionStatusPending, err - } - - coremessage := coretypes.NewMessage( - from, - &to, - message.Nonce(), - message.Value(), - message.Gas(), - message.GasPrice(), - message.Data(), - message.CheckNonce(), - ) - - // Token transfer, check the logs - if len(coremessage.Data()) != 0 { - if wallet.IsTokenTransfer(receipt.Logs) { - return coremessage, coretypes.TransactionStatus(receipt.Status), nil - } else { - return coremessage, coretypes.TransactionStatusFailed, nil - } - - } - - return coremessage, coretypes.TransactionStatus(receipt.Status), nil - -} - -func (s *Service) verifyTransactionLoop(tick time.Duration, cancel <-chan struct{}) { - if s.config.VerifyTransactionURL == "" { - log.Warn("not starting transaction loop") - return - } - - ticker := time.NewTicker(tick) - defer ticker.Stop() - - ctx, cancelVerifyTransaction := 
context.WithCancel(context.Background()) - - for { - select { - case <-ticker.C: - accounts, err := s.accountsDB.GetAccounts() - if err != nil { - log.Error("failed to retrieve accounts", "err", err) - } - var wallets []types.Address - for _, account := range accounts { - if account.Wallet { - wallets = append(wallets, types.BytesToAddress(account.Address.Bytes())) - } - } - - response, err := s.messenger.ValidateTransactions(ctx, wallets) - if err != nil { - log.Error("failed to validate transactions", "err", err) - continue - } - if !response.IsEmpty() { - PublisherSignalHandler{}.NewMessages(response) - } - case <-cancel: - cancelVerifyTransaction() - return - } - } -} - -func (s *Service) ConfirmMessagesProcessed(messageIDs [][]byte) error { - return s.messenger.ConfirmMessagesProcessed(messageIDs) -} - -func (s *Service) EnableInstallation(installationID string) error { - return s.messenger.EnableInstallation(installationID) -} - -// DisableInstallation disables an installation for multi-device sync. -func (s *Service) DisableInstallation(installationID string) error { - return s.messenger.DisableInstallation(installationID) -} - -// UpdateMailservers updates information about selected mail servers. -func (s *Service) UpdateMailservers(nodes []*enode.Node) error { - if err := s.peerStore.Update(nodes); err != nil { - return err - } - if s.connManager != nil { - s.connManager.Notify(nodes) - } - return nil -} - -// Protocols returns a new protocols list. In this case, there are none. -func (s *Service) Protocols() []p2p.Protocol { - return []p2p.Protocol{} +func (s *Service) PublicWhisperAPI() types.PublicWhisperAPI { + return s.w.PublicWhisperAPI() } // APIs returns a list of new APIs. func (s *Service) APIs() []rpc.API { apis := []rpc.API{ { - Namespace: s.apiName, + Namespace: "shhext", Version: "1.0", Service: NewPublicAPI(s), Public: true, @@ -341,67 +56,7 @@ func (s *Service) APIs() []rpc.API { return apis } -// Start is run when a service is started. -// It does nothing in this case but is required by `node.Service` interface. -func (s *Service) Start(server *p2p.Server) error { - if s.config.EnableConnectionManager { - connectionsTarget := s.config.ConnectionTarget - if connectionsTarget == 0 { - connectionsTarget = defaultConnectionsTarget - } - maxFailures := s.config.MaxServerFailures - // if not defined change server on first expired event - if maxFailures == 0 { - maxFailures = 1 - } - s.connManager = mailservers.NewConnectionManager(server, s.w, connectionsTarget, maxFailures, defaultTimeoutWaitAdded) - s.connManager.Start() - if err := mailservers.EnsureUsedRecordsAddedFirst(s.peerStore, s.connManager); err != nil { - return err - } - } - if s.config.EnableLastUsedMonitor { - s.lastUsedMonitor = mailservers.NewLastUsedConnectionMonitor(s.peerStore, s.cache, s.w) - s.lastUsedMonitor.Start() - } - s.mailMonitor.Start() - s.nodeID = server.PrivateKey - s.server = server - return nil -} - -// Stop is run when a service is stopped. 
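For the waku side of this split, services/wakuext/service.go (added by this patch but not shown in this hunk) presumably mirrors the thin wrapper above. A hedged sketch under that assumption, with the waku-specific calls marked where they are guesses rather than quoted code:

package wakuext

import (
	"github.com/syndtr/goleveldb/leveldb"

	"github.com/status-im/status-go/eth-node/types"
	"github.com/status-im/status-go/params"
	"github.com/status-im/status-go/services/ext"
)

// Service wraps the shared ext.Service around a Waku instance, the same way
// the shhext Service above wraps it around Whisper.
type Service struct {
	*ext.Service
	w types.Waku
}

func New(config params.ShhextConfig, n types.Node, ctx interface{}, handler ext.EnvelopeEventsHandler, ldb *leveldb.DB) *Service {
	// Assumption: GetWaku is used here as the counterpart of GetWhisper.
	w, err := n.GetWaku(ctx)
	if err != nil {
		panic(err)
	}
	delay := ext.DefaultRequestsDelay
	if config.RequestsDelay != 0 {
		delay = config.RequestsDelay
	}
	requestsRegistry := ext.NewRequestsRegistry(delay)
	// Assumption: the shared mail monitor and ext.New accept the waku handle in
	// the same positions where shhext passes its whisper handle.
	mailMonitor := ext.NewMailRequestMonitor(w, handler, requestsRegistry)
	return &Service{
		Service: ext.New(config, n, ldb, mailMonitor, requestsRegistry, w),
		w:       w,
	}
}

The RPC registration presumably follows the same shape as APIs() above, just under a "wakuext" namespace instead of "shhext".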
-func (s *Service) Stop() error { - log.Info("Stopping shhext service") - if s.config.EnableConnectionManager { - s.connManager.Stop() - } - if s.config.EnableLastUsedMonitor { - s.lastUsedMonitor.Stop() - } - s.requestsRegistry.Clear() - s.mailMonitor.Stop() - - if s.cancelMessenger != nil { - select { - case <-s.cancelMessenger: - // channel already closed - default: - close(s.cancelMessenger) - s.cancelMessenger = nil - } - } - - if s.messenger != nil { - if err := s.messenger.Shutdown(); err != nil { - return err - } - } - - return nil -} - -func (s *Service) syncMessages(ctx context.Context, mailServerID []byte, r types.SyncMailRequest) (resp types.SyncEventResponse, err error) { +func (s *Service) SyncMessages(ctx context.Context, mailServerID []byte, r types.SyncMailRequest) (resp types.SyncEventResponse, err error) { err = s.w.SyncMessages(mailServerID, r) if err != nil { return @@ -443,52 +98,3 @@ func (s *Service) syncMessages(ctx context.Context, mailServerID []byte, r types } } } - -func onNegotiatedFilters(filters []*transport.Filter) { - var signalFilters []*signal.Filter - for _, filter := range filters { - - signalFilter := &signal.Filter{ - ChatID: filter.ChatID, - SymKeyID: filter.SymKeyID, - Listen: filter.Listen, - FilterID: filter.FilterID, - Identity: filter.Identity, - Topic: filter.Topic, - } - - signalFilters = append(signalFilters, signalFilter) - } - if len(filters) != 0 { - handler := PublisherSignalHandler{} - handler.WhisperFilterAdded(signalFilters) - } -} - -func buildMessengerOptions( - config params.ShhextConfig, - db *sql.DB, - envelopesMonitorConfig *transport.EnvelopesMonitorConfig, - logger *zap.Logger, -) []protocol.Option { - options := []protocol.Option{ - protocol.WithCustomLogger(logger), - protocol.WithDatabase(db), - protocol.WithEnvelopesMonitorConfig(envelopesMonitorConfig), - protocol.WithOnNegotiatedFilters(onNegotiatedFilters), - } - - if config.DataSyncEnabled { - options = append(options, protocol.WithDatasync()) - } - - if config.VerifyTransactionURL != "" { - client := &verifyTransactionClient{ - url: config.VerifyTransactionURL, - chainID: big.NewInt(config.VerifyTransactionChainID), - } - options = append(options, protocol.WithVerifyTransactionClient(client)) - } - - return options -} diff --git a/services/shhext/service_nimbus.go b/services/shhext/service_nimbus.go index 452dc93ec..198207758 100644 --- a/services/shhext/service_nimbus.go +++ b/services/shhext/service_nimbus.go @@ -4,358 +4,59 @@ package shhext import ( "context" - "crypto/ecdsa" - "database/sql" "fmt" - "os" - "path/filepath" "time" - "github.com/status-im/status-go/logutils" + "github.com/syndtr/goleveldb/leveldb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" - "github.com/status-im/status-go/db" - "github.com/status-im/status-go/params" - nimbussvc "github.com/status-im/status-go/services/nimbus" - "github.com/status-im/status-go/signal" - - "github.com/syndtr/goleveldb/leveldb" - "go.uber.org/zap" - "github.com/status-im/status-go/eth-node/types" - "github.com/status-im/status-go/protocol" - "github.com/status-im/status-go/protocol/transport" + "github.com/status-im/status-go/params" + "github.com/status-im/status-go/services/ext" ) -const ( - // defaultConnectionsTarget used in Service.Start if configured connection target is 0. - defaultConnectionsTarget = 1 - // defaultTimeoutWaitAdded is a timeout to use to establish initial connections. 
- defaultTimeoutWaitAdded = 5 * time.Second -) - -// EnvelopeEventsHandler used for two different event types. -type EnvelopeEventsHandler interface { - EnvelopeSent([][]byte) - EnvelopeExpired([][]byte, error) - MailServerRequestCompleted(types.Hash, types.Hash, []byte, error) - MailServerRequestExpired(types.Hash) +type Service struct { + *ext.Service + w types.Whisper } -// NimbusService is a service that provides some additional Whisper API. -type NimbusService struct { - apiName string - messenger *protocol.Messenger - identity *ecdsa.PrivateKey - cancelMessenger chan struct{} - storage db.TransactionalStorage - n types.Node - w types.Whisper - config params.ShhextConfig - // mailMonitor *MailRequestMonitor - // requestsRegistry *RequestsRegistry - // historyUpdates *HistoryUpdateReactor - // server *p2p.Server - nodeID *ecdsa.PrivateKey - // peerStore *mailservers.PeerStore - // cache *mailservers.Cache - // connManager *mailservers.ConnectionManager - // lastUsedMonitor *mailservers.LastUsedConnectionMonitor - // accountsDB *accounts.Database -} - -// Make sure that NimbusService implements nimbussvc.Service interface. -var _ nimbussvc.Service = (*NimbusService)(nil) - -// NewNimbus returns a new shhext NimbusService. -func NewNimbus(n types.Node, ctx interface{}, apiName string, ldb *leveldb.DB, config params.ShhextConfig) *NimbusService { +func New(config params.ShhextConfig, n types.Node, ctx interface{}, handler ext.EnvelopeEventsHandler, ldb *leveldb.DB) *Service { w, err := n.GetWhisper(ctx) if err != nil { panic(err) } - // cache := mailservers.NewCache(ldb) - // ps := mailservers.NewPeerStore(cache) - // delay := defaultRequestsDelay - // if config.RequestsDelay != 0 { - // delay = config.RequestsDelay - // } - // requestsRegistry := NewRequestsRegistry(delay) - // historyUpdates := NewHistoryUpdateReactor() - // mailMonitor := &MailRequestMonitor{ - // w: w, - // handler: handler, - // cache: map[types.Hash]EnvelopeState{}, - // requestsRegistry: requestsRegistry, - // } - return &NimbusService{ - apiName: apiName, - storage: db.NewLevelDBStorage(ldb), - n: n, + delay := ext.DefaultRequestsDelay + if config.RequestsDelay != 0 { + delay = config.RequestsDelay + } + requestsRegistry := ext.NewRequestsRegistry(delay) + mailMonitor := ext.NewMailRequestMonitor(w, handler, requestsRegistry) + return &Service{ + Service: ext.New(config, n, ldb, mailMonitor, requestsRegistry, w), w: w, - config: config, - // mailMonitor: mailMonitor, - // requestsRegistry: requestsRegistry, - // historyUpdates: historyUpdates, - // peerStore: ps, - // cache: cache, } } -func (s *NimbusService) InitProtocol(identity *ecdsa.PrivateKey, db *sql.DB) error { // nolint: gocyclo - if !s.config.PFSEnabled { - return nil - } - - // If Messenger has been already set up, we need to shut it down - // before we init it again. Otherwise, it will lead to goroutines leakage - // due to not stopped filters. - if s.messenger != nil { - if err := s.messenger.Shutdown(); err != nil { - return err - } - } - - s.identity = identity - - dataDir := filepath.Clean(s.config.BackupDisabledDataDir) - - if err := os.MkdirAll(dataDir, os.ModePerm); err != nil { - return err - } - - // Create a custom zap.Logger which will forward logs from status-go/protocol to status-go logger. 
- zapLogger, err := logutils.NewZapLoggerWithAdapter(logutils.Logger()) - if err != nil { - return err - } - - // envelopesMonitorConfig := &protocolwhisper.EnvelopesMonitorConfig{ - // MaxAttempts: s.config.MaxMessageDeliveryAttempts, - // MailserverConfirmationsEnabled: s.config.MailServerConfirmations, - // IsMailserver: func(peer types.EnodeID) bool { - // return s.peerStore.Exist(peer) - // }, - // EnvelopeEventsHandler: EnvelopeSignalHandler{}, - // Logger: zapLogger, - // } - options := buildMessengerOptions(s.config, db, nil, zapLogger) - - messenger, err := protocol.NewMessenger( - identity, - s.n, - s.config.InstallationID, - options..., - ) - if err != nil { - return err - } - // s.accountsDB = accounts.NewDB(db) - s.messenger = messenger - // Start a loop that retrieves all messages and propagates them to status-react. - s.cancelMessenger = make(chan struct{}) - go s.retrieveMessagesLoop(time.Second, s.cancelMessenger) - // go s.verifyTransactionLoop(30*time.Second, s.cancelMessenger) - - return s.messenger.Init() +func (s *Service) PublicWhisperAPI() types.PublicWhisperAPI { + return s.w.PublicWhisperAPI() } -func (s *NimbusService) retrieveMessagesLoop(tick time.Duration, cancel <-chan struct{}) { - ticker := time.NewTicker(tick) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - response, err := s.messenger.RetrieveAll() - if err != nil { - log.Error("failed to retrieve raw messages", "err", err) - continue - } - if !response.IsEmpty() { - PublisherSignalHandler{}.NewMessages(response) - } - case <-cancel: - return - } - } -} - -// type verifyTransactionClient struct { -// chainID *big.Int -// url string -// } - -// func (c *verifyTransactionClient) TransactionByHash(ctx context.Context, hash types.Hash) (coretypes.Message, bool, error) { -// signer := gethtypes.NewEIP155Signer(c.chainID) -// client, err := ethclient.Dial(c.url) -// if err != nil { -// return coretypes.Message{}, false, err -// } - -// transaction, pending, err := client.TransactionByHash(ctx, commongethtypes.BytesToHash(hash.Bytes())) -// if err != nil { -// return coretypes.Message{}, false, err -// } - -// message, err := transaction.AsMessage(signer) -// if err != nil { -// return coretypes.Message{}, false, err -// } -// from := types.BytesToAddress(message.From().Bytes()) -// to := types.BytesToAddress(message.To().Bytes()) - -// return coretypes.NewMessage( -// from, -// &to, -// message.Nonce(), -// message.Value(), -// message.Gas(), -// message.GasPrice(), -// message.Data(), -// message.CheckNonce(), -// ), pending, nil -// } - -// func (s *Service) verifyTransactionLoop(tick time.Duration, cancel <-chan struct{}) { -// if s.config.VerifyTransactionURL == "" { -// log.Warn("not starting transaction loop") -// return -// } - -// ticker := time.NewTicker(tick) -// defer ticker.Stop() - -// ctx, cancelVerifyTransaction := context.WithCancel(context.Background()) - -// for { -// select { -// case <-ticker.C: -// accounts, err := s.accountsDB.GetAccounts() -// if err != nil { -// log.Error("failed to retrieve accounts", "err", err) -// } -// var wallets []types.Address -// for _, account := range accounts { -// if account.Wallet { -// wallets = append(wallets, types.BytesToAddress(account.Address.Bytes())) -// } -// } - -// response, err := s.messenger.ValidateTransactions(ctx, wallets) -// if err != nil { -// log.Error("failed to validate transactions", "err", err) -// continue -// } -// if !response.IsEmpty() { -// PublisherSignalHandler{}.NewMessages(response) -// } -// case <-cancel: 
-// cancelVerifyTransaction() -// return -// } -// } -// } - -func (s *NimbusService) ConfirmMessagesProcessed(messageIDs [][]byte) error { - return s.messenger.ConfirmMessagesProcessed(messageIDs) -} - -func (s *NimbusService) EnableInstallation(installationID string) error { - return s.messenger.EnableInstallation(installationID) -} - -// DisableInstallation disables an installation for multi-device sync. -func (s *NimbusService) DisableInstallation(installationID string) error { - return s.messenger.DisableInstallation(installationID) -} - -// UpdateMailservers updates information about selected mail servers. -// func (s *NimbusService) UpdateMailservers(nodes []*enode.Node) error { -// // if err := s.peerStore.Update(nodes); err != nil { -// // return err -// // } -// // if s.connManager != nil { -// // s.connManager.Notify(nodes) -// // } -// return nil -// } - // APIs returns a list of new APIs. -func (s *NimbusService) APIs() []rpc.API { +func (s *Service) APIs() []rpc.API { apis := []rpc.API{ { - Namespace: s.apiName, + Namespace: "shhext", Version: "1.0", - Service: NewNimbusPublicAPI(s), + Service: NewPublicAPI(s), Public: true, }, } return apis } -// Start is run when a service is started. -// It does nothing in this case but is required by `node.NimbusService` interface. -func (s *NimbusService) StartService() error { - if s.config.EnableConnectionManager { - // connectionsTarget := s.config.ConnectionTarget - // if connectionsTarget == 0 { - // connectionsTarget = defaultConnectionsTarget - // } - // maxFailures := s.config.MaxServerFailures - // // if not defined change server on first expired event - // if maxFailures == 0 { - // maxFailures = 1 - // } - // s.connManager = mailservers.NewConnectionManager(server, s.w, connectionsTarget, maxFailures, defaultTimeoutWaitAdded) - // s.connManager.Start() - // if err := mailservers.EnsureUsedRecordsAddedFirst(s.peerStore, s.connManager); err != nil { - // return err - // } - } - if s.config.EnableLastUsedMonitor { - // s.lastUsedMonitor = mailservers.NewLastUsedConnectionMonitor(s.peerStore, s.cache, s.w) - // s.lastUsedMonitor.Start() - } - // s.mailMonitor.Start() - // s.nodeID = server.PrivateKey - // s.server = server - return nil -} - -// Stop is run when a service is stopped. 
-func (s *NimbusService) Stop() error { - log.Info("Stopping shhext service") - // if s.config.EnableConnectionManager { - // s.connManager.Stop() - // } - // if s.config.EnableLastUsedMonitor { - // s.lastUsedMonitor.Stop() - // } - // s.requestsRegistry.Clear() - // s.mailMonitor.Stop() - - if s.cancelMessenger != nil { - select { - case <-s.cancelMessenger: - // channel already closed - default: - close(s.cancelMessenger) - s.cancelMessenger = nil - } - } - - if s.messenger != nil { - if err := s.messenger.Shutdown(); err != nil { - return err - } - } - - return nil -} - -func (s *NimbusService) syncMessages(ctx context.Context, mailServerID []byte, r types.SyncMailRequest) (resp types.SyncEventResponse, err error) { +func (s *Service) SyncMessages(ctx context.Context, mailServerID []byte, r types.SyncMailRequest) (resp types.SyncEventResponse, err error) { err = s.w.SyncMessages(mailServerID, r) if err != nil { return @@ -397,52 +98,3 @@ func (s *NimbusService) syncMessages(ctx context.Context, mailServerID []byte, r } } } - -func onNegotiatedFilters(filters []*transport.Filter) { - var signalFilters []*signal.Filter - for _, filter := range filters { - - signalFilter := &signal.Filter{ - ChatID: filter.ChatID, - SymKeyID: filter.SymKeyID, - Listen: filter.Listen, - FilterID: filter.FilterID, - Identity: filter.Identity, - Topic: filter.Topic, - } - - signalFilters = append(signalFilters, signalFilter) - } - if len(filters) != 0 { - handler := PublisherSignalHandler{} - handler.WhisperFilterAdded(signalFilters) - } -} - -func buildMessengerOptions( - config params.ShhextConfig, - db *sql.DB, - envelopesMonitorConfig *transport.EnvelopesMonitorConfig, - logger *zap.Logger, -) []protocol.Option { - options := []protocol.Option{ - protocol.WithCustomLogger(logger), - protocol.WithDatabase(db), - //protocol.WithEnvelopesMonitorConfig(envelopesMonitorConfig), - protocol.WithOnNegotiatedFilters(onNegotiatedFilters), - } - - if config.DataSyncEnabled { - options = append(options, protocol.WithDatasync()) - } - - // if config.VerifyTransactionURL != "" { - // client := &verifyTransactionClient{ - // url: config.VerifyTransactionURL, - // chainID: big.NewInt(config.VerifyTransactionChainID), - // } - // options = append(options, protocol.WithVerifyTransactionClient(client)) - // } - - return options -} diff --git a/services/shhext/service_test.go b/services/shhext/service_test.go deleted file mode 100644 index 5446c8d8f..000000000 --- a/services/shhext/service_test.go +++ /dev/null @@ -1,819 +0,0 @@ -package shhext - -import ( - "context" - "encoding/hex" - "errors" - "fmt" - "io/ioutil" - "math" - "net" - "os" - "testing" - "time" - - "github.com/stretchr/testify/suite" - "github.com/syndtr/goleveldb/leveldb" - "github.com/syndtr/goleveldb/leveldb/storage" - "go.uber.org/zap" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/node" - "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/enode" - - gethbridge "github.com/status-im/status-go/eth-node/bridge/geth" - "github.com/status-im/status-go/eth-node/types" - enstypes "github.com/status-im/status-go/eth-node/types/ens" - "github.com/status-im/status-go/mailserver" - "github.com/status-im/status-go/params" - "github.com/status-im/status-go/sqlite" - "github.com/status-im/status-go/t/helpers" - "github.com/status-im/status-go/t/utils" - "github.com/status-im/status-go/whisper/v6" -) - -const ( - // 
internal whisper protocol codes - statusCode = 0 - p2pRequestCompleteCode = 125 -) - -type failureMessage struct { - IDs [][]byte - Error error -} - -func newHandlerMock(buf int) handlerMock { - return handlerMock{ - confirmations: make(chan [][]byte, buf), - expirations: make(chan failureMessage, buf), - requestsCompleted: make(chan types.Hash, buf), - requestsExpired: make(chan types.Hash, buf), - requestsFailed: make(chan types.Hash, buf), - } -} - -type handlerMock struct { - confirmations chan [][]byte - expirations chan failureMessage - requestsCompleted chan types.Hash - requestsExpired chan types.Hash - requestsFailed chan types.Hash -} - -func (t handlerMock) EnvelopeSent(ids [][]byte) { - t.confirmations <- ids -} - -func (t handlerMock) EnvelopeExpired(ids [][]byte, err error) { - t.expirations <- failureMessage{IDs: ids, Error: err} -} - -func (t handlerMock) MailServerRequestCompleted(requestID types.Hash, lastEnvelopeHash types.Hash, cursor []byte, err error) { - if err == nil { - t.requestsCompleted <- requestID - } else { - t.requestsFailed <- requestID - } -} - -func (t handlerMock) MailServerRequestExpired(hash types.Hash) { - t.requestsExpired <- hash -} - -func TestShhExtSuite(t *testing.T) { - suite.Run(t, new(ShhExtSuite)) -} - -type ShhExtSuite struct { - suite.Suite - - nodes []*node.Node - services []*Service - whisperWrapper []types.Whisper - whisper []*whisper.Whisper -} - -func (s *ShhExtSuite) SetupTest() { - s.nodes = make([]*node.Node, 2) - s.services = make([]*Service, 2) - s.whisper = make([]*whisper.Whisper, 2) - s.whisperWrapper = make([]types.Whisper, 2) - - directory, err := ioutil.TempDir("", "status-go-testing") - s.Require().NoError(err) - - for i := range s.nodes { - i := i // bind i to be usable in service constructors - cfg := &node.Config{ - Name: fmt.Sprintf("node-%d", i), - P2P: p2p.Config{ - NoDiscovery: true, - MaxPeers: 1, - ListenAddr: ":0", - }, - NoUSB: true, - } - stack, err := node.New(cfg) - s.NoError(err) - s.whisper[i] = whisper.New(nil) - s.whisperWrapper[i] = gethbridge.NewGethWhisperWrapper(s.whisper[i]) - - privateKey, err := crypto.GenerateKey() - s.NoError(err) - - s.NoError(stack.Register(func(n *node.ServiceContext) (node.Service, error) { - return gethbridge.GetGethWhisperFrom(s.whisperWrapper[i]), nil - })) - - config := params.ShhextConfig{ - InstallationID: "1", - BackupDisabledDataDir: directory, - PFSEnabled: true, - MailServerConfirmations: true, - ConnectionTarget: 10, - } - db, err := leveldb.Open(storage.NewMemStorage(), nil) - s.Require().NoError(err) - nodeWrapper := &testNodeWrapper{w: s.whisperWrapper[i]} - s.services[i] = New(nodeWrapper, nil, "shhext", nil, db, config) - - tmpdir, err := ioutil.TempDir("", "test-shhext-service") - s.Require().NoError(err) - - sqlDB, err := sqlite.OpenDB(fmt.Sprintf("%s/%d", tmpdir, i), "password") - s.Require().NoError(err) - - s.Require().NoError(s.services[i].InitProtocol(privateKey, sqlDB)) - s.NoError(stack.Register(func(n *node.ServiceContext) (node.Service, error) { - return s.services[i], nil - })) - s.Require().NoError(stack.Start()) - s.nodes[i] = stack - } -} - -func (s *ShhExtSuite) TestInitProtocol() { - directory, err := ioutil.TempDir("", "status-go-testing") - s.Require().NoError(err) - - config := params.ShhextConfig{ - InstallationID: "2", - BackupDisabledDataDir: directory, - PFSEnabled: true, - MailServerConfirmations: true, - ConnectionTarget: 10, - } - db, err := leveldb.Open(storage.NewMemStorage(), nil) - s.Require().NoError(err) - - shh := 
gethbridge.NewGethWhisperWrapper(whisper.New(nil)) - privateKey, err := crypto.GenerateKey() - s.Require().NoError(err) - - nodeWrapper := &testNodeWrapper{w: shh} - service := New(nodeWrapper, nil, "shhext", nil, db, config) - - tmpdir, err := ioutil.TempDir("", "test-shhext-service-init-protocol") - s.Require().NoError(err) - - sqlDB, err := sqlite.OpenDB(fmt.Sprintf("%s/db.sql", tmpdir), "password") - s.Require().NoError(err) - - err = service.InitProtocol(privateKey, sqlDB) - s.NoError(err) -} - -func (s *ShhExtSuite) TestRequestMessagesErrors() { - var err error - - shh := gethbridge.NewGethWhisperWrapper(whisper.New(nil)) - aNode, err := node.New(&node.Config{ - P2P: p2p.Config{ - MaxPeers: math.MaxInt32, - NoDiscovery: true, - }, - NoUSB: true, - }) // in-memory node as no data dir - s.NoError(err) - err = aNode.Register(func(*node.ServiceContext) (node.Service, error) { - return gethbridge.GetGethWhisperFrom(shh), nil - }) - s.NoError(err) - - err = aNode.Start() - s.NoError(err) - defer func() { s.NoError(aNode.Stop()) }() - - mock := newHandlerMock(1) - config := params.ShhextConfig{ - InstallationID: "1", - BackupDisabledDataDir: os.TempDir(), - PFSEnabled: true, - } - nodeWrapper := &testNodeWrapper{w: shh} - service := New(nodeWrapper, nil, "shhext", mock, nil, config) - api := NewPublicAPI(service) - - const ( - mailServerPeer = "enode://b7e65e1bedc2499ee6cbd806945af5e7df0e59e4070c96821570bd581473eade24a489f5ec95d060c0db118c879403ab88d827d3766978f28708989d35474f87@[::]:51920" - ) - - var hash []byte - - // invalid MailServer enode address - hash, err = api.RequestMessages(context.TODO(), MessagesRequest{MailServerPeer: "invalid-address"}) - s.Nil(hash) - s.EqualError(err, "invalid mailServerPeer value: invalid URL scheme, want \"enode\"") - - // non-existent symmetric key - hash, err = api.RequestMessages(context.TODO(), MessagesRequest{ - MailServerPeer: mailServerPeer, - SymKeyID: "invalid-sym-key-id", - }) - s.Nil(hash) - s.EqualError(err, "invalid symKeyID value: non-existent key ID") - - // with a symmetric key - symKeyID, symKeyErr := shh.AddSymKeyFromPassword("some-pass") - s.NoError(symKeyErr) - hash, err = api.RequestMessages(context.TODO(), MessagesRequest{ - MailServerPeer: mailServerPeer, - SymKeyID: symKeyID, - }) - s.Nil(hash) - s.Contains(err.Error(), "Could not find peer with ID") - - // from is greater than to - hash, err = api.RequestMessages(context.TODO(), MessagesRequest{ - From: 10, - To: 5, - }) - s.Nil(hash) - s.Contains(err.Error(), "Query range is invalid: from > to (10 > 5)") -} - -func (s *ShhExtSuite) TestMultipleRequestMessagesWithoutForce() { - waitErr := helpers.WaitForPeerAsync(s.nodes[0].Server(), s.nodes[1].Server().Self().URLv4(), p2p.PeerEventTypeAdd, time.Second) - s.nodes[0].Server().AddPeer(s.nodes[1].Server().Self()) - s.Require().NoError(<-waitErr) - client, err := s.nodes[0].Attach() - s.NoError(err) - s.NoError(client.Call(nil, "shhext_requestMessages", MessagesRequest{ - MailServerPeer: s.nodes[1].Server().Self().URLv4(), - Topics: []types.TopicType{{1}}, - })) - s.EqualError(client.Call(nil, "shhext_requestMessages", MessagesRequest{ - MailServerPeer: s.nodes[1].Server().Self().URLv4(), - Topics: []types.TopicType{{1}}, - }), "another request with the same topics was sent less than 3s ago. 
Please wait for a bit longer, or set `force` to true in request parameters") - s.NoError(client.Call(nil, "shhext_requestMessages", MessagesRequest{ - MailServerPeer: s.nodes[1].Server().Self().URLv4(), - Topics: []types.TopicType{{2}}, - })) -} - -func (s *ShhExtSuite) TestFailedRequestUnregistered() { - waitErr := helpers.WaitForPeerAsync(s.nodes[0].Server(), s.nodes[1].Server().Self().URLv4(), p2p.PeerEventTypeAdd, time.Second) - s.nodes[0].Server().AddPeer(s.nodes[1].Server().Self()) - s.Require().NoError(<-waitErr) - client, err := s.nodes[0].Attach() - topics := []types.TopicType{{1}} - s.NoError(err) - s.EqualError(client.Call(nil, "shhext_requestMessages", MessagesRequest{ - MailServerPeer: "enode://19872f94b1e776da3a13e25afa71b47dfa99e658afd6427ea8d6e03c22a99f13590205a8826443e95a37eee1d815fc433af7a8ca9a8d0df7943d1f55684045b7@0.0.0.0:30305", - Topics: topics, - }), "Could not find peer with ID: 10841e6db5c02fc331bf36a8d2a9137a1696d9d3b6b1f872f780e02aa8ec5bba") - s.NoError(client.Call(nil, "shhext_requestMessages", MessagesRequest{ - MailServerPeer: s.nodes[1].Server().Self().URLv4(), - Topics: topics, - })) -} - -func (s *ShhExtSuite) TestRequestMessagesSuccess() { - var err error - - shh := gethbridge.NewGethWhisperWrapper(whisper.New(nil)) - privateKey, err := crypto.GenerateKey() - s.Require().NoError(err) - aNode, err := node.New(&node.Config{ - P2P: p2p.Config{ - MaxPeers: math.MaxInt32, - NoDiscovery: true, - }, - NoUSB: true, - }) // in-memory node as no data dir - s.Require().NoError(err) - err = aNode.Register(func(*node.ServiceContext) (node.Service, error) { return gethbridge.GetGethWhisperFrom(shh), nil }) - s.Require().NoError(err) - - err = aNode.Start() - s.Require().NoError(err) - defer func() { err := aNode.Stop(); s.NoError(err) }() - - mock := newHandlerMock(1) - config := params.ShhextConfig{ - InstallationID: "1", - BackupDisabledDataDir: os.TempDir(), - PFSEnabled: true, - } - nodeWrapper := &testNodeWrapper{w: shh} - service := New(nodeWrapper, nil, "shhext", mock, nil, config) - - tmpdir, err := ioutil.TempDir("", "test-shhext-service-request-messages") - s.Require().NoError(err) - - sqlDB, err := sqlite.OpenDB(fmt.Sprintf("%s/db.sql", tmpdir), "password") - s.Require().NoError(err) - - s.Require().NoError(service.InitProtocol(privateKey, sqlDB)) - s.Require().NoError(service.Start(aNode.Server())) - api := NewPublicAPI(service) - - // with a peer acting as a mailserver - // prepare a node first - mailNode, err := node.New(&node.Config{ - P2P: p2p.Config{ - MaxPeers: math.MaxInt32, - NoDiscovery: true, - ListenAddr: ":0", - }, - NoUSB: true, - }) // in-memory node as no data dir - s.Require().NoError(err) - err = mailNode.Register(func(*node.ServiceContext) (node.Service, error) { - return whisper.New(nil), nil - }) - s.NoError(err) - err = mailNode.Start() - s.Require().NoError(err) - defer func() { s.NoError(mailNode.Stop()) }() - - // add mailPeer as a peer - waitErr := helpers.WaitForPeerAsync(aNode.Server(), mailNode.Server().Self().URLv4(), p2p.PeerEventTypeAdd, time.Second) - aNode.Server().AddPeer(mailNode.Server().Self()) - s.Require().NoError(<-waitErr) - - var hash []byte - - // send a request with a symmetric key - symKeyID, symKeyErr := shh.AddSymKeyFromPassword("some-pass") - s.Require().NoError(symKeyErr) - hash, err = api.RequestMessages(context.TODO(), MessagesRequest{ - MailServerPeer: mailNode.Server().Self().URLv4(), - SymKeyID: symKeyID, - Force: true, - }) - s.Require().NoError(err) - s.Require().NotNil(hash) - // Send a request without 
a symmetric key. In this case, - // a public key extracted from MailServerPeer will be used. - hash, err = api.RequestMessages(context.TODO(), MessagesRequest{ - MailServerPeer: mailNode.Server().Self().URLv4(), - Force: true, - }) - s.Require().NoError(err) - s.Require().NotNil(hash) -} - -func (s *ShhExtSuite) TearDown() { - for _, n := range s.nodes { - s.NoError(n.Stop()) - } -} - -type testNodeWrapper struct { - w types.Whisper -} - -func (w *testNodeWrapper) NewENSVerifier(_ *zap.Logger) enstypes.ENSVerifier { - panic("not implemented") -} - -func (w *testNodeWrapper) GetWhisper(_ interface{}) (types.Whisper, error) { - return w.w, nil -} - -func (w *testNodeWrapper) GetWaku(_ interface{}) (types.Waku, error) { - return nil, errors.New("not implemented") -} - -func (w *testNodeWrapper) AddPeer(url string) error { - panic("not implemented") -} - -func (w *testNodeWrapper) RemovePeer(url string) error { - panic("not implemented") -} - -type WhisperNodeMockSuite struct { - suite.Suite - - localWhisperAPI *whisper.PublicWhisperAPI - localAPI *PublicAPI - localNode *enode.Node - remoteRW *p2p.MsgPipeRW - - localService *Service -} - -func (s *WhisperNodeMockSuite) SetupTest() { - db, err := leveldb.Open(storage.NewMemStorage(), nil) - s.Require().NoError(err) - conf := &whisper.Config{ - MinimumAcceptedPOW: 0, - MaxMessageSize: 100 << 10, - } - w := whisper.New(conf) - s.Require().NoError(w.Start(nil)) - pkey, err := crypto.GenerateKey() - s.Require().NoError(err) - node := enode.NewV4(&pkey.PublicKey, net.ParseIP("127.0.0.1"), 1, 1) - peer := p2p.NewPeer(node.ID(), "1", []p2p.Cap{{"shh", 6}}) - rw1, rw2 := p2p.MsgPipe() - errorc := make(chan error, 1) - go func() { - err := w.HandlePeer(peer, rw2) - errorc <- err - }() - whisperWrapper := gethbridge.NewGethWhisperWrapper(w) - s.Require().NoError(p2p.ExpectMsg(rw1, statusCode, []interface{}{ - whisper.ProtocolVersion, - math.Float64bits(whisperWrapper.MinPow()), - whisperWrapper.BloomFilter(), - false, - true, - whisper.RateLimits{}, - })) - s.Require().NoError(p2p.SendItems( - rw1, - statusCode, - whisper.ProtocolVersion, - whisper.ProtocolVersion, - math.Float64bits(whisperWrapper.MinPow()), - whisperWrapper.BloomFilter(), - true, - true, - whisper.RateLimits{}, - )) - - nodeWrapper := &testNodeWrapper{w: whisperWrapper} - s.localService = New( - nodeWrapper, - nil, - "shhext", - nil, - db, - params.ShhextConfig{MailServerConfirmations: true, MaxMessageDeliveryAttempts: 3}, - ) - s.Require().NoError(s.localService.UpdateMailservers([]*enode.Node{node})) - - s.localWhisperAPI = whisper.NewPublicWhisperAPI(w) - s.localAPI = NewPublicAPI(s.localService) - s.localNode = node - s.remoteRW = rw1 -} - -func TestRequestMessagesSync(t *testing.T) { - suite.Run(t, new(RequestMessagesSyncSuite)) -} - -type RequestMessagesSyncSuite struct { - WhisperNodeMockSuite -} - -func (s *RequestMessagesSyncSuite) TestExpired() { - // intentionally discarding all requests, so that request will timeout - go func() { - msg, err := s.remoteRW.ReadMsg() - s.Require().NoError(err) - s.Require().NoError(msg.Discard()) - }() - _, err := s.localAPI.RequestMessagesSync( - RetryConfig{ - BaseTimeout: time.Second, - }, - MessagesRequest{ - MailServerPeer: s.localNode.String(), - }, - ) - s.Require().EqualError(err, "failed to request messages after 1 retries") -} - -func (s *RequestMessagesSyncSuite) testCompletedFromAttempt(target int) { - const cursorSize = 36 // taken from mailserver_response.go from whisper package - cursor := [cursorSize]byte{} - cursor[0] = 0x01 - 
- go func() { - attempt := 0 - for { - attempt++ - msg, err := s.remoteRW.ReadMsg() - s.Require().NoError(err) - if attempt < target { - s.Require().NoError(msg.Discard()) - continue - } - var e whisper.Envelope - s.Require().NoError(msg.Decode(&e)) - s.Require().NoError(p2p.Send(s.remoteRW, p2pRequestCompleteCode, whisper.CreateMailServerRequestCompletedPayload(e.Hash(), common.Hash{}, cursor[:]))) - } - }() - resp, err := s.localAPI.RequestMessagesSync( - RetryConfig{ - BaseTimeout: time.Second, - MaxRetries: target, - }, - MessagesRequest{ - MailServerPeer: s.localNode.String(), - Force: true, // force true is convenient here because timeout is less then default delay (3s) - }, - ) - s.Require().NoError(err) - s.Require().Equal(MessagesResponse{Cursor: hex.EncodeToString(cursor[:])}, resp) -} - -func (s *RequestMessagesSyncSuite) TestCompletedFromFirstAttempt() { - s.testCompletedFromAttempt(1) -} - -func (s *RequestMessagesSyncSuite) TestCompletedFromSecondAttempt() { - s.testCompletedFromAttempt(2) -} - -func TestWhisperConfirmations(t *testing.T) { - suite.Run(t, new(WhisperConfirmationSuite)) -} - -type WhisperConfirmationSuite struct { - WhisperNodeMockSuite -} - -func TestWhisperRetriesSuite(t *testing.T) { - suite.Run(t, new(WhisperRetriesSuite)) -} - -type WhisperRetriesSuite struct { - WhisperNodeMockSuite -} - -func TestRequestWithTrackingHistorySuite(t *testing.T) { - suite.Run(t, new(RequestWithTrackingHistorySuite)) -} - -type RequestWithTrackingHistorySuite struct { - suite.Suite - - envelopeSymkey string - envelopeSymkeyID string - - localWhisperAPI types.PublicWhisperAPI - localAPI *PublicAPI - localService *Service - localContext Context - mailSymKey string - - remoteMailserver *mailserver.WhisperMailServer - remoteNode *enode.Node - remoteWhisper *whisper.Whisper -} - -func (s *RequestWithTrackingHistorySuite) SetupTest() { - db, err := leveldb.Open(storage.NewMemStorage(), nil) - s.Require().NoError(err) - conf := &whisper.Config{ - MinimumAcceptedPOW: 0, - MaxMessageSize: 100 << 10, - } - localSHH := whisper.New(conf) - local := gethbridge.NewGethWhisperWrapper(localSHH) - s.Require().NoError(localSHH.Start(nil)) - - s.localWhisperAPI = local.PublicWhisperAPI() - nodeWrapper := &testNodeWrapper{w: local} - s.localService = New(nodeWrapper, nil, "shhext", nil, db, params.ShhextConfig{}) - s.localContext = NewContextFromService(context.Background(), s.localService, s.localService.storage) - localPkey, err := crypto.GenerateKey() - s.Require().NoError(err) - - tmpdir, err := ioutil.TempDir("", "test-shhext-service") - s.Require().NoError(err) - - sqlDB, err := sqlite.OpenDB(fmt.Sprintf("%s/db.sql", tmpdir), "password") - s.Require().NoError(err) - - s.Require().NoError(s.localService.InitProtocol(nil, sqlDB)) - s.Require().NoError(s.localService.Start(&p2p.Server{Config: p2p.Config{PrivateKey: localPkey}})) - s.localAPI = NewPublicAPI(s.localService) - - remoteSHH := whisper.New(conf) - s.remoteWhisper = remoteSHH - s.Require().NoError(remoteSHH.Start(nil)) - s.remoteMailserver = &mailserver.WhisperMailServer{} - remoteSHH.RegisterMailServer(s.remoteMailserver) - password := "test" - tmpdir, err = ioutil.TempDir("", "tracking-history-tests-") - s.Require().NoError(err) - s.Require().NoError(s.remoteMailserver.Init(remoteSHH, ¶ms.WhisperConfig{ - DataDir: tmpdir, - MailServerPassword: password, - })) - - pkey, err := crypto.GenerateKey() - s.Require().NoError(err) - // we need proper enode for a remote node. 
it will be used when mail server request is made - s.remoteNode = enode.NewV4(&pkey.PublicKey, net.ParseIP("127.0.0.1"), 1, 1) - remotePeer := p2p.NewPeer(s.remoteNode.ID(), "1", []p2p.Cap{{"shh", 6}}) - localPeer := p2p.NewPeer(enode.ID{2}, "2", []p2p.Cap{{"shh", 6}}) - // FIXME close this in tear down - rw1, rw2 := p2p.MsgPipe() - go func() { - err := localSHH.HandlePeer(remotePeer, rw1) - s.Require().NoError(err) - }() - go func() { - err := remoteSHH.HandlePeer(localPeer, rw2) - s.Require().NoError(err) - }() - s.mailSymKey, err = s.localWhisperAPI.GenerateSymKeyFromPassword(context.Background(), password) - s.Require().NoError(err) - - s.envelopeSymkey = "topics" - s.envelopeSymkeyID, err = s.localWhisperAPI.GenerateSymKeyFromPassword(context.Background(), s.envelopeSymkey) - s.Require().NoError(err) -} - -func (s *RequestWithTrackingHistorySuite) postEnvelopes(topics ...types.TopicType) []hexutil.Bytes { - var ( - rst = make([]hexutil.Bytes, len(topics)) - err error - ) - for i, t := range topics { - rst[i], err = s.localWhisperAPI.Post(context.Background(), types.NewMessage{ - SymKeyID: s.envelopeSymkeyID, - TTL: 10, - Topic: t, - }) - s.Require().NoError(err) - } - return rst - -} - -func (s *RequestWithTrackingHistorySuite) waitForArchival(hexes []hexutil.Bytes) { - events := make(chan whisper.EnvelopeEvent, 2) - sub := s.remoteWhisper.SubscribeEnvelopeEvents(events) - defer sub.Unsubscribe() - s.Require().NoError(waitForArchival(events, 2*time.Second, hexes...)) -} - -func (s *RequestWithTrackingHistorySuite) createEmptyFilter(topics ...types.TopicType) string { - filterid, err := s.localWhisperAPI.NewMessageFilter(types.Criteria{ - SymKeyID: s.envelopeSymkeyID, - Topics: topics, - AllowP2P: true, - }) - s.Require().NoError(err) - s.Require().NotNil(filterid) - - messages, err := s.localWhisperAPI.GetFilterMessages(filterid) - s.Require().NoError(err) - s.Require().Empty(messages) - return filterid -} - -func (s *RequestWithTrackingHistorySuite) initiateHistoryRequest(topics ...TopicRequest) []types.HexBytes { - requests, err := s.localAPI.InitiateHistoryRequests(context.Background(), InitiateHistoryRequestParams{ - Peer: s.remoteNode.String(), - SymKeyID: s.mailSymKey, - Timeout: 10 * time.Second, - Requests: topics, - }) - s.Require().NoError(err) - return requests -} - -func (s *RequestWithTrackingHistorySuite) waitMessagesDelivered(filterid string, hexes ...hexutil.Bytes) { - var received int - s.Require().NoError(utils.Eventually(func() error { - messages, err := s.localWhisperAPI.GetFilterMessages(filterid) - if err != nil { - return err - } - received += len(messages) - if received != len(hexes) { - return fmt.Errorf("expecting to receive %d messages, received %d", len(hexes), received) - } - return nil - }, 2*time.Second, 200*time.Millisecond)) -} - -func (s *RequestWithTrackingHistorySuite) waitNoRequests() { - store := s.localContext.HistoryStore() - s.Require().NoError(utils.Eventually(func() error { - reqs, err := store.GetAllRequests() - if err != nil { - return err - } - if len(reqs) != 0 { - return fmt.Errorf("not all requests were removed. 
count %d", len(reqs)) - } - return nil - }, 2*time.Second, 200*time.Millisecond)) -} - -func (s *RequestWithTrackingHistorySuite) TestMultipleMergeIntoOne() { - topic1 := types.TopicType{1, 1, 1, 1} - topic2 := types.TopicType{2, 2, 2, 2} - topic3 := types.TopicType{3, 3, 3, 3} - hexes := s.postEnvelopes(topic1, topic2, topic3) - s.waitForArchival(hexes) - - filterid := s.createEmptyFilter(topic1, topic2, topic3) - requests := s.initiateHistoryRequest( - TopicRequest{Topic: topic1, Duration: time.Hour}, - TopicRequest{Topic: topic2, Duration: time.Hour}, - TopicRequest{Topic: topic3, Duration: 10 * time.Hour}, - ) - // since we are using different duration for 3rd topic there will be 2 requests - s.Require().Len(requests, 2) - s.Require().NotEqual(requests[0], requests[1]) - s.waitMessagesDelivered(filterid, hexes...) - - s.Require().NoError(s.localService.historyUpdates.UpdateTopicHistory(s.localContext, topic1, time.Now())) - s.Require().NoError(s.localService.historyUpdates.UpdateTopicHistory(s.localContext, topic2, time.Now())) - s.Require().NoError(s.localService.historyUpdates.UpdateTopicHistory(s.localContext, topic3, time.Now())) - for _, r := range requests { - s.Require().NoError(s.localAPI.CompleteRequest(context.TODO(), r.String())) - } - s.waitNoRequests() - - requests = s.initiateHistoryRequest( - TopicRequest{Topic: topic1, Duration: time.Hour}, - TopicRequest{Topic: topic2, Duration: time.Hour}, - TopicRequest{Topic: topic3, Duration: 10 * time.Hour}, - ) - s.Len(requests, 1) -} - -func (s *RequestWithTrackingHistorySuite) TestSingleRequest() { - topic1 := types.TopicType{1, 1, 1, 1} - topic2 := types.TopicType{255, 255, 255, 255} - hexes := s.postEnvelopes(topic1, topic2) - s.waitForArchival(hexes) - - filterid := s.createEmptyFilter(topic1, topic2) - requests := s.initiateHistoryRequest( - TopicRequest{Topic: topic1, Duration: time.Hour}, - TopicRequest{Topic: topic2, Duration: time.Hour}, - ) - s.Require().Len(requests, 1) - s.waitMessagesDelivered(filterid, hexes...) 
-} - -func (s *RequestWithTrackingHistorySuite) TestPreviousRequestReplaced() { - topic1 := types.TopicType{1, 1, 1, 1} - topic2 := types.TopicType{255, 255, 255, 255} - - requests := s.initiateHistoryRequest( - TopicRequest{Topic: topic1, Duration: time.Hour}, - TopicRequest{Topic: topic2, Duration: time.Hour}, - ) - s.Require().Len(requests, 1) - s.localService.requestsRegistry.Clear() - replaced := s.initiateHistoryRequest( - TopicRequest{Topic: topic1, Duration: time.Hour}, - TopicRequest{Topic: topic2, Duration: time.Hour}, - ) - s.Require().Len(replaced, 1) - s.Require().NotEqual(requests[0], replaced[0]) -} - -func waitForArchival(events chan whisper.EnvelopeEvent, duration time.Duration, hashes ...hexutil.Bytes) error { - waiting := map[common.Hash]struct{}{} - for _, hash := range hashes { - waiting[common.BytesToHash(hash)] = struct{}{} - } - timeout := time.After(duration) - for { - select { - case <-timeout: - return errors.New("timed out while waiting for mailserver to archive envelopes") - case ev := <-events: - if ev.Event != whisper.EventMailServerEnvelopeArchived { - continue - } - if _, exist := waiting[ev.Hash]; exist { - delete(waiting, ev.Hash) - if len(waiting) == 0 { - return nil - } - } - } - } - -} diff --git a/services/shhext_wakuext_test.go b/services/shhext_wakuext_test.go new file mode 100644 index 000000000..9c54d293d --- /dev/null +++ b/services/shhext_wakuext_test.go @@ -0,0 +1,69 @@ +package services + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p" + gethbridge "github.com/status-im/status-go/eth-node/bridge/geth" + "github.com/status-im/status-go/params" + "github.com/status-im/status-go/services/ext" + "github.com/status-im/status-go/services/shhext" + "github.com/status-im/status-go/services/wakuext" + "github.com/status-im/status-go/waku" + "github.com/status-im/status-go/whisper/v6" +) + +func TestShhextAndWakuextInSingleNode(t *testing.T) { + aNode, err := node.New(&node.Config{ + P2P: p2p.Config{ + MaxPeers: math.MaxInt32, + NoDiscovery: true, + }, + NoUSB: true, + }) // in-memory node as no data dir + require.NoError(t, err) + + // register waku and whisper services + wakuWrapper := gethbridge.NewGethWakuWrapper(waku.New(nil, nil)) + err = aNode.Register(func(*node.ServiceContext) (node.Service, error) { + return gethbridge.GetGethWakuFrom(wakuWrapper), nil + }) + require.NoError(t, err) + whisperWrapper := gethbridge.NewGethWhisperWrapper(whisper.New(nil)) + err = aNode.Register(func(*node.ServiceContext) (node.Service, error) { + return gethbridge.GetGethWhisperFrom(whisperWrapper), nil + }) + require.NoError(t, err) + + nodeWrapper := ext.NewTestNodeWrapper(whisperWrapper, wakuWrapper) + + // register ext services + err = aNode.Register(func(ctx *node.ServiceContext) (node.Service, error) { + return wakuext.New(params.ShhextConfig{}, nodeWrapper, ctx, ext.EnvelopeSignalHandler{}, nil), nil + }) + require.NoError(t, err) + err = aNode.Register(func(ctx *node.ServiceContext) (node.Service, error) { + return shhext.New(params.ShhextConfig{}, nodeWrapper, ctx, ext.EnvelopeSignalHandler{}, nil), nil + }) + require.NoError(t, err) + + // start node + err = aNode.Start() + require.NoError(t, err) + defer func() { require.NoError(t, aNode.Stop()) }() + + // verify the services are available + rpc, err := aNode.Attach() + require.NoError(t, err) + var result string + err = rpc.Call(&result, "shhext_echo", "shhext test") + require.NoError(t, err) + 
require.Equal(t, "shhext test", result) + err = rpc.Call(&result, "wakuext_echo", "wakuext test") + require.NoError(t, err) + require.Equal(t, "wakuext test", result) +} diff --git a/services/wakuext/api.go b/services/wakuext/api.go new file mode 100644 index 000000000..a47d866b5 --- /dev/null +++ b/services/wakuext/api.go @@ -0,0 +1,173 @@ +package wakuext + +import ( + "context" + "crypto/ecdsa" + "encoding/hex" + "fmt" + "time" + + "github.com/ethereum/go-ethereum/log" + gethbridge "github.com/status-im/status-go/eth-node/bridge/geth" + "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/services/ext" + "github.com/status-im/status-go/waku" +) + +const ( + // defaultWorkTime is a work time reported in messages sent to MailServer nodes. + defaultWorkTime = 5 +) + +// PublicAPI extends waku public API. +type PublicAPI struct { + *ext.PublicAPI + service *Service + publicAPI types.PublicWakuAPI + log log.Logger +} + +// NewPublicAPI returns instance of the public API. +func NewPublicAPI(s *Service) *PublicAPI { + return &PublicAPI{ + PublicAPI: ext.NewPublicAPI(s.Service, s.w), + service: s, + publicAPI: s.w.PublicWakuAPI(), + log: log.New("package", "status-go/services/wakuext.PublicAPI"), + } +} + +// makeEnvelop makes an envelop for a historic messages request. +// Symmetric key is used to authenticate to MailServer. +// PK is the current node ID. +// DEPRECATED +func makeEnvelop( + payload []byte, + symKey []byte, + publicKey *ecdsa.PublicKey, + nodeID *ecdsa.PrivateKey, + pow float64, + now time.Time, +) (types.Envelope, error) { + params := waku.MessageParams{ + PoW: pow, + Payload: payload, + WorkTime: defaultWorkTime, + Src: nodeID, + } + // Either symKey or public key is required. + // This condition is verified in `message.Wrap()` method. + if len(symKey) > 0 { + params.KeySym = symKey + } else if publicKey != nil { + params.Dst = publicKey + } + message, err := waku.NewSentMessage(&params) + if err != nil { + return nil, err + } + envelope, err := message.Wrap(&params, now) + if err != nil { + return nil, err + } + return gethbridge.NewWakuEnvelope(envelope), nil +} + +// RequestMessages sends a request for historic messages to a MailServer.
+func (api *PublicAPI) RequestMessages(_ context.Context, r ext.MessagesRequest) (types.HexBytes, error) { + api.log.Info("RequestMessages", "request", r) + + now := api.service.w.GetCurrentTime() + r.SetDefaults(now) + + if r.From > r.To { + return nil, fmt.Errorf("Query range is invalid: from > to (%d > %d)", r.From, r.To) + } + + mailServerNode, err := api.service.GetPeer(r.MailServerPeer) + if err != nil { + return nil, fmt.Errorf("%v: %v", ext.ErrInvalidMailServerPeer, err) + } + + var ( + symKey []byte + publicKey *ecdsa.PublicKey + ) + + if r.SymKeyID != "" { + symKey, err = api.service.w.GetSymKey(r.SymKeyID) + if err != nil { + return nil, fmt.Errorf("%v: %v", ext.ErrInvalidSymKeyID, err) + } + } else { + publicKey = mailServerNode.Pubkey() + } + + payload, err := ext.MakeMessagesRequestPayload(r) + if err != nil { + return nil, err + } + + envelope, err := makeEnvelop( + payload, + symKey, + publicKey, + api.service.NodeID(), + api.service.w.MinPow(), + now, + ) + if err != nil { + return nil, err + } + hash := envelope.Hash() + + if !r.Force { + err = api.service.RequestsRegistry().Register(hash, r.Topics) + if err != nil { + return nil, err + } + } + + if err := api.service.w.RequestHistoricMessagesWithTimeout(mailServerNode.ID().Bytes(), envelope, r.Timeout*time.Second); err != nil { + if !r.Force { + api.service.RequestsRegistry().Unregister(hash) + } + return nil, err + } + + return hash[:], nil +} + +// RequestMessagesSync repeats MessagesRequest using configuration in retry conf. +func (api *PublicAPI) RequestMessagesSync(conf ext.RetryConfig, r ext.MessagesRequest) (ext.MessagesResponse, error) { + var resp ext.MessagesResponse + + events := make(chan types.EnvelopeEvent, 10) + var ( + requestID types.HexBytes + err error + retries int + ) + for retries <= conf.MaxRetries { + sub := api.service.w.SubscribeEnvelopeEvents(events) + r.Timeout = conf.BaseTimeout + conf.StepTimeout*time.Duration(retries) + timeout := r.Timeout + // FIXME this weird conversion is required because MessagesRequest expects seconds but defines time.Duration + r.Timeout = time.Duration(int(r.Timeout.Seconds())) + requestID, err = api.RequestMessages(context.Background(), r) + if err != nil { + sub.Unsubscribe() + return resp, err + } + mailServerResp, err := ext.WaitForExpiredOrCompleted(types.BytesToHash(requestID), events, timeout) + sub.Unsubscribe() + if err == nil { + resp.Cursor = hex.EncodeToString(mailServerResp.Cursor) + resp.Error = mailServerResp.Error + return resp, nil + } + retries++ + api.log.Error("[RequestMessagesSync] failed", "err", err, "retries", retries) + } + return resp, fmt.Errorf("failed to request messages after %d retries", retries) +} diff --git a/services/wakuext/api_test.go b/services/wakuext/api_test.go new file mode 100644 index 000000000..939529661 --- /dev/null +++ b/services/wakuext/api_test.go @@ -0,0 +1,411 @@ +package wakuext + +import ( + "context" + "encoding/hex" + "fmt" + "io/ioutil" + "math" + "net" + "os" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/storage" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/enode" + gethbridge "github.com/status-im/status-go/eth-node/bridge/geth" + "github.com/status-im/status-go/eth-node/crypto" + "github.com/status-im/status-go/eth-node/types" + 
"github.com/status-im/status-go/params" + "github.com/status-im/status-go/services/ext" + "github.com/status-im/status-go/sqlite" + "github.com/status-im/status-go/t/helpers" + "github.com/status-im/status-go/waku" +) + +func TestRequestMessagesErrors(t *testing.T) { + var err error + + waku := gethbridge.NewGethWakuWrapper(waku.New(nil, nil)) + aNode, err := node.New(&node.Config{ + P2P: p2p.Config{ + MaxPeers: math.MaxInt32, + NoDiscovery: true, + }, + NoUSB: true, + }) // in-memory node as no data dir + require.NoError(t, err) + err = aNode.Register(func(*node.ServiceContext) (node.Service, error) { + return gethbridge.GetGethWakuFrom(waku), nil + }) + require.NoError(t, err) + + err = aNode.Start() + require.NoError(t, err) + defer func() { require.NoError(t, aNode.Stop()) }() + + handler := ext.NewHandlerMock(1) + config := params.ShhextConfig{ + InstallationID: "1", + BackupDisabledDataDir: os.TempDir(), + PFSEnabled: true, + } + nodeWrapper := ext.NewTestNodeWrapper(nil, waku) + service := New(config, nodeWrapper, nil, handler, nil) + api := NewPublicAPI(service) + + const mailServerPeer = "enode://b7e65e1bedc2499ee6cbd806945af5e7df0e59e4070c96821570bd581473eade24a489f5ec95d060c0db118c879403ab88d827d3766978f28708989d35474f87@[::]:51920" + + var hash []byte + + // invalid MailServer enode address + hash, err = api.RequestMessages(context.TODO(), ext.MessagesRequest{MailServerPeer: "invalid-address"}) + require.Nil(t, hash) + require.EqualError(t, err, "invalid mailServerPeer value: invalid URL scheme, want \"enode\"") + + // non-existent symmetric key + hash, err = api.RequestMessages(context.TODO(), ext.MessagesRequest{ + MailServerPeer: mailServerPeer, + SymKeyID: "invalid-sym-key-id", + }) + require.Nil(t, hash) + require.EqualError(t, err, "invalid symKeyID value: non-existent key ID") + + // with a symmetric key + symKeyID, symKeyErr := waku.AddSymKeyFromPassword("some-pass") + require.NoError(t, symKeyErr) + hash, err = api.RequestMessages(context.TODO(), ext.MessagesRequest{ + MailServerPeer: mailServerPeer, + SymKeyID: symKeyID, + }) + require.Nil(t, hash) + require.Contains(t, err.Error(), "could not find peer with ID") + + // from is greater than to + hash, err = api.RequestMessages(context.TODO(), ext.MessagesRequest{ + From: 10, + To: 5, + }) + require.Nil(t, hash) + require.Contains(t, err.Error(), "Query range is invalid: from > to (10 > 5)") +} + +func TestInitProtocol(t *testing.T) { + directory, err := ioutil.TempDir("", "status-go-testing") + require.NoError(t, err) + + config := params.ShhextConfig{ + InstallationID: "2", + BackupDisabledDataDir: directory, + PFSEnabled: true, + MailServerConfirmations: true, + ConnectionTarget: 10, + } + db, err := leveldb.Open(storage.NewMemStorage(), nil) + require.NoError(t, err) + + waku := gethbridge.NewGethWakuWrapper(waku.New(nil, nil)) + privateKey, err := crypto.GenerateKey() + require.NoError(t, err) + + nodeWrapper := ext.NewTestNodeWrapper(nil, waku) + service := New(config, nodeWrapper, nil, nil, db) + + tmpdir, err := ioutil.TempDir("", "test-shhext-service-init-protocol") + require.NoError(t, err) + + sqlDB, err := sqlite.OpenDB(fmt.Sprintf("%s/db.sql", tmpdir), "password") + require.NoError(t, err) + + err = service.InitProtocol(privateKey, sqlDB) + require.NoError(t, err) +} + +func TestShhExtSuite(t *testing.T) { + suite.Run(t, new(ShhExtSuite)) +} + +type ShhExtSuite struct { + suite.Suite + + dir string + nodes []*node.Node + wakus []types.Waku + services []*Service +} + +func (s *ShhExtSuite) 
createAndAddNode() { + idx := len(s.nodes) + + // create a node + cfg := &node.Config{ + Name: strconv.Itoa(idx), + P2P: p2p.Config{ + MaxPeers: math.MaxInt32, + NoDiscovery: true, + ListenAddr: ":0", + }, + NoUSB: true, + } + stack, err := node.New(cfg) + s.NoError(err) + w := waku.New(nil, nil) + err = stack.Register(func(n *node.ServiceContext) (node.Service, error) { + return w, nil + }) + s.NoError(err) + + // set up protocol + config := params.ShhextConfig{ + InstallationID: "1", + BackupDisabledDataDir: s.dir, + PFSEnabled: true, + MailServerConfirmations: true, + ConnectionTarget: 10, + } + db, err := leveldb.Open(storage.NewMemStorage(), nil) + s.Require().NoError(err) + nodeWrapper := ext.NewTestNodeWrapper(nil, gethbridge.NewGethWakuWrapper(w)) + service := New(config, nodeWrapper, nil, nil, db) + sqlDB, err := sqlite.OpenDB(fmt.Sprintf("%s/%d", s.dir, idx), "password") + s.Require().NoError(err) + privateKey, err := crypto.GenerateKey() + s.NoError(err) + err = service.InitProtocol(privateKey, sqlDB) + s.NoError(err) + err = stack.Register(func(n *node.ServiceContext) (node.Service, error) { + return service, nil + }) + s.NoError(err) + + // start the node + err = stack.Start() + s.Require().NoError(err) + + // store references + s.nodes = append(s.nodes, stack) + s.wakus = append(s.wakus, gethbridge.NewGethWakuWrapper(w)) + s.services = append(s.services, service) +} + +func (s *ShhExtSuite) SetupTest() { + var err error + s.dir, err = ioutil.TempDir("", "status-go-testing") + s.Require().NoError(err) +} + +func (s *ShhExtSuite) TearDownTest() { + for _, n := range s.nodes { + s.NoError(n.Stop()) + } + s.nodes = nil + s.wakus = nil + s.services = nil +} + +func (s *ShhExtSuite) TestRequestMessagesSuccess() { + // two nodes needed: client and mailserver + s.createAndAddNode() + s.createAndAddNode() + + waitErr := helpers.WaitForPeerAsync(s.nodes[0].Server(), s.nodes[1].Server().Self().URLv4(), p2p.PeerEventTypeAdd, time.Second) + s.nodes[0].Server().AddPeer(s.nodes[1].Server().Self()) + s.Require().NoError(<-waitErr) + + api := NewPublicAPI(s.services[0]) + + _, err := api.RequestMessages(context.Background(), ext.MessagesRequest{ + MailServerPeer: s.nodes[1].Server().Self().URLv4(), + Topics: []types.TopicType{{1}}, + }) + s.NoError(err) +} + +func (s *ShhExtSuite) TestMultipleRequestMessagesWithoutForce() { + // two nodes needed: client and mailserver + s.createAndAddNode() + s.createAndAddNode() + + waitErr := helpers.WaitForPeerAsync(s.nodes[0].Server(), s.nodes[1].Server().Self().URLv4(), p2p.PeerEventTypeAdd, time.Second) + s.nodes[0].Server().AddPeer(s.nodes[1].Server().Self()) + s.Require().NoError(<-waitErr) + + api := NewPublicAPI(s.services[0]) + + _, err := api.RequestMessages(context.Background(), ext.MessagesRequest{ + MailServerPeer: s.nodes[1].Server().Self().URLv4(), + Topics: []types.TopicType{{1}}, + }) + s.NoError(err) + _, err = api.RequestMessages(context.Background(), ext.MessagesRequest{ + MailServerPeer: s.nodes[1].Server().Self().URLv4(), + Topics: []types.TopicType{{1}}, + }) + s.EqualError(err, "another request with the same topics was sent less than 3s ago. 
Please wait for a bit longer, or set `force` to true in request parameters") + _, err = api.RequestMessages(context.Background(), ext.MessagesRequest{ + MailServerPeer: s.nodes[1].Server().Self().URLv4(), + Topics: []types.TopicType{{2}}, + }) + s.NoError(err) +} + +func (s *ShhExtSuite) TestFailedRequestWithUnknownMailServerPeer() { + s.createAndAddNode() + + api := NewPublicAPI(s.services[0]) + + _, err := api.RequestMessages(context.Background(), ext.MessagesRequest{ + MailServerPeer: "enode://19872f94b1e776da3a13e25afa71b47dfa99e658afd6427ea8d6e03c22a99f13590205a8826443e95a37eee1d815fc433af7a8ca9a8d0df7943d1f55684045b7@0.0.0.0:30305", + Topics: []types.TopicType{{1}}, + }) + s.EqualError(err, "could not find peer with ID: 10841e6db5c02fc331bf36a8d2a9137a1696d9d3b6b1f872f780e02aa8ec5bba") +} + +const ( + // internal waku protocol codes + statusCode = 0 + p2pRequestCompleteCode = 125 +) + +type WakuNodeMockSuite struct { + suite.Suite + + localWakuAPI *waku.PublicWakuAPI + localAPI *PublicAPI + localNode *enode.Node + remoteRW *p2p.MsgPipeRW + + localService *Service +} + +func (s *WakuNodeMockSuite) SetupTest() { + db, err := leveldb.Open(storage.NewMemStorage(), nil) + s.Require().NoError(err) + conf := &waku.Config{ + MinimumAcceptedPoW: 0, + MaxMessageSize: 100 << 10, + EnableConfirmations: true, + } + w := waku.New(conf, nil) + s.Require().NoError(w.Start(nil)) + pkey, err := crypto.GenerateKey() + s.Require().NoError(err) + node := enode.NewV4(&pkey.PublicKey, net.ParseIP("127.0.0.1"), 1, 1) + peer := p2p.NewPeer(node.ID(), "1", []p2p.Cap{{"shh", 6}}) + rw1, rw2 := p2p.MsgPipe() + errorc := make(chan error, 1) + go func() { + err := w.HandlePeer(peer, rw2) + errorc <- err + }() + wakuWrapper := gethbridge.NewGethWakuWrapper(w) + s.Require().NoError(p2p.ExpectMsg(rw1, statusCode, []interface{}{ + waku.ProtocolVersion, + math.Float64bits(wakuWrapper.MinPow()), + wakuWrapper.BloomFilter(), + false, + true, + waku.RateLimits{}, + })) + s.Require().NoError(p2p.SendItems( + rw1, + statusCode, + waku.ProtocolVersion, + math.Float64bits(wakuWrapper.MinPow()), + wakuWrapper.BloomFilter(), + true, + true, + waku.RateLimits{}, + )) + + nodeWrapper := ext.NewTestNodeWrapper(nil, wakuWrapper) + s.localService = New( + params.ShhextConfig{MailServerConfirmations: true, MaxMessageDeliveryAttempts: 3}, + nodeWrapper, + nil, + nil, + db, + ) + s.Require().NoError(s.localService.UpdateMailservers([]*enode.Node{node})) + + s.localWakuAPI = waku.NewPublicWakuAPI(w) + s.localAPI = NewPublicAPI(s.localService) + s.localNode = node + s.remoteRW = rw1 +} + +func TestRequestMessagesSync(t *testing.T) { + suite.Run(t, new(RequestMessagesSyncSuite)) +} + +type RequestMessagesSyncSuite struct { + WakuNodeMockSuite +} + +func (s *RequestMessagesSyncSuite) TestExpired() { + // intentionally discarding all requests, so that request will timeout + go func() { + msg, err := s.remoteRW.ReadMsg() + s.Require().NoError(err) + s.Require().NoError(msg.Discard()) + }() + _, err := s.localAPI.RequestMessagesSync( + ext.RetryConfig{ + BaseTimeout: time.Second, + }, + ext.MessagesRequest{ + MailServerPeer: s.localNode.String(), + }, + ) + s.Require().EqualError(err, "failed to request messages after 1 retries") +} + +func (s *RequestMessagesSyncSuite) testCompletedFromAttempt(target int) { + const cursorSize = 36 // taken from mailserver_response.go from waku package + cursor := [cursorSize]byte{} + cursor[0] = 0x01 + + go func() { + attempt := 0 + for { + attempt++ + msg, err := s.remoteRW.ReadMsg() + 
s.Require().NoError(err) + if attempt < target { + s.Require().NoError(msg.Discard()) + continue + } + var e waku.Envelope + s.Require().NoError(msg.Decode(&e)) + s.Require().NoError(p2p.Send(s.remoteRW, p2pRequestCompleteCode, waku.CreateMailServerRequestCompletedPayload(e.Hash(), common.Hash{}, cursor[:]))) + } + }() + resp, err := s.localAPI.RequestMessagesSync( + ext.RetryConfig{ + BaseTimeout: time.Second, + MaxRetries: target, + }, + ext.MessagesRequest{ + MailServerPeer: s.localNode.String(), + Force: true, // force true is convenient here because timeout is less then default delay (3s) + }, + ) + s.Require().NoError(err) + s.Require().Equal(ext.MessagesResponse{Cursor: hex.EncodeToString(cursor[:])}, resp) +} + +func (s *RequestMessagesSyncSuite) TestCompletedFromFirstAttempt() { + s.testCompletedFromAttempt(1) +} + +func (s *RequestMessagesSyncSuite) TestCompletedFromSecondAttempt() { + s.testCompletedFromAttempt(2) +} diff --git a/services/wakuext/service.go b/services/wakuext/service.go new file mode 100644 index 000000000..7aae6e329 --- /dev/null +++ b/services/wakuext/service.go @@ -0,0 +1,50 @@ +package wakuext + +import ( + "github.com/syndtr/goleveldb/leveldb" + + "github.com/ethereum/go-ethereum/rpc" + + "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/params" + "github.com/status-im/status-go/services/ext" +) + +type Service struct { + *ext.Service + w types.Waku +} + +func New(config params.ShhextConfig, n types.Node, ctx interface{}, handler ext.EnvelopeEventsHandler, ldb *leveldb.DB) *Service { + w, err := n.GetWaku(ctx) + if err != nil { + panic(err) + } + delay := ext.DefaultRequestsDelay + if config.RequestsDelay != 0 { + delay = config.RequestsDelay + } + requestsRegistry := ext.NewRequestsRegistry(delay) + mailMonitor := ext.NewMailRequestMonitor(w, handler, requestsRegistry) + return &Service{ + Service: ext.New(config, n, ldb, mailMonitor, requestsRegistry, w), + w: w, + } +} + +func (s *Service) PublicWakuAPI() types.PublicWakuAPI { + return s.w.PublicWakuAPI() +} + +// APIs returns a list of new APIs. +func (s *Service) APIs() []rpc.API { + apis := []rpc.API{ + { + Namespace: "wakuext", + Version: "1.0", + Service: NewPublicAPI(s), + Public: true, + }, + } + return apis +} diff --git a/signal/events_shhext.go b/signal/events_shhext.go index 61c611c54..e0da0ad6d 100644 --- a/signal/events_shhext.go +++ b/signal/events_shhext.go @@ -18,9 +18,6 @@ const ( // to any peer EventEnvelopeExpired = "envelope.expired" - // EventEnvelopeDiscarded is triggerd when envelope was discarded by a peer for some reason. 
- EventEnvelopeDiscarded = "envelope.discarded" - // EventMailServerRequestCompleted is triggered when whisper receives a message ack from the mailserver EventMailServerRequestCompleted = "mailserver.request.completed" diff --git a/t/benchmarks/mailserver_test.go b/t/benchmarks/mailserver_test.go index da8fe0040..46960fc7e 100644 --- a/t/benchmarks/mailserver_test.go +++ b/t/benchmarks/mailserver_test.go @@ -7,6 +7,8 @@ import ( "testing" "time" + "github.com/status-im/status-go/services/shhext" + "github.com/stretchr/testify/require" "github.com/ethereum/go-ethereum/node" @@ -14,8 +16,8 @@ import ( gethbridge "github.com/status-im/status-go/eth-node/bridge/geth" "github.com/status-im/status-go/eth-node/types" "github.com/status-im/status-go/params" + "github.com/status-im/status-go/services/ext" "github.com/status-im/status-go/services/nodebridge" - "github.com/status-im/status-go/services/shhext" "github.com/status-im/status-go/whisper/v6" ) @@ -73,7 +75,7 @@ func testMailserverPeer(t *testing.T) { require.NoError(t, err) // register mail service as well err = n.Register(func(ctx *node.ServiceContext) (node.Service, error) { - mailService := shhext.New(gethbridge.NewNodeBridge(n), ctx, nil, nil, config) + mailService := shhext.New(config, gethbridge.NewNodeBridge(n), ctx, nil, nil) return mailService, nil }) require.NoError(t, err) @@ -109,7 +111,7 @@ func testMailserverPeer(t *testing.T) { ok, err := shhAPI.MarkTrustedPeer(context.TODO(), *peerURL) require.NoError(t, err) require.True(t, ok) - requestID, err := shhextAPI.RequestMessages(context.TODO(), shhext.MessagesRequest{ + requestID, err := shhextAPI.RequestMessages(context.TODO(), ext.MessagesRequest{ MailServerPeer: *peerURL, SymKeyID: symKeyID, Topic: types.TopicType(topic), diff --git a/t/e2e/whisper/whisper_mailbox_test.go b/t/e2e/whisper/whisper_mailbox_test.go index e53f6b68f..dfed915e4 100644 --- a/t/e2e/whisper/whisper_mailbox_test.go +++ b/t/e2e/whisper/whisper_mailbox_test.go @@ -13,6 +13,8 @@ import ( "testing" "time" + "github.com/status-im/status-go/services/shhext" + "github.com/stretchr/testify/suite" "golang.org/x/crypto/sha3" @@ -25,7 +27,6 @@ import ( "github.com/status-im/status-go/mailserver" "github.com/status-im/status-go/params" "github.com/status-im/status-go/rpc" - "github.com/status-im/status-go/services/shhext" "github.com/status-im/status-go/t/helpers" "github.com/status-im/status-go/t/utils" "github.com/status-im/status-go/whisper/v6" diff --git a/vendor/github.com/status-im/status-go/protocol/messenger.go b/vendor/github.com/status-im/status-go/protocol/messenger.go index 4baec271e..f0afa68e4 100644 --- a/vendor/github.com/status-im/status-go/protocol/messenger.go +++ b/vendor/github.com/status-im/status-go/protocol/messenger.go @@ -283,8 +283,7 @@ func NewMessenger( // Initialize transport layer. 
var transp transport.Transport - - if shh, err := node.GetWhisper(nil); err == nil { + if shh, err := node.GetWhisper(nil); err == nil && shh != nil { transp, err = shhtransp.NewWhisperServiceTransport( shh, identity, @@ -296,10 +295,10 @@ func NewMessenger( if err != nil { return nil, errors.Wrap(err, "failed to create WhisperServiceTransport") } - } else if err != nil { + } else { logger.Info("failed to find Whisper service; trying Waku", zap.Error(err)) waku, err := node.GetWaku(nil) - if err != nil { + if err != nil || waku == nil { return nil, errors.Wrap(err, "failed to find Whisper and Waku services") } transp, err = wakutransp.NewWakuServiceTransport( diff --git a/vendor/github.com/status-im/status-go/whisper/v6/whisper.go b/vendor/github.com/status-im/status-go/whisper/v6/whisper.go index 693fede2d..8bd55ff81 100644 --- a/vendor/github.com/status-im/status-go/whisper/v6/whisper.go +++ b/vendor/github.com/status-im/status-go/whisper/v6/whisper.go @@ -1202,18 +1202,14 @@ func (whisper *Whisper) runMessageLoop(p *Peer, rw p2p.MsgReadWriter) error { log.Warn("failed to decode response message, peer will be disconnected", "peer", p.peer.ID(), "err", err) return errors.New("invalid request response message") } - event, err := CreateMailServerEvent(p.peer.ID(), payload) - if err != nil { log.Warn("error while parsing request complete code, peer will be disconnected", "peer", p.peer.ID(), "err", err) return err } - if event != nil { whisper.postP2P(*event) } - } default: // New message types might be implemented in the future versions of Whisper. diff --git a/whisper/whisper.go b/whisper/whisper.go index 693fede2d..8bd55ff81 100644 --- a/whisper/whisper.go +++ b/whisper/whisper.go @@ -1202,18 +1202,14 @@ func (whisper *Whisper) runMessageLoop(p *Peer, rw p2p.MsgReadWriter) error { log.Warn("failed to decode response message, peer will be disconnected", "peer", p.peer.ID(), "err", err) return errors.New("invalid request response message") } - event, err := CreateMailServerEvent(p.peer.ID(), payload) - if err != nil { log.Warn("error while parsing request complete code, peer will be disconnected", "peer", p.peer.ID(), "err", err) return err } - if event != nil { whisper.postP2P(*event) } - } default: // New message types might be implemented in the future versions of Whisper.
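
For reference, a minimal sketch of exercising the new wakuext namespace from an external RPC client, assuming a status-go node with HTTP-RPC enabled on localhost:8545 and the wakuext service registered as in services/wakuext/service.go. The endpoint address and the JSON field names passed to wakuext_requestMessages (mailServerPeer, symKeyID, limit) are assumptions for illustration, not taken from this patch.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Assumption: the node exposes HTTP-RPC on this address with the
	// "wakuext" namespace enabled.
	client, err := rpc.Dial("http://localhost:8545")
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// Sanity check that the namespace is wired up, mirroring the
	// shhext_echo/wakuext_echo calls in services/shhext_wakuext_test.go.
	var echo string
	if err := client.Call(&echo, "wakuext_echo", "hello"); err != nil {
		panic(err)
	}
	fmt.Println("echo:", echo)

	// Request historic messages from a mail server. The enode URL and
	// symmetric key ID are placeholders; the JSON casing of the request
	// fields is assumed here.
	var requestID string
	err = client.Call(&requestID, "wakuext_requestMessages", map[string]interface{}{
		"mailServerPeer": "enode://<mailserver-id>@127.0.0.1:30303",
		"symKeyID":       "<sym-key-id>",
		"limit":          10,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("request id:", requestID)
}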