package protocol

import (
	"context"
	"crypto/ecdsa"
	"database/sql"
	"time"

	"github.com/golang/protobuf/proto"
	"github.com/pkg/errors"
	datasyncnode "github.com/vacp2p/mvds/node"
	datasyncproto "github.com/vacp2p/mvds/protobuf"
	"go.uber.org/zap"

	"github.com/status-im/status-go/eth-node/crypto"
	"github.com/status-im/status-go/eth-node/types"
	"github.com/status-im/status-go/protocol/datasync"
	datasyncpeer "github.com/status-im/status-go/protocol/datasync/peer"
	"github.com/status-im/status-go/protocol/encryption"
	"github.com/status-im/status-go/protocol/protobuf"
	"github.com/status-im/status-go/protocol/transport"
	v1protocol "github.com/status-im/status-go/protocol/v1"
)

// Whisper message properties. TTL and PoW time are expressed in seconds.
const (
	whisperTTL     = 15
	whisperPoW     = 0.002
	whisperPoWTime = 5
)

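// messageProcessor ties together the layers involved in sending and receiving
// protocol messages: payload wrapping, the encryption protocol, the optional
// data sync layer and the underlying transport.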
type messageProcessor struct {
	identity  *ecdsa.PrivateKey
	datasync  *datasync.DataSync
	protocol  *encryption.Protocol
	transport transport.Transport
	logger    *zap.Logger

	featureFlags featureFlags
}

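// newMessageProcessor creates a messageProcessor backed by a persistent data
// sync node stored in the given database, the encryption protocol and the
// transport.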
func newMessageProcessor(
	identity *ecdsa.PrivateKey,
	database *sql.DB,
	enc *encryption.Protocol,
	transport transport.Transport,
	logger *zap.Logger,
	features featureFlags,
) (*messageProcessor, error) {
	dataSyncTransport := datasync.NewNodeTransport()
	dataSyncNode, err := datasyncnode.NewPersistentNode(
		database,
		dataSyncTransport,
		datasyncpeer.PublicKeyToPeerID(identity.PublicKey),
		datasyncnode.BATCH,
		datasync.CalculateSendTime,
		logger,
	)
	if err != nil {
		return nil, err
	}
	ds := datasync.New(dataSyncNode, dataSyncTransport, features.datasync, logger)

	p := &messageProcessor{
		identity:     identity,
		datasync:     ds,
		protocol:     enc,
		transport:    transport,
		logger:       logger,
		featureFlags: features,
	}

	// When data sync is enabled, messages are first added to the data sync layer
	// and the actual encrypt-and-send calls are postponed.
	// sendDataSync, registered here as the dispatch function, is responsible
	// for encrypting and sending the postponed messages.
	if features.datasync {
		ds.Init(p.sendDataSync)
		ds.Start(300 * time.Millisecond)
	}

	return p, nil
}

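// Stop stops the data sync node.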
func (p *messageProcessor) Stop() {
	p.datasync.Stop() // idempotent op
}

// SendPrivateRaw takes encoded data, encrypts it and sends it through the wire.
func (p *messageProcessor) SendPrivateRaw(
	ctx context.Context,
	recipient *ecdsa.PublicKey,
	data []byte,
	messageType protobuf.ApplicationMetadataMessage_Type,
) ([]byte, error) {
	p.logger.Debug(
		"sending a private message",
		zap.Binary("public-key", crypto.FromECDSAPub(recipient)),
		zap.String("site", "SendPrivateRaw"),
	)
	return p.sendPrivate(ctx, recipient, data, messageType)
}

// SendGroupRaw takes encoded data, encrypts it and sends it through the wire
// to all recipients, always returning the message ID.
func (p *messageProcessor) SendGroupRaw(
	ctx context.Context,
	recipients []*ecdsa.PublicKey,
	data []byte,
	messageType protobuf.ApplicationMetadataMessage_Type,
) ([]byte, error) {
	p.logger.Debug(
		"sending a private group message",
		zap.String("site", "SendGroupRaw"),
	)
	// Calculate the message ID first so it can be returned after sending
	// to all recipients.
	wrappedMessage, err := p.wrapMessageV1(data, messageType)
	if err != nil {
		return nil, errors.Wrap(err, "failed to wrap message")
	}

	messageID := v1protocol.MessageID(&p.identity.PublicKey, wrappedMessage)

	for _, recipient := range recipients {
		_, err = p.sendPrivate(ctx, recipient, data, messageType)
		if err != nil {
			return nil, errors.Wrap(err, "failed to send message")
		}
	}
	return messageID, nil
}

// sendPrivate sends data to the recipient identified by the given public key.
func (p *messageProcessor) sendPrivate(
	ctx context.Context,
	recipient *ecdsa.PublicKey,
	data []byte,
	messageType protobuf.ApplicationMetadataMessage_Type,
) ([]byte, error) {
	p.logger.Debug("sending private message", zap.Binary("recipient", crypto.FromECDSAPub(recipient)))

	wrappedMessage, err := p.wrapMessageV1(data, messageType)
	if err != nil {
		return nil, errors.Wrap(err, "failed to wrap message")
	}

	messageID := v1protocol.MessageID(&p.identity.PublicKey, wrappedMessage)

	if p.featureFlags.datasync {
		if err := p.addToDataSync(recipient, wrappedMessage); err != nil {
			return nil, errors.Wrap(err, "failed to send message with datasync")
		}

		// No need to call transport tracking here;
		// it is done in the data sync dispatch step.
	} else {
		messageSpec, err := p.protocol.BuildDirectMessage(p.identity, recipient, wrappedMessage)
		if err != nil {
			return nil, errors.Wrap(err, "failed to encrypt message")
		}

		hash, newMessage, err := p.sendMessageSpec(ctx, recipient, messageSpec)
		if err != nil {
			return nil, errors.Wrap(err, "failed to send a message spec")
		}

		p.transport.Track([][]byte{messageID}, hash, newMessage)
	}

	return messageID, nil
}

// SendPairInstallation sends data to the recipient, using DH encryption.
func (p *messageProcessor) SendPairInstallation(
	ctx context.Context,
	recipient *ecdsa.PublicKey,
	data []byte,
	messageType protobuf.ApplicationMetadataMessage_Type,
) ([]byte, error) {
	p.logger.Debug("sending private message", zap.Binary("recipient", crypto.FromECDSAPub(recipient)))

	wrappedMessage, err := p.wrapMessageV1(data, messageType)
	if err != nil {
		return nil, errors.Wrap(err, "failed to wrap message")
	}

	messageSpec, err := p.protocol.BuildDHMessage(p.identity, recipient, wrappedMessage)
	if err != nil {
		return nil, errors.Wrap(err, "failed to encrypt message")
	}

	hash, newMessage, err := p.sendMessageSpec(ctx, recipient, messageSpec)
	if err != nil {
		return nil, errors.Wrap(err, "failed to send a message spec")
	}

	messageID := v1protocol.MessageID(&p.identity.PublicKey, wrappedMessage)
	p.transport.Track([][]byte{messageID}, hash, newMessage)

	return messageID, nil
}

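// EncodeMembershipUpdate encodes a group's chat ID and membership events,
// together with an optional chat message, into a payload ready to be sent.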
func (p *messageProcessor) EncodeMembershipUpdate(
	group *v1protocol.Group,
	chatMessage *protobuf.ChatMessage,
) ([]byte, error) {
	message := v1protocol.MembershipUpdateMessage{
		ChatID:  group.ChatID(),
		Events:  group.Events(),
		Message: chatMessage,
	}
	encodedMessage, err := v1protocol.EncodeMembershipUpdateMessage(message)
	if err != nil {
		return nil, errors.Wrap(err, "failed to encode membership update message")
	}

	return encodedMessage, nil
}

// SendPublicRaw takes encoded data, wraps it and sends it through the wire
// to the public chat identified by chatName.
func (p *messageProcessor) SendPublicRaw(
	ctx context.Context,
	chatName string,
	data []byte,
	messageType protobuf.ApplicationMetadataMessage_Type,
) ([]byte, error) {
	var newMessage *types.NewMessage

	wrappedMessage, err := p.wrapMessageV1(data, messageType)
	if err != nil {
		return nil, errors.Wrap(err, "failed to wrap message")
	}

	newMessage = &types.NewMessage{
		TTL:       whisperTTL,
		Payload:   wrappedMessage,
		PowTarget: whisperPoW,
		PowTime:   whisperPoWTime,
	}

	hash, err := p.transport.SendPublic(ctx, newMessage, chatName)
	if err != nil {
		return nil, err
	}

	messageID := v1protocol.MessageID(&p.identity.PublicKey, wrappedMessage)

	p.transport.Track([][]byte{messageID}, hash, newMessage)

	return messageID, nil
}

// handleMessages expects a whisper message as input. The message goes through
// a series of transformations (transport, encryption, data sync and application
// metadata layers) until it is parsed into an application layer message; when
// applicationLayer is false, processing stops at the layer before.
// It returns an error only if a required step fails.
func (p *messageProcessor) handleMessages(shhMessage *types.Message, applicationLayer bool) ([]*v1protocol.StatusMessage, error) {
	logger := p.logger.With(zap.String("site", "handleMessages"))
	hlogger := logger.With(zap.ByteString("hash", shhMessage.Hash))
	var statusMessage v1protocol.StatusMessage

	err := statusMessage.HandleTransport(shhMessage)
	if err != nil {
		hlogger.Error("failed to handle transport layer message", zap.Error(err))
		return nil, err
	}

	err = p.handleEncryptionLayer(context.Background(), &statusMessage)
	if err != nil {
		hlogger.Debug("failed to handle an encryption message", zap.Error(err))
	}

	statusMessages, err := statusMessage.HandleDatasync(p.datasync)
	if err != nil {
		hlogger.Debug("failed to handle datasync message", zap.Error(err))
	}

	for _, statusMessage := range statusMessages {
		err := statusMessage.HandleApplicationMetadata()
		if err != nil {
			hlogger.Error("failed to handle application metadata layer message", zap.Error(err))
		}

		if applicationLayer {
			err = statusMessage.HandleApplication()
			if err != nil {
				hlogger.Error("failed to handle application layer message", zap.Error(err))
			}
		}
	}

	return statusMessages, nil
}

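// handleEncryptionLayer attempts to decrypt the message. If the sender does not
// know about this device yet (encryption.ErrDeviceNotFound), a bundle is
// advertised to them before the error is propagated.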
func (p *messageProcessor) handleEncryptionLayer(ctx context.Context, message *v1protocol.StatusMessage) error {
	logger := p.logger.With(zap.String("site", "handleEncryptionLayer"))
	publicKey := message.SigPubKey()

	err := message.HandleEncryption(p.identity, publicKey, p.protocol)
	if err == encryption.ErrDeviceNotFound {
		if err := p.handleErrDeviceNotFound(ctx, publicKey); err != nil {
			logger.Error("failed to handle ErrDeviceNotFound", zap.Error(err))
		}
	}
	if err != nil {
		return errors.Wrap(err, "failed to process an encrypted message")
	}

	return nil
}

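// handleErrDeviceNotFound advertises our bundle to the given public key,
// rate-limited by ShouldAdvertiseBundle, so that the sender can learn about
// this device.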
func (p *messageProcessor) handleErrDeviceNotFound(ctx context.Context, publicKey *ecdsa.PublicKey) error {
	now := time.Now().Unix()
	advertise, err := p.protocol.ShouldAdvertiseBundle(publicKey, now)
	if err != nil {
		return err
	}
	if !advertise {
		return nil
	}

	messageSpec, err := p.protocol.BuildBundleAdvertiseMessage(p.identity, publicKey)
	if err != nil {
		return err
	}

	ctx, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	_, _, err = p.sendMessageSpec(ctx, publicKey, messageSpec)
	if err != nil {
		return err
	}

	p.protocol.ConfirmBundleAdvertisement(publicKey, now)

	return nil
}

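// wrapMessageV1 wraps the encoded payload and its type into an application
// metadata message signed with our identity key.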
func (p *messageProcessor) wrapMessageV1(encodedMessage []byte, messageType protobuf.ApplicationMetadataMessage_Type) ([]byte, error) {
	wrappedMessage, err := v1protocol.WrapMessageV1(encodedMessage, messageType, p.identity)
	if err != nil {
		return nil, errors.Wrap(err, "failed to wrap message")
	}
	return wrappedMessage, nil
}

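// addToDataSync appends the message to the one-to-one data sync group shared
// with the given public key, adding the peer to the group first if necessary.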
func (p *messageProcessor) addToDataSync(publicKey *ecdsa.PublicKey, message []byte) error {
	groupID := datasync.ToOneToOneGroupID(&p.identity.PublicKey, publicKey)
	peerID := datasyncpeer.PublicKeyToPeerID(*publicKey)
	exist, err := p.datasync.IsPeerInGroup(groupID, peerID)
	if err != nil {
		return errors.Wrap(err, "failed to check if peer is in group")
	}
	if !exist {
		if err := p.datasync.AddPeer(groupID, peerID); err != nil {
			return errors.Wrap(err, "failed to add peer")
		}
	}
	_, err = p.datasync.AppendMessage(groupID, message)
	if err != nil {
		return errors.Wrap(err, "failed to append message to datasync")
	}

	return nil
}

// sendDataSync sends a message scheduled by the data sync layer.
// The data sync layer registers this method as its "dispatch" function.
func (p *messageProcessor) sendDataSync(ctx context.Context, publicKey *ecdsa.PublicKey, encodedMessage []byte, payload *datasyncproto.Payload) error {
	messageIDs := make([][]byte, 0, len(payload.Messages))
	for _, payload := range payload.Messages {
		messageIDs = append(messageIDs, v1protocol.MessageID(&p.identity.PublicKey, payload.Body))
	}

	messageSpec, err := p.protocol.BuildDirectMessage(p.identity, publicKey, encodedMessage)
	if err != nil {
		return errors.Wrap(err, "failed to encrypt message")
	}

	hash, newMessage, err := p.sendMessageSpec(ctx, publicKey, messageSpec)
	if err != nil {
		return err
	}

	p.transport.Track(messageIDs, hash, newMessage)

	return nil
}

// sendMessageSpec analyses the spec properties and selects the proper transport
// method: a shared-secret topic if a secret has been negotiated with the peer,
// or the recipient's partitioned topic otherwise.
func (p *messageProcessor) sendMessageSpec(ctx context.Context, publicKey *ecdsa.PublicKey, messageSpec *encryption.ProtocolMessageSpec) ([]byte, *types.NewMessage, error) {
	newMessage, err := messageSpecToWhisper(messageSpec)
	if err != nil {
		return nil, nil, err
	}

	logger := p.logger.With(zap.String("site", "sendMessageSpec"))

	var hash []byte

	switch {
	case messageSpec.SharedSecret != nil:
		logger.Debug("sending using shared secret")
		hash, err = p.transport.SendPrivateWithSharedSecret(ctx, newMessage, publicKey, messageSpec.SharedSecret)
	default:
		logger.Debug("sending partitioned topic")
		hash, err = p.transport.SendPrivateWithPartitioned(ctx, newMessage, publicKey)
	}
	if err != nil {
		return nil, nil, err
	}

	return hash, newMessage, nil
}

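// messageSpecToWhisper marshals an encrypted message spec into a Whisper
// NewMessage using the default TTL and proof-of-work settings.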
func messageSpecToWhisper(spec *encryption.ProtocolMessageSpec) (*types.NewMessage, error) {
	var newMessage *types.NewMessage

	payload, err := proto.Marshal(spec.Message)
	if err != nil {
		return newMessage, err
	}

	newMessage = &types.NewMessage{
		TTL:       whisperTTL,
		Payload:   payload,
		PowTarget: whisperPoW,
		PowTime:   whisperPoWTime,
	}
	return newMessage, nil
}

// isPubKeyEqual checks whether two public keys are equal.
func isPubKeyEqual(a, b *ecdsa.PublicKey) bool {
	// the curve is always the same, just compare the points
	return a.X.Cmp(b.X) == 0 && a.Y.Cmp(b.Y) == 0
}