feat: add configurable throttling mechanism for importing msgs
part of: status-im/status-desktop#10815
commit 15d2b4fe80
parent 449314a4dc
go.mod | 2 +-
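
In short, the commit replaces the fixed time.Sleep(2 * time.Second) pauses that used to sit between saved message chunks with a shared golang.org/x/time/rate limiter whose rate can be switched at runtime through two new endpoints. The standalone sketch below only illustrates that limiter pattern; it is not code from this commit, and every name in it is made up for the example.

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Start in "slow" mode: one token per second, burst of one.
	limiter := rate.NewLimiter(rate.Every(time.Second), 1)
	ctx := context.Background()
	start := time.Now()

	// Each Wait blocks until a token is available, throttling the loop.
	for i := 0; i < 3; i++ {
		if err := limiter.Wait(ctx); err != nil {
			fmt.Println("wait interrupted:", err)
			return
		}
		fmt.Printf("chunk %d handled after %v\n", i, time.Since(start).Round(time.Millisecond))
	}

	// Raising the limit takes effect for subsequent Wait calls ("fast" mode).
	limiter.SetLimit(rate.Every(10 * time.Millisecond))
	for i := 3; i < 6; i++ {
		_ = limiter.Wait(ctx)
		fmt.Printf("chunk %d handled after %v\n", i, time.Since(start).Round(time.Millisecond))
	}
}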
@@ -88,6 +88,7 @@ require (
 	go.uber.org/multierr v1.11.0
 	golang.org/x/exp v0.0.0-20230321023759-10a507213a29
 	golang.org/x/net v0.8.0
+	golang.org/x/time v0.0.0-20220922220347-f3bd1da661af
 )

 require (
@@ -267,7 +268,6 @@ require (
 	golang.org/x/sys v0.7.0 // indirect
 	golang.org/x/term v0.6.0 // indirect
 	golang.org/x/text v0.8.0 // indirect
-	golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
 	golang.org/x/tools v0.7.0 // indirect
 	golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
 	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
@@ -3234,6 +3234,7 @@ func (m *Manager) ExtractMessagesFromHistoryArchive(communityID types.HexBytes,
 	}

 	data := make([]byte, metadata.Size-metadata.Padding)
+	m.LogStdout("loading history archive data into memory", zap.Float64("data_size_MB", float64(metadata.Size-metadata.Padding)/1024.0/1024.0))
 	_, err = dataFile.Read(data)
 	if err != nil {
 		m.LogStdout("failed failed to read archive data", zap.Error(err))
@@ -66,9 +66,6 @@ import (
 	"github.com/status-im/status-go/telemetry"
 )

-const maxChunkSizeMessages = 1000
-const maxChunkSizeBytes = 1500000
-
 // todo: kozieiev: get rid of wakutransp word
 type chatContext string

@@ -3280,10 +3277,8 @@ func (m *Messenger) handleImportedMessages(messagesToHandle map[transport.Filter
 	}

 	importMessagesToSave := messageState.Response.DiscordMessages()
-	importMessagesCount := len(importMessagesToSave)
-	if importMessagesCount > 0 {
-		if importMessagesCount <= maxChunkSizeMessages {
-			m.communitiesManager.LogStdout(fmt.Sprintf("saving %d discord messages", importMessagesCount))
+	if len(importMessagesToSave) > 0 {
+		m.communitiesManager.LogStdout(fmt.Sprintf("saving %d discord messages", len(importMessagesToSave)))
 		m.handleImportMessagesMutex.Lock()
 		err := m.persistence.SaveDiscordMessages(importMessagesToSave)
 		if err != nil {
@@ -3292,58 +3287,24 @@ func (m *Messenger) handleImportedMessages(messagesToHandle map[transport.Filter
 			return err
 		}
 		m.handleImportMessagesMutex.Unlock()
-		} else {
-			// We need to process the messages in chunks otherwise we'll
-			// block the database for too long
-			chunks := chunkSlice(importMessagesToSave, maxChunkSizeMessages)
-			chunksCount := len(chunks)
-			for i, msgs := range chunks {
-				m.communitiesManager.LogStdout(fmt.Sprintf("saving %d/%d chunk with %d discord messages", i+1, chunksCount, len(msgs)))
-				// We can't defer Unlock here because we want to
-				// unlock after every iteration to leave room for
-				// other processes to access the database
-				m.handleImportMessagesMutex.Lock()
-				err := m.persistence.SaveDiscordMessages(msgs)
-				if err != nil {
-					m.communitiesManager.LogStdout(fmt.Sprintf("failed to save discord message chunk %d of %d", i+1, chunksCount), zap.Error(err))
-					m.handleImportMessagesMutex.Unlock()
-					return err
-				}
-				m.handleImportMessagesMutex.Unlock()
-				// We slow down the saving of message chunks to keep the database responsive
-				if i < chunksCount-1 {
-					time.Sleep(2 * time.Second)
-				}
-			}
-		}
 	}

 	messageAttachmentsToSave := messageState.Response.DiscordMessageAttachments()
 	if len(messageAttachmentsToSave) > 0 {
-		chunks := chunkAttachmentsByByteSize(messageAttachmentsToSave, maxChunkSizeBytes)
-		chunksCount := len(chunks)
-		for i, attachments := range chunks {
-			m.communitiesManager.LogStdout(fmt.Sprintf("saving %d/%d chunk with %d discord message attachments", i+1, chunksCount, len(attachments)))
+		m.communitiesManager.LogStdout(fmt.Sprintf("saving %d discord message attachments", len(messageAttachmentsToSave)))
 		m.handleImportMessagesMutex.Lock()
-		err := m.persistence.SaveDiscordMessageAttachments(attachments)
+		err := m.persistence.SaveDiscordMessageAttachments(messageAttachmentsToSave)
 		if err != nil {
-			m.communitiesManager.LogStdout(fmt.Sprintf("failed to save discord message attachments chunk %d of %d", i+1, chunksCount), zap.Error(err))
+			m.communitiesManager.LogStdout("failed to save discord message attachments", zap.Error(err))
 			m.handleImportMessagesMutex.Unlock()
 			return err
 		}
-			// We slow down the saving of message chunks to keep the database responsive
 		m.handleImportMessagesMutex.Unlock()
-			if i < chunksCount-1 {
-				time.Sleep(2 * time.Second)
-			}
-		}
 	}

 	messagesToSave := messageState.Response.Messages()
-	messagesCount := len(messagesToSave)
-	if messagesCount > 0 {
-		if messagesCount <= maxChunkSizeMessages {
-			m.communitiesManager.LogStdout(fmt.Sprintf("saving %d app messages", messagesCount))
+	if len(messagesToSave) > 0 {
+		m.communitiesManager.LogStdout(fmt.Sprintf("saving %d app messages", len(messagesToSave)))
 		m.handleMessagesMutex.Lock()
 		err := m.SaveMessages(messagesToSave)
 		if err != nil {
@@ -3351,25 +3312,8 @@ func (m *Messenger) handleImportedMessages(messagesToHandle map[transport.Filter
 			return err
 		}
 		m.handleMessagesMutex.Unlock()
-		} else {
-			chunks := chunkSlice(messagesToSave, maxChunkSizeMessages)
-			chunksCount := len(chunks)
-			for i, msgs := range chunks {
-				m.communitiesManager.LogStdout(fmt.Sprintf("saving %d/%d chunk with %d app messages", i+1, chunksCount, len(msgs)))
-				m.handleMessagesMutex.Lock()
-				err := m.SaveMessages(msgs)
-				if err != nil {
-					m.handleMessagesMutex.Unlock()
-					return err
-				}
-				m.handleMessagesMutex.Unlock()
-				// We slow down the saving of message chunks to keep the database responsive
-				if i < chunksCount-1 {
-					time.Sleep(2 * time.Second)
-				}
-			}
-		}
 	}

 	// Save chats if they were modified
 	if len(messageState.Response.chats) > 0 {
 		err := m.saveChats(messageState.Response.Chats())
@@ -4478,29 +4422,11 @@ func (m *Messenger) saveDataAndPrepareResponse(messageState *ReceivedMessageStat
 	}

 	messagesToSave := messageState.Response.Messages()
-	messagesCount := len(messagesToSave)
-	if messagesCount > 0 {
-		if messagesCount <= maxChunkSizeMessages {
+	if len(messagesToSave) > 0 {
 		err = m.SaveMessages(messagesToSave)
 		if err != nil {
 			return nil, err
 		}
-		} else {
-			messageChunks := chunkSlice(messagesToSave, maxChunkSizeMessages)
-			chunksCount := len(messageChunks)
-			for i, msgs := range messageChunks {
-				err := m.SaveMessages(msgs)
-				if err != nil {
-					return nil, err
-				}
-				// We slow down the saving of message chunks to keep the database responsive
-				// this is important when messages from history archives are handled,
-				// which could result in handling several thousand messages per archive
-				if i < chunksCount-1 {
-					time.Sleep(2 * time.Second)
-				}
-			}
-		}
 	}

 	for _, emojiReaction := range messageState.EmojiReactions {
@@ -6292,45 +6218,6 @@ func (m *Messenger) handleSyncVerificationRequest(state *ReceivedMessageState, m
 	return nil
 }

-func chunkSlice[T comparable](slice []T, chunkSize int) [][]T {
-	var chunks [][]T
-	for i := 0; i < len(slice); i += chunkSize {
-		end := i + chunkSize
-
-		// necessary check to avoid slicing beyond
-		// slice capacity
-		if end > len(slice) {
-			end = len(slice)
-		}
-
-		chunks = append(chunks, slice[i:end])
-	}
-
-	return chunks
-}
-
-func chunkAttachmentsByByteSize(slice []*protobuf.DiscordMessageAttachment, maxFileSizeBytes uint64) [][]*protobuf.DiscordMessageAttachment {
-	var chunks [][]*protobuf.DiscordMessageAttachment
-
-	currentChunkSize := uint64(0)
-	currentChunk := make([]*protobuf.DiscordMessageAttachment, 0)
-
-	for i, attachment := range slice {
-		payloadBytes := attachment.GetFileSizeBytes()
-		if currentChunkSize+payloadBytes > maxFileSizeBytes && len(currentChunk) > 0 {
-			chunks = append(chunks, currentChunk)
-			currentChunk = make([]*protobuf.DiscordMessageAttachment, 0)
-			currentChunkSize = uint64(0)
-		}
-		currentChunk = append(currentChunk, attachment)
-		currentChunkSize = currentChunkSize + payloadBytes
-		if i == len(slice)-1 {
-			chunks = append(chunks, currentChunk)
-		}
-	}
-	return chunks
-}
-
 func (m *Messenger) ImageServerURL() string {
 	return m.httpServer.MakeImageServerURL()
 }
@@ -12,6 +12,8 @@ import (
 	"sync"
 	"time"

+	"golang.org/x/time/rate"
+
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/ethclient"

@@ -47,6 +49,19 @@ var updateActiveMembersInterval = 24 * time.Hour

 const discordTimestampLayout = "2006-01-02T15:04:05+00:00"

+var importRateLimiter = rate.NewLimiter(rate.Every(importSlowRate), 1)
+
+const (
+	importSlowRate          = time.Second / 1
+	importFastRate          = time.Second / 100
+	importMessagesChunkSize = 10
+)
+
+const (
+	maxChunkSizeMessages = 1000
+	maxChunkSizeBytes    = 1500000
+)
+
 func (m *Messenger) publishOrg(org *communities.Community) error {
 	m.logger.Debug("publishing org", zap.String("org-id", org.IDString()), zap.Any("org", org))
 	payload, err := org.MarshaledDescription()
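
With these values the limiter admits one chunk per second in the default slow mode (rate.Every(time.Second)) and one chunk every 10 ms after a speed-up (rate.Every(time.Second / 100)). Since archive messages are processed in chunks of importMessagesChunkSize = 10, that works out to roughly 10 messages per second while throttled and up to about 1,000 messages per second in fast mode, not counting the time spent actually handling each chunk.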
@@ -2309,14 +2324,28 @@ func (m *Messenger) resumeHistoryArchivesImport(communityID types.HexBytes) erro
 	return nil
 }

+func (m *Messenger) SpeedupArchivesImport() {
+	importRateLimiter.SetLimit(rate.Every(importFastRate))
+}
+
+func (m *Messenger) SlowdownArchivesImport() {
+	importRateLimiter.SetLimit(rate.Every(importSlowRate))
+}
+
 func (m *Messenger) importHistoryArchives(communityID types.HexBytes, cancel chan struct{}) error {
 	importTicker := time.NewTicker(100 * time.Millisecond)
 	defer importTicker.Stop()

+	ctx, cancelFunc := context.WithCancel(context.Background())
+	go func() {
+		<-cancel
+		cancelFunc()
+	}()
+
 importMessageArchivesLoop:
 	for {
 		select {
-		case <-cancel:
+		case <-ctx.Done():
 			m.communitiesManager.LogStdout("interrupted importing history archive messages")
 			return nil
 		case <-importTicker.C:
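
The loop above used to select on the raw cancel channel; it now derives a context from it so the same cancellation also aborts importRateLimiter.Wait in the hunk below. A minimal, self-contained sketch of that bridge (the helper name is made up for the example, not from the codebase): once the channel is closed, Wait returns the context's error, which is why the next hunk filters out context.Canceled before logging.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

// bridgeCancel adapts a close-style cancel channel to a context (illustrative helper).
func bridgeCancel(cancel <-chan struct{}) (context.Context, context.CancelFunc) {
	ctx, cancelFunc := context.WithCancel(context.Background())
	go func() {
		<-cancel
		cancelFunc()
	}()
	return ctx, cancelFunc
}

func main() {
	cancel := make(chan struct{})
	ctx, stop := bridgeCancel(cancel)
	defer stop()

	limiter := rate.NewLimiter(rate.Every(time.Second), 1)

	go func() {
		time.Sleep(150 * time.Millisecond)
		close(cancel) // e.g. the import gets cancelled
	}()

	_ = limiter.Wait(ctx) // consumes the initial token immediately
	err := limiter.Wait(ctx)
	fmt.Println(errors.Is(err, context.Canceled)) // true: Wait unblocks once the context is done
}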
@@ -2345,16 +2374,19 @@ importMessageArchivesLoop:
 			}

 			m.config.messengerSignalsHandler.ImportingHistoryArchiveMessages(types.EncodeHex(communityID))
-			response, err := m.handleArchiveMessages(archiveMessages, communityID)
-			if err != nil {
-				m.communitiesManager.LogStdout("failed to handle archive messages", zap.Error(err))
-				continue
+
+			for _, messagesChunk := range chunkSlice(archiveMessages, importMessagesChunkSize) {
+				if err := importRateLimiter.Wait(ctx); err != nil {
+					if !errors.Is(err, context.Canceled) {
+						m.communitiesManager.LogStdout("rate limiter error when handling archive messages", zap.Error(err))
+					}
+					continue importMessageArchivesLoop
 				}

-			err = m.communitiesManager.SetMessageArchiveIDImported(communityID, downloadedArchiveID, true)
+				response, err := m.handleArchiveMessages(messagesChunk, communityID)
 				if err != nil {
-				m.communitiesManager.LogStdout("failed to mark history message archive as imported", zap.Error(err))
-				continue
+					m.communitiesManager.LogStdout("failed to handle archive messages", zap.Error(err))
+					continue importMessageArchivesLoop
 				}

 				if !response.IsEmpty() {
@@ -2364,6 +2396,13 @@ importMessageArchivesLoop:
 					localnotifications.PushMessages(notifications)
 				}
 			}
+
+			err = m.communitiesManager.SetMessageArchiveIDImported(communityID, downloadedArchiveID, true)
+			if err != nil {
+				m.communitiesManager.LogStdout("failed to mark history message archive as imported", zap.Error(err))
+				continue
+			}
+		}
 	}
 	return nil
 }
@@ -3640,3 +3679,42 @@ func (m *Messenger) CheckPermissionsToJoinCommunity(request *requests.CheckPermi

 	return m.communitiesManager.CheckPermissionToJoin(request.CommunityID, addresses)
 }
+
+func chunkSlice[T comparable](slice []T, chunkSize int) [][]T {
+	var chunks [][]T
+	for i := 0; i < len(slice); i += chunkSize {
+		end := i + chunkSize
+
+		// necessary check to avoid slicing beyond
+		// slice capacity
+		if end > len(slice) {
+			end = len(slice)
+		}
+
+		chunks = append(chunks, slice[i:end])
+	}
+
+	return chunks
+}
+
+func chunkAttachmentsByByteSize(slice []*protobuf.DiscordMessageAttachment, maxFileSizeBytes uint64) [][]*protobuf.DiscordMessageAttachment {
+	var chunks [][]*protobuf.DiscordMessageAttachment
+
+	currentChunkSize := uint64(0)
+	currentChunk := make([]*protobuf.DiscordMessageAttachment, 0)
+
+	for i, attachment := range slice {
+		payloadBytes := attachment.GetFileSizeBytes()
+		if currentChunkSize+payloadBytes > maxFileSizeBytes && len(currentChunk) > 0 {
+			chunks = append(chunks, currentChunk)
+			currentChunk = make([]*protobuf.DiscordMessageAttachment, 0)
+			currentChunkSize = uint64(0)
+		}
+		currentChunk = append(currentChunk, attachment)
+		currentChunkSize = currentChunkSize + payloadBytes
+		if i == len(slice)-1 {
+			chunks = append(chunks, currentChunk)
+		}
+	}
+	return chunks
+}
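
The two helpers above are moved here unchanged from the file touched earlier in the diff: chunkSlice splits a slice into fixed-size pieces (the last piece may be shorter), and chunkAttachmentsByByteSize groups attachments so each group's cumulative GetFileSizeBytes() stays within a byte budget. A tiny standalone demo of the fixed-size variant, with the generic helper repeated so the snippet runs on its own:

package main

import "fmt"

// Same shape as the generic helper added in this diff.
func chunkSlice[T comparable](slice []T, chunkSize int) [][]T {
	var chunks [][]T
	for i := 0; i < len(slice); i += chunkSize {
		end := i + chunkSize
		if end > len(slice) {
			end = len(slice)
		}
		chunks = append(chunks, slice[i:end])
	}
	return chunks
}

func main() {
	ids := []int{1, 2, 3, 4, 5, 6, 7}
	fmt.Println(chunkSlice(ids, 3)) // prints [[1 2 3] [4 5 6] [7]]
}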
@@ -440,6 +440,16 @@ func (api *PublicAPI) ImportCommunity(ctx context.Context, hexPrivateKey string)

 }

+// Speeds up importing messages from archives
+func (api *PublicAPI) SpeedupArchivesImport(ctx context.Context) {
+	api.service.messenger.SpeedupArchivesImport()
+}
+
+// Slows down importing messages from archives
+func (api *PublicAPI) SlowdownArchivesImport(ctx context.Context) {
+	api.service.messenger.SlowdownArchivesImport()
+}
+
 // CreateCommunityChat creates a community chat in the given community
 func (api *PublicAPI) CreateCommunityChat(communityID types.HexBytes, c *protobuf.CommunityChat) (*protocol.MessengerResponse, error) {
 	return api.service.messenger.CreateCommunityChat(communityID, c)
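
How a client reaches these two endpoints depends on how this PublicAPI is registered with the RPC server, which this diff does not show. As a hedged illustration only, assuming the service is exposed under the "wakuext" namespace with go-ethereum's usual method-name lowercasing, the JSON-RPC requests would look roughly like this:

// Hypothetical request bodies; the namespace and method names are assumptions,
// not confirmed by this diff.
const (
	speedupArchivesImportRequest  = `{"jsonrpc":"2.0","id":1,"method":"wakuext_speedupArchivesImport","params":[]}`
	slowdownArchivesImportRequest = `{"jsonrpc":"2.0","id":2,"method":"wakuext_slowdownArchivesImport","params":[]}`
)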