mirror of https://github.com/status-im/go-waku.git
chore: limit the maximum number of message hashes to request per query (#1190)
parent 92d62a7c38
commit 159635e21b
@@ -20,6 +20,7 @@ import (
 )
 
 const maxContentTopicsPerRequest = 10
+const maxMsgHashesPerRequest = 50
 
 // MessageTracker should keep track of messages it has seen before and
 // provide a way to determine whether a message exists or not. This
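
For a rough sense of what the new constant means in practice: a backlog of missing message hashes is now spread across ceil(N/50) store queries instead of a single unbounded request. A minimal sketch of that arithmetic, with illustrative values only:

package main

import "fmt"

// Mirrors the constant introduced above; 120 hashes is only an example figure.
const maxMsgHashesPerRequest = 50

func main() {
	numMissing := 120
	batches := (numMissing + maxMsgHashesPerRequest - 1) / maxMsgHashesPerRequest // ceiling division
	fmt.Printf("%d missing hashes -> %d store queries\n", numMissing, batches)    // 120 -> 3
}
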
@@ -247,14 +248,26 @@ func (m *MissingMessageVerifier) fetchMessagesBatch(c chan<- *protocol.Envelope,
 		return nil
 	}
 
+	wg := sync.WaitGroup{}
+	// Split into batches
+	for i := 0; i < len(missingHashes); i += maxMsgHashesPerRequest {
+		j := i + maxMsgHashesPerRequest
+		if j > len(missingHashes) {
+			j = len(missingHashes)
+		}
+
+		wg.Add(1)
+		go func(messageHashes []pb.MessageHash) {
+			defer wg.Wait()
+
 			result, err = m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (*store.Result, error) {
-				return m.store.QueryByHash(ctx, missingHashes, store.WithPeer(interest.peerID), store.WithPaging(false, 100))
+				return m.store.QueryByHash(ctx, messageHashes, store.WithPeer(interest.peerID), store.WithPaging(false, maxMsgHashesPerRequest))
 			}, logger, "retrieving missing messages")
 			if err != nil {
 				if !errors.Is(err, context.Canceled) {
 					logger.Error("storenode not available", zap.Error(err))
 				}
-				return err
+				return
 			}
 
 			for !result.IsComplete() {

@@ -276,9 +289,14 @@ func (m *MissingMessageVerifier) fetchMessagesBatch(c chan<- *protocol.Envelope,
 					if !errors.Is(err, context.Canceled) {
 						logger.Error("storenode not available", zap.Error(err))
 					}
-					return err
+					return
 				}
 			}
 
+		}(missingHashes[i:j])
+	}
+
+	wg.Wait()
+
 	return nil
 }
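
Two details of this hunk deserve a note. Inside the worker goroutine, `defer wg.Wait()` never calls `wg.Done()`, so the `wg.Wait()` at the bottom of the function has nothing to release it; and `result, err` are assigned from several goroutines at once rather than being local to each. The conventional shape for this kind of fan-out is sketched below with a stubbed query function; the names and the stub are illustrative, not the go-waku API.

package main

import (
	"fmt"
	"sync"
)

const maxMsgHashesPerRequest = 50

// queryBatch stands in for the per-batch store query (storeQueryWithRetry /
// QueryByHash in the real code).
func queryBatch(batch []string) error {
	fmt.Printf("querying %d hashes\n", len(batch))
	return nil
}

// fetchAll splits missingHashes into batches of at most maxMsgHashesPerRequest
// and queries each batch concurrently, waiting for all of them to finish.
func fetchAll(missingHashes []string) {
	var wg sync.WaitGroup
	for i := 0; i < len(missingHashes); i += maxMsgHashesPerRequest {
		j := i + maxMsgHashesPerRequest
		if j > len(missingHashes) {
			j = len(missingHashes)
		}

		wg.Add(1)
		go func(batch []string) {
			defer wg.Done() // Done, not Wait: each worker signals its own completion
			// err stays local to the goroutine instead of being shared.
			if err := queryBatch(batch); err != nil {
				fmt.Println("query failed:", err)
				return
			}
		}(missingHashes[i:j])
	}
	wg.Wait() // block until every batch has been handled
}

func main() {
	fetchAll(make([]string, 120))
}

Switching `return err` to a bare `return` is consistent with this shape: a goroutine body cannot hand an error back to the enclosing function's caller, so failures are logged (or would be sent over a channel) instead.
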
@@ -16,7 +16,7 @@ import (
 	"go.uber.org/zap"
 )
 
-const DefaultMaxHashQueryLength = 100
+const DefaultMaxHashQueryLength = 50
 const DefaultHashQueryInterval = 3 * time.Second
 const DefaultMessageSentPeriod = 3 // in seconds
 const DefaultMessageExpiredPerid = 10 // in seconds
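
Halving DefaultMaxHashQueryLength brings it in line with maxMsgHashesPerRequest above, so both code paths cap a single store query at 50 hashes. Assuming the constant bounds how many pending hashes are checked per query interval (DefaultHashQueryInterval in the same block suggests a periodic checker), a caller-side sketch might look like this; nextQueryBatch is a hypothetical helper, not part of the package:

package main

import "fmt"

const DefaultMaxHashQueryLength = 50

// nextQueryBatch takes at most DefaultMaxHashQueryLength pending hashes for the
// current query and returns the remainder to be checked on a later tick.
// (Hypothetical helper, for illustration only.)
func nextQueryBatch(pending []string) (batch, rest []string) {
	if len(pending) <= DefaultMaxHashQueryLength {
		return pending, nil
	}
	return pending[:DefaultMaxHashQueryLength], pending[DefaultMaxHashQueryLength:]
}

func main() {
	batch, rest := nextQueryBatch(make([]string, 120))
	fmt.Println(len(batch), len(rest)) // 50 70
}
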
@@ -216,7 +216,7 @@ func (m *MessageSentCheck) messageHashBasedQuery(ctx context.Context, hashes []c
 		messageHashes[i] = pb.ToMessageHash(hash.Bytes())
 	}
 
-	m.logger.Debug("store.queryByHash request", zap.String("requestID", hexutil.Encode(requestID)), zap.Stringer("peerID", selectedPeer), zap.Any("messageHashes", messageHashes))
+	m.logger.Debug("store.queryByHash request", zap.String("requestID", hexutil.Encode(requestID)), zap.Stringer("peerID", selectedPeer), zap.Stringers("messageHashes", messageHashes))
 
 	result, err := m.store.QueryByHash(ctx, messageHashes, opts...)
 	if err != nil {
@@ -248,8 +248,8 @@ func (m *MessageSentCheck) messageHashBasedQuery(ctx context.Context, hashes []c
 		}
 	}
 
-	m.logger.Debug("ack message hashes", zap.Any("ackHashes", ackHashes))
-	m.logger.Debug("missed message hashes", zap.Any("missedHashes", missedHashes))
+	m.logger.Debug("ack message hashes", zap.Stringers("ackHashes", ackHashes))
+	m.logger.Debug("missed message hashes", zap.Stringers("missedHashes", missedHashes))
 
 	return append(ackHashes, missedHashes...)
 }
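
The switch from zap.Any to zap.Stringers changes how the hash slices are rendered: zap.Any falls back to reflection and logs the raw byte arrays, while zap.Stringers formats each element through its String() method, so the hashes appear as readable hex. It also only compiles when the element type implements fmt.Stringer, which the message-hash types here evidently do. A small standalone example with a stand-in hash type:

package main

import (
	"fmt"

	"go.uber.org/zap"
)

// hash is a stand-in for types such as pb.MessageHash or common.Hash that
// implement fmt.Stringer.
type hash [4]byte

func (h hash) String() string { return fmt.Sprintf("0x%x", h[:]) }

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()

	hashes := []hash{{0xde, 0xad, 0xbe, 0xef}, {0xca, 0xfe, 0xba, 0xbe}}

	// zap.Any logs the raw array values via reflection;
	// zap.Stringers calls String() on each element, producing readable hex.
	logger.Debug("ack message hashes", zap.Any("ackHashes", hashes))
	logger.Debug("ack message hashes", zap.Stringers("ackHashes", hashes))
}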