mirror of
https://github.com/logos-messaging/go-libp2p-pubsub.git
synced 2026-01-04 05:43:06 +00:00
fix(BatchPublishing): Make topic.AddToBatch threadsafe (#622)
topic.Publish is already thread-safe, and topic.AddToBatch should follow the same semantics. Looking at how this would integrate with Prysm: Prysm uses a separate goroutine per message it wants to batch.
parent 3f89e4331c
commit fedbccc0c6
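The intended call pattern — each message added to a shared batch from its own goroutine, then a single PublishBatch call — can be sketched as below. This is an illustration, not part of the change: the function name publishAsBatch and the import path (the upstream github.com/libp2p/go-libp2p-pubsub module) are assumptions, while AddToBatch, PublishBatch, and MessageBatch are the APIs touched by this commit.

package example

import (
	"context"
	"sync"

	pubsub "github.com/libp2p/go-libp2p-pubsub"
)

// publishAsBatch adds every payload to one MessageBatch from its own
// goroutine (the Prysm-style usage described in the commit message),
// waits for all adds, then publishes the whole batch in one call.
func publishAsBatch(ctx context.Context, ps *pubsub.PubSub, topic *pubsub.Topic, payloads [][]byte) error {
	var (
		batch  pubsub.MessageBatch
		wg     sync.WaitGroup
		mu     sync.Mutex
		addErr error
	)
	for _, payload := range payloads {
		wg.Add(1)
		go func(p []byte) {
			defer wg.Done()
			// AddToBatch can now be called from multiple goroutines.
			if err := topic.AddToBatch(ctx, &batch, p); err != nil {
				mu.Lock()
				if addErr == nil {
					addErr = err
				}
				mu.Unlock()
			}
		}(payload)
	}
	wg.Wait()
	if addErr != nil {
		return addErr
	}
	return ps.PublishBatch(&batch)
}

The test added below exercises the same pattern both with and without concurrent adds.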
@@ -3682,6 +3682,9 @@ func BenchmarkRoundRobinMessageIDScheduler(b *testing.B) {
 }
 
 func TestMessageBatchPublish(t *testing.T) {
+	concurrentAdds := []bool{false, true}
+	for _, concurrentAdd := range concurrentAdds {
+		t.Run(fmt.Sprintf("WithConcurrentAdd=%v", concurrentAdd), func(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 	defer cancel()
 	hosts := getDefaultHosts(t, 20)
@@ -3718,13 +3721,27 @@ func TestMessageBatchPublish(t *testing.T) {
 	time.Sleep(time.Second * 2)
 
 	var batch MessageBatch
+	var wg sync.WaitGroup
 	for i := 0; i < numMessages; i++ {
 		msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i))
+		if concurrentAdd {
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				err := topics[0].AddToBatch(ctx, &batch, msg)
+				if err != nil {
+					t.Log(err)
+					t.Fail()
+				}
+			}()
+		} else {
 		err := topics[0].AddToBatch(ctx, &batch, msg)
 		if err != nil {
 			t.Fatal(err)
 		}
+		}
 	}
+	wg.Wait()
 	err := psubs[0].PublishBatch(&batch)
 	if err != nil {
 		t.Fatal(err)
@@ -3743,6 +3760,8 @@ func TestMessageBatchPublish(t *testing.T) {
 			}
 		}
 	}
+		})
+	}
 }
 
 func TestPublishDuplicateMessage(t *testing.T) {
@@ -2,6 +2,7 @@ package pubsub
 
 import (
 	"iter"
+	"sync"
 
 	"github.com/libp2p/go-libp2p/core/peer"
 )
@@ -10,9 +11,24 @@ import (
 // once. This allows the Scheduler to define an order for outgoing RPCs.
 // This helps bandwidth constrained peers.
 type MessageBatch struct {
+	mu       sync.Mutex
 	messages []*Message
 }
 
+func (mb *MessageBatch) add(msg *Message) {
+	mb.mu.Lock()
+	defer mb.mu.Unlock()
+	mb.messages = append(mb.messages, msg)
+}
+
+func (mb *MessageBatch) take() []*Message {
+	mb.mu.Lock()
+	defer mb.mu.Unlock()
+	messages := mb.messages
+	mb.messages = nil
+	return messages
+}
+
 type messageBatchAndPublishOptions struct {
 	messages []*Message
 	opts     *BatchPublishOptions
@@ -1600,12 +1600,10 @@ func (p *PubSub) PublishBatch(batch *MessageBatch, opts ...BatchPubOpt) error {
 	setDefaultBatchPublishOptions(publishOptions)
 
 	p.sendMessageBatch <- messageBatchAndPublishOptions{
-		messages: batch.messages,
+		messages: batch.take(),
 		opts:     publishOptions,
 	}
 
-	// Clear the batch's messages in case a user reuses the same batch object
-	batch.messages = nil
 	return nil
 }
 
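With PublishBatch now draining the batch through take(), the explicit clear that used to follow the send is no longer needed, and the original guarantee — a batch object can be reused without re-sending earlier messages — still holds, since take() swaps the slice out and resets it under the same lock that guards add(). A small sketch of that reuse, with illustrative names and the same assumed import path as in the sketch above:

package example

import (
	"context"

	pubsub "github.com/libp2p/go-libp2p-pubsub"
)

// reuseBatch shows that the same MessageBatch value can be reused across
// publishes: PublishBatch drains it via take(), so the second round sends
// only the second message.
func reuseBatch(ctx context.Context, ps *pubsub.PubSub, topic *pubsub.Topic) error {
	var batch pubsub.MessageBatch

	if err := topic.AddToBatch(ctx, &batch, []byte("round 1")); err != nil {
		return err
	}
	// Sends "round 1" and leaves the batch empty.
	if err := ps.PublishBatch(&batch); err != nil {
		return err
	}

	if err := topic.AddToBatch(ctx, &batch, []byte("round 2")); err != nil {
		return err
	}
	// Sends only "round 2".
	return ps.PublishBatch(&batch)
}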