From b8a6a868adce87101b3f61cb1b1a644db627c59f Mon Sep 17 00:00:00 2001 From: web3-bot Date: Mon, 26 Aug 2024 15:58:30 +0100 Subject: [PATCH 01/27] ci: uci/update-go (#577) This PR was created automatically by the @web3-bot as a part of the [Unified CI](https://github.com/ipdxco/unified-github-workflows) project. --- compat/compat.pb.go | 3 ++- go.mod | 2 +- pb/rpc.pb.go | 3 ++- pb/trace.pb.go | 3 ++- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/compat/compat.pb.go b/compat/compat.pb.go index 607b78a..57a00dd 100644 --- a/compat/compat.pb.go +++ b/compat/compat.pb.go @@ -5,10 +5,11 @@ package compat_pb import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" + + proto "github.com/gogo/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/go.mod b/go.mod index 652266e..437017c 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/libp2p/go-libp2p-pubsub -go 1.21 +go 1.22 require ( github.com/benbjohnson/clock v1.3.5 diff --git a/pb/rpc.pb.go b/pb/rpc.pb.go index 151cb44..213cdcc 100644 --- a/pb/rpc.pb.go +++ b/pb/rpc.pb.go @@ -5,10 +5,11 @@ package pubsub_pb import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" + + proto "github.com/gogo/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/pb/trace.pb.go b/pb/trace.pb.go index 04f1ec1..9361c39 100644 --- a/pb/trace.pb.go +++ b/pb/trace.pb.go @@ -5,10 +5,11 @@ package pubsub_pb import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" + + proto "github.com/gogo/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. 
From 4c139741882d3ef6a917e6f3ea403832885b7472 Mon Sep 17 00:00:00 2001 From: Andrew Gillis <11790789+gammazero@users.noreply.github.com> Date: Mon, 9 Sep 2024 08:42:16 -0700 Subject: [PATCH 02/27] Update go-libp2p to latest (#578) - Update to go-libp2p v0.36.3 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 437017c..3faa411 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/ipfs/go-log/v2 v2.5.1 github.com/libp2p/go-buffer-pool v0.1.0 - github.com/libp2p/go-libp2p v0.36.2 + github.com/libp2p/go-libp2p v0.36.3 github.com/libp2p/go-libp2p-testing v0.12.0 github.com/libp2p/go-msgio v0.3.0 github.com/multiformats/go-multiaddr v0.13.0 diff --git a/go.sum b/go.sum index cb7aa74..71a6bd2 100644 --- a/go.sum +++ b/go.sum @@ -134,8 +134,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.36.2 h1:BbqRkDaGC3/5xfaJakLV/BrpjlAuYqSB0lRvtzL3B/U= -github.com/libp2p/go-libp2p v0.36.2/go.mod h1:XO3joasRE4Eup8yCTTP/+kX+g92mOgRaadk46LmPhHY= +github.com/libp2p/go-libp2p v0.36.3 h1:NHz30+G7D8Y8YmznrVZZla0ofVANrvBl2c+oARfMeDQ= +github.com/libp2p/go-libp2p v0.36.3/go.mod h1:4Y5vFyCUiJuluEPmpnKYf6WFx5ViKPUYs/ixe9ANFZ8= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= From f71345c1ec0ee4b30cd702bb605927f788ff9f36 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Wed, 25 Sep 2024 02:33:35 -0400 Subject: [PATCH 03/27] Do not format expensive debug messages in non-debug levels in doDropRPC (#580) In high load scenarios when consumer is slow, `doDropRPC` is called often and makes extra unnecessary allocations formatting `log.Debug` message. Fixed by checking log level before running expensive formatting. 
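For illustration, a minimal standalone sketch of the same guard pattern using plain zap (the logger construction and the peer/reason values here are placeholders, not code from this repository; the actual change to `doDropRPC` is in the diff below):

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Stand-in logger; the real code uses the package-level pubsub logger.
	log := zap.NewExample().Sugar()

	p, reason := "QmExamplePeer", "queue full" // placeholder values

	// Only pay for argument formatting when debug logging is actually enabled.
	if log.Level() <= zapcore.DebugLevel {
		log.Debugf("dropping message to peer %s: %s", p, reason)
	}
}
```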
Before: ``` BenchmarkAllocDoDropRPC-10 13684732 76.28 ns/op 144 B/op 3 allocs/op ``` After: ``` BenchmarkAllocDoDropRPC-10 28140273 42.88 ns/op 112 B/op 1 allocs/op ``` --- go.mod | 2 +- gossipsub.go | 6 +++++- gossipsub_test.go | 8 ++++++++ 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 3faa411..f1358e7 100644 --- a/go.mod +++ b/go.mod @@ -12,6 +12,7 @@ require ( github.com/libp2p/go-msgio v0.3.0 github.com/multiformats/go-multiaddr v0.13.0 github.com/multiformats/go-varint v0.0.7 + go.uber.org/zap v1.27.0 ) require ( @@ -98,7 +99,6 @@ require ( go.uber.org/fx v1.22.2 // indirect go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.26.0 // indirect golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect golang.org/x/mod v0.20.0 // indirect diff --git a/gossipsub.go b/gossipsub.go index 117b585..dcc5d19 100644 --- a/gossipsub.go +++ b/gossipsub.go @@ -19,6 +19,8 @@ import ( "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/core/record" "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem" + + "go.uber.org/zap/zapcore" ) const ( @@ -1334,7 +1336,9 @@ func (gs *GossipSubRouter) sendRPC(p peer.ID, out *RPC, urgent bool) { } func (gs *GossipSubRouter) doDropRPC(rpc *RPC, p peer.ID, reason string) { - log.Debugf("dropping message to peer %s: %s", p, reason) + if log.Level() <= zapcore.DebugLevel { + log.Debugf("dropping message to peer %s: %s", p, reason) + } gs.tracer.DropRPC(rpc, p) // push control messages that need to be retried ctl := rpc.GetControl() diff --git a/gossipsub_test.go b/gossipsub_test.go index 3b45557..d515654 100644 --- a/gossipsub_test.go +++ b/gossipsub_test.go @@ -3175,3 +3175,11 @@ func TestGossipsubIdontwantClear(t *testing.T) { <-ctx.Done() } + +func BenchmarkAllocDoDropRPC(b *testing.B) { + gs := GossipSubRouter{tracer: &pubsubTracer{}} + + for i := 0; i < b.N; i++ { + gs.doDropRPC(&RPC{}, "peerID", "reason") + } +} From c06df2f9a38e9382e644b241adf0e96e5ca00955 Mon Sep 17 00:00:00 2001 From: "Yahya Hassanzadeh, Ph.D." <19204398+yhassanzadeh13@users.noreply.github.com> Date: Fri, 18 Oct 2024 13:28:24 -0700 Subject: [PATCH 04/27] Add Function to Enable Application Layer to Send Direct Control Messages (#562) ### PR Description This PR addresses https://github.com/libp2p/go-libp2p-pubsub/issues/561; i.e., adding a new `SendControl` function to the `GossipSubRouter`. This will allow the application layer to send direct control messages to peers, facilitating finer-grained testing. --- gossipsub.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/gossipsub.go b/gossipsub.go index dcc5d19..222c71a 100644 --- a/gossipsub.go +++ b/gossipsub.go @@ -2121,6 +2121,23 @@ func (gs *GossipSubRouter) WithDefaultTagTracer() Option { return WithRawTracer(gs.tagTracer) } +// SendControl dispatches the given set of control messages to the given peer. +// The control messages are sent as a single RPC, with the given (optional) messages. +// Args: +// +// p: the peer to send the control messages to. +// ctl: the control messages to send. +// msgs: the messages to send in the same RPC (optional). +// The control messages are piggybacked on the messages. +// +// Returns: +// +// nothing. 
+func (gs *GossipSubRouter) SendControl(p peer.ID, ctl *pb.ControlMessage, msgs ...*pb.Message) { + out := rpcWithControl(msgs, ctl.Ihave, ctl.Iwant, ctl.Graft, ctl.Prune, ctl.Idontwant) + gs.sendRPC(p, out, false) +} + func peerListToMap(peers []peer.ID) map[peer.ID]struct{} { pmap := make(map[peer.ID]struct{}) for _, p := range peers { From 3536508a9d6914cd46b9154e4c050085d9e2821a Mon Sep 17 00:00:00 2001 From: Nishant Das Date: Fri, 27 Dec 2024 02:02:14 +0800 Subject: [PATCH 05/27] Fix the Router's Ability to Prune the Mesh Periodically (#589) When a new peer wants to graft us into their mesh, we check our current mesh size to determine whether we can add any more peers to it. This is done to keep our mesh size from growing beyond `Dhi` and to prevent mesh takeover attacks here: https://github.com/libp2p/go-libp2p-pubsub/blob/c06df2f9a38e9382e644b241adf0e96e5ca00955/gossipsub.go#L943 During every heartbeat we check our mesh size, and if it is **greater** than `Dhi` we prune our mesh back down to `D`. https://github.com/libp2p/go-libp2p-pubsub/blob/c06df2f9a38e9382e644b241adf0e96e5ca00955/gossipsub.go#L1608 However, looking closely at both lines reveals a problematic end result: we only stop grafting new peers into our mesh once our current mesh size is **greater than or equal to** `Dhi`, but we only prune peers when the current mesh size is strictly greater than `Dhi`. Rather than floating between `D` and `Dhi`, the mesh stagnates at `Dhi`, which effectively raises the node's target degree from `D` to `Dhi`. This has been observed on Ethereum mainnet by recording mesh interactions and message fulfillment from those peers. This PR fixes the issue by adding an equality check to the pruning conditional so that the mesh can be periodically pruned, and it adds a regression test for this particular case. --- gossipsub.go | 2 +- gossipsub_test.go | 47 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 1 deletion(-) diff --git a/gossipsub.go b/gossipsub.go index 222c71a..849f2fa 100644 --- a/gossipsub.go +++ b/gossipsub.go @@ -1605,7 +1605,7 @@ func (gs *GossipSubRouter) heartbeat() { } // do we have too many peers? - if len(peers) > gs.params.Dhi { + if len(peers) >= gs.params.Dhi { plst := peerMapToList(peers) // sort by score (but shuffle first for the case we don't use the score) diff --git a/gossipsub_test.go b/gossipsub_test.go index d515654..93edeec 100644 --- a/gossipsub_test.go +++ b/gossipsub_test.go @@ -3176,6 +3176,53 @@ func TestGossipsubIdontwantClear(t *testing.T) { <-ctx.Done() } +func TestGossipsubPruneMeshCorrectly(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getDefaultHosts(t, 9) + + msgID := func(pmsg *pb.Message) string { + // silly content-based test message-ID: just use the data as whole + return base64.URLEncoding.EncodeToString(pmsg.Data) + } + + params := DefaultGossipSubParams() + params.Dhi = 8 + + psubs := make([]*PubSub, 9) + for i := 0; i < 9; i++ { + psubs[i] = getGossipsub(ctx, hosts[i], + WithGossipSubParams(params), + WithMessageIdFn(msgID)) + } + + topic := "foobar" + for _, ps := range psubs { + _, err := ps.Subscribe(topic) + if err != nil { + t.Fatal(err) + } + } + + // Connect first peer with the rest of the 8 other + // peers. + for i := 1; i < 9; i++ { + connect(t, hosts[0], hosts[i]) + } + + // Wait for 2 heartbeats to be able to prune excess peers back down to D.
+ totalTimeToWait := params.HeartbeatInitialDelay + 2*params.HeartbeatInterval + time.Sleep(totalTimeToWait) + + meshPeers, ok := psubs[0].rt.(*GossipSubRouter).mesh[topic] + if !ok { + t.Fatal("mesh does not exist for topic") + } + if len(meshPeers) != params.D { + t.Fatalf("mesh does not have the correct number of peers. Wanted %d but got %d", params.D, len(meshPeers)) + } +} + func BenchmarkAllocDoDropRPC(b *testing.B) { gs := GossipSubRouter{tracer: &pubsubTracer{}} for i := 0; i < b.N; i++ { gs.doDropRPC(&RPC{}, "peerID", "reason") } } From 0936035d5f49b608669a16e857841ac544f82528 Mon Sep 17 00:00:00 2001 From: Nishant Das Date: Sat, 28 Dec 2024 19:49:13 +0800 Subject: [PATCH 06/27] Improve IDONTWANT Flood Protection (#590) In this PR we add a new config parameter called `MaxIDontWantLength`, which is used for `IDONTWANT` much like `MaxIHaveLength` has been used for `IHAVE` messages. This parameter is currently set to `10`. The main purpose is to bring the handling of IDONTWANT messages in line with how IHAVE messages have been handled. We add the relevant changes to the `handleIDontWant` method, along with a new regression test for this check. --- gossipsub.go | 15 +++++++++++++ gossipsub_spam_test.go | 48 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+) diff --git a/gossipsub.go b/gossipsub.go index 849f2fa..d604162 100644 --- a/gossipsub.go +++ b/gossipsub.go @@ -68,6 +68,7 @@ var ( GossipSubGraftFloodThreshold = 10 * time.Second GossipSubMaxIHaveLength = 5000 GossipSubMaxIHaveMessages = 10 + GossipSubMaxIDontWantLength = 10 GossipSubMaxIDontWantMessages = 1000 GossipSubIWantFollowupTime = 3 * time.Second GossipSubIDontWantMessageThreshold = 1024 // 1KB @@ -218,6 +219,10 @@ type GossipSubParams struct { // MaxIHaveMessages is the maximum number of IHAVE messages to accept from a peer within a heartbeat. MaxIHaveMessages int + // MaxIDontWantLength is the maximum number of messages to include in an IDONTWANT message. Also controls + // the maximum number of IDONTWANT ids we will accept to protect against IDONTWANT floods. This value + // should be adjusted if your system anticipates a larger amount than specified per heartbeat. + MaxIDontWantLength int + // MaxIDontWantMessages is the maximum number of IDONTWANT messages to accept from a peer within a heartbeat.
MaxIDontWantMessages int @@ -303,6 +308,7 @@ func DefaultGossipSubParams() GossipSubParams { GraftFloodThreshold: GossipSubGraftFloodThreshold, MaxIHaveLength: GossipSubMaxIHaveLength, MaxIHaveMessages: GossipSubMaxIHaveMessages, + MaxIDontWantLength: GossipSubMaxIDontWantLength, MaxIDontWantMessages: GossipSubMaxIDontWantMessages, IWantFollowupTime: GossipSubIWantFollowupTime, IDontWantMessageThreshold: GossipSubIDontWantMessageThreshold, @@ -1009,9 +1015,18 @@ func (gs *GossipSubRouter) handleIDontWant(p peer.ID, ctl *pb.ControlMessage) { } gs.peerdontwant[p]++ + totalUnwantedIds := 0 // Remember all the unwanted message ids +mainIDWLoop: for _, idontwant := range ctl.GetIdontwant() { for _, mid := range idontwant.GetMessageIDs() { + // IDONTWANT flood protection + if totalUnwantedIds >= gs.params.MaxIDontWantLength { + log.Debugf("IDONWANT: peer %s has advertised too many ids (%d) within this message; ignoring", p, totalUnwantedIds) + break mainIDWLoop + } + + totalUnwantedIds++ gs.unwanted[p][computeChecksum(mid)] = gs.params.IDontWantMessageTTL } } diff --git a/gossipsub_spam_test.go b/gossipsub_spam_test.go index df2ffff..9f6f0f9 100644 --- a/gossipsub_spam_test.go +++ b/gossipsub_spam_test.go @@ -4,6 +4,7 @@ import ( "context" "crypto/rand" "encoding/base64" + "fmt" "strconv" "sync" "testing" @@ -891,6 +892,53 @@ func TestGossipsubAttackSpamIDONTWANT(t *testing.T) { <-ctx.Done() } +func TestGossipsubHandleIDontwantSpam(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getDefaultHosts(t, 2) + + msgID := func(pmsg *pb.Message) string { + // silly content-based test message-ID: just use the data as whole + return base64.URLEncoding.EncodeToString(pmsg.Data) + } + + psubs := make([]*PubSub, 2) + psubs[0] = getGossipsub(ctx, hosts[0], WithMessageIdFn(msgID)) + psubs[1] = getGossipsub(ctx, hosts[1], WithMessageIdFn(msgID)) + + connect(t, hosts[0], hosts[1]) + + topic := "foobar" + for _, ps := range psubs { + _, err := ps.Subscribe(topic) + if err != nil { + t.Fatal(err) + } + } + exceededIDWLength := GossipSubMaxIDontWantLength + 1 + var idwIds []string + for i := 0; i < exceededIDWLength; i++ { + idwIds = append(idwIds, fmt.Sprintf("idontwant-%d", i)) + } + rPid := hosts[1].ID() + ctrlMessage := &pb.ControlMessage{Idontwant: []*pb.ControlIDontWant{{MessageIDs: idwIds}}} + grt := psubs[0].rt.(*GossipSubRouter) + grt.handleIDontWant(rPid, ctrlMessage) + + if grt.peerdontwant[rPid] != 1 { + t.Errorf("Wanted message count of %d but received %d", 1, grt.peerdontwant[rPid]) + } + mid := fmt.Sprintf("idontwant-%d", GossipSubMaxIDontWantLength-1) + if _, ok := grt.unwanted[rPid][computeChecksum(mid)]; !ok { + t.Errorf("Desired message id was not stored in the unwanted map: %s", mid) + } + + mid = fmt.Sprintf("idontwant-%d", GossipSubMaxIDontWantLength) + if _, ok := grt.unwanted[rPid][computeChecksum(mid)]; ok { + t.Errorf("Unwanted message id was stored in the unwanted map: %s", mid) + } +} + type mockGSOnRead func(writeMsg func(*pb.RPC), irpc *pb.RPC) func newMockGS(ctx context.Context, t *testing.T, attacker host.Host, onReadMsg mockGSOnRead) { From bf5b583843312d1e668c2a99f4543d47fe175812 Mon Sep 17 00:00:00 2001 From: Pop Chunhapanya Date: Tue, 31 Dec 2024 03:25:26 +0700 Subject: [PATCH 07/27] Allow cancelling IWANT using IDONTWANT (#591) As specified in the Gossipsub v1.2 spec, we should allow cancelling IWANT by IDONTWANT. That is if IDONTWANT already arrived, we should not process IWANT. 
However due to the code structure, we can cancel IWANT only in handleIWant. https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.2.md#cancelling-iwant --- gossipsub.go | 5 +++ gossipsub_test.go | 104 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 109 insertions(+) diff --git a/gossipsub.go b/gossipsub.go index d604162..56b6886 100644 --- a/gossipsub.go +++ b/gossipsub.go @@ -839,6 +839,11 @@ func (gs *GossipSubRouter) handleIWant(p peer.ID, ctl *pb.ControlMessage) []*pb. ihave := make(map[string]*pb.Message) for _, iwant := range ctl.GetIwant() { for _, mid := range iwant.GetMessageIDs() { + // Check if that peer has sent IDONTWANT before, if so don't send them the message + if _, ok := gs.unwanted[p][computeChecksum(mid)]; ok { + continue + } + msg, count, ok := gs.mcache.GetForPeer(mid, p) if !ok { continue diff --git a/gossipsub_test.go b/gossipsub_test.go index 93edeec..675d164 100644 --- a/gossipsub_test.go +++ b/gossipsub_test.go @@ -3079,6 +3079,110 @@ func TestGossipsubIdontwantSmallMessage(t *testing.T) { <-ctx.Done() } +// Test that IWANT will have no effect after IDONTWANT is sent +func TestGossipsubIdontwantBeforeIwant(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getDefaultHosts(t, 3) + + msgID := func(pmsg *pb.Message) string { + // silly content-based test message-ID: just use the data as whole + return base64.URLEncoding.EncodeToString(pmsg.Data) + } + + psubs := make([]*PubSub, 2) + psubs[0] = getGossipsub(ctx, hosts[0], WithMessageIdFn(msgID)) + psubs[1] = getGossipsub(ctx, hosts[1], WithMessageIdFn(msgID)) + + topic := "foobar" + for _, ps := range psubs { + _, err := ps.Subscribe(topic) + if err != nil { + t.Fatal(err) + } + } + + // Wait a bit after the last message before checking the result + msgWaitMax := 2 * time.Second + msgTimer := time.NewTimer(msgWaitMax) + + // Checks we received right messages + msgReceived := false + ihaveReceived := false + checkMsgs := func() { + if msgReceived { + t.Fatalf("Expected no messages received after IDONWANT") + } + if !ihaveReceived { + t.Fatalf("Expected IHAVE received") + } + } + + // Wait for the timer to expire + go func() { + select { + case <-msgTimer.C: + checkMsgs() + cancel() + return + case <-ctx.Done(): + checkMsgs() + } + }() + + newMockGS(ctx, t, hosts[2], func(writeMsg func(*pb.RPC), irpc *pb.RPC) { + // Check if it receives any message + if len(irpc.GetPublish()) > 0 { + msgReceived = true + } + // The middle peer is supposed to send IHAVE + for _, ihave := range irpc.GetControl().GetIhave() { + ihaveReceived = true + mids := ihave.GetMessageIDs() + + writeMsg(&pb.RPC{ + Control: &pb.ControlMessage{Idontwant: []*pb.ControlIDontWant{{MessageIDs: mids}}}, + }) + // Wait for the middle peer to process IDONTWANT + time.Sleep(100 * time.Millisecond) + writeMsg(&pb.RPC{ + Control: &pb.ControlMessage{Iwant: []*pb.ControlIWant{{MessageIDs: mids}}}, + }) + } + // When the middle peer connects it will send us its subscriptions + for _, sub := range irpc.GetSubscriptions() { + if sub.GetSubscribe() { + // Reply by subcribing to the topic and pruning to the middle peer to make sure + // that it's not in the mesh + writeMsg(&pb.RPC{ + Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Topicid: sub.Topicid}}, + Control: &pb.ControlMessage{Prune: []*pb.ControlPrune{{TopicID: sub.Topicid}}}, + }) + + go func() { + // Wait for an interval to make sure the middle peer + // received and processed the subscribe + time.Sleep(100 * 
time.Millisecond) + + data := make([]byte, 16) + crand.Read(data) + + // Publish the message from the first peer + if err := psubs[0].Publish(topic, data); err != nil { + t.Error(err) + return // cannot call t.Fatal in a non-test goroutine + } + }() + } + } + }) + + connect(t, hosts[0], hosts[1]) + connect(t, hosts[1], hosts[2]) + + <-ctx.Done() +} + // Test that IDONTWANT will cleared when it's old enough func TestGossipsubIdontwantClear(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) From 9b90c72cedb43cbee1c8a67e3a684f64665e36f2 Mon Sep 17 00:00:00 2001 From: Marco Munizaga Date: Thu, 6 Feb 2025 17:19:16 -0800 Subject: [PATCH 08/27] Release v0.13.0 (#593) --- version.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.json b/version.json index ea22ea5..d3f7968 100644 --- a/version.json +++ b/version.json @@ -1,3 +1,3 @@ { - "version": "v0.11.0" + "version": "v0.13.0" } From bfcc7c4889cd113d01ce7f66faf62f38d72c0260 Mon Sep 17 00:00:00 2001 From: web3-bot Date: Sun, 16 Feb 2025 22:31:20 +0100 Subject: [PATCH 09/27] ci: uci/update-go (#595) This PR was created automatically by the @web3-bot as a part of the [Unified CI](https://github.com/ipdxco/unified-github-workflows) project. --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index f1358e7..6723b6e 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/libp2p/go-libp2p-pubsub -go 1.22 +go 1.23 require ( github.com/benbjohnson/clock v1.3.5 From b50197ee8bc78e075dca3422635eaf8a617466fa Mon Sep 17 00:00:00 2001 From: Andrew Gillis <11790789+gammazero@users.noreply.github.com> Date: Tue, 18 Feb 2025 03:41:46 -1000 Subject: [PATCH 10/27] Upgrade go-libp2p to v0.39.1 (#598) --- go.mod | 90 ++++++++++++++------------- go.sum | 189 +++++++++++++++++++++++++++++---------------------------- 2 files changed, 144 insertions(+), 135 deletions(-) diff --git a/go.mod b/go.mod index 6723b6e..75f4cd2 100644 --- a/go.mod +++ b/go.mod @@ -7,10 +7,10 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/ipfs/go-log/v2 v2.5.1 github.com/libp2p/go-buffer-pool v0.1.0 - github.com/libp2p/go-libp2p v0.36.3 + github.com/libp2p/go-libp2p v0.39.1 github.com/libp2p/go-libp2p-testing v0.12.0 github.com/libp2p/go-msgio v0.3.0 - github.com/multiformats/go-multiaddr v0.13.0 + github.com/multiformats/go-multiaddr v0.14.0 github.com/multiformats/go-varint v0.0.7 go.uber.org/zap v1.27.0 ) @@ -30,84 +30,90 @@ require ( github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect + github.com/google/pprof v0.0.0-20250202011525-fc3143867406 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.3 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/huin/goupnp v1.3.0 // indirect - github.com/ipfs/go-cid v0.4.1 // indirect + github.com/ipfs/go-cid v0.5.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect - github.com/klauspost/compress v1.17.9 // indirect - github.com/klauspost/cpuid/v2 v2.2.8 // indirect - github.com/koron/go-ssdp v0.0.4 // indirect - github.com/libp2p/go-flow-metrics v0.1.0 // indirect + github.com/klauspost/compress v1.17.11 // indirect + github.com/klauspost/cpuid/v2 v2.2.9 // indirect + github.com/koron/go-ssdp v0.0.5 // indirect + github.com/libp2p/go-flow-metrics v0.2.0 // 
indirect github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect github.com/libp2p/go-nat v0.2.0 // indirect - github.com/libp2p/go-netroute v0.2.1 // indirect + github.com/libp2p/go-netroute v0.2.2 // indirect github.com/libp2p/go-reuseport v0.4.0 // indirect - github.com/libp2p/go-yamux/v4 v4.0.1 // indirect + github.com/libp2p/go-yamux/v4 v4.0.2 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/miekg/dns v1.1.62 // indirect + github.com/miekg/dns v1.1.63 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect - github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect + github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect github.com/multiformats/go-multibase v0.2.0 // indirect github.com/multiformats/go-multicodec v0.9.0 // indirect github.com/multiformats/go-multihash v0.2.3 // indirect - github.com/multiformats/go-multistream v0.5.0 // indirect + github.com/multiformats/go-multistream v0.6.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/onsi/ginkgo/v2 v2.20.0 // indirect + github.com/onsi/ginkgo/v2 v2.22.2 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect - github.com/pion/datachannel v1.5.8 // indirect + github.com/pion/datachannel v1.5.10 // indirect github.com/pion/dtls/v2 v2.2.12 // indirect - github.com/pion/ice/v2 v2.3.34 // indirect - github.com/pion/interceptor v0.1.30 // indirect - github.com/pion/logging v0.2.2 // indirect + github.com/pion/dtls/v3 v3.0.4 // indirect + github.com/pion/ice/v2 v2.3.37 // indirect + github.com/pion/ice/v4 v4.0.6 // indirect + github.com/pion/interceptor v0.1.37 // indirect + github.com/pion/logging v0.2.3 // indirect github.com/pion/mdns v0.0.12 // indirect + github.com/pion/mdns/v2 v2.0.7 // indirect github.com/pion/randutil v0.1.0 // indirect - github.com/pion/rtcp v1.2.14 // indirect - github.com/pion/rtp v1.8.9 // indirect - github.com/pion/sctp v1.8.33 // indirect - github.com/pion/sdp/v3 v3.0.9 // indirect - github.com/pion/srtp/v2 v2.0.20 // indirect + github.com/pion/rtcp v1.2.15 // indirect + github.com/pion/rtp v1.8.11 // indirect + github.com/pion/sctp v1.8.35 // indirect + github.com/pion/sdp/v3 v3.0.10 // indirect + github.com/pion/srtp/v3 v3.0.4 // indirect github.com/pion/stun v0.6.1 // indirect + github.com/pion/stun/v3 v3.0.0 // indirect github.com/pion/transport/v2 v2.2.10 // indirect + github.com/pion/transport/v3 v3.0.7 // indirect github.com/pion/turn/v2 v2.1.6 // indirect - github.com/pion/webrtc/v3 v3.3.0 // indirect + github.com/pion/turn/v4 v4.0.0 // indirect + github.com/pion/webrtc/v4 v4.0.8 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.20.0 // indirect + github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - 
github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/quic-go v0.46.0 // indirect - github.com/quic-go/webtransport-go v0.8.0 // indirect + github.com/quic-go/qpack v0.5.1 // indirect + github.com/quic-go/quic-go v0.49.0 // indirect + github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/stretchr/testify v1.9.0 // indirect - github.com/wlynxg/anet v0.0.4 // indirect + github.com/stretchr/testify v1.10.0 // indirect + github.com/wlynxg/anet v0.0.5 // indirect go.uber.org/dig v1.18.0 // indirect - go.uber.org/fx v1.22.2 // indirect - go.uber.org/mock v0.4.0 // indirect + go.uber.org/fx v1.23.0 // indirect + go.uber.org/mock v0.5.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.26.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect + golang.org/x/crypto v0.32.0 // indirect + golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c // indirect + golang.org/x/mod v0.23.0 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/sync v0.11.0 // indirect + golang.org/x/sys v0.30.0 // indirect + golang.org/x/text v0.22.0 // indirect + golang.org/x/tools v0.29.0 // indirect + google.golang.org/protobuf v1.36.4 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.3.0 // indirect ) diff --git a/go.sum b/go.sum index 71a6bd2..01c2c04 100644 --- a/go.sum +++ b/go.sum @@ -85,8 +85,8 @@ github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= -github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/pprof v0.0.0-20250202011525-fc3143867406 h1:wlQI2cYY0BsWmmPPAnxfQ8SDW0S3Jasn+4B8kXFxprg= +github.com/google/pprof v0.0.0-20250202011525-fc3143867406/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -101,8 +101,8 @@ github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= -github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= -github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= +github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= +github.com/ipfs/go-cid v0.5.0/go.mod 
h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= @@ -115,12 +115,12 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= -github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= -github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= -github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= +github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= +github.com/koron/go-ssdp v0.0.5 h1:E1iSMxIs4WqxTbIBLtmNBeOOC+1sCIXQeqTWVnpmwhk= +github.com/koron/go-ssdp v0.0.5/go.mod h1:Qm59B7hpKpDqfyRNWRNr00jGwLdXjDyZh6y7rH6VS0w= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -132,10 +132,10 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= -github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= -github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.36.3 h1:NHz30+G7D8Y8YmznrVZZla0ofVANrvBl2c+oARfMeDQ= -github.com/libp2p/go-libp2p v0.36.3/go.mod h1:4Y5vFyCUiJuluEPmpnKYf6WFx5ViKPUYs/ixe9ANFZ8= +github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C08XmmDw= +github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc= +github.com/libp2p/go-libp2p v0.39.1 h1:1Ur6rPCf3GR+g8jkrnaQaM0ha2IGespsnNlCqJLLALE= +github.com/libp2p/go-libp2p v0.39.1/go.mod h1:3zicI8Lp7Isun+Afo/JOACUbbJqqR2owK6RQWFsVAbI= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= @@ -144,12 +144,12 @@ github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0 github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= 
github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk= github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk= -github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU= -github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ= +github.com/libp2p/go-netroute v0.2.2 h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8= +github.com/libp2p/go-netroute v0.2.2/go.mod h1:Rntq6jUAH0l9Gg17w5bFGhcC9a+vk4KNXs6s7IljKYE= github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= -github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ= -github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= +github.com/libp2p/go-yamux/v4 v4.0.2 h1:nrLh89LN/LEiqcFiqdKDRHjGstN300C1269K/EX0CPU= +github.com/libp2p/go-yamux/v4 v4.0.2/go.mod h1:C808cCRgOs1iBwY4S71T5oxgMxgLmqUw56qh4AeBW2o= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= @@ -159,9 +159,8 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= -github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= -github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= +github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY= +github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= @@ -182,11 +181,10 @@ github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYg github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= -github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.13.0 h1:BCBzs61E3AGHcYYTv8dqRH43ZfyrqM8RXVPT8t13tLQ= -github.com/multiformats/go-multiaddr v0.13.0/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII= -github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= -github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= +github.com/multiformats/go-multiaddr v0.14.0 h1:bfrHrJhrRuh/NXH5mCnemjpbGjzRw/b+tJFOD41g2tU= +github.com/multiformats/go-multiaddr v0.14.0/go.mod 
h1:6EkVAxtznq2yC3QT5CM1UTAwG0GTP3EWAIcjHuzQ+r4= +github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M= +github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= @@ -196,56 +194,61 @@ github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI1 github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= -github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE= -github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA= -github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-multistream v0.6.0 h1:ZaHKbsL404720283o4c/IHQXiS6gb8qAN5EIJ4PN5EA= +github.com/multiformats/go-multistream v0.6.0/go.mod h1:MOyoG5otO24cHIg8kf9QW2/NozURlkP/rvi2FQJyCPg= github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= -github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw= -github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= -github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= -github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= +github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU= +github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk= +github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= +github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= -github.com/pion/datachannel v1.5.8 h1:ph1P1NsGkazkjrvyMfhRBUAWMxugJjq2HfQifaOoSNo= -github.com/pion/datachannel v1.5.8/go.mod h1:PgmdpoaNBLX9HNzNClmdki4DYW5JtI7Yibu8QzbL3tI= 
+github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o= +github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M= github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/ice/v2 v2.3.34 h1:Ic1ppYCj4tUOcPAp76U6F3fVrlSw8A9JtRXLqw6BbUM= -github.com/pion/ice/v2 v2.3.34/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ= -github.com/pion/interceptor v0.1.30 h1:au5rlVHsgmxNi+v/mjOPazbW1SHzfx7/hYOEYQnUcxA= -github.com/pion/interceptor v0.1.30/go.mod h1:RQuKT5HTdkP2Fi0cuOS5G5WNymTjzXaGF75J4k7z2nc= -github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= +github.com/pion/dtls/v3 v3.0.4 h1:44CZekewMzfrn9pmGrj5BNnTMDCFwr+6sLH+cCuLM7U= +github.com/pion/dtls/v3 v3.0.4/go.mod h1:R373CsjxWqNPf6MEkfdy3aSe9niZvL/JaKlGeFphtMg= +github.com/pion/ice/v2 v2.3.37 h1:ObIdaNDu1rCo7hObhs34YSBcO7fjslJMZV0ux+uZWh0= +github.com/pion/ice/v2 v2.3.37/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ= +github.com/pion/ice/v4 v4.0.6 h1:jmM9HwI9lfetQV/39uD0nY4y++XZNPhvzIPCb8EwxUM= +github.com/pion/ice/v4 v4.0.6/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw= +github.com/pion/interceptor v0.1.37 h1:aRA8Zpab/wE7/c0O3fh1PqY0AJI3fCSEM5lRWJVorwI= +github.com/pion/interceptor v0.1.37/go.mod h1:JzxbJ4umVTlZAf+/utHzNesY8tmRkM2lVmkS82TTj8Y= github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= +github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI= +github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90= github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8= github.com/pion/mdns v0.0.12/go.mod h1:VExJjv8to/6Wqm1FXK+Ii/Z9tsVk/F5sD/N70cnYFbk= +github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM= +github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA= github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= -github.com/pion/rtcp v1.2.12/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= -github.com/pion/rtcp v1.2.14 h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE= -github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= -github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= -github.com/pion/rtp v1.8.9 h1:E2HX740TZKaqdcPmf4pw6ZZuG8u5RlMMt+l3dxeu6Wk= -github.com/pion/rtp v1.8.9/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= -github.com/pion/sctp v1.8.33 h1:dSE4wX6uTJBcNm8+YlMg7lw1wqyKHggsP5uKbdj+NZw= -github.com/pion/sctp v1.8.33/go.mod h1:beTnqSzewI53KWoG3nqB282oDMGrhNxBdb+JZnkCwRM= -github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY= -github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M= -github.com/pion/srtp/v2 v2.0.20 h1:HNNny4s+OUmG280ETrCdgFndp4ufx3/uy85EawYEhTk= -github.com/pion/srtp/v2 v2.0.20/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= +github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo= +github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0= +github.com/pion/rtp v1.8.11 h1:17xjnY5WO5hgO6SD3/NTIUPvSFw/PbLsIJyz1r1yNIk= +github.com/pion/rtp v1.8.11/go.mod 
h1:8uMBJj32Pa1wwx8Fuv/AsFhn8jsgw+3rUC2PfoBZ8p4= +github.com/pion/sctp v1.8.35 h1:qwtKvNK1Wc5tHMIYgTDJhfZk7vATGVHhXbUDfHbYwzA= +github.com/pion/sctp v1.8.35/go.mod h1:EcXP8zCYVTRy3W9xtOF7wJm1L1aXfKRQzaM33SjQlzg= +github.com/pion/sdp/v3 v3.0.10 h1:6MChLE/1xYB+CjumMw+gZ9ufp2DPApuVSnDT8t5MIgA= +github.com/pion/sdp/v3 v3.0.10/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E= +github.com/pion/srtp/v3 v3.0.4 h1:2Z6vDVxzrX3UHEgrUyIGM4rRouoC7v+NiF1IHtp9B5M= +github.com/pion/srtp/v3 v3.0.4/go.mod h1:1Jx3FwDoxpRaTh1oRV8A/6G1BnFL+QI82eK4ms8EEJQ= github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= +github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw= +github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU= github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= -github.com/pion/transport/v2 v2.2.3/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= @@ -255,31 +258,33 @@ github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uP github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc= github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= -github.com/pion/webrtc/v3 v3.3.0 h1:Rf4u6n6U5t5sUxhYPQk/samzU/oDv7jk6BA5hyO2F9I= -github.com/pion/webrtc/v3 v3.3.0/go.mod h1:hVmrDJvwhEertRWObeb1xzulzHGeVUoPlWvxdGzcfU0= +github.com/pion/turn/v4 v4.0.0 h1:qxplo3Rxa9Yg1xXDxxH8xaqcyGUtbHYw4QSCvmFWvhM= +github.com/pion/turn/v4 v4.0.0/go.mod h1:MuPDkm15nYSklKpN8vWJ9W2M0PlyQZqYt1McGuxG7mA= +github.com/pion/webrtc/v4 v4.0.8 h1:T1ZmnT9qxIJIt4d8XoiMOBrTClGHDDXNg9e/fh018Qc= +github.com/pion/webrtc/v4 v4.0.8/go.mod h1:HHBeUVBAC+j4ZFnYhovEFStF02Arb1EyD4G7e7HBTJw= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.20.0 h1:jBzTZ7B099Rg24tny+qngoynol8LtVYlA2bqx3vEloI= -github.com/prometheus/client_golang v1.20.0/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common 
v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= -github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/quic-go v0.46.0 h1:uuwLClEEyk1DNvchH8uCByQVjo3yKL9opKulExNDs7Y= -github.com/quic-go/quic-go v0.46.0/go.mod h1:1dLehS7TIR64+vxGR70GDcatWTOtMX2PUtnKsjbTurI= -github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv/cD7QFJg= -github.com/quic-go/webtransport-go v0.8.0/go.mod h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM= +github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= +github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= +github.com/quic-go/quic-go v0.49.0 h1:w5iJHXwHxs1QxyBv1EHKuC50GX5to8mJAxvtnttJp94= +github.com/quic-go/quic-go v0.49.0/go.mod h1:s2wDnmCdooUQBmQfpUSTCYBl1/D4FcqbULMMkASvR6s= +github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg= +github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= @@ -318,7 +323,6 @@ github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -327,15 +331,15 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod 
h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= -github.com/wlynxg/anet v0.0.4 h1:0de1OFQxnNqAu+x2FAKKCVIrnfGKQbs7FQz++tB0+Uw= -github.com/wlynxg/anet v0.0.4/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= +github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= @@ -344,13 +348,13 @@ go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= -go.uber.org/fx v1.22.2 h1:iPW+OPxv0G8w75OemJ1RAnTUrF55zOJlXlo1TbJ0Buw= -go.uber.org/fx v1.22.2/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= +go.uber.org/fx v1.23.0 h1:lIr/gYWQGfTwGcSXWXu4vP5Ws6iqnNEIY+F/aFzCKTg= +go.uber.org/fx v1.23.0/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= -go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= +go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= +go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= @@ -371,11 +375,11 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c h1:KL/ZBHXgKGVmuZBZ01Lt57yE5ws8ZPSkkihmEyq7FXc= +golang.org/x/exp 
v0.0.0-20250128182459-e0ece0dbea4c/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -387,8 +391,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= +golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -411,8 +415,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -428,8 +432,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= +golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -442,7 +446,6 @@ golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys 
v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -456,8 +459,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= +golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -473,8 +476,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= +golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= @@ -493,8 +496,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= +golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -515,8 +518,8 @@ google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmE google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM= +google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From f486808fbed1d7b36cd05029ec76876c52d718d9 Mon Sep 17 00:00:00 2001 From: Aayush Rajasekaran Date: Sun, 23 Feb 2025 17:39:38 -0500 Subject: [PATCH 11/27] feat: avoid repeated checksum calculations (#599) We don't need to recalculate the checksum for each peer. --- gossipsub.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gossipsub.go b/gossipsub.go index 56b6886..214696b 100644 --- a/gossipsub.go +++ b/gossipsub.go @@ -1196,11 +1196,11 @@ func (gs *GossipSubRouter) Publish(msg *Message) { gs.lastpub[topic] = time.Now().UnixNano() } + csum := computeChecksum(gs.p.idGen.ID(msg)) for p := range gmap { - mid := gs.p.idGen.ID(msg) // Check if it has already received an IDONTWANT for the message. // If so, don't send it to the peer - if _, ok := gs.unwanted[p][computeChecksum(mid)]; ok { + if _, ok := gs.unwanted[p][csum]; ok { continue } tosend[p] = struct{}{} From 68726389f2c07d451f93a210146472e2e11ac32f Mon Sep 17 00:00:00 2001 From: Hlib Kanunnikov Date: Thu, 13 Mar 2025 11:59:39 +0100 Subject: [PATCH 12/27] feat: WithValidatorData publishing option (#603) Micro optimization that helps preventing deserialization of published messages in local subscribtions --- topic.go | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/topic.go b/topic.go index 103e2d5..f9b7ccc 100644 --- a/topic.go +++ b/topic.go @@ -213,9 +213,10 @@ type RouterReady func(rt PubSubRouter, topic string) (bool, error) type ProvideKey func() (crypto.PrivKey, peer.ID) type PublishOptions struct { - ready RouterReady - customKey ProvideKey - local bool + ready RouterReady + customKey ProvideKey + local bool + validatorData any } type PubOpt func(pub *PublishOptions) error @@ -308,7 +309,7 @@ func (t *Topic) Publish(ctx context.Context, data []byte, opts ...PubOpt) error } } - return t.p.val.PushLocal(&Message{m, "", t.p.host.ID(), nil, pub.local}) + return t.p.val.PushLocal(&Message{m, "", t.p.host.ID(), pub.validatorData, pub.local}) } // WithReadiness returns a publishing option for only publishing when the router is ready. @@ -332,6 +333,15 @@ func WithLocalPublication(local bool) PubOpt { } } +// WithValidatorData returns a publishing option to set custom validator data for the message. +// This allows users to avoid deserialization of the message data when validating the message locally. 
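// A rough usage sketch (the decoder and variable names below are illustrative,
// not part of this change): the publisher already holds the decoded form of the
// payload, so it attaches it to the message rather than making local subscribers
// and validators re-parse Data.
//
//	decoded := myDecode(raw)                                    // hypothetical decoder
//	err := topic.Publish(ctx, raw, WithValidatorData(decoded))  // local pipeline sees it as Message.ValidatorData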
+func WithValidatorData(data any) PubOpt { + return func(pub *PublishOptions) error { + pub.validatorData = data + return nil + } +} + // WithSecretKeyAndPeerId returns a publishing option for providing a custom private key and its corresponding peer ID // This option is useful when we want to send messages from "virtual", never-connectable peers in the network func WithSecretKeyAndPeerId(key crypto.PrivKey, pid peer.ID) PubOpt { From 95a070affb1e94ced749733b631a18baaa17a4f4 Mon Sep 17 00:00:00 2001 From: web3-bot Date: Fri, 28 Mar 2025 19:05:04 +0100 Subject: [PATCH 13/27] ci: uci/copy-templates (#604) This PR was created automatically by the @web3-bot as a part of the [Unified CI](https://github.com/ipdxco/unified-github-workflows) project. --- .github/workflows/generated-pr.yml | 14 ++++++++++++++ .github/workflows/stale.yml | 5 +++-- 2 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/generated-pr.yml diff --git a/.github/workflows/generated-pr.yml b/.github/workflows/generated-pr.yml new file mode 100644 index 0000000..b8c5cc6 --- /dev/null +++ b/.github/workflows/generated-pr.yml @@ -0,0 +1,14 @@ +name: Close Generated PRs + +on: + schedule: + - cron: '0 0 * * *' + workflow_dispatch: + +permissions: + issues: write + pull-requests: write + +jobs: + stale: + uses: ipdxco/unified-github-workflows/.github/workflows/reusable-generated-pr.yml@v1 diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 16d65d7..7c955c4 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -1,8 +1,9 @@ -name: Close and mark stale issue +name: Close Stale Issues on: schedule: - cron: '0 0 * * *' + workflow_dispatch: permissions: issues: write @@ -10,4 +11,4 @@ permissions: jobs: stale: - uses: pl-strflt/.github/.github/workflows/reusable-stale-issue.yml@v0.3 + uses: ipdxco/unified-github-workflows/.github/workflows/reusable-stale-issue.yml@v1 From 50ccc5ca90cc48ff635b60e5bbb5dd039f3c2e0e Mon Sep 17 00:00:00 2001 From: Marco Munizaga Date: Wed, 30 Apr 2025 00:58:50 -0700 Subject: [PATCH 14/27] fix(IDONTWANT)!: Do not IDONTWANT your sender (#609) We were sending IDONTWANT to the sender of the received message. This is pointless, as the sender should not repeat a message it already sent. The sender could also have tracked that it had sent this peer the message (we don't do this currently, and it's probably not necessary). @ppopth --- floodsub.go | 2 +- gossipsub.go | 6 +++- gossipsub_test.go | 72 +++++++++++++++++++++++++++++++++++++++++++++++ pubsub.go | 4 +-- randomsub.go | 2 +- 5 files changed, 81 insertions(+), 5 deletions(-) diff --git a/floodsub.go b/floodsub.go index 45b3fde..4c943bb 100644 --- a/floodsub.go +++ b/floodsub.go @@ -71,7 +71,7 @@ func (fs *FloodSubRouter) AcceptFrom(peer.ID) AcceptStatus { return AcceptAll } -func (fs *FloodSubRouter) PreValidation([]*Message) {} +func (fs *FloodSubRouter) PreValidation(from peer.ID, msgs []*Message) {} func (fs *FloodSubRouter) HandleRPC(rpc *RPC) {} diff --git a/gossipsub.go b/gossipsub.go index 214696b..ecd4eda 100644 --- a/gossipsub.go +++ b/gossipsub.go @@ -707,7 +707,7 @@ func (gs *GossipSubRouter) AcceptFrom(p peer.ID) AcceptStatus { // PreValidation sends the IDONTWANT control messages to all the mesh // peers. They need to be sent right before the validation because they // should be seen by the peers as soon as possible. 
-func (gs *GossipSubRouter) PreValidation(msgs []*Message) { +func (gs *GossipSubRouter) PreValidation(from peer.ID, msgs []*Message) { tmids := make(map[string][]string) for _, msg := range msgs { if len(msg.GetData()) < gs.params.IDontWantMessageThreshold { @@ -724,6 +724,10 @@ func (gs *GossipSubRouter) PreValidation(msgs []*Message) { shuffleStrings(mids) // send IDONTWANT to all the mesh peers for p := range gs.mesh[topic] { + if p == from { + // We don't send IDONTWANT to the peer that sent us the messages + continue + } // send to only peers that support IDONTWANT if gs.feature(GossipSubFeatureIdontwant, gs.peers[p]) { idontwant := []*pb.ControlIDontWant{{MessageIDs: mids}} diff --git a/gossipsub_test.go b/gossipsub_test.go index 675d164..abb347f 100644 --- a/gossipsub_test.go +++ b/gossipsub_test.go @@ -2815,6 +2815,78 @@ func TestGossipsubIdontwantReceive(t *testing.T) { <-ctx.Done() } +type mockRawTracer struct { + onRecvRPC func(*RPC) +} + +func (m *mockRawTracer) RecvRPC(rpc *RPC) { + if m.onRecvRPC != nil { + m.onRecvRPC(rpc) + } +} + +func (m *mockRawTracer) AddPeer(p peer.ID, proto protocol.ID) {} +func (m *mockRawTracer) DeliverMessage(msg *Message) {} +func (m *mockRawTracer) DropRPC(rpc *RPC, p peer.ID) {} +func (m *mockRawTracer) DuplicateMessage(msg *Message) {} +func (m *mockRawTracer) Graft(p peer.ID, topic string) {} +func (m *mockRawTracer) Join(topic string) {} +func (m *mockRawTracer) Leave(topic string) {} +func (m *mockRawTracer) Prune(p peer.ID, topic string) {} +func (m *mockRawTracer) RejectMessage(msg *Message, reason string) {} +func (m *mockRawTracer) RemovePeer(p peer.ID) {} +func (m *mockRawTracer) SendRPC(rpc *RPC, p peer.ID) {} +func (m *mockRawTracer) ThrottlePeer(p peer.ID) {} +func (m *mockRawTracer) UndeliverableMessage(msg *Message) {} +func (m *mockRawTracer) ValidateMessage(msg *Message) {} + +var _ RawTracer = &mockRawTracer{} + +func TestGossipsubNoIDONTWANTToMessageSender(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getDefaultHosts(t, 3) + denseConnect(t, hosts) + + psubs := make([]*PubSub, 2) + + receivedIDONTWANT := make(chan struct{}) + psubs[0] = getGossipsub(ctx, hosts[0], WithRawTracer(&mockRawTracer{ + onRecvRPC: func(rpc *RPC) { + if len(rpc.GetControl().GetIdontwant()) > 0 { + close(receivedIDONTWANT) + } + }, + })) + psubs[1] = getGossipsub(ctx, hosts[1]) + + topicString := "foobar" + var topics []*Topic + for _, ps := range psubs { + topic, err := ps.Join(topicString) + if err != nil { + t.Fatal(err) + } + topics = append(topics, topic) + + _, err = ps.Subscribe(topicString) + if err != nil { + t.Fatal(err) + } + } + time.Sleep(time.Second) + + msg := make([]byte, GossipSubIDontWantMessageThreshold+1) + topics[0].Publish(ctx, msg) + + select { + case <-receivedIDONTWANT: + t.Fatal("IDONTWANT should not be sent to the message sender") + case <-time.After(time.Second): + } + +} + // Test that non-mesh peers will not get IDONTWANT func TestGossipsubIdontwantNonMesh(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) diff --git a/pubsub.go b/pubsub.go index 3ca14ab..5c27c3e 100644 --- a/pubsub.go +++ b/pubsub.go @@ -203,7 +203,7 @@ type PubSubRouter interface { AcceptFrom(peer.ID) AcceptStatus // PreValidation is invoked on messages in the RPC envelope right before pushing it to // the validation pipeline - PreValidation([]*Message) + PreValidation(from peer.ID, msgs []*Message) // HandleRPC is invoked to process control messages in the RPC envelope. 
// It is invoked after subscriptions and payload messages have been processed. HandleRPC(*RPC) @@ -1106,7 +1106,7 @@ func (p *PubSub) handleIncomingRPC(rpc *RPC) { toPush = append(toPush, msg) } } - p.rt.PreValidation(toPush) + p.rt.PreValidation(rpc.from, toPush) for _, msg := range toPush { p.pushMsg(msg) } diff --git a/randomsub.go b/randomsub.go index 4e410f5..f9f6473 100644 --- a/randomsub.go +++ b/randomsub.go @@ -94,7 +94,7 @@ func (rs *RandomSubRouter) AcceptFrom(peer.ID) AcceptStatus { return AcceptAll } -func (rs *RandomSubRouter) PreValidation([]*Message) {} +func (rs *RandomSubRouter) PreValidation(from peer.ID, msgs []*Message) {} func (rs *RandomSubRouter) HandleRPC(rpc *RPC) {} From 0c5ee7bbfeb051200bc39eb824246cc651f7358a Mon Sep 17 00:00:00 2001 From: Marco Munizaga Date: Thu, 8 May 2025 10:23:02 -0700 Subject: [PATCH 15/27] feat(gossipsub): Add MessageBatch (#607) to support batch publishing messages Replaces #602. Batch publishing lets the system know there are multiple related messages to be published so it can prioritize sending different messages before sending copies of messages. For example, with the default API, when you publish two messages A and B, under the hood A gets sent to D=8 peers first, before B gets sent out. With this MessageBatch api we can now send one copy of A _and then_ one copy of B before sending multiple copies. When a node has bandwidth constraints relative to the messages it is publishing this improves dissemination time. For more context see this post: https://ethresear.ch/t/improving-das-performance-with-gossipsub-batch-publishing/21713 --- gossipsub.go | 149 +++++++++++++++++++-------------- gossipsub_test.go | 208 ++++++++++++++++++++++++++++++++++++++++++++++ messagebatch.go | 62 ++++++++++++++ pubsub.go | 53 ++++++++++++ topic.go | 62 ++++++++++++-- validation.go | 51 +++++++----- 6 files changed, 496 insertions(+), 89 deletions(-) create mode 100644 messagebatch.go diff --git a/gossipsub.go b/gossipsub.go index ecd4eda..b5a605a 100644 --- a/gossipsub.go +++ b/gossipsub.go @@ -5,6 +5,7 @@ import ( "crypto/sha256" "fmt" "io" + "iter" "math/rand" "sort" "time" @@ -522,6 +523,8 @@ type GossipSubRouter struct { heartbeatTicks uint64 } +var _ BatchPublisher = &GossipSubRouter{} + type connectInfo struct { p peer.ID spr *record.Envelope @@ -1143,81 +1146,105 @@ func (gs *GossipSubRouter) connector() { } } -func (gs *GossipSubRouter) Publish(msg *Message) { - gs.mcache.Put(msg) - - from := msg.ReceivedFrom - topic := msg.GetTopic() - - tosend := make(map[peer.ID]struct{}) - - // any peers in the topic? 
- tmap, ok := gs.p.topics[topic] - if !ok { - return +func (gs *GossipSubRouter) PublishBatch(messages []*Message, opts *BatchPublishOptions) { + strategy := opts.Strategy + for _, msg := range messages { + msgID := gs.p.idGen.ID(msg) + for p, rpc := range gs.rpcs(msg) { + strategy.AddRPC(p, msgID, rpc) + } } - if gs.floodPublish && from == gs.p.host.ID() { - for p := range tmap { - _, direct := gs.direct[p] - if direct || gs.score.Score(p) >= gs.publishThreshold { - tosend[p] = struct{}{} - } - } - } else { - // direct peers - for p := range gs.direct { - _, inTopic := tmap[p] - if inTopic { - tosend[p] = struct{}{} - } - } + for p, rpc := range strategy.All() { + gs.sendRPC(p, rpc, false) + } +} - // floodsub peers - for p := range tmap { - if !gs.feature(GossipSubFeatureMesh, gs.peers[p]) && gs.score.Score(p) >= gs.publishThreshold { - tosend[p] = struct{}{} - } - } +func (gs *GossipSubRouter) Publish(msg *Message) { + for p, rpc := range gs.rpcs(msg) { + gs.sendRPC(p, rpc, false) + } +} - // gossipsub peers - gmap, ok := gs.mesh[topic] +func (gs *GossipSubRouter) rpcs(msg *Message) iter.Seq2[peer.ID, *RPC] { + return func(yield func(peer.ID, *RPC) bool) { + gs.mcache.Put(msg) + + from := msg.ReceivedFrom + topic := msg.GetTopic() + + tosend := make(map[peer.ID]struct{}) + + // any peers in the topic? + tmap, ok := gs.p.topics[topic] if !ok { - // we are not in the mesh for topic, use fanout peers - gmap, ok = gs.fanout[topic] - if !ok || len(gmap) == 0 { - // we don't have any, pick some with score above the publish threshold - peers := gs.getPeers(topic, gs.params.D, func(p peer.ID) bool { - _, direct := gs.direct[p] - return !direct && gs.score.Score(p) >= gs.publishThreshold - }) + return + } - if len(peers) > 0 { - gmap = peerListToMap(peers) - gs.fanout[topic] = gmap + if gs.floodPublish && from == gs.p.host.ID() { + for p := range tmap { + _, direct := gs.direct[p] + if direct || gs.score.Score(p) >= gs.publishThreshold { + tosend[p] = struct{}{} } } - gs.lastpub[topic] = time.Now().UnixNano() + } else { + // direct peers + for p := range gs.direct { + _, inTopic := tmap[p] + if inTopic { + tosend[p] = struct{}{} + } + } + + // floodsub peers + for p := range tmap { + if !gs.feature(GossipSubFeatureMesh, gs.peers[p]) && gs.score.Score(p) >= gs.publishThreshold { + tosend[p] = struct{}{} + } + } + + // gossipsub peers + gmap, ok := gs.mesh[topic] + if !ok { + // we are not in the mesh for topic, use fanout peers + gmap, ok = gs.fanout[topic] + if !ok || len(gmap) == 0 { + // we don't have any, pick some with score above the publish threshold + peers := gs.getPeers(topic, gs.params.D, func(p peer.ID) bool { + _, direct := gs.direct[p] + return !direct && gs.score.Score(p) >= gs.publishThreshold + }) + + if len(peers) > 0 { + gmap = peerListToMap(peers) + gs.fanout[topic] = gmap + } + } + gs.lastpub[topic] = time.Now().UnixNano() + } + + csum := computeChecksum(gs.p.idGen.ID(msg)) + for p := range gmap { + // Check if it has already received an IDONTWANT for the message. + // If so, don't send it to the peer + if _, ok := gs.unwanted[p][csum]; ok { + continue + } + tosend[p] = struct{}{} + } } - csum := computeChecksum(gs.p.idGen.ID(msg)) - for p := range gmap { - // Check if it has already received an IDONTWANT for the message. 
- // If so, don't send it to the peer - if _, ok := gs.unwanted[p][csum]; ok { + out := rpcWithMessages(msg.Message) + for pid := range tosend { + if pid == from || pid == peer.ID(msg.GetFrom()) { continue } - tosend[p] = struct{}{} - } - } - out := rpcWithMessages(msg.Message) - for pid := range tosend { - if pid == from || pid == peer.ID(msg.GetFrom()) { - continue + if !yield(pid, out) { + return + } } - - gs.sendRPC(pid, out, false) } } diff --git a/gossipsub_test.go b/gossipsub_test.go index abb347f..72188be 100644 --- a/gossipsub_test.go +++ b/gossipsub_test.go @@ -9,9 +9,11 @@ import ( "io" mrand "math/rand" "sort" + "strings" "sync" "sync/atomic" "testing" + "testing/quick" "time" pb "github.com/libp2p/go-libp2p-pubsub/pb" @@ -3406,3 +3408,209 @@ func BenchmarkAllocDoDropRPC(b *testing.B) { gs.doDropRPC(&RPC{}, "peerID", "reason") } } + +func TestRoundRobinMessageIDScheduler(t *testing.T) { + const maxNumPeers = 256 + const maxNumMessages = 1_000 + + err := quick.Check(func(numPeers uint16, numMessages uint16) bool { + numPeers = numPeers % maxNumPeers + numMessages = numMessages % maxNumMessages + + output := make([]pendingRPC, 0, numMessages*numPeers) + + var strategy RoundRobinMessageIDScheduler + + peers := make([]peer.ID, numPeers) + for i := 0; i < int(numPeers); i++ { + peers[i] = peer.ID(fmt.Sprintf("peer%d", i)) + } + + getID := func(r pendingRPC) string { + return string(r.rpc.Publish[0].Data) + } + + for i := range int(numMessages) { + for j := range int(numPeers) { + strategy.AddRPC(peers[j], fmt.Sprintf("msg%d", i), &RPC{ + RPC: pb.RPC{ + Publish: []*pb.Message{ + { + Data: []byte(fmt.Sprintf("msg%d", i)), + }, + }, + }, + }) + } + } + + for p, rpc := range strategy.All() { + output = append(output, pendingRPC{ + peer: p, + rpc: rpc, + }) + } + + // Check invariants + // 1. The published rpcs count is the same as the number of messages added + // 2. Before all message IDs are seen, no message ID may be repeated + // 3. The set of message ID + peer ID combinations should be the same as the input + + // 1. + expectedCount := int(numMessages) * int(numPeers) + if len(output) != expectedCount { + t.Logf("Expected %d RPCs, got %d", expectedCount, len(output)) + return false + } + + // 2. + seen := make(map[string]bool) + expected := make(map[string]bool) + for i := 0; i < int(numMessages); i++ { + expected[fmt.Sprintf("msg%d", i)] = true + } + + for _, rpc := range output { + if expected[getID(rpc)] { + delete(expected, getID(rpc)) + } + if seen[getID(rpc)] && len(expected) > 0 { + t.Logf("Message ID %s repeated before all message IDs are seen", getID(rpc)) + return false + } + seen[getID(rpc)] = true + } + + // 3. 
+ inputSet := make(map[string]bool) + for i := range int(numMessages) { + for j := range int(numPeers) { + inputSet[fmt.Sprintf("msg%d:peer%d", i, j)] = true + } + } + for _, rpc := range output { + if !inputSet[getID(rpc)+":"+string(rpc.peer)] { + t.Logf("Message ID %s not in input", getID(rpc)) + return false + } + } + return true + }, &quick.Config{MaxCount: 32}) + if err != nil { + t.Fatal(err) + } +} + +func BenchmarkRoundRobinMessageIDScheduler(b *testing.B) { + const numPeers = 1_000 + const numMessages = 1_000 + var strategy RoundRobinMessageIDScheduler + + peers := make([]peer.ID, numPeers) + for i := range int(numPeers) { + peers[i] = peer.ID(fmt.Sprintf("peer%d", i)) + } + msgs := make([]string, numMessages) + for i := range numMessages { + msgs[i] = fmt.Sprintf("msg%d", i) + } + + emptyRPC := &RPC{} + b.ResetTimer() + + for i := 0; i < b.N; i++ { + j := i % len(peers) + msgIdx := i % numMessages + strategy.AddRPC(peers[j], msgs[msgIdx], emptyRPC) + if i%100 == 0 { + for range strategy.All() { + } + } + } +} + +func TestMessageBatchPublish(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getDefaultHosts(t, 20) + + msgIDFn := func(msg *pb.Message) string { + hdr := string(msg.Data[0:16]) + msgID := strings.SplitN(hdr, " ", 2) + return msgID[0] + } + const numMessages = 100 + // +8 to account for the gossiping overhead + psubs := getGossipsubs(ctx, hosts, WithMessageIdFn(msgIDFn), WithPeerOutboundQueueSize(numMessages+8)) + + var topics []*Topic + var msgs []*Subscription + for _, ps := range psubs { + topic, err := ps.Join("foobar") + if err != nil { + t.Fatal(err) + } + topics = append(topics, topic) + + subch, err := topic.Subscribe(WithBufferSize(numMessages + 8)) + if err != nil { + t.Fatal(err) + } + + msgs = append(msgs, subch) + } + + sparseConnect(t, hosts) + + // wait for heartbeats to build mesh + time.Sleep(time.Second * 2) + + var batch MessageBatch + for i := 0; i < numMessages; i++ { + msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) + err := topics[0].AddToBatch(ctx, &batch, msg) + if err != nil { + t.Fatal(err) + } + } + err := psubs[0].PublishBatch(&batch) + if err != nil { + t.Fatal(err) + } + + for range numMessages { + for _, sub := range msgs { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(sub.err) + } + id := msgIDFn(got.Message) + expected := []byte(fmt.Sprintf("%s it's not a floooooood %s", id, id)) + if !bytes.Equal(expected, got.Data) { + t.Fatal("got wrong message!") + } + } + } +} + +func TestPublishDuplicateMessage(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getDefaultHosts(t, 1) + psubs := getGossipsubs(ctx, hosts, WithMessageIdFn(func(msg *pb.Message) string { + return string(msg.Data) + })) + topic, err := psubs[0].Join("foobar") + if err != nil { + t.Fatal(err) + } + err = topic.Publish(ctx, []byte("hello")) + if err != nil { + t.Fatal(err) + } + + err = topic.Publish(ctx, []byte("hello")) + if err != nil { + t.Fatal("Duplicate message should not return an error") + } +} diff --git a/messagebatch.go b/messagebatch.go new file mode 100644 index 0000000..8178645 --- /dev/null +++ b/messagebatch.go @@ -0,0 +1,62 @@ +package pubsub + +import ( + "iter" + + "github.com/libp2p/go-libp2p/core/peer" +) + +// MessageBatch allows a user to batch related messages and then publish them at +// once. This allows the Scheduler to define an order for outgoing RPCs. +// This helps bandwidth constrained peers. 
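// As a rough illustration (peer and message names are made up): for a batch of
// two messages A and B destined for mesh peers p1..p8, the default round-robin
// scheduler emits approximately
//
//	A -> p1, B -> p1, A -> p2, B -> p2, ...
//
// so one copy of every message in the batch goes out before any message is sent
// a second time, instead of all copies of A followed by all copies of B.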
+type MessageBatch struct { + messages []*Message +} + +type messageBatchAndPublishOptions struct { + messages []*Message + opts *BatchPublishOptions +} + +// RPCScheduler schedules outgoing RPCs. +type RPCScheduler interface { + // AddRPC adds an RPC to the scheduler. + AddRPC(peer peer.ID, msgID string, rpc *RPC) + // All returns an ordered iterator of RPCs. + All() iter.Seq2[peer.ID, *RPC] +} + +type pendingRPC struct { + peer peer.ID + rpc *RPC +} + +// RoundRobinMessageIDScheduler schedules outgoing RPCs in round-robin order of message IDs. +type RoundRobinMessageIDScheduler struct { + rpcs map[string][]pendingRPC +} + +func (s *RoundRobinMessageIDScheduler) AddRPC(peer peer.ID, msgID string, rpc *RPC) { + if s.rpcs == nil { + s.rpcs = make(map[string][]pendingRPC) + } + s.rpcs[msgID] = append(s.rpcs[msgID], pendingRPC{peer: peer, rpc: rpc}) +} + +func (s *RoundRobinMessageIDScheduler) All() iter.Seq2[peer.ID, *RPC] { + return func(yield func(peer.ID, *RPC) bool) { + for len(s.rpcs) > 0 { + for msgID, rpcs := range s.rpcs { + if len(rpcs) == 0 { + delete(s.rpcs, msgID) + continue + } + if !yield(rpcs[0].peer, rpcs[0].rpc) { + return + } + + s.rpcs[msgID] = rpcs[1:] + } + } + } +} diff --git a/pubsub.go b/pubsub.go index 5c27c3e..fae115a 100644 --- a/pubsub.go +++ b/pubsub.go @@ -134,6 +134,9 @@ type PubSub struct { // sendMsg handles messages that have been validated sendMsg chan *Message + // sendMessageBatch publishes a batch of messages + sendMessageBatch chan messageBatchAndPublishOptions + // addVal handles validator registration requests addVal chan *addValReq @@ -217,6 +220,10 @@ type PubSubRouter interface { Leave(topic string) } +type BatchPublisher interface { + PublishBatch(messages []*Message, opts *BatchPublishOptions) +} + type AcceptStatus int const ( @@ -281,6 +288,7 @@ func NewPubSub(ctx context.Context, h host.Host, rt PubSubRouter, opts ...Option rmTopic: make(chan *rmTopicReq), getTopics: make(chan *topicReq), sendMsg: make(chan *Message, 32), + sendMessageBatch: make(chan messageBatchAndPublishOptions, 1), addVal: make(chan *addValReq), rmVal: make(chan *rmValReq), eval: make(chan func()), @@ -642,6 +650,9 @@ func (p *PubSub) processLoop(ctx context.Context) { case msg := <-p.sendMsg: p.publishMessage(msg) + case batchAndOpts := <-p.sendMessageBatch: + p.publishMessageBatch(batchAndOpts) + case req := <-p.addVal: p.val.AddValidator(req) @@ -1221,6 +1232,15 @@ func (p *PubSub) publishMessage(msg *Message) { } } +func (p *PubSub) publishMessageBatch(batchAndOpts messageBatchAndPublishOptions) { + for _, msg := range batchAndOpts.messages { + p.tracer.DeliverMessage(msg) + p.notifySubs(msg) + } + // We type checked when pushing the batch to the channel + p.rt.(BatchPublisher).PublishBatch(batchAndOpts.messages, batchAndOpts.opts) +} + type addTopicReq struct { topic *Topic resp chan *Topic @@ -1358,6 +1378,39 @@ func (p *PubSub) Publish(topic string, data []byte, opts ...PubOpt) error { return t.Publish(context.TODO(), data, opts...) } +// PublishBatch publishes a batch of messages. This only works for routers that +// implement the BatchPublisher interface. +// +// Users should make sure there is enough space in the Peer's outbound queue to +// ensure messages are not dropped. WithPeerOutboundQueueSize should be set to +// at least the expected number of batched messages per peer plus some slack to +// account for gossip messages. +// +// The default publish strategy is RoundRobinMessageIDScheduler. 
+func (p *PubSub) PublishBatch(batch *MessageBatch, opts ...BatchPubOpt) error { + if _, ok := p.rt.(BatchPublisher); !ok { + return fmt.Errorf("pubsub router is not a BatchPublisher") + } + + publishOptions := &BatchPublishOptions{} + for _, o := range opts { + err := o(publishOptions) + if err != nil { + return err + } + } + setDefaultBatchPublishOptions(publishOptions) + + p.sendMessageBatch <- messageBatchAndPublishOptions{ + messages: batch.messages, + opts: publishOptions, + } + + // Clear the batch's messages in case a user reuses the same batch object + batch.messages = nil + return nil +} + func (p *PubSub) nextSeqno() []byte { seqno := make([]byte, 8) counter := atomic.AddUint64(&p.counter, 1) diff --git a/topic.go b/topic.go index f9b7ccc..a6ad979 100644 --- a/topic.go +++ b/topic.go @@ -219,14 +219,53 @@ type PublishOptions struct { validatorData any } +type BatchPublishOptions struct { + Strategy RPCScheduler +} + type PubOpt func(pub *PublishOptions) error +type BatchPubOpt func(pub *BatchPublishOptions) error + +func setDefaultBatchPublishOptions(opts *BatchPublishOptions) { + if opts.Strategy == nil { + opts.Strategy = &RoundRobinMessageIDScheduler{} + } +} // Publish publishes data to topic. func (t *Topic) Publish(ctx context.Context, data []byte, opts ...PubOpt) error { + msg, err := t.validate(ctx, data, opts...) + if err != nil { + if errors.Is(err, dupeErr{}) { + // If it was a duplicate, we return nil to indicate success. + // Semantically the message was published by us or someone else. + return nil + } + return err + } + return t.p.val.sendMsgBlocking(msg) +} + +func (t *Topic) AddToBatch(ctx context.Context, batch *MessageBatch, data []byte, opts ...PubOpt) error { + msg, err := t.validate(ctx, data, opts...) + if err != nil { + if errors.Is(err, dupeErr{}) { + // If it was a duplicate, we return nil to indicate success. + // Semantically the message was published by us or someone else. + // We won't add it to the batch. Since it's already been published. 
+ return nil + } + return err + } + batch.messages = append(batch.messages, msg) + return nil +} + +func (t *Topic) validate(ctx context.Context, data []byte, opts ...PubOpt) (*Message, error) { t.mux.RLock() defer t.mux.RUnlock() if t.closed { - return ErrTopicClosed + return nil, ErrTopicClosed } pid := t.p.signID @@ -236,17 +275,17 @@ func (t *Topic) Publish(ctx context.Context, data []byte, opts ...PubOpt) error for _, opt := range opts { err := opt(pub) if err != nil { - return err + return nil, err } } if pub.customKey != nil && !pub.local { key, pid = pub.customKey() if key == nil { - return ErrNilSignKey + return nil, ErrNilSignKey } if len(pid) == 0 { - return ErrEmptyPeerID + return nil, ErrEmptyPeerID } } @@ -264,7 +303,7 @@ func (t *Topic) Publish(ctx context.Context, data []byte, opts ...PubOpt) error m.From = []byte(pid) err := signMessage(pid, key, m) if err != nil { - return err + return nil, err } } @@ -291,9 +330,9 @@ func (t *Topic) Publish(ctx context.Context, data []byte, opts ...PubOpt) error break readyLoop } case <-t.p.ctx.Done(): - return t.p.ctx.Err() + return nil, t.p.ctx.Err() case <-ctx.Done(): - return ctx.Err() + return nil, ctx.Err() } if ticker == nil { ticker = time.NewTicker(200 * time.Millisecond) @@ -303,13 +342,18 @@ func (t *Topic) Publish(ctx context.Context, data []byte, opts ...PubOpt) error select { case <-ticker.C: case <-ctx.Done(): - return fmt.Errorf("router is not ready: %w", ctx.Err()) + return nil, fmt.Errorf("router is not ready: %w", ctx.Err()) } } } } - return t.p.val.PushLocal(&Message{m, "", t.p.host.ID(), pub.validatorData, pub.local}) + msg := &Message{m, "", t.p.host.ID(), pub.validatorData, pub.local} + err := t.p.val.ValidateLocal(msg) + if err != nil { + return nil, err + } + return msg, nil } // WithReadiness returns a publishing option for only publishing when the router is ready. diff --git a/validation.go b/validation.go index 1044d5d..6433a41 100644 --- a/validation.go +++ b/validation.go @@ -26,6 +26,12 @@ func (e ValidationError) Error() string { return e.Reason } +type dupeErr struct{} + +func (dupeErr) Error() string { + return "duplicate message" +} + // Validator is a function that validates a message with a binary decision: accept or reject. type Validator func(context.Context, peer.ID, *Message) bool @@ -226,10 +232,9 @@ func (v *validation) RemoveValidator(req *rmValReq) { } } -// PushLocal synchronously pushes a locally published message and performs applicable -// validations. -// Returns an error if validation fails -func (v *validation) PushLocal(msg *Message) error { +// ValidateLocal synchronously validates a locally published message and +// performs applicable validations. Returns an error if validation fails. +func (v *validation) ValidateLocal(msg *Message) error { v.p.tracer.PublishMessage(msg) err := v.p.checkSigningPolicy(msg) @@ -238,7 +243,9 @@ func (v *validation) PushLocal(msg *Message) error { } vals := v.getValidators(msg) - return v.validate(vals, msg.ReceivedFrom, msg, true) + return v.validate(vals, msg.ReceivedFrom, msg, true, func(msg *Message) error { + return nil + }) } // Push pushes a message into the validation pipeline. 
@@ -282,15 +289,26 @@ func (v *validation) validateWorker() { for { select { case req := <-v.validateQ: - v.validate(req.vals, req.src, req.msg, false) + _ = v.validate(req.vals, req.src, req.msg, false, v.sendMsgBlocking) case <-v.p.ctx.Done(): return } } } -// validate performs validation and only sends the message if all validators succeed -func (v *validation) validate(vals []*validatorImpl, src peer.ID, msg *Message, synchronous bool) error { +func (v *validation) sendMsgBlocking(msg *Message) error { + select { + case v.p.sendMsg <- msg: + return nil + case <-v.p.ctx.Done(): + return v.p.ctx.Err() + } +} + +// validate performs validation and only calls onValid if all validators succeed. +// If synchronous is true, onValid will be called before this function returns +// if the message is new and accepted. +func (v *validation) validate(vals []*validatorImpl, src peer.ID, msg *Message, synchronous bool, onValid func(*Message) error) error { // If signature verification is enabled, but signing is disabled, // the Signature is required to be nil upon receiving the message in PubSub.pushMsg. if msg.Signature != nil { @@ -306,7 +324,7 @@ func (v *validation) validate(vals []*validatorImpl, src peer.ID, msg *Message, id := v.p.idGen.ID(msg) if !v.p.markSeen(id) { v.tracer.DuplicateMessage(msg) - return nil + return dupeErr{} } else { v.tracer.ValidateMessage(msg) } @@ -345,7 +363,7 @@ loop: select { case v.validateThrottle <- struct{}{}: go func() { - v.doValidateTopic(async, src, msg, result) + v.doValidateTopic(async, src, msg, result, onValid) <-v.validateThrottle }() default: @@ -360,13 +378,8 @@ loop: return ValidationError{Reason: RejectValidationIgnored} } - // no async validators, accepted message, send it! - select { - case v.p.sendMsg <- msg: - return nil - case <-v.p.ctx.Done(): - return v.p.ctx.Err() - } + // no async validators, accepted message + return onValid(msg) } func (v *validation) validateSignature(msg *Message) bool { @@ -379,7 +392,7 @@ func (v *validation) validateSignature(msg *Message) bool { return true } -func (v *validation) doValidateTopic(vals []*validatorImpl, src peer.ID, msg *Message, r ValidationResult) { +func (v *validation) doValidateTopic(vals []*validatorImpl, src peer.ID, msg *Message, r ValidationResult, onValid func(*Message) error) { result := v.validateTopic(vals, src, msg) if result == ValidationAccept && r != ValidationAccept { @@ -388,7 +401,7 @@ func (v *validation) doValidateTopic(vals []*validatorImpl, src peer.ID, msg *Me switch result { case ValidationAccept: - v.p.sendMsg <- msg + _ = onValid(msg) case ValidationReject: log.Debugf("message validation failed; dropping message from %s", src) v.tracer.RejectMessage(msg, RejectValidationFailed) From 9e5145fb29c9df968bbec842fcb4cbab64f47b7f Mon Sep 17 00:00:00 2001 From: Marco Munizaga Date: Mon, 19 May 2025 17:02:21 -0700 Subject: [PATCH 16/27] Send IDONTWANT before first publish (#612) See #610 We previously send IDONTWANT only when forwarding. This has us send IDONTWANT on our initial publish as well. Helps in the case that one or more peers may also publish the same thing at around the same time (see #610 for a longer explanation) and prevents "boomerang" duplicates where a peer sends you back the message you sent before you get a chance to send it to them. This also serves as a hint to a peer that you are about to send them a certain message. 
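Concretely, a simplified sketch of the publish path after this change (not the
complete code; see the topic.go hunk below): the router's Preprocess hook now
runs for locally published messages before local validation, so gossipsub can
push IDONTWANT to its mesh peers ahead of the first copy of the message itself:

    msg := &Message{m, "", t.p.host.ID(), pub.validatorData, pub.local}
    t.p.rt.Preprocess(t.p.host.ID(), []*Message{msg}) // gossipsub sends IDONTWANT to mesh peers here
    err := t.p.val.ValidateLocal(msg)                 // then validate and hand off for publishing as before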
--- floodsub.go | 2 +- gossipsub.go | 4 +-- gossipsub_test.go | 63 ++++++++++++++++++++++++++++++++++++++++++++++- pubsub.go | 6 ++--- randomsub.go | 2 +- topic.go | 1 + 6 files changed, 70 insertions(+), 8 deletions(-) diff --git a/floodsub.go b/floodsub.go index 4c943bb..359886a 100644 --- a/floodsub.go +++ b/floodsub.go @@ -71,7 +71,7 @@ func (fs *FloodSubRouter) AcceptFrom(peer.ID) AcceptStatus { return AcceptAll } -func (fs *FloodSubRouter) PreValidation(from peer.ID, msgs []*Message) {} +func (fs *FloodSubRouter) Preprocess(from peer.ID, msgs []*Message) {} func (fs *FloodSubRouter) HandleRPC(rpc *RPC) {} diff --git a/gossipsub.go b/gossipsub.go index b5a605a..3b52efe 100644 --- a/gossipsub.go +++ b/gossipsub.go @@ -707,10 +707,10 @@ func (gs *GossipSubRouter) AcceptFrom(p peer.ID) AcceptStatus { return gs.gate.AcceptFrom(p) } -// PreValidation sends the IDONTWANT control messages to all the mesh +// Preprocess sends the IDONTWANT control messages to all the mesh // peers. They need to be sent right before the validation because they // should be seen by the peers as soon as possible. -func (gs *GossipSubRouter) PreValidation(from peer.ID, msgs []*Message) { +func (gs *GossipSubRouter) Preprocess(from peer.ID, msgs []*Message) { tmids := make(map[string][]string) for _, msg := range msgs { if len(msg.GetData()) < gs.params.IDontWantMessageThreshold { diff --git a/gossipsub_test.go b/gossipsub_test.go index 72188be..2231352 100644 --- a/gossipsub_test.go +++ b/gossipsub_test.go @@ -2847,7 +2847,7 @@ var _ RawTracer = &mockRawTracer{} func TestGossipsubNoIDONTWANTToMessageSender(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getDefaultHosts(t, 3) + hosts := getDefaultHosts(t, 2) denseConnect(t, hosts) psubs := make([]*PubSub, 2) @@ -2886,6 +2886,67 @@ func TestGossipsubNoIDONTWANTToMessageSender(t *testing.T) { t.Fatal("IDONTWANT should not be sent to the message sender") case <-time.After(time.Second): } +} + +func TestGossipsubIDONTWANTBeforeFirstPublish(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getDefaultHosts(t, 2) + denseConnect(t, hosts) + + psubs := make([]*PubSub, 2) + + psubs[0] = getGossipsub(ctx, hosts[0]) + rpcsReceived := make(chan string) + psubs[1] = getGossipsub(ctx, hosts[1], WithRawTracer(&mockRawTracer{ + onRecvRPC: func(rpc *RPC) { + if len(rpc.GetControl().GetIdontwant()) > 0 { + rpcsReceived <- "idontwant" + } + if len(rpc.GetPublish()) > 0 { + rpcsReceived <- "publish" + } + }, + })) + + topicString := "foobar" + var topics []*Topic + for _, ps := range psubs { + topic, err := ps.Join(topicString) + if err != nil { + t.Fatal(err) + } + topics = append(topics, topic) + + _, err = ps.Subscribe(topicString) + if err != nil { + t.Fatal(err) + } + } + time.Sleep(2 * time.Second) + + msg := make([]byte, GossipSubIDontWantMessageThreshold+1) + _ = topics[0].Publish(ctx, msg) + + timeout := time.After(5 * time.Second) + + select { + case kind := <-rpcsReceived: + if kind == "publish" { + t.Fatal("IDONTWANT should be sent before publish") + } + case <-timeout: + t.Fatal("IDONTWANT should be sent on first publish") + } + + select { + case kind := <-rpcsReceived: + if kind != "publish" { + t.Fatal("Expected publish after IDONTWANT") + } + case <-timeout: + t.Fatal("Expected publish after IDONTWANT") + } } diff --git a/pubsub.go b/pubsub.go index fae115a..e8e598b 100644 --- a/pubsub.go +++ b/pubsub.go @@ -204,9 +204,9 @@ type PubSubRouter interface { // Allows 
routers with internal scoring to vet peers before committing any processing resources // to the message and implement an effective graylist and react to validation queue overload. AcceptFrom(peer.ID) AcceptStatus - // PreValidation is invoked on messages in the RPC envelope right before pushing it to + // Preprocess is invoked on messages in the RPC envelope right before pushing it to // the validation pipeline - PreValidation(from peer.ID, msgs []*Message) + Preprocess(from peer.ID, msgs []*Message) // HandleRPC is invoked to process control messages in the RPC envelope. // It is invoked after subscriptions and payload messages have been processed. HandleRPC(*RPC) @@ -1117,7 +1117,7 @@ func (p *PubSub) handleIncomingRPC(rpc *RPC) { toPush = append(toPush, msg) } } - p.rt.PreValidation(rpc.from, toPush) + p.rt.Preprocess(rpc.from, toPush) for _, msg := range toPush { p.pushMsg(msg) } diff --git a/randomsub.go b/randomsub.go index f9f6473..fe70e43 100644 --- a/randomsub.go +++ b/randomsub.go @@ -94,7 +94,7 @@ func (rs *RandomSubRouter) AcceptFrom(peer.ID) AcceptStatus { return AcceptAll } -func (rs *RandomSubRouter) PreValidation(from peer.ID, msgs []*Message) {} +func (rs *RandomSubRouter) Preprocess(from peer.ID, msgs []*Message) {} func (rs *RandomSubRouter) HandleRPC(rpc *RPC) {} diff --git a/topic.go b/topic.go index a6ad979..b164e32 100644 --- a/topic.go +++ b/topic.go @@ -349,6 +349,7 @@ func (t *Topic) validate(ctx context.Context, data []byte, opts ...PubOpt) (*Mes } msg := &Message{m, "", t.p.host.ID(), pub.validatorData, pub.local} + t.p.rt.Preprocess(t.p.host.ID(), []*Message{msg}) err := t.p.val.ValidateLocal(msg) if err != nil { return nil, err From 38ad16a6872e0ff4cab2fc1ef39e25aa443d97c0 Mon Sep 17 00:00:00 2001 From: Marco Munizaga Date: Tue, 27 May 2025 09:04:26 -0700 Subject: [PATCH 17/27] test: Fix flaky TestMessageBatchPublish (#616) Messages where being dropped in the validation queue if the machine was not fast enough. --- gossipsub_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/gossipsub_test.go b/gossipsub_test.go index 2231352..c37382d 100644 --- a/gossipsub_test.go +++ b/gossipsub_test.go @@ -3591,7 +3591,7 @@ func BenchmarkRoundRobinMessageIDScheduler(b *testing.B) { } func TestMessageBatchPublish(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() hosts := getDefaultHosts(t, 20) @@ -3602,7 +3602,7 @@ func TestMessageBatchPublish(t *testing.T) { } const numMessages = 100 // +8 to account for the gossiping overhead - psubs := getGossipsubs(ctx, hosts, WithMessageIdFn(msgIDFn), WithPeerOutboundQueueSize(numMessages+8)) + psubs := getGossipsubs(ctx, hosts, WithMessageIdFn(msgIDFn), WithPeerOutboundQueueSize(numMessages+8), WithValidateQueueSize(numMessages+8)) var topics []*Topic var msgs []*Subscription @@ -3643,7 +3643,7 @@ func TestMessageBatchPublish(t *testing.T) { for _, sub := range msgs { got, err := sub.Next(ctx) if err != nil { - t.Fatal(sub.err) + t.Fatal(err) } id := msgIDFn(got.Message) expected := []byte(fmt.Sprintf("%s it's not a floooooood %s", id, id)) From c405ca80280609a2b664e2b9a978981524942a0e Mon Sep 17 00:00:00 2001 From: Marco Munizaga Date: Wed, 28 May 2025 22:32:25 -0700 Subject: [PATCH 18/27] refactor: 10x faster RPC splitting (#615) Builds on #582. 10x faster than current master. 0 allocs. 
The basic logic is the same as the old version, except we return an `iter.Seq[RPC]` and yield `RPC` types instead of a slice of `*RPC`. This lets us avoid allocations for heap pointers. Please review @algorandskiy, and let me know if this improves your use case. --- gossipsub.go | 136 +------------------------------ gossipsub_test.go | 190 +++++++++++++++++++++++++++++++++++++++++--- pubsub.go | 198 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 378 insertions(+), 146 deletions(-) diff --git a/gossipsub.go b/gossipsub.go index 3b52efe..68bef4a 100644 --- a/gossipsub.go +++ b/gossipsub.go @@ -1375,14 +1375,13 @@ func (gs *GossipSubRouter) sendRPC(p peer.ID, out *RPC, urgent bool) { } // Potentially split the RPC into multiple RPCs that are below the max message size - outRPCs := appendOrMergeRPC(nil, gs.p.maxMessageSize, *out) - for _, rpc := range outRPCs { + for rpc := range out.split(gs.p.maxMessageSize) { if rpc.Size() > gs.p.maxMessageSize { // This should only happen if a single message/control is above the maxMessageSize. gs.doDropRPC(out, p, fmt.Sprintf("Dropping oversized RPC. Size: %d, limit: %d. (Over by %d bytes)", rpc.Size(), gs.p.maxMessageSize, rpc.Size()-gs.p.maxMessageSize)) continue } - gs.doSendRPC(rpc, p, q, urgent) + gs.doSendRPC(&rpc, p, q, urgent) } } @@ -1412,137 +1411,6 @@ func (gs *GossipSubRouter) doSendRPC(rpc *RPC, p peer.ID, q *rpcQueue, urgent bo gs.tracer.SendRPC(rpc, p) } -// appendOrMergeRPC appends the given RPCs to the slice, merging them if possible. -// If any elem is too large to fit in a single RPC, it will be split into multiple RPCs. -// If an RPC is too large and can't be split further (e.g. Message data is -// bigger than the RPC limit), then it will be returned as an oversized RPC. -// The caller should filter out oversized RPCs. -func appendOrMergeRPC(slice []*RPC, limit int, elems ...RPC) []*RPC { - if len(elems) == 0 { - return slice - } - - if len(slice) == 0 && len(elems) == 1 && elems[0].Size() < limit { - // Fast path: no merging needed and only one element - return append(slice, &elems[0]) - } - - out := slice - if len(out) == 0 { - out = append(out, &RPC{RPC: pb.RPC{}}) - out[0].from = elems[0].from - } - - for _, elem := range elems { - lastRPC := out[len(out)-1] - - // Merge/Append publish messages - // TODO: Never merge messages. The current behavior is the same as the - // old behavior. In the future let's not merge messages. Since, - // it may increase message latency. 
- for _, msg := range elem.GetPublish() { - if lastRPC.Publish = append(lastRPC.Publish, msg); lastRPC.Size() > limit { - lastRPC.Publish = lastRPC.Publish[:len(lastRPC.Publish)-1] - lastRPC = &RPC{RPC: pb.RPC{}, from: elem.from} - lastRPC.Publish = append(lastRPC.Publish, msg) - out = append(out, lastRPC) - } - } - - // Merge/Append Subscriptions - for _, sub := range elem.GetSubscriptions() { - if lastRPC.Subscriptions = append(lastRPC.Subscriptions, sub); lastRPC.Size() > limit { - lastRPC.Subscriptions = lastRPC.Subscriptions[:len(lastRPC.Subscriptions)-1] - lastRPC = &RPC{RPC: pb.RPC{}, from: elem.from} - lastRPC.Subscriptions = append(lastRPC.Subscriptions, sub) - out = append(out, lastRPC) - } - } - - // Merge/Append Control messages - if ctl := elem.GetControl(); ctl != nil { - if lastRPC.Control == nil { - lastRPC.Control = &pb.ControlMessage{} - if lastRPC.Size() > limit { - lastRPC.Control = nil - lastRPC = &RPC{RPC: pb.RPC{Control: &pb.ControlMessage{}}, from: elem.from} - out = append(out, lastRPC) - } - } - - for _, graft := range ctl.GetGraft() { - if lastRPC.Control.Graft = append(lastRPC.Control.Graft, graft); lastRPC.Size() > limit { - lastRPC.Control.Graft = lastRPC.Control.Graft[:len(lastRPC.Control.Graft)-1] - lastRPC = &RPC{RPC: pb.RPC{Control: &pb.ControlMessage{}}, from: elem.from} - lastRPC.Control.Graft = append(lastRPC.Control.Graft, graft) - out = append(out, lastRPC) - } - } - - for _, prune := range ctl.GetPrune() { - if lastRPC.Control.Prune = append(lastRPC.Control.Prune, prune); lastRPC.Size() > limit { - lastRPC.Control.Prune = lastRPC.Control.Prune[:len(lastRPC.Control.Prune)-1] - lastRPC = &RPC{RPC: pb.RPC{Control: &pb.ControlMessage{}}, from: elem.from} - lastRPC.Control.Prune = append(lastRPC.Control.Prune, prune) - out = append(out, lastRPC) - } - } - - for _, iwant := range ctl.GetIwant() { - if len(lastRPC.Control.Iwant) == 0 { - // Initialize with a single IWANT. - // For IWANTs we don't need more than a single one, - // since there are no topic IDs here. 
- newIWant := &pb.ControlIWant{} - if lastRPC.Control.Iwant = append(lastRPC.Control.Iwant, newIWant); lastRPC.Size() > limit { - lastRPC.Control.Iwant = lastRPC.Control.Iwant[:len(lastRPC.Control.Iwant)-1] - lastRPC = &RPC{RPC: pb.RPC{Control: &pb.ControlMessage{ - Iwant: []*pb.ControlIWant{newIWant}, - }}, from: elem.from} - out = append(out, lastRPC) - } - } - for _, msgID := range iwant.GetMessageIDs() { - if lastRPC.Control.Iwant[0].MessageIDs = append(lastRPC.Control.Iwant[0].MessageIDs, msgID); lastRPC.Size() > limit { - lastRPC.Control.Iwant[0].MessageIDs = lastRPC.Control.Iwant[0].MessageIDs[:len(lastRPC.Control.Iwant[0].MessageIDs)-1] - lastRPC = &RPC{RPC: pb.RPC{Control: &pb.ControlMessage{ - Iwant: []*pb.ControlIWant{{MessageIDs: []string{msgID}}}, - }}, from: elem.from} - out = append(out, lastRPC) - } - } - } - - for _, ihave := range ctl.GetIhave() { - if len(lastRPC.Control.Ihave) == 0 || - lastRPC.Control.Ihave[len(lastRPC.Control.Ihave)-1].TopicID != ihave.TopicID { - // Start a new IHAVE if we are referencing a new topic ID - newIhave := &pb.ControlIHave{TopicID: ihave.TopicID} - if lastRPC.Control.Ihave = append(lastRPC.Control.Ihave, newIhave); lastRPC.Size() > limit { - lastRPC.Control.Ihave = lastRPC.Control.Ihave[:len(lastRPC.Control.Ihave)-1] - lastRPC = &RPC{RPC: pb.RPC{Control: &pb.ControlMessage{ - Ihave: []*pb.ControlIHave{newIhave}, - }}, from: elem.from} - out = append(out, lastRPC) - } - } - for _, msgID := range ihave.GetMessageIDs() { - lastIHave := lastRPC.Control.Ihave[len(lastRPC.Control.Ihave)-1] - if lastIHave.MessageIDs = append(lastIHave.MessageIDs, msgID); lastRPC.Size() > limit { - lastIHave.MessageIDs = lastIHave.MessageIDs[:len(lastIHave.MessageIDs)-1] - lastRPC = &RPC{RPC: pb.RPC{Control: &pb.ControlMessage{ - Ihave: []*pb.ControlIHave{{TopicID: ihave.TopicID, MessageIDs: []string{msgID}}}, - }}, from: elem.from} - out = append(out, lastRPC) - } - } - } - } - } - - return out -} - func (gs *GossipSubRouter) heartbeatTimer() { time.Sleep(gs.params.HeartbeatInitialDelay) select { diff --git a/gossipsub_test.go b/gossipsub_test.go index c37382d..7aa5188 100644 --- a/gossipsub_test.go +++ b/gossipsub_test.go @@ -8,7 +8,10 @@ import ( "fmt" "io" mrand "math/rand" + mrand2 "math/rand/v2" + "slices" "sort" + "strconv" "strings" "sync" "sync/atomic" @@ -2341,7 +2344,7 @@ func (iwe *iwantEverything) handleStream(s network.Stream) { } } -func validRPCSizes(slice []*RPC, limit int) bool { +func validRPCSizes(slice []RPC, limit int) bool { for _, rpc := range slice { if rpc.Size() > limit { return false @@ -2351,8 +2354,8 @@ func validRPCSizes(slice []*RPC, limit int) bool { } func TestFragmentRPCFunction(t *testing.T) { - fragmentRPC := func(rpc *RPC, limit int) ([]*RPC, error) { - rpcs := appendOrMergeRPC(nil, limit, *rpc) + fragmentRPC := func(rpc *RPC, limit int) ([]RPC, error) { + rpcs := slices.Collect(rpc.split(limit)) if allValid := validRPCSizes(rpcs, limit); !allValid { return rpcs, fmt.Errorf("RPC size exceeds limit") } @@ -2371,7 +2374,7 @@ func TestFragmentRPCFunction(t *testing.T) { return msg } - ensureBelowLimit := func(rpcs []*RPC) { + ensureBelowLimit := func(rpcs []RPC) { for _, r := range rpcs { if r.Size() > limit { t.Fatalf("expected fragmented RPC to be below %d bytes, was %d", limit, r.Size()) @@ -2387,7 +2390,7 @@ func TestFragmentRPCFunction(t *testing.T) { t.Fatal(err) } if len(results) != 1 { - t.Fatalf("expected single RPC if input is < limit, got %d", len(results)) + t.Fatalf("expected single RPC if input is < limit, got %d 
%#v", len(results), results) } // if there's a message larger than the limit, we should fail @@ -2418,8 +2421,8 @@ func TestFragmentRPCFunction(t *testing.T) { ensureBelowLimit(results) msgsPerRPC := limit / msgSize expectedRPCs := nMessages / msgsPerRPC - if len(results) != expectedRPCs { - t.Fatalf("expected %d RPC messages in output, got %d", expectedRPCs, len(results)) + if len(results) > expectedRPCs+1 { + t.Fatalf("expected around %d RPC messages in output, got %d", expectedRPCs, len(results)) } var nMessagesFragmented int var nSubscriptions int @@ -2514,7 +2517,7 @@ func TestFragmentRPCFunction(t *testing.T) { // Now we return a the giant ID in a RPC by itself so that it can be // dropped before actually sending the RPC. This lets us log the anamoly. // To keep this test useful, we implement the old behavior here. - filtered := make([]*RPC, 0, len(results)) + filtered := make([]RPC, 0, len(results)) for _, r := range results { if r.Size() < limit { filtered = append(filtered, r) @@ -2541,7 +2544,7 @@ func TestFragmentRPCFunction(t *testing.T) { } } -func FuzzAppendOrMergeRPC(f *testing.F) { +func FuzzRPCSplit(f *testing.F) { minMaxMsgSize := 100 maxMaxMsgSize := 2048 f.Fuzz(func(t *testing.T, data []byte) { @@ -2550,14 +2553,102 @@ func FuzzAppendOrMergeRPC(f *testing.F) { maxSize = minMaxMsgSize } rpc := generateRPC(data, maxSize) - rpcs := appendOrMergeRPC(nil, maxSize, *rpc) - if !validRPCSizes(rpcs, maxSize) { - t.Fatalf("invalid RPC size") + originalControl := compressedRPC{ihave: make(map[string][]string)} + originalControl.append(&rpc.RPC) + mergedControl := compressedRPC{ihave: make(map[string][]string)} + + for rpc := range rpc.split(maxSize) { + if rpc.Size() > maxSize { + t.Fatalf("invalid RPC size %v %d (max=%d)", rpc, rpc.Size(), maxSize) + } + mergedControl.append(&rpc.RPC) + } + + if !originalControl.equal(&mergedControl) { + t.Fatalf("control mismatch: \n%#v\n%#v\n", originalControl, mergedControl) + } }) } +type compressedRPC struct { + msgs [][]byte + iwant []string + ihave map[string][]string // topic -> []string + idontwant []string + prune [][]byte + graft []string // list of topic +} + +func (c *compressedRPC) equal(o *compressedRPC) bool { + equalBytesSlices := func(a, b [][]byte) bool { + return slices.EqualFunc(a, b, func(e1 []byte, e2 []byte) bool { + return bytes.Equal(e1, e2) + }) + } + if !equalBytesSlices(c.msgs, o.msgs) { + return false + } + + if !slices.Equal(c.iwant, o.iwant) || + !slices.Equal(c.idontwant, o.idontwant) || + !equalBytesSlices(c.prune, o.prune) || + !slices.Equal(c.graft, o.graft) { + return false + } + + if len(c.ihave) != len(o.ihave) { + return false + } + for topic, ids := range c.ihave { + if !slices.Equal(ids, o.ihave[topic]) { + return false + } + } + + return true + +} + +func (c *compressedRPC) append(rpc *pb.RPC) { + for _, m := range rpc.Publish { + d, err := m.Marshal() + if err != nil { + panic(err) + } + c.msgs = append(c.msgs, d) + } + + ctrl := rpc.Control + if ctrl == nil { + return + } + for _, iwant := range ctrl.Iwant { + c.iwant = append(c.iwant, iwant.MessageIDs...) + c.iwant = slices.DeleteFunc(c.iwant, func(e string) bool { return len(e) == 0 }) + } + for _, ihave := range ctrl.Ihave { + c.ihave[*ihave.TopicID] = append(c.ihave[*ihave.TopicID], ihave.MessageIDs...) + c.ihave[*ihave.TopicID] = slices.DeleteFunc(c.ihave[*ihave.TopicID], func(e string) bool { return len(e) == 0 }) + } + for _, idontwant := range ctrl.Idontwant { + c.idontwant = append(c.idontwant, idontwant.MessageIDs...) 
+ c.idontwant = slices.DeleteFunc(c.idontwant, func(e string) bool { return len(e) == 0 }) + } + for _, prune := range ctrl.Prune { + d, err := prune.Marshal() + if err != nil { + panic(err) + } + c.prune = append(c.prune, d) + } + for _, graft := range ctrl.Graft { + c.graft = append(c.graft, *graft.TopicID) + c.graft = slices.DeleteFunc(c.graft, func(e string) bool { return len(e) == 0 }) + } +} + func TestGossipsubManagesAnAddressBook(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -3675,3 +3766,78 @@ func TestPublishDuplicateMessage(t *testing.T) { t.Fatal("Duplicate message should not return an error") } } + +func genNRpcs(tb testing.TB, n int, maxSize int) []*RPC { + r := mrand2.NewChaCha8([32]byte{}) + rpcs := make([]*RPC, n) + for i := range rpcs { + var data [64]byte + _, err := r.Read(data[:]) + if err != nil { + tb.Fatal(err) + } + rpcs[i] = generateRPC(data[:], maxSize) + } + return rpcs +} + +func BenchmarkSplitRPC(b *testing.B) { + maxSize := 2048 + rpcs := genNRpcs(b, 100, maxSize) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + rpc := rpcs[i%len(rpcs)] + rpc.split(maxSize) + } +} + +func BenchmarkSplitRPCLargeMessages(b *testing.B) { + addToRPC := func(rpc *RPC, numMsgs int, msgSize int) { + msgs := make([]*pb.Message, numMsgs) + payload := make([]byte, msgSize) + for i := range msgs { + rpc.Publish = append(rpc.Publish, &pb.Message{ + Data: payload, + From: []byte(strconv.Itoa(i)), + }) + } + } + + b.Run("Many large messages", func(b *testing.B) { + r := mrand.New(mrand.NewSource(99)) + const numRPCs = 30 + const msgSize = 50 * 1024 + rpc := &RPC{} + for i := 0; i < numRPCs; i++ { + addToRPC(rpc, 20, msgSize+r.Intn(100)) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + for range rpc.split(DefaultMaxMessageSize) { + + } + } + }) + + b.Run("2 large messages", func(b *testing.B) { + const numRPCs = 2 + const msgSize = DefaultMaxMessageSize - 100 + rpc := &RPC{} + for i := 0; i < numRPCs; i++ { + addToRPC(rpc, 1, msgSize) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + count := 0 + for range rpc.split(DefaultMaxMessageSize) { + count++ + } + if count != 2 { + b.Fatalf("expected 2 RPCs, got %d", count) + } + } + }) +} diff --git a/pubsub.go b/pubsub.go index e8e598b..91017d1 100644 --- a/pubsub.go +++ b/pubsub.go @@ -5,6 +5,8 @@ import ( "encoding/binary" "errors" "fmt" + "iter" + "math/bits" "math/rand" "sync" "sync/atomic" @@ -255,6 +257,202 @@ type RPC struct { from peer.ID } +// split splits the given RPC If a sub RPC is too large and can't be split +// further (e.g. Message data is bigger than the RPC limit), then it will be +// returned as an oversized RPC. The caller should filter out oversized RPCs. +func (rpc *RPC) split(limit int) iter.Seq[RPC] { + return func(yield func(RPC) bool) { + nextRPC := RPC{from: rpc.from} + + { + nextRPCSize := 0 + + messagesInNextRPC := 0 + messageSlice := rpc.Publish + + // Merge/Append publish messages. This pattern is optimized compared the + // the patterns for other fields because this is the common cause for + // splitting a message. + for _, msg := range rpc.Publish { + // We know the message field number is <15 so this is safe. + incrementalSize := pbFieldNumberLT15Size + sizeOfEmbeddedMsg(msg.Size()) + if nextRPCSize+incrementalSize > limit { + // The message doesn't fit. 
Let's set the messages that did fit + // into this RPC, yield it, then make a new one + nextRPC.Publish = messageSlice[:messagesInNextRPC] + messageSlice = messageSlice[messagesInNextRPC:] + if !yield(nextRPC) { + return + } + + nextRPC = RPC{from: rpc.from} + nextRPCSize = 0 + messagesInNextRPC = 0 + } + messagesInNextRPC++ + nextRPCSize += incrementalSize + } + + if nextRPCSize > 0 { + // yield the message here for simplicity. We aren't optimally + // packing this RPC, but we avoid successively calling .Size() + // on the messages for the next parts. + nextRPC.Publish = messageSlice[:messagesInNextRPC] + if !yield(nextRPC) { + return + } + nextRPC = RPC{from: rpc.from} + } + } + + // Fast path check. It's possible the original RPC is now small enough + // without the messages to publish + nextRPC = *rpc + nextRPC.Publish = nil + if s := nextRPC.Size(); s < limit { + if s != 0 { + yield(nextRPC) + } + return + } + // We have to split the RPC into multiple parts + nextRPC = RPC{from: rpc.from} + + // Merge/Append Subscriptions + for _, sub := range rpc.Subscriptions { + if nextRPC.Subscriptions = append(nextRPC.Subscriptions, sub); nextRPC.Size() > limit { + nextRPC.Subscriptions = nextRPC.Subscriptions[:len(nextRPC.Subscriptions)-1] + if !yield(nextRPC) { + return + } + + nextRPC = RPC{from: rpc.from} + nextRPC.Subscriptions = append(nextRPC.Subscriptions, sub) + } + } + + // Merge/Append Control messages + if ctl := rpc.Control; ctl != nil { + if nextRPC.Control == nil { + nextRPC.Control = &pb.ControlMessage{} + if nextRPC.Size() > limit { + nextRPC.Control = nil + if !yield(nextRPC) { + return + } + nextRPC = RPC{RPC: pb.RPC{Control: &pb.ControlMessage{}}, from: rpc.from} + } + } + + for _, graft := range ctl.GetGraft() { + if nextRPC.Control.Graft = append(nextRPC.Control.Graft, graft); nextRPC.Size() > limit { + nextRPC.Control.Graft = nextRPC.Control.Graft[:len(nextRPC.Control.Graft)-1] + if !yield(nextRPC) { + return + } + nextRPC = RPC{RPC: pb.RPC{Control: &pb.ControlMessage{}}, from: rpc.from} + nextRPC.Control.Graft = append(nextRPC.Control.Graft, graft) + } + } + + for _, prune := range ctl.GetPrune() { + if nextRPC.Control.Prune = append(nextRPC.Control.Prune, prune); nextRPC.Size() > limit { + nextRPC.Control.Prune = nextRPC.Control.Prune[:len(nextRPC.Control.Prune)-1] + if !yield(nextRPC) { + return + } + nextRPC = RPC{RPC: pb.RPC{Control: &pb.ControlMessage{}}, from: rpc.from} + nextRPC.Control.Prune = append(nextRPC.Control.Prune, prune) + } + } + + for _, iwant := range ctl.GetIwant() { + if len(nextRPC.Control.Iwant) == 0 { + // Initialize with a single IWANT. + // For IWANTs we don't need more than a single one, + // since there are no topic IDs here. 
+ newIWant := &pb.ControlIWant{} + if nextRPC.Control.Iwant = append(nextRPC.Control.Iwant, newIWant); nextRPC.Size() > limit { + nextRPC.Control.Iwant = nextRPC.Control.Iwant[:len(nextRPC.Control.Iwant)-1] + if !yield(nextRPC) { + return + } + nextRPC = RPC{RPC: pb.RPC{Control: &pb.ControlMessage{ + Iwant: []*pb.ControlIWant{newIWant}, + }}, from: rpc.from} + } + } + for _, msgID := range iwant.GetMessageIDs() { + if nextRPC.Control.Iwant[0].MessageIDs = append(nextRPC.Control.Iwant[0].MessageIDs, msgID); nextRPC.Size() > limit { + nextRPC.Control.Iwant[0].MessageIDs = nextRPC.Control.Iwant[0].MessageIDs[:len(nextRPC.Control.Iwant[0].MessageIDs)-1] + if !yield(nextRPC) { + return + } + nextRPC = RPC{RPC: pb.RPC{Control: &pb.ControlMessage{ + Iwant: []*pb.ControlIWant{{MessageIDs: []string{msgID}}}, + }}, from: rpc.from} + } + } + } + + for _, ihave := range ctl.GetIhave() { + if len(nextRPC.Control.Ihave) == 0 || + nextRPC.Control.Ihave[len(nextRPC.Control.Ihave)-1].TopicID != ihave.TopicID { + // Start a new IHAVE if we are referencing a new topic ID + newIhave := &pb.ControlIHave{TopicID: ihave.TopicID} + if nextRPC.Control.Ihave = append(nextRPC.Control.Ihave, newIhave); nextRPC.Size() > limit { + nextRPC.Control.Ihave = nextRPC.Control.Ihave[:len(nextRPC.Control.Ihave)-1] + if !yield(nextRPC) { + return + } + nextRPC = RPC{RPC: pb.RPC{Control: &pb.ControlMessage{ + Ihave: []*pb.ControlIHave{newIhave}, + }}, from: rpc.from} + } + } + for _, msgID := range ihave.GetMessageIDs() { + lastIHave := nextRPC.Control.Ihave[len(nextRPC.Control.Ihave)-1] + if lastIHave.MessageIDs = append(lastIHave.MessageIDs, msgID); nextRPC.Size() > limit { + lastIHave.MessageIDs = lastIHave.MessageIDs[:len(lastIHave.MessageIDs)-1] + if !yield(nextRPC) { + return + } + nextRPC = RPC{RPC: pb.RPC{Control: &pb.ControlMessage{ + Ihave: []*pb.ControlIHave{{TopicID: ihave.TopicID, MessageIDs: []string{msgID}}}, + }}, from: rpc.from} + } + } + } + } + + if nextRPC.Size() > 0 { + if !yield(nextRPC) { + return + } + } + } +} + +// pbFieldNumberLT15Size is the number of bytes required to encode a protobuf +// field number less than or equal to 15 along with its wire type. This is 1 +// byte because the protobuf encoding of field numbers is a varint encoding of: +// fieldNumber << 3 | wireType +// Refer to https://protobuf.dev/programming-guides/encoding/#structure +// for more details on the encoding of messages. You may also reference the +// concrete implementation of pb.RPC.Size() +const pbFieldNumberLT15Size = 1 + +func sovRpc(x uint64) (n int) { + return (bits.Len64(x) + 6) / 7 +} + +func sizeOfEmbeddedMsg( + msgSize int, +) int { + prefixSize := sovRpc(uint64(msgSize)) + return prefixSize + msgSize +} + type Option func(*PubSub) error // NewPubSub returns a new PubSub management object. From 3f89e4331c981a6b60206b762a10d015c04875a8 Mon Sep 17 00:00:00 2001 From: Marco Munizaga Date: Thu, 29 May 2025 15:58:51 -0700 Subject: [PATCH 19/27] Release v0.14.0 (#614) This release contains a couple fixes and the new Batch Publishing feature. - #607 Batch Publishing. Useful if you are publishing a group of related messages at once - #612 Send IDONTWANT before initial publish. Useful when many nodes may publish the same message at once. - #609 Avoid sending an extra "IDONTWANT" to the peer that just sent you a message. - #615 10x faster rpc splitting. 
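
A minimal usage sketch of the Batch Publishing flow from #607, modeled on the calls exercised by the tests in this series; the helper function and the pre-existing `ps`, `topic`, and `payloads` values are illustrative assumptions, while `MessageBatch`, `AddToBatch`, and `PublishBatch` are the APIs this release adds:

```go
package example

import (
	"context"

	pubsub "github.com/libp2p/go-libp2p-pubsub"
)

// publishAsBatch stages a group of related payloads on a joined topic and
// hands them to the router in a single call, letting the scheduler decide
// the order of the outgoing RPCs. ps and topic are assumed to be an
// already-constructed *pubsub.PubSub and a joined *pubsub.Topic.
func publishAsBatch(ctx context.Context, ps *pubsub.PubSub, topic *pubsub.Topic, payloads [][]byte) error {
	var batch pubsub.MessageBatch
	for _, data := range payloads {
		// AddToBatch validates and stages each message without sending it yet.
		if err := topic.AddToBatch(ctx, &batch, data); err != nil {
			return err
		}
	}
	// PublishBatch hands the whole staged group to the router at once.
	return ps.PublishBatch(&batch)
}
```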
--- version.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.json b/version.json index d3f7968..7dd7b57 100644 --- a/version.json +++ b/version.json @@ -1,3 +1,3 @@ { - "version": "v0.13.0" + "version": "v0.14.0" } From fedbccc0c69d091d1fcbcfe5709a3f03a2aecb9c Mon Sep 17 00:00:00 2001 From: Marco Munizaga Date: Wed, 25 Jun 2025 12:38:21 -0700 Subject: [PATCH 20/27] fix(BatchPublishing): Make topic.AddToBatch threadsafe (#622) topic.Publish is already thread safe. topic.AddToBatch should strive to follow similar semantics. Looking at how this would integrate with Prysm, they use separate goroutines per message they'd like to batch. --- gossipsub_test.go | 117 +++++++++++++++++++++++++++------------------- messagebatch.go | 16 +++++++ pubsub.go | 4 +- topic.go | 2 +- 4 files changed, 86 insertions(+), 53 deletions(-) diff --git a/gossipsub_test.go b/gossipsub_test.go index 7aa5188..9f450d8 100644 --- a/gossipsub_test.go +++ b/gossipsub_test.go @@ -3682,66 +3682,85 @@ func BenchmarkRoundRobinMessageIDScheduler(b *testing.B) { } func TestMessageBatchPublish(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - hosts := getDefaultHosts(t, 20) + concurrentAdds := []bool{false, true} + for _, concurrentAdd := range concurrentAdds { + t.Run(fmt.Sprintf("WithConcurrentAdd=%v", concurrentAdd), func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + hosts := getDefaultHosts(t, 20) - msgIDFn := func(msg *pb.Message) string { - hdr := string(msg.Data[0:16]) - msgID := strings.SplitN(hdr, " ", 2) - return msgID[0] - } - const numMessages = 100 - // +8 to account for the gossiping overhead - psubs := getGossipsubs(ctx, hosts, WithMessageIdFn(msgIDFn), WithPeerOutboundQueueSize(numMessages+8), WithValidateQueueSize(numMessages+8)) + msgIDFn := func(msg *pb.Message) string { + hdr := string(msg.Data[0:16]) + msgID := strings.SplitN(hdr, " ", 2) + return msgID[0] + } + const numMessages = 100 + // +8 to account for the gossiping overhead + psubs := getGossipsubs(ctx, hosts, WithMessageIdFn(msgIDFn), WithPeerOutboundQueueSize(numMessages+8), WithValidateQueueSize(numMessages+8)) - var topics []*Topic - var msgs []*Subscription - for _, ps := range psubs { - topic, err := ps.Join("foobar") - if err != nil { - t.Fatal(err) - } - topics = append(topics, topic) + var topics []*Topic + var msgs []*Subscription + for _, ps := range psubs { + topic, err := ps.Join("foobar") + if err != nil { + t.Fatal(err) + } + topics = append(topics, topic) - subch, err := topic.Subscribe(WithBufferSize(numMessages + 8)) - if err != nil { - t.Fatal(err) - } + subch, err := topic.Subscribe(WithBufferSize(numMessages + 8)) + if err != nil { + t.Fatal(err) + } - msgs = append(msgs, subch) - } + msgs = append(msgs, subch) + } - sparseConnect(t, hosts) + sparseConnect(t, hosts) - // wait for heartbeats to build mesh - time.Sleep(time.Second * 2) + // wait for heartbeats to build mesh + time.Sleep(time.Second * 2) - var batch MessageBatch - for i := 0; i < numMessages; i++ { - msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) - err := topics[0].AddToBatch(ctx, &batch, msg) - if err != nil { - t.Fatal(err) - } - } - err := psubs[0].PublishBatch(&batch) - if err != nil { - t.Fatal(err) - } - - for range numMessages { - for _, sub := range msgs { - got, err := sub.Next(ctx) + var batch MessageBatch + var wg sync.WaitGroup + for i := 0; i < numMessages; i++ { + msg := 
[]byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) + if concurrentAdd { + wg.Add(1) + go func() { + defer wg.Done() + err := topics[0].AddToBatch(ctx, &batch, msg) + if err != nil { + t.Log(err) + t.Fail() + } + }() + } else { + err := topics[0].AddToBatch(ctx, &batch, msg) + if err != nil { + t.Fatal(err) + } + } + } + wg.Wait() + err := psubs[0].PublishBatch(&batch) if err != nil { t.Fatal(err) } - id := msgIDFn(got.Message) - expected := []byte(fmt.Sprintf("%s it's not a floooooood %s", id, id)) - if !bytes.Equal(expected, got.Data) { - t.Fatal("got wrong message!") + + for range numMessages { + for _, sub := range msgs { + got, err := sub.Next(ctx) + if err != nil { + t.Fatal(err) + } + id := msgIDFn(got.Message) + expected := []byte(fmt.Sprintf("%s it's not a floooooood %s", id, id)) + if !bytes.Equal(expected, got.Data) { + t.Fatal("got wrong message!") + } + } } - } + }) } } diff --git a/messagebatch.go b/messagebatch.go index 8178645..55941d0 100644 --- a/messagebatch.go +++ b/messagebatch.go @@ -2,6 +2,7 @@ package pubsub import ( "iter" + "sync" "github.com/libp2p/go-libp2p/core/peer" ) @@ -10,9 +11,24 @@ import ( // once. This allows the Scheduler to define an order for outgoing RPCs. // This helps bandwidth constrained peers. type MessageBatch struct { + mu sync.Mutex messages []*Message } +func (mb *MessageBatch) add(msg *Message) { + mb.mu.Lock() + defer mb.mu.Unlock() + mb.messages = append(mb.messages, msg) +} + +func (mb *MessageBatch) take() []*Message { + mb.mu.Lock() + defer mb.mu.Unlock() + messages := mb.messages + mb.messages = nil + return messages +} + type messageBatchAndPublishOptions struct { messages []*Message opts *BatchPublishOptions diff --git a/pubsub.go b/pubsub.go index 91017d1..3af9888 100644 --- a/pubsub.go +++ b/pubsub.go @@ -1600,12 +1600,10 @@ func (p *PubSub) PublishBatch(batch *MessageBatch, opts ...BatchPubOpt) error { setDefaultBatchPublishOptions(publishOptions) p.sendMessageBatch <- messageBatchAndPublishOptions{ - messages: batch.messages, + messages: batch.take(), opts: publishOptions, } - // Clear the batch's messages in case a user reuses the same batch object - batch.messages = nil return nil } diff --git a/topic.go b/topic.go index b164e32..c438ebc 100644 --- a/topic.go +++ b/topic.go @@ -257,7 +257,7 @@ func (t *Topic) AddToBatch(ctx context.Context, batch *MessageBatch, data []byte } return err } - batch.messages = append(batch.messages, msg) + batch.add(msg) return nil } From ae65ce484ebbf21d4baf92d5b875eade9f002057 Mon Sep 17 00:00:00 2001 From: Marco Munizaga Date: Wed, 25 Jun 2025 16:15:26 -0700 Subject: [PATCH 21/27] Release v0.14.1 (#623) Includes #622 --- version.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.json b/version.json index 7dd7b57..f49d0df 100644 --- a/version.json +++ b/version.json @@ -1,3 +1,3 @@ { - "version": "v0.14.0" + "version": "v0.14.1" } From e38c340f93f463b690917ad21fc5170969ef565c Mon Sep 17 00:00:00 2001 From: Marco Munizaga Date: Thu, 3 Jul 2025 11:10:37 -0700 Subject: [PATCH 22/27] Fix race when calling Preprocess and msg ID generator(#627) Closes #624 --- midgen.go | 3 +++ topic.go | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/midgen.go b/midgen.go index 9d3acfc..291e297 100644 --- a/midgen.go +++ b/midgen.go @@ -9,6 +9,7 @@ import ( // msgIDGenerator handles computing IDs for msgs // It allows setting custom generators(MsgIdFunction) per topic type msgIDGenerator struct { + sync.Mutex Default MsgIdFunction topicGensLk sync.RWMutex @@ 
-31,6 +32,8 @@ func (m *msgIDGenerator) Set(topic string, gen MsgIdFunction) { // ID computes ID for the msg or short-circuits with the cached value. func (m *msgIDGenerator) ID(msg *Message) string { + m.Lock() + defer m.Unlock() if msg.ID != "" { return msg.ID } diff --git a/topic.go b/topic.go index c438ebc..3a65052 100644 --- a/topic.go +++ b/topic.go @@ -349,7 +349,9 @@ func (t *Topic) validate(ctx context.Context, data []byte, opts ...PubOpt) (*Mes } msg := &Message{m, "", t.p.host.ID(), pub.validatorData, pub.local} - t.p.rt.Preprocess(t.p.host.ID(), []*Message{msg}) + t.p.eval <- func() { + t.p.rt.Preprocess(t.p.host.ID(), []*Message{msg}) + } err := t.p.val.ValidateLocal(msg) if err != nil { return nil, err From 631e47b133f56802f6b3c3f8ab0dd344920c81ae Mon Sep 17 00:00:00 2001 From: Marco Munizaga Date: Thu, 3 Jul 2025 11:37:53 -0700 Subject: [PATCH 23/27] Fix test races and enable race tests in CI (#626) closes #624 --- .github/workflows/go-test-config.json | 3 +- backoff_test.go | 10 ++- floodsub_test.go | 22 +++++-- gossipsub_spam_test.go | 5 ++ gossipsub_test.go | 90 +++++++++++++++++---------- pubsub_test.go | 4 +- timecache/first_seen_cache.go | 6 +- timecache/first_seen_cache_test.go | 8 +-- timecache/last_seen_cache.go | 6 +- timecache/last_seen_cache_test.go | 7 +-- timecache/util.go | 6 +- validation_builtin_test.go | 17 ++++- 12 files changed, 125 insertions(+), 59 deletions(-) diff --git a/.github/workflows/go-test-config.json b/.github/workflows/go-test-config.json index b0642fb..879d74a 100644 --- a/.github/workflows/go-test-config.json +++ b/.github/workflows/go-test-config.json @@ -1,4 +1,3 @@ { - "skipOSes": ["windows", "macos"], - "skipRace": true + "skipOSes": ["windows", "macos"] } diff --git a/backoff_test.go b/backoff_test.go index 4cedbe1..542aceb 100644 --- a/backoff_test.go +++ b/backoff_test.go @@ -96,11 +96,17 @@ func TestBackoff_Clean(t *testing.T) { if err != nil { t.Fatalf("unexpected error post update: %s", err) } + b.mu.Lock() b.info[id].lastTried = time.Now().Add(-TimeToLive) // enforces expiry + b.mu.Unlock() } - if len(b.info) != size { - t.Fatalf("info map size mismatch, expected: %d, got: %d", size, len(b.info)) + b.mu.Lock() + infoLen := len(b.info) + b.mu.Unlock() + + if infoLen != size { + t.Fatalf("info map size mismatch, expected: %d, got: %d", size, infoLen) } // waits for a cleanup loop to kick-in diff --git a/floodsub_test.go b/floodsub_test.go index 8efedaa..13c698f 100644 --- a/floodsub_test.go +++ b/floodsub_test.go @@ -268,8 +268,11 @@ func TestReconnects(t *testing.T) { t.Fatal("timed out waiting for B chan to be closed") } - nSubs := len(psubs[2].mySubs["cats"]) - if nSubs > 0 { + nSubs := make(chan int) + psubs[2].eval <- func() { + nSubs <- len(psubs[2].mySubs["cats"]) + } + if <-nSubs > 0 { t.Fatal(`B should have 0 subscribers for channel "cats", has`, nSubs) } @@ -866,9 +869,14 @@ func TestImproperlySignedMessageRejected(t *testing.T) { t.Fatal(err) } - var adversaryMessages []*Message + adversaryMessagesCh := make(chan []*Message) + adversaryContext, adversaryCancel := context.WithCancel(ctx) go func(ctx context.Context) { + var adversaryMessages []*Message + defer func() { + adversaryMessagesCh <- adversaryMessages + }() for { select { case <-ctx.Done(): @@ -885,6 +893,7 @@ func TestImproperlySignedMessageRejected(t *testing.T) { <-time.After(1 * time.Second) adversaryCancel() + adversaryMessages := <-adversaryMessagesCh // Ensure the adversary successfully publishes the incorrectly signed // message. 
If the adversary "sees" this, we successfully got through @@ -895,9 +904,13 @@ func TestImproperlySignedMessageRejected(t *testing.T) { // the honest peer's validation process will drop the message; // next will never furnish the incorrect message. - var honestPeerMessages []*Message + honestPeerMessagesCh := make(chan []*Message) honestPeerContext, honestPeerCancel := context.WithCancel(ctx) go func(ctx context.Context) { + var honestPeerMessages []*Message + defer func() { + honestPeerMessagesCh <- honestPeerMessages + }() for { select { case <-ctx.Done(): @@ -915,6 +928,7 @@ func TestImproperlySignedMessageRejected(t *testing.T) { <-time.After(1 * time.Second) honestPeerCancel() + honestPeerMessages := <-honestPeerMessagesCh if len(honestPeerMessages) != 1 { t.Fatalf("got %d messages, expected 1", len(honestPeerMessages)) } diff --git a/gossipsub_spam_test.go b/gossipsub_spam_test.go index 9f6f0f9..e1f16b6 100644 --- a/gossipsub_spam_test.go +++ b/gossipsub_spam_test.go @@ -797,7 +797,10 @@ func TestGossipsubAttackSpamIDONTWANT(t *testing.T) { // Checks we received some messages var expMid string var actMids []string + var mu sync.Mutex checkMsgs := func() { + mu.Lock() + defer mu.Unlock() if len(actMids) == 0 { t.Fatalf("Expected some messages when the maximum number of IDONTWANTs is reached") } @@ -822,6 +825,8 @@ func TestGossipsubAttackSpamIDONTWANT(t *testing.T) { }() newMockGS(ctx, t, hosts[2], func(writeMsg func(*pb.RPC), irpc *pb.RPC) { + mu.Lock() + defer mu.Unlock() // Each time the host receives a message for _, msg := range irpc.GetPublish() { actMids = append(actMids, msgID(msg)) diff --git a/gossipsub_test.go b/gossipsub_test.go index 9f450d8..d0f905d 100644 --- a/gossipsub_test.go +++ b/gossipsub_test.go @@ -1989,6 +1989,27 @@ func TestGossipSubLeaveTopic(t *testing.T) { <-done } +// withRouter is a race-free way of accessing state from the PubSubRouter. +// It runs the callback synchronously +func withRouter(p *PubSub, f func(r PubSubRouter)) { + done := make(chan struct{}) + p.eval <- func() { + defer close(done) + router := p.rt + f(router) + } + <-done +} + +// withGSRouter is a race-free way of accessing state from the GossipSubRouter. +// It runs the callback synchronously +func withGSRouter(p *PubSub, f func(r *GossipSubRouter)) { + withRouter(p, func(r PubSubRouter) { + router := p.rt.(*GossipSubRouter) + f(router) + }) +} + func TestGossipSubJoinTopic(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -2003,13 +2024,15 @@ func TestGossipSubJoinTopic(t *testing.T) { connect(t, h[0], h[1]) connect(t, h[0], h[2]) - router0 := psubs[0].rt.(*GossipSubRouter) - // Add in backoff for peer. 
peerMap := make(map[peer.ID]time.Time) - peerMap[h[1].ID()] = time.Now().Add(router0.params.UnsubscribeBackoff) + withGSRouter(psubs[0], func(router0 *GossipSubRouter) { + peerMap[h[1].ID()] = time.Now().Add(router0.params.UnsubscribeBackoff) + }) - router0.backoff["test"] = peerMap + withGSRouter(psubs[0], func(router0 *GossipSubRouter) { + router0.backoff["test"] = peerMap + }) // Join all peers for _, ps := range psubs { @@ -2021,15 +2044,16 @@ func TestGossipSubJoinTopic(t *testing.T) { time.Sleep(time.Second) - meshMap := router0.mesh["test"] - if len(meshMap) != 1 { - t.Fatalf("Unexpect peer included in the mesh") - } - - _, ok := meshMap[h[1].ID()] - if ok { - t.Fatalf("Peer that was to be backed off is included in the mesh") - } + withGSRouter(psubs[0], func(router0 *GossipSubRouter) { + meshMap := router0.mesh["test"] + if len(meshMap) != 1 { + t.Fatalf("Unexpect peer included in the mesh") + } + _, ok := meshMap[h[1].ID()] + if ok { + t.Fatalf("Peer that was to be backed off is included in the mesh") + } + }) } type sybilSquatter struct { @@ -2697,10 +2721,10 @@ func TestGossipsubIdontwantSend(t *testing.T) { return base64.URLEncoding.EncodeToString(pmsg.Data) } - validated := false + var validated atomic.Bool validate := func(context.Context, peer.ID, *Message) bool { time.Sleep(100 * time.Millisecond) - validated = true + validated.Store(true) return true } @@ -2798,7 +2822,7 @@ func TestGossipsubIdontwantSend(t *testing.T) { for _, idonthave := range irpc.GetControl().GetIdontwant() { // If true, it means that, when we get IDONTWANT, the middle peer has done validation // already, which should not be the case - if validated { + if validated.Load() { t.Fatalf("IDONTWANT should be sent before doing validation") } for _, mid := range idonthave.GetMessageIDs() { @@ -3333,13 +3357,13 @@ func TestGossipsubIdontwantBeforeIwant(t *testing.T) { msgTimer := time.NewTimer(msgWaitMax) // Checks we received right messages - msgReceived := false - ihaveReceived := false + var msgReceived atomic.Bool + var ihaveReceived atomic.Bool checkMsgs := func() { - if msgReceived { + if msgReceived.Load() { t.Fatalf("Expected no messages received after IDONWANT") } - if !ihaveReceived { + if !ihaveReceived.Load() { t.Fatalf("Expected IHAVE received") } } @@ -3359,11 +3383,11 @@ func TestGossipsubIdontwantBeforeIwant(t *testing.T) { newMockGS(ctx, t, hosts[2], func(writeMsg func(*pb.RPC), irpc *pb.RPC) { // Check if it receives any message if len(irpc.GetPublish()) > 0 { - msgReceived = true + msgReceived.Store(true) } // The middle peer is supposed to send IHAVE for _, ihave := range irpc.GetControl().GetIhave() { - ihaveReceived = true + ihaveReceived.Store(true) mids := ihave.GetMessageIDs() writeMsg(&pb.RPC{ @@ -3437,9 +3461,9 @@ func TestGossipsubIdontwantClear(t *testing.T) { msgTimer := time.NewTimer(msgWaitMax) // Checks we received some message after the IDONTWANT is cleared - received := false + var received atomic.Bool checkMsgs := func() { - if !received { + if !received.Load() { t.Fatalf("Expected some message after the IDONTWANT is cleared") } } @@ -3459,7 +3483,7 @@ func TestGossipsubIdontwantClear(t *testing.T) { newMockGS(ctx, t, hosts[2], func(writeMsg func(*pb.RPC), irpc *pb.RPC) { // Check if it receives any message if len(irpc.GetPublish()) > 0 { - received = true + received.Store(true) } // When the middle peer connects it will send us its subscriptions for _, sub := range irpc.GetSubscriptions() { @@ -3544,13 +3568,15 @@ func TestGossipsubPruneMeshCorrectly(t *testing.T) { 
totalTimeToWait := params.HeartbeatInitialDelay + 2*params.HeartbeatInterval time.Sleep(totalTimeToWait) - meshPeers, ok := psubs[0].rt.(*GossipSubRouter).mesh[topic] - if !ok { - t.Fatal("mesh does not exist for topic") - } - if len(meshPeers) != params.D { - t.Fatalf("mesh does not have the correct number of peers. Wanted %d but got %d", params.D, len(meshPeers)) - } + withGSRouter(psubs[0], func(rt *GossipSubRouter) { + meshPeers, ok := rt.mesh[topic] + if !ok { + t.Fatal("mesh does not exist for topic") + } + if len(meshPeers) != params.D { + t.Fatalf("mesh does not have the correct number of peers. Wanted %d but got %d", params.D, len(meshPeers)) + } + }) } func BenchmarkAllocDoDropRPC(b *testing.B) { diff --git a/pubsub_test.go b/pubsub_test.go index 245a69d..37fbbf2 100644 --- a/pubsub_test.go +++ b/pubsub_test.go @@ -40,7 +40,9 @@ func TestPubSubRemovesBlacklistedPeer(t *testing.T) { // Bad peer is blacklisted after it has connected. // Calling p.BlacklistPeer directly does the right thing but we should also clean // up the peer if it has been added the the blacklist by another means. - bl.Add(hosts[0].ID()) + withRouter(psubs1, func(r PubSubRouter) { + bl.Add(hosts[0].ID()) + }) _, err := psubs0.Subscribe("test") if err != nil { diff --git a/timecache/first_seen_cache.go b/timecache/first_seen_cache.go index 457391c..ea9dd2d 100644 --- a/timecache/first_seen_cache.go +++ b/timecache/first_seen_cache.go @@ -18,6 +18,10 @@ type FirstSeenCache struct { var _ TimeCache = (*FirstSeenCache)(nil) func newFirstSeenCache(ttl time.Duration) *FirstSeenCache { + return newFirstSeenCacheWithSweepInterval(ttl, backgroundSweepInterval) +} + +func newFirstSeenCacheWithSweepInterval(ttl time.Duration, sweepInterval time.Duration) *FirstSeenCache { tc := &FirstSeenCache{ m: make(map[string]time.Time), ttl: ttl, @@ -25,7 +29,7 @@ func newFirstSeenCache(ttl time.Duration) *FirstSeenCache { ctx, done := context.WithCancel(context.Background()) tc.done = done - go background(ctx, &tc.lk, tc.m) + go background(ctx, &tc.lk, tc.m, sweepInterval) return tc } diff --git a/timecache/first_seen_cache_test.go b/timecache/first_seen_cache_test.go index 59d2a59..10f69c9 100644 --- a/timecache/first_seen_cache_test.go +++ b/timecache/first_seen_cache_test.go @@ -17,9 +17,7 @@ func TestFirstSeenCacheFound(t *testing.T) { } func TestFirstSeenCacheExpire(t *testing.T) { - backgroundSweepInterval = time.Second - - tc := newFirstSeenCache(time.Second) + tc := newFirstSeenCacheWithSweepInterval(time.Second, time.Second) for i := 0; i < 10; i++ { tc.Add(fmt.Sprint(i)) time.Sleep(time.Millisecond * 100) @@ -34,9 +32,7 @@ func TestFirstSeenCacheExpire(t *testing.T) { } func TestFirstSeenCacheNotFoundAfterExpire(t *testing.T) { - backgroundSweepInterval = time.Second - - tc := newFirstSeenCache(time.Second) + tc := newFirstSeenCacheWithSweepInterval(time.Second, time.Second) tc.Add(fmt.Sprint(0)) time.Sleep(2 * time.Second) diff --git a/timecache/last_seen_cache.go b/timecache/last_seen_cache.go index 128c299..676e0ae 100644 --- a/timecache/last_seen_cache.go +++ b/timecache/last_seen_cache.go @@ -19,6 +19,10 @@ type LastSeenCache struct { var _ TimeCache = (*LastSeenCache)(nil) func newLastSeenCache(ttl time.Duration) *LastSeenCache { + return newLastSeenCacheWithSweepInterval(ttl, backgroundSweepInterval) +} + +func newLastSeenCacheWithSweepInterval(ttl time.Duration, sweepInterval time.Duration) *LastSeenCache { tc := &LastSeenCache{ m: make(map[string]time.Time), ttl: ttl, @@ -26,7 +30,7 @@ func newLastSeenCache(ttl 
time.Duration) *LastSeenCache { ctx, done := context.WithCancel(context.Background()) tc.done = done - go background(ctx, &tc.lk, tc.m) + go background(ctx, &tc.lk, tc.m, sweepInterval) return tc } diff --git a/timecache/last_seen_cache_test.go b/timecache/last_seen_cache_test.go index 4522026..a320093 100644 --- a/timecache/last_seen_cache_test.go +++ b/timecache/last_seen_cache_test.go @@ -17,8 +17,7 @@ func TestLastSeenCacheFound(t *testing.T) { } func TestLastSeenCacheExpire(t *testing.T) { - backgroundSweepInterval = time.Second - tc := newLastSeenCache(time.Second) + tc := newLastSeenCacheWithSweepInterval(time.Second, time.Second) for i := 0; i < 11; i++ { tc.Add(fmt.Sprint(i)) time.Sleep(time.Millisecond * 100) @@ -80,9 +79,7 @@ func TestLastSeenCacheSlideForward(t *testing.T) { } func TestLastSeenCacheNotFoundAfterExpire(t *testing.T) { - backgroundSweepInterval = time.Second - - tc := newLastSeenCache(time.Second) + tc := newLastSeenCacheWithSweepInterval(time.Second, time.Second) tc.Add(fmt.Sprint(0)) time.Sleep(2 * time.Second) diff --git a/timecache/util.go b/timecache/util.go index eaf92b3..5370572 100644 --- a/timecache/util.go +++ b/timecache/util.go @@ -6,10 +6,10 @@ import ( "time" ) -var backgroundSweepInterval = time.Minute +const backgroundSweepInterval = time.Minute -func background(ctx context.Context, lk sync.Locker, m map[string]time.Time) { - ticker := time.NewTicker(backgroundSweepInterval) +func background(ctx context.Context, lk sync.Locker, m map[string]time.Time, tickerDur time.Duration) { + ticker := time.NewTicker(tickerDur) defer ticker.Stop() for { diff --git a/validation_builtin_test.go b/validation_builtin_test.go index bca8774..ed57753 100644 --- a/validation_builtin_test.go +++ b/validation_builtin_test.go @@ -20,10 +20,23 @@ import ( pb "github.com/libp2p/go-libp2p-pubsub/pb" ) -var rng *rand.Rand +var rng *concurrentRNG + +type concurrentRNG struct { + mu sync.Mutex + rng *rand.Rand +} + +func (r *concurrentRNG) Intn(n int) int { + r.mu.Lock() + defer r.mu.Unlock() + return r.rng.Intn(n) +} func init() { - rng = rand.New(rand.NewSource(314159)) + rng = &concurrentRNG{ + rng: rand.New(rand.NewSource(314159)), + } } func TestBasicSeqnoValidator1(t *testing.T) { From bc7e2e619dd243c6b2636181d1d89080b43b6c9f Mon Sep 17 00:00:00 2001 From: Marco Munizaga Date: Thu, 3 Jul 2025 12:04:07 -0700 Subject: [PATCH 24/27] Skip 32-bit tests in CI (#628) This is roughly a third of our CI time, and, as far as I know, running 32bit tests has never caught an issue. Also, I'm unaware of anyone using this library on a 32bit x86 system. I believe the last 32bit x86 CPU released was the [pentium 4](https://en.wikipedia.org/wiki/List_of_Intel_Pentium_4_processors) close to 20 years ago. 
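
The race-enabled suite from the previous patch can still be exercised locally with the stock Go toolchain, for example (the package selector here is only an example):

```sh
go test -race ./...
```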
--- .github/workflows/go-test-config.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/go-test-config.json b/.github/workflows/go-test-config.json index 879d74a..00cef44 100644 --- a/.github/workflows/go-test-config.json +++ b/.github/workflows/go-test-config.json @@ -1,3 +1,4 @@ { - "skipOSes": ["windows", "macos"] + "skipOSes": ["windows", "macos"], + "skip32bit": true } From abb8f8a2cd5aee610e16de66d63cd539a353e166 Mon Sep 17 00:00:00 2001 From: Marco Munizaga Date: Thu, 3 Jul 2025 13:44:54 -0700 Subject: [PATCH 25/27] Release v0.14.2 (#629) Includes #627 --- version.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.json b/version.json index f49d0df..82827c5 100644 --- a/version.json +++ b/version.json @@ -1,3 +1,3 @@ { - "version": "v0.14.1" + "version": "v0.14.2" } From ee9c8434f99818972c6b26fd9ec8bebe0b1c829a Mon Sep 17 00:00:00 2001 From: web3-bot <81333946+web3-bot@users.noreply.github.com> Date: Thu, 21 Aug 2025 09:13:04 +0100 Subject: [PATCH 26/27] ci: uci/update-go (#638) This PR was created automatically by the @web3-bot as a part of the [Unified CI](https://github.com/ipdxco/unified-github-workflows) project. --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 75f4cd2..19d8fdb 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/libp2p/go-libp2p-pubsub -go 1.23 +go 1.24 require ( github.com/benbjohnson/clock v1.3.5 From ab876fc71c34e89a7f0c8f4e361720ca9fa8588a Mon Sep 17 00:00:00 2001 From: Dat Duong Date: Fri, 22 Aug 2025 07:01:05 +0700 Subject: [PATCH 27/27] fix: Select ctx.Done() when preprocessing to avoid blocking on cancel (#635) Close #636 The PR updates the send logic to use a select with ctx.Done() and t.p.ctx.Done(), ensuring the operation terminates gracefully. A test case reproducing the issue is included in the PR for verification. --- topic.go | 8 +++++++- topic_test.go | 17 +++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/topic.go b/topic.go index 3a65052..dd094ea 100644 --- a/topic.go +++ b/topic.go @@ -349,8 +349,14 @@ func (t *Topic) validate(ctx context.Context, data []byte, opts ...PubOpt) (*Mes } msg := &Message{m, "", t.p.host.ID(), pub.validatorData, pub.local} - t.p.eval <- func() { + select { + case t.p.eval <- func() { t.p.rt.Preprocess(t.p.host.ID(), []*Message{msg}) + }: + case <-t.p.ctx.Done(): + return nil, t.p.ctx.Err() + case <-ctx.Done(): + return nil, ctx.Err() } err := t.p.val.ValidateLocal(msg) if err != nil { diff --git a/topic_test.go b/topic_test.go index ef05feb..aa96cf5 100644 --- a/topic_test.go +++ b/topic_test.go @@ -951,6 +951,23 @@ func TestTopicPublishWithKeyInvalidParameters(t *testing.T) { }) } +func TestTopicPublishWithContextCanceled(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + const topic = "foobar" + const numHosts = 5 + + hosts := getDefaultHosts(t, numHosts) + topics := getTopics(getPubsubs(ctx, hosts), topic) + cancel() + + err := topics[0].Publish(ctx, []byte("buff")) + if err != context.Canceled { + t.Fatal("error should have been of type context.Canceled", err) + } +} + func TestTopicRelayPublishWithKey(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel()