Merge branch 'libp2p:master' into master

Prem Chaitanya Prathi 2025-08-22 10:47:47 +05:30 committed by GitHub
commit fc0a0a9bb7
29 changed files with 1625 additions and 435 deletions

.github/workflows/generated-pr.yml (new file, +14 lines)

@@ -0,0 +1,14 @@
name: Close Generated PRs
on:
schedule:
- cron: '0 0 * * *'
workflow_dispatch:
permissions:
issues: write
pull-requests: write
jobs:
stale:
uses: ipdxco/unified-github-workflows/.github/workflows/reusable-generated-pr.yml@v1

View File

@@ -1,4 +1,4 @@
{
"skipOSes": ["windows", "macos"],
"skipRace": true
"skip32bit": true
}

View File

@@ -1,8 +1,9 @@
name: Close and mark stale issue
name: Close Stale Issues
on:
schedule:
- cron: '0 0 * * *'
workflow_dispatch:
permissions:
issues: write
@@ -10,4 +11,4 @@ permissions:
jobs:
stale:
uses: pl-strflt/.github/.github/workflows/reusable-stale-issue.yml@v0.3
uses: ipdxco/unified-github-workflows/.github/workflows/reusable-stale-issue.yml@v1

View File

@@ -96,11 +96,17 @@ func TestBackoff_Clean(t *testing.T) {
if err != nil {
t.Fatalf("unexpected error post update: %s", err)
}
b.mu.Lock()
b.info[id].lastTried = time.Now().Add(-TimeToLive) // enforces expiry
b.mu.Unlock()
}
if len(b.info) != size {
t.Fatalf("info map size mismatch, expected: %d, got: %d", size, len(b.info))
b.mu.Lock()
infoLen := len(b.info)
b.mu.Unlock()
if infoLen != size {
t.Fatalf("info map size mismatch, expected: %d, got: %d", size, infoLen)
}
// waits for a cleanup loop to kick-in
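
The locking added above exists because the backoff cleanup loop mutates b.info from its own goroutine while the test reads it; every access now goes through b.mu. A minimal sketch of the pattern with simplified types (the real struct keeps per-peer backoff history rather than bare timestamps):

package example

import (
	"sync"
	"time"
)

type backoff struct {
	mu   sync.Mutex
	info map[string]time.Time // simplified: the real map stores per-peer history
}

// expire and size both take the lock, so they are safe against a cleanup
// goroutine that also locks mu while deleting stale entries.
func (b *backoff) expire(id string, ttl time.Duration) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.info[id] = time.Now().Add(-ttl) // backdate, enforcing expiry
}

func (b *backoff) size() int {
	b.mu.Lock()
	defer b.mu.Unlock()
	return len(b.info)
}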

View File

@@ -5,10 +5,11 @@ package compat_pb
import (
fmt "fmt"
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
math_bits "math/bits"
proto "github.com/gogo/protobuf/proto"
)
// Reference imports to suppress errors if they are not otherwise used.

View File

@@ -71,7 +71,7 @@ func (fs *FloodSubRouter) AcceptFrom(peer.ID) AcceptStatus {
return AcceptAll
}
func (fs *FloodSubRouter) PreValidation([]*Message) {}
func (fs *FloodSubRouter) Preprocess(from peer.ID, msgs []*Message) {}
func (fs *FloodSubRouter) HandleRPC(rpc *RPC) {}
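
This is the router-interface rename: PreValidation([]*Message) becomes Preprocess(from peer.ID, msgs []*Message), so a router now learns which peer delivered the batch; gossipsub uses that below to avoid echoing IDONTWANT back to the sender. A sketch of the new shape with stand-in types (the interface and types here are illustrative, not the package's actual declarations):

package example

import "github.com/libp2p/go-libp2p/core/peer"

type message struct{} // stand-in for pubsub's *Message

// router shows the changed method: Preprocess replaces PreValidation and
// additionally receives the peer the batch of messages arrived from.
type router interface {
	Preprocess(from peer.ID, msgs []*message)
}

type noopRouter struct{}

func (noopRouter) Preprocess(peer.ID, []*message) {} // floodsub-style no-op

var _ router = noopRouter{}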

View File

@@ -268,8 +268,11 @@ func TestReconnects(t *testing.T) {
t.Fatal("timed out waiting for B chan to be closed")
}
nSubs := len(psubs[2].mySubs["cats"])
if nSubs > 0 {
nSubs := make(chan int)
psubs[2].eval <- func() {
nSubs <- len(psubs[2].mySubs["cats"])
}
if n := <-nSubs; n > 0 {
t.Fatal(`B should have 0 subscribers for channel "cats", has`, n)
}
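
The rewrite works because psubs[2].mySubs may only be touched on the pubsub event loop; the eval channel ships a closure to that loop, and the answer comes back over a fresh channel. A self-contained sketch of the pattern, assuming a simplified event loop:

package main

import "fmt"

type node struct {
	eval chan func()
	subs map[string]int
}

// loop is the only goroutine allowed to touch n.subs.
func (n *node) loop() {
	for f := range n.eval {
		f()
	}
}

func main() {
	n := &node{eval: make(chan func()), subs: map[string]int{"cats": 0}}
	go n.loop()

	out := make(chan int)
	n.eval <- func() { out <- n.subs["cats"] } // the read runs on the loop goroutine
	fmt.Println(<-out)                         // prints 0, with no data race
}
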
@@ -866,9 +869,14 @@ func TestImproperlySignedMessageRejected(t *testing.T) {
t.Fatal(err)
}
var adversaryMessages []*Message
adversaryMessagesCh := make(chan []*Message)
adversaryContext, adversaryCancel := context.WithCancel(ctx)
go func(ctx context.Context) {
var adversaryMessages []*Message
defer func() {
adversaryMessagesCh <- adversaryMessages
}()
for {
select {
case <-ctx.Done():
@@ -885,6 +893,7 @@ func TestImproperlySignedMessageRejected(t *testing.T) {
<-time.After(1 * time.Second)
adversaryCancel()
adversaryMessages := <-adversaryMessagesCh
// Ensure the adversary successfully publishes the incorrectly signed
// message. If the adversary "sees" this, we successfully got through
@@ -895,9 +904,13 @@ func TestImproperlySignedMessageRejected(t *testing.T) {
// the honest peer's validation process will drop the message;
// next will never furnish the incorrect message.
var honestPeerMessages []*Message
honestPeerMessagesCh := make(chan []*Message)
honestPeerContext, honestPeerCancel := context.WithCancel(ctx)
go func(ctx context.Context) {
var honestPeerMessages []*Message
defer func() {
honestPeerMessagesCh <- honestPeerMessages
}()
for {
select {
case <-ctx.Done():
@@ -915,6 +928,7 @@ func TestImproperlySignedMessageRejected(t *testing.T) {
<-time.After(1 * time.Second)
honestPeerCancel()
honestPeerMessages := <-honestPeerMessagesCh
if len(honestPeerMessages) != 1 {
t.Fatalf("got %d messages, expected 1", len(honestPeerMessages))
}
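
Both test rewrites replace a slice that was shared between the test body and a collector goroutine with a goroutine-local slice handed over on a channel after the context is cancelled; ownership transfers exactly once, so no lock is needed. The shape in isolation:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	results := make(chan []int)

	go func() {
		var collected []int // owned by this goroutine until handoff
		defer func() { results <- collected }()
		ticker := time.NewTicker(10 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				collected = append(collected, 1)
			}
		}
	}()

	time.Sleep(50 * time.Millisecond)
	cancel()
	fmt.Println("collected:", len(<-results)) // safe: ownership moved on the channel
}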

go.mod (94 lines changed)

@@ -1,17 +1,18 @@
module github.com/libp2p/go-libp2p-pubsub
go 1.21
go 1.24
require (
github.com/benbjohnson/clock v1.3.5
github.com/gogo/protobuf v1.3.2
github.com/ipfs/go-log/v2 v2.5.1
github.com/libp2p/go-buffer-pool v0.1.0
github.com/libp2p/go-libp2p v0.36.2
github.com/libp2p/go-libp2p v0.39.1
github.com/libp2p/go-libp2p-testing v0.12.0
github.com/libp2p/go-msgio v0.3.0
github.com/multiformats/go-multiaddr v0.13.0
github.com/multiformats/go-multiaddr v0.14.0
github.com/multiformats/go-varint v0.0.7
go.uber.org/zap v1.27.0
)
require (
@@ -29,85 +30,90 @@ require (
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect
github.com/google/pprof v0.0.0-20250202011525-fc3143867406 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/ipfs/go-cid v0.4.1 // indirect
github.com/ipfs/go-cid v0.5.0 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/klauspost/cpuid/v2 v2.2.8 // indirect
github.com/koron/go-ssdp v0.0.4 // indirect
github.com/libp2p/go-flow-metrics v0.1.0 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/klauspost/cpuid/v2 v2.2.9 // indirect
github.com/koron/go-ssdp v0.0.5 // indirect
github.com/libp2p/go-flow-metrics v0.2.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
github.com/libp2p/go-nat v0.2.0 // indirect
github.com/libp2p/go-netroute v0.2.1 // indirect
github.com/libp2p/go-netroute v0.2.2 // indirect
github.com/libp2p/go-reuseport v0.4.0 // indirect
github.com/libp2p/go-yamux/v4 v4.0.1 // indirect
github.com/libp2p/go-yamux/v4 v4.0.2 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/miekg/dns v1.1.62 // indirect
github.com/miekg/dns v1.1.63 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
github.com/multiformats/go-multibase v0.2.0 // indirect
github.com/multiformats/go-multicodec v0.9.0 // indirect
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-multistream v0.5.0 // indirect
github.com/multiformats/go-multistream v0.6.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/onsi/ginkgo/v2 v2.20.0 // indirect
github.com/onsi/ginkgo/v2 v2.22.2 // indirect
github.com/opencontainers/runtime-spec v1.2.0 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/pion/datachannel v1.5.8 // indirect
github.com/pion/datachannel v1.5.10 // indirect
github.com/pion/dtls/v2 v2.2.12 // indirect
github.com/pion/ice/v2 v2.3.34 // indirect
github.com/pion/interceptor v0.1.30 // indirect
github.com/pion/logging v0.2.2 // indirect
github.com/pion/dtls/v3 v3.0.4 // indirect
github.com/pion/ice/v2 v2.3.37 // indirect
github.com/pion/ice/v4 v4.0.6 // indirect
github.com/pion/interceptor v0.1.37 // indirect
github.com/pion/logging v0.2.3 // indirect
github.com/pion/mdns v0.0.12 // indirect
github.com/pion/mdns/v2 v2.0.7 // indirect
github.com/pion/randutil v0.1.0 // indirect
github.com/pion/rtcp v1.2.14 // indirect
github.com/pion/rtp v1.8.9 // indirect
github.com/pion/sctp v1.8.33 // indirect
github.com/pion/sdp/v3 v3.0.9 // indirect
github.com/pion/srtp/v2 v2.0.20 // indirect
github.com/pion/rtcp v1.2.15 // indirect
github.com/pion/rtp v1.8.11 // indirect
github.com/pion/sctp v1.8.35 // indirect
github.com/pion/sdp/v3 v3.0.10 // indirect
github.com/pion/srtp/v3 v3.0.4 // indirect
github.com/pion/stun v0.6.1 // indirect
github.com/pion/stun/v3 v3.0.0 // indirect
github.com/pion/transport/v2 v2.2.10 // indirect
github.com/pion/transport/v3 v3.0.7 // indirect
github.com/pion/turn/v2 v2.1.6 // indirect
github.com/pion/webrtc/v3 v3.3.0 // indirect
github.com/pion/turn/v4 v4.0.0 // indirect
github.com/pion/webrtc/v4 v4.0.8 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.20.0 // indirect
github.com/prometheus/client_golang v1.20.5 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/quic-go/qpack v0.4.0 // indirect
github.com/quic-go/quic-go v0.46.0 // indirect
github.com/quic-go/webtransport-go v0.8.0 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/quic-go/quic-go v0.49.0 // indirect
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect
github.com/raulk/go-watchdog v1.3.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/stretchr/testify v1.9.0 // indirect
github.com/wlynxg/anet v0.0.4 // indirect
github.com/stretchr/testify v1.10.0 // indirect
github.com/wlynxg/anet v0.0.5 // indirect
go.uber.org/dig v1.18.0 // indirect
go.uber.org/fx v1.22.2 // indirect
go.uber.org/mock v0.4.0 // indirect
go.uber.org/fx v1.23.0 // indirect
go.uber.org/mock v0.5.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.26.0 // indirect
golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect
golang.org/x/mod v0.20.0 // indirect
golang.org/x/net v0.28.0 // indirect
golang.org/x/sync v0.8.0 // indirect
golang.org/x/sys v0.24.0 // indirect
golang.org/x/text v0.17.0 // indirect
golang.org/x/tools v0.24.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
golang.org/x/crypto v0.32.0 // indirect
golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c // indirect
golang.org/x/mod v0.23.0 // indirect
golang.org/x/net v0.34.0 // indirect
golang.org/x/sync v0.11.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/text v0.22.0 // indirect
golang.org/x/tools v0.29.0 // indirect
google.golang.org/protobuf v1.36.4 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/blake3 v1.3.0 // indirect
)

go.sum (189 lines changed)

@@ -85,8 +85,8 @@ github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k=
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/pprof v0.0.0-20250202011525-fc3143867406 h1:wlQI2cYY0BsWmmPPAnxfQ8SDW0S3Jasn+4B8kXFxprg=
github.com/google/pprof v0.0.0-20250202011525-fc3143867406/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -101,8 +101,8 @@ github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY=
github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
@@ -115,12 +115,12 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0=
github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY=
github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8=
github.com/koron/go-ssdp v0.0.5 h1:E1iSMxIs4WqxTbIBLtmNBeOOC+1sCIXQeqTWVnpmwhk=
github.com/koron/go-ssdp v0.0.5/go.mod h1:Qm59B7hpKpDqfyRNWRNr00jGwLdXjDyZh6y7rH6VS0w=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@@ -132,10 +132,10 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
github.com/libp2p/go-libp2p v0.36.2 h1:BbqRkDaGC3/5xfaJakLV/BrpjlAuYqSB0lRvtzL3B/U=
github.com/libp2p/go-libp2p v0.36.2/go.mod h1:XO3joasRE4Eup8yCTTP/+kX+g92mOgRaadk46LmPhHY=
github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C08XmmDw=
github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc=
github.com/libp2p/go-libp2p v0.39.1 h1:1Ur6rPCf3GR+g8jkrnaQaM0ha2IGespsnNlCqJLLALE=
github.com/libp2p/go-libp2p v0.39.1/go.mod h1:3zicI8Lp7Isun+Afo/JOACUbbJqqR2owK6RQWFsVAbI=
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
@@ -144,12 +144,12 @@ github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk=
github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk=
github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU=
github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ=
github.com/libp2p/go-netroute v0.2.2 h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8=
github.com/libp2p/go-netroute v0.2.2/go.mod h1:Rntq6jUAH0l9Gg17w5bFGhcC9a+vk4KNXs6s7IljKYE=
github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ=
github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
github.com/libp2p/go-yamux/v4 v4.0.2 h1:nrLh89LN/LEiqcFiqdKDRHjGstN300C1269K/EX0CPU=
github.com/libp2p/go-yamux/v4 v4.0.2/go.mod h1:C808cCRgOs1iBwY4S71T5oxgMxgLmqUw56qh4AeBW2o=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
@@ -159,9 +159,8 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ=
github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ=
github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY=
github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
@@ -182,11 +181,10 @@ github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYg
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4=
github.com/multiformats/go-multiaddr v0.13.0 h1:BCBzs61E3AGHcYYTv8dqRH43ZfyrqM8RXVPT8t13tLQ=
github.com/multiformats/go-multiaddr v0.13.0/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII=
github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A=
github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk=
github.com/multiformats/go-multiaddr v0.14.0 h1:bfrHrJhrRuh/NXH5mCnemjpbGjzRw/b+tJFOD41g2tU=
github.com/multiformats/go-multiaddr v0.14.0/go.mod h1:6EkVAxtznq2yC3QT5CM1UTAwG0GTP3EWAIcjHuzQ+r4=
github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M=
github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
@@ -196,56 +194,61 @@ github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI1
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE=
github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA=
github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/multiformats/go-multistream v0.6.0 h1:ZaHKbsL404720283o4c/IHQXiS6gb8qAN5EIJ4PN5EA=
github.com/multiformats/go-multistream v0.6.0/go.mod h1:MOyoG5otO24cHIg8kf9QW2/NozURlkP/rvi2FQJyCPg=
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw=
github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI=
github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pion/datachannel v1.5.8 h1:ph1P1NsGkazkjrvyMfhRBUAWMxugJjq2HfQifaOoSNo=
github.com/pion/datachannel v1.5.8/go.mod h1:PgmdpoaNBLX9HNzNClmdki4DYW5JtI7Yibu8QzbL3tI=
github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M=
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk=
github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
github.com/pion/ice/v2 v2.3.34 h1:Ic1ppYCj4tUOcPAp76U6F3fVrlSw8A9JtRXLqw6BbUM=
github.com/pion/ice/v2 v2.3.34/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ=
github.com/pion/interceptor v0.1.30 h1:au5rlVHsgmxNi+v/mjOPazbW1SHzfx7/hYOEYQnUcxA=
github.com/pion/interceptor v0.1.30/go.mod h1:RQuKT5HTdkP2Fi0cuOS5G5WNymTjzXaGF75J4k7z2nc=
github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY=
github.com/pion/dtls/v3 v3.0.4 h1:44CZekewMzfrn9pmGrj5BNnTMDCFwr+6sLH+cCuLM7U=
github.com/pion/dtls/v3 v3.0.4/go.mod h1:R373CsjxWqNPf6MEkfdy3aSe9niZvL/JaKlGeFphtMg=
github.com/pion/ice/v2 v2.3.37 h1:ObIdaNDu1rCo7hObhs34YSBcO7fjslJMZV0ux+uZWh0=
github.com/pion/ice/v2 v2.3.37/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ=
github.com/pion/ice/v4 v4.0.6 h1:jmM9HwI9lfetQV/39uD0nY4y++XZNPhvzIPCb8EwxUM=
github.com/pion/ice/v4 v4.0.6/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
github.com/pion/interceptor v0.1.37 h1:aRA8Zpab/wE7/c0O3fh1PqY0AJI3fCSEM5lRWJVorwI=
github.com/pion/interceptor v0.1.37/go.mod h1:JzxbJ4umVTlZAf+/utHzNesY8tmRkM2lVmkS82TTj8Y=
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI=
github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90=
github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8=
github.com/pion/mdns v0.0.12/go.mod h1:VExJjv8to/6Wqm1FXK+Ii/Z9tsVk/F5sD/N70cnYFbk=
github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM=
github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA=
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
github.com/pion/rtcp v1.2.12/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4=
github.com/pion/rtcp v1.2.14 h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE=
github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4=
github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU=
github.com/pion/rtp v1.8.9 h1:E2HX740TZKaqdcPmf4pw6ZZuG8u5RlMMt+l3dxeu6Wk=
github.com/pion/rtp v1.8.9/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU=
github.com/pion/sctp v1.8.33 h1:dSE4wX6uTJBcNm8+YlMg7lw1wqyKHggsP5uKbdj+NZw=
github.com/pion/sctp v1.8.33/go.mod h1:beTnqSzewI53KWoG3nqB282oDMGrhNxBdb+JZnkCwRM=
github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY=
github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M=
github.com/pion/srtp/v2 v2.0.20 h1:HNNny4s+OUmG280ETrCdgFndp4ufx3/uy85EawYEhTk=
github.com/pion/srtp/v2 v2.0.20/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA=
github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
github.com/pion/rtp v1.8.11 h1:17xjnY5WO5hgO6SD3/NTIUPvSFw/PbLsIJyz1r1yNIk=
github.com/pion/rtp v1.8.11/go.mod h1:8uMBJj32Pa1wwx8Fuv/AsFhn8jsgw+3rUC2PfoBZ8p4=
github.com/pion/sctp v1.8.35 h1:qwtKvNK1Wc5tHMIYgTDJhfZk7vATGVHhXbUDfHbYwzA=
github.com/pion/sctp v1.8.35/go.mod h1:EcXP8zCYVTRy3W9xtOF7wJm1L1aXfKRQzaM33SjQlzg=
github.com/pion/sdp/v3 v3.0.10 h1:6MChLE/1xYB+CjumMw+gZ9ufp2DPApuVSnDT8t5MIgA=
github.com/pion/sdp/v3 v3.0.10/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
github.com/pion/srtp/v3 v3.0.4 h1:2Z6vDVxzrX3UHEgrUyIGM4rRouoC7v+NiF1IHtp9B5M=
github.com/pion/srtp/v3 v3.0.4/go.mod h1:1Jx3FwDoxpRaTh1oRV8A/6G1BnFL+QI82eK4ms8EEJQ=
github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8=
github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw=
github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU=
github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g=
github.com/pion/transport/v2 v2.2.3/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q=
github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E=
@@ -255,31 +258,33 @@ github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uP
github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY=
github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc=
github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY=
github.com/pion/webrtc/v3 v3.3.0 h1:Rf4u6n6U5t5sUxhYPQk/samzU/oDv7jk6BA5hyO2F9I=
github.com/pion/webrtc/v3 v3.3.0/go.mod h1:hVmrDJvwhEertRWObeb1xzulzHGeVUoPlWvxdGzcfU0=
github.com/pion/turn/v4 v4.0.0 h1:qxplo3Rxa9Yg1xXDxxH8xaqcyGUtbHYw4QSCvmFWvhM=
github.com/pion/turn/v4 v4.0.0/go.mod h1:MuPDkm15nYSklKpN8vWJ9W2M0PlyQZqYt1McGuxG7mA=
github.com/pion/webrtc/v4 v4.0.8 h1:T1ZmnT9qxIJIt4d8XoiMOBrTClGHDDXNg9e/fh018Qc=
github.com/pion/webrtc/v4 v4.0.8/go.mod h1:HHBeUVBAC+j4ZFnYhovEFStF02Arb1EyD4G7e7HBTJw=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.20.0 h1:jBzTZ7B099Rg24tny+qngoynol8LtVYlA2bqx3vEloI=
github.com/prometheus/client_golang v1.20.0/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
github.com/quic-go/quic-go v0.46.0 h1:uuwLClEEyk1DNvchH8uCByQVjo3yKL9opKulExNDs7Y=
github.com/quic-go/quic-go v0.46.0/go.mod h1:1dLehS7TIR64+vxGR70GDcatWTOtMX2PUtnKsjbTurI=
github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv/cD7QFJg=
github.com/quic-go/webtransport-go v0.8.0/go.mod h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.49.0 h1:w5iJHXwHxs1QxyBv1EHKuC50GX5to8mJAxvtnttJp94=
github.com/quic-go/quic-go v0.49.0/go.mod h1:s2wDnmCdooUQBmQfpUSTCYBl1/D4FcqbULMMkASvR6s=
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg=
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw=
github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
@@ -318,7 +323,6 @@ github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -327,15 +331,15 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
github.com/wlynxg/anet v0.0.4 h1:0de1OFQxnNqAu+x2FAKKCVIrnfGKQbs7FQz++tB0+Uw=
github.com/wlynxg/anet v0.0.4/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
@@ -344,13 +348,13 @@ go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw=
go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
go.uber.org/fx v1.22.2 h1:iPW+OPxv0G8w75OemJ1RAnTUrF55zOJlXlo1TbJ0Buw=
go.uber.org/fx v1.22.2/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU=
go.uber.org/fx v1.23.0 h1:lIr/gYWQGfTwGcSXWXu4vP5Ws6iqnNEIY+F/aFzCKTg=
go.uber.org/fx v1.23.0/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
@@ -371,11 +375,11 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI=
golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ=
golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c h1:KL/ZBHXgKGVmuZBZ01Lt57yE5ws8ZPSkkihmEyq7FXc=
golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@@ -387,8 +391,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -411,8 +415,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -428,8 +432,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -442,7 +446,6 @@ golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -456,8 +459,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
@@ -473,8 +476,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
@@ -493,8 +496,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE=
golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -515,8 +518,8 @@ google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmE
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM=
google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

View File

@@ -5,6 +5,7 @@ import (
"crypto/sha256"
"fmt"
"io"
"iter"
"math/rand"
"sort"
"time"
@@ -19,6 +20,8 @@ import (
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/libp2p/go-libp2p/core/record"
"github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem"
"go.uber.org/zap/zapcore"
)
const (
@@ -66,6 +69,7 @@ var (
GossipSubGraftFloodThreshold = 10 * time.Second
GossipSubMaxIHaveLength = 5000
GossipSubMaxIHaveMessages = 10
GossipSubMaxIDontWantLength = 10
GossipSubMaxIDontWantMessages = 1000
GossipSubIWantFollowupTime = 3 * time.Second
GossipSubIDontWantMessageThreshold = 1024 // 1KB
@@ -216,6 +220,10 @@ type GossipSubParams struct {
// MaxIHaveMessages is the maximum number of IHAVE messages to accept from a peer within a heartbeat.
MaxIHaveMessages int
// MaxIDontWantLength is the maximum number of messages to include in an IDONTWANT message. Also controls
// the maximum number of IDONTWANT ids we will accept to protect against IDONTWANT floods. This value
// should be adjusted if your system anticipates a larger amount than specified per heartbeat.
MaxIDontWantLength int
// MaxIDontWantMessages is the maximum number of IDONTWANT messages to accept from a peer within a heartbeat.
MaxIDontWantMessages int
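
Because MaxIDontWantLength lives in GossipSubParams, deployments expecting more IDONTWANT ids per heartbeat can raise it through the existing options API; a minimal sketch using the constructor exported by this package:

package main

import (
	"context"

	"github.com/libp2p/go-libp2p"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
)

func main() {
	h, err := libp2p.New()
	if err != nil {
		panic(err)
	}
	params := pubsub.DefaultGossipSubParams()
	params.MaxIDontWantLength = 100 // raise the per-message IDONTWANT id cap
	if _, err := pubsub.NewGossipSub(context.Background(), h, pubsub.WithGossipSubParams(params)); err != nil {
		panic(err)
	}
}
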
@@ -301,6 +309,7 @@ func DefaultGossipSubParams() GossipSubParams {
GraftFloodThreshold: GossipSubGraftFloodThreshold,
MaxIHaveLength: GossipSubMaxIHaveLength,
MaxIHaveMessages: GossipSubMaxIHaveMessages,
MaxIDontWantLength: GossipSubMaxIDontWantLength,
MaxIDontWantMessages: GossipSubMaxIDontWantMessages,
IWantFollowupTime: GossipSubIWantFollowupTime,
IDontWantMessageThreshold: GossipSubIDontWantMessageThreshold,
@@ -514,6 +523,8 @@ type GossipSubRouter struct {
heartbeatTicks uint64
}
var _ BatchPublisher = &GossipSubRouter{}
type connectInfo struct {
p peer.ID
spr *record.Envelope
@@ -696,10 +707,10 @@ func (gs *GossipSubRouter) AcceptFrom(p peer.ID) AcceptStatus {
return gs.gate.AcceptFrom(p)
}
// PreValidation sends the IDONTWANT control messages to all the mesh
// Preprocess sends the IDONTWANT control messages to all the mesh
// peers. They need to be sent right before the validation because they
// should be seen by the peers as soon as possible.
func (gs *GossipSubRouter) PreValidation(msgs []*Message) {
func (gs *GossipSubRouter) Preprocess(from peer.ID, msgs []*Message) {
tmids := make(map[string][]string)
for _, msg := range msgs {
if len(msg.GetData()) < gs.params.IDontWantMessageThreshold {
@@ -716,6 +727,10 @@ func (gs *GossipSubRouter) PreValidation(msgs []*Message) {
shuffleStrings(mids)
// send IDONTWANT to all the mesh peers
for p := range gs.mesh[topic] {
if p == from {
// We don't send IDONTWANT to the peer that sent us the messages
continue
}
// send to only peers that support IDONTWANT
if gs.feature(GossipSubFeatureIdontwant, gs.peers[p]) {
idontwant := []*pb.ControlIDontWant{{MessageIDs: mids}}
@@ -831,6 +846,11 @@ func (gs *GossipSubRouter) handleIWant(p peer.ID, ctl *pb.ControlMessage) []*pb.
ihave := make(map[string]*pb.Message)
for _, iwant := range ctl.GetIwant() {
for _, mid := range iwant.GetMessageIDs() {
// Check if that peer has sent IDONTWANT before, if so don't send them the message
if _, ok := gs.unwanted[p][computeChecksum(mid)]; ok {
continue
}
msg, count, ok := gs.mcache.GetForPeer(mid, p)
if !ok {
continue
@@ -1007,9 +1027,18 @@ func (gs *GossipSubRouter) handleIDontWant(p peer.ID, ctl *pb.ControlMessage) {
}
gs.peerdontwant[p]++
totalUnwantedIds := 0
// Remember all the unwanted message ids
mainIDWLoop:
for _, idontwant := range ctl.GetIdontwant() {
for _, mid := range idontwant.GetMessageIDs() {
// IDONTWANT flood protection
if totalUnwantedIds >= gs.params.MaxIDontWantLength {
log.Debugf("IDONWANT: peer %s has advertised too many ids (%d) within this message; ignoring", p, totalUnwantedIds)
break mainIDWLoop
}
totalUnwantedIds++
gs.unwanted[p][computeChecksum(mid)] = gs.params.IDontWantMessageTTL
}
}
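
The mainIDWLoop label lets the cap abort both the per-control and per-id loops in one step once MaxIDontWantLength ids have been accepted. The same control flow in isolation, with illustrative data:

package main

import "fmt"

func main() {
	const maxIDs = 5
	batches := [][]string{{"a", "b", "c"}, {"d", "e", "f"}, {"g"}}
	accepted := 0

capLoop: // one labeled break exits the nested iteration once the cap is hit
	for _, batch := range batches {
		for _, id := range batch {
			if accepted >= maxIDs {
				break capLoop
			}
			accepted++
			_ = id
		}
	}
	fmt.Println("accepted:", accepted) // 5
}
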
@@ -1117,81 +1146,105 @@ func (gs *GossipSubRouter) connector() {
}
}
func (gs *GossipSubRouter) Publish(msg *Message) {
gs.mcache.Put(msg)
from := msg.ReceivedFrom
topic := msg.GetTopic()
tosend := make(map[peer.ID]struct{})
// any peers in the topic?
tmap, ok := gs.p.topics[topic]
if !ok {
return
func (gs *GossipSubRouter) PublishBatch(messages []*Message, opts *BatchPublishOptions) {
strategy := opts.Strategy
for _, msg := range messages {
msgID := gs.p.idGen.ID(msg)
for p, rpc := range gs.rpcs(msg) {
strategy.AddRPC(p, msgID, rpc)
}
}
if gs.floodPublish && from == gs.p.host.ID() {
for p := range tmap {
_, direct := gs.direct[p]
if direct || gs.score.Score(p) >= gs.publishThreshold {
tosend[p] = struct{}{}
}
}
} else {
// direct peers
for p := range gs.direct {
_, inTopic := tmap[p]
if inTopic {
tosend[p] = struct{}{}
}
}
for p, rpc := range strategy.All() {
gs.sendRPC(p, rpc, false)
}
}
// floodsub peers
for p := range tmap {
if !gs.feature(GossipSubFeatureMesh, gs.peers[p]) && gs.score.Score(p) >= gs.publishThreshold {
tosend[p] = struct{}{}
}
}
func (gs *GossipSubRouter) Publish(msg *Message) {
for p, rpc := range gs.rpcs(msg) {
gs.sendRPC(p, rpc, false)
}
}
// gossipsub peers
gmap, ok := gs.mesh[topic]
func (gs *GossipSubRouter) rpcs(msg *Message) iter.Seq2[peer.ID, *RPC] {
return func(yield func(peer.ID, *RPC) bool) {
gs.mcache.Put(msg)
from := msg.ReceivedFrom
topic := msg.GetTopic()
tosend := make(map[peer.ID]struct{})
// any peers in the topic?
tmap, ok := gs.p.topics[topic]
if !ok {
// we are not in the mesh for topic, use fanout peers
gmap, ok = gs.fanout[topic]
if !ok || len(gmap) == 0 {
// we don't have any, pick some with score above the publish threshold
peers := gs.getPeers(topic, gs.params.D, func(p peer.ID) bool {
_, direct := gs.direct[p]
return !direct && gs.score.Score(p) >= gs.publishThreshold
})
return
}
if len(peers) > 0 {
gmap = peerListToMap(peers)
gs.fanout[topic] = gmap
if gs.floodPublish && from == gs.p.host.ID() {
for p := range tmap {
_, direct := gs.direct[p]
if direct || gs.score.Score(p) >= gs.publishThreshold {
tosend[p] = struct{}{}
}
}
gs.lastpub[topic] = time.Now().UnixNano()
} else {
// direct peers
for p := range gs.direct {
_, inTopic := tmap[p]
if inTopic {
tosend[p] = struct{}{}
}
}
// floodsub peers
for p := range tmap {
if !gs.feature(GossipSubFeatureMesh, gs.peers[p]) && gs.score.Score(p) >= gs.publishThreshold {
tosend[p] = struct{}{}
}
}
// gossipsub peers
gmap, ok := gs.mesh[topic]
if !ok {
// we are not in the mesh for topic, use fanout peers
gmap, ok = gs.fanout[topic]
if !ok || len(gmap) == 0 {
// we don't have any, pick some with score above the publish threshold
peers := gs.getPeers(topic, gs.params.D, func(p peer.ID) bool {
_, direct := gs.direct[p]
return !direct && gs.score.Score(p) >= gs.publishThreshold
})
if len(peers) > 0 {
gmap = peerListToMap(peers)
gs.fanout[topic] = gmap
}
}
gs.lastpub[topic] = time.Now().UnixNano()
}
csum := computeChecksum(gs.p.idGen.ID(msg))
for p := range gmap {
// Check if it has already received an IDONTWANT for the message.
// If so, don't send it to the peer
if _, ok := gs.unwanted[p][csum]; ok {
continue
}
tosend[p] = struct{}{}
}
}
for p := range gmap {
mid := gs.p.idGen.ID(msg)
// Check if it has already received an IDONTWANT for the message.
// If so, don't send it to the peer
if _, ok := gs.unwanted[p][computeChecksum(mid)]; ok {
out := rpcWithMessages(msg.Message)
for pid := range tosend {
if pid == from || pid == peer.ID(msg.GetFrom()) {
continue
}
tosend[p] = struct{}{}
}
}
out := rpcWithMessages(msg.Message)
for pid := range tosend {
if pid == from || pid == peer.ID(msg.GetFrom()) {
continue
if !yield(pid, out) {
return
}
}
gs.sendRPC(pid, out, false)
}
}
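
PublishBatch drives a pluggable delivery strategy: rpcs() lazily yields (peer, RPC) pairs as an iter.Seq2 (range-over-func, hence the new iter import and the go 1.24 directive in go.mod), the strategy buffers them per message via AddRPC, and All() replays them in an order of its choosing. A sketch of a strategy shape consistent with that call pattern; the concrete interface may differ, and the rpc type here is a stand-in:

package example

import (
	"iter"

	"github.com/libp2p/go-libp2p/core/peer"
)

type rpc struct{} // stand-in for pubsub's *RPC

// fifoStrategy replays RPCs grouped by peer, in first-seen peer order.
type fifoStrategy struct {
	order  []peer.ID
	byPeer map[peer.ID][]*rpc
}

func (s *fifoStrategy) AddRPC(p peer.ID, msgID string, r *rpc) {
	if s.byPeer == nil {
		s.byPeer = make(map[peer.ID][]*rpc)
	}
	if _, seen := s.byPeer[p]; !seen {
		s.order = append(s.order, p)
	}
	s.byPeer[p] = append(s.byPeer[p], r)
}

func (s *fifoStrategy) All() iter.Seq2[peer.ID, *rpc] {
	return func(yield func(peer.ID, *rpc) bool) {
		for _, p := range s.order {
			for _, r := range s.byPeer[p] {
				if !yield(p, r) {
					return
				}
			}
		}
	}
}
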
@@ -1322,19 +1375,20 @@ func (gs *GossipSubRouter) sendRPC(p peer.ID, out *RPC, urgent bool) {
}
// Potentially split the RPC into multiple RPCs that are below the max message size
outRPCs := appendOrMergeRPC(nil, gs.p.maxMessageSize, *out)
for _, rpc := range outRPCs {
for rpc := range out.split(gs.p.maxMessageSize) {
if rpc.Size() > gs.p.maxMessageSize {
// This should only happen if a single message/control is above the maxMessageSize.
gs.doDropRPC(out, p, fmt.Sprintf("Dropping oversized RPC. Size: %d, limit: %d. (Over by %d bytes)", rpc.Size(), gs.p.maxMessageSize, rpc.Size()-gs.p.maxMessageSize))
continue
}
gs.doSendRPC(rpc, p, q, urgent)
gs.doSendRPC(&rpc, p, q, urgent)
}
}
func (gs *GossipSubRouter) doDropRPC(rpc *RPC, p peer.ID, reason string) {
log.Debugf("dropping message to peer %s: %s", p, reason)
if log.Level() <= zapcore.DebugLevel {
log.Debugf("dropping message to peer %s: %s", p, reason)
}
gs.tracer.DropRPC(rpc, p)
// push control messages that need to be retried
ctl := rpc.GetControl()
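
The guard added in doDropRPC is a hot-path micro-optimization: when the logger sits above debug level, the Debugf formatting work is skipped entirely. The same guard in isolation, using the go-log/zap combination this package already depends on:

package example

import (
	logging "github.com/ipfs/go-log/v2"
	"go.uber.org/zap/zapcore"
)

var log = logging.Logger("pubsub-example")

func dropNoisy(peerID, reason string) {
	// Only pay for message formatting when the debug level is enabled.
	if log.Level() <= zapcore.DebugLevel {
		log.Debugf("dropping message to peer %s: %s", peerID, reason)
	}
}
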
@ -1357,137 +1411,6 @@ func (gs *GossipSubRouter) doSendRPC(rpc *RPC, p peer.ID, q *rpcQueue, urgent bo
gs.tracer.SendRPC(rpc, p)
}
// appendOrMergeRPC appends the given RPCs to the slice, merging them if possible.
// If any elem is too large to fit in a single RPC, it will be split into multiple RPCs.
// If an RPC is too large and can't be split further (e.g. Message data is
// bigger than the RPC limit), then it will be returned as an oversized RPC.
// The caller should filter out oversized RPCs.
func appendOrMergeRPC(slice []*RPC, limit int, elems ...RPC) []*RPC {
if len(elems) == 0 {
return slice
}
if len(slice) == 0 && len(elems) == 1 && elems[0].Size() < limit {
// Fast path: no merging needed and only one element
return append(slice, &elems[0])
}
out := slice
if len(out) == 0 {
out = append(out, &RPC{RPC: pb.RPC{}})
out[0].from = elems[0].from
}
for _, elem := range elems {
lastRPC := out[len(out)-1]
// Merge/Append publish messages
// TODO: Never merge messages. The current behavior is the same as the
// old behavior. In the future let's not merge messages. Since,
// it may increase message latency.
for _, msg := range elem.GetPublish() {
if lastRPC.Publish = append(lastRPC.Publish, msg); lastRPC.Size() > limit {
lastRPC.Publish = lastRPC.Publish[:len(lastRPC.Publish)-1]
lastRPC = &RPC{RPC: pb.RPC{}, from: elem.from}
lastRPC.Publish = append(lastRPC.Publish, msg)
out = append(out, lastRPC)
}
}
// Merge/Append Subscriptions
for _, sub := range elem.GetSubscriptions() {
if lastRPC.Subscriptions = append(lastRPC.Subscriptions, sub); lastRPC.Size() > limit {
lastRPC.Subscriptions = lastRPC.Subscriptions[:len(lastRPC.Subscriptions)-1]
lastRPC = &RPC{RPC: pb.RPC{}, from: elem.from}
lastRPC.Subscriptions = append(lastRPC.Subscriptions, sub)
out = append(out, lastRPC)
}
}
// Merge/Append Control messages
if ctl := elem.GetControl(); ctl != nil {
if lastRPC.Control == nil {
lastRPC.Control = &pb.ControlMessage{}
if lastRPC.Size() > limit {
lastRPC.Control = nil
lastRPC = &RPC{RPC: pb.RPC{Control: &pb.ControlMessage{}}, from: elem.from}
out = append(out, lastRPC)
}
}
for _, graft := range ctl.GetGraft() {
if lastRPC.Control.Graft = append(lastRPC.Control.Graft, graft); lastRPC.Size() > limit {
lastRPC.Control.Graft = lastRPC.Control.Graft[:len(lastRPC.Control.Graft)-1]
lastRPC = &RPC{RPC: pb.RPC{Control: &pb.ControlMessage{}}, from: elem.from}
lastRPC.Control.Graft = append(lastRPC.Control.Graft, graft)
out = append(out, lastRPC)
}
}
for _, prune := range ctl.GetPrune() {
if lastRPC.Control.Prune = append(lastRPC.Control.Prune, prune); lastRPC.Size() > limit {
lastRPC.Control.Prune = lastRPC.Control.Prune[:len(lastRPC.Control.Prune)-1]
lastRPC = &RPC{RPC: pb.RPC{Control: &pb.ControlMessage{}}, from: elem.from}
lastRPC.Control.Prune = append(lastRPC.Control.Prune, prune)
out = append(out, lastRPC)
}
}
for _, iwant := range ctl.GetIwant() {
if len(lastRPC.Control.Iwant) == 0 {
// Initialize with a single IWANT.
// For IWANTs we don't need more than a single one,
// since there are no topic IDs here.
newIWant := &pb.ControlIWant{}
if lastRPC.Control.Iwant = append(lastRPC.Control.Iwant, newIWant); lastRPC.Size() > limit {
lastRPC.Control.Iwant = lastRPC.Control.Iwant[:len(lastRPC.Control.Iwant)-1]
lastRPC = &RPC{RPC: pb.RPC{Control: &pb.ControlMessage{
Iwant: []*pb.ControlIWant{newIWant},
}}, from: elem.from}
out = append(out, lastRPC)
}
}
for _, msgID := range iwant.GetMessageIDs() {
if lastRPC.Control.Iwant[0].MessageIDs = append(lastRPC.Control.Iwant[0].MessageIDs, msgID); lastRPC.Size() > limit {
lastRPC.Control.Iwant[0].MessageIDs = lastRPC.Control.Iwant[0].MessageIDs[:len(lastRPC.Control.Iwant[0].MessageIDs)-1]
lastRPC = &RPC{RPC: pb.RPC{Control: &pb.ControlMessage{
Iwant: []*pb.ControlIWant{{MessageIDs: []string{msgID}}},
}}, from: elem.from}
out = append(out, lastRPC)
}
}
}
for _, ihave := range ctl.GetIhave() {
if len(lastRPC.Control.Ihave) == 0 ||
lastRPC.Control.Ihave[len(lastRPC.Control.Ihave)-1].TopicID != ihave.TopicID {
// Start a new IHAVE if we are referencing a new topic ID
newIhave := &pb.ControlIHave{TopicID: ihave.TopicID}
if lastRPC.Control.Ihave = append(lastRPC.Control.Ihave, newIhave); lastRPC.Size() > limit {
lastRPC.Control.Ihave = lastRPC.Control.Ihave[:len(lastRPC.Control.Ihave)-1]
lastRPC = &RPC{RPC: pb.RPC{Control: &pb.ControlMessage{
Ihave: []*pb.ControlIHave{newIhave},
}}, from: elem.from}
out = append(out, lastRPC)
}
}
for _, msgID := range ihave.GetMessageIDs() {
lastIHave := lastRPC.Control.Ihave[len(lastRPC.Control.Ihave)-1]
if lastIHave.MessageIDs = append(lastIHave.MessageIDs, msgID); lastRPC.Size() > limit {
lastIHave.MessageIDs = lastIHave.MessageIDs[:len(lastIHave.MessageIDs)-1]
lastRPC = &RPC{RPC: pb.RPC{Control: &pb.ControlMessage{
Ihave: []*pb.ControlIHave{{TopicID: ihave.TopicID, MessageIDs: []string{msgID}}},
}}, from: elem.from}
out = append(out, lastRPC)
}
}
}
}
}
return out
}
func (gs *GossipSubRouter) heartbeatTimer() {
time.Sleep(gs.params.HeartbeatInitialDelay)
select {
@ -1601,7 +1524,7 @@ func (gs *GossipSubRouter) heartbeat() {
}
// do we have too many peers?
if len(peers) > gs.params.Dhi {
if len(peers) >= gs.params.Dhi {
plst := peerMapToList(peers)
// sort by score (but shuffle first for the case we don't use the score)
@ -2133,6 +2056,23 @@ func (gs *GossipSubRouter) WithDefaultTagTracer() Option {
return WithRawTracer(gs.tagTracer)
}
// SendControl dispatches the given set of control messages to the given peer.
// The control messages are piggybacked on the given (optional) messages and
// sent as a single RPC.
// Args:
//
// p: the peer to send the control messages to.
// ctl: the control messages to send.
// msgs: optional messages to send in the same RPC.
func (gs *GossipSubRouter) SendControl(p peer.ID, ctl *pb.ControlMessage, msgs ...*pb.Message) {
out := rpcWithControl(msgs, ctl.Ihave, ctl.Iwant, ctl.Graft, ctl.Prune, ctl.Idontwant)
gs.sendRPC(p, out, false)
}
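// A minimal usage sketch (illustrative only, not part of this change): grafting
// a peer while piggybacking a payload message. The peer ID, topic, and message
// values here are hypothetical.
//
//	topic := "foobar"
//	ctl := &pb.ControlMessage{Graft: []*pb.ControlGraft{{TopicID: &topic}}}
//	gs.SendControl(p, ctl, msg) // msg is an optional *pb.Message to piggyback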
func peerListToMap(peers []peer.ID) map[peer.ID]struct{} {
pmap := make(map[peer.ID]struct{})
for _, p := range peers {

View File

@ -4,6 +4,7 @@ import (
"context"
"crypto/rand"
"encoding/base64"
"fmt"
"strconv"
"sync"
"testing"
@ -796,7 +797,10 @@ func TestGossipsubAttackSpamIDONTWANT(t *testing.T) {
// Checks we received some messages
var expMid string
var actMids []string
var mu sync.Mutex
checkMsgs := func() {
mu.Lock()
defer mu.Unlock()
if len(actMids) == 0 {
t.Fatalf("Expected some messages when the maximum number of IDONTWANTs is reached")
}
@ -821,6 +825,8 @@ func TestGossipsubAttackSpamIDONTWANT(t *testing.T) {
}()
newMockGS(ctx, t, hosts[2], func(writeMsg func(*pb.RPC), irpc *pb.RPC) {
mu.Lock()
defer mu.Unlock()
// Each time the host receives a message
for _, msg := range irpc.GetPublish() {
actMids = append(actMids, msgID(msg))
@ -891,6 +897,53 @@ func TestGossipsubAttackSpamIDONTWANT(t *testing.T) {
<-ctx.Done()
}
func TestGossipsubHandleIDontwantSpam(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
hosts := getDefaultHosts(t, 2)
msgID := func(pmsg *pb.Message) string {
// silly content-based test message-ID: just use the data as a whole
return base64.URLEncoding.EncodeToString(pmsg.Data)
}
psubs := make([]*PubSub, 2)
psubs[0] = getGossipsub(ctx, hosts[0], WithMessageIdFn(msgID))
psubs[1] = getGossipsub(ctx, hosts[1], WithMessageIdFn(msgID))
connect(t, hosts[0], hosts[1])
topic := "foobar"
for _, ps := range psubs {
_, err := ps.Subscribe(topic)
if err != nil {
t.Fatal(err)
}
}
exceededIDWLength := GossipSubMaxIDontWantLength + 1
var idwIds []string
for i := 0; i < exceededIDWLength; i++ {
idwIds = append(idwIds, fmt.Sprintf("idontwant-%d", i))
}
rPid := hosts[1].ID()
ctrlMessage := &pb.ControlMessage{Idontwant: []*pb.ControlIDontWant{{MessageIDs: idwIds}}}
grt := psubs[0].rt.(*GossipSubRouter)
grt.handleIDontWant(rPid, ctrlMessage)
if grt.peerdontwant[rPid] != 1 {
t.Errorf("Wanted message count of %d but received %d", 1, grt.peerdontwant[rPid])
}
mid := fmt.Sprintf("idontwant-%d", GossipSubMaxIDontWantLength-1)
if _, ok := grt.unwanted[rPid][computeChecksum(mid)]; !ok {
t.Errorf("Desired message id was not stored in the unwanted map: %s", mid)
}
mid = fmt.Sprintf("idontwant-%d", GossipSubMaxIDontWantLength)
if _, ok := grt.unwanted[rPid][computeChecksum(mid)]; ok {
t.Errorf("Unwanted message id was stored in the unwanted map: %s", mid)
}
}
type mockGSOnRead func(writeMsg func(*pb.RPC), irpc *pb.RPC)
func newMockGS(ctx context.Context, t *testing.T, attacker host.Host, onReadMsg mockGSOnRead) {

View File

@ -8,10 +8,15 @@ import (
"fmt"
"io"
mrand "math/rand"
mrand2 "math/rand/v2"
"slices"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"testing"
"testing/quick"
"time"
pb "github.com/libp2p/go-libp2p-pubsub/pb"
@ -1984,6 +1989,27 @@ func TestGossipSubLeaveTopic(t *testing.T) {
<-done
}
// withRouter is a race-free way of accessing state from the PubSubRouter.
// It runs the callback synchronously.
func withRouter(p *PubSub, f func(r PubSubRouter)) {
done := make(chan struct{})
p.eval <- func() {
defer close(done)
router := p.rt
f(router)
}
<-done
}
// withGSRouter is a race-free way of accessing state from the GossipSubRouter.
// It runs the callback synchronously.
func withGSRouter(p *PubSub, f func(r *GossipSubRouter)) {
withRouter(p, func(r PubSubRouter) {
router := p.rt.(*GossipSubRouter)
f(router)
})
}
func TestGossipSubJoinTopic(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -1998,13 +2024,15 @@ func TestGossipSubJoinTopic(t *testing.T) {
connect(t, h[0], h[1])
connect(t, h[0], h[2])
router0 := psubs[0].rt.(*GossipSubRouter)
// Add in backoff for peer.
peerMap := make(map[peer.ID]time.Time)
peerMap[h[1].ID()] = time.Now().Add(router0.params.UnsubscribeBackoff)
withGSRouter(psubs[0], func(router0 *GossipSubRouter) {
peerMap[h[1].ID()] = time.Now().Add(router0.params.UnsubscribeBackoff)
})
router0.backoff["test"] = peerMap
withGSRouter(psubs[0], func(router0 *GossipSubRouter) {
router0.backoff["test"] = peerMap
})
// Join all peers
for _, ps := range psubs {
@ -2016,15 +2044,16 @@ func TestGossipSubJoinTopic(t *testing.T) {
time.Sleep(time.Second)
meshMap := router0.mesh["test"]
if len(meshMap) != 1 {
t.Fatalf("Unexpect peer included in the mesh")
}
_, ok := meshMap[h[1].ID()]
if ok {
t.Fatalf("Peer that was to be backed off is included in the mesh")
}
withGSRouter(psubs[0], func(router0 *GossipSubRouter) {
meshMap := router0.mesh["test"]
if len(meshMap) != 1 {
t.Fatalf("Unexpect peer included in the mesh")
}
_, ok := meshMap[h[1].ID()]
if ok {
t.Fatalf("Peer that was to be backed off is included in the mesh")
}
})
}
type sybilSquatter struct {
@ -2339,7 +2368,7 @@ func (iwe *iwantEverything) handleStream(s network.Stream) {
}
}
func validRPCSizes(slice []*RPC, limit int) bool {
func validRPCSizes(slice []RPC, limit int) bool {
for _, rpc := range slice {
if rpc.Size() > limit {
return false
@ -2349,8 +2378,8 @@ func validRPCSizes(slice []*RPC, limit int) bool {
}
func TestFragmentRPCFunction(t *testing.T) {
fragmentRPC := func(rpc *RPC, limit int) ([]*RPC, error) {
rpcs := appendOrMergeRPC(nil, limit, *rpc)
fragmentRPC := func(rpc *RPC, limit int) ([]RPC, error) {
rpcs := slices.Collect(rpc.split(limit))
if allValid := validRPCSizes(rpcs, limit); !allValid {
return rpcs, fmt.Errorf("RPC size exceeds limit")
}
@ -2369,7 +2398,7 @@ func TestFragmentRPCFunction(t *testing.T) {
return msg
}
ensureBelowLimit := func(rpcs []*RPC) {
ensureBelowLimit := func(rpcs []RPC) {
for _, r := range rpcs {
if r.Size() > limit {
t.Fatalf("expected fragmented RPC to be below %d bytes, was %d", limit, r.Size())
@ -2385,7 +2414,7 @@ func TestFragmentRPCFunction(t *testing.T) {
t.Fatal(err)
}
if len(results) != 1 {
t.Fatalf("expected single RPC if input is < limit, got %d", len(results))
t.Fatalf("expected single RPC if input is < limit, got %d %#v", len(results), results)
}
// if there's a message larger than the limit, we should fail
@ -2416,8 +2445,8 @@ func TestFragmentRPCFunction(t *testing.T) {
ensureBelowLimit(results)
msgsPerRPC := limit / msgSize
expectedRPCs := nMessages / msgsPerRPC
if len(results) != expectedRPCs {
t.Fatalf("expected %d RPC messages in output, got %d", expectedRPCs, len(results))
if len(results) > expectedRPCs+1 {
t.Fatalf("expected around %d RPC messages in output, got %d", expectedRPCs, len(results))
}
var nMessagesFragmented int
var nSubscriptions int
@ -2512,7 +2541,7 @@ func TestFragmentRPCFunction(t *testing.T) {
// Now we return the giant ID in an RPC by itself so that it can be
// dropped before actually sending the RPC. This lets us log the anomaly.
// To keep this test useful, we implement the old behavior here.
filtered := make([]*RPC, 0, len(results))
filtered := make([]RPC, 0, len(results))
for _, r := range results {
if r.Size() < limit {
filtered = append(filtered, r)
@ -2539,7 +2568,7 @@ func TestFragmentRPCFunction(t *testing.T) {
}
}
func FuzzAppendOrMergeRPC(f *testing.F) {
func FuzzRPCSplit(f *testing.F) {
minMaxMsgSize := 100
maxMaxMsgSize := 2048
f.Fuzz(func(t *testing.T, data []byte) {
@ -2548,14 +2577,102 @@ func FuzzAppendOrMergeRPC(f *testing.F) {
maxSize = minMaxMsgSize
}
rpc := generateRPC(data, maxSize)
rpcs := appendOrMergeRPC(nil, maxSize, *rpc)
if !validRPCSizes(rpcs, maxSize) {
t.Fatalf("invalid RPC size")
originalControl := compressedRPC{ihave: make(map[string][]string)}
originalControl.append(&rpc.RPC)
mergedControl := compressedRPC{ihave: make(map[string][]string)}
for rpc := range rpc.split(maxSize) {
if rpc.Size() > maxSize {
t.Fatalf("invalid RPC size %v %d (max=%d)", rpc, rpc.Size(), maxSize)
}
mergedControl.append(&rpc.RPC)
}
if !originalControl.equal(&mergedControl) {
t.Fatalf("control mismatch: \n%#v\n%#v\n", originalControl, mergedControl)
}
})
}
type compressedRPC struct {
msgs [][]byte
iwant []string
ihave map[string][]string // topic -> message IDs
idontwant []string
prune [][]byte
graft []string // list of topic
}
func (c *compressedRPC) equal(o *compressedRPC) bool {
equalBytesSlices := func(a, b [][]byte) bool {
return slices.EqualFunc(a, b, func(e1 []byte, e2 []byte) bool {
return bytes.Equal(e1, e2)
})
}
if !equalBytesSlices(c.msgs, o.msgs) {
return false
}
if !slices.Equal(c.iwant, o.iwant) ||
!slices.Equal(c.idontwant, o.idontwant) ||
!equalBytesSlices(c.prune, o.prune) ||
!slices.Equal(c.graft, o.graft) {
return false
}
if len(c.ihave) != len(o.ihave) {
return false
}
for topic, ids := range c.ihave {
if !slices.Equal(ids, o.ihave[topic]) {
return false
}
}
return true
}
func (c *compressedRPC) append(rpc *pb.RPC) {
for _, m := range rpc.Publish {
d, err := m.Marshal()
if err != nil {
panic(err)
}
c.msgs = append(c.msgs, d)
}
ctrl := rpc.Control
if ctrl == nil {
return
}
for _, iwant := range ctrl.Iwant {
c.iwant = append(c.iwant, iwant.MessageIDs...)
c.iwant = slices.DeleteFunc(c.iwant, func(e string) bool { return len(e) == 0 })
}
for _, ihave := range ctrl.Ihave {
c.ihave[*ihave.TopicID] = append(c.ihave[*ihave.TopicID], ihave.MessageIDs...)
c.ihave[*ihave.TopicID] = slices.DeleteFunc(c.ihave[*ihave.TopicID], func(e string) bool { return len(e) == 0 })
}
for _, idontwant := range ctrl.Idontwant {
c.idontwant = append(c.idontwant, idontwant.MessageIDs...)
c.idontwant = slices.DeleteFunc(c.idontwant, func(e string) bool { return len(e) == 0 })
}
for _, prune := range ctrl.Prune {
d, err := prune.Marshal()
if err != nil {
panic(err)
}
c.prune = append(c.prune, d)
}
for _, graft := range ctrl.Graft {
c.graft = append(c.graft, *graft.TopicID)
c.graft = slices.DeleteFunc(c.graft, func(e string) bool { return len(e) == 0 })
}
}
func TestGossipsubManagesAnAddressBook(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -2604,10 +2721,10 @@ func TestGossipsubIdontwantSend(t *testing.T) {
return base64.URLEncoding.EncodeToString(pmsg.Data)
}
validated := false
var validated atomic.Bool
validate := func(context.Context, peer.ID, *Message) bool {
time.Sleep(100 * time.Millisecond)
validated = true
validated.Store(true)
return true
}
@ -2705,7 +2822,7 @@ func TestGossipsubIdontwantSend(t *testing.T) {
for _, idonthave := range irpc.GetControl().GetIdontwant() {
// If true, it means that, when we get IDONTWANT, the middle peer has done validation
// already, which should not be the case
if validated {
if validated.Load() {
t.Fatalf("IDONTWANT should be sent before doing validation")
}
for _, mid := range idonthave.GetMessageIDs() {
@ -2815,6 +2932,139 @@ func TestGossipsubIdontwantReceive(t *testing.T) {
<-ctx.Done()
}
type mockRawTracer struct {
onRecvRPC func(*RPC)
}
func (m *mockRawTracer) RecvRPC(rpc *RPC) {
if m.onRecvRPC != nil {
m.onRecvRPC(rpc)
}
}
func (m *mockRawTracer) AddPeer(p peer.ID, proto protocol.ID) {}
func (m *mockRawTracer) DeliverMessage(msg *Message) {}
func (m *mockRawTracer) DropRPC(rpc *RPC, p peer.ID) {}
func (m *mockRawTracer) DuplicateMessage(msg *Message) {}
func (m *mockRawTracer) Graft(p peer.ID, topic string) {}
func (m *mockRawTracer) Join(topic string) {}
func (m *mockRawTracer) Leave(topic string) {}
func (m *mockRawTracer) Prune(p peer.ID, topic string) {}
func (m *mockRawTracer) RejectMessage(msg *Message, reason string) {}
func (m *mockRawTracer) RemovePeer(p peer.ID) {}
func (m *mockRawTracer) SendRPC(rpc *RPC, p peer.ID) {}
func (m *mockRawTracer) ThrottlePeer(p peer.ID) {}
func (m *mockRawTracer) UndeliverableMessage(msg *Message) {}
func (m *mockRawTracer) ValidateMessage(msg *Message) {}
var _ RawTracer = &mockRawTracer{}
func TestGossipsubNoIDONTWANTToMessageSender(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
hosts := getDefaultHosts(t, 2)
denseConnect(t, hosts)
psubs := make([]*PubSub, 2)
receivedIDONTWANT := make(chan struct{})
psubs[0] = getGossipsub(ctx, hosts[0], WithRawTracer(&mockRawTracer{
onRecvRPC: func(rpc *RPC) {
if len(rpc.GetControl().GetIdontwant()) > 0 {
close(receivedIDONTWANT)
}
},
}))
psubs[1] = getGossipsub(ctx, hosts[1])
topicString := "foobar"
var topics []*Topic
for _, ps := range psubs {
topic, err := ps.Join(topicString)
if err != nil {
t.Fatal(err)
}
topics = append(topics, topic)
_, err = ps.Subscribe(topicString)
if err != nil {
t.Fatal(err)
}
}
time.Sleep(time.Second)
msg := make([]byte, GossipSubIDontWantMessageThreshold+1)
topics[0].Publish(ctx, msg)
select {
case <-receivedIDONTWANT:
t.Fatal("IDONTWANT should not be sent to the message sender")
case <-time.After(time.Second):
}
}
func TestGossipsubIDONTWANTBeforeFirstPublish(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
hosts := getDefaultHosts(t, 2)
denseConnect(t, hosts)
psubs := make([]*PubSub, 2)
psubs[0] = getGossipsub(ctx, hosts[0])
rpcsReceived := make(chan string)
psubs[1] = getGossipsub(ctx, hosts[1], WithRawTracer(&mockRawTracer{
onRecvRPC: func(rpc *RPC) {
if len(rpc.GetControl().GetIdontwant()) > 0 {
rpcsReceived <- "idontwant"
}
if len(rpc.GetPublish()) > 0 {
rpcsReceived <- "publish"
}
},
}))
topicString := "foobar"
var topics []*Topic
for _, ps := range psubs {
topic, err := ps.Join(topicString)
if err != nil {
t.Fatal(err)
}
topics = append(topics, topic)
_, err = ps.Subscribe(topicString)
if err != nil {
t.Fatal(err)
}
}
time.Sleep(2 * time.Second)
msg := make([]byte, GossipSubIDontWantMessageThreshold+1)
_ = topics[0].Publish(ctx, msg)
timeout := time.After(5 * time.Second)
select {
case kind := <-rpcsReceived:
if kind == "publish" {
t.Fatal("IDONTWANT should be sent before publish")
}
case <-timeout:
t.Fatal("IDONTWANT should be sent on first publish")
}
select {
case kind := <-rpcsReceived:
if kind != "publish" {
t.Fatal("Expected publish after IDONTWANT")
}
case <-timeout:
t.Fatal("Expected publish after IDONTWANT")
}
}
// Test that non-mesh peers will not get IDONTWANT
func TestGossipsubIdontwantNonMesh(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
@ -3079,6 +3329,110 @@ func TestGossipsubIdontwantSmallMessage(t *testing.T) {
<-ctx.Done()
}
// Test that IWANT will have no effect after IDONTWANT is sent
func TestGossipsubIdontwantBeforeIwant(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
hosts := getDefaultHosts(t, 3)
msgID := func(pmsg *pb.Message) string {
// silly content-based test message-ID: just use the data as a whole
return base64.URLEncoding.EncodeToString(pmsg.Data)
}
psubs := make([]*PubSub, 2)
psubs[0] = getGossipsub(ctx, hosts[0], WithMessageIdFn(msgID))
psubs[1] = getGossipsub(ctx, hosts[1], WithMessageIdFn(msgID))
topic := "foobar"
for _, ps := range psubs {
_, err := ps.Subscribe(topic)
if err != nil {
t.Fatal(err)
}
}
// Wait a bit after the last message before checking the result
msgWaitMax := 2 * time.Second
msgTimer := time.NewTimer(msgWaitMax)
// Checks we received right messages
var msgReceived atomic.Bool
var ihaveReceived atomic.Bool
checkMsgs := func() {
if msgReceived.Load() {
t.Fatalf("Expected no messages received after IDONWANT")
}
if !ihaveReceived.Load() {
t.Fatalf("Expected IHAVE received")
}
}
// Wait for the timer to expire
go func() {
select {
case <-msgTimer.C:
checkMsgs()
cancel()
return
case <-ctx.Done():
checkMsgs()
}
}()
newMockGS(ctx, t, hosts[2], func(writeMsg func(*pb.RPC), irpc *pb.RPC) {
// Check if it receives any message
if len(irpc.GetPublish()) > 0 {
msgReceived.Store(true)
}
// The middle peer is supposed to send IHAVE
for _, ihave := range irpc.GetControl().GetIhave() {
ihaveReceived.Store(true)
mids := ihave.GetMessageIDs()
writeMsg(&pb.RPC{
Control: &pb.ControlMessage{Idontwant: []*pb.ControlIDontWant{{MessageIDs: mids}}},
})
// Wait for the middle peer to process IDONTWANT
time.Sleep(100 * time.Millisecond)
writeMsg(&pb.RPC{
Control: &pb.ControlMessage{Iwant: []*pb.ControlIWant{{MessageIDs: mids}}},
})
}
// When the middle peer connects it will send us its subscriptions
for _, sub := range irpc.GetSubscriptions() {
if sub.GetSubscribe() {
// Reply by subscribing to the topic and sending a PRUNE to the middle peer
// to make sure that it's not in the mesh
writeMsg(&pb.RPC{
Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Topicid: sub.Topicid}},
Control: &pb.ControlMessage{Prune: []*pb.ControlPrune{{TopicID: sub.Topicid}}},
})
go func() {
// Wait for an interval to make sure the middle peer
// received and processed the subscribe
time.Sleep(100 * time.Millisecond)
data := make([]byte, 16)
crand.Read(data)
// Publish the message from the first peer
if err := psubs[0].Publish(topic, data); err != nil {
t.Error(err)
return // cannot call t.Fatal in a non-test goroutine
}
}()
}
}
})
connect(t, hosts[0], hosts[1])
connect(t, hosts[1], hosts[2])
<-ctx.Done()
}
// Test that IDONTWANT will be cleared when it's old enough
func TestGossipsubIdontwantClear(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
@ -3107,9 +3461,9 @@ func TestGossipsubIdontwantClear(t *testing.T) {
msgTimer := time.NewTimer(msgWaitMax)
// Checks we received some message after the IDONTWANT is cleared
received := false
var received atomic.Bool
checkMsgs := func() {
if !received {
if !received.Load() {
t.Fatalf("Expected some message after the IDONTWANT is cleared")
}
}
@ -3129,7 +3483,7 @@ func TestGossipsubIdontwantClear(t *testing.T) {
newMockGS(ctx, t, hosts[2], func(writeMsg func(*pb.RPC), irpc *pb.RPC) {
// Check if it receives any message
if len(irpc.GetPublish()) > 0 {
received = true
received.Store(true)
}
// When the middle peer connects it will send us its subscriptions
for _, sub := range irpc.GetSubscriptions() {
@ -3175,3 +3529,360 @@ func TestGossipsubIdontwantClear(t *testing.T) {
<-ctx.Done()
}
func TestGossipsubPruneMeshCorrectly(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
hosts := getDefaultHosts(t, 9)
msgID := func(pmsg *pb.Message) string {
// silly content-based test message-ID: just use the data as a whole
return base64.URLEncoding.EncodeToString(pmsg.Data)
}
params := DefaultGossipSubParams()
params.Dhi = 8
psubs := make([]*PubSub, 9)
for i := 0; i < 9; i++ {
psubs[i] = getGossipsub(ctx, hosts[i],
WithGossipSubParams(params),
WithMessageIdFn(msgID))
}
topic := "foobar"
for _, ps := range psubs {
_, err := ps.Subscribe(topic)
if err != nil {
t.Fatal(err)
}
}
// Connect first peer with the rest of the 8 other
// peers.
for i := 1; i < 9; i++ {
connect(t, hosts[0], hosts[i])
}
// Wait for 2 heartbeats to be able to prune excess peers back down to D.
totalTimeToWait := params.HeartbeatInitialDelay + 2*params.HeartbeatInterval
time.Sleep(totalTimeToWait)
withGSRouter(psubs[0], func(rt *GossipSubRouter) {
meshPeers, ok := rt.mesh[topic]
if !ok {
t.Fatal("mesh does not exist for topic")
}
if len(meshPeers) != params.D {
t.Fatalf("mesh does not have the correct number of peers. Wanted %d but got %d", params.D, len(meshPeers))
}
})
}
func BenchmarkAllocDoDropRPC(b *testing.B) {
gs := GossipSubRouter{tracer: &pubsubTracer{}}
for i := 0; i < b.N; i++ {
gs.doDropRPC(&RPC{}, "peerID", "reason")
}
}
func TestRoundRobinMessageIDScheduler(t *testing.T) {
const maxNumPeers = 256
const maxNumMessages = 1_000
err := quick.Check(func(numPeers uint16, numMessages uint16) bool {
numPeers = numPeers % maxNumPeers
numMessages = numMessages % maxNumMessages
// convert to int first so the capacity hint doesn't overflow uint16
output := make([]pendingRPC, 0, int(numMessages)*int(numPeers))
var strategy RoundRobinMessageIDScheduler
peers := make([]peer.ID, numPeers)
for i := 0; i < int(numPeers); i++ {
peers[i] = peer.ID(fmt.Sprintf("peer%d", i))
}
getID := func(r pendingRPC) string {
return string(r.rpc.Publish[0].Data)
}
for i := range int(numMessages) {
for j := range int(numPeers) {
strategy.AddRPC(peers[j], fmt.Sprintf("msg%d", i), &RPC{
RPC: pb.RPC{
Publish: []*pb.Message{
{
Data: []byte(fmt.Sprintf("msg%d", i)),
},
},
},
})
}
}
for p, rpc := range strategy.All() {
output = append(output, pendingRPC{
peer: p,
rpc: rpc,
})
}
// Check invariants
// 1. The published rpcs count is the same as the number of messages added
// 2. Before all message IDs are seen, no message ID may be repeated
// 3. The set of message ID + peer ID combinations should be the same as the input
// 1.
expectedCount := int(numMessages) * int(numPeers)
if len(output) != expectedCount {
t.Logf("Expected %d RPCs, got %d", expectedCount, len(output))
return false
}
// 2.
seen := make(map[string]bool)
expected := make(map[string]bool)
for i := 0; i < int(numMessages); i++ {
expected[fmt.Sprintf("msg%d", i)] = true
}
for _, rpc := range output {
if expected[getID(rpc)] {
delete(expected, getID(rpc))
}
if seen[getID(rpc)] && len(expected) > 0 {
t.Logf("Message ID %s repeated before all message IDs are seen", getID(rpc))
return false
}
seen[getID(rpc)] = true
}
// 3.
inputSet := make(map[string]bool)
for i := range int(numMessages) {
for j := range int(numPeers) {
inputSet[fmt.Sprintf("msg%d:peer%d", i, j)] = true
}
}
for _, rpc := range output {
if !inputSet[getID(rpc)+":"+string(rpc.peer)] {
t.Logf("Message ID %s not in input", getID(rpc))
return false
}
}
return true
}, &quick.Config{MaxCount: 32})
if err != nil {
t.Fatal(err)
}
}
func BenchmarkRoundRobinMessageIDScheduler(b *testing.B) {
const numPeers = 1_000
const numMessages = 1_000
var strategy RoundRobinMessageIDScheduler
peers := make([]peer.ID, numPeers)
for i := range int(numPeers) {
peers[i] = peer.ID(fmt.Sprintf("peer%d", i))
}
msgs := make([]string, numMessages)
for i := range numMessages {
msgs[i] = fmt.Sprintf("msg%d", i)
}
emptyRPC := &RPC{}
b.ResetTimer()
for i := 0; i < b.N; i++ {
j := i % len(peers)
msgIdx := i % numMessages
strategy.AddRPC(peers[j], msgs[msgIdx], emptyRPC)
if i%100 == 0 {
for range strategy.All() {
}
}
}
}
func TestMessageBatchPublish(t *testing.T) {
concurrentAdds := []bool{false, true}
for _, concurrentAdd := range concurrentAdds {
t.Run(fmt.Sprintf("WithConcurrentAdd=%v", concurrentAdd), func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
hosts := getDefaultHosts(t, 20)
msgIDFn := func(msg *pb.Message) string {
hdr := string(msg.Data[0:16])
msgID := strings.SplitN(hdr, " ", 2)
return msgID[0]
}
const numMessages = 100
// +8 to account for the gossiping overhead
psubs := getGossipsubs(ctx, hosts, WithMessageIdFn(msgIDFn), WithPeerOutboundQueueSize(numMessages+8), WithValidateQueueSize(numMessages+8))
var topics []*Topic
var msgs []*Subscription
for _, ps := range psubs {
topic, err := ps.Join("foobar")
if err != nil {
t.Fatal(err)
}
topics = append(topics, topic)
subch, err := topic.Subscribe(WithBufferSize(numMessages + 8))
if err != nil {
t.Fatal(err)
}
msgs = append(msgs, subch)
}
sparseConnect(t, hosts)
// wait for heartbeats to build mesh
time.Sleep(time.Second * 2)
var batch MessageBatch
var wg sync.WaitGroup
for i := 0; i < numMessages; i++ {
msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i))
if concurrentAdd {
wg.Add(1)
go func() {
defer wg.Done()
err := topics[0].AddToBatch(ctx, &batch, msg)
if err != nil {
t.Log(err)
t.Fail()
}
}()
} else {
err := topics[0].AddToBatch(ctx, &batch, msg)
if err != nil {
t.Fatal(err)
}
}
}
wg.Wait()
err := psubs[0].PublishBatch(&batch)
if err != nil {
t.Fatal(err)
}
for range numMessages {
for _, sub := range msgs {
got, err := sub.Next(ctx)
if err != nil {
t.Fatal(err)
}
id := msgIDFn(got.Message)
expected := []byte(fmt.Sprintf("%s it's not a floooooood %s", id, id))
if !bytes.Equal(expected, got.Data) {
t.Fatal("got wrong message!")
}
}
}
})
}
}
func TestPublishDuplicateMessage(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
hosts := getDefaultHosts(t, 1)
psubs := getGossipsubs(ctx, hosts, WithMessageIdFn(func(msg *pb.Message) string {
return string(msg.Data)
}))
topic, err := psubs[0].Join("foobar")
if err != nil {
t.Fatal(err)
}
err = topic.Publish(ctx, []byte("hello"))
if err != nil {
t.Fatal(err)
}
err = topic.Publish(ctx, []byte("hello"))
if err != nil {
t.Fatal("Duplicate message should not return an error")
}
}
func genNRpcs(tb testing.TB, n int, maxSize int) []*RPC {
r := mrand2.NewChaCha8([32]byte{})
rpcs := make([]*RPC, n)
for i := range rpcs {
var data [64]byte
_, err := r.Read(data[:])
if err != nil {
tb.Fatal(err)
}
rpcs[i] = generateRPC(data[:], maxSize)
}
return rpcs
}
func BenchmarkSplitRPC(b *testing.B) {
maxSize := 2048
rpcs := genNRpcs(b, 100, maxSize)
b.ResetTimer()
for i := 0; i < b.N; i++ {
rpc := rpcs[i%len(rpcs)]
// split returns a lazy iterator; consume it so the split work is measured
for range rpc.split(maxSize) {
}
}
}
func BenchmarkSplitRPCLargeMessages(b *testing.B) {
addToRPC := func(rpc *RPC, numMsgs int, msgSize int) {
payload := make([]byte, msgSize)
for i := range numMsgs {
rpc.Publish = append(rpc.Publish, &pb.Message{
Data: payload,
From: []byte(strconv.Itoa(i)),
})
}
}
b.Run("Many large messages", func(b *testing.B) {
r := mrand.New(mrand.NewSource(99))
const numRPCs = 30
const msgSize = 50 * 1024
rpc := &RPC{}
for i := 0; i < numRPCs; i++ {
addToRPC(rpc, 20, msgSize+r.Intn(100))
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
for range rpc.split(DefaultMaxMessageSize) {
}
}
})
b.Run("2 large messages", func(b *testing.B) {
const numRPCs = 2
const msgSize = DefaultMaxMessageSize - 100
rpc := &RPC{}
for i := 0; i < numRPCs; i++ {
addToRPC(rpc, 1, msgSize)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
count := 0
for range rpc.split(DefaultMaxMessageSize) {
count++
}
if count != 2 {
b.Fatalf("expected 2 RPCs, got %d", count)
}
}
})
}

78
messagebatch.go Normal file
View File

@ -0,0 +1,78 @@
package pubsub
import (
"iter"
"sync"
"github.com/libp2p/go-libp2p/core/peer"
)
// MessageBatch allows a user to batch related messages and then publish them at
// once. This lets the scheduler define an order for outgoing RPCs, which
// helps bandwidth-constrained peers.
type MessageBatch struct {
mu sync.Mutex
messages []*Message
}
func (mb *MessageBatch) add(msg *Message) {
mb.mu.Lock()
defer mb.mu.Unlock()
mb.messages = append(mb.messages, msg)
}
func (mb *MessageBatch) take() []*Message {
mb.mu.Lock()
defer mb.mu.Unlock()
messages := mb.messages
mb.messages = nil
return messages
}
type messageBatchAndPublishOptions struct {
messages []*Message
opts *BatchPublishOptions
}
// RPCScheduler schedules outgoing RPCs.
type RPCScheduler interface {
// AddRPC adds an RPC to the scheduler.
AddRPC(peer peer.ID, msgID string, rpc *RPC)
// All returns an ordered iterator of RPCs.
All() iter.Seq2[peer.ID, *RPC]
}
type pendingRPC struct {
peer peer.ID
rpc *RPC
}
// RoundRobinMessageIDScheduler schedules outgoing RPCs in round-robin order of message IDs.
type RoundRobinMessageIDScheduler struct {
rpcs map[string][]pendingRPC
}
func (s *RoundRobinMessageIDScheduler) AddRPC(peer peer.ID, msgID string, rpc *RPC) {
if s.rpcs == nil {
s.rpcs = make(map[string][]pendingRPC)
}
s.rpcs[msgID] = append(s.rpcs[msgID], pendingRPC{peer: peer, rpc: rpc})
}
func (s *RoundRobinMessageIDScheduler) All() iter.Seq2[peer.ID, *RPC] {
return func(yield func(peer.ID, *RPC) bool) {
for len(s.rpcs) > 0 {
for msgID, rpcs := range s.rpcs {
if len(rpcs) == 0 {
delete(s.rpcs, msgID)
continue
}
if !yield(rpcs[0].peer, rpcs[0].rpc) {
return
}
s.rpcs[msgID] = rpcs[1:]
}
}
}
}
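// Illustrative sketch (not part of this change): three pending RPCs drained in
// round-robin order over message IDs. Each pass over the map yields at most
// one RPC per message ID, so no ID repeats before every pending ID has been
// seen. The peer and message IDs below are hypothetical, and map iteration
// order may vary.
//
//	var s RoundRobinMessageIDScheduler
//	s.AddRPC("peerA", "msg1", rpcA1)
//	s.AddRPC("peerB", "msg1", rpcB1)
//	s.AddRPC("peerA", "msg2", rpcA2)
//	for p, rpc := range s.All() {
//		send(p, rpc) // send is a hypothetical helper
//	}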

View File

@ -9,6 +9,7 @@ import (
// msgIDGenerator handles computing IDs for msgs
// It allows setting custom generators(MsgIdFunction) per topic
type msgIDGenerator struct {
sync.Mutex
Default MsgIdFunction
topicGensLk sync.RWMutex
@ -31,6 +32,8 @@ func (m *msgIDGenerator) Set(topic string, gen MsgIdFunction) {
// ID computes ID for the msg or short-circuits with the cached value.
func (m *msgIDGenerator) ID(msg *Message) string {
m.Lock()
defer m.Unlock()
if msg.ID != "" {
return msg.ID
}

View File

@ -5,10 +5,11 @@ package pubsub_pb
import (
fmt "fmt"
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
math_bits "math/bits"
proto "github.com/gogo/protobuf/proto"
)
// Reference imports to suppress errors if they are not otherwise used.

View File

@ -5,10 +5,11 @@ package pubsub_pb
import (
fmt "fmt"
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
math_bits "math/bits"
proto "github.com/gogo/protobuf/proto"
)
// Reference imports to suppress errors if they are not otherwise used.

255
pubsub.go
View File

@ -5,6 +5,8 @@ import (
"encoding/binary"
"errors"
"fmt"
"iter"
"math/bits"
"math/rand"
"sync"
"sync/atomic"
@ -137,6 +139,9 @@ type PubSub struct {
// sendMsg handles messages that have been validated
sendMsg chan *Message
// sendMessageBatch publishes a batch of messages
sendMessageBatch chan messageBatchAndPublishOptions
// addVal handles validator registration requests
addVal chan *addValReq
@ -204,9 +209,9 @@ type PubSubRouter interface {
// Allows routers with internal scoring to vet peers before committing any processing resources
// to the message and implement an effective graylist and react to validation queue overload.
AcceptFrom(peer.ID) AcceptStatus
// PreValidation is invoked on messages in the RPC envelope right before pushing it to
// Preprocess is invoked on the messages in the RPC envelope right before pushing them to
// the validation pipeline
PreValidation([]*Message)
Preprocess(from peer.ID, msgs []*Message)
// HandleRPC is invoked to process control messages in the RPC envelope.
// It is invoked after subscriptions and payload messages have been processed.
HandleRPC(*RPC)
@ -220,6 +225,10 @@ type PubSubRouter interface {
Leave(topic string)
}
type BatchPublisher interface {
PublishBatch(messages []*Message, opts *BatchPublishOptions)
}
type AcceptStatus int
const (
@ -251,6 +260,202 @@ type RPC struct {
from peer.ID
}
// split splits the given RPC into sub-RPCs that each fit within limit. If a
// sub-RPC is too large and can't be split further (e.g. Message data is
// bigger than the RPC limit), then it will be returned as an oversized RPC.
// The caller should filter out oversized RPCs.
func (rpc *RPC) split(limit int) iter.Seq[RPC] {
return func(yield func(RPC) bool) {
nextRPC := RPC{from: rpc.from}
{
nextRPCSize := 0
messagesInNextRPC := 0
messageSlice := rpc.Publish
// Merge/Append publish messages. This pattern is optimized compared to
// the patterns for other fields because this is the common cause for
// splitting an RPC.
for _, msg := range rpc.Publish {
// We know the message field number is <15 so this is safe.
incrementalSize := pbFieldNumberLT15Size + sizeOfEmbeddedMsg(msg.Size())
if nextRPCSize+incrementalSize > limit {
// The message doesn't fit. Let's set the messages that did fit
// into this RPC, yield it, then make a new one
nextRPC.Publish = messageSlice[:messagesInNextRPC]
messageSlice = messageSlice[messagesInNextRPC:]
if !yield(nextRPC) {
return
}
nextRPC = RPC{from: rpc.from}
nextRPCSize = 0
messagesInNextRPC = 0
}
messagesInNextRPC++
nextRPCSize += incrementalSize
}
if nextRPCSize > 0 {
// yield the message here for simplicity. We aren't optimally
// packing this RPC, but we avoid successively calling .Size()
// on the messages for the next parts.
nextRPC.Publish = messageSlice[:messagesInNextRPC]
if !yield(nextRPC) {
return
}
nextRPC = RPC{from: rpc.from}
}
}
// Fast path check. It's possible the original RPC is now small enough
// without the messages to publish
nextRPC = *rpc
nextRPC.Publish = nil
if s := nextRPC.Size(); s < limit {
if s != 0 {
yield(nextRPC)
}
return
}
// We have to split the RPC into multiple parts
nextRPC = RPC{from: rpc.from}
// Merge/Append Subscriptions
for _, sub := range rpc.Subscriptions {
if nextRPC.Subscriptions = append(nextRPC.Subscriptions, sub); nextRPC.Size() > limit {
nextRPC.Subscriptions = nextRPC.Subscriptions[:len(nextRPC.Subscriptions)-1]
if !yield(nextRPC) {
return
}
nextRPC = RPC{from: rpc.from}
nextRPC.Subscriptions = append(nextRPC.Subscriptions, sub)
}
}
// Merge/Append Control messages
if ctl := rpc.Control; ctl != nil {
if nextRPC.Control == nil {
nextRPC.Control = &pb.ControlMessage{}
if nextRPC.Size() > limit {
nextRPC.Control = nil
if !yield(nextRPC) {
return
}
nextRPC = RPC{RPC: pb.RPC{Control: &pb.ControlMessage{}}, from: rpc.from}
}
}
for _, graft := range ctl.GetGraft() {
if nextRPC.Control.Graft = append(nextRPC.Control.Graft, graft); nextRPC.Size() > limit {
nextRPC.Control.Graft = nextRPC.Control.Graft[:len(nextRPC.Control.Graft)-1]
if !yield(nextRPC) {
return
}
nextRPC = RPC{RPC: pb.RPC{Control: &pb.ControlMessage{}}, from: rpc.from}
nextRPC.Control.Graft = append(nextRPC.Control.Graft, graft)
}
}
for _, prune := range ctl.GetPrune() {
if nextRPC.Control.Prune = append(nextRPC.Control.Prune, prune); nextRPC.Size() > limit {
nextRPC.Control.Prune = nextRPC.Control.Prune[:len(nextRPC.Control.Prune)-1]
if !yield(nextRPC) {
return
}
nextRPC = RPC{RPC: pb.RPC{Control: &pb.ControlMessage{}}, from: rpc.from}
nextRPC.Control.Prune = append(nextRPC.Control.Prune, prune)
}
}
for _, iwant := range ctl.GetIwant() {
if len(nextRPC.Control.Iwant) == 0 {
// Initialize with a single IWANT.
// For IWANTs we don't need more than a single one,
// since there are no topic IDs here.
newIWant := &pb.ControlIWant{}
if nextRPC.Control.Iwant = append(nextRPC.Control.Iwant, newIWant); nextRPC.Size() > limit {
nextRPC.Control.Iwant = nextRPC.Control.Iwant[:len(nextRPC.Control.Iwant)-1]
if !yield(nextRPC) {
return
}
nextRPC = RPC{RPC: pb.RPC{Control: &pb.ControlMessage{
Iwant: []*pb.ControlIWant{newIWant},
}}, from: rpc.from}
}
}
for _, msgID := range iwant.GetMessageIDs() {
if nextRPC.Control.Iwant[0].MessageIDs = append(nextRPC.Control.Iwant[0].MessageIDs, msgID); nextRPC.Size() > limit {
nextRPC.Control.Iwant[0].MessageIDs = nextRPC.Control.Iwant[0].MessageIDs[:len(nextRPC.Control.Iwant[0].MessageIDs)-1]
if !yield(nextRPC) {
return
}
nextRPC = RPC{RPC: pb.RPC{Control: &pb.ControlMessage{
Iwant: []*pb.ControlIWant{{MessageIDs: []string{msgID}}},
}}, from: rpc.from}
}
}
}
for _, ihave := range ctl.GetIhave() {
if len(nextRPC.Control.Ihave) == 0 ||
nextRPC.Control.Ihave[len(nextRPC.Control.Ihave)-1].TopicID != ihave.TopicID {
// Start a new IHAVE if we are referencing a new topic ID
newIhave := &pb.ControlIHave{TopicID: ihave.TopicID}
if nextRPC.Control.Ihave = append(nextRPC.Control.Ihave, newIhave); nextRPC.Size() > limit {
nextRPC.Control.Ihave = nextRPC.Control.Ihave[:len(nextRPC.Control.Ihave)-1]
if !yield(nextRPC) {
return
}
nextRPC = RPC{RPC: pb.RPC{Control: &pb.ControlMessage{
Ihave: []*pb.ControlIHave{newIhave},
}}, from: rpc.from}
}
}
for _, msgID := range ihave.GetMessageIDs() {
lastIHave := nextRPC.Control.Ihave[len(nextRPC.Control.Ihave)-1]
if lastIHave.MessageIDs = append(lastIHave.MessageIDs, msgID); nextRPC.Size() > limit {
lastIHave.MessageIDs = lastIHave.MessageIDs[:len(lastIHave.MessageIDs)-1]
if !yield(nextRPC) {
return
}
nextRPC = RPC{RPC: pb.RPC{Control: &pb.ControlMessage{
Ihave: []*pb.ControlIHave{{TopicID: ihave.TopicID, MessageIDs: []string{msgID}}},
}}, from: rpc.from}
}
}
}
}
if nextRPC.Size() > 0 {
if !yield(nextRPC) {
return
}
}
}
}
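// Illustrative sketch (not part of this change): consuming split and filtering
// out oversized fragments, mirroring what sendRPC does via doDropRPC. enqueue
// is a hypothetical helper.
//
//	for fragment := range rpc.split(limit) {
//		if fragment.Size() > limit {
//			continue // a single message/control entry exceeds the limit; drop it
//		}
//		enqueue(fragment)
//	}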
// pbFieldNumberLT15Size is the number of bytes required to encode a protobuf
// field number less than or equal to 15 along with its wire type. This is 1
// byte because the protobuf encoding of field numbers is a varint encoding of:
// fieldNumber << 3 | wireType
// Refer to https://protobuf.dev/programming-guides/encoding/#structure
// for more details on the encoding of messages. You may also reference the
// concrete implementation of pb.RPC.Size()
const pbFieldNumberLT15Size = 1
func sovRpc(x uint64) (n int) {
return (bits.Len64(x) + 6) / 7
}
func sizeOfEmbeddedMsg(
msgSize int,
) int {
prefixSize := sovRpc(uint64(msgSize))
return prefixSize + msgSize
}
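// Worked example: for a message whose Size() is 300 bytes, the length prefix
// costs sovRpc(300) = 2 bytes (bits.Len64(300) = 9, and (9+6)/7 = 2), so
// sizeOfEmbeddedMsg(300) = 302, and the incremental RPC cost used by split is
// pbFieldNumberLT15Size + 302 = 303 bytes.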
type Option func(*PubSub) error
// NewPubSub returns a new PubSub management object.
@ -285,6 +490,7 @@ func NewPubSub(ctx context.Context, h host.Host, rt PubSubRouter, opts ...Option
rmTopic: make(chan *rmTopicReq),
getTopics: make(chan *topicReq),
sendMsg: make(chan *Message, 32),
sendMessageBatch: make(chan messageBatchAndPublishOptions, 1),
addVal: make(chan *addValReq),
rmVal: make(chan *rmValReq),
eval: make(chan func()),
@ -653,6 +859,9 @@ func (p *PubSub) processLoop(ctx context.Context) {
case msg := <-p.sendMsg:
p.publishMessage(msg)
case batchAndOpts := <-p.sendMessageBatch:
p.publishMessageBatch(batchAndOpts)
case req := <-p.addVal:
p.val.AddValidator(req)
@ -1117,7 +1326,7 @@ func (p *PubSub) handleIncomingRPC(rpc *RPC) {
toPush = append(toPush, msg)
}
}
p.rt.PreValidation(toPush)
p.rt.Preprocess(rpc.from, toPush)
for _, msg := range toPush {
p.pushMsg(msg)
}
@ -1232,6 +1441,15 @@ func (p *PubSub) publishMessage(msg *Message) {
}
}
func (p *PubSub) publishMessageBatch(batchAndOpts messageBatchAndPublishOptions) {
for _, msg := range batchAndOpts.messages {
p.tracer.DeliverMessage(msg)
p.notifySubs(msg)
}
// We type-checked when pushing the batch to the channel
p.rt.(BatchPublisher).PublishBatch(batchAndOpts.messages, batchAndOpts.opts)
}
type addTopicReq struct {
topic *Topic
resp chan *Topic
@ -1369,6 +1587,37 @@ func (p *PubSub) Publish(topic string, data []byte, opts ...PubOpt) error {
return t.Publish(context.TODO(), data, opts...)
}
// PublishBatch publishes a batch of messages. This only works for routers that
// implement the BatchPublisher interface.
//
// Users should make sure there is enough space in the Peer's outbound queue to
// ensure messages are not dropped. WithPeerOutboundQueueSize should be set to
// at least the expected number of batched messages per peer plus some slack to
// account for gossip messages.
//
// The default publish strategy is RoundRobinMessageIDScheduler.
func (p *PubSub) PublishBatch(batch *MessageBatch, opts ...BatchPubOpt) error {
if _, ok := p.rt.(BatchPublisher); !ok {
return fmt.Errorf("pubsub router is not a BatchPublisher")
}
publishOptions := &BatchPublishOptions{}
for _, o := range opts {
err := o(publishOptions)
if err != nil {
return err
}
}
setDefaultBatchPublishOptions(publishOptions)
p.sendMessageBatch <- messageBatchAndPublishOptions{
messages: batch.take(),
opts: publishOptions,
}
return nil
}
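// A minimal usage sketch (illustrative; assumes a joined *Topic t and its
// *PubSub ps, both hypothetical names):
//
//	var batch MessageBatch
//	if err := t.AddToBatch(ctx, &batch, []byte("payload-1")); err != nil {
//		return err
//	}
//	if err := t.AddToBatch(ctx, &batch, []byte("payload-2")); err != nil {
//		return err
//	}
//	// Uses the default RoundRobinMessageIDScheduler unless overridden.
//	if err := ps.PublishBatch(&batch); err != nil {
//		return err
//	}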
func (p *PubSub) nextSeqno() []byte {
seqno := make([]byte, 8)
counter := atomic.AddUint64(&p.counter, 1)

View File

@ -40,7 +40,9 @@ func TestPubSubRemovesBlacklistedPeer(t *testing.T) {
// Bad peer is blacklisted after it has connected.
// Calling p.BlacklistPeer directly does the right thing but we should also clean
// up the peer if it has been added to the blacklist by another means.
bl.Add(hosts[0].ID())
withRouter(psubs1, func(r PubSubRouter) {
bl.Add(hosts[0].ID())
})
_, err := psubs0.Subscribe("test")
if err != nil {

View File

@ -94,7 +94,7 @@ func (rs *RandomSubRouter) AcceptFrom(peer.ID) AcceptStatus {
return AcceptAll
}
func (rs *RandomSubRouter) PreValidation([]*Message) {}
func (rs *RandomSubRouter) Preprocess(from peer.ID, msgs []*Message) {}
func (rs *RandomSubRouter) HandleRPC(rpc *RPC) {}

View File

@ -18,6 +18,10 @@ type FirstSeenCache struct {
var _ TimeCache = (*FirstSeenCache)(nil)
func newFirstSeenCache(ttl time.Duration) *FirstSeenCache {
return newFirstSeenCacheWithSweepInterval(ttl, backgroundSweepInterval)
}
func newFirstSeenCacheWithSweepInterval(ttl time.Duration, sweepInterval time.Duration) *FirstSeenCache {
tc := &FirstSeenCache{
m: make(map[string]time.Time),
ttl: ttl,
@ -25,7 +29,7 @@ func newFirstSeenCache(ttl time.Duration) *FirstSeenCache {
ctx, done := context.WithCancel(context.Background())
tc.done = done
go background(ctx, &tc.lk, tc.m)
go background(ctx, &tc.lk, tc.m, sweepInterval)
return tc
}

View File

@ -17,9 +17,7 @@ func TestFirstSeenCacheFound(t *testing.T) {
}
func TestFirstSeenCacheExpire(t *testing.T) {
backgroundSweepInterval = time.Second
tc := newFirstSeenCache(time.Second)
tc := newFirstSeenCacheWithSweepInterval(time.Second, time.Second)
for i := 0; i < 10; i++ {
tc.Add(fmt.Sprint(i))
time.Sleep(time.Millisecond * 100)
@ -34,9 +32,7 @@ func TestFirstSeenCacheExpire(t *testing.T) {
}
func TestFirstSeenCacheNotFoundAfterExpire(t *testing.T) {
backgroundSweepInterval = time.Second
tc := newFirstSeenCache(time.Second)
tc := newFirstSeenCacheWithSweepInterval(time.Second, time.Second)
tc.Add(fmt.Sprint(0))
time.Sleep(2 * time.Second)

View File

@ -19,6 +19,10 @@ type LastSeenCache struct {
var _ TimeCache = (*LastSeenCache)(nil)
func newLastSeenCache(ttl time.Duration) *LastSeenCache {
return newLastSeenCacheWithSweepInterval(ttl, backgroundSweepInterval)
}
func newLastSeenCacheWithSweepInterval(ttl time.Duration, sweepInterval time.Duration) *LastSeenCache {
tc := &LastSeenCache{
m: make(map[string]time.Time),
ttl: ttl,
@ -26,7 +30,7 @@ func newLastSeenCache(ttl time.Duration) *LastSeenCache {
ctx, done := context.WithCancel(context.Background())
tc.done = done
go background(ctx, &tc.lk, tc.m)
go background(ctx, &tc.lk, tc.m, sweepInterval)
return tc
}

View File

@ -17,8 +17,7 @@ func TestLastSeenCacheFound(t *testing.T) {
}
func TestLastSeenCacheExpire(t *testing.T) {
backgroundSweepInterval = time.Second
tc := newLastSeenCache(time.Second)
tc := newLastSeenCacheWithSweepInterval(time.Second, time.Second)
for i := 0; i < 11; i++ {
tc.Add(fmt.Sprint(i))
time.Sleep(time.Millisecond * 100)
@ -80,9 +79,7 @@ func TestLastSeenCacheSlideForward(t *testing.T) {
}
func TestLastSeenCacheNotFoundAfterExpire(t *testing.T) {
backgroundSweepInterval = time.Second
tc := newLastSeenCache(time.Second)
tc := newLastSeenCacheWithSweepInterval(time.Second, time.Second)
tc.Add(fmt.Sprint(0))
time.Sleep(2 * time.Second)

View File

@ -6,10 +6,10 @@ import (
"time"
)
var backgroundSweepInterval = time.Minute
const backgroundSweepInterval = time.Minute
func background(ctx context.Context, lk sync.Locker, m map[string]time.Time) {
ticker := time.NewTicker(backgroundSweepInterval)
func background(ctx context.Context, lk sync.Locker, m map[string]time.Time, tickerDur time.Duration) {
ticker := time.NewTicker(tickerDur)
defer ticker.Stop()
for {

View File

@ -213,19 +213,59 @@ type RouterReady func(rt PubSubRouter, topic string) (bool, error)
type ProvideKey func() (crypto.PrivKey, peer.ID)
type PublishOptions struct {
ready RouterReady
customKey ProvideKey
local bool
ready RouterReady
customKey ProvideKey
local bool
validatorData any
}
type BatchPublishOptions struct {
Strategy RPCScheduler
}
type PubOpt func(pub *PublishOptions) error
type BatchPubOpt func(pub *BatchPublishOptions) error
func setDefaultBatchPublishOptions(opts *BatchPublishOptions) {
if opts.Strategy == nil {
opts.Strategy = &RoundRobinMessageIDScheduler{}
}
}
// Publish publishes data to topic.
func (t *Topic) Publish(ctx context.Context, data []byte, opts ...PubOpt) error {
msg, err := t.validate(ctx, data, opts...)
if err != nil {
if errors.Is(err, dupeErr{}) {
// If it was a duplicate, we return nil to indicate success.
// Semantically the message was published by us or someone else.
return nil
}
return err
}
return t.p.val.sendMsgBlocking(msg)
}
func (t *Topic) AddToBatch(ctx context.Context, batch *MessageBatch, data []byte, opts ...PubOpt) error {
msg, err := t.validate(ctx, data, opts...)
if err != nil {
if errors.Is(err, dupeErr{}) {
// If it was a duplicate, we return nil to indicate success.
// Semantically the message was published by us or someone else.
// We won't add it to the batch, since it's already been published.
return nil
}
return err
}
batch.add(msg)
return nil
}
func (t *Topic) validate(ctx context.Context, data []byte, opts ...PubOpt) (*Message, error) {
t.mux.RLock()
defer t.mux.RUnlock()
if t.closed {
return ErrTopicClosed
return nil, ErrTopicClosed
}
pid := t.p.signID
@ -235,17 +275,17 @@ func (t *Topic) Publish(ctx context.Context, data []byte, opts ...PubOpt) error
for _, opt := range opts {
err := opt(pub)
if err != nil {
return err
return nil, err
}
}
if pub.customKey != nil && !pub.local {
key, pid = pub.customKey()
if key == nil {
return ErrNilSignKey
return nil, ErrNilSignKey
}
if len(pid) == 0 {
return ErrEmptyPeerID
return nil, ErrEmptyPeerID
}
}
@ -263,7 +303,7 @@ func (t *Topic) Publish(ctx context.Context, data []byte, opts ...PubOpt) error
m.From = []byte(pid)
err := signMessage(pid, key, m)
if err != nil {
return err
return nil, err
}
}
@ -290,9 +330,9 @@ func (t *Topic) Publish(ctx context.Context, data []byte, opts ...PubOpt) error
break readyLoop
}
case <-t.p.ctx.Done():
return t.p.ctx.Err()
return nil, t.p.ctx.Err()
case <-ctx.Done():
return ctx.Err()
return nil, ctx.Err()
}
if ticker == nil {
ticker = time.NewTicker(200 * time.Millisecond)
@ -302,13 +342,27 @@ func (t *Topic) Publish(ctx context.Context, data []byte, opts ...PubOpt) error
select {
case <-ticker.C:
case <-ctx.Done():
return fmt.Errorf("router is not ready: %w", ctx.Err())
return nil, fmt.Errorf("router is not ready: %w", ctx.Err())
}
}
}
}
return t.p.val.PushLocal(&Message{m, "", t.p.host.ID(), nil, pub.local})
msg := &Message{m, "", t.p.host.ID(), pub.validatorData, pub.local}
select {
case t.p.eval <- func() {
t.p.rt.Preprocess(t.p.host.ID(), []*Message{msg})
}:
case <-t.p.ctx.Done():
return nil, t.p.ctx.Err()
case <-ctx.Done():
return nil, ctx.Err()
}
err := t.p.val.ValidateLocal(msg)
if err != nil {
return nil, err
}
return msg, nil
}
// WithReadiness returns a publishing option for only publishing when the router is ready.
@ -332,6 +386,15 @@ func WithLocalPublication(local bool) PubOpt {
}
}
// WithValidatorData returns a publishing option to set custom validator data for the message.
// This allows users to avoid deserialization of the message data when validating the message locally.
func WithValidatorData(data any) PubOpt {
return func(pub *PublishOptions) error {
pub.validatorData = data
return nil
}
}
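// Illustrative sketch (hypothetical types and names): publish with pre-decoded
// validator data so a local validator can read msg.ValidatorData instead of
// re-deserializing msg.Data.
//
//	decoded := &myPayload{Field: 42} // myPayload is a hypothetical type
//	err := topic.Publish(ctx, rawBytes, WithValidatorData(decoded))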
// WithSecretKeyAndPeerId returns a publishing option for providing a custom private key and its corresponding peer ID
// This option is useful when we want to send messages from "virtual", never-connectable peers in the network
func WithSecretKeyAndPeerId(key crypto.PrivKey, pid peer.ID) PubOpt {

View File

@ -951,6 +951,23 @@ func TestTopicPublishWithKeyInvalidParameters(t *testing.T) {
})
}
func TestTopicPublishWithContextCanceled(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
const topic = "foobar"
const numHosts = 5
hosts := getDefaultHosts(t, numHosts)
topics := getTopics(getPubsubs(ctx, hosts), topic)
cancel()
err := topics[0].Publish(ctx, []byte("buff"))
if err != context.Canceled {
t.Fatal("error should have been of type context.Canceled", err)
}
}
func TestTopicRelayPublishWithKey(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()

View File

@ -26,6 +26,12 @@ func (e ValidationError) Error() string {
return e.Reason
}
type dupeErr struct{}
func (dupeErr) Error() string {
return "duplicate message"
}
// Validator is a function that validates a message with a binary decision: accept or reject.
type Validator func(context.Context, peer.ID, *Message) bool
@ -226,10 +232,9 @@ func (v *validation) RemoveValidator(req *rmValReq) {
}
}
// PushLocal synchronously pushes a locally published message and performs applicable
// validations.
// Returns an error if validation fails
func (v *validation) PushLocal(msg *Message) error {
// ValidateLocal synchronously validates a locally published message and
// performs applicable validations. Returns an error if validation fails.
func (v *validation) ValidateLocal(msg *Message) error {
v.p.tracer.PublishMessage(msg)
err := v.p.checkSigningPolicy(msg)
@ -238,7 +243,9 @@ func (v *validation) PushLocal(msg *Message) error {
}
vals := v.getValidators(msg)
return v.validate(vals, msg.ReceivedFrom, msg, true)
return v.validate(vals, msg.ReceivedFrom, msg, true, func(msg *Message) error {
return nil
})
}
// Push pushes a message into the validation pipeline.
@ -282,15 +289,26 @@ func (v *validation) validateWorker() {
for {
select {
case req := <-v.validateQ:
v.validate(req.vals, req.src, req.msg, false)
_ = v.validate(req.vals, req.src, req.msg, false, v.sendMsgBlocking)
case <-v.p.ctx.Done():
return
}
}
}
// validate performs validation and only sends the message if all validators succeed
func (v *validation) validate(vals []*validatorImpl, src peer.ID, msg *Message, synchronous bool) error {
func (v *validation) sendMsgBlocking(msg *Message) error {
select {
case v.p.sendMsg <- msg:
return nil
case <-v.p.ctx.Done():
return v.p.ctx.Err()
}
}
// validate performs validation and only calls onValid if all validators succeed.
// If synchronous is true, onValid will be called before this function returns
// if the message is new and accepted.
func (v *validation) validate(vals []*validatorImpl, src peer.ID, msg *Message, synchronous bool, onValid func(*Message) error) error {
// If signature verification is enabled, but signing is disabled,
// the Signature is required to be nil upon receiving the message in PubSub.pushMsg.
if msg.Signature != nil {
@ -306,7 +324,7 @@ func (v *validation) validate(vals []*validatorImpl, src peer.ID, msg *Message,
id := v.p.idGen.ID(msg)
if !v.p.markSeen(id) {
v.tracer.DuplicateMessage(msg)
return nil
return dupeErr{}
} else {
v.tracer.ValidateMessage(msg)
}
@ -345,7 +363,7 @@ loop:
select {
case v.validateThrottle <- struct{}{}:
go func() {
v.doValidateTopic(async, src, msg, result)
v.doValidateTopic(async, src, msg, result, onValid)
<-v.validateThrottle
}()
default:
@ -360,13 +378,8 @@ loop:
return ValidationError{Reason: RejectValidationIgnored}
}
// no async validators, accepted message, send it!
select {
case v.p.sendMsg <- msg:
return nil
case <-v.p.ctx.Done():
return v.p.ctx.Err()
}
// no async validators, accepted message
return onValid(msg)
}
func (v *validation) validateSignature(msg *Message) bool {
@ -379,7 +392,7 @@ func (v *validation) validateSignature(msg *Message) bool {
return true
}
func (v *validation) doValidateTopic(vals []*validatorImpl, src peer.ID, msg *Message, r ValidationResult) {
func (v *validation) doValidateTopic(vals []*validatorImpl, src peer.ID, msg *Message, r ValidationResult, onValid func(*Message) error) {
result := v.validateTopic(vals, src, msg)
if result == ValidationAccept && r != ValidationAccept {
@ -388,7 +401,7 @@ func (v *validation) doValidateTopic(vals []*validatorImpl, src peer.ID, msg *Me
switch result {
case ValidationAccept:
v.p.sendMsg <- msg
_ = onValid(msg)
case ValidationReject:
log.Debugf("message validation failed; dropping message from %s", src)
v.tracer.RejectMessage(msg, RejectValidationFailed)

View File

@ -20,10 +20,23 @@ import (
pb "github.com/libp2p/go-libp2p-pubsub/pb"
)
var rng *rand.Rand
var rng *concurrentRNG
type concurrentRNG struct {
mu sync.Mutex
rng *rand.Rand
}
func (r *concurrentRNG) Intn(n int) int {
r.mu.Lock()
defer r.mu.Unlock()
return r.rng.Intn(n)
}
func init() {
rng = rand.New(rand.NewSource(314159))
rng = &concurrentRNG{
rng: rand.New(rand.NewSource(314159)),
}
}
func TestBasicSeqnoValidator1(t *testing.T) {

View File

@ -1,3 +1,3 @@
{
"version": "v0.11.0"
"version": "v0.14.2"
}