chore_: bump go-waku (#5150)

commit 9e0fb30f8d
parent 5ca1cb0a0f
@@ -8,8 +8,8 @@ import (
 
 	"github.com/libp2p/go-libp2p/core/peer"
 
-	"github.com/waku-org/go-waku/waku/v2/protocol/store"
-	storepb "github.com/waku-org/go-waku/waku/v2/protocol/store/pb"
+	"github.com/waku-org/go-waku/waku/v2/protocol/legacy_store"
+	storepb "github.com/waku-org/go-waku/waku/v2/protocol/legacy_store/pb"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/status-im/status-go/connection"
@@ -177,19 +177,19 @@ func (w *gethWakuV2Wrapper) SendMessagesRequest(peerID []byte, r types.MessagesR
 }
 
 func (w *gethWakuV2Wrapper) RequestStoreMessages(ctx context.Context, peerID []byte, r types.MessagesRequest, processEnvelopes bool) (*types.StoreRequestCursor, int, error) {
-	var options []store.HistoryRequestOption
+	var options []legacy_store.HistoryRequestOption
 
 	peer, err := peer.Decode(string(peerID))
 	if err != nil {
 		return nil, 0, err
 	}
 
-	options = []store.HistoryRequestOption{
-		store.WithPaging(false, uint64(r.Limit)),
+	options = []legacy_store.HistoryRequestOption{
+		legacy_store.WithPaging(false, uint64(r.Limit)),
 	}
 
 	if r.StoreCursor != nil {
-		options = append(options, store.WithCursor(&storepb.Index{
+		options = append(options, legacy_store.WithCursor(&storepb.Index{
 			Digest:       r.StoreCursor.Digest,
 			ReceiverTime: r.StoreCursor.ReceiverTime,
 			SenderTime:   r.StoreCursor.SenderTime,
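
The wrapper change above is mechanical: the bumped go-waku renames the Waku v2 store client package from waku/v2/protocol/store to waku/v2/protocol/legacy_store (its protobufs move to legacy_store/pb), so the bridge only swaps import paths and option constructors. Below is a minimal sketch of the new call shape, assuming only what the hunk shows (the package paths, peer.Decode, WithPaging, WithCursor, and the storepb.Index fields); the helper name buildHistoryOptions and its signature are illustrative, not part of status-go.

package sketch

import (
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/waku-org/go-waku/waku/v2/protocol/legacy_store"
	storepb "github.com/waku-org/go-waku/waku/v2/protocol/legacy_store/pb"
)

// buildHistoryOptions mirrors the updated RequestStoreMessages: it decodes the
// target store peer and assembles paging plus an optional resume cursor using
// the renamed legacy_store option constructors.
func buildHistoryOptions(peerID string, limit uint64, cursor *storepb.Index) (peer.ID, []legacy_store.HistoryRequestOption, error) {
	p, err := peer.Decode(peerID) // same decode step as in the hunk above
	if err != nil {
		return "", nil, err
	}

	options := []legacy_store.HistoryRequestOption{
		legacy_store.WithPaging(false, limit), // was store.WithPaging before the bump
	}
	if cursor != nil {
		// Resume a paged query; Index carries Digest/ReceiverTime/SenderTime,
		// the same fields copied from types.StoreRequestCursor in the diff.
		options = append(options, legacy_store.WithCursor(cursor))
	}

	// The decoded peer and the options are then handed to the node's legacy
	// store query, which is outside the lines shown here.
	return p, options, nil
}
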
go.mod (107 changed lines)

@@ -33,11 +33,11 @@ require (
 	github.com/keighl/metabolize v0.0.0-20150915210303-97ab655d4034
 	github.com/kilic/bls12-381 v0.0.0-20200607163746-32e1441c8a9f
 	github.com/lib/pq v1.10.4
-	github.com/libp2p/go-libp2p v0.29.2
-	github.com/libp2p/go-libp2p-pubsub v0.9.3
+	github.com/libp2p/go-libp2p v0.32.2
+	github.com/libp2p/go-libp2p-pubsub v0.10.1
 	github.com/lucasb-eyer/go-colorful v1.0.3
 	github.com/mat/besticon v0.0.0-20210314201728-1579f269edb7
-	github.com/multiformats/go-multiaddr v0.10.1
+	github.com/multiformats/go-multiaddr v0.12.3
 	github.com/multiformats/go-multibase v0.2.0
 	github.com/multiformats/go-multihash v0.2.3
 	github.com/multiformats/go-varint v0.0.7
@@ -52,19 +52,19 @@ require (
 	github.com/status-im/markdown v0.0.0-20240404192634-b7e33c6ac3d4
 	github.com/status-im/migrate/v4 v4.6.2-status.3
 	github.com/status-im/mvds v0.0.27-0.20240111144448-92d364e4be82
-	github.com/status-im/rendezvous v1.3.7
+	github.com/status-im/rendezvous v1.3.8-0.20240110194857-cc5be22bf83e
 	github.com/status-im/status-go/extkeys v1.1.2
 	github.com/status-im/tcp-shaker v1.1.1-status
 	github.com/status-im/zxcvbn-go v0.0.0-20220311183720-5e8676676857
-	github.com/stretchr/testify v1.8.4
+	github.com/stretchr/testify v1.9.0
 	github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a
 	github.com/tsenart/tb v0.0.0-20181025101425-0d2499c8b6e9
 	github.com/wealdtech/go-ens/v3 v3.5.0
 	github.com/wealdtech/go-multicodec v1.4.0
 	github.com/xeipuuv/gojsonschema v1.2.0
 	github.com/zenthangplus/goccm v0.0.0-20211005163543-2f2e522aca15
-	go.uber.org/zap v1.24.0
-	golang.org/x/crypto v0.12.0
+	go.uber.org/zap v1.27.0
+	golang.org/x/crypto v0.18.0
 	golang.org/x/image v0.0.0-20210220032944-ac19c3e999fb
 	google.golang.org/protobuf v1.31.0
 	gopkg.in/go-playground/assert.v1 v1.2.1 // indirect
@@ -90,20 +90,20 @@ require (
 	github.com/mutecomm/go-sqlcipher/v4 v4.4.2
 	github.com/schollz/peerdiscovery v1.7.0
 	github.com/siphiuel/lc-proxy-wrapper v0.0.0-20230516150924-246507cee8c7
-	github.com/urfave/cli/v2 v2.24.4
-	github.com/waku-org/go-waku v0.8.1-0.20240415131212-6d889ca3e2fe
+	github.com/urfave/cli/v2 v2.27.2
+	github.com/waku-org/go-waku v0.8.1-0.20240507175626-19d27befd98b
 	github.com/wk8/go-ordered-map/v2 v2.1.7
 	github.com/yeqown/go-qrcode/v2 v2.2.1
 	github.com/yeqown/go-qrcode/writer/standard v1.2.1
 	go.uber.org/multierr v1.11.0
-	golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1
-	golang.org/x/net v0.14.0
-	golang.org/x/text v0.12.0
+	golang.org/x/exp v0.0.0-20231006140011-7918f672742d
+	golang.org/x/net v0.17.0
+	golang.org/x/text v0.14.0
 	golang.org/x/time v0.0.0-20220922220347-f3bd1da661af
 )
 
 require (
-	github.com/BurntSushi/toml v1.2.1 // indirect
+	github.com/BurntSushi/toml v1.3.2 // indirect
 	github.com/PuerkitoBio/goquery v1.6.1 // indirect
 	github.com/RoaringBitmap/roaring v0.9.4 // indirect
 	github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
@@ -137,7 +137,7 @@ require (
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/containerd/cgroups v1.1.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
-	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
+	github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
 	github.com/cruxic/go-hmac-drbg v0.0.0-20170206035330-84c46983886d // indirect
 	github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
 	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
@@ -163,21 +163,21 @@ require (
 	github.com/golang/snappy v0.0.4 // indirect
 	github.com/google/btree v1.0.1 // indirect
 	github.com/google/gopacket v1.1.19 // indirect
-	github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 // indirect
+	github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b // indirect
 	github.com/gorilla/securecookie v1.1.1 // indirect
 	github.com/gorilla/websocket v1.5.0 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-bexpr v0.1.10 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
-	github.com/hashicorp/golang-lru/v2 v2.0.2 // indirect
+	github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect
 	github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
 	github.com/holiman/uint256 v1.2.0 // indirect
 	github.com/huandu/xstrings v1.3.2 // indirect
-	github.com/huin/goupnp v1.2.0 // indirect
+	github.com/huin/goupnp v1.3.0 // indirect
 	github.com/jackpal/go-nat-pmp v1.0.2 // indirect
 	github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
-	github.com/klauspost/compress v1.16.7 // indirect
+	github.com/klauspost/compress v1.17.2 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.7 // indirect
 	github.com/koron/go-ssdp v0.0.4 // indirect
 	github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
@@ -187,19 +187,20 @@ require (
 	github.com/libp2p/go-cidranger v1.1.0 // indirect
 	github.com/libp2p/go-flow-metrics v0.1.0 // indirect
 	github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect
+	github.com/libp2p/go-libp2p-mplex v0.9.0 // indirect
 	github.com/libp2p/go-mplex v0.7.0 // indirect
 	github.com/libp2p/go-msgio v0.3.0 // indirect
 	github.com/libp2p/go-nat v0.2.0 // indirect
 	github.com/libp2p/go-netroute v0.2.1 // indirect
-	github.com/libp2p/go-reuseport v0.3.0 // indirect
+	github.com/libp2p/go-reuseport v0.4.0 // indirect
 	github.com/libp2p/go-yamux/v4 v4.0.1 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
 	github.com/mattn/go-colorable v0.1.8 // indirect
-	github.com/mattn/go-isatty v0.0.19 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/mattn/go-runewidth v0.0.13 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
-	github.com/miekg/dns v1.1.55 // indirect
+	github.com/miekg/dns v1.1.56 // indirect
 	github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
 	github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
 	github.com/minio/sha256-simd v1.0.1 // indirect
@@ -212,38 +213,36 @@ require (
 	github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
 	github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
 	github.com/multiformats/go-multicodec v0.9.0 // indirect
-	github.com/multiformats/go-multistream v0.4.1 // indirect
+	github.com/multiformats/go-multistream v0.5.0 // indirect
 	github.com/olekukonko/tablewriter v0.0.5 // indirect
-	github.com/onsi/ginkgo/v2 v2.11.0 // indirect
-	github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect
+	github.com/onsi/ginkgo/v2 v2.13.0 // indirect
+	github.com/opencontainers/runtime-spec v1.1.0 // indirect
 	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
-	github.com/pion/datachannel v1.5.2 // indirect
-	github.com/pion/dtls/v2 v2.1.2 // indirect
-	github.com/pion/ice/v2 v2.1.20 // indirect
-	github.com/pion/interceptor v0.1.7 // indirect
+	github.com/pion/datachannel v1.5.5 // indirect
+	github.com/pion/dtls/v2 v2.2.7 // indirect
+	github.com/pion/ice/v2 v2.3.6 // indirect
+	github.com/pion/interceptor v0.1.17 // indirect
 	github.com/pion/logging v0.2.2 // indirect
-	github.com/pion/mdns v0.0.5 // indirect
+	github.com/pion/mdns v0.0.7 // indirect
 	github.com/pion/randutil v0.1.0 // indirect
-	github.com/pion/rtcp v1.2.9 // indirect
-	github.com/pion/rtp v1.7.4 // indirect
-	github.com/pion/sctp v1.8.2 // indirect
-	github.com/pion/sdp/v3 v3.0.4 // indirect
-	github.com/pion/srtp/v2 v2.0.5 // indirect
-	github.com/pion/stun v0.3.5 // indirect
-	github.com/pion/transport v0.13.0 // indirect
-	github.com/pion/turn/v2 v2.0.6 // indirect
-	github.com/pion/udp v0.1.1 // indirect
-	github.com/pion/webrtc/v3 v3.1.24-0.20220208053747-94262c1b2b38 // indirect
+	github.com/pion/rtcp v1.2.10 // indirect
+	github.com/pion/rtp v1.7.13 // indirect
+	github.com/pion/sctp v1.8.7 // indirect
+	github.com/pion/sdp/v3 v3.0.6 // indirect
+	github.com/pion/srtp/v2 v2.0.15 // indirect
+	github.com/pion/stun v0.6.0 // indirect
+	github.com/pion/transport/v2 v2.2.1 // indirect
+	github.com/pion/turn/v2 v2.1.0 // indirect
+	github.com/pion/webrtc/v3 v3.2.9 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/prometheus/client_model v0.4.0 // indirect
 	github.com/prometheus/common v0.42.0 // indirect
 	github.com/prometheus/procfs v0.10.1 // indirect
 	github.com/prometheus/tsdb v0.10.0 // indirect
 	github.com/quic-go/qpack v0.4.0 // indirect
-	github.com/quic-go/qtls-go1-19 v0.3.3 // indirect
-	github.com/quic-go/qtls-go1-20 v0.2.3 // indirect
-	github.com/quic-go/quic-go v0.36.4 // indirect
-	github.com/quic-go/webtransport-go v0.5.3 // indirect
+	github.com/quic-go/qtls-go1-20 v0.3.4 // indirect
+	github.com/quic-go/quic-go v0.39.4 // indirect
+	github.com/quic-go/webtransport-go v0.6.0 // indirect
 	github.com/raulk/go-watchdog v1.3.0 // indirect
 	github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
 	github.com/rivo/uniseg v0.2.0 // indirect
@@ -258,12 +257,12 @@ require (
 	github.com/spaolacci/murmur3 v1.1.0 // indirect
 	github.com/status-im/go-multiaddr-ethv4 v1.2.5 // indirect
 	github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969 // indirect
-	github.com/stretchr/objx v0.5.0 // indirect
+	github.com/stretchr/objx v0.5.2 // indirect
 	github.com/tklauser/go-sysconf v0.3.6 // indirect
 	github.com/tklauser/numcpus v0.2.2 // indirect
 	github.com/tyler-smith/go-bip39 v1.1.0 // indirect
-	github.com/waku-org/go-discover v0.0.0-20240129014929-85f2c00b96a3 // indirect
-	github.com/waku-org/go-libp2p-rendezvous v0.0.0-20230628220917-7b4e5ae4c0e7 // indirect
+	github.com/waku-org/go-discover v0.0.0-20240506173252-4912704efdc5 // indirect
+	github.com/waku-org/go-libp2p-rendezvous v0.0.0-20240110193335-a67d1cc760a0 // indirect
 	github.com/waku-org/go-zerokit-rln v0.1.14-0.20240102145250-fa738c0bdf59 // indirect
 	github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230916172309-ee0ee61dde2b // indirect
 	github.com/waku-org/go-zerokit-rln-arm v0.0.0-20230916171929-1dd9494ff065 // indirect
@@ -271,19 +270,19 @@ require (
 	github.com/wk8/go-ordered-map v1.0.0 // indirect
 	github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
-	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
+	github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect
 	github.com/yeqown/reedsolomon v1.0.0 // indirect
 	github.com/yusufpapurcu/wmi v1.2.3 // indirect
 	go.etcd.io/bbolt v1.3.6 // indirect
 	go.uber.org/atomic v1.11.0 // indirect
-	go.uber.org/dig v1.17.0 // indirect
-	go.uber.org/fx v1.20.0 // indirect
-	golang.org/x/mod v0.12.0 // indirect
-	golang.org/x/sync v0.3.0 // indirect
+	go.uber.org/dig v1.17.1 // indirect
+	go.uber.org/fx v1.20.1 // indirect
+	go.uber.org/mock v0.3.0 // indirect
+	golang.org/x/mod v0.13.0 // indirect
+	golang.org/x/sync v0.4.0 // indirect
 	golang.org/x/sys v0.18.0 // indirect
-	golang.org/x/term v0.11.0 // indirect
-	golang.org/x/tools v0.12.1-0.20230818130535-1517d1a3ba60 // indirect
-	golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
+	golang.org/x/term v0.16.0 // indirect
+	golang.org/x/tools v0.14.0 // indirect
 	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	lukechampine.com/blake3 v1.2.1 // indirect

go.sum (237 changed lines)

@ -104,8 +104,8 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ
|
|||
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
|
||||
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
|
||||
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
|
||||
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/ClickHouse/clickhouse-go v1.4.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
|
||||
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
|
||||
|
@ -632,8 +632,8 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc
|
|||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
|
@ -1023,8 +1023,8 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe
|
|||
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 h1:n6vlPhxsA+BW/XsS5+uqi7GyzaLa5MH7qlSLBZtRdiA=
|
||||
github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA=
|
||||
github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0=
|
||||
github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
|
@ -1112,8 +1112,8 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
|
|||
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.2 h1:Dwmkdr5Nc/oBiXgJS3CDHNhJtIHkuZ3DZF5twqnfBdU=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.2/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
|
||||
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
|
||||
|
@ -1135,8 +1135,8 @@ github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq
|
|||
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
|
||||
github.com/huin/goupnp v1.0.1-0.20210310174557-0ca763054c88/go.mod h1:nNs7wvRfN1eKaMknBydLNQU6146XQim8t4h+q90biWo=
|
||||
github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
|
||||
github.com/huin/goupnp v1.2.0 h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY=
|
||||
github.com/huin/goupnp v1.2.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
|
||||
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
|
||||
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
|
||||
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
|
||||
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
|
@ -1290,8 +1290,8 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY
|
|||
github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||
github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
|
||||
github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
|
||||
github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
|
||||
github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
|
||||
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
|
@ -1348,12 +1348,14 @@ github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38y
|
|||
github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic=
|
||||
github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
|
||||
github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
|
||||
github.com/libp2p/go-libp2p v0.29.2 h1:uPw/c8hOxoLP/KhFnzlc5Ejqf+OmAL1dwIsqE31WBtY=
|
||||
github.com/libp2p/go-libp2p v0.29.2/go.mod h1:OU7nSq0aEZMsV2wY8nXn1+XNNt9q2UiR8LjW3Kmp2UE=
|
||||
github.com/libp2p/go-libp2p v0.32.2 h1:s8GYN4YJzgUoyeYNPdW7JZeZ5Ee31iNaIBfGYMAY4FQ=
|
||||
github.com/libp2p/go-libp2p v0.32.2/go.mod h1:E0LKe+diV/ZVJVnOJby8VC5xzHF0660osg71skcxJvk=
|
||||
github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s=
|
||||
github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w=
|
||||
github.com/libp2p/go-libp2p-pubsub v0.9.3 h1:ihcz9oIBMaCK9kcx+yHWm3mLAFBMAUsM4ux42aikDxo=
|
||||
github.com/libp2p/go-libp2p-pubsub v0.9.3/go.mod h1:RYA7aM9jIic5VV47WXu4GkcRxRhrdElWf8xtyli+Dzc=
|
||||
github.com/libp2p/go-libp2p-mplex v0.9.0 h1:R58pDRAmuBXkYugbSSXR9wrTX3+1pFM1xP2bLuodIq8=
|
||||
github.com/libp2p/go-libp2p-mplex v0.9.0/go.mod h1:ro1i4kuwiFT+uMPbIDIFkcLs1KRbNp0QwnUXM+P64Og=
|
||||
github.com/libp2p/go-libp2p-pubsub v0.10.1 h1:/RqOZpEtAolsr8/9CC8KqROJSOZeu7lK7fPftn4MwNg=
|
||||
github.com/libp2p/go-libp2p-pubsub v0.10.1/go.mod h1:1OxbaT/pFRO5h+Dpze8hdHQ63R0ke55XTs6b6NwLLkw=
|
||||
github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
|
||||
github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU=
|
||||
github.com/libp2p/go-mplex v0.7.0 h1:BDhFZdlk5tbr0oyFq/xv/NPGfjbnrsDam1EvutpBDbY=
|
||||
|
@ -1364,8 +1366,8 @@ github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk=
|
|||
github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk=
|
||||
github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU=
|
||||
github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ=
|
||||
github.com/libp2p/go-reuseport v0.3.0 h1:iiZslO5byUYZEg9iCwJGf5h+sf1Agmqx2V2FDjPyvUw=
|
||||
github.com/libp2p/go-reuseport v0.3.0/go.mod h1:laea40AimhtfEqysZ71UpYj4S+R9VpH8PgqLo7L+SwI=
|
||||
github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
|
||||
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
|
||||
github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ=
|
||||
github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
|
||||
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
|
||||
|
@ -1426,8 +1428,8 @@ github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2y
|
|||
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
|
@ -1458,8 +1460,8 @@ github.com/meirf/gopart v0.0.0-20180520194036-37e9492a85a8/go.mod h1:Uz8uoD6o+eQ
|
|||
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
|
||||
github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo=
|
||||
github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
|
||||
github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE=
|
||||
github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY=
|
||||
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
|
||||
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
|
||||
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
|
||||
|
@ -1526,8 +1528,8 @@ github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU
|
|||
github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4=
|
||||
github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y=
|
||||
github.com/multiformats/go-multiaddr v0.3.2/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0=
|
||||
github.com/multiformats/go-multiaddr v0.10.1 h1:HghtFrWyZEPrpTvgAMFJi6gFdgHfs2cb0pyfDsk+lqU=
|
||||
github.com/multiformats/go-multiaddr v0.10.1/go.mod h1:jLEZsA61rwWNZQTHHnqq2HNa+4os/Hz54eqiRnsRqYQ=
|
||||
github.com/multiformats/go-multiaddr v0.12.3 h1:hVBXvPRcKG0w80VinQ23P5t7czWgg65BmIvQKjDydU8=
|
||||
github.com/multiformats/go-multiaddr v0.12.3/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII=
|
||||
github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A=
|
||||
github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk=
|
||||
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
|
||||
|
@ -1543,8 +1545,8 @@ github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUj
|
|||
github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg=
|
||||
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
|
||||
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
|
||||
github.com/multiformats/go-multistream v0.4.1 h1:rFy0Iiyn3YT0asivDUIR05leAdwZq3de4741sbiSdfo=
|
||||
github.com/multiformats/go-multistream v0.4.1/go.mod h1:Mz5eykRVAjJWckE2U78c6xqdtyNUEhKSM0Lwar2p77Q=
|
||||
github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE=
|
||||
github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA=
|
||||
github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
|
||||
github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
|
||||
github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
|
||||
|
@ -1602,8 +1604,8 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv
|
|||
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
||||
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
||||
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
|
||||
github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU=
|
||||
github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM=
|
||||
github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4=
|
||||
github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
|
||||
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
|
@ -1618,7 +1620,7 @@ github.com/onsi/gomega v1.11.0/go.mod h1:azGKhqFUon9Vuj0YmTfLSmx0FUwqXYSTl5re8lQ
|
|||
github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
|
||||
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
|
||||
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
|
||||
github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc=
|
||||
github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
|
||||
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
|
@ -1643,8 +1645,9 @@ github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/
|
|||
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc=
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg=
|
||||
github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
|
||||
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
|
||||
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
|
||||
|
@ -1684,34 +1687,39 @@ github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0
|
|||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pion/datachannel v1.4.21/go.mod h1:oiNyP4gHx2DIwRzX/MFyH0Rz/Gz05OgBlayAI2hAWjg=
|
||||
github.com/pion/datachannel v1.5.2 h1:piB93s8LGmbECrpO84DnkIVWasRMk3IimbcXkTQLE6E=
|
||||
github.com/pion/datachannel v1.5.2/go.mod h1:FTGQWaHrdCwIJ1rw6xBIfZVkslikjShim5yr05XFuCQ=
|
||||
github.com/pion/datachannel v1.5.5 h1:10ef4kwdjije+M9d7Xm9im2Y3O6A6ccQb0zcqZcJew8=
|
||||
github.com/pion/datachannel v1.5.5/go.mod h1:iMz+lECmfdCMqFRhXhcA/219B0SQlbpoR2V118yimL0=
|
||||
github.com/pion/dtls/v2 v2.0.1/go.mod h1:uMQkz2W0cSqY00xav7WByQ4Hb+18xeQh2oH2fRezr5U=
|
||||
github.com/pion/dtls/v2 v2.0.2/go.mod h1:27PEO3MDdaCfo21heT59/vsdmZc0zMt9wQPcSlLu/1I=
|
||||
github.com/pion/dtls/v2 v2.0.4/go.mod h1:qAkFscX0ZHoI1E07RfYPoRw3manThveu+mlTDdOxoGI=
|
||||
github.com/pion/dtls/v2 v2.0.7/go.mod h1:QuDII+8FVvk9Dp5t5vYIMTo7hh7uBkra+8QIm7QGm10=
|
||||
github.com/pion/dtls/v2 v2.0.9/go.mod h1:O0Wr7si/Zj5/EBFlDzDd6UtVxx25CE1r7XM7BQKYQho=
|
||||
github.com/pion/dtls/v2 v2.1.1/go.mod h1:qG3gA7ZPZemBqpEFqRKyURYdKEwFZQCGb7gv9T3ON3Y=
|
||||
github.com/pion/dtls/v2 v2.1.2 h1:22Q1Jk9L++Yo7BIf9130MonNPfPVb+YgdYLeyQotuAA=
|
||||
github.com/pion/dtls/v2 v2.1.2/go.mod h1:o6+WvyLDAlXF7YiPB/RlskRoeK+/JtuaZa5emwQcWus=
|
||||
github.com/pion/dtls/v2 v2.2.7 h1:cSUBsETxepsCSFSxC3mc/aDo14qQLMSL+O6IjG28yV8=
|
||||
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
|
||||
github.com/pion/ice v0.7.18/go.mod h1:+Bvnm3nYC6Nnp7VV6glUkuOfToB/AtMRZpOU8ihuf4c=
|
||||
github.com/pion/ice/v2 v2.0.15/go.mod h1:ZIiVGevpgAxF/cXiIVmuIUtCb3Xs4gCzCbXB6+nFkSI=
|
||||
github.com/pion/ice/v2 v2.1.7/go.mod h1:kV4EODVD5ux2z8XncbLHIOtcXKtYXVgLVCeVqnpoeP0=
|
||||
github.com/pion/ice/v2 v2.1.10/go.mod h1:kV4EODVD5ux2z8XncbLHIOtcXKtYXVgLVCeVqnpoeP0=
|
||||
github.com/pion/ice/v2 v2.1.12/go.mod h1:ovgYHUmwYLlRvcCLI67PnQ5YGe+upXZbGgllBDG/ktU=
|
||||
github.com/pion/ice/v2 v2.1.20 h1:xpxXyX5b4WjCh/D905gzBeW/hbJxMEPx2ptVfrhVE6M=
|
||||
github.com/pion/ice/v2 v2.1.20/go.mod h1:hEAldRzBhTtAfvlU1V/2/nLCMvveQWFKPNCop+63/Iw=
|
||||
github.com/pion/ice/v2 v2.3.6 h1:Jgqw36cAud47iD+N6rNX225uHvrgWtAlHfVyOQc3Heg=
|
||||
github.com/pion/ice/v2 v2.3.6/go.mod h1:9/TzKDRwBVAPsC+YOrKH/e3xDrubeTRACU9/sHQarsU=
|
||||
github.com/pion/interceptor v0.0.9/go.mod h1:dHgEP5dtxOTf21MObuBAjJeAayPxLUAZjerGH8Xr07c=
|
||||
github.com/pion/interceptor v0.0.12/go.mod h1:qzeuWuD/ZXvPqOnxNcnhWfkCZ2e1kwwslicyyPnhoK4=
|
||||
github.com/pion/interceptor v0.0.13/go.mod h1:svsW2QoLHLoGLUr4pDoSopGBEWk8FZwlfxId/OKRKzo=
|
||||
github.com/pion/interceptor v0.0.15/go.mod h1:pg3J253eGi5bqyKzA74+ej5Y19ez2jkWANVnF+Z9Dfk=
|
||||
github.com/pion/interceptor v0.1.7 h1:HThW0tIIKT9RRoDWGURe8rlZVOx0fJHxBHpA0ej0+bo=
|
||||
github.com/pion/interceptor v0.1.7/go.mod h1:Lh3JSl/cbJ2wP8I3ccrjh1K/deRGRn3UlSPuOTiHb6U=
|
||||
github.com/pion/interceptor v0.1.17 h1:prJtgwFh/gB8zMqGZoOgJPHivOwVAp61i2aG61Du/1w=
|
||||
github.com/pion/interceptor v0.1.17/go.mod h1:SY8kpmfVBvrbUzvj2bsXz7OJt5JvmVNZ+4Kjq7FcwrI=
|
||||
github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY=
|
||||
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
|
||||
github.com/pion/mdns v0.0.4/go.mod h1:R1sL0p50l42S5lJs91oNdUL58nm0QHrhxnSegr++qC0=
|
||||
github.com/pion/mdns v0.0.5 h1:Q2oj/JB3NqfzY9xGZ1fPzZzK7sDSD8rZPOvcIQ10BCw=
|
||||
github.com/pion/mdns v0.0.5/go.mod h1:UgssrvdD3mxpi8tMxAXbsppL3vJ4Jipw1mTCW+al01g=
|
||||
github.com/pion/mdns v0.0.7 h1:P0UB4Sr6xDWEox0kTVxF0LmQihtCbSAdW0H2nEgkA3U=
|
||||
github.com/pion/mdns v0.0.7/go.mod h1:4iP2UbeFhLI/vWju/bw6ZfwjJzk0z8DNValjGxR/dD8=
|
||||
github.com/pion/quic v0.1.1/go.mod h1:zEU51v7ru8Mp4AUBJvj6psrSth5eEFNnVQK5K48oV3k=
|
||||
github.com/pion/quic v0.1.4/go.mod h1:dBhNvkLoQqRwfi6h3Vqj3IcPLgiW7rkZxBbRdp7Vzvk=
|
||||
github.com/pion/randutil v0.0.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
|
||||
|
@ -1720,33 +1728,41 @@ github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TB
|
|||
github.com/pion/rtcp v1.2.3/go.mod h1:zGhIv0RPRF0Z1Wiij22pUt5W/c9fevqSzT4jje/oK7I=
|
||||
github.com/pion/rtcp v1.2.4/go.mod h1:52rMNPWFsjr39z9B9MhnkqhPLoeHTv1aN63o/42bWE0=
|
||||
github.com/pion/rtcp v1.2.6/go.mod h1:52rMNPWFsjr39z9B9MhnkqhPLoeHTv1aN63o/42bWE0=
|
||||
github.com/pion/rtcp v1.2.9 h1:1ujStwg++IOLIEoOiIQ2s+qBuJ1VN81KW+9pMPsif+U=
|
||||
github.com/pion/rtcp v1.2.9/go.mod h1:qVPhiCzAm4D/rxb6XzKeyZiQK69yJpbUDJSF7TgrqNo=
|
||||
github.com/pion/rtcp v1.2.10 h1:nkr3uj+8Sp97zyItdN60tE/S6vk4al5CPRR6Gejsdjc=
|
||||
github.com/pion/rtcp v1.2.10/go.mod h1:ztfEwXZNLGyF1oQDttz/ZKIBaeeg/oWbRYqzBM9TL1I=
|
||||
github.com/pion/rtp v1.6.0/go.mod h1:QgfogHsMBVE/RFNno467U/KBqfUywEH+HK+0rtnwsdI=
|
||||
github.com/pion/rtp v1.6.1/go.mod h1:bDb5n+BFZxXx0Ea7E5qe+klMuqiBrP+w8XSjiWtCUko=
|
||||
github.com/pion/rtp v1.6.2/go.mod h1:bDb5n+BFZxXx0Ea7E5qe+klMuqiBrP+w8XSjiWtCUko=
|
||||
github.com/pion/rtp v1.6.5/go.mod h1:bDb5n+BFZxXx0Ea7E5qe+klMuqiBrP+w8XSjiWtCUko=
|
||||
github.com/pion/rtp v1.7.0/go.mod h1:bDb5n+BFZxXx0Ea7E5qe+klMuqiBrP+w8XSjiWtCUko=
|
||||
github.com/pion/rtp v1.7.2/go.mod h1:bDb5n+BFZxXx0Ea7E5qe+klMuqiBrP+w8XSjiWtCUko=
|
||||
github.com/pion/rtp v1.7.4 h1:4dMbjb1SuynU5OpA3kz1zHK+u+eOCQjW3MAeVHf1ODA=
|
||||
github.com/pion/rtp v1.7.4/go.mod h1:bDb5n+BFZxXx0Ea7E5qe+klMuqiBrP+w8XSjiWtCUko=
|
||||
github.com/pion/rtp v1.7.13 h1:qcHwlmtiI50t1XivvoawdCGTP4Uiypzfrsap+bijcoA=
|
||||
github.com/pion/rtp v1.7.13/go.mod h1:bDb5n+BFZxXx0Ea7E5qe+klMuqiBrP+w8XSjiWtCUko=
|
||||
github.com/pion/sctp v1.7.10/go.mod h1:EhpTUQu1/lcK3xI+eriS6/96fWetHGCvBi9MSsnaBN0=
|
||||
github.com/pion/sctp v1.7.11/go.mod h1:EhpTUQu1/lcK3xI+eriS6/96fWetHGCvBi9MSsnaBN0=
|
||||
github.com/pion/sctp v1.7.12/go.mod h1:xFe9cLMZ5Vj6eOzpyiKjT9SwGM4KpK/8Jbw5//jc+0s=
|
||||
github.com/pion/sctp v1.8.0/go.mod h1:xFe9cLMZ5Vj6eOzpyiKjT9SwGM4KpK/8Jbw5//jc+0s=
|
||||
github.com/pion/sctp v1.8.2 h1:yBBCIrUMJ4yFICL3RIvR4eh/H2BTTvlligmSTy+3kiA=
|
||||
github.com/pion/sctp v1.8.2/go.mod h1:xFe9cLMZ5Vj6eOzpyiKjT9SwGM4KpK/8Jbw5//jc+0s=
|
||||
github.com/pion/sctp v1.8.5/go.mod h1:SUFFfDpViyKejTAdwD1d/HQsCu+V/40cCs2nZIvC3s0=
|
||||
github.com/pion/sctp v1.8.7 h1:JnABvFakZueGAn4KU/4PSKg+GWbF6QWbKTWZOSGJjXw=
|
||||
github.com/pion/sctp v1.8.7/go.mod h1:g1Ul+ARqZq5JEmoFy87Q/4CePtKnTJ1QCL9dBBdN6AU=
|
||||
github.com/pion/sdp/v2 v2.4.0/go.mod h1:L2LxrOpSTJbAns244vfPChbciR/ReU1KWfG04OpkR7E=
|
||||
github.com/pion/sdp/v3 v3.0.4 h1:2Kf+dgrzJflNCSw3TV5v2VLeI0s/qkzy2r5jlR0wzf8=
|
||||
github.com/pion/sdp/v3 v3.0.4/go.mod h1:bNiSknmJE0HYBprTHXKPQ3+JjacTv5uap92ueJZKsRk=
|
||||
github.com/pion/sdp/v3 v3.0.6 h1:WuDLhtuFUUVpTfus9ILC4HRyHsW6TdugjEX/QY9OiUw=
|
||||
github.com/pion/sdp/v3 v3.0.6/go.mod h1:iiFWFpQO8Fy3S5ldclBkpXqmWy02ns78NOKoLLL0YQw=
|
||||
github.com/pion/srtp v1.5.1/go.mod h1:B+QgX5xPeQTNc1CJStJPHzOlHK66ViMDWTT0HZTCkcA=
|
||||
github.com/pion/srtp v1.5.2/go.mod h1:NiBff/MSxUwMUwx/fRNyD/xGE+dVvf8BOCeXhjCXZ9U=
|
||||
github.com/pion/srtp/v2 v2.0.1/go.mod h1:c8NWHhhkFf/drmHTAblkdu8++lsISEBBdAuiyxgqIsE=
|
||||
github.com/pion/srtp/v2 v2.0.2/go.mod h1:VEyLv4CuxrwGY8cxM+Ng3bmVy8ckz/1t6A0q/msKOw0=
|
||||
github.com/pion/srtp/v2 v2.0.5 h1:ks3wcTvIUE/GHndO3FAvROQ9opy0uLELpwHJaQ1yqhQ=
|
||||
github.com/pion/srtp/v2 v2.0.5/go.mod h1:8k6AJlal740mrZ6WYxc4Dg6qDqqhxoRG2GSjlUhDF0A=
|
||||
github.com/pion/stun v0.3.5 h1:uLUCBCkQby4S1cf6CGuR9QrVOKcvUwFeemaC865QHDg=
|
||||
github.com/pion/srtp/v2 v2.0.15 h1:+tqRtXGsGwHC0G0IUIAzRmdkHvriF79IHVfZGfHrQoA=
|
||||
github.com/pion/srtp/v2 v2.0.15/go.mod h1:b/pQOlDrbB0HEH5EUAQXzSYxikFbNcNuKmF8tM0hCtw=
|
||||
github.com/pion/stun v0.3.5/go.mod h1:gDMim+47EeEtfWogA37n6qXZS88L5V6LqFcf+DZA2UA=
|
||||
github.com/pion/stun v0.4.0/go.mod h1:QPsh1/SbXASntw3zkkrIk3ZJVKz4saBY2G7S10P3wCw=
|
||||
github.com/pion/stun v0.6.0 h1:JHT/2iyGDPrFWE8NNC15wnddBN8KifsEDw8swQmrEmU=
|
||||
github.com/pion/stun v0.6.0/go.mod h1:HPqcfoeqQn9cuaet7AOmB5e5xkObu9DwBdurwLKO9oA=
|
||||
github.com/pion/transport v0.6.0/go.mod h1:iWZ07doqOosSLMhZ+FXUTq+TamDoXSllxpbGcfkCmbE=
|
||||
github.com/pion/transport v0.8.10/go.mod h1:tBmha/UCjpum5hqTWhfAEs3CO4/tHSg0MYRhSzR+CZ8=
|
||||
github.com/pion/transport v0.10.0/go.mod h1:BnHnUipd0rZQyTVB2SBGojFHT9CBt5C5TcsJSQGkvSE=
|
||||
|
@ -1754,21 +1770,28 @@ github.com/pion/transport v0.10.1/go.mod h1:PBis1stIILMiis0PewDw91WJeLJkyIMcEk+D
|
|||
github.com/pion/transport v0.12.1/go.mod h1:N3+vZQD9HlDP5GWkZ85LohxNsDcNgofQmyL6ojX5d8Q=
|
||||
github.com/pion/transport v0.12.2/go.mod h1:N3+vZQD9HlDP5GWkZ85LohxNsDcNgofQmyL6ojX5d8Q=
|
||||
github.com/pion/transport v0.12.3/go.mod h1:OViWW9SP2peE/HbwBvARicmAVnesphkNkCVZIWJ6q9A=
|
||||
github.com/pion/transport v0.13.0 h1:KWTA5ZrQogizzYwPEciGtHPLwpAjE91FgXnyu+Hv2uY=
|
||||
github.com/pion/transport v0.13.0/go.mod h1:yxm9uXpK9bpBBWkITk13cLo1y5/ur5VQpG22ny6EP7g=
|
||||
github.com/pion/transport v0.14.1 h1:XSM6olwW+o8J4SCmOBb/BpwZypkHeyM0PGFCxNQBr40=
|
||||
github.com/pion/transport v0.14.1/go.mod h1:4tGmbk00NeYA3rUa9+n+dzCCoKkcy3YlYb99Jn2fNnI=
|
||||
github.com/pion/transport/v2 v2.0.0/go.mod h1:HS2MEBJTwD+1ZI2eSXSvHJx/HnzQqRy2/LXxt6eVMHc=
|
||||
github.com/pion/transport/v2 v2.1.0/go.mod h1:AdSw4YBZVDkZm8fpoz+fclXyQwANWmZAlDuQdctTThQ=
|
||||
github.com/pion/transport/v2 v2.2.0/go.mod h1:AdSw4YBZVDkZm8fpoz+fclXyQwANWmZAlDuQdctTThQ=
|
||||
github.com/pion/transport/v2 v2.2.1 h1:7qYnCBlpgSJNYMbLCKuSY9KbQdBFoETvPNETv0y4N7c=
|
||||
github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g=
|
||||
github.com/pion/turn/v2 v2.0.4/go.mod h1:1812p4DcGVbYVBTiraUmP50XoKye++AMkbfp+N27mog=
|
||||
github.com/pion/turn/v2 v2.0.5/go.mod h1:APg43CFyt/14Uy7heYUOGWdkem/Wu4PhCO/bjyrTqMw=
|
||||
github.com/pion/turn/v2 v2.0.6 h1:AsXjSPR6Im15DMTB39NlfdTY9BQfieANPBjdg/aVNwY=
|
||||
github.com/pion/turn/v2 v2.0.6/go.mod h1:+y7xl719J8bAEVpSXBXvTxStjJv3hbz9YFflvkpcGPw=
|
||||
github.com/pion/turn/v2 v2.1.0 h1:5wGHSgGhJhP/RpabkUb/T9PdsAjkGLS6toYz5HNzoSI=
|
||||
github.com/pion/turn/v2 v2.1.0/go.mod h1:yrT5XbXSGX1VFSF31A3c1kCNB5bBZgk/uu5LET162qs=
|
||||
github.com/pion/udp v0.1.0/go.mod h1:BPELIjbwE9PRbd/zxI/KYBnbo7B6+oA6YuEaNE8lths=
|
||||
github.com/pion/udp v0.1.1 h1:8UAPvyqmsxK8oOjloDk4wUt63TzFe9WEJkg5lChlj7o=
|
||||
github.com/pion/udp v0.1.1/go.mod h1:6AFo+CMdKQm7UiA0eUPA8/eVCTx8jBIITLZHc9DWX5M=
|
||||
github.com/pion/webrtc/v2 v2.2.26/go.mod h1:XMZbZRNHyPDe1gzTIHFcQu02283YO45CbiwFgKvXnmc=
|
||||
github.com/pion/webrtc/v3 v3.0.11/go.mod h1:WEvXneGTeqNmiR59v5jTsxMc4yXQyOQcRsrdAbNwSEU=
|
||||
github.com/pion/webrtc/v3 v3.0.27/go.mod h1:QpLDmsU5a/a05n230gRtxZRvfHhFzn9ukGUL2x4G5ic=
|
||||
github.com/pion/webrtc/v3 v3.0.32/go.mod h1:wX3V5dQQUGCifhT1mYftC2kCrDQX6ZJ3B7Yad0R9JK0=
|
||||
github.com/pion/webrtc/v3 v3.1.24-0.20220208053747-94262c1b2b38 h1:+IEql+S+YAj3S5e7Ftl/u4xPcZGG0WwLFsyFj6NRTz4=
|
||||
github.com/pion/webrtc/v3 v3.1.24-0.20220208053747-94262c1b2b38/go.mod h1:L5S/oAhL0Fzt/rnftVQRrP80/j5jygY7XRZzWwFx6P4=
|
||||
github.com/pion/webrtc/v3 v3.2.9 h1:U8NSjQDlZZ+Iy/hg42Q/u6mhEVSXYvKrOIZiZwYTfLc=
|
||||
github.com/pion/webrtc/v3 v3.2.9/go.mod h1:gjQLMZeyN3jXBGdxGmUYCyKjOuYX/c99BDjGqmadq0A=
|
||||
github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
|
||||
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
|
@ -1847,14 +1870,12 @@ github.com/prometheus/tsdb v0.10.0 h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38i
|
|||
github.com/prometheus/tsdb v0.10.0/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4=
|
||||
github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
|
||||
github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
|
||||
github.com/quic-go/qtls-go1-19 v0.3.3 h1:wznEHvJwd+2X3PqftRha0SUKmGsnb6dfArMhy9PeJVE=
|
||||
github.com/quic-go/qtls-go1-19 v0.3.3/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI=
|
||||
github.com/quic-go/qtls-go1-20 v0.2.3 h1:m575dovXn1y2ATOb1XrRFcrv0F+EQmlowTkoraNkDPI=
|
||||
github.com/quic-go/qtls-go1-20 v0.2.3/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM=
|
||||
github.com/quic-go/quic-go v0.36.4 h1:CXn/ZLN5Vntlk53fjR+kUMC8Jt7flfQe+I5Ty5A+k0o=
|
||||
github.com/quic-go/quic-go v0.36.4/go.mod h1:qxQumdeKw5GmWs1OsTZZnOxzSI+RJWuhf1O8FN35L2o=
|
||||
github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2EptUrfOPWU=
|
||||
github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU=
|
||||
github.com/quic-go/qtls-go1-20 v0.3.4 h1:MfFAPULvst4yoMgY9QmtpYmfij/em7O8UUi+bNVm7Cg=
|
||||
github.com/quic-go/qtls-go1-20 v0.3.4/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k=
|
||||
github.com/quic-go/quic-go v0.39.4 h1:PelfiuG7wXEffUT2yceiqz5V6Pc0TA5ruOd1LcmFc1s=
|
||||
github.com/quic-go/quic-go v0.39.4/go.mod h1:T09QsDQWjLiQ74ZmacDfqZmhY/NLnw5BC40MANNNZ1Q=
|
||||
github.com/quic-go/webtransport-go v0.6.0 h1:CvNsKqc4W2HljHJnoT+rMmbRJybShZ0YPFDD3NxaZLY=
|
||||
github.com/quic-go/webtransport-go v0.6.0/go.mod h1:9KjU4AEBqEQidGHNDkZrb8CAa1abRaosM2yGOyiikEc=
|
||||
github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
|
||||
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
|
@ -2007,8 +2028,8 @@ github.com/status-im/mvds v0.0.27-0.20240111144448-92d364e4be82 h1:A7jtwOlDMUGUP
|
|||
github.com/status-im/mvds v0.0.27-0.20240111144448-92d364e4be82/go.mod h1:2fiAx0q9XYIPKYRq2B1oiO9zZESy/n4D32gWw6lMDsE=
|
||||
github.com/status-im/notify v1.0.2-status h1:x8wev0Sh8H8KAf4bVcv+L0dVHldBESOKUlqRqRY7uL8=
|
||||
github.com/status-im/notify v1.0.2-status/go.mod h1:gF3zSOrafR9DQEWSE8TjfI9NkooDxbyT4UgRGKZA0lc=
|
||||
github.com/status-im/rendezvous v1.3.7 h1:rZGWsFCjPV3MWeUkLkZSOGTAvyRf+rxx5hnEGLE4OHg=
|
||||
github.com/status-im/rendezvous v1.3.7/go.mod h1:r0vCbQJByTteMajN0f+Mcet/Vd7uAXxFPfewNpI2iXQ=
|
||||
github.com/status-im/rendezvous v1.3.8-0.20240110194857-cc5be22bf83e h1:pCOHeAYmYttXQBCn+6u01bs5d/W3XslxmplFhru4X1Y=
|
||||
github.com/status-im/rendezvous v1.3.8-0.20240110194857-cc5be22bf83e/go.mod h1:LEPENTHDBGCxXVZx6FEKNKN+tfPaIK+lmiGv1DxkJW4=
|
||||
github.com/status-im/resize v0.0.0-20201215164250-7c6d9f0d3088 h1:ClCAP2FPCvl8hGMhbUx/tq/sOu2wibztAa5jAvQEe4Q=
|
||||
github.com/status-im/resize v0.0.0-20201215164250-7c6d9f0d3088/go.mod h1:+92j1tN27DypDeBFxkg0uzkqfh1bNHTZe3Bv2PjvxpM=
|
||||
github.com/status-im/status-go/extkeys v1.1.2 h1:FSjARgDathJ3rIapJt851LsIXP9Oyuu2M2jPJKuzloU=
|
||||
|
@ -2028,8 +2049,9 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
|
|||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
|
@ -2043,8 +2065,12 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
|||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||
github.com/syncthing/syncthing v0.14.48-rc.4/go.mod h1:nw3siZwHPA6M8iSfjDCWQ402eqvEIasMQOE8nFOxy7M=
|
||||
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
|
@ -2084,8 +2110,8 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
|
|||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
|
||||
github.com/urfave/cli/v2 v2.24.4 h1:0gyJJEBYtCV87zI/x2nZCPyDxD51K6xM8SkwjHFCNEU=
|
||||
github.com/urfave/cli/v2 v2.24.4/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
|
||||
github.com/urfave/cli/v2 v2.27.2 h1:6e0H+AkS+zDckwPCUrZkKX38mRaau4nL2uipkJpbkcI=
|
||||
github.com/urfave/cli/v2 v2.27.2/go.mod h1:g0+79LmHHATl7DAcHO99smiR/T7uGLw84w8Y42x+4eM=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
|
||||
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
|
||||
|
@ -2099,12 +2125,12 @@ github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmF
|
|||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
|
||||
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
|
||||
github.com/waku-org/go-discover v0.0.0-20240129014929-85f2c00b96a3 h1:Kk0KYXZE/uNnARF2TbCQyvyZ/w4SgF8VhquNdOVVsNU=
|
||||
github.com/waku-org/go-discover v0.0.0-20240129014929-85f2c00b96a3/go.mod h1:eBHgM6T4EG0RZzxpxKy+rGz/6Dw2Nd8DWxS0lm9ESDw=
|
||||
github.com/waku-org/go-libp2p-rendezvous v0.0.0-20230628220917-7b4e5ae4c0e7 h1:0e1h+p84yBp0IN7AqgbZlV7lgFBjm214lgSOE7CeJmE=
|
||||
github.com/waku-org/go-libp2p-rendezvous v0.0.0-20230628220917-7b4e5ae4c0e7/go.mod h1:pFvOZ9YTFsW0o5zJW7a0B5tr1owAijRWJctXJ2toL04=
|
||||
github.com/waku-org/go-waku v0.8.1-0.20240415131212-6d889ca3e2fe h1:rJF7qKODzvWx03iaLbYyjvA62crnCaDqIN661aqCQ8c=
|
||||
github.com/waku-org/go-waku v0.8.1-0.20240415131212-6d889ca3e2fe/go.mod h1:RjTvkTrIwpoT1cM9HeQqwa2Q7t7WOkb3hpuB/zuZ6SM=
|
||||
github.com/waku-org/go-discover v0.0.0-20240506173252-4912704efdc5 h1:4K3IS97JryAEV8pRXB//qPcg+8bPXl/O+AOLt3FeCKc=
|
||||
github.com/waku-org/go-discover v0.0.0-20240506173252-4912704efdc5/go.mod h1:eBHgM6T4EG0RZzxpxKy+rGz/6Dw2Nd8DWxS0lm9ESDw=
|
||||
github.com/waku-org/go-libp2p-rendezvous v0.0.0-20240110193335-a67d1cc760a0 h1:R4YYx2QamhBRl/moIxkDCNW+OP7AHbyWLBygDc/xIMo=
|
||||
github.com/waku-org/go-libp2p-rendezvous v0.0.0-20240110193335-a67d1cc760a0/go.mod h1:EhZP9fee0DYjKH/IOQvoNSy1tSHp2iZadsHGphcAJgY=
|
||||
github.com/waku-org/go-waku v0.8.1-0.20240507175626-19d27befd98b h1:2NR0UCjuuAFmnkhsvlCKn7PTs4JxUjSq4s7lSWaG0ek=
|
||||
github.com/waku-org/go-waku v0.8.1-0.20240507175626-19d27befd98b/go.mod h1:yXnWChXRKTb+NhALbFysluxgSwuxeTF2rhanDJkIx+k=
|
||||
github.com/waku-org/go-zerokit-rln v0.1.14-0.20240102145250-fa738c0bdf59 h1:jisj+OCI6QydLtFq3Pyhu49wl9ytPN7oAHjMfepHDrA=
|
||||
github.com/waku-org/go-zerokit-rln v0.1.14-0.20240102145250-fa738c0bdf59/go.mod h1:1PdBdPzyTaKt3VnpAHk3zj+r9dXPFOr3IHZP9nFle6E=
|
||||
github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230916172309-ee0ee61dde2b h1:KgZVhsLkxsj5gb/FfndSCQu6VYwALrCOgYI3poR95yE=
|
||||
|
@ -2146,8 +2172,8 @@ github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQ
|
|||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
|
||||
github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw=
|
||||
github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk=
|
||||
github.com/yeqown/go-qrcode/v2 v2.2.1 h1:Jc1Q916fwC05R8C7mpWDbrT9tyLPaLLKDABoC5XBCe8=
|
||||
github.com/yeqown/go-qrcode/v2 v2.2.1/go.mod h1:2Qsk2APUCPne0TsRo40DIkI5MYnbzYKCnKGEFWrxd24=
|
||||
github.com/yeqown/go-qrcode/writer/standard v1.2.1 h1:FMRZiur5yApUIe4fqtqmcdl/XQTZAZWt2DhkPx4VIW0=
|
||||
|
@ -2226,14 +2252,16 @@ go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
|||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
||||
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI=
|
||||
go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU=
|
||||
go.uber.org/fx v1.20.0 h1:ZMC/pnRvhsthOZh9MZjMq5U8Or3mA9zBSPaLnzs3ihQ=
|
||||
go.uber.org/fx v1.20.0/go.mod h1:qCUj0btiR3/JnanEr1TYEePfSw6o/4qYJscgvzQ5Ub0=
|
||||
go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc=
|
||||
go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
|
||||
go.uber.org/fx v1.20.1 h1:zVwVQGS8zYvhh9Xxcu4w1M6ESyeMzebzj2NbSayZ4Mk=
|
||||
go.uber.org/fx v1.20.1/go.mod h1:iSYNbHf2y55acNCwCXKx7LbWb5WG1Bnue5RDXz1OREg=
|
||||
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
|
||||
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/mock v0.3.0 h1:3mUxI1No2/60yUYax92Pt8eNOEecx2D3lcXZh2NEZJo=
|
||||
go.uber.org/mock v0.3.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
||||
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
||||
|
@ -2246,8 +2274,8 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
|||
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
|
||||
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
|
||||
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
|
||||
go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
|
||||
go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
|
||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
|
||||
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
|
@ -2295,8 +2323,10 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
|
|||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
|
||||
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
||||
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
|
||||
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
|
||||
golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
|
||||
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
|
@ -2311,8 +2341,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
|||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw=
|
||||
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
|
||||
golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
|
||||
golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
|
||||
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
|
@ -2350,8 +2380,9 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
|
||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
|
||||
golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
@ -2441,9 +2472,15 @@ golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su
|
|||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
||||
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
|
||||
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
||||
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
|
||||
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
|
||||
golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
|
@ -2479,8 +2516,9 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
|
||||
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -2636,9 +2674,14 @@ golang.org/x/sys v0.0.0-20220317061510-51cd9980dadf/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
|
||||
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
|
@ -2646,9 +2689,15 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn
|
|||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
|
||||
golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0=
|
||||
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
|
||||
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
||||
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=
|
||||
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -2658,9 +2707,14 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
@ -2760,8 +2814,9 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
|||
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.12.1-0.20230818130535-1517d1a3ba60 h1:o4bs4seAAlSiZQAZbO6/RP5XBCZCooQS3Pgc0AUjWts=
|
||||
golang.org/x/tools v0.12.1-0.20230818130535-1517d1a3ba60/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
|
||||
golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
|
||||
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
@ -2769,8 +2824,6 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
|
|||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0=
|
||||
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
|
||||
gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
|
||||
gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
|
||||
|
@ -11,7 +11,7 @@ import (
"go.uber.org/zap"
"google.golang.org/protobuf/proto"

"github.com/waku-org/go-waku/waku/v2/protocol/store"
"github.com/waku-org/go-waku/waku/v2/protocol/legacy_store"

"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
@ -380,10 +380,10 @@ func (s *MessengerStoreNodeRequestSuite) ensureStoreNodeEnvelopes(contentTopic *
time.Sleep(100 * time.Millisecond)

// Directly ensure profile is available on store node
queryOptions := []store.HistoryRequestOption{
store.WithLocalQuery(),
queryOptions := []legacy_store.HistoryRequestOption{
legacy_store.WithLocalQuery(),
}
query := store.Query{
query := legacy_store.Query{
PubsubTopic: "",
ContentTopics: []string{contentTopic.ContentTopic()},
}
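The two hunks above are a mechanical rename of the go-waku store package to legacy_store. A minimal sketch of the same local-query construction against the renamed import, shown outside the test for clarity; the function and package names below are illustrative, not part of the change:

```go
package example

import (
	"github.com/waku-org/go-waku/waku/v2/protocol/legacy_store"
)

// buildLocalQuery mirrors the test code above: query the node's own archive
// for a single content topic on the default pubsub topic.
func buildLocalQuery(contentTopic string) (legacy_store.Query, []legacy_store.HistoryRequestOption) {
	opts := []legacy_store.HistoryRequestOption{
		legacy_store.WithLocalQuery(),
	}
	query := legacy_store.Query{
		PubsubTopic:   "",
		ContentTopics: []string{contentTopic},
	}
	return query, opts
}
```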

@ -37,7 +37,7 @@ func (m Mailserver) IDBytes() ([]byte, error) {
if err != nil {
return nil, err
}
return []byte(id.Pretty()), err
return []byte(id.String()), err
}

node, err := enode.ParseV4(m.Address)
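For context, id here is a libp2p peer.ID; newer go-libp2p releases deprecate Pretty() in favor of String(), which returns the same textual encoding, so the returned bytes are unchanged. A minimal sketch of the updated call (the helper name is hypothetical):

```go
package example

import "github.com/libp2p/go-libp2p/core/peer"

// peerIDBytes decodes the textual form of a peer ID and returns it as bytes,
// using String() where the old code used the now-deprecated Pretty().
func peerIDBytes(raw string) ([]byte, error) {
	id, err := peer.Decode(raw)
	if err != nil {
		return nil, err
	}
	return []byte(id.String()), nil
}
```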

@ -62,7 +62,7 @@ func (c *Client) PushReceivedMessages(filter transport.Filter, sshMessage *types
func (c *Client) PushReceivedEnvelope(envelope *v2protocol.Envelope) {
url := fmt.Sprintf("%s/received-envelope", c.serverURL)
postBody := map[string]interface{}{
"messageHash": types.EncodeHex(envelope.Hash()),
"messageHash": envelope.Hash().String(),
"sentAt": uint32(envelope.Message().GetTimestamp() / int64(time.Second)),
"pubsubTopic": envelope.PubsubTopic(),
"topic": envelope.Message().ContentTopic,
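The only change above swaps hex-encoding the raw hash bytes for the hash type's own String(). How the client ships the body is not part of this hunk; the snippet below is an assumed JSON POST, added only to show where postBody ends up:

```go
package example

import (
	"bytes"
	"encoding/json"
	"net/http"
)

// postJSON marshals a body like the postBody map above and sends it to the
// telemetry endpoint. The transport details are an assumption, not the diffed code.
func postJSON(url string, body map[string]interface{}) error {
	payload, err := json.Marshal(body)
	if err != nil {
		return err
	}
	resp, err := http.Post(url, "application/json", bytes.NewReader(payload))
	if err != nil {
		return err
	}
	return resp.Body.Close()
}
```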

@ -91,7 +91,7 @@ const (
// UnmarshalText method. See the Unmarshaler example for a demonstration with
// email addresses.
//
// ### Key mapping
// # Key mapping
//
// TOML keys can map to either keys in a Go map or field names in a Go struct.
// The special `toml` struct tag can be used to map TOML keys to struct fields
@ -248,7 +248,7 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
case reflect.Bool:
return md.unifyBool(data, rv)
case reflect.Interface:
if rv.NumMethod() > 0 { // Only support empty interfaces are supported.
if rv.NumMethod() > 0 { /// Only empty interfaces are supported.
return md.e("unsupported type %s", rv.Type())
}
return md.unifyAnything(data, rv)

@ -5,17 +5,25 @@ import (
"io"
)

// TextMarshaler is an alias for encoding.TextMarshaler.
//
// Deprecated: use encoding.TextMarshaler
type TextMarshaler encoding.TextMarshaler

// TextUnmarshaler is an alias for encoding.TextUnmarshaler.
//
// Deprecated: use encoding.TextUnmarshaler
type TextUnmarshaler encoding.TextUnmarshaler

// PrimitiveDecode is an alias for MetaData.PrimitiveDecode().
//
// Deprecated: use MetaData.PrimitiveDecode.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
md := MetaData{decoded: make(map[string]struct{})}
return md.unify(primValue.undecoded, rvalue(v))
}

// DecodeReader is an alias for NewDecoder(r).Decode(v).
//
// Deprecated: use NewDecoder(reader).Decode(&value).
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) }
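The deprecation notices above name their replacements directly; a short migration sketch, assuming the vendored package is imported as github.com/BurntSushi/toml:

```go
package example

import (
	"io"

	"github.com/BurntSushi/toml"
)

type config struct {
	Name string `toml:"name"`
}

// decode shows the preferred replacement for the deprecated DecodeReader.
func decode(r io.Reader) (config, error) {
	var c config
	// Old form: _, err := toml.DecodeReader(r, &c)
	_, err := toml.NewDecoder(r).Decode(&c)
	return c, err
}
```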
@ -136,7 +136,8 @@ func NewEncoder(w io.Writer) *Encoder {
|
|||
// document.
|
||||
func (enc *Encoder) Encode(v interface{}) error {
|
||||
rv := eindirect(reflect.ValueOf(v))
|
||||
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
|
||||
err := enc.safeEncode(Key([]string{}), rv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return enc.w.Flush()
|
||||
|
@ -457,6 +458,16 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
|||
|
||||
frv := eindirect(rv.Field(i))
|
||||
|
||||
if is32Bit {
|
||||
// Copy so it works correct on 32bit archs; not clear why this
|
||||
// is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
|
||||
// This also works fine on 64bit, but 32bit archs are somewhat
|
||||
// rare and this is a wee bit faster.
|
||||
copyStart := make([]int, len(start))
|
||||
copy(copyStart, start)
|
||||
start = copyStart
|
||||
}
|
||||
|
||||
// Treat anonymous struct fields with tag names as though they are
|
||||
// not anonymous, like encoding/json does.
|
||||
//
|
||||
|
@ -470,44 +481,37 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
|||
|
||||
if typeIsTable(tomlTypeOfGo(frv)) {
|
||||
fieldsSub = append(fieldsSub, append(start, f.Index...))
|
||||
} else {
|
||||
// Copy so it works correct on 32bit archs; not clear why this
|
||||
// is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
|
||||
// This also works fine on 64bit, but 32bit archs are somewhat
|
||||
// rare and this is a wee bit faster.
|
||||
if is32Bit {
|
||||
copyStart := make([]int, len(start))
|
||||
copy(copyStart, start)
|
||||
fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...))
|
||||
} else {
|
||||
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
addFields(rt, rv, nil)
|
||||
|
||||
writeFields := func(fields [][]int) {
|
||||
for _, fieldIndex := range fields {
|
||||
fieldType := rt.FieldByIndex(fieldIndex)
|
||||
fieldVal := eindirect(rv.FieldByIndex(fieldIndex))
|
||||
|
||||
if isNil(fieldVal) { /// Don't write anything for nil fields.
|
||||
continue
|
||||
}
|
||||
fieldVal := rv.FieldByIndex(fieldIndex)
|
||||
|
||||
opts := getOptions(fieldType.Tag)
|
||||
if opts.skip {
|
||||
continue
|
||||
}
|
||||
if opts.omitempty && isEmpty(fieldVal) {
|
||||
continue
|
||||
}
|
||||
|
||||
fieldVal = eindirect(fieldVal)
|
||||
|
||||
if isNil(fieldVal) { /// Don't write anything for nil fields.
|
||||
continue
|
||||
}
|
||||
|
||||
keyName := fieldType.Name
|
||||
if opts.name != "" {
|
||||
keyName = opts.name
|
||||
}
|
||||
|
||||
if opts.omitempty && enc.isEmpty(fieldVal) {
|
||||
continue
|
||||
}
|
||||
if opts.omitzero && isZero(fieldVal) {
|
||||
continue
|
||||
}
|
||||
|
@ -649,7 +653,7 @@ func isZero(rv reflect.Value) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
func (enc *Encoder) isEmpty(rv reflect.Value) bool {
|
||||
func isEmpty(rv reflect.Value) bool {
|
||||
switch rv.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
|
||||
return rv.Len() == 0
|
||||
|
@ -664,13 +668,15 @@ func (enc *Encoder) isEmpty(rv reflect.Value) bool {
|
|||
// type b struct{ s []string }
|
||||
// s := a{field: b{s: []string{"AAA"}}}
|
||||
for i := 0; i < rv.NumField(); i++ {
|
||||
if !enc.isEmpty(rv.Field(i)) {
|
||||
if !isEmpty(rv.Field(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.Bool:
|
||||
return !rv.Bool()
|
||||
case reflect.Ptr:
|
||||
return rv.IsNil()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
@ -693,8 +699,11 @@ func (enc *Encoder) newline() {
|
|||
// v v v v vv
|
||||
// key = {k = 1, k2 = 2}
|
||||
func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
|
||||
/// Marshaler used on top-level document; call eElement() to just call
|
||||
/// Marshal{TOML,Text}.
|
||||
if len(key) == 0 {
|
||||
encPanic(errNoKey)
|
||||
enc.eElement(val)
|
||||
return
|
||||
}
|
||||
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
|
||||
enc.eElement(val)
|
||||
|
|
|

@ -84,7 +84,7 @@ func (pe ParseError) Error() string {
pe.Position.Line, pe.LastKey, msg)
}

// ErrorWithUsage() returns the error with detailed location context.
// ErrorWithPosition returns the error with detailed location context.
//
// See the documentation on [ParseError].
func (pe ParseError) ErrorWithPosition() string {

@ -124,7 +124,7 @@ func (pe ParseError) ErrorWithPosition() string {
return b.String()
}

// ErrorWithUsage() returns the error with detailed location context and usage
// ErrorWithUsage returns the error with detailed location context and usage
// guidance.
//
// See the documentation on [ParseError].
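These two hunks only fix the doc comments to match the method names. For callers the methods behave as documented; a small, hedged usage sketch with the same assumed import path as above:

```go
package example

import (
	"errors"
	"fmt"

	"github.com/BurntSushi/toml"
)

// report prints the richer, position-annotated message when a decode error
// turns out to be a toml.ParseError.
func report(err error) {
	var pErr toml.ParseError
	if errors.As(err, &pErr) {
		fmt.Println(pErr.ErrorWithPosition())
		return
	}
	fmt.Println(err)
}
```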
@ -52,6 +52,7 @@ type lexer struct {
|
|||
line int
|
||||
state stateFn
|
||||
items chan item
|
||||
tomlNext bool
|
||||
|
||||
// Allow for backing up up to 4 runes. This is necessary because TOML
|
||||
// contains 3-rune tokens (""" and ''').
|
||||
|
@ -87,13 +88,14 @@ func (lx *lexer) nextItem() item {
|
|||
}
|
||||
}
|
||||
|
||||
func lex(input string) *lexer {
|
||||
func lex(input string, tomlNext bool) *lexer {
|
||||
lx := &lexer{
|
||||
input: input,
|
||||
state: lexTop,
|
||||
items: make(chan item, 10),
|
||||
stack: make([]stateFn, 0, 10),
|
||||
line: 1,
|
||||
tomlNext: tomlNext,
|
||||
}
|
||||
return lx
|
||||
}
|
||||
|
@ -408,7 +410,7 @@ func lexTableNameEnd(lx *lexer) stateFn {
|
|||
// Lexes only one part, e.g. only 'a' inside 'a.b'.
|
||||
func lexBareName(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isBareKeyChar(r) {
|
||||
if isBareKeyChar(r, lx.tomlNext) {
|
||||
return lexBareName
|
||||
}
|
||||
lx.backup()
|
||||
|
@ -618,6 +620,9 @@ func lexInlineTableValue(lx *lexer) stateFn {
|
|||
case isWhitespace(r):
|
||||
return lexSkip(lx, lexInlineTableValue)
|
||||
case isNL(r):
|
||||
if lx.tomlNext {
|
||||
return lexSkip(lx, lexInlineTableValue)
|
||||
}
|
||||
return lx.errorPrevLine(errLexInlineTableNL{})
|
||||
case r == '#':
|
||||
lx.push(lexInlineTableValue)
|
||||
|
@ -640,6 +645,9 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
|
|||
case isWhitespace(r):
|
||||
return lexSkip(lx, lexInlineTableValueEnd)
|
||||
case isNL(r):
|
||||
if lx.tomlNext {
|
||||
return lexSkip(lx, lexInlineTableValueEnd)
|
||||
}
|
||||
return lx.errorPrevLine(errLexInlineTableNL{})
|
||||
case r == '#':
|
||||
lx.push(lexInlineTableValueEnd)
|
||||
|
@ -648,6 +656,9 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
|
|||
lx.ignore()
|
||||
lx.skip(isWhitespace)
|
||||
if lx.peek() == '}' {
|
||||
if lx.tomlNext {
|
||||
return lexInlineTableValueEnd
|
||||
}
|
||||
return lx.errorf("trailing comma not allowed in inline tables")
|
||||
}
|
||||
return lexInlineTableValue
|
||||
|
@ -770,8 +781,8 @@ func lexRawString(lx *lexer) stateFn {
|
|||
}
|
||||
}
|
||||
|
||||
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
|
||||
// a string. It assumes that the beginning ''' has already been consumed and
|
||||
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such a
|
||||
// string. It assumes that the beginning triple-' has already been consumed and
|
||||
// ignored.
|
||||
func lexMultilineRawString(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
|
@ -828,6 +839,11 @@ func lexMultilineStringEscape(lx *lexer) stateFn {
|
|||
func lexStringEscape(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch r {
|
||||
case 'e':
|
||||
if !lx.tomlNext {
|
||||
return lx.error(errLexEscape{r})
|
||||
}
|
||||
fallthrough
|
||||
case 'b':
|
||||
fallthrough
|
||||
case 't':
|
||||
|
@ -846,6 +862,11 @@ func lexStringEscape(lx *lexer) stateFn {
|
|||
fallthrough
|
||||
case '\\':
|
||||
return lx.pop()
|
||||
case 'x':
|
||||
if !lx.tomlNext {
|
||||
return lx.error(errLexEscape{r})
|
||||
}
|
||||
return lexHexEscape
|
||||
case 'u':
|
||||
return lexShortUnicodeEscape
|
||||
case 'U':
|
||||
|
@ -854,6 +875,19 @@ func lexStringEscape(lx *lexer) stateFn {
|
|||
return lx.error(errLexEscape{r})
|
||||
}
|
||||
|
||||
func lexHexEscape(lx *lexer) stateFn {
|
||||
var r rune
|
||||
for i := 0; i < 2; i++ {
|
||||
r = lx.next()
|
||||
if !isHexadecimal(r) {
|
||||
return lx.errorf(
|
||||
`expected two hexadecimal digits after '\x', but got %q instead`,
|
||||
lx.current())
|
||||
}
|
||||
}
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
func lexShortUnicodeEscape(lx *lexer) stateFn {
|
||||
var r rune
|
||||
for i := 0; i < 4; i++ {
|
||||
|
@ -1225,7 +1259,23 @@ func isOctal(r rune) bool { return r >= '0' && r <= '7' }
|
|||
func isHexadecimal(r rune) bool {
|
||||
return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F')
|
||||
}
|
||||
func isBareKeyChar(r rune) bool {
|
||||
|
||||
func isBareKeyChar(r rune, tomlNext bool) bool {
|
||||
if tomlNext {
|
||||
return (r >= 'A' && r <= 'Z') ||
|
||||
(r >= 'a' && r <= 'z') ||
|
||||
(r >= '0' && r <= '9') ||
|
||||
r == '_' || r == '-' ||
|
||||
r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) ||
|
||||
(r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) ||
|
||||
(r >= 0x037f && r <= 0x1fff) ||
|
||||
(r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) ||
|
||||
(r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) ||
|
||||
(r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) ||
|
||||
(r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) ||
|
||||
(r >= 0x10000 && r <= 0xeffff)
|
||||
}
|
||||
|
||||
return (r >= 'A' && r <= 'Z') ||
|
||||
(r >= 'a' && r <= 'z') ||
|
||||
(r >= '0' && r <= '9') ||
|
||||
|
|
|
@ -106,7 +106,7 @@ func (k Key) maybeQuoted(i int) string {
|
|||
return `""`
|
||||
}
|
||||
for _, c := range k[i] {
|
||||
if !isBareKeyChar(c) {
|
||||
if !isBareKeyChar(c, false) {
|
||||
return `"` + dblQuotedReplacer.Replace(k[i]) + `"`
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2,6 +2,7 @@ package toml
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
@ -15,6 +16,7 @@ type parser struct {
|
|||
context Key // Full key for the current hash in scope.
|
||||
currentKey string // Base key name for everything except hashes.
|
||||
pos Position // Current position in the TOML file.
|
||||
tomlNext bool
|
||||
|
||||
ordered []Key // List of keys in the order that they appear in the TOML data.
|
||||
|
||||
|
@ -29,6 +31,8 @@ type keyInfo struct {
}

func parse(data string) (p *parser, err error) {
_, tomlNext := os.LookupEnv("BURNTSUSHI_TOML_110")

defer func() {
if r := recover(); r != nil {
if pErr, ok := r.(ParseError); ok {
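parse() reads BURNTSUSHI_TOML_110 once per call, so the upcoming TOML 1.1 behaviour (the tomlNext paths threaded through the lexer and parser below) is opt-in via the environment. Illustrative only, assuming the standard BurntSushi/toml import path:

```go
package example

import (
	"os"

	"github.com/BurntSushi/toml"
)

// decodeWithTOML11 opts in to the vendored parser's TOML 1.1 code paths
// before decoding; parse() only checks that the variable is present.
func decodeWithTOML11(doc string, v interface{}) error {
	if err := os.Setenv("BURNTSUSHI_TOML_110", "1"); err != nil {
		return err
	}
	_, err := toml.Decode(doc, v)
	return err
}
```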
@ -41,9 +45,12 @@ func parse(data string) (p *parser, err error) {
|
|||
}()
|
||||
|
||||
// Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString()
|
||||
// which mangles stuff.
|
||||
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") {
|
||||
// which mangles stuff. UTF-16 BOM isn't strictly valid, but some tools add
|
||||
// it anyway.
|
||||
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16
|
||||
data = data[2:]
|
||||
} else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8
|
||||
data = data[3:]
|
||||
}
|
||||
|
||||
// Examine first few bytes for NULL bytes; this probably means it's a UTF-16
|
||||
|
@ -65,9 +72,10 @@ func parse(data string) (p *parser, err error) {
|
|||
p = &parser{
|
||||
keyInfo: make(map[string]keyInfo),
|
||||
mapping: make(map[string]interface{}),
|
||||
lx: lex(data),
|
||||
lx: lex(data, tomlNext),
|
||||
ordered: make([]Key, 0),
|
||||
implicits: make(map[string]struct{}),
|
||||
tomlNext: tomlNext,
|
||||
}
|
||||
for {
|
||||
item := p.next()
|
||||
|
@ -194,12 +202,12 @@ func (p *parser) topLevel(item item) {
|
|||
for i := range context {
|
||||
p.addImplicitContext(append(p.context, context[i:i+1]...))
|
||||
}
|
||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
||||
|
||||
/// Set value.
|
||||
vItem := p.next()
|
||||
val, typ := p.value(vItem, false)
|
||||
p.set(p.currentKey, val, typ, vItem.pos)
|
||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
||||
|
||||
/// Remove the context we added (preserving any context from [tbl] lines).
|
||||
p.context = outerContext
|
||||
|
@ -236,7 +244,7 @@ func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
|
|||
case itemString:
|
||||
return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it)
|
||||
case itemMultilineString:
|
||||
return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
|
||||
return p.replaceEscapes(it, p.stripEscapedNewlines(stripFirstNewline(it.val))), p.typeOfPrimitive(it)
|
||||
case itemRawString:
|
||||
return it.val, p.typeOfPrimitive(it)
|
||||
case itemRawMultilineString:
|
||||
|
@ -331,11 +339,17 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) {
|
|||
var dtTypes = []struct {
|
||||
fmt string
|
||||
zone *time.Location
|
||||
next bool
|
||||
}{
|
||||
{time.RFC3339Nano, time.Local},
|
||||
{"2006-01-02T15:04:05.999999999", internal.LocalDatetime},
|
||||
{"2006-01-02", internal.LocalDate},
|
||||
{"15:04:05.999999999", internal.LocalTime},
|
||||
{time.RFC3339Nano, time.Local, false},
|
||||
{"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false},
|
||||
{"2006-01-02", internal.LocalDate, false},
|
||||
{"15:04:05.999999999", internal.LocalTime, false},
|
||||
|
||||
// tomlNext
|
||||
{"2006-01-02T15:04Z07:00", time.Local, true},
|
||||
{"2006-01-02T15:04", internal.LocalDatetime, true},
|
||||
{"15:04", internal.LocalTime, true},
|
||||
}
|
||||
|
||||
func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
|
||||
|
@ -346,6 +360,9 @@ func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
|
|||
err error
|
||||
)
|
||||
for _, dt := range dtTypes {
|
||||
if dt.next && !p.tomlNext {
|
||||
continue
|
||||
}
|
||||
t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone)
|
||||
if err == nil {
|
||||
ok = true
|
||||
|
@ -384,6 +401,7 @@ func (p *parser) valueArray(it item) (interface{}, tomlType) {
|
|||
//
|
||||
// Not entirely sure how to best store this; could use "key[0]",
|
||||
// "key[1]" notation, or maybe store it on the Array type?
|
||||
_ = types
|
||||
}
|
||||
return array, tomlArray
|
||||
}
|
||||
|
@ -426,11 +444,11 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom
|
|||
for i := range context {
|
||||
p.addImplicitContext(append(p.context, context[i:i+1]...))
|
||||
}
|
||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
||||
|
||||
/// Set the value.
|
||||
val, typ := p.value(p.next(), false)
|
||||
p.set(p.currentKey, val, typ, it.pos)
|
||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
||||
hash[p.currentKey] = val
|
||||
|
||||
/// Restore context.
|
||||
|
@ -551,7 +569,6 @@ func (p *parser) addContext(key Key, array bool) {
|
|||
func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) {
|
||||
p.setValue(key, val)
|
||||
p.setType(key, typ, pos)
|
||||
|
||||
}
|
||||
|
||||
// setValue sets the given key to the given value in the current context.
|
||||
|
@ -636,10 +653,7 @@ func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}
|
|||
func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) }
|
||||
func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
|
||||
func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray }
|
||||
func (p *parser) addImplicitContext(key Key) {
|
||||
p.addImplicit(key)
|
||||
p.addContext(key, false)
|
||||
}
|
||||
func (p *parser) addImplicitContext(key Key) { p.addImplicit(key); p.addContext(key, false) }
|
||||
|
||||
// current returns the full key name of the current context.
|
||||
func (p *parser) current() string {
|
||||
|
@ -662,49 +676,54 @@ func stripFirstNewline(s string) string {
|
|||
return s
|
||||
}
|
||||
|
||||
// Remove newlines inside triple-quoted strings if a line ends with "\".
|
||||
// stripEscapedNewlines removes whitespace after line-ending backslashes in
|
||||
// multiline strings.
|
||||
//
|
||||
// A line-ending backslash is an unescaped \ followed only by whitespace until
|
||||
// the next newline. After a line-ending backslash, all whitespace is removed
|
||||
// until the next non-whitespace character.
|
||||
func (p *parser) stripEscapedNewlines(s string) string {
|
||||
split := strings.Split(s, "\n")
|
||||
if len(split) < 1 {
|
||||
return s
|
||||
var b strings.Builder
|
||||
var i int
|
||||
for {
|
||||
ix := strings.Index(s[i:], `\`)
|
||||
if ix < 0 {
|
||||
b.WriteString(s)
|
||||
return b.String()
|
||||
}
|
||||
i += ix
|
||||
|
||||
escNL := false // Keep track of the last non-blank line was escaped.
|
||||
for i, line := range split {
|
||||
line = strings.TrimRight(line, " \t\r")
|
||||
|
||||
if len(line) == 0 || line[len(line)-1] != '\\' {
|
||||
split[i] = strings.TrimRight(split[i], "\r")
|
||||
if !escNL && i != len(split)-1 {
|
||||
split[i] += "\n"
|
||||
}
|
||||
if len(s) > i+1 && s[i+1] == '\\' {
|
||||
// Escaped backslash.
|
||||
i += 2
|
||||
continue
|
||||
}
|
||||
|
||||
escBS := true
|
||||
for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- {
|
||||
escBS = !escBS
|
||||
// Scan until the next non-whitespace.
|
||||
j := i + 1
|
||||
whitespaceLoop:
|
||||
for ; j < len(s); j++ {
|
||||
switch s[j] {
|
||||
case ' ', '\t', '\r', '\n':
|
||||
default:
|
||||
break whitespaceLoop
|
||||
}
|
||||
if escNL {
|
||||
line = strings.TrimLeft(line, " \t\r")
|
||||
}
|
||||
escNL = !escBS
|
||||
|
||||
if escBS {
|
||||
split[i] += "\n"
|
||||
if j == i+1 {
|
||||
// Not a whitespace escape.
|
||||
i++
|
||||
continue
|
||||
}
|
||||
|
||||
if i == len(split)-1 {
|
||||
p.panicf("invalid escape: '\\ '")
|
||||
if !strings.Contains(s[i:j], "\n") {
|
||||
// This is not a line-ending backslash.
|
||||
// (It's a bad escape sequence, but we can let
|
||||
// replaceEscapes catch it.)
|
||||
i++
|
||||
continue
|
||||
}
|
||||
|
||||
split[i] = line[:len(line)-1] // Remove \
|
||||
if len(split)-1 > i {
|
||||
split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
|
||||
b.WriteString(s[:i])
|
||||
s = s[j:]
|
||||
i = 0
|
||||
}
|
||||
}
|
||||
return strings.Join(split, "")
|
||||
}
|
||||
|
||||
func (p *parser) replaceEscapes(it item, str string) string {
|
||||
|
@ -743,12 +762,23 @@ func (p *parser) replaceEscapes(it item, str string) string {
|
|||
case 'r':
|
||||
replaced = append(replaced, rune(0x000D))
|
||||
r += 1
|
||||
case 'e':
|
||||
if p.tomlNext {
|
||||
replaced = append(replaced, rune(0x001B))
|
||||
r += 1
|
||||
}
|
||||
case '"':
|
||||
replaced = append(replaced, rune(0x0022))
|
||||
r += 1
|
||||
case '\\':
|
||||
replaced = append(replaced, rune(0x005C))
|
||||
r += 1
|
||||
case 'x':
|
||||
if p.tomlNext {
|
||||
escaped := p.asciiEscapeToUnicode(it, s[r+1:r+3])
|
||||
replaced = append(replaced, escaped)
|
||||
r += 3
|
||||
}
|
||||
case 'u':
|
||||
// At this point, we know we have a Unicode escape of the form
|
||||
// `uXXXX` at [r, r+5). (Because the lexer guarantees this
|
||||
|
|
|

@ -9,6 +9,8 @@ func Render(doc []byte) []byte {
renderer := NewRoffRenderer()

return blackfriday.Run(doc,
[]blackfriday.Option{blackfriday.WithRenderer(renderer),
blackfriday.WithExtensions(renderer.GetExtensions())}...)
[]blackfriday.Option{
blackfriday.WithRenderer(renderer),
blackfriday.WithExtensions(renderer.GetExtensions()),
}...)
}
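Render's behaviour is unchanged by the reformatting above; a small usage sketch, assuming the module path of the vendored go-md2man v2:

```go
package main

import (
	"fmt"

	"github.com/cpuguy83/go-md2man/v2/md2man"
)

func main() {
	// Convert a tiny markdown man page to roff output.
	out := md2man.Render([]byte("# EXAMPLE 1\n\n## NAME\nexample - a short demo\n"))
	fmt.Println(string(out))
}
```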
@ -1,6 +1,8 @@
|
|||
package md2man
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
@ -34,10 +36,10 @@ const (
|
|||
hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n"
|
||||
linkTag = "\n\\[la]"
|
||||
linkCloseTag = "\\[ra]"
|
||||
codespanTag = "\\fB\\fC"
|
||||
codespanTag = "\\fB"
|
||||
codespanCloseTag = "\\fR"
|
||||
codeTag = "\n.PP\n.RS\n\n.nf\n"
|
||||
codeCloseTag = "\n.fi\n.RE\n"
|
||||
codeTag = "\n.EX\n"
|
||||
codeCloseTag = ".EE\n" // Do not prepend a newline character since code blocks, by definition, include a newline already (or at least as how blackfriday gives us on).
|
||||
quoteTag = "\n.PP\n.RS\n"
|
||||
quoteCloseTag = "\n.RE\n"
|
||||
listTag = "\n.RS\n"
|
||||
|
@ -48,6 +50,7 @@ const (
|
|||
tableEnd = ".TE\n"
|
||||
tableCellStart = "T{\n"
|
||||
tableCellEnd = "\nT}\n"
|
||||
tablePreprocessor = `'\" t`
|
||||
)
|
||||
|
||||
// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents
|
||||
|
@ -74,6 +77,16 @@ func (r *roffRenderer) GetExtensions() blackfriday.Extensions {
|
|||
|
||||
// RenderHeader handles outputting the header at document start
|
||||
func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) {
|
||||
// We need to walk the tree to check if there are any tables.
|
||||
// If there are, we need to enable the roff table preprocessor.
|
||||
ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
|
||||
if node.Type == blackfriday.Table {
|
||||
out(w, tablePreprocessor+"\n")
|
||||
return blackfriday.Terminate
|
||||
}
|
||||
return blackfriday.GoToNext
|
||||
})
|
||||
|
||||
// disable hyphenation
|
||||
out(w, ".nh\n")
|
||||
}
|
||||
|
@ -86,8 +99,7 @@ func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) {
|
|||
// RenderNode is called for each node in a markdown document; based on the node
|
||||
// type the equivalent roff output is sent to the writer
|
||||
func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
|
||||
|
||||
var walkAction = blackfriday.GoToNext
|
||||
walkAction := blackfriday.GoToNext
|
||||
|
||||
switch node.Type {
|
||||
case blackfriday.Text:
|
||||
|
@ -109,9 +121,16 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
|
|||
out(w, strongCloseTag)
|
||||
}
|
||||
case blackfriday.Link:
|
||||
if !entering {
|
||||
out(w, linkTag+string(node.LinkData.Destination)+linkCloseTag)
|
||||
// Don't render the link text for automatic links, because this
|
||||
// will only duplicate the URL in the roff output.
|
||||
// See https://daringfireball.net/projects/markdown/syntax#autolink
|
||||
if !bytes.Equal(node.LinkData.Destination, node.FirstChild.Literal) {
|
||||
out(w, string(node.FirstChild.Literal))
|
||||
}
|
||||
// Hyphens in a link must be escaped to avoid word-wrap in the rendered man page.
|
||||
escapedLink := strings.ReplaceAll(string(node.LinkData.Destination), "-", "\\-")
|
||||
out(w, linkTag+escapedLink+linkCloseTag)
|
||||
walkAction = blackfriday.SkipChildren
|
||||
case blackfriday.Image:
|
||||
// ignore images
|
||||
walkAction = blackfriday.SkipChildren
|
||||
|
@ -160,6 +179,11 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
|
|||
r.handleTableCell(w, node, entering)
|
||||
case blackfriday.HTMLSpan:
|
||||
// ignore other HTML tags
|
||||
case blackfriday.HTMLBlock:
|
||||
if bytes.HasPrefix(node.Literal, []byte("<!--")) {
|
||||
break // ignore comments, no warning
|
||||
}
|
||||
fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String())
|
||||
default:
|
||||
fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String())
|
||||
}
|
||||
|
@ -254,7 +278,7 @@ func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, ente
|
|||
start = "\t"
|
||||
}
|
||||
if node.IsHeader {
|
||||
start += codespanTag
|
||||
start += strongTag
|
||||
} else if nodeLiteralSize(node) > 30 {
|
||||
start += tableCellStart
|
||||
}
|
||||
|
@ -262,7 +286,7 @@ func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, ente
|
|||
} else {
|
||||
var end string
|
||||
if node.IsHeader {
|
||||
end = codespanCloseTag
|
||||
end = strongCloseTag
|
||||
} else if nodeLiteralSize(node) > 30 {
|
||||
end = tableCellEnd
|
||||
}
|
||||
|
@ -310,6 +334,28 @@ func out(w io.Writer, output string) {
|
|||
}
|
||||
|
||||
func escapeSpecialChars(w io.Writer, text []byte) {
|
||||
scanner := bufio.NewScanner(bytes.NewReader(text))
|
||||
|
||||
// count the number of lines in the text
|
||||
// we need to know this to avoid adding a newline after the last line
|
||||
n := bytes.Count(text, []byte{'\n'})
|
||||
idx := 0
|
||||
|
||||
for scanner.Scan() {
|
||||
dt := scanner.Bytes()
|
||||
if idx < n {
|
||||
idx++
|
||||
dt = append(dt, '\n')
|
||||
}
|
||||
escapeSpecialCharsLine(w, dt)
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func escapeSpecialCharsLine(w io.Writer, text []byte) {
|
||||
for i := 0; i < len(text); i++ {
|
||||
// escape initial apostrophe or period
|
||||
if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') {
|
||||
|
|
|
@ -1,26 +0,0 @@
|
|||
// Copyright 2019 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build !go1.12
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
)
|
||||
|
||||
func printModuleVersion() {
|
||||
log.Printf("No version information is available for Mockgen compiled with " +
|
||||
"version 1.11")
|
||||
}
|
|
@ -2,12 +2,13 @@
|
|||
# SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
linters:
|
||||
fast: false
|
||||
disable-all: true
|
||||
enable:
|
||||
- megacheck
|
||||
- revive
|
||||
- megacheck
|
||||
- govet
|
||||
- unconvert
|
||||
- megacheck
|
||||
- gas
|
||||
- gocyclo
|
||||
- dupl
|
||||
|
@ -16,18 +17,30 @@ linters:
|
|||
- unused
|
||||
- typecheck
|
||||
- ineffassign
|
||||
- stylecheck
|
||||
# - stylecheck
|
||||
- exportloopref
|
||||
- gocritic
|
||||
- nakedret
|
||||
- gosimple
|
||||
- prealloc
|
||||
fast: false
|
||||
disable-all: true
|
||||
|
||||
# golangci-lint configuration file
|
||||
linters-settings:
|
||||
revive:
|
||||
ignore-generated-header: true
|
||||
severity: warning
|
||||
rules:
|
||||
- name: package-comments
|
||||
severity: warning
|
||||
disabled: true
|
||||
- name: exported
|
||||
severity: warning
|
||||
disabled: false
|
||||
arguments: ["checkPrivateReceivers", "disableStutteringCheck"]
|
||||
|
||||
issues:
|
||||
exclude-use-default: false
|
||||
exclude-rules:
|
||||
- path: _test\.go
|
||||
linters:
|
||||
- dupl
|
||||
exclude-use-default: false
|
||||
|
|
|

@ -181,6 +181,16 @@ func (c *TwoQueueCache[K, V]) Keys() []K {
return append(k1, k2...)
}

// Values returns a slice of the values in the cache.
// The frequently used values are first in the returned slice.
func (c *TwoQueueCache[K, V]) Values() []V {
c.lock.RLock()
defer c.lock.RUnlock()
v1 := c.frequent.Values()
v2 := c.recent.Values()
return append(v1, v2...)
}

// Remove removes the provided key from the cache.
func (c *TwoQueueCache[K, V]) Remove(key K) {
c.lock.Lock()
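A hedged usage sketch of the new Values accessor, assuming the package's usual New2Q constructor; per the doc comment above, values from the frequent queue come back before the recent ones:

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	cache, _ := lru.New2Q[string, int](128)
	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Get("a") // promotes "a" to the frequent queue
	fmt.Println(cache.Values())
}
```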
@ -9,17 +9,71 @@ Documentation
|
|||
|
||||
Full docs are available on [Go Packages](https://pkg.go.dev/github.com/hashicorp/golang-lru/v2)
|
||||
|
||||
Example
|
||||
=======
|
||||
|
||||
Using the LRU is very simple:
|
||||
LRU cache example
|
||||
=================
|
||||
|
||||
```go
|
||||
l, _ := New[int, interface{}](128)
|
||||
for i := 0; i < 256; i++ {
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/hashicorp/golang-lru/v2"
|
||||
)
|
||||
|
||||
func main() {
|
||||
l, _ := lru.New[int, any](128)
|
||||
for i := 0; i < 256; i++ {
|
||||
l.Add(i, nil)
|
||||
}
|
||||
if l.Len() != 128 {
|
||||
}
|
||||
if l.Len() != 128 {
|
||||
panic(fmt.Sprintf("bad len: %v", l.Len()))
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Expirable LRU cache example
|
||||
===========================
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/golang-lru/v2/expirable"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// make cache with 10ms TTL and 5 max keys
|
||||
cache := expirable.NewLRU[string, string](5, nil, time.Millisecond*10)
|
||||
|
||||
|
||||
// set value under key1.
|
||||
cache.Add("key1", "val1")
|
||||
|
||||
// get value under key1
|
||||
r, ok := cache.Get("key1")
|
||||
|
||||
// check for OK value
|
||||
if ok {
|
||||
fmt.Printf("value before expiration is found: %v, value: %q\n", ok, r)
|
||||
}
|
||||
|
||||
// wait for cache to expire
|
||||
time.Sleep(time.Millisecond * 12)
|
||||
|
||||
// get value under key1 after key expiration
|
||||
r, ok = cache.Get("key1")
|
||||
fmt.Printf("value after expiration is found: %v, value: %q\n", ok, r)
|
||||
|
||||
// set value under key2, would evict old entry because it is already expired.
|
||||
cache.Add("key2", "val2")
|
||||
|
||||
fmt.Printf("Cache len: %d\n", cache.Len())
|
||||
// Output:
|
||||
// value before expiration is found: true, value: "val1"
|
||||
// value after expiration is found: false, value: ""
|
||||
// Cache len: 1
|
||||
}
|
||||
```
|
||||
|
|
|
@ -1,259 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package lru
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/golang-lru/v2/simplelru"
|
||||
)
|
||||
|
||||
// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC).
|
||||
// ARC is an enhancement over the standard LRU cache in that tracks both
|
||||
// frequency and recency of use. This avoids a burst in access to new
|
||||
// entries from evicting the frequently used older entries. It adds some
|
||||
// additional tracking overhead to a standard LRU cache, computationally
|
||||
// it is roughly 2x the cost, and the extra memory overhead is linear
|
||||
// with the size of the cache. ARC has been patented by IBM, but is
|
||||
// similar to the TwoQueueCache (2Q) which requires setting parameters.
|
||||
type ARCCache[K comparable, V any] struct {
|
||||
size int // Size is the total capacity of the cache
|
||||
p int // P is the dynamic preference towards T1 or T2
|
||||
|
||||
t1 simplelru.LRUCache[K, V] // T1 is the LRU for recently accessed items
|
||||
b1 simplelru.LRUCache[K, struct{}] // B1 is the LRU for evictions from t1
|
||||
|
||||
t2 simplelru.LRUCache[K, V] // T2 is the LRU for frequently accessed items
|
||||
b2 simplelru.LRUCache[K, struct{}] // B2 is the LRU for evictions from t2
|
||||
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// NewARC creates an ARC of the given size
|
||||
func NewARC[K comparable, V any](size int) (*ARCCache[K, V], error) {
|
||||
// Create the sub LRUs
|
||||
b1, err := simplelru.NewLRU[K, struct{}](size, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b2, err := simplelru.NewLRU[K, struct{}](size, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
t1, err := simplelru.NewLRU[K, V](size, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
t2, err := simplelru.NewLRU[K, V](size, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Initialize the ARC
|
||||
c := &ARCCache[K, V]{
|
||||
size: size,
|
||||
p: 0,
|
||||
t1: t1,
|
||||
b1: b1,
|
||||
t2: t2,
|
||||
b2: b2,
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Get looks up a key's value from the cache.
|
||||
func (c *ARCCache[K, V]) Get(key K) (value V, ok bool) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
// If the value is contained in T1 (recent), then
|
||||
// promote it to T2 (frequent)
|
||||
if val, ok := c.t1.Peek(key); ok {
|
||||
c.t1.Remove(key)
|
||||
c.t2.Add(key, val)
|
||||
return val, ok
|
||||
}
|
||||
|
||||
// Check if the value is contained in T2 (frequent)
|
||||
if val, ok := c.t2.Get(key); ok {
|
||||
return val, ok
|
||||
}
|
||||
|
||||
// No hit
|
||||
return
|
||||
}
|
||||
|
||||
// Add adds a value to the cache.
|
||||
func (c *ARCCache[K, V]) Add(key K, value V) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
// Check if the value is contained in T1 (recent), and potentially
|
||||
// promote it to frequent T2
|
||||
if c.t1.Contains(key) {
|
||||
c.t1.Remove(key)
|
||||
c.t2.Add(key, value)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if the value is already in T2 (frequent) and update it
|
||||
if c.t2.Contains(key) {
|
||||
c.t2.Add(key, value)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if this value was recently evicted as part of the
|
||||
// recently used list
|
||||
if c.b1.Contains(key) {
|
||||
// T1 set is too small, increase P appropriately
|
||||
delta := 1
|
||||
b1Len := c.b1.Len()
|
||||
b2Len := c.b2.Len()
|
||||
if b2Len > b1Len {
|
||||
delta = b2Len / b1Len
|
||||
}
|
||||
if c.p+delta >= c.size {
|
||||
c.p = c.size
|
||||
} else {
|
||||
c.p += delta
|
||||
}
|
||||
|
||||
// Potentially need to make room in the cache
|
||||
if c.t1.Len()+c.t2.Len() >= c.size {
|
||||
c.replace(false)
|
||||
}
|
||||
|
||||
// Remove from B1
|
||||
c.b1.Remove(key)
|
||||
|
||||
// Add the key to the frequently used list
|
||||
c.t2.Add(key, value)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if this value was recently evicted as part of the
|
||||
// frequently used list
|
||||
if c.b2.Contains(key) {
|
||||
// T2 set is too small, decrease P appropriately
|
||||
delta := 1
|
||||
b1Len := c.b1.Len()
|
||||
b2Len := c.b2.Len()
|
||||
if b1Len > b2Len {
|
||||
delta = b1Len / b2Len
|
||||
}
|
||||
if delta >= c.p {
|
||||
c.p = 0
|
||||
} else {
|
||||
c.p -= delta
|
||||
}
|
||||
|
||||
// Potentially need to make room in the cache
|
||||
if c.t1.Len()+c.t2.Len() >= c.size {
|
||||
c.replace(true)
|
||||
}
|
||||
|
||||
// Remove from B2
|
||||
c.b2.Remove(key)
|
||||
|
||||
// Add the key to the frequently used list
|
||||
c.t2.Add(key, value)
|
||||
return
|
||||
}
|
||||
|
||||
// Potentially need to make room in the cache
|
||||
if c.t1.Len()+c.t2.Len() >= c.size {
|
||||
c.replace(false)
|
||||
}
|
||||
|
||||
// Keep the size of the ghost buffers trim
|
||||
if c.b1.Len() > c.size-c.p {
|
||||
c.b1.RemoveOldest()
|
||||
}
|
||||
if c.b2.Len() > c.p {
|
||||
c.b2.RemoveOldest()
|
||||
}
|
||||
|
||||
// Add to the recently seen list
|
||||
c.t1.Add(key, value)
|
||||
}
|
||||
|
||||
// replace is used to adaptively evict from either T1 or T2
|
||||
// based on the current learned value of P
|
||||
func (c *ARCCache[K, V]) replace(b2ContainsKey bool) {
|
||||
t1Len := c.t1.Len()
|
||||
if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) {
|
||||
k, _, ok := c.t1.RemoveOldest()
|
||||
if ok {
|
||||
c.b1.Add(k, struct{}{})
|
||||
}
|
||||
} else {
|
||||
k, _, ok := c.t2.RemoveOldest()
|
||||
if ok {
|
||||
c.b2.Add(k, struct{}{})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Len returns the number of cached entries
|
||||
func (c *ARCCache[K, V]) Len() int {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
return c.t1.Len() + c.t2.Len()
|
||||
}
|
||||
|
||||
// Keys returns all the cached keys
|
||||
func (c *ARCCache[K, V]) Keys() []K {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
k1 := c.t1.Keys()
|
||||
k2 := c.t2.Keys()
|
||||
return append(k1, k2...)
|
||||
}
|
||||
|
||||
// Remove is used to purge a key from the cache
|
||||
func (c *ARCCache[K, V]) Remove(key K) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if c.t1.Remove(key) {
|
||||
return
|
||||
}
|
||||
if c.t2.Remove(key) {
|
||||
return
|
||||
}
|
||||
if c.b1.Remove(key) {
|
||||
return
|
||||
}
|
||||
if c.b2.Remove(key) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Purge is used to clear the cache
|
||||
func (c *ARCCache[K, V]) Purge() {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
c.t1.Purge()
|
||||
c.t2.Purge()
|
||||
c.b1.Purge()
|
||||
c.b2.Purge()
|
||||
}
|
||||
|
||||
// Contains is used to check if the cache contains a key
|
||||
// without updating recency or frequency.
|
||||
func (c *ARCCache[K, V]) Contains(key K) bool {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
return c.t1.Contains(key) || c.t2.Contains(key)
|
||||
}
|
||||
|
||||
// Peek is used to inspect the cache value of a key
|
||||
// without updating recency or frequency.
|
||||
func (c *ARCCache[K, V]) Peek(key K) (value V, ok bool) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
if val, ok := c.t1.Peek(key); ok {
|
||||
return val, ok
|
||||
}
|
||||
return c.t2.Peek(key)
|
||||
}

@ -3,21 +3,21 @@

// Package lru provides three different LRU caches of varying sophistication.
//
// Cache is a simple LRU cache. It is based on the
// LRU implementation in groupcache:
// https://github.com/golang/groupcache/tree/master/lru
// Cache is a simple LRU cache. It is based on the LRU implementation in
// groupcache: https://github.com/golang/groupcache/tree/master/lru
//
// TwoQueueCache tracks frequently used and recently used entries separately.
// This avoids a burst of accesses from taking out frequently used entries,
// at the cost of about 2x computational overhead and some extra bookkeeping.
// This avoids a burst of accesses from taking out frequently used entries, at
// the cost of about 2x computational overhead and some extra bookkeeping.
//
// ARCCache is an adaptive replacement cache. It tracks recent evictions as
// well as recent usage in both the frequent and recent caches. Its
// computational overhead is comparable to TwoQueueCache, but the memory
// overhead is linear with the size of the cache.
// ARCCache is an adaptive replacement cache. It tracks recent evictions as well
// as recent usage in both the frequent and recent caches. Its computational
// overhead is comparable to TwoQueueCache, but the memory overhead is linear
// with the size of the cache.
//
// ARC has been patented by IBM, so do not use it if that is problematic for
// your program.
// your program. For this reason, it is in a separate go module contained within
// this repository.
//
// All caches in this package take locks while operating, and are therefore
// thread-safe for consumers.
@ -0,0 +1,142 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE_list file.
|
||||
|
||||
package internal
|
||||
|
||||
import "time"
|
||||
|
||||
// Entry is an LRU Entry
|
||||
type Entry[K comparable, V any] struct {
|
||||
// Next and previous pointers in the doubly-linked list of elements.
|
||||
// To simplify the implementation, internally a list l is implemented
|
||||
// as a ring, such that &l.root is both the next element of the last
|
||||
// list element (l.Back()) and the previous element of the first list
|
||||
// element (l.Front()).
|
||||
next, prev *Entry[K, V]
|
||||
|
||||
// The list to which this element belongs.
|
||||
list *LruList[K, V]
|
||||
|
||||
// The LRU Key of this element.
|
||||
Key K
|
||||
|
||||
// The Value stored with this element.
|
||||
Value V
|
||||
|
||||
// The time this element would be cleaned up, optional
|
||||
ExpiresAt time.Time
|
||||
|
||||
// The expiry bucket the item was put in, optional

|
||||
ExpireBucket uint8
|
||||
}
|
||||
|
||||
// PrevEntry returns the previous list element or nil.
|
||||
func (e *Entry[K, V]) PrevEntry() *Entry[K, V] {
|
||||
if p := e.prev; e.list != nil && p != &e.list.root {
|
||||
return p
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// LruList represents a doubly linked list.
|
||||
// The zero Value for LruList is an empty list ready to use.
|
||||
type LruList[K comparable, V any] struct {
|
||||
root Entry[K, V] // sentinel list element, only &root, root.prev, and root.next are used
|
||||
len int // current list Length excluding (this) sentinel element
|
||||
}
|
||||
|
||||
// Init initializes or clears list l.
|
||||
func (l *LruList[K, V]) Init() *LruList[K, V] {
|
||||
l.root.next = &l.root
|
||||
l.root.prev = &l.root
|
||||
l.len = 0
|
||||
return l
|
||||
}
|
||||
|
||||
// NewList returns an initialized list.
|
||||
func NewList[K comparable, V any]() *LruList[K, V] { return new(LruList[K, V]).Init() }
|
||||
|
||||
// Length returns the number of elements of list l.
|
||||
// The complexity is O(1).
|
||||
func (l *LruList[K, V]) Length() int { return l.len }
|
||||
|
||||
// Back returns the last element of list l or nil if the list is empty.
|
||||
func (l *LruList[K, V]) Back() *Entry[K, V] {
|
||||
if l.len == 0 {
|
||||
return nil
|
||||
}
|
||||
return l.root.prev
|
||||
}
|
||||
|
||||
// lazyInit lazily initializes a zero List Value.
|
||||
func (l *LruList[K, V]) lazyInit() {
|
||||
if l.root.next == nil {
|
||||
l.Init()
|
||||
}
|
||||
}
|
||||
|
||||
// insert inserts e after at, increments l.len, and returns e.
|
||||
func (l *LruList[K, V]) insert(e, at *Entry[K, V]) *Entry[K, V] {
|
||||
e.prev = at
|
||||
e.next = at.next
|
||||
e.prev.next = e
|
||||
e.next.prev = e
|
||||
e.list = l
|
||||
l.len++
|
||||
return e
|
||||
}
|
||||
|
||||
// insertValue is a convenience wrapper for insert(&Entry{Value: v, ExpiresAt: ExpiresAt}, at).
|
||||
func (l *LruList[K, V]) insertValue(k K, v V, expiresAt time.Time, at *Entry[K, V]) *Entry[K, V] {
|
||||
return l.insert(&Entry[K, V]{Value: v, Key: k, ExpiresAt: expiresAt}, at)
|
||||
}
|
||||
|
||||
// Remove removes e from its list, decrements l.len
|
||||
func (l *LruList[K, V]) Remove(e *Entry[K, V]) V {
|
||||
e.prev.next = e.next
|
||||
e.next.prev = e.prev
|
||||
e.next = nil // avoid memory leaks
|
||||
e.prev = nil // avoid memory leaks
|
||||
e.list = nil
|
||||
l.len--
|
||||
|
||||
return e.Value
|
||||
}
|
||||
|
||||
// move moves e to next to at.
|
||||
func (l *LruList[K, V]) move(e, at *Entry[K, V]) {
|
||||
if e == at {
|
||||
return
|
||||
}
|
||||
e.prev.next = e.next
|
||||
e.next.prev = e.prev
|
||||
|
||||
e.prev = at
|
||||
e.next = at.next
|
||||
e.prev.next = e
|
||||
e.next.prev = e
|
||||
}
|
||||
|
||||
// PushFront inserts a new element e with value v at the front of list l and returns e.
|
||||
func (l *LruList[K, V]) PushFront(k K, v V) *Entry[K, V] {
|
||||
l.lazyInit()
|
||||
return l.insertValue(k, v, time.Time{}, &l.root)
|
||||
}
|
||||
|
||||
// PushFrontExpirable inserts a new expirable element e with Value v at the front of list l and returns e.
|
||||
func (l *LruList[K, V]) PushFrontExpirable(k K, v V, expiresAt time.Time) *Entry[K, V] {
|
||||
l.lazyInit()
|
||||
return l.insertValue(k, v, expiresAt, &l.root)
|
||||
}
|
||||
|
||||
// MoveToFront moves element e to the front of list l.
|
||||
// If e is not an element of l, the list is not modified.
|
||||
// The element must not be nil.
|
||||
func (l *LruList[K, V]) MoveToFront(e *Entry[K, V]) {
|
||||
if e.list != l || l.root.next == e {
|
||||
return
|
||||
}
|
||||
// see comment in List.Remove about initialization of l
|
||||
l.move(e, &l.root)
|
||||
}
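The list above is the classic container/list sentinel ring, specialised with a key/value payload and an optional expiry. Since the package is internal it cannot be imported from outside the module; the sketch below is written as if it lived next to this file and only uses the functions shown above (a hypothetical example, not part of the diff):

// exampleEvictOldest shows the LRU bookkeeping this list is used for.
func exampleEvictOldest() {
	l := NewList[string, int]()
	l.PushFront("a", 1) // newest entries go to the front
	l.PushFront("b", 2)
	l.PushFront("c", 3)

	// The LRU victim is whatever sits at the back of the ring.
	if oldest := l.Back(); oldest != nil {
		_ = l.Remove(oldest) // returns the evicted value ("a" -> 1)
	}

	// Walk from oldest to newest, as the simplelru Keys()/Values() methods do.
	for e := l.Back(); e != nil; e = e.PrevEntry() {
		_ = e.Key
	}
	_ = l.Length() // 2 after the removal
}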
|
|
@ -233,6 +233,14 @@ func (c *Cache[K, V]) Keys() []K {
|
|||
return keys
|
||||
}
|
||||
|
||||
// Values returns a slice of the values in the cache, from oldest to newest.
|
||||
func (c *Cache[K, V]) Values() []V {
|
||||
c.lock.RLock()
|
||||
values := c.lru.Values()
|
||||
c.lock.RUnlock()
|
||||
return values
|
||||
}
|
||||
|
||||
// Len returns the number of items in the cache.
|
||||
func (c *Cache[K, V]) Len() int {
|
||||
c.lock.RLock()
|
||||
|
|
|
@ -1,128 +0,0 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE_list file.
|
||||
|
||||
package simplelru
|
||||
|
||||
// entry is an LRU entry
|
||||
type entry[K comparable, V any] struct {
|
||||
// Next and previous pointers in the doubly-linked list of elements.
|
||||
// To simplify the implementation, internally a list l is implemented
|
||||
// as a ring, such that &l.root is both the next element of the last
|
||||
// list element (l.Back()) and the previous element of the first list
|
||||
// element (l.Front()).
|
||||
next, prev *entry[K, V]
|
||||
|
||||
// The list to which this element belongs.
|
||||
list *lruList[K, V]
|
||||
|
||||
// The LRU key of this element.
|
||||
key K
|
||||
|
||||
// The value stored with this element.
|
||||
value V
|
||||
}
|
||||
|
||||
// prevEntry returns the previous list element or nil.
|
||||
func (e *entry[K, V]) prevEntry() *entry[K, V] {
|
||||
if p := e.prev; e.list != nil && p != &e.list.root {
|
||||
return p
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// lruList represents a doubly linked list.
|
||||
// The zero value for lruList is an empty list ready to use.
|
||||
type lruList[K comparable, V any] struct {
|
||||
root entry[K, V] // sentinel list element, only &root, root.prev, and root.next are used
|
||||
len int // current list length excluding (this) sentinel element
|
||||
}
|
||||
|
||||
// init initializes or clears list l.
|
||||
func (l *lruList[K, V]) init() *lruList[K, V] {
|
||||
l.root.next = &l.root
|
||||
l.root.prev = &l.root
|
||||
l.len = 0
|
||||
return l
|
||||
}
|
||||
|
||||
// newList returns an initialized list.
|
||||
func newList[K comparable, V any]() *lruList[K, V] { return new(lruList[K, V]).init() }
|
||||
|
||||
// length returns the number of elements of list l.
|
||||
// The complexity is O(1).
|
||||
func (l *lruList[K, V]) length() int { return l.len }
|
||||
|
||||
// back returns the last element of list l or nil if the list is empty.
|
||||
func (l *lruList[K, V]) back() *entry[K, V] {
|
||||
if l.len == 0 {
|
||||
return nil
|
||||
}
|
||||
return l.root.prev
|
||||
}
|
||||
|
||||
// lazyInit lazily initializes a zero List value.
|
||||
func (l *lruList[K, V]) lazyInit() {
|
||||
if l.root.next == nil {
|
||||
l.init()
|
||||
}
|
||||
}
|
||||
|
||||
// insert inserts e after at, increments l.len, and returns e.
|
||||
func (l *lruList[K, V]) insert(e, at *entry[K, V]) *entry[K, V] {
|
||||
e.prev = at
|
||||
e.next = at.next
|
||||
e.prev.next = e
|
||||
e.next.prev = e
|
||||
e.list = l
|
||||
l.len++
|
||||
return e
|
||||
}
|
||||
|
||||
// insertValue is a convenience wrapper for insert(&Element{Value: v}, at).
|
||||
func (l *lruList[K, V]) insertValue(k K, v V, at *entry[K, V]) *entry[K, V] {
|
||||
return l.insert(&entry[K, V]{value: v, key: k}, at)
|
||||
}
|
||||
|
||||
// remove removes e from its list, decrements l.len
|
||||
func (l *lruList[K, V]) remove(e *entry[K, V]) V {
|
||||
e.prev.next = e.next
|
||||
e.next.prev = e.prev
|
||||
e.next = nil // avoid memory leaks
|
||||
e.prev = nil // avoid memory leaks
|
||||
e.list = nil
|
||||
l.len--
|
||||
|
||||
return e.value
|
||||
}
|
||||
|
||||
// move moves e to next to at.
|
||||
func (l *lruList[K, V]) move(e, at *entry[K, V]) {
|
||||
if e == at {
|
||||
return
|
||||
}
|
||||
e.prev.next = e.next
|
||||
e.next.prev = e.prev
|
||||
|
||||
e.prev = at
|
||||
e.next = at.next
|
||||
e.prev.next = e
|
||||
e.next.prev = e
|
||||
}
|
||||
|
||||
// pushFront inserts a new element e with value v at the front of list l and returns e.
|
||||
func (l *lruList[K, V]) pushFront(k K, v V) *entry[K, V] {
|
||||
l.lazyInit()
|
||||
return l.insertValue(k, v, &l.root)
|
||||
}
|
||||
|
||||
// moveToFront moves element e to the front of list l.
|
||||
// If e is not an element of l, the list is not modified.
|
||||
// The element must not be nil.
|
||||
func (l *lruList[K, V]) moveToFront(e *entry[K, V]) {
|
||||
if e.list != l || l.root.next == e {
|
||||
return
|
||||
}
|
||||
// see comment in List.Remove about initialization of l
|
||||
l.move(e, &l.root)
|
||||
}
|
|
@ -5,6 +5,8 @@ package simplelru
|
|||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/hashicorp/golang-lru/v2/internal"
|
||||
)
|
||||
|
||||
// EvictCallback is used to get a callback when a cache entry is evicted
|
||||
|
@ -13,8 +15,8 @@ type EvictCallback[K comparable, V any] func(key K, value V)
|
|||
// LRU implements a non-thread safe fixed size LRU cache
|
||||
type LRU[K comparable, V any] struct {
|
||||
size int
|
||||
evictList *lruList[K, V]
|
||||
items map[K]*entry[K, V]
|
||||
evictList *internal.LruList[K, V]
|
||||
items map[K]*internal.Entry[K, V]
|
||||
onEvict EvictCallback[K, V]
|
||||
}
|
||||
|
||||
|
@ -26,8 +28,8 @@ func NewLRU[K comparable, V any](size int, onEvict EvictCallback[K, V]) (*LRU[K,
|
|||
|
||||
c := &LRU[K, V]{
|
||||
size: size,
|
||||
evictList: newList[K, V](),
|
||||
items: make(map[K]*entry[K, V]),
|
||||
evictList: internal.NewList[K, V](),
|
||||
items: make(map[K]*internal.Entry[K, V]),
|
||||
onEvict: onEvict,
|
||||
}
|
||||
return c, nil
|
||||
|
@ -37,27 +39,30 @@ func NewLRU[K comparable, V any](size int, onEvict EvictCallback[K, V]) (*LRU[K,
|
|||
func (c *LRU[K, V]) Purge() {
|
||||
for k, v := range c.items {
|
||||
if c.onEvict != nil {
|
||||
c.onEvict(k, v.value)
|
||||
c.onEvict(k, v.Value)
|
||||
}
|
||||
delete(c.items, k)
|
||||
}
|
||||
c.evictList.init()
|
||||
c.evictList.Init()
|
||||
}
|
||||
|
||||
// Add adds a value to the cache. Returns true if an eviction occurred.
|
||||
func (c *LRU[K, V]) Add(key K, value V) (evicted bool) {
|
||||
// Check for existing item
|
||||
if ent, ok := c.items[key]; ok {
|
||||
c.evictList.moveToFront(ent)
|
||||
ent.value = value
|
||||
c.evictList.MoveToFront(ent)
|
||||
if c.onEvict != nil {
|
||||
c.onEvict(key, ent.Value)
|
||||
}
|
||||
ent.Value = value
|
||||
return false
|
||||
}
|
||||
|
||||
// Add new item
|
||||
ent := c.evictList.pushFront(key, value)
|
||||
ent := c.evictList.PushFront(key, value)
|
||||
c.items[key] = ent
|
||||
|
||||
evict := c.evictList.length() > c.size
|
||||
evict := c.evictList.Length() > c.size
|
||||
// Verify size not exceeded
|
||||
if evict {
|
||||
c.removeOldest()
|
||||
|
@ -68,8 +73,8 @@ func (c *LRU[K, V]) Add(key K, value V) (evicted bool) {
|
|||
// Get looks up a key's value from the cache.
|
||||
func (c *LRU[K, V]) Get(key K) (value V, ok bool) {
|
||||
if ent, ok := c.items[key]; ok {
|
||||
c.evictList.moveToFront(ent)
|
||||
return ent.value, true
|
||||
c.evictList.MoveToFront(ent)
|
||||
return ent.Value, true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
@ -84,9 +89,9 @@ func (c *LRU[K, V]) Contains(key K) (ok bool) {
|
|||
// Peek returns the key value (or undefined if not found) without updating
|
||||
// the "recently used"-ness of the key.
|
||||
func (c *LRU[K, V]) Peek(key K) (value V, ok bool) {
|
||||
var ent *entry[K, V]
|
||||
var ent *internal.Entry[K, V]
|
||||
if ent, ok = c.items[key]; ok {
|
||||
return ent.value, true
|
||||
return ent.Value, true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
@ -103,35 +108,46 @@ func (c *LRU[K, V]) Remove(key K) (present bool) {
|
|||
|
||||
// RemoveOldest removes the oldest item from the cache.
|
||||
func (c *LRU[K, V]) RemoveOldest() (key K, value V, ok bool) {
|
||||
if ent := c.evictList.back(); ent != nil {
|
||||
if ent := c.evictList.Back(); ent != nil {
|
||||
c.removeElement(ent)
|
||||
return ent.key, ent.value, true
|
||||
return ent.Key, ent.Value, true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetOldest returns the oldest entry
|
||||
func (c *LRU[K, V]) GetOldest() (key K, value V, ok bool) {
|
||||
if ent := c.evictList.back(); ent != nil {
|
||||
return ent.key, ent.value, true
|
||||
if ent := c.evictList.Back(); ent != nil {
|
||||
return ent.Key, ent.Value, true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Keys returns a slice of the keys in the cache, from oldest to newest.
|
||||
func (c *LRU[K, V]) Keys() []K {
|
||||
keys := make([]K, c.evictList.length())
|
||||
keys := make([]K, c.evictList.Length())
|
||||
i := 0
|
||||
for ent := c.evictList.back(); ent != nil; ent = ent.prevEntry() {
|
||||
keys[i] = ent.key
|
||||
for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() {
|
||||
keys[i] = ent.Key
|
||||
i++
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
// Values returns a slice of the values in the cache, from oldest to newest.
|
||||
func (c *LRU[K, V]) Values() []V {
|
||||
values := make([]V, len(c.items))
|
||||
i := 0
|
||||
for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() {
|
||||
values[i] = ent.Value
|
||||
i++
|
||||
}
|
||||
return values
|
||||
}
|
||||
|
||||
// Len returns the number of items in the cache.
|
||||
func (c *LRU[K, V]) Len() int {
|
||||
return c.evictList.length()
|
||||
return c.evictList.Length()
|
||||
}
|
||||
|
||||
// Resize changes the cache size.
|
||||
|
@ -149,16 +165,16 @@ func (c *LRU[K, V]) Resize(size int) (evicted int) {
|
|||
|
||||
// removeOldest removes the oldest item from the cache.
|
||||
func (c *LRU[K, V]) removeOldest() {
|
||||
if ent := c.evictList.back(); ent != nil {
|
||||
if ent := c.evictList.Back(); ent != nil {
|
||||
c.removeElement(ent)
|
||||
}
|
||||
}
|
||||
|
||||
// removeElement is used to remove a given list element from the cache
|
||||
func (c *LRU[K, V]) removeElement(e *entry[K, V]) {
|
||||
c.evictList.remove(e)
|
||||
delete(c.items, e.key)
|
||||
func (c *LRU[K, V]) removeElement(e *internal.Entry[K, V]) {
|
||||
c.evictList.Remove(e)
|
||||
delete(c.items, e.Key)
|
||||
if c.onEvict != nil {
|
||||
c.onEvict(e.key, e.value)
|
||||
c.onEvict(e.Key, e.Value)
|
||||
}
|
||||
}
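One behavioural detail worth noting from the Add hunk above: overwriting an existing key now also fires the eviction callback for the old value, in addition to the usual size-based eviction. A minimal sketch, assuming the exported simplelru.NewLRU constructor shown in the hunk context near the top of this file:

package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/v2/simplelru"
)

func main() {
	onEvict := func(k string, v int) { fmt.Println("evicted", k, "=", v) }
	c, err := simplelru.NewLRU[string, int](2, onEvict)
	if err != nil {
		panic(err)
	}
	c.Add("a", 1)
	c.Add("b", 2)
	c.Add("a", 10) // overwrite: callback fires for the old ("a", 1)
	c.Add("c", 3)  // over capacity: callback fires for the LRU entry ("b", 2)
	fmt.Println(c.Len()) // 2
}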
|
||||
|
|
|
@ -32,6 +32,9 @@ type LRUCache[K comparable, V any] interface {
|
|||
// Returns a slice of the keys in the cache, from oldest to newest.
|
||||
Keys() []K
|
||||
|
||||
// Values returns a slice of the values in the cache, from oldest to newest.
|
||||
Values() []V
|
||||
|
||||
// Returns the number of items in the cache.
|
||||
Len() int
|
||||
|
||||
|
|
|
@ -1,19 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package lru
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"math"
|
||||
"math/big"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func getRand(tb testing.TB) int64 {
|
||||
out, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
|
||||
if err != nil {
|
||||
tb.Fatal(err)
|
||||
}
|
||||
return out.Int64()
|
||||
}
|
|
@ -85,7 +85,10 @@ func DiscoverDevicesCtx(ctx context.Context, searchTarget string) ([]MaybeRootDe
|
|||
return nil, err
|
||||
}
|
||||
defer hcCleanup()
|
||||
responses, err := ssdp.SSDPRawSearchCtx(ctx, hc, string(searchTarget), 2, 3)
|
||||
|
||||
searchCtx, cancel := context.WithTimeout(ctx, 2*time.Second)
|
||||
defer cancel()
|
||||
responses, err := ssdp.RawSearch(searchCtx, hc, string(searchTarget), 3)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -3,6 +3,7 @@ package httpu
|
|||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
|
@ -26,6 +27,27 @@ type ClientInterface interface {
|
|||
) ([]*http.Response, error)
|
||||
}
|
||||
|
||||
// ClientInterfaceCtx is the equivalent of ClientInterface, except with methods
|
||||
// taking a context.Context parameter.
|
||||
type ClientInterfaceCtx interface {
|
||||
// DoWithContext performs a request. If the input request has a
|
||||
// deadline, then that value will be used as the timeout for how long
|
||||
// to wait before returning the responses that were received. If the
|
||||
// request's context is canceled, this method will return immediately.
|
||||
//
|
||||
// If the request's context is never canceled, and does not have a
|
||||
// deadline, then this function WILL NEVER RETURN. You MUST set an
|
||||
// appropriate deadline on the context, or otherwise cancel it when you
|
||||
// want to finish an operation.
|
||||
//
|
||||
// An error is only returned for failing to send the request. Failures
|
||||
// in receipt simply do not add to the resulting responses.
|
||||
DoWithContext(
|
||||
req *http.Request,
|
||||
numSends int,
|
||||
) ([]*http.Response, error)
|
||||
}
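The warning in the interface comment is easy to trip over: DoWithContext keeps collecting responses until the request's context ends, so a context with neither deadline nor cancellation blocks forever. A hedged sketch of the calling pattern only; the real M-SEARCH request (MAN/MX/ST headers, the multicast Host) is built by the ssdp package, not by hand:

package main

import (
	"context"
	"log"
	"net/http"
	"time"

	"github.com/huin/goupnp/httpu"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	client, err := httpu.NewHTTPUClient()
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Illustrative bare request; see the ssdp package for the real one.
	req, err := http.NewRequestWithContext(ctx, "M-SEARCH", "udp://239.255.255.250:1900", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Returns once the 3s deadline passes (or ctx is cancelled earlier);
	// send failures are errors, missing replies just mean fewer responses.
	responses, err := client.DoWithContext(req, 3)
	log.Println(len(responses), err)
}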
|
||||
|
||||
// HTTPUClient is a client for dealing with HTTPU (HTTP over UDP). Its typical
|
||||
// function is for HTTPMU, and particularly SSDP.
|
||||
type HTTPUClient struct {
|
||||
|
@ -34,6 +56,7 @@ type HTTPUClient struct {
|
|||
}
|
||||
|
||||
var _ ClientInterface = &HTTPUClient{}
|
||||
var _ ClientInterfaceCtx = &HTTPUClient{}
|
||||
|
||||
// NewHTTPUClient creates a new HTTPUClient, opening up a new UDP socket for the
|
||||
// purpose.
|
||||
|
@ -75,6 +98,25 @@ func (httpu *HTTPUClient) Do(
|
|||
req *http.Request,
|
||||
timeout time.Duration,
|
||||
numSends int,
|
||||
) ([]*http.Response, error) {
|
||||
ctx := req.Context()
|
||||
if timeout > 0 {
|
||||
var cancel func()
|
||||
ctx, cancel = context.WithTimeout(ctx, timeout)
|
||||
defer cancel()
|
||||
req = req.WithContext(ctx)
|
||||
}
|
||||
|
||||
return httpu.DoWithContext(req, numSends)
|
||||
}
|
||||
|
||||
// DoWithContext implements ClientInterfaceCtx.DoWithContext.
|
||||
//
|
||||
// Make sure to read the documentation on the ClientInterfaceCtx interface
|
||||
// regarding cancellation!
|
||||
func (httpu *HTTPUClient) DoWithContext(
|
||||
req *http.Request,
|
||||
numSends int,
|
||||
) ([]*http.Response, error) {
|
||||
httpu.connLock.Lock()
|
||||
defer httpu.connLock.Unlock()
|
||||
|
@ -101,9 +143,27 @@ func (httpu *HTTPUClient) Do(
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = httpu.conn.SetDeadline(time.Now().Add(timeout)); err != nil {
|
||||
|
||||
// Handle context deadline/timeout
|
||||
ctx := req.Context()
|
||||
deadline, ok := ctx.Deadline()
|
||||
if ok {
|
||||
if err = httpu.conn.SetDeadline(deadline); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Handle context cancelation
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
go func() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
// if context is cancelled, stop any connections by setting time in the past.
|
||||
httpu.conn.SetDeadline(time.Now().Add(-time.Second))
|
||||
case <-done:
|
||||
}
|
||||
}()
|
||||
|
||||
// Send request.
|
||||
for i := 0; i < numSends; i++ {
|
||||
|
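The cancellation handling added in the hunk above uses a common trick for interrupting blocking socket reads: watch the context in a helper goroutine and, when it fires, push the connection deadline into the past so the pending read returns immediately. Stripped to its essentials (a generic net.Conn sketch, not this package's types):

package example

import (
	"context"
	"net"
	"time"
)

// watchCancel unblocks reads/writes on conn when ctx is cancelled.
// Call the returned stop function once the I/O has finished.
func watchCancel(ctx context.Context, conn net.Conn) (stop func()) {
	done := make(chan struct{})
	go func() {
		select {
		case <-ctx.Done():
			// A deadline in the past makes any blocked Read/ReadFrom fail at once.
			conn.SetDeadline(time.Now().Add(-time.Second))
		case <-done:
		}
	}()
	return func() { close(done) }
}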
|
|
@ -49,7 +49,7 @@ func (mc *MultiClient) Do(
|
|||
}
|
||||
|
||||
func (mc *MultiClient) sendRequests(
|
||||
results chan<-[]*http.Response,
|
||||
results chan<- []*http.Response,
|
||||
req *http.Request,
|
||||
timeout time.Duration,
|
||||
numSends int,
|
||||
|
@ -68,3 +68,65 @@ func (mc *MultiClient) sendRequests(
|
|||
}
|
||||
return tasks.Wait()
|
||||
}
|
||||
|
||||
// MultiClientCtx dispatches requests out to all the delegated clients.
|
||||
type MultiClientCtx struct {
|
||||
// The HTTPU clients to delegate to.
|
||||
delegates []ClientInterfaceCtx
|
||||
}
|
||||
|
||||
var _ ClientInterfaceCtx = &MultiClientCtx{}
|
||||
|
||||
// NewMultiClientCtx creates a new MultiClientCtx that delegates to all the given
|
||||
// clients.
|
||||
func NewMultiClientCtx(delegates []ClientInterfaceCtx) *MultiClientCtx {
|
||||
return &MultiClientCtx{
|
||||
delegates: delegates,
|
||||
}
|
||||
}
|
||||
|
||||
// DoWithContext implements ClientInterfaceCtx.DoWithContext.
|
||||
func (mc *MultiClientCtx) DoWithContext(
|
||||
req *http.Request,
|
||||
numSends int,
|
||||
) ([]*http.Response, error) {
|
||||
tasks, ctx := errgroup.WithContext(req.Context())
|
||||
req = req.WithContext(ctx) // so we cancel if the errgroup errors
|
||||
results := make(chan []*http.Response)
|
||||
|
||||
// For each client, send the request to it and collect results.
|
||||
tasks.Go(func() error {
|
||||
defer close(results)
|
||||
return mc.sendRequestsCtx(results, req, numSends)
|
||||
})
|
||||
|
||||
var responses []*http.Response
|
||||
tasks.Go(func() error {
|
||||
for rs := range results {
|
||||
responses = append(responses, rs...)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
return responses, tasks.Wait()
|
||||
}
|
||||
|
||||
func (mc *MultiClientCtx) sendRequestsCtx(
|
||||
results chan<- []*http.Response,
|
||||
req *http.Request,
|
||||
numSends int,
|
||||
) error {
|
||||
tasks := &errgroup.Group{}
|
||||
for _, d := range mc.delegates {
|
||||
d := d // copy for closure
|
||||
tasks.Go(func() error {
|
||||
responses, err := d.DoWithContext(req, numSends)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
results <- responses
|
||||
return nil
|
||||
})
|
||||
}
|
||||
return tasks.Wait()
|
||||
}
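NewMultiClientCtx plus the errgroup fan-out above is how a search gets replicated across every multicast-capable interface (see httpuClient further down). A hedged sketch of composing it by hand; both addresses are placeholders:

package main

import (
	"log"

	"github.com/huin/goupnp/httpu"
)

func main() {
	// One socket per local interface address.
	c1, err := httpu.NewHTTPUClientAddr("192.168.1.10")
	if err != nil {
		log.Fatal(err)
	}
	defer c1.Close()
	c2, err := httpu.NewHTTPUClientAddr("10.0.0.5")
	if err != nil {
		log.Fatal(err)
	}
	defer c2.Close()

	// Requests sent through mc go out on every delegate; responses are merged.
	mc := httpu.NewMultiClientCtx([]httpu.ClientInterfaceCtx{c1, c2})
	_ = mc
}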
|
||||
|
|
|
@ -10,14 +10,14 @@ import (
|
|||
// httpuClient creates a HTTPU client that multiplexes to all multicast-capable
|
||||
// IPv4 addresses on the host. Returns a function to clean up once the client is
|
||||
// no longer required.
|
||||
func httpuClient() (httpu.ClientInterface, func(), error) {
|
||||
func httpuClient() (httpu.ClientInterfaceCtx, func(), error) {
|
||||
addrs, err := localIPv4MCastAddrs()
|
||||
if err != nil {
|
||||
return nil, nil, ctxError(err, "requesting host IPv4 addresses")
|
||||
}
|
||||
|
||||
closers := make([]io.Closer, 0, len(addrs))
|
||||
delegates := make([]httpu.ClientInterface, 0, len(addrs))
|
||||
delegates := make([]httpu.ClientInterfaceCtx, 0, len(addrs))
|
||||
for _, addr := range addrs {
|
||||
c, err := httpu.NewHTTPUClientAddr(addr)
|
||||
if err != nil {
|
||||
|
@ -34,7 +34,7 @@ func httpuClient() (httpu.ClientInterface, func(), error) {
|
|||
}
|
||||
}
|
||||
|
||||
return httpu.NewMultiClient(delegates), closer, nil
|
||||
return httpu.NewMultiClientCtx(delegates), closer, nil
|
||||
}
|
||||
|
||||
// localIPv4MCastAddrs returns the set of IPv4 addresses on multicast-able
|
||||
|
|
|
@ -35,6 +35,15 @@ type HTTPUClient interface {
|
|||
) ([]*http.Response, error)
|
||||
}
|
||||
|
||||
// HTTPUClientCtx is an optional interface that will be used to perform
|
||||
// HTTP-over-UDP requests if the client implements it.
|
||||
type HTTPUClientCtx interface {
|
||||
DoWithContext(
|
||||
req *http.Request,
|
||||
numSends int,
|
||||
) ([]*http.Response, error)
|
||||
}
|
||||
|
||||
// SSDPRawSearchCtx performs a fairly raw SSDP search request, and returns the
|
||||
// unique response(s) that it receives. Each response has the requested
|
||||
// searchTarget, a USN, and a valid location. maxWaitSeconds states how long to
|
||||
|
@ -49,8 +58,64 @@ func SSDPRawSearchCtx(
|
|||
maxWaitSeconds int,
|
||||
numSends int,
|
||||
) ([]*http.Response, error) {
|
||||
req, err := prepareRequest(ctx, searchTarget, maxWaitSeconds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
allResponses, err := httpu.Do(req, time.Duration(maxWaitSeconds)*time.Second+100*time.Millisecond, numSends)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return processSSDPResponses(searchTarget, allResponses)
|
||||
}
|
||||
|
||||
// RawSearch performs a fairly raw SSDP search request, and returns the
|
||||
// unique response(s) that it receives. Each response has the requested
|
||||
// searchTarget, a USN, and a valid location. If the provided context times out
|
||||
// or is canceled, the search will be aborted. numSends is the number of
|
||||
// requests to send - 3 is a reasonable value for this.
|
||||
//
|
||||
// The provided context should have a deadline, since the SSDP protocol
|
||||
// requires the max wait time be included in search requests. If the context
|
||||
// has no deadline, then a default deadline of 3 seconds will be applied.
|
||||
func RawSearch(
|
||||
ctx context.Context,
|
||||
httpu HTTPUClientCtx,
|
||||
searchTarget string,
|
||||
numSends int,
|
||||
) ([]*http.Response, error) {
|
||||
// We need a timeout value to include in the SSDP request; get it by
|
||||
// checking the deadline on the context.
|
||||
var maxWaitSeconds int
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
maxWaitSeconds = int(deadline.Sub(time.Now()) / time.Second)
|
||||
} else {
|
||||
// Pick a default timeout of 3 seconds if none was provided.
|
||||
maxWaitSeconds = 3
|
||||
|
||||
var cancel func()
|
||||
ctx, cancel = context.WithTimeout(ctx, time.Duration(maxWaitSeconds)*time.Second)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
req, err := prepareRequest(ctx, searchTarget, maxWaitSeconds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
allResponses, err := httpu.DoWithContext(req, numSends)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return processSSDPResponses(searchTarget, allResponses)
|
||||
}
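Putting the pieces together, a caller drives the whole search through a context deadline, which is also used to derive the SSDP max-wait value; without one, RawSearch falls back to the 3-second default described above. A hedged sketch (ssdp.SSDPAll and the constructors are taken from the surrounding code):

package main

import (
	"context"
	"log"
	"time"

	"github.com/huin/goupnp/httpu"
	"github.com/huin/goupnp/ssdp"
)

func main() {
	client, err := httpu.NewHTTPUClient()
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	responses, err := ssdp.RawSearch(ctx, client, ssdp.SSDPAll, 3)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range responses {
		log.Println(r.Header.Get("LOCATION"), r.Header.Get("USN"))
	}
}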
|
||||
|
||||
// prepareRequest checks the provided parameters and constructs a SSDP search
|
||||
// request to be sent.
|
||||
func prepareRequest(ctx context.Context, searchTarget string, maxWaitSeconds int) (*http.Request, error) {
|
||||
if maxWaitSeconds < 1 {
|
||||
return nil, errors.New("ssdp: maxWaitSeconds must be >= 1")
|
||||
return nil, errors.New("ssdp: request timeout must be at least 1s")
|
||||
}
|
||||
|
||||
req := (&http.Request{
|
||||
|
@ -67,11 +132,13 @@ func SSDPRawSearchCtx(
|
|||
"ST": []string{searchTarget},
|
||||
},
|
||||
}).WithContext(ctx)
|
||||
allResponses, err := httpu.Do(req, time.Duration(maxWaitSeconds)*time.Second+100*time.Millisecond, numSends)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func processSSDPResponses(
|
||||
searchTarget string,
|
||||
allResponses []*http.Response,
|
||||
) ([]*http.Response, error) {
|
||||
isExactSearch := searchTarget != SSDPAll && searchTarget != UPNPRootDevice
|
||||
|
||||
seenIDs := make(map[string]bool)
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
before:
|
||||
hooks:
|
||||
- ./gen.sh
|
||||
- go install mvdan.cc/garble@v0.9.3
|
||||
- go install mvdan.cc/garble@v0.10.1
|
||||
|
||||
builds:
|
||||
-
|
||||
|
@ -92,16 +92,7 @@ builds:
|
|||
archives:
|
||||
-
|
||||
id: s2-binaries
|
||||
name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}"
|
||||
replacements:
|
||||
aix: AIX
|
||||
darwin: OSX
|
||||
linux: Linux
|
||||
windows: Windows
|
||||
386: i386
|
||||
amd64: x86_64
|
||||
freebsd: FreeBSD
|
||||
netbsd: NetBSD
|
||||
name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
|
||||
format_overrides:
|
||||
- goos: windows
|
||||
format: zip
|
||||
|
@ -125,7 +116,7 @@ changelog:
|
|||
|
||||
nfpms:
|
||||
-
|
||||
file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
|
||||
file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
|
||||
vendor: Klaus Post
|
||||
homepage: https://github.com/klauspost/compress
|
||||
maintainer: Klaus Post <klauspost@gmail.com>
|
||||
|
@ -134,8 +125,3 @@ nfpms:
|
|||
formats:
|
||||
- deb
|
||||
- rpm
|
||||
replacements:
|
||||
darwin: Darwin
|
||||
linux: Linux
|
||||
freebsd: FreeBSD
|
||||
amd64: x86_64
|
||||
|
|
|
@ -16,6 +16,18 @@ This package provides various compression algorithms.
|
|||
|
||||
# changelog
|
||||
|
||||
* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0)
|
||||
* Add experimental dictionary builder https://github.com/klauspost/compress/pull/853
|
||||
* Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838
|
||||
* flate: Add limited window compression https://github.com/klauspost/compress/pull/843
|
||||
* s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839
|
||||
* flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837
|
||||
* gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860
|
||||
|
||||
* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7)
|
||||
* zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829
|
||||
* s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832
|
||||
|
||||
* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6)
|
||||
* zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806
|
||||
* zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824
|
||||
|
@ -50,6 +62,9 @@ This package provides various compression algorithms.
|
|||
* s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
|
||||
* s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746
|
||||
|
||||
<details>
|
||||
<summary>See changes to v1.15.x</summary>
|
||||
|
||||
* Jan 21st, 2023 (v1.15.15)
|
||||
* deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
|
||||
* zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
|
||||
|
@ -176,6 +191,8 @@ Stream decompression is now faster on asynchronous, since the goroutine allocati
|
|||
|
||||
While the release has been extensively tested, it is recommended to test when upgrading.
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>See changes to v1.14.x</summary>
|
||||
|
||||
|
@ -636,6 +653,8 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv
|
|||
* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
|
||||
* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
|
||||
* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.
|
||||
* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index.
|
||||
* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor.
|
||||
|
||||
# license
|
||||
|
||||
|
|
|
@ -152,12 +152,11 @@ func (b *bitWriter) flushAlign() {
|
|||
|
||||
// close will write the alignment bit and write the final byte(s)
|
||||
// to the output.
|
||||
func (b *bitWriter) close() error {
|
||||
func (b *bitWriter) close() {
|
||||
// End mark
|
||||
b.addBits16Clean(1, 1)
|
||||
// flush until next byte.
|
||||
b.flushAlign()
|
||||
return nil
|
||||
}
|
||||
|
||||
// reset and continue writing by appending to out.
|
||||
|
|
|
@ -199,7 +199,8 @@ func (s *Scratch) compress(src []byte) error {
|
|||
c2.flush(s.actualTableLog)
|
||||
c1.flush(s.actualTableLog)
|
||||
|
||||
return s.bw.close()
|
||||
s.bw.close()
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeCount will write the normalized histogram count to header.
|
||||
|
|
|
@ -94,10 +94,9 @@ func (b *bitWriter) flushAlign() {
|
|||
|
||||
// close will write the alignment bit and write the final byte(s)
|
||||
// to the output.
|
||||
func (b *bitWriter) close() error {
|
||||
func (b *bitWriter) close() {
|
||||
// End mark
|
||||
b.addBits16Clean(1, 1)
|
||||
// flush until next byte.
|
||||
b.flushAlign()
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -227,10 +227,10 @@ func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err err
|
|||
}
|
||||
|
||||
func (s *Scratch) compress1X(src []byte) ([]byte, error) {
|
||||
return s.compress1xDo(s.Out, src)
|
||||
return s.compress1xDo(s.Out, src), nil
|
||||
}
|
||||
|
||||
func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
|
||||
func (s *Scratch) compress1xDo(dst, src []byte) []byte {
|
||||
var bw = bitWriter{out: dst}
|
||||
|
||||
// N is length divisible by 4.
|
||||
|
@ -260,8 +260,8 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
|
|||
bw.encTwoSymbols(cTable, tmp[1], tmp[0])
|
||||
}
|
||||
}
|
||||
err := bw.close()
|
||||
return bw.out, err
|
||||
bw.close()
|
||||
return bw.out
|
||||
}
|
||||
|
||||
var sixZeros [6]byte
|
||||
|
@ -283,12 +283,8 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) {
|
|||
}
|
||||
src = src[len(toDo):]
|
||||
|
||||
var err error
|
||||
idx := len(s.Out)
|
||||
s.Out, err = s.compress1xDo(s.Out, toDo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.Out = s.compress1xDo(s.Out, toDo)
|
||||
if len(s.Out)-idx > math.MaxUint16 {
|
||||
// We cannot store the size in the jump table
|
||||
return nil, ErrIncompressible
|
||||
|
@ -315,7 +311,6 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
|
|||
|
||||
segmentSize := (len(src) + 3) / 4
|
||||
var wg sync.WaitGroup
|
||||
var errs [4]error
|
||||
wg.Add(4)
|
||||
for i := 0; i < 4; i++ {
|
||||
toDo := src
|
||||
|
@ -326,15 +321,12 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
|
|||
|
||||
// Separate goroutine for each block.
|
||||
go func(i int) {
|
||||
s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
|
||||
s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
|
||||
wg.Done()
|
||||
}(i)
|
||||
}
|
||||
wg.Wait()
|
||||
for i := 0; i < 4; i++ {
|
||||
if errs[i] != nil {
|
||||
return nil, errs[i]
|
||||
}
|
||||
o := s.tmpOut[i]
|
||||
if len(o) > math.MaxUint16 {
|
||||
// We cannot store the size in the jump table
|
||||
|
|
|
@ -17,7 +17,6 @@ import (
|
|||
// for aligning the input.
|
||||
type bitReader struct {
|
||||
in []byte
|
||||
off uint // next byte to read is at in[off - 1]
|
||||
value uint64 // Maybe use [16]byte, but shifting is awkward.
|
||||
bitsRead uint8
|
||||
}
|
||||
|
@ -28,7 +27,6 @@ func (b *bitReader) init(in []byte) error {
|
|||
return errors.New("corrupt stream: too short")
|
||||
}
|
||||
b.in = in
|
||||
b.off = uint(len(in))
|
||||
// The highest bit of the last byte indicates where to start
|
||||
v := in[len(in)-1]
|
||||
if v == 0 {
|
||||
|
@ -69,21 +67,19 @@ func (b *bitReader) fillFast() {
|
|||
if b.bitsRead < 32 {
|
||||
return
|
||||
}
|
||||
// 2 bounds checks.
|
||||
v := b.in[b.off-4:]
|
||||
v = v[:4]
|
||||
v := b.in[len(b.in)-4:]
|
||||
b.in = b.in[:len(b.in)-4]
|
||||
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
|
||||
b.value = (b.value << 32) | uint64(low)
|
||||
b.bitsRead -= 32
|
||||
b.off -= 4
|
||||
}
|
||||
|
||||
// fillFastStart() assumes the bitreader is empty and there are at least 8 bytes to read.
|
||||
func (b *bitReader) fillFastStart() {
|
||||
// Do single re-slice to avoid bounds checks.
|
||||
b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
|
||||
v := b.in[len(b.in)-8:]
|
||||
b.in = b.in[:len(b.in)-8]
|
||||
b.value = binary.LittleEndian.Uint64(v)
|
||||
b.bitsRead = 0
|
||||
b.off -= 8
|
||||
}
|
||||
|
||||
// fill() will make sure at least 32 bits are available.
|
||||
|
@ -91,25 +87,25 @@ func (b *bitReader) fill() {
|
|||
if b.bitsRead < 32 {
|
||||
return
|
||||
}
|
||||
if b.off >= 4 {
|
||||
v := b.in[b.off-4:]
|
||||
v = v[:4]
|
||||
if len(b.in) >= 4 {
|
||||
v := b.in[len(b.in)-4:]
|
||||
b.in = b.in[:len(b.in)-4]
|
||||
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
|
||||
b.value = (b.value << 32) | uint64(low)
|
||||
b.bitsRead -= 32
|
||||
b.off -= 4
|
||||
return
|
||||
}
|
||||
for b.off > 0 {
|
||||
b.value = (b.value << 8) | uint64(b.in[b.off-1])
|
||||
b.bitsRead -= 8
|
||||
b.off--
|
||||
|
||||
b.bitsRead -= uint8(8 * len(b.in))
|
||||
for len(b.in) > 0 {
|
||||
b.value = (b.value << 8) | uint64(b.in[len(b.in)-1])
|
||||
b.in = b.in[:len(b.in)-1]
|
||||
}
|
||||
}
|
||||
|
||||
// finished returns true if all bits have been read from the bit stream.
|
||||
func (b *bitReader) finished() bool {
|
||||
return b.off == 0 && b.bitsRead >= 64
|
||||
return len(b.in) == 0 && b.bitsRead >= 64
|
||||
}
|
||||
|
||||
// overread returns true if more bits have been requested than is on the stream.
|
||||
|
@ -119,7 +115,7 @@ func (b *bitReader) overread() bool {
|
|||
|
||||
// remain returns the number of bits remaining.
|
||||
func (b *bitReader) remain() uint {
|
||||
return b.off*8 + 64 - uint(b.bitsRead)
|
||||
return 8*uint(len(b.in)) + 64 - uint(b.bitsRead)
|
||||
}
|
||||
|
||||
// close the bitstream and returns an error if out-of-buffer reads occurred.
|
||||
|
|
|
@ -97,12 +97,11 @@ func (b *bitWriter) flushAlign() {
|
|||
|
||||
// close will write the alignment bit and write the final byte(s)
|
||||
// to the output.
|
||||
func (b *bitWriter) close() error {
|
||||
func (b *bitWriter) close() {
|
||||
// End mark
|
||||
b.addBits16Clean(1, 1)
|
||||
// flush until next byte.
|
||||
b.flushAlign()
|
||||
return nil
|
||||
}
|
||||
|
||||
// reset and continue writing by appending to out.
|
||||
|
|
|
@ -361,14 +361,21 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
|
|||
if len(lits) >= 1024 {
|
||||
// Use 4 Streams.
|
||||
out, reUsed, err = huff0.Compress4X(lits, b.litEnc)
|
||||
} else if len(lits) > 32 {
|
||||
} else if len(lits) > 16 {
|
||||
// Use 1 stream
|
||||
single = true
|
||||
out, reUsed, err = huff0.Compress1X(lits, b.litEnc)
|
||||
} else {
|
||||
err = huff0.ErrIncompressible
|
||||
}
|
||||
|
||||
if err == nil && len(out)+5 > len(lits) {
|
||||
// If we are close, we may still be worse or equal to raw.
|
||||
var lh literalsHeader
|
||||
lh.setSizes(len(out), len(lits), single)
|
||||
if len(out)+lh.size() >= len(lits) {
|
||||
err = huff0.ErrIncompressible
|
||||
}
|
||||
}
|
||||
switch err {
|
||||
case huff0.ErrIncompressible:
|
||||
if debugEncoder {
|
||||
|
@ -503,7 +510,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
|
|||
if len(b.literals) >= 1024 && !raw {
|
||||
// Use 4 Streams.
|
||||
out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
|
||||
} else if len(b.literals) > 32 && !raw {
|
||||
} else if len(b.literals) > 16 && !raw {
|
||||
// Use 1 stream
|
||||
single = true
|
||||
out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
|
||||
|
@ -511,6 +518,17 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
|
|||
err = huff0.ErrIncompressible
|
||||
}
|
||||
|
||||
if err == nil && len(out)+5 > len(b.literals) {
|
||||
// If we are close, we may still be worse or equal to raw.
|
||||
var lh literalsHeader
|
||||
lh.setSize(len(b.literals))
|
||||
szRaw := lh.size()
|
||||
lh.setSizes(len(out), len(b.literals), single)
|
||||
szComp := lh.size()
|
||||
if len(out)+szComp >= len(b.literals)+szRaw {
|
||||
err = huff0.ErrIncompressible
|
||||
}
|
||||
}
|
||||
switch err {
|
||||
case huff0.ErrIncompressible:
|
||||
lh.setType(literalsBlockRaw)
|
||||
|
@ -773,10 +791,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
|
|||
ml.flush(mlEnc.actualTableLog)
|
||||
of.flush(ofEnc.actualTableLog)
|
||||
ll.flush(llEnc.actualTableLog)
|
||||
err = wr.close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
wr.close()
|
||||
b.output = wr.out
|
||||
|
||||
// Maybe even add a bigger margin.
|
||||
|
|
|
@ -1,10 +1,13 @@
|
|||
package zstd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
"github.com/klauspost/compress/huff0"
|
||||
)
|
||||
|
@ -14,7 +17,6 @@ type dict struct {
|
|||
|
||||
litEnc *huff0.Scratch
|
||||
llDec, ofDec, mlDec sequenceDec
|
||||
//llEnc, ofEnc, mlEnc []*fseEncoder
|
||||
offsets [3]int
|
||||
content []byte
|
||||
}
|
||||
|
@ -159,3 +161,374 @@ func InspectDictionary(b []byte) (interface {
|
|||
d, err := loadDict(b)
|
||||
return d, err
|
||||
}
|
||||
|
||||
type BuildDictOptions struct {
|
||||
// Dictionary ID.
|
||||
ID uint32
|
||||
|
||||
// Content to use to create dictionary tables.
|
||||
Contents [][]byte
|
||||
|
||||
// History to use for all blocks.
|
||||
History []byte
|
||||
|
||||
// Offsets to use.
|
||||
Offsets [3]int
|
||||
|
||||
// CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier.
|
||||
// See https://github.com/facebook/zstd/issues/3724
|
||||
CompatV155 bool
|
||||
|
||||
// Use the specified encoder level.
|
||||
// The dictionary will be built using the specified encoder level,
|
||||
// which will reflect speed and make the dictionary tailored for that level.
|
||||
// If not set SpeedBestCompression will be used.
|
||||
Level EncoderLevel
|
||||
|
||||
// DebugOut will write stats and other details here if set.
|
||||
DebugOut io.Writer
|
||||
}
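BuildDict is the new entry point this hunk introduces; its options map directly onto the fields above. A minimal sketch of building a dictionary from sample payloads and handing it to an encoder (the sample data, ID, and offsets are placeholders; the call fails if the samples yield no match sequences):

package main

import (
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Synthetic, repetitive samples so the builder can find sequences.
	var samples [][]byte
	for i := 0; i < 64; i++ {
		samples = append(samples, []byte("GET /api/v1/items?page=1 HTTP/1.1\r\nHost: example.com\r\n\r\n"))
	}
	history := []byte("HTTP/1.1\r\nHost: example.com\r\nContent-Type: application/json\r\n")

	dict, err := zstd.BuildDict(zstd.BuildDictOptions{
		ID:       1,               // placeholder dictionary ID
		Contents: samples,
		History:  history,         // becomes the dictionary content (>= 8 bytes)
		Offsets:  [3]int{1, 4, 8}, // starting repeat offsets
	})
	if err != nil {
		log.Fatal(err)
	}

	// Use it for encoding; the decoder side takes the same bytes via WithDecoderDicts.
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dict))
	if err != nil {
		log.Fatal(err)
	}
	defer enc.Close()
	compressed := enc.EncodeAll(samples[0], nil)
	log.Println("compressed size:", len(compressed))
}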
|
||||
|
||||
func BuildDict(o BuildDictOptions) ([]byte, error) {
|
||||
initPredefined()
|
||||
hist := o.History
|
||||
contents := o.Contents
|
||||
debug := o.DebugOut != nil
|
||||
println := func(args ...interface{}) {
|
||||
if o.DebugOut != nil {
|
||||
fmt.Fprintln(o.DebugOut, args...)
|
||||
}
|
||||
}
|
||||
printf := func(s string, args ...interface{}) {
|
||||
if o.DebugOut != nil {
|
||||
fmt.Fprintf(o.DebugOut, s, args...)
|
||||
}
|
||||
}
|
||||
print := func(args ...interface{}) {
|
||||
if o.DebugOut != nil {
|
||||
fmt.Fprint(o.DebugOut, args...)
|
||||
}
|
||||
}
|
||||
|
||||
if int64(len(hist)) > dictMaxLength {
|
||||
return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength))
|
||||
}
|
||||
if len(hist) < 8 {
|
||||
return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8)
|
||||
}
|
||||
if len(contents) == 0 {
|
||||
return nil, errors.New("no content provided")
|
||||
}
|
||||
d := dict{
|
||||
id: o.ID,
|
||||
litEnc: nil,
|
||||
llDec: sequenceDec{},
|
||||
ofDec: sequenceDec{},
|
||||
mlDec: sequenceDec{},
|
||||
offsets: o.Offsets,
|
||||
content: hist,
|
||||
}
|
||||
block := blockEnc{lowMem: false}
|
||||
block.init()
|
||||
enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}})
|
||||
if o.Level != 0 {
|
||||
eOpts := encoderOptions{
|
||||
level: o.Level,
|
||||
blockSize: maxMatchLen,
|
||||
windowSize: maxMatchLen,
|
||||
dict: &d,
|
||||
lowMem: false,
|
||||
}
|
||||
enc = eOpts.encoder()
|
||||
} else {
|
||||
o.Level = SpeedBestCompression
|
||||
}
|
||||
var (
|
||||
remain [256]int
|
||||
ll [256]int
|
||||
ml [256]int
|
||||
of [256]int
|
||||
)
|
||||
addValues := func(dst *[256]int, src []byte) {
|
||||
for _, v := range src {
|
||||
dst[v]++
|
||||
}
|
||||
}
|
||||
addHist := func(dst *[256]int, src *[256]uint32) {
|
||||
for i, v := range src {
|
||||
dst[i] += int(v)
|
||||
}
|
||||
}
|
||||
seqs := 0
|
||||
nUsed := 0
|
||||
litTotal := 0
|
||||
newOffsets := make(map[uint32]int, 1000)
|
||||
for _, b := range contents {
|
||||
block.reset(nil)
|
||||
if len(b) < 8 {
|
||||
continue
|
||||
}
|
||||
nUsed++
|
||||
enc.Reset(&d, true)
|
||||
enc.Encode(&block, b)
|
||||
addValues(&remain, block.literals)
|
||||
litTotal += len(block.literals)
|
||||
seqs += len(block.sequences)
|
||||
block.genCodes()
|
||||
addHist(&ll, block.coders.llEnc.Histogram())
|
||||
addHist(&ml, block.coders.mlEnc.Histogram())
|
||||
addHist(&of, block.coders.ofEnc.Histogram())
|
||||
for i, seq := range block.sequences {
|
||||
if i > 3 {
|
||||
break
|
||||
}
|
||||
offset := seq.offset
|
||||
if offset == 0 {
|
||||
continue
|
||||
}
|
||||
if offset > 3 {
|
||||
newOffsets[offset-3]++
|
||||
} else {
|
||||
newOffsets[uint32(o.Offsets[offset-1])]++
|
||||
}
|
||||
}
|
||||
}
|
||||
// Find most used offsets.
|
||||
var sortedOffsets []uint32
|
||||
for k := range newOffsets {
|
||||
sortedOffsets = append(sortedOffsets, k)
|
||||
}
|
||||
sort.Slice(sortedOffsets, func(i, j int) bool {
|
||||
a, b := sortedOffsets[i], sortedOffsets[j]
|
||||
if a == b {
|
||||
// Prefer the longer offset
|
||||
return sortedOffsets[i] > sortedOffsets[j]
|
||||
}
|
||||
return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]]
|
||||
})
|
||||
if len(sortedOffsets) > 3 {
|
||||
if debug {
|
||||
print("Offsets:")
|
||||
for i, v := range sortedOffsets {
|
||||
if i > 20 {
|
||||
break
|
||||
}
|
||||
printf("[%d: %d],", v, newOffsets[v])
|
||||
}
|
||||
println("")
|
||||
}
|
||||
|
||||
sortedOffsets = sortedOffsets[:3]
|
||||
}
|
||||
for i, v := range sortedOffsets {
|
||||
o.Offsets[i] = int(v)
|
||||
}
|
||||
if debug {
|
||||
println("New repeat offsets", o.Offsets)
|
||||
}
|
||||
|
||||
if nUsed == 0 || seqs == 0 {
|
||||
return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs)
|
||||
}
|
||||
if debug {
|
||||
println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal)
|
||||
}
|
||||
if seqs/nUsed < 512 {
|
||||
// Use 512 as minimum.
|
||||
nUsed = seqs / 512
|
||||
}
|
||||
copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) {
|
||||
hist := dst.Histogram()
|
||||
var maxSym uint8
|
||||
var maxCount int
|
||||
var fakeLength int
|
||||
for i, v := range src {
|
||||
if v > 0 {
|
||||
v = v / nUsed
|
||||
if v == 0 {
|
||||
v = 1
|
||||
}
|
||||
}
|
||||
if v > maxCount {
|
||||
maxCount = v
|
||||
}
|
||||
if v != 0 {
|
||||
maxSym = uint8(i)
|
||||
}
|
||||
fakeLength += v
|
||||
hist[i] = uint32(v)
|
||||
}
|
||||
dst.HistogramFinished(maxSym, maxCount)
|
||||
dst.reUsed = false
|
||||
dst.useRLE = false
|
||||
err := dst.normalizeCount(fakeLength)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if debug {
|
||||
println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength)
|
||||
}
|
||||
return dst.writeCount(nil)
|
||||
}
|
||||
if debug {
|
||||
print("Literal lengths: ")
|
||||
}
|
||||
llTable, err := copyHist(block.coders.llEnc, &ll)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if debug {
|
||||
print("Match lengths: ")
|
||||
}
|
||||
mlTable, err := copyHist(block.coders.mlEnc, &ml)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if debug {
|
||||
print("Offsets: ")
|
||||
}
|
||||
ofTable, err := copyHist(block.coders.ofEnc, &of)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Literal table
|
||||
avgSize := litTotal
|
||||
if avgSize > huff0.BlockSizeMax/2 {
|
||||
avgSize = huff0.BlockSizeMax / 2
|
||||
}
|
||||
huffBuff := make([]byte, 0, avgSize)
|
||||
// Target size
|
||||
div := litTotal / avgSize
|
||||
if div < 1 {
|
||||
div = 1
|
||||
}
|
||||
if debug {
|
||||
println("Huffman weights:")
|
||||
}
|
||||
for i, n := range remain[:] {
|
||||
if n > 0 {
|
||||
n = n / div
|
||||
// Allow all entries to be represented.
|
||||
if n == 0 {
|
||||
n = 1
|
||||
}
|
||||
huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
|
||||
if debug {
|
||||
printf("[%d: %d], ", i, n)
|
||||
}
|
||||
}
|
||||
}
|
||||
if o.CompatV155 && remain[255]/div == 0 {
|
||||
huffBuff = append(huffBuff, 255)
|
||||
}
|
||||
scratch := &huff0.Scratch{TableLog: 11}
|
||||
for tries := 0; tries < 255; tries++ {
|
||||
scratch = &huff0.Scratch{TableLog: 11}
|
||||
_, _, err = huff0.Compress1X(huffBuff, scratch)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
if debug {
|
||||
printf("Try %d: Huffman error: %v\n", tries+1, err)
|
||||
}
|
||||
huffBuff = huffBuff[:0]
|
||||
if tries == 250 {
|
||||
if debug {
|
||||
println("Huffman: Bailing out with predefined table")
|
||||
}
|
||||
|
||||
// Bail out.... Just generate something
|
||||
huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...)
|
||||
for i := 0; i < 128; i++ {
|
||||
huffBuff = append(huffBuff, byte(i))
|
||||
}
|
||||
continue
|
||||
}
|
||||
if errors.Is(err, huff0.ErrIncompressible) {
|
||||
// Try truncating least common.
|
||||
for i, n := range remain[:] {
|
||||
if n > 0 {
|
||||
n = n / (div * (i + 1))
|
||||
if n > 0 {
|
||||
huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
|
||||
}
|
||||
}
|
||||
}
|
||||
if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 {
|
||||
huffBuff = append(huffBuff, 255)
|
||||
}
|
||||
if len(huffBuff) == 0 {
|
||||
huffBuff = append(huffBuff, 0, 255)
|
||||
}
|
||||
}
|
||||
if errors.Is(err, huff0.ErrUseRLE) {
|
||||
for i, n := range remain[:] {
|
||||
n = n / (div * (i + 1))
|
||||
// Allow all entries to be represented.
|
||||
if n == 0 {
|
||||
n = 1
|
||||
}
|
||||
huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var out bytes.Buffer
|
||||
out.Write([]byte(dictMagic))
|
||||
out.Write(binary.LittleEndian.AppendUint32(nil, o.ID))
|
||||
out.Write(scratch.OutTable)
|
||||
if debug {
|
||||
println("huff table:", len(scratch.OutTable), "bytes")
|
||||
println("of table:", len(ofTable), "bytes")
|
||||
println("ml table:", len(mlTable), "bytes")
|
||||
println("ll table:", len(llTable), "bytes")
|
||||
}
|
||||
out.Write(ofTable)
|
||||
out.Write(mlTable)
|
||||
out.Write(llTable)
|
||||
out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0])))
|
||||
out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1])))
|
||||
out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2])))
|
||||
out.Write(hist)
|
||||
if debug {
|
||||
_, err := loadDict(out.Bytes())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
i, err := InspectDictionary(out.Bytes())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
println("ID:", i.ID())
|
||||
println("Content size:", i.ContentSize())
|
||||
println("Encoder:", i.LitEncoder() != nil)
|
||||
println("Offsets:", i.Offsets())
|
||||
var totalSize int
|
||||
for _, b := range contents {
|
||||
totalSize += len(b)
|
||||
}
|
||||
|
||||
encWith := func(opts ...EOption) int {
|
||||
enc, err := NewWriter(nil, opts...)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer enc.Close()
|
||||
var dst []byte
|
||||
var totalSize int
|
||||
for _, b := range contents {
|
||||
dst = enc.EncodeAll(b, dst[:0])
|
||||
totalSize += len(dst)
|
||||
}
|
||||
return totalSize
|
||||
}
|
||||
plain := encWith(WithEncoderLevel(o.Level))
|
||||
withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes()))
|
||||
println("Input size:", totalSize)
|
||||
println("Plain Compressed:", plain)
|
||||
println("Dict Compressed:", withDict)
|
||||
println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)")
|
||||
}
|
||||
return out.Bytes(), nil
|
||||
}
|
||||
|
|
|
@ -197,12 +197,13 @@ encodeLoop:
|
|||
|
||||
// Set m to a match at offset if it looks like that will improve compression.
|
||||
improve := func(m *match, offset int32, s int32, first uint32, rep int32) {
|
||||
if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
|
||||
delta := s - offset
|
||||
if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first {
|
||||
return
|
||||
}
|
||||
if debugAsserts {
|
||||
if offset <= 0 {
|
||||
panic(offset)
|
||||
if offset >= s {
|
||||
panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff))
|
||||
}
|
||||
if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
|
||||
panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
|
||||
|
@ -343,8 +344,8 @@ encodeLoop:
|
|||
if best.rep > 0 {
|
||||
var seq seq
|
||||
seq.matchLen = uint32(best.length - zstdMinMatch)
|
||||
if debugAsserts && s <= nextEmit {
|
||||
panic("s <= nextEmit")
|
||||
if debugAsserts && s < nextEmit {
|
||||
panic("s < nextEmit")
|
||||
}
|
||||
addLiterals(&seq, best.s)
|
||||
|
||||
|
|
|
@ -227,10 +227,7 @@ func (e *Encoder) nextBlock(final bool) error {
|
|||
DictID: e.o.dict.ID(),
|
||||
}
|
||||
|
||||
dst, err := fh.appendTo(tmp[:0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dst := fh.appendTo(tmp[:0])
|
||||
s.headerWritten = true
|
||||
s.wWg.Wait()
|
||||
var n2 int
|
||||
|
@ -483,7 +480,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
|
|||
Checksum: false,
|
||||
DictID: 0,
|
||||
}
|
||||
dst, _ = fh.appendTo(dst)
|
||||
dst = fh.appendTo(dst)
|
||||
|
||||
// Write raw block as last one only.
|
||||
var blk blockHeader
|
||||
|
@ -518,10 +515,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
|
|||
if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem {
|
||||
dst = make([]byte, 0, len(src))
|
||||
}
|
||||
dst, err := fh.appendTo(dst)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
dst = fh.appendTo(dst)
|
||||
|
||||
// If we can do everything in one block, prefer that.
|
||||
if len(src) <= e.o.blockSize {
|
||||
|
@ -581,6 +575,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
|
|||
// Add padding with content from crypto/rand.Reader
|
||||
if e.o.pad > 0 {
|
||||
add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad))
|
||||
var err error
|
||||
dst, err = skippableFrame(dst, add, rand.Reader)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
|
|
@ -22,7 +22,7 @@ type frameHeader struct {
|
|||
|
||||
const maxHeaderSize = 14
|
||||
|
||||
func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
|
||||
func (f frameHeader) appendTo(dst []byte) []byte {
|
||||
dst = append(dst, frameMagic...)
|
||||
var fhd uint8
|
||||
if f.Checksum {
|
||||
|
@ -88,7 +88,7 @@ func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
|
|||
default:
|
||||
panic("invalid fcs")
|
||||
}
|
||||
return dst, nil
|
||||
return dst
|
||||
}
|
||||
|
||||
const skippableFrameHeader = 4 + 4
|
||||
|
|
|
@ -245,7 +245,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
|
|||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
var ll, mo, ml int
|
||||
if br.off > 4+((maxOffsetBits+16+16)>>3) {
|
||||
if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
|
||||
// inlined function:
|
||||
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
|
||||
|
||||
|
@ -452,18 +452,13 @@ func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol)
|
|||
|
||||
// extra bits are stored in reverse order.
|
||||
br.fill()
|
||||
if s.maxBits <= 32 {
|
||||
mo += br.getBits(moB)
|
||||
ml += br.getBits(mlB)
|
||||
ll += br.getBits(llB)
|
||||
} else {
|
||||
mo += br.getBits(moB)
|
||||
if s.maxBits > 32 {
|
||||
br.fill()
|
||||
}
|
||||
// matchlength+literal length, max 32 bits
|
||||
ml += br.getBits(mlB)
|
||||
ll += br.getBits(llB)
|
||||
|
||||
}
|
||||
mo = s.adjustOffset(mo, ll, moB)
|
||||
return
|
||||
}
|
||||
|
|
|
@ -5,11 +5,11 @@
|
|||
// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
||||
// Requires: CMOV
|
||||
TEXT ·sequenceDecs_decode_amd64(SB), $8-32
|
||||
MOVQ br+8(FP), AX
|
||||
MOVQ 32(AX), DX
|
||||
MOVBQZX 40(AX), BX
|
||||
MOVQ 24(AX), SI
|
||||
MOVQ (AX), AX
|
||||
MOVQ br+8(FP), CX
|
||||
MOVQ 24(CX), DX
|
||||
MOVBQZX 32(CX), BX
|
||||
MOVQ (CX), AX
|
||||
MOVQ 8(CX), SI
|
||||
ADDQ SI, AX
|
||||
MOVQ AX, (SP)
|
||||
MOVQ ctx+16(FP), AX
|
||||
|
@ -301,9 +301,9 @@ sequenceDecs_decode_amd64_match_len_ofs_ok:
|
|||
MOVQ R12, 152(AX)
|
||||
MOVQ R13, 160(AX)
|
||||
MOVQ br+8(FP), AX
|
||||
MOVQ DX, 32(AX)
|
||||
MOVB BL, 40(AX)
|
||||
MOVQ SI, 24(AX)
|
||||
MOVQ DX, 24(AX)
|
||||
MOVB BL, 32(AX)
|
||||
MOVQ SI, 8(AX)
|
||||
|
||||
// Return success
|
||||
MOVQ $0x00000000, ret+24(FP)
|
||||
|
@ -336,11 +336,11 @@ error_overread:
|
|||
// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
||||
// Requires: CMOV
|
||||
TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
|
||||
MOVQ br+8(FP), AX
|
||||
MOVQ 32(AX), DX
|
||||
MOVBQZX 40(AX), BX
|
||||
MOVQ 24(AX), SI
|
||||
MOVQ (AX), AX
|
||||
MOVQ br+8(FP), CX
|
||||
MOVQ 24(CX), DX
|
||||
MOVBQZX 32(CX), BX
|
||||
MOVQ (CX), AX
|
||||
MOVQ 8(CX), SI
|
||||
ADDQ SI, AX
|
||||
MOVQ AX, (SP)
|
||||
MOVQ ctx+16(FP), AX
|
||||
|
@ -603,9 +603,9 @@ sequenceDecs_decode_56_amd64_match_len_ofs_ok:
|
|||
MOVQ R12, 152(AX)
|
||||
MOVQ R13, 160(AX)
|
||||
MOVQ br+8(FP), AX
|
||||
MOVQ DX, 32(AX)
|
||||
MOVB BL, 40(AX)
|
||||
MOVQ SI, 24(AX)
|
||||
MOVQ DX, 24(AX)
|
||||
MOVB BL, 32(AX)
|
||||
MOVQ SI, 8(AX)
|
||||
|
||||
// Return success
|
||||
MOVQ $0x00000000, ret+24(FP)
|
||||
|
@ -638,11 +638,11 @@ error_overread:
|
|||
// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
||||
// Requires: BMI, BMI2, CMOV
|
||||
TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
|
||||
MOVQ br+8(FP), CX
|
||||
MOVQ 32(CX), AX
|
||||
MOVBQZX 40(CX), DX
|
||||
MOVQ 24(CX), BX
|
||||
MOVQ (CX), CX
|
||||
MOVQ br+8(FP), BX
|
||||
MOVQ 24(BX), AX
|
||||
MOVBQZX 32(BX), DX
|
||||
MOVQ (BX), CX
|
||||
MOVQ 8(BX), BX
|
||||
ADDQ BX, CX
|
||||
MOVQ CX, (SP)
|
||||
MOVQ ctx+16(FP), CX
|
||||
|
@ -892,9 +892,9 @@ sequenceDecs_decode_bmi2_match_len_ofs_ok:
|
|||
MOVQ R11, 152(CX)
|
||||
MOVQ R12, 160(CX)
|
||||
MOVQ br+8(FP), CX
|
||||
MOVQ AX, 32(CX)
|
||||
MOVB DL, 40(CX)
|
||||
MOVQ BX, 24(CX)
|
||||
MOVQ AX, 24(CX)
|
||||
MOVB DL, 32(CX)
|
||||
MOVQ BX, 8(CX)
|
||||
|
||||
// Return success
|
||||
MOVQ $0x00000000, ret+24(FP)
|
||||
|
@ -927,11 +927,11 @@ error_overread:
|
|||
// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
||||
// Requires: BMI, BMI2, CMOV
|
||||
TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
|
||||
MOVQ br+8(FP), CX
|
||||
MOVQ 32(CX), AX
|
||||
MOVBQZX 40(CX), DX
|
||||
MOVQ 24(CX), BX
|
||||
MOVQ (CX), CX
|
||||
MOVQ br+8(FP), BX
|
||||
MOVQ 24(BX), AX
|
||||
MOVBQZX 32(BX), DX
|
||||
MOVQ (BX), CX
|
||||
MOVQ 8(BX), BX
|
||||
ADDQ BX, CX
|
||||
MOVQ CX, (SP)
|
||||
MOVQ ctx+16(FP), CX
|
||||
|
@ -1152,9 +1152,9 @@ sequenceDecs_decode_56_bmi2_match_len_ofs_ok:
|
|||
MOVQ R11, 152(CX)
|
||||
MOVQ R12, 160(CX)
|
||||
MOVQ br+8(FP), CX
|
||||
MOVQ AX, 32(CX)
|
||||
MOVB DL, 40(CX)
|
||||
MOVQ BX, 24(CX)
|
||||
MOVQ AX, 24(CX)
|
||||
MOVB DL, 32(CX)
|
||||
MOVQ BX, 8(CX)
|
||||
|
||||
// Return success
|
||||
MOVQ $0x00000000, ret+24(FP)
|
||||
|
@ -1797,11 +1797,11 @@ empty_seqs:
|
|||
// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
|
||||
// Requires: CMOV, SSE
|
||||
TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
|
||||
MOVQ br+8(FP), AX
|
||||
MOVQ 32(AX), DX
|
||||
MOVBQZX 40(AX), BX
|
||||
MOVQ 24(AX), SI
|
||||
MOVQ (AX), AX
|
||||
MOVQ br+8(FP), CX
|
||||
MOVQ 24(CX), DX
|
||||
MOVBQZX 32(CX), BX
|
||||
MOVQ (CX), AX
|
||||
MOVQ 8(CX), SI
|
||||
ADDQ SI, AX
|
||||
MOVQ AX, (SP)
|
||||
MOVQ ctx+16(FP), AX
|
||||
|
@ -2295,9 +2295,9 @@ handle_loop:
|
|||
|
||||
loop_finished:
|
||||
MOVQ br+8(FP), AX
|
||||
MOVQ DX, 32(AX)
|
||||
MOVB BL, 40(AX)
|
||||
MOVQ SI, 24(AX)
|
||||
MOVQ DX, 24(AX)
|
||||
MOVB BL, 32(AX)
|
||||
MOVQ SI, 8(AX)
|
||||
|
||||
// Update the context
|
||||
MOVQ ctx+16(FP), AX
|
||||
|
@ -2362,11 +2362,11 @@ error_not_enough_space:
|
|||
// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
|
||||
// Requires: BMI, BMI2, CMOV, SSE
|
||||
TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
|
||||
MOVQ br+8(FP), CX
|
||||
MOVQ 32(CX), AX
|
||||
MOVBQZX 40(CX), DX
|
||||
MOVQ 24(CX), BX
|
||||
MOVQ (CX), CX
|
||||
MOVQ br+8(FP), BX
|
||||
MOVQ 24(BX), AX
|
||||
MOVBQZX 32(BX), DX
|
||||
MOVQ (BX), CX
|
||||
MOVQ 8(BX), BX
|
||||
ADDQ BX, CX
|
||||
MOVQ CX, (SP)
|
||||
MOVQ ctx+16(FP), CX
|
||||
|
@ -2818,9 +2818,9 @@ handle_loop:
|
|||
|
||||
loop_finished:
|
||||
MOVQ br+8(FP), CX
|
||||
MOVQ AX, 32(CX)
|
||||
MOVB DL, 40(CX)
|
||||
MOVQ BX, 24(CX)
|
||||
MOVQ AX, 24(CX)
|
||||
MOVB DL, 32(CX)
|
||||
MOVQ BX, 8(CX)
|
||||
|
||||
// Update the context
|
||||
MOVQ ctx+16(FP), AX
|
||||
|
@ -2885,11 +2885,11 @@ error_not_enough_space:
|
|||
// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
|
||||
// Requires: CMOV, SSE
|
||||
TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
|
||||
MOVQ br+8(FP), AX
|
||||
MOVQ 32(AX), DX
|
||||
MOVBQZX 40(AX), BX
|
||||
MOVQ 24(AX), SI
|
||||
MOVQ (AX), AX
|
||||
MOVQ br+8(FP), CX
|
||||
MOVQ 24(CX), DX
|
||||
MOVBQZX 32(CX), BX
|
||||
MOVQ (CX), AX
|
||||
MOVQ 8(CX), SI
|
||||
ADDQ SI, AX
|
||||
MOVQ AX, (SP)
|
||||
MOVQ ctx+16(FP), AX
|
||||
|
@ -3485,9 +3485,9 @@ handle_loop:
|
|||
|
||||
loop_finished:
|
||||
MOVQ br+8(FP), AX
|
||||
MOVQ DX, 32(AX)
|
||||
MOVB BL, 40(AX)
|
||||
MOVQ SI, 24(AX)
|
||||
MOVQ DX, 24(AX)
|
||||
MOVB BL, 32(AX)
|
||||
MOVQ SI, 8(AX)
|
||||
|
||||
// Update the context
|
||||
MOVQ ctx+16(FP), AX
|
||||
|
@ -3552,11 +3552,11 @@ error_not_enough_space:
|
|||
// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
|
||||
// Requires: BMI, BMI2, CMOV, SSE
|
||||
TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
|
||||
MOVQ br+8(FP), CX
|
||||
MOVQ 32(CX), AX
|
||||
MOVBQZX 40(CX), DX
|
||||
MOVQ 24(CX), BX
|
||||
MOVQ (CX), CX
|
||||
MOVQ br+8(FP), BX
|
||||
MOVQ 24(BX), AX
|
||||
MOVBQZX 32(BX), DX
|
||||
MOVQ (BX), CX
|
||||
MOVQ 8(BX), BX
|
||||
ADDQ BX, CX
|
||||
MOVQ CX, (SP)
|
||||
MOVQ ctx+16(FP), CX
|
||||
|
@ -4110,9 +4110,9 @@ handle_loop:
|
|||
|
||||
loop_finished:
|
||||
MOVQ br+8(FP), CX
|
||||
MOVQ AX, 32(CX)
|
||||
MOVB DL, 40(CX)
|
||||
MOVQ BX, 24(CX)
|
||||
MOVQ AX, 24(CX)
|
||||
MOVB DL, 32(CX)
|
||||
MOVQ BX, 8(CX)
|
||||
|
||||
// Update the context
|
||||
MOVQ ctx+16(FP), AX
|
||||
|
|
|
@ -29,7 +29,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
|
|||
}
|
||||
for i := range seqs {
|
||||
var ll, mo, ml int
|
||||
if br.off > 4+((maxOffsetBits+16+16)>>3) {
|
||||
if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
|
||||
// inlined function:
|
||||
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
|
||||
|
||||
|
|
|
@ -95,10 +95,9 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
|
|||
var written int64
|
||||
var readHeader bool
|
||||
{
|
||||
var header []byte
|
||||
var n int
|
||||
header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])
|
||||
header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])
|
||||
|
||||
var n int
|
||||
n, r.err = w.Write(header)
|
||||
if r.err != nil {
|
||||
return written, r.err
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
*.swp
|
|
@ -1,6 +1,6 @@
|
|||
MIT License
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2018
|
||||
Copyright (c) 2014 Juan Batiz-Benet
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
@ -9,13 +9,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
|
@ -0,0 +1,11 @@
|
|||
# DEPRECATION NOTICE
|
||||
|
||||
mplex has been deprecated.
|
||||
|
||||
see https://github.com/libp2p/specs/issues/553 for details
|
||||
|
||||
# go-libp2p-mplex - a go-stream-muxer shim for multiplex
|
||||
|
||||
[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) [![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) ![](https://raw.githubusercontent.com/libp2p/go-stream-muxer/master/img/badge.png)
|
||||
|
||||
This is an implementation of the [go-libp2p muxer](https://pkg.go.dev/github.com/libp2p/go-libp2p@v0.30.0/core/network#Multiplexer) interface for [multiplex](https://github.com/libp2p/go-mplex). For more information, see that repo.
|
|
@ -1,3 +1,5 @@
|
|||
// DEPRECATED: mplex has been deprecated. Users should prefer Yamux over mplex. see https://github.com/libp2p/specs/issues/553
|
||||
// for details
|
||||
package mplex
|
||||
|
||||
import (
|
|
@ -0,0 +1,3 @@
|
|||
{
|
||||
"version": "v0.9.0"
|
||||
}
|
|
@ -77,6 +77,9 @@ func (p *PubSub) handleNewStream(s network.Stream) {
|
|||
|
||||
return
|
||||
}
|
||||
if len(msgbytes) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
rpc := new(RPC)
|
||||
err = rpc.Unmarshal(msgbytes)
|
||||
|
|
|
@ -1170,20 +1170,20 @@ func (gs *GossipSubRouter) sendRPC(p peer.ID, out *RPC) {
|
|||
return
|
||||
}
|
||||
|
||||
// If we're too big, fragment into multiple RPCs and send each sequentially
|
||||
outRPCs, err := fragmentRPC(out, gs.p.maxMessageSize)
|
||||
if err != nil {
|
||||
gs.doDropRPC(out, p, fmt.Sprintf("unable to fragment RPC: %s", err))
|
||||
return
|
||||
}
|
||||
|
||||
// Potentially split the RPC into multiple RPCs that are below the max message size
|
||||
outRPCs := appendOrMergeRPC(nil, gs.p.maxMessageSize, *out)
|
||||
for _, rpc := range outRPCs {
|
||||
if rpc.Size() > gs.p.maxMessageSize {
|
||||
// This should only happen if a single message/control is above the maxMessageSize.
|
||||
gs.doDropRPC(out, p, fmt.Sprintf("Dropping oversized RPC. Size: %d, limit: %d. (Over by %d bytes)", rpc.Size(), gs.p.maxMessageSize, rpc.Size()-gs.p.maxMessageSize))
|
||||
continue
|
||||
}
|
||||
gs.doSendRPC(rpc, p, mch)
|
||||
}
|
||||
}
|
||||
|
||||
func (gs *GossipSubRouter) doDropRPC(rpc *RPC, p peer.ID, reason string) {
|
||||
log.Debugf("dropping message to peer %s: %s", p.Pretty(), reason)
|
||||
log.Debugf("dropping message to peer %s: %s", p, reason)
|
||||
gs.tracer.DropRPC(rpc, p)
|
||||
// push control messages that need to be retried
|
||||
ctl := rpc.GetControl()
|
||||
|
@ -1201,119 +1201,134 @@ func (gs *GossipSubRouter) doSendRPC(rpc *RPC, p peer.ID, mch chan *RPC) {
|
|||
}
|
||||
}
|
||||
|
||||
func fragmentRPC(rpc *RPC, limit int) ([]*RPC, error) {
|
||||
if rpc.Size() < limit {
|
||||
return []*RPC{rpc}, nil
|
||||
// appendOrMergeRPC appends the given RPCs to the slice, merging them if possible.
|
||||
// If any elem is too large to fit in a single RPC, it will be split into multiple RPCs.
|
||||
// If an RPC is too large and can't be split further (e.g. Message data is
|
||||
// bigger than the RPC limit), then it will be returned as an oversized RPC.
|
||||
// The caller should filter out oversized RPCs.
|
||||
func appendOrMergeRPC(slice []*RPC, limit int, elems ...RPC) []*RPC {
|
||||
if len(elems) == 0 {
|
||||
return slice
|
||||
}
|
||||
|
||||
c := (rpc.Size() / limit) + 1
|
||||
rpcs := make([]*RPC, 1, c)
|
||||
rpcs[0] = &RPC{RPC: pb.RPC{}, from: rpc.from}
|
||||
|
||||
// outRPC returns the current RPC message if it will fit sizeToAdd more bytes
|
||||
// otherwise, it will create a new RPC message and add it to the list.
|
||||
// if withCtl is true, the returned message will have a non-nil empty Control message.
|
||||
outRPC := func(sizeToAdd int, withCtl bool) *RPC {
|
||||
current := rpcs[len(rpcs)-1]
|
||||
// check if we can fit the new data, plus an extra byte for the protobuf field tag
|
||||
if current.Size()+sizeToAdd+1 < limit {
|
||||
if withCtl && current.Control == nil {
|
||||
current.Control = &pb.ControlMessage{}
|
||||
}
|
||||
return current
|
||||
}
|
||||
var ctl *pb.ControlMessage
|
||||
if withCtl {
|
||||
ctl = &pb.ControlMessage{}
|
||||
}
|
||||
next := &RPC{RPC: pb.RPC{Control: ctl}, from: rpc.from}
|
||||
rpcs = append(rpcs, next)
|
||||
return next
|
||||
if len(slice) == 0 && len(elems) == 1 && elems[0].Size() < limit {
|
||||
// Fast path: no merging needed and only one element
|
||||
return append(slice, &elems[0])
|
||||
}
|
||||
|
||||
for _, msg := range rpc.GetPublish() {
|
||||
s := msg.Size()
|
||||
// if an individual message is too large, we can't fragment it and have to fail entirely
|
||||
if s > limit {
|
||||
return nil, fmt.Errorf("message with len=%d exceeds limit %d", s, limit)
|
||||
}
|
||||
out := outRPC(s, false)
|
||||
out.Publish = append(out.Publish, msg)
|
||||
out := slice
|
||||
if len(out) == 0 {
|
||||
out = append(out, &RPC{RPC: pb.RPC{}})
|
||||
out[0].from = elems[0].from
|
||||
}
|
||||
|
||||
for _, sub := range rpc.GetSubscriptions() {
|
||||
out := outRPC(sub.Size(), false)
|
||||
out.Subscriptions = append(out.Subscriptions, sub)
|
||||
for _, elem := range elems {
|
||||
lastRPC := out[len(out)-1]
|
||||
|
||||
// Merge/Append publish messages
|
||||
// TODO: Never merge messages. The current behavior is the same as the
|
||||
// old behavior. In the future let's not merge messages. Since,
|
||||
// it may increase message latency.
|
||||
for _, msg := range elem.GetPublish() {
|
||||
if lastRPC.Publish = append(lastRPC.Publish, msg); lastRPC.Size() > limit {
|
||||
lastRPC.Publish = lastRPC.Publish[:len(lastRPC.Publish)-1]
|
||||
lastRPC = &RPC{RPC: pb.RPC{}, from: elem.from}
|
||||
lastRPC.Publish = append(lastRPC.Publish, msg)
|
||||
out = append(out, lastRPC)
|
||||
}
|
||||
}
|
||||
|
||||
ctl := rpc.GetControl()
|
||||
if ctl == nil {
|
||||
// if there were no control messages, we're done
|
||||
return rpcs, nil
|
||||
// Merge/Append Subscriptions
|
||||
for _, sub := range elem.GetSubscriptions() {
|
||||
if lastRPC.Subscriptions = append(lastRPC.Subscriptions, sub); lastRPC.Size() > limit {
|
||||
lastRPC.Subscriptions = lastRPC.Subscriptions[:len(lastRPC.Subscriptions)-1]
|
||||
lastRPC = &RPC{RPC: pb.RPC{}, from: elem.from}
|
||||
lastRPC.Subscriptions = append(lastRPC.Subscriptions, sub)
|
||||
out = append(out, lastRPC)
|
||||
}
|
||||
// if all the control messages fit into one RPC, we just add it to the end and return
|
||||
ctlOut := &RPC{RPC: pb.RPC{Control: ctl}, from: rpc.from}
|
||||
if ctlOut.Size() < limit {
|
||||
rpcs = append(rpcs, ctlOut)
|
||||
return rpcs, nil
|
||||
}
|
||||
|
||||
// we need to split up the control messages into multiple RPCs
|
||||
for _, graft := range ctl.Graft {
|
||||
out := outRPC(graft.Size(), true)
|
||||
out.Control.Graft = append(out.Control.Graft, graft)
|
||||
// Merge/Append Control messages
|
||||
if ctl := elem.GetControl(); ctl != nil {
|
||||
if lastRPC.Control == nil {
|
||||
lastRPC.Control = &pb.ControlMessage{}
|
||||
if lastRPC.Size() > limit {
|
||||
lastRPC.Control = nil
|
||||
lastRPC = &RPC{RPC: pb.RPC{Control: &pb.ControlMessage{}}, from: elem.from}
|
||||
out = append(out, lastRPC)
|
||||
}
|
||||
for _, prune := range ctl.Prune {
|
||||
out := outRPC(prune.Size(), true)
|
||||
out.Control.Prune = append(out.Control.Prune, prune)
|
||||
}
|
||||
|
||||
// An individual IWANT or IHAVE message could be larger than the limit if we have
|
||||
// a lot of message IDs. fragmentMessageIds will split them into buckets that
|
||||
// fit within the limit, with some overhead for the control messages themselves
|
||||
for _, iwant := range ctl.Iwant {
|
||||
const protobufOverhead = 6
|
||||
idBuckets := fragmentMessageIds(iwant.MessageIDs, limit-protobufOverhead)
|
||||
for _, ids := range idBuckets {
|
||||
iwant := &pb.ControlIWant{MessageIDs: ids}
|
||||
out := outRPC(iwant.Size(), true)
|
||||
out.Control.Iwant = append(out.Control.Iwant, iwant)
|
||||
for _, graft := range ctl.GetGraft() {
|
||||
if lastRPC.Control.Graft = append(lastRPC.Control.Graft, graft); lastRPC.Size() > limit {
|
||||
lastRPC.Control.Graft = lastRPC.Control.Graft[:len(lastRPC.Control.Graft)-1]
|
||||
lastRPC = &RPC{RPC: pb.RPC{Control: &pb.ControlMessage{}}, from: elem.from}
|
||||
lastRPC.Control.Graft = append(lastRPC.Control.Graft, graft)
|
||||
out = append(out, lastRPC)
|
||||
}
|
||||
}
|
||||
for _, ihave := range ctl.Ihave {
|
||||
const protobufOverhead = 6
|
||||
idBuckets := fragmentMessageIds(ihave.MessageIDs, limit-protobufOverhead)
|
||||
for _, ids := range idBuckets {
|
||||
ihave := &pb.ControlIHave{MessageIDs: ids}
|
||||
out := outRPC(ihave.Size(), true)
|
||||
out.Control.Ihave = append(out.Control.Ihave, ihave)
|
||||
}
|
||||
}
|
||||
return rpcs, nil
|
||||
}
|
||||
|
||||
func fragmentMessageIds(msgIds []string, limit int) [][]string {
|
||||
// account for two bytes of protobuf overhead per array element
|
||||
const protobufOverhead = 2
|
||||
for _, prune := range ctl.GetPrune() {
|
||||
if lastRPC.Control.Prune = append(lastRPC.Control.Prune, prune); lastRPC.Size() > limit {
|
||||
lastRPC.Control.Prune = lastRPC.Control.Prune[:len(lastRPC.Control.Prune)-1]
|
||||
lastRPC = &RPC{RPC: pb.RPC{Control: &pb.ControlMessage{}}, from: elem.from}
|
||||
lastRPC.Control.Prune = append(lastRPC.Control.Prune, prune)
|
||||
out = append(out, lastRPC)
|
||||
}
|
||||
}
|
||||
|
||||
out := [][]string{{}}
|
||||
var currentBucket int
|
||||
var bucketLen int
|
||||
for i := 0; i < len(msgIds); i++ {
|
||||
size := len(msgIds[i]) + protobufOverhead
|
||||
if size > limit {
|
||||
// pathological case where a single message ID exceeds the limit.
|
||||
log.Warnf("message ID length %d exceeds limit %d, removing from outgoing gossip", size, limit)
|
||||
continue
|
||||
for _, iwant := range ctl.GetIwant() {
|
||||
if len(lastRPC.Control.Iwant) == 0 {
|
||||
// Initialize with a single IWANT.
|
||||
// For IWANTs we don't need more than a single one,
|
||||
// since there are no topic IDs here.
|
||||
newIWant := &pb.ControlIWant{}
|
||||
if lastRPC.Control.Iwant = append(lastRPC.Control.Iwant, newIWant); lastRPC.Size() > limit {
|
||||
lastRPC.Control.Iwant = lastRPC.Control.Iwant[:len(lastRPC.Control.Iwant)-1]
|
||||
lastRPC = &RPC{RPC: pb.RPC{Control: &pb.ControlMessage{
|
||||
Iwant: []*pb.ControlIWant{newIWant},
|
||||
}}, from: elem.from}
|
||||
out = append(out, lastRPC)
|
||||
}
|
||||
bucketLen += size
|
||||
if bucketLen > limit {
|
||||
out = append(out, []string{})
|
||||
currentBucket++
|
||||
bucketLen = size
|
||||
}
|
||||
out[currentBucket] = append(out[currentBucket], msgIds[i])
|
||||
for _, msgID := range iwant.GetMessageIDs() {
|
||||
if lastRPC.Control.Iwant[0].MessageIDs = append(lastRPC.Control.Iwant[0].MessageIDs, msgID); lastRPC.Size() > limit {
|
||||
lastRPC.Control.Iwant[0].MessageIDs = lastRPC.Control.Iwant[0].MessageIDs[:len(lastRPC.Control.Iwant[0].MessageIDs)-1]
|
||||
lastRPC = &RPC{RPC: pb.RPC{Control: &pb.ControlMessage{
|
||||
Iwant: []*pb.ControlIWant{{MessageIDs: []string{msgID}}},
|
||||
}}, from: elem.from}
|
||||
out = append(out, lastRPC)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, ihave := range ctl.GetIhave() {
|
||||
if len(lastRPC.Control.Ihave) == 0 ||
|
||||
lastRPC.Control.Ihave[len(lastRPC.Control.Ihave)-1].TopicID != ihave.TopicID {
|
||||
// Start a new IHAVE if we are referencing a new topic ID
|
||||
newIhave := &pb.ControlIHave{TopicID: ihave.TopicID}
|
||||
if lastRPC.Control.Ihave = append(lastRPC.Control.Ihave, newIhave); lastRPC.Size() > limit {
|
||||
lastRPC.Control.Ihave = lastRPC.Control.Ihave[:len(lastRPC.Control.Ihave)-1]
|
||||
lastRPC = &RPC{RPC: pb.RPC{Control: &pb.ControlMessage{
|
||||
Ihave: []*pb.ControlIHave{newIhave},
|
||||
}}, from: elem.from}
|
||||
out = append(out, lastRPC)
|
||||
}
|
||||
}
|
||||
for _, msgID := range ihave.GetMessageIDs() {
|
||||
lastIHave := lastRPC.Control.Ihave[len(lastRPC.Control.Ihave)-1]
|
||||
if lastIHave.MessageIDs = append(lastIHave.MessageIDs, msgID); lastRPC.Size() > limit {
|
||||
lastIHave.MessageIDs = lastIHave.MessageIDs[:len(lastIHave.MessageIDs)-1]
|
||||
lastRPC = &RPC{RPC: pb.RPC{Control: &pb.ControlMessage{
|
||||
Ihave: []*pb.ControlIHave{{TopicID: ihave.TopicID, MessageIDs: []string{msgID}}},
|
||||
}}, from: elem.from}
|
||||
out = append(out, lastRPC)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
|
|
|
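The new appendOrMergeRPC above repeats one pattern for publish messages, subscriptions, and each control type: speculatively append the item to the last RPC, and if the serialized size now exceeds the limit, roll the append back and start a fresh RPC seeded with that item. A simplified, self-contained sketch of that pattern (the rpc type and its size calculation are placeholders, not go-libp2p-pubsub types):

```go
package main

import "fmt"

// rpc and its size method are placeholders standing in for pb.RPC and
// pb.RPC.Size(); they are not go-libp2p-pubsub types.
type rpc struct{ items []string }

func (r *rpc) size() int {
	n := 0
	for _, it := range r.items {
		n += len(it) + 1 // +1 roughly models per-field protobuf overhead
	}
	return n
}

// appendOrSplit mirrors the append-then-rollback pattern of appendOrMergeRPC:
// append, check the size, and undo the append if the limit was crossed.
func appendOrSplit(out []*rpc, limit int, items ...string) []*rpc {
	if len(out) == 0 {
		out = append(out, &rpc{})
	}
	last := out[len(out)-1]
	for _, it := range items {
		if last.items = append(last.items, it); last.size() > limit {
			last.items = last.items[:len(last.items)-1] // roll back the oversized append
			last = &rpc{items: []string{it}}            // start a new RPC seeded with the item
			out = append(out, last)
		}
	}
	return out
}

func main() {
	out := appendOrSplit(nil, 10, "aaaa", "bbbb", "cccc")
	fmt.Println(len(out)) // 2: the third item no longer fit and started a new RPC
}
```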
@ -13,18 +13,17 @@
|
|||
<a href="https://marcopolo.github.io/FlakyTests/"><img src="https://marcopolo.github.io/FlakyTests/current-score.svg"/></a>
|
||||
</p>
|
||||
|
||||
# Table of Contents
|
||||
|
||||
# Table of Contents <!-- omit in toc -->
|
||||
- [Background](#background)
|
||||
- [Roadmap](#roadmap)
|
||||
- [Usage](#usage)
|
||||
- [Examples](#examples)
|
||||
- [Development](#development)
|
||||
- [Tests](#tests)
|
||||
- [Dashboards](#dashboards)
|
||||
- [Contribute](#contribute)
|
||||
- [Supported Go Versions](#supported-go-versions)
|
||||
- [Supported Go Versions](#supported-go-versions)
|
||||
- [Notable Users](#notable-users)
|
||||
|
||||
## Background
|
||||
# Background
|
||||
|
||||
[libp2p](https://github.com/libp2p/specs) is a networking stack and library modularized out of [The IPFS Project](https://github.com/ipfs/ipfs), and bundled separately for other tools to use.
|
||||
>
|
||||
|
@ -37,12 +36,12 @@ To learn more, check out the following resources:
|
|||
- [**js-libp2p implementation**](https://github.com/libp2p/js-libp2p)
|
||||
- [**rust-libp2p implementation**](https://github.com/libp2p/rust-libp2p)
|
||||
|
||||
## Roadmap
|
||||
# Roadmap
|
||||
|
||||
Our roadmap for go-libp2p can be found here: https://github.com/libp2p/go-libp2p/blob/master/ROADMAP.md
|
||||
This document represents current projects the go-libp2p team is focused on and provides an estimation of completion targets. It is a completementary roadmap to the overarching libp2p project roadmap: https://github.com/libp2p/specs/blob/master/ROADMAP.md
|
||||
This document represents current projects the go-libp2p team is focused on and provides an estimation of completion targets. It is a complementary roadmap to the overarching libp2p project roadmap: https://github.com/libp2p/specs/blob/master/ROADMAP.md
|
||||
|
||||
## Usage
|
||||
# Usage
|
||||
|
||||
This repository (`go-libp2p`) serves as the entrypoint to the universe of packages that compose the Go implementation of the libp2p stack.
|
||||
|
||||
|
@ -52,10 +51,17 @@ You can start using go-libp2p in your Go application simply by adding imports fr
|
|||
import "github.com/libp2p/go-libp2p"
|
||||
```
|
||||
|
||||
### Examples
|
||||
## Examples
|
||||
|
||||
Examples can be found in the [examples folder](examples).
|
||||
|
||||
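For readers following the README changes above, a minimal node with the default transports can be created like this (an illustrative sketch, not part of this commit):

```go
package main

import (
	"fmt"
	"log"

	"github.com/libp2p/go-libp2p"
)

func main() {
	// Start a libp2p node with the default transports, muxers and security.
	h, err := libp2p.New()
	if err != nil {
		log.Fatal(err)
	}
	defer h.Close()

	fmt.Println("peer ID:", h.ID())
	fmt.Println("listening on:", h.Addrs())
}
```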
## Dashboards
|
||||
|
||||
We provide prebuilt Grafana dashboards so that applications can better monitor libp2p in production.
|
||||
You can find the [dashboard JSON files here](https://github.com/libp2p/go-libp2p/tree/master/dashboards).
|
||||
|
||||
We also have live [Public Dashboards](https://github.com/libp2p/go-libp2p/tree/master/dashboards/README.md#public-dashboards) that you can check out to see real time monitoring in action.
|
||||
|
||||
|
||||
# Contribute
|
||||
|
||||
|
@ -70,7 +76,7 @@ Guidelines:
|
|||
- have fun!
|
||||
|
||||
There's a few things you can do right now to help out:
|
||||
- Go through the modules below and **check out existing issues**. This would be especially useful for modules in active development. Some knowledge of IPFS/libp2p may be required, as well as the infrasture behind it - for instance, you may need to read up on p2p and more complex operations like muxing to be able to help technically.
|
||||
- Go through the modules below and **check out existing issues**. This would be especially useful for modules in active development. Some knowledge of IPFS/libp2p may be required, as well as the infrastructure behind it - for instance, you may need to read up on p2p and more complex operations like muxing to be able to help technically.
|
||||
- **Perform code reviews**.
|
||||
- **Add tests**. There can never be enough tests.
|
||||
|
||||
|
@ -99,4 +105,4 @@ Some notable users of go-libp2p are:
|
|||
- [Kairos](https://github.com/kairos-io/kairos) - A Kubernetes-focused, Cloud Native Linux meta-distribution.
|
||||
- [Oasis Core](https://github.com/oasisprotocol/oasis-core) - The consensus and runtime layers of the [Oasis protocol](https://oasisprotocol.org/).
|
||||
|
||||
Please open a pull request if you want your project to be added here.
|
||||
Please open a pull request if you want your project (min. 250 GitHub stars) to be added here.
|
||||
|
|
|
@ -261,6 +261,7 @@ func (cfg *Config) addTransports(h host.Host) error {
|
|||
}
|
||||
|
||||
fxopts = append(fxopts, fx.Provide(PrivKeyToStatelessResetKey))
|
||||
fxopts = append(fxopts, fx.Provide(PrivKeyToTokenGeneratorKey))
|
||||
if cfg.QUICReuse != nil {
|
||||
fxopts = append(fxopts, cfg.QUICReuse...)
|
||||
} else {
|
||||
|
@ -295,6 +296,15 @@ func (cfg *Config) addTransports(h host.Host) error {
|
|||
//
|
||||
// This function consumes the config. Do not reuse it (really!).
|
||||
func (cfg *Config) NewNode() (host.Host, error) {
|
||||
// If possible check that the resource manager conn limit is higher than the
|
||||
// limit set in the conn manager.
|
||||
if l, ok := cfg.ResourceManager.(connmgr.GetConnLimiter); ok {
|
||||
err := cfg.ConnManager.CheckLimit(l)
|
||||
if err != nil {
|
||||
log.Warn(fmt.Sprintf("rcmgr limit conflicts with connmgr limit: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
eventBus := eventbus.NewBus(eventbus.WithMetricsTracer(eventbus.NewMetricsTracer(eventbus.WithRegisterer(cfg.PrometheusRegisterer))))
|
||||
swrm, err := cfg.makeSwarm(eventBus, !cfg.DisableMetrics)
|
||||
if err != nil {
|
||||
|
@ -419,6 +429,11 @@ func (cfg *Config) NewNode() (host.Host, error) {
|
|||
PeerKey: autonatPrivKey,
|
||||
Peerstore: ps,
|
||||
DialRanker: swarm.NoDelayDialRanker,
|
||||
SwarmOpts: []swarm.Option{
|
||||
// It is better to disable black hole detection and just attempt a dial for autonat
|
||||
swarm.WithUDPBlackHoleConfig(false, 0, 0),
|
||||
swarm.WithIPv6BlackHoleConfig(false, 0, 0),
|
||||
},
|
||||
}
|
||||
|
||||
dialer, err := autoNatCfg.makeSwarm(eventbus.NewBus(), false)
|
||||
|
|
|
@ -11,7 +11,10 @@ import (
|
|||
"github.com/quic-go/quic-go"
|
||||
)
|
||||
|
||||
const statelessResetKeyInfo = "libp2p quic stateless reset key"
|
||||
const (
|
||||
statelessResetKeyInfo = "libp2p quic stateless reset key"
|
||||
tokenGeneratorKeyInfo = "libp2p quic token generator key"
|
||||
)
|
||||
|
||||
func PrivKeyToStatelessResetKey(key crypto.PrivKey) (quic.StatelessResetKey, error) {
|
||||
var statelessResetKey quic.StatelessResetKey
|
||||
|
@ -25,3 +28,16 @@ func PrivKeyToStatelessResetKey(key crypto.PrivKey) (quic.StatelessResetKey, err
|
|||
}
|
||||
return statelessResetKey, nil
|
||||
}
|
||||
|
||||
func PrivKeyToTokenGeneratorKey(key crypto.PrivKey) (quic.TokenGeneratorKey, error) {
|
||||
var tokenKey quic.TokenGeneratorKey
|
||||
keyBytes, err := key.Raw()
|
||||
if err != nil {
|
||||
return tokenKey, err
|
||||
}
|
||||
keyReader := hkdf.New(sha256.New, keyBytes, nil, []byte(tokenGeneratorKeyInfo))
|
||||
if _, err := io.ReadFull(keyReader, tokenKey[:]); err != nil {
|
||||
return tokenKey, err
|
||||
}
|
||||
return tokenKey, nil
|
||||
}
|
|
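The new PrivKeyToTokenGeneratorKey provider mirrors PrivKeyToStatelessResetKey: both derive a fixed-size QUIC key from the host's identity key via HKDF, so the keys stay stable across restarts without storing extra state. A hedged usage sketch (illustrative; assumes the bumped go-libp2p version that ships this function):

```go
package main

import (
	"crypto/rand"
	"fmt"
	"log"

	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
)

func main() {
	// Illustrative: both QUIC keys are derived from the identity key.
	priv, _, err := crypto.GenerateEd25519Key(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	resetKey, err := quicreuse.PrivKeyToStatelessResetKey(priv)
	if err != nil {
		log.Fatal(err)
	}
	tokenKey, err := quicreuse.PrivKeyToTokenGeneratorKey(priv)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(resetKey), len(tokenKey)) // fixed-size keys fed to quic-go
}
```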
@ -74,6 +74,10 @@ type ConnManager interface {
|
|||
// then it will return true if the peer is protected for any tag
|
||||
IsProtected(id peer.ID, tag string) (protected bool)
|
||||
|
||||
// CheckLimit will return an error if the connection manager's internal
|
||||
// connection limit exceeds the provided system limit.
|
||||
CheckLimit(l GetConnLimiter) error
|
||||
|
||||
// Close closes the connection manager and stops background processes.
|
||||
Close() error
|
||||
}
|
||||
|
@ -89,3 +93,9 @@ type TagInfo struct {
|
|||
// Conns maps connection ids (such as remote multiaddr) to their creation time.
|
||||
Conns map[string]time.Time
|
||||
}
|
||||
|
||||
// GetConnLimiter provides access to a component's total connection limit.
|
||||
type GetConnLimiter interface {
|
||||
// GetConnLimit returns the total connection limit of the implementing component.
|
||||
GetConnLimit() int
|
||||
}
|
||||
|
|
|
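The new GetConnLimiter interface and ConnManager.CheckLimit method let Config.NewNode (earlier in this diff) warn when the connection manager's high watermark exceeds the resource manager's total connection limit. A minimal sketch of a component satisfying the interface (fixedLimiter is illustrative, not part of the commit):

```go
package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/connmgr"
)

// fixedLimiter is illustrative; in the diff the real GetConnLimiter is the
// resource manager, which reports its system-wide connection limit.
type fixedLimiter struct{ total int }

func (f fixedLimiter) GetConnLimit() int { return f.total }

func main() {
	var l connmgr.GetConnLimiter = fixedLimiter{total: 1024}
	// A connection manager whose high watermark exceeds this value returns an
	// error from CheckLimit, which Config.NewNode logs as a warning (see the
	// config.go hunk earlier in this diff).
	fmt.Println("system connection limit:", l.GetConnLimit())
}
```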
@ -21,4 +21,5 @@ func (NullConnMgr) Notifee() network.Notifiee { return network.Gl
|
|||
func (NullConnMgr) Protect(peer.ID, string) {}
|
||||
func (NullConnMgr) Unprotect(peer.ID, string) bool { return false }
|
||||
func (NullConnMgr) IsProtected(peer.ID, string) bool { return false }
|
||||
func (NullConnMgr) CheckLimit(l GetConnLimiter) error { return nil }
|
||||
func (NullConnMgr) Close() error { return nil }
|
||||
|
|
|
@ -12,8 +12,7 @@ import (
|
|||
|
||||
pb "github.com/libp2p/go-libp2p/core/crypto/pb"
|
||||
"github.com/libp2p/go-libp2p/core/internal/catch"
|
||||
|
||||
"github.com/minio/sha256-simd"
|
||||
"github.com/libp2p/go-libp2p/internal/sha256"
|
||||
)
|
||||
|
||||
// ECDSAPrivateKey is an implementation of an ECDSA private key
|
||||
|
|
|
@ -10,8 +10,7 @@ import (
|
|||
|
||||
pb "github.com/libp2p/go-libp2p/core/crypto/pb"
|
||||
"github.com/libp2p/go-libp2p/core/internal/catch"
|
||||
|
||||
"github.com/minio/sha256-simd"
|
||||
"github.com/libp2p/go-libp2p/internal/sha256"
|
||||
)
|
||||
|
||||
// RsaPrivateKey is a rsa private key
|
||||
|
|
|
@ -9,7 +9,7 @@ import (
|
|||
|
||||
"github.com/decred/dcrd/dcrec/secp256k1/v4"
|
||||
"github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa"
|
||||
"github.com/minio/sha256-simd"
|
||||
"github.com/libp2p/go-libp2p/internal/sha256"
|
||||
)
|
||||
|
||||
// Secp256k1PrivateKey is a Secp256k1 private key
|
||||
|
|
|
@ -86,7 +86,7 @@ func AddrInfoFromP2pAddr(m ma.Multiaddr) (*AddrInfo, error) {
|
|||
|
||||
// AddrInfoToP2pAddrs converts an AddrInfo to a list of Multiaddrs.
|
||||
func AddrInfoToP2pAddrs(pi *AddrInfo) ([]ma.Multiaddr, error) {
|
||||
p2ppart, err := ma.NewComponent("p2p", Encode(pi.ID))
|
||||
p2ppart, err := ma.NewComponent("p2p", pi.ID.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -102,7 +102,7 @@ func AddrInfoToP2pAddrs(pi *AddrInfo) ([]ma.Multiaddr, error) {
|
|||
|
||||
func (pi *AddrInfo) Loggable() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"peerID": pi.ID.Pretty(),
|
||||
"peerID": pi.ID.String(),
|
||||
"addrs": pi.Addrs,
|
||||
}
|
||||
}
|
||||
|
|
|
@ -41,12 +41,6 @@ const maxInlineKeyLength = 42
|
|||
// hash output as a multihash. See IDFromPublicKey for details.
|
||||
type ID string
|
||||
|
||||
// Pretty returns a base58-encoded string representation of the ID.
|
||||
// Deprecated: use String() instead.
|
||||
func (id ID) Pretty() string {
|
||||
return id.String()
|
||||
}
|
||||
|
||||
// Loggable returns a pretty peer ID string in loggable JSON format.
|
||||
func (id ID) Loggable() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
|
@ -145,16 +139,6 @@ func Decode(s string) (ID, error) {
|
|||
return FromCid(c)
|
||||
}
|
||||
|
||||
// Encode encodes a peer ID as a string.
|
||||
//
|
||||
// At the moment, it base58 encodes the peer ID but, in the future, it will
|
||||
// switch to encoding it as a CID by default.
|
||||
//
|
||||
// Deprecated: use id.String instead.
|
||||
func Encode(id ID) string {
|
||||
return id.String()
|
||||
}
|
||||
|
||||
// FromCid converts a CID to a peer ID, if possible.
|
||||
func FromCid(c cid.Cid) (ID, error) {
|
||||
code := mc.Code(c.Type())
|
||||
|
|
|
@ -45,7 +45,7 @@ func (id ID) Size() int {
|
|||
}
|
||||
|
||||
func (id ID) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(Encode(id))
|
||||
return json.Marshal(id.String())
|
||||
}
|
||||
|
||||
func (id *ID) UnmarshalJSON(data []byte) (err error) {
|
||||
|
@ -59,7 +59,7 @@ func (id *ID) UnmarshalJSON(data []byte) (err error) {
|
|||
|
||||
// MarshalText returns the text encoding of the ID.
|
||||
func (id ID) MarshalText() ([]byte, error) {
|
||||
return []byte(Encode(id)), nil
|
||||
return []byte(id.String()), nil
|
||||
}
|
||||
|
||||
// UnmarshalText restores the ID from its text encoding.
|
||||
|
|
|
@ -3,6 +3,7 @@ package sec
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
|
@ -29,3 +30,14 @@ type SecureTransport interface {
|
|||
// ID is the protocol ID of the security protocol.
|
||||
ID() protocol.ID
|
||||
}
|
||||
|
||||
type ErrPeerIDMismatch struct {
|
||||
Expected peer.ID
|
||||
Actual peer.ID
|
||||
}
|
||||
|
||||
func (e ErrPeerIDMismatch) Error() string {
|
||||
return fmt.Sprintf("peer id mismatch: expected %s, but remote key matches %s", e.Expected, e.Actual)
|
||||
}
|
||||
|
||||
var _ error = (*ErrPeerIDMismatch)(nil)
|
||||
|
|
|
@ -5,6 +5,7 @@ package transport
|
|||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
|
@ -124,3 +125,47 @@ type Upgrader interface {
|
|||
// Upgrade upgrades the multiaddr/net connection into a full libp2p-transport connection.
|
||||
Upgrade(ctx context.Context, t Transport, maconn manet.Conn, dir network.Direction, p peer.ID, scope network.ConnManagementScope) (CapableConn, error)
|
||||
}
|
||||
|
||||
// DialUpdater provides updates on in progress dials.
|
||||
type DialUpdater interface {
|
||||
// DialWithUpdates dials a remote peer and provides updates on the passed channel.
|
||||
DialWithUpdates(context.Context, ma.Multiaddr, peer.ID, chan<- DialUpdate) (CapableConn, error)
|
||||
}
|
||||
|
||||
// DialUpdateKind indicates the type of DialUpdate event.
|
||||
type DialUpdateKind int
|
||||
|
||||
const (
|
||||
// UpdateKindDialFailed indicates dial failed.
|
||||
UpdateKindDialFailed DialUpdateKind = iota
|
||||
// UpdateKindDialSuccessful indicates dial succeeded.
|
||||
UpdateKindDialSuccessful
|
||||
// UpdateKindHandshakeProgressed indicates successful completion of the TCP 3-way
|
||||
// handshake
|
||||
UpdateKindHandshakeProgressed
|
||||
)
|
||||
|
||||
func (k DialUpdateKind) String() string {
|
||||
switch k {
|
||||
case UpdateKindDialFailed:
|
||||
return "DialFailed"
|
||||
case UpdateKindDialSuccessful:
|
||||
return "DialSuccessful"
|
||||
case UpdateKindHandshakeProgressed:
|
||||
return "UpdateKindHandshakeProgressed"
|
||||
default:
|
||||
return fmt.Sprintf("DialUpdateKind<Unknown-%d>", k)
|
||||
}
|
||||
}
|
||||
|
||||
// DialUpdate is used by DialUpdater to provide dial updates.
|
||||
type DialUpdate struct {
|
||||
// Kind is the kind of update event.
|
||||
Kind DialUpdateKind
|
||||
// Addr is the peer's address.
|
||||
Addr ma.Multiaddr
|
||||
// Conn is the resulting connection on success.
|
||||
Conn CapableConn
|
||||
// Err is the reason for dial failure.
|
||||
Err error
|
||||
}
|
||||
|
|
|
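The new DialUpdater types let a transport report per-address progress (handshake completed, dial failed, dial succeeded) over a caller-supplied channel. A sketch of a consumer, assuming some transport value that implements DialUpdater (the helper itself is illustrative, not part of the commit):

```go
package example

import (
	"context"
	"log"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/transport"
	ma "github.com/multiformats/go-multiaddr"
)

// dialWithProgress dials through any transport that implements DialUpdater
// and logs each update until the context is done.
func dialWithProgress(ctx context.Context, d transport.DialUpdater, raddr ma.Multiaddr, p peer.ID) (transport.CapableConn, error) {
	updates := make(chan transport.DialUpdate, 16)
	go func() {
		for {
			select {
			case u := <-updates:
				switch u.Kind {
				case transport.UpdateKindHandshakeProgressed:
					log.Printf("TCP handshake completed for %s", u.Addr)
				case transport.UpdateKindDialFailed:
					log.Printf("dial to %s failed: %v", u.Addr, u.Err)
				case transport.UpdateKindDialSuccessful:
					log.Printf("dialed %s", u.Addr)
				}
			case <-ctx.Done():
				return
			}
		}
	}()
	return d.DialWithUpdates(ctx, raddr, p, updates)
}
```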
@ -79,11 +79,9 @@ var RandomIdentity = func(cfg *Config) error {
|
|||
var DefaultListenAddrs = func(cfg *Config) error {
|
||||
addrs := []string{
|
||||
"/ip4/0.0.0.0/tcp/0",
|
||||
"/ip4/0.0.0.0/udp/0/quic",
|
||||
"/ip4/0.0.0.0/udp/0/quic-v1",
|
||||
"/ip4/0.0.0.0/udp/0/quic-v1/webtransport",
|
||||
"/ip6/::/tcp/0",
|
||||
"/ip6/::/udp/0/quic",
|
||||
"/ip6/::/udp/0/quic-v1",
|
||||
"/ip6/::/udp/0/quic-v1/webtransport",
|
||||
}
|
||||
|
|
|
@ -0,0 +1,23 @@
|
|||
//go:build go1.21
|
||||
|
||||
// This package uses build tags to select between github.com/minio/sha256-simd
|
||||
// for go1.20 and below and crypto/sha256 for go1.21 and above.
|
||||
// This is used because a fast SHANI implementation of sha256 is only available
|
||||
// in the std for go1.21 and above. See https://go.dev/issue/50543.
|
||||
// TODO: Once go1.22 releases remove this package and replace all uses
|
||||
// with crypto/sha256 because the two supported version of go will have the fast
|
||||
// implementation.
|
||||
package sha256
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"hash"
|
||||
)
|
||||
|
||||
func Sum256(b []byte) [sha256.Size]byte {
|
||||
return sha256.Sum256(b)
|
||||
}
|
||||
|
||||
func New() hash.Hash {
|
||||
return sha256.New()
|
||||
}
|
|
@ -0,0 +1,24 @@
|
|||
//go:build !go1.21
|
||||
|
||||
// This package uses build tags to select between github.com/minio/sha256-simd
|
||||
// for go1.20 and below and crypto/sha256 for go1.21 and above.
|
||||
// This is used because a fast SHANI implementation of sha256 is only available
|
||||
// in the std for go1.21 and above. See https://go.dev/issue/50543.
|
||||
// TODO: Once go1.22 releases remove this package and replace all uses
|
||||
// with crypto/sha256 because the two supported version of go will have the fast
|
||||
// implementation.
|
||||
package sha256
|
||||
|
||||
import (
|
||||
"hash"
|
||||
|
||||
"github.com/minio/sha256-simd"
|
||||
)
|
||||
|
||||
func Sum256(b []byte) [sha256.Size]byte {
|
||||
return sha256.Sum256(b)
|
||||
}
|
||||
|
||||
func New() hash.Hash {
|
||||
return sha256.New()
|
||||
}
|
|
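The two files above select the SHA-256 backend at build time: go1.21+ builds use crypto/sha256 (which has the fast SHA-NI path), while older builds keep github.com/minio/sha256-simd. In-tree call sites only see the wrapper, roughly like this sketch (illustrative; internal packages are not importable from outside go-libp2p):

```go
package example

// Illustrative in-tree call site: the wrapper's API is the same either way,
// and the build tag decides which backend gets compiled in.
import "github.com/libp2p/go-libp2p/internal/sha256"

func fingerprint(b []byte) []byte {
	sum := sha256.Sum256(b) // crypto/sha256 on go1.21+, sha256-simd otherwise
	return sum[:]
}
```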
@ -579,6 +579,7 @@ func PrometheusRegisterer(reg prometheus.Registerer) Option {
|
|||
// DialRanker configures libp2p to use d as the dial ranker. To enable smart
|
||||
// dialing use `swarm.DefaultDialRanker`. use `swarm.NoDelayDialRanker` to
|
||||
// disable smart dialing.
|
||||
//
|
||||
// Deprecated: use SwarmOpts(swarm.WithDialRanker(d)) instead
|
||||
func DialRanker(d network.DialRanker) Option {
|
||||
return func(cfg *Config) error {
|
||||
|
|
|
@ -68,7 +68,7 @@ func (as *autoNATService) handleStream(s network.Stream) {
|
|||
defer s.Close()
|
||||
|
||||
pid := s.Conn().RemotePeer()
|
||||
log.Debugf("New stream from %s", pid.Pretty())
|
||||
log.Debugf("New stream from %s", pid)
|
||||
|
||||
r := pbio.NewDelimitedReader(s, maxMsgSize)
|
||||
w := pbio.NewDelimitedWriter(s)
|
||||
|
@ -78,14 +78,14 @@ func (as *autoNATService) handleStream(s network.Stream) {
|
|||
|
||||
err := r.ReadMsg(&req)
|
||||
if err != nil {
|
||||
log.Debugf("Error reading message from %s: %s", pid.Pretty(), err.Error())
|
||||
log.Debugf("Error reading message from %s: %s", pid, err.Error())
|
||||
s.Reset()
|
||||
return
|
||||
}
|
||||
|
||||
t := req.GetType()
|
||||
if t != pb.Message_DIAL {
|
||||
log.Debugf("Unexpected message from %s: %s (%d)", pid.Pretty(), t.String(), t)
|
||||
log.Debugf("Unexpected message from %s: %s (%d)", pid, t.String(), t)
|
||||
s.Reset()
|
||||
return
|
||||
}
|
||||
|
@ -96,7 +96,7 @@ func (as *autoNATService) handleStream(s network.Stream) {
|
|||
|
||||
err = w.WriteMsg(&res)
|
||||
if err != nil {
|
||||
log.Debugf("Error writing response to %s: %s", pid.Pretty(), err.Error())
|
||||
log.Debugf("Error writing response to %s: %s", pid, err.Error())
|
||||
s.Reset()
|
||||
return
|
||||
}
|
||||
|
@ -234,7 +234,7 @@ func (as *autoNATService) doDial(pi peer.AddrInfo) *pb.Message_DialResponse {
|
|||
|
||||
conn, err := as.config.dialer.DialPeer(ctx, pi.ID)
|
||||
if err != nil {
|
||||
log.Debugf("error dialing %s: %s", pi.ID.Pretty(), err.Error())
|
||||
log.Debugf("error dialing %s: %s", pi.ID, err.Error())
|
||||
// wait for the context to timeout to avoid leaking timing information
|
||||
// this renders the service ineffective as a port scanner
|
||||
<-ctx.Done()
|
||||
|
|
|
@ -736,7 +736,7 @@ func (rf *relayFinder) relayAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
|
|||
for p := range rf.relays {
|
||||
addrs := cleanupAddressSet(rf.host.Peerstore().Addrs(p))
|
||||
relayAddrCnt += len(addrs)
|
||||
circuit := ma.StringCast(fmt.Sprintf("/p2p/%s/p2p-circuit", p.Pretty()))
|
||||
circuit := ma.StringCast(fmt.Sprintf("/p2p/%s/p2p-circuit", p))
|
||||
for _, addr := range addrs {
|
||||
pub := addr.Encapsulate(circuit)
|
||||
raddrs = append(raddrs, pub)
|
||||
|
|
|
@ -437,7 +437,7 @@ func (h *BasicHost) newStreamHandler(s network.Stream) {
|
|||
|
||||
log.Debugf("negotiated: %s (took %s)", protoID, took)
|
||||
|
||||
go handle(protoID, s)
|
||||
handle(protoID, s)
|
||||
}
|
||||
|
||||
// SignalAddressChange signals to the host that it needs to determine whether our listen addresses have recently
|
||||
|
@ -629,9 +629,6 @@ func (h *BasicHost) RemoveStreamHandler(pid protocol.ID) {
|
|||
// to create one. If ProtocolID is "", writes no header.
|
||||
// (Thread-safe)
|
||||
func (h *BasicHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID) (network.Stream, error) {
|
||||
// Ensure we have a connection, with peer addresses resolved by the routing system (#207)
|
||||
// It is not sufficient to let the underlying host connect, it will most likely not have
|
||||
// any addresses for the peer without any prior connections.
|
||||
// If the caller wants to prevent the host from dialing, it should use the NoDial option.
|
||||
if nodial, _ := network.GetNoDial(ctx); !nodial {
|
||||
err := h.Connect(ctx, peer.AddrInfo{ID: p})
|
||||
|
@ -669,7 +666,9 @@ func (h *BasicHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.I
|
|||
}
|
||||
|
||||
if pref != "" {
|
||||
s.SetProtocol(pref)
|
||||
if err := s.SetProtocol(pref); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lzcon := msmux.NewMSSelect(s, pref)
|
||||
return &streamWrapper{
|
||||
Stream: s,
|
||||
|
@ -795,10 +794,11 @@ func (h *BasicHost) Addrs() []ma.Multiaddr {
|
|||
continue
|
||||
}
|
||||
addrWithCerthash, added := tpt.AddCertHashes(addr)
|
||||
addrs[i] = addrWithCerthash
|
||||
if !added {
|
||||
log.Debug("Couldn't add certhashes to webtransport multiaddr because we aren't listening on webtransport")
|
||||
continue
|
||||
}
|
||||
addrs[i] = addrWithCerthash
|
||||
}
|
||||
}
|
||||
return addrs
|
||||
|
@ -945,17 +945,17 @@ func inferWebtransportAddrsFromQuic(in []ma.Multiaddr) []ma.Multiaddr {
|
|||
// Remove certhashes
|
||||
addr, _ = ma.SplitLast(addr)
|
||||
}
|
||||
webtransportAddrs[addr.String()] = struct{}{}
|
||||
webtransportAddrs[string(addr.Bytes())] = struct{}{}
|
||||
// Remove webtransport component, now it's a multiaddr that ends in /quic-v1
|
||||
addr, _ = ma.SplitLast(addr)
|
||||
}
|
||||
|
||||
if _, lastComponent := ma.SplitLast(addr); lastComponent.Protocol().Code == ma.P_QUIC_V1 {
|
||||
addrStr := addr.String()
|
||||
if _, ok := quicOrWebtransportAddrs[addrStr]; ok {
|
||||
bytes := addr.Bytes()
|
||||
if _, ok := quicOrWebtransportAddrs[string(bytes)]; ok {
|
||||
foundSameListeningAddr = true
|
||||
} else {
|
||||
quicOrWebtransportAddrs[addrStr] = struct{}{}
|
||||
quicOrWebtransportAddrs[string(bytes)] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -977,7 +977,7 @@ func inferWebtransportAddrsFromQuic(in []ma.Multiaddr) []ma.Multiaddr {
|
|||
if _, lastComponent := ma.SplitLast(addr); lastComponent.Protocol().Code == ma.P_QUIC_V1 {
|
||||
// Convert quic to webtransport
|
||||
addr = addr.Encapsulate(wtComponent)
|
||||
if _, ok := webtransportAddrs[addr.String()]; ok {
|
||||
if _, ok := webtransportAddrs[string(addr.Bytes())]; ok {
|
||||
// We already have this address
|
||||
continue
|
||||
}
|
||||
|
|
|
@ -2,5 +2,5 @@
|
|||
|
||||
package basichost
|
||||
|
||||
//go:generate sh -c "go run github.com/golang/mock/mockgen -build_flags=\"-tags=gomock\" -package basichost -destination mock_nat_test.go github.com/libp2p/go-libp2p/p2p/host/basic NAT"
|
||||
//go:generate sh -c "go run go.uber.org/mock/mockgen -build_flags=\"-tags=gomock\" -package basichost -destination mock_nat_test.go github.com/libp2p/go-libp2p/p2p/host/basic NAT"
|
||||
type NAT nat
|
||||
|
|
|
@ -210,7 +210,7 @@ func (bh *BlankHost) newStreamHandler(s network.Stream) {
|
|||
|
||||
s.SetProtocol(protoID)
|
||||
|
||||
go handle(protoID, s)
|
||||
handle(protoID, s)
|
||||
}
|
||||
|
||||
// TODO: i'm not sure this really needs to be here
|
||||
|
|
|
@ -541,7 +541,7 @@ works best.
|
|||
|
||||
## Examples
|
||||
|
||||
Here we consider some concrete examples that can ellucidate the abstract
|
||||
Here we consider some concrete examples that can elucidate the abstract
|
||||
design as described so far.
|
||||
|
||||
### Stream Lifetime
|
||||
|
@ -578,7 +578,7 @@ More specifically the following constraints apply:
|
|||
- the peer scope, where the limits for the peer at the other end of the stream
|
||||
apply.
|
||||
- the service scope, where the limits of the specific service owning the stream apply.
|
||||
- the protcol scope, where the limits of the specific protocol for the stream apply.
|
||||
- the protocol scope, where the limits of the specific protocol for the stream apply.
|
||||
|
||||
|
||||
The resource transfer that happens in the `SetProtocol` and `SetService`
|
||||
|
|
|
@ -145,3 +145,7 @@ func (r *resourceManager) Stat() (result ResourceManagerStat) {
|
|||
|
||||
return result
|
||||
}
|
||||
|
||||
func (r *resourceManager) GetConnLimit() int {
|
||||
return r.limits.GetConnLimits().GetConnTotalLimit()
|
||||
}
|
||||
|
|
|
@ -2,7 +2,7 @@ package metricshelper
|
|||
|
||||
import ma "github.com/multiformats/go-multiaddr"
|
||||
|
||||
var transports = [...]int{ma.P_CIRCUIT, ma.P_WEBRTC, ma.P_WEBTRANSPORT, ma.P_QUIC, ma.P_QUIC_V1, ma.P_WSS, ma.P_WS, ma.P_TCP}
|
||||
var transports = [...]int{ma.P_CIRCUIT, ma.P_WEBRTC, ma.P_WEBRTC_DIRECT, ma.P_WEBTRANSPORT, ma.P_QUIC, ma.P_QUIC_V1, ma.P_WSS, ma.P_WS, ma.P_TCP}
|
||||
|
||||
func GetTransport(a ma.Multiaddr) string {
|
||||
for _, t := range transports {
|
||||
|
|
|
@ -2,6 +2,7 @@ package connmgr
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
@ -239,6 +240,17 @@ func (cm *BasicConnMgr) IsProtected(id peer.ID, tag string) (protected bool) {
|
|||
return protected
|
||||
}
|
||||
|
||||
func (cm *BasicConnMgr) CheckLimit(systemLimit connmgr.GetConnLimiter) error {
|
||||
if cm.cfg.highWater > systemLimit.GetConnLimit() {
|
||||
return fmt.Errorf(
|
||||
"conn manager high watermark limit: %d, exceeds the system connection limit of: %d",
|
||||
cm.cfg.highWater,
|
||||
systemLimit.GetConnLimit(),
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// peerInfo stores metadata for a given peer.
|
||||
type peerInfo struct {
|
||||
id peer.ID
|
||||
|
|
|
@ -320,7 +320,7 @@ func (t *decayingTag) Bump(p peer.ID, delta int) error {
|
|||
default:
|
||||
return fmt.Errorf(
|
||||
"unable to bump decaying tag for peer %s, tag %s, delta %d; queue full (len=%d)",
|
||||
p.Pretty(), t.name, delta, len(t.trkr.bumpTagCh))
|
||||
p, t.name, delta, len(t.trkr.bumpTagCh))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -337,7 +337,7 @@ func (t *decayingTag) Remove(p peer.ID) error {
|
|||
default:
|
||||
return fmt.Errorf(
|
||||
"unable to remove decaying tag for peer %s, tag %s; queue full (len=%d)",
|
||||
p.Pretty(), t.name, len(t.trkr.removeTagCh))
|
||||
p, t.name, len(t.trkr.removeTagCh))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1,39 +0,0 @@
|
|||
package swarm
|
||||
|
||||
import (
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
)
|
||||
|
||||
// http://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml
|
||||
var lowTimeoutFilters = ma.NewFilters()
|
||||
|
||||
func init() {
|
||||
for _, p := range []string{
|
||||
"/ip4/10.0.0.0/ipcidr/8",
|
||||
"/ip4/100.64.0.0/ipcidr/10",
|
||||
"/ip4/169.254.0.0/ipcidr/16",
|
||||
"/ip4/172.16.0.0/ipcidr/12",
|
||||
"/ip4/192.0.0.0/ipcidr/24",
|
||||
"/ip4/192.0.0.0/ipcidr/29",
|
||||
"/ip4/192.0.0.8/ipcidr/32",
|
||||
"/ip4/192.0.0.170/ipcidr/32",
|
||||
"/ip4/192.0.0.171/ipcidr/32",
|
||||
"/ip4/192.0.2.0/ipcidr/24",
|
||||
"/ip4/192.168.0.0/ipcidr/16",
|
||||
"/ip4/198.18.0.0/ipcidr/15",
|
||||
"/ip4/198.51.100.0/ipcidr/24",
|
||||
"/ip4/203.0.113.0/ipcidr/24",
|
||||
"/ip4/240.0.0.0/ipcidr/4",
|
||||
} {
|
||||
f, err := ma.NewMultiaddr(p)
|
||||
if err != nil {
|
||||
panic("error in lowTimeoutFilters init: " + err.Error())
|
||||
}
|
||||
ipnet, err := manet.MultiaddrToIPNet(f)
|
||||
if err != nil {
|
||||
panic("error in lowTimeoutFilters init: " + err.Error())
|
||||
}
|
||||
lowTimeoutFilters.AddFilter(*ipnet, ma.ActionDeny)
|
||||
}
|
||||
}
|
|
@ -178,7 +178,7 @@ type blackHoleDetector struct {
|
|||
}
|
||||
|
||||
// FilterAddrs filters the peer's addresses removing black holed addresses
|
||||
func (d *blackHoleDetector) FilterAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
|
||||
func (d *blackHoleDetector) FilterAddrs(addrs []ma.Multiaddr) (valid []ma.Multiaddr, blackHoled []ma.Multiaddr) {
|
||||
hasUDP, hasIPv6 := false, false
|
||||
for _, a := range addrs {
|
||||
if !manet.IsPublicAddr(a) {
|
||||
|
@ -202,6 +202,7 @@ func (d *blackHoleDetector) FilterAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
|
|||
ipv6Res = d.ipv6.HandleRequest()
|
||||
}
|
||||
|
||||
blackHoled = make([]ma.Multiaddr, 0, len(addrs))
|
||||
return ma.FilterAddrs(
|
||||
addrs,
|
||||
func(a ma.Multiaddr) bool {
|
||||
|
@ -218,14 +219,16 @@ func (d *blackHoleDetector) FilterAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
|
|||
}
|
||||
|
||||
if udpRes == blackHoleResultBlocked && isProtocolAddr(a, ma.P_UDP) {
|
||||
blackHoled = append(blackHoled, a)
|
||||
return false
|
||||
}
|
||||
if ipv6Res == blackHoleResultBlocked && isProtocolAddr(a, ma.P_IP6) {
|
||||
blackHoled = append(blackHoled, a)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
},
|
||||
)
|
||||
), blackHoled
|
||||
}
|
||||
|
||||
// RecordResult updates the state of the relevant `blackHoleFilter`s for addr
|
||||
|
|
|
@ -30,10 +30,7 @@ func (e *DialError) recordErr(addr ma.Multiaddr, err error) {
|
|||
e.Skipped++
|
||||
return
|
||||
}
|
||||
e.DialErrors = append(e.DialErrors, TransportError{
|
||||
Address: addr,
|
||||
Cause: err,
|
||||
})
|
||||
e.DialErrors = append(e.DialErrors, TransportError{Address: addr, Cause: err})
|
||||
}
|
||||
|
||||
func (e *DialError) Error() string {
|
||||
|
@ -51,9 +48,19 @@ func (e *DialError) Error() string {
|
|||
return builder.String()
|
||||
}
|
||||
|
||||
// Unwrap implements https://godoc.org/golang.org/x/xerrors#Wrapper.
|
||||
func (e *DialError) Unwrap() error {
|
||||
return e.Cause
|
||||
func (e *DialError) Unwrap() []error {
|
||||
if e == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
errs := make([]error, 0, len(e.DialErrors)+1)
|
||||
if e.Cause != nil {
|
||||
errs = append(errs, e.Cause)
|
||||
}
|
||||
for i := 0; i < len(e.DialErrors); i++ {
|
||||
errs = append(errs, &e.DialErrors[i])
|
||||
}
|
||||
return errs
|
||||
}
|
||||
|
||||
var _ error = (*DialError)(nil)
|
||||
|
@ -68,4 +75,8 @@ func (e *TransportError) Error() string {
|
|||
return fmt.Sprintf("failed to dial %s: %s", e.Address, e.Cause)
|
||||
}
|
||||
|
||||
func (e *TransportError) Unwrap() error {
|
||||
return e.Cause
|
||||
}
|
||||
|
||||
var _ error = (*TransportError)(nil)
|
||||
|
|
|
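Changing Unwrap to return []error opts DialError into Go 1.20+ multi-error matching, so errors.Is and errors.As can see the Cause and every per-address TransportError. A sketch of what that enables (dialSomewhere is a stub; a real error would come from a failed swarm dial):

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/libp2p/go-libp2p/p2p/net/swarm"
)

// dialSomewhere is a stub standing in for a failed swarm dial; in real code
// the error would come from (*swarm.Swarm).DialPeer.
func dialSomewhere() error { return nil }

func main() {
	err := dialSomewhere()

	// With Unwrap() []error, errors.As finds the DialError and errors.Is can
	// look through every recorded per-address failure.
	var de *swarm.DialError
	if errors.As(err, &de) {
		for _, te := range de.DialErrors {
			fmt.Println(te.Address, te.Cause)
		}
	}
	if errors.Is(err, context.DeadlineExceeded) {
		fmt.Println("at least one address timed out")
	}
}
```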
@ -58,8 +58,19 @@ func NoDelayDialRanker(addrs []ma.Multiaddr) []network.AddrDelay {
|
|||
// 3. If a QUIC or WebTransport address is present, TCP addresses dials are delayed relative to the last QUIC dial:
|
||||
// We prefer to end up with a QUIC connection. For public addresses, the delay introduced is 250ms (PublicTCPDelay),
|
||||
// and for private addresses 30ms (PrivateTCPDelay).
|
||||
// 4. For the TCP addresses we follow a strategy similar to QUIC with an optimisation for handling the long TCP
|
||||
// handshake time described in 6. If both IPv6 TCP and IPv4 TCP addresses are present, we do a Happy Eyeballs
|
||||
// style ranking. First dial the IPv6 TCP address with the lowest port. After this, dial the IPv4 TCP address
|
||||
// with the lowest port delayed by 250ms (PublicTCPDelay) for public addresses, and 30ms (PrivateTCPDelay)
|
||||
// for local addresses. After this we dial all the rest of the addresses delayed by 250ms (PublicTCPDelay) for
|
||||
// public addresses, and 30ms (PrivateTCPDelay) for local addresses.
|
||||
// 5. If only one of TCP IPv6 or TCP IPv4 addresses are present, dial the TCP address with the lowest port
|
||||
// first. After this we dial the rest of the TCP addresses delayed by 250ms (PublicTCPDelay) for public
|
||||
// addresses, and 30ms (PrivateTCPDelay) for local addresses.
|
||||
// 6. When a TCP socket is connected and awaiting security and muxer upgrade, we stop new dials for 2*PrivateTCPDelay
|
||||
// to allow for the upgrade to complete.
|
||||
//
|
||||
// We dial lowest ports first for QUIC addresses as they are more likely to be the listen port.
|
||||
// We dial lowest ports first as they are more likely to be the listen port.
|
||||
func DefaultDialRanker(addrs []ma.Multiaddr) []network.AddrDelay {
|
||||
relay, addrs := filterAddrs(addrs, isRelayAddr)
|
||||
pvt, addrs := filterAddrs(addrs, manet.IsPrivateAddr)
|
||||
|
@ -88,22 +99,57 @@ func DefaultDialRanker(addrs []ma.Multiaddr) []network.AddrDelay {
|
|||
// addresses relative to direct addresses.
|
||||
func getAddrDelay(addrs []ma.Multiaddr, tcpDelay time.Duration, quicDelay time.Duration,
|
||||
offset time.Duration) []network.AddrDelay {
|
||||
if len(addrs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
sort.Slice(addrs, func(i, j int) bool { return score(addrs[i]) < score(addrs[j]) })
|
||||
|
||||
// If the first address is (QUIC, IPv6), make the second address (QUIC, IPv4).
|
||||
happyEyeballs := false
|
||||
if len(addrs) > 0 {
|
||||
// addrs is now sorted by (Transport, IPVersion). Reorder addrs for happy eyeballs dialing.
// For QUIC and TCP, if we have both IPv6 and IPv4 addresses, move the
// highest priority IPv4 address to the second position.
happyEyeballsQUIC := false
happyEyeballsTCP := false
// tcpStartIdx is the index of the first TCP Address
var tcpStartIdx int
{
i := 0
// If the first QUIC address is IPv6 move the first QUIC IPv4 address to second position
if isQUICAddr(addrs[0]) && isProtocolAddr(addrs[0], ma.P_IP6) {
for i := 1; i < len(addrs); i++ {
if isQUICAddr(addrs[i]) && isProtocolAddr(addrs[i], ma.P_IP4) {
// make IPv4 address the second element
if i > 1 {
a := addrs[i]
copy(addrs[2:], addrs[1:i])
for j := 1; j < len(addrs); j++ {
if isQUICAddr(addrs[j]) && isProtocolAddr(addrs[j], ma.P_IP4) {
// The first IPv4 address is at position j
// Move the jth element at position 1 shifting the affected elements
if j > 1 {
a := addrs[j]
copy(addrs[2:], addrs[1:j])
addrs[1] = a
}
happyEyeballs = true
happyEyeballsQUIC = true
i = j + 1
break
}
}
}

for tcpStartIdx = i; tcpStartIdx < len(addrs); tcpStartIdx++ {
if isProtocolAddr(addrs[tcpStartIdx], ma.P_TCP) {
break
}
}

// If the first TCP address is IPv6 move the first TCP IPv4 address to second position
if tcpStartIdx < len(addrs) && isProtocolAddr(addrs[tcpStartIdx], ma.P_IP6) {
for j := tcpStartIdx + 1; j < len(addrs); j++ {
if isProtocolAddr(addrs[j], ma.P_TCP) && isProtocolAddr(addrs[j], ma.P_IP4) {
// First TCP IPv4 address is at position j, move it to position tcpStartIdx+1
// which is the second priority TCP address
if j > tcpStartIdx+1 {
a := addrs[j]
copy(addrs[tcpStartIdx+2:], addrs[tcpStartIdx+1:j])
addrs[tcpStartIdx+1] = a
}
happyEyeballsTCP = true
break
}
}
@ -111,25 +157,42 @@ func getAddrDelay(addrs []ma.Multiaddr, tcpDelay time.Duration, quicDelay time.D
}

res := make([]network.AddrDelay, 0, len(addrs))

var totalTCPDelay time.Duration
var tcpFirstDialDelay time.Duration
for i, addr := range addrs {
var delay time.Duration
switch {
case isQUICAddr(addr):
// For QUIC addresses we dial an IPv6 address, then after quicDelay an IPv4
// address, then after quicDelay we dial rest of the addresses.
// We dial an IPv6 address, then after quicDelay an IPv4
// address, then after a further quicDelay we dial the rest of the addresses.
if i == 1 {
delay = quicDelay
}
if i > 1 && happyEyeballs {
if i > 1 {
// If we have happy eyeballs for QUIC, dials after the second position
// will be delayed by 2*quicDelay
if happyEyeballsQUIC {
delay = 2 * quicDelay
} else if i > 1 {
} else {
delay = quicDelay
}
totalTCPDelay = delay + tcpDelay
}
tcpFirstDialDelay = delay + tcpDelay
case isProtocolAddr(addr, ma.P_TCP):
delay = totalTCPDelay
// We dial an IPv6 address, then after tcpDelay an IPv4
// address, then after a further tcpDelay we dial the rest of the addresses.
if i == tcpStartIdx+1 {
delay = tcpDelay
}
if i > tcpStartIdx+1 {
// If we have happy eyeballs for TCP, dials after the second position
// will be delayed by 2*tcpDelay
if happyEyeballsTCP {
delay = 2 * tcpDelay
} else {
delay = tcpDelay
}
}
delay += tcpFirstDialDelay
}
res = append(res, network.AddrDelay{Addr: addr, Delay: offset + delay})
}
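The reordering above only rearranges a slice and the delay schedule is applied afterwards. The following standalone sketch (not part of the diff) mimics the happy-eyeballs idea with a simplified address type instead of multiaddrs; the type, helper names and delays are invented for illustration only.

package main

import (
	"fmt"
	"time"
)

type addr struct {
	transport string // "quic" or "tcp"
	ipv6      bool
	host      string
}

// reorderAndDelay is a simplified, illustrative version of the happy-eyeballs
// idea above: if the first address is IPv6, promote the first IPv4 address of
// the same transport to the second slot, then space the dials out by delay.
func reorderAndDelay(addrs []addr, delay time.Duration) map[string]time.Duration {
	if len(addrs) > 1 && addrs[0].ipv6 {
		for j := 1; j < len(addrs); j++ {
			if !addrs[j].ipv6 && addrs[j].transport == addrs[0].transport {
				a := addrs[j]
				copy(addrs[2:], addrs[1:j]) // shift the skipped elements down one slot
				addrs[1] = a
				break
			}
		}
	}
	delays := make(map[string]time.Duration)
	for i, a := range addrs {
		switch {
		case i == 0:
			delays[a.host] = 0
		case i == 1:
			delays[a.host] = delay
		default:
			delays[a.host] = 2 * delay
		}
	}
	return delays
}

func main() {
	addrs := []addr{{"quic", true, "v6-a"}, {"quic", true, "v6-b"}, {"quic", false, "v4-a"}}
	fmt.Println(reorderAndDelay(addrs, 250*time.Millisecond))
}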
@ -2,6 +2,7 @@ package swarm

import (
"context"
"errors"
"sync"

"github.com/libp2p/go-libp2p/core/network"
@ -11,6 +12,9 @@ import (
// dialWorkerFunc is used by dialSync to spawn a new dial worker
type dialWorkerFunc func(peer.ID, <-chan dialRequest)

// errConcurrentDialSuccessful is used to signal that a concurrent dial succeeded
var errConcurrentDialSuccessful = errors.New("concurrent dial successful")

// newDialSync constructs a new dialSync
func newDialSync(worker dialWorkerFunc) *dialSync {
return &dialSync{
@ -31,16 +35,11 @@ type activeDial struct {
refCnt int

ctx context.Context
cancel func()
cancelCause func(error)

reqch chan dialRequest
}

func (ad *activeDial) close() {
ad.cancel()
close(ad.reqch)
}

func (ad *activeDial) dial(ctx context.Context) (*Conn, error) {
dialCtx := ad.ctx

@ -74,10 +73,10 @@ func (ds *dialSync) getActiveDial(p peer.ID) (*activeDial, error) {
if !ok {
// This code intentionally uses the background context. Otherwise, if the first call
// to Dial is canceled, subsequent dial calls will also be canceled.
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancelCause(context.Background())
actd = &activeDial{
ctx: ctx,
cancel: cancel,
cancelCause: cancel,
reqch: make(chan dialRequest),
}
go ds.dialWorker(p, actd.reqch)
@ -96,14 +95,21 @@ func (ds *dialSync) Dial(ctx context.Context, p peer.ID) (*Conn, error) {
return nil, err
}

defer func() {
conn, err := ad.dial(ctx)

ds.mutex.Lock()
defer ds.mutex.Unlock()

ad.refCnt--
if ad.refCnt == 0 {
ad.close()
if err == nil {
ad.cancelCause(errConcurrentDialSuccessful)
} else {
ad.cancelCause(err)
}
close(ad.reqch)
delete(ds.dials, p)
}
}()
return ad.dial(ctx)

return conn, err
}
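For readers unfamiliar with context.WithCancelCause, which the reworked dialSync relies on, here is a minimal, self-contained sketch of the same pattern: several callers share one worker, the last caller to leave cancels the shared context, and the cancellation cause records whether a concurrent attempt had already succeeded. All names below are invented for illustration; this is not the vendored code.

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

var errConcurrentSuccess = errors.New("concurrent attempt successful")

type shared struct {
	mu          sync.Mutex
	refCnt      int
	ctx         context.Context
	cancelCause context.CancelCauseFunc
}

func newShared() *shared {
	// Deliberately detached from any caller context so that cancelling the
	// first caller does not cancel everyone else sharing the dial.
	ctx, cancel := context.WithCancelCause(context.Background())
	return &shared{ctx: ctx, cancelCause: cancel}
}

func (s *shared) acquire() { s.mu.Lock(); s.refCnt++; s.mu.Unlock() }

// release decrements the refcount and, when the last user leaves, cancels the
// shared context with a cause explaining why the work stopped.
func (s *shared) release(err error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.refCnt--
	if s.refCnt == 0 {
		if err == nil {
			s.cancelCause(errConcurrentSuccess)
		} else {
			s.cancelCause(err)
		}
	}
}

func main() {
	s := newShared()
	s.acquire()
	s.release(nil)
	fmt.Println(context.Cause(s.ctx)) // concurrent attempt successful
}

Recording the cause matters downstream: the metrics code later in this diff inspects context.Cause to distinguish "application canceled" from "canceled because a concurrent dial won".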
@ -8,15 +8,12 @@ import (

"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
tpt "github.com/libp2p/go-libp2p/core/transport"

ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
)

// /////////////////////////////////////////////////////////////////////////////////
// lo and behold, The Dialer
// TODO explain how all this works
// ////////////////////////////////////////////////////////////////////////////////

// dialRequest is structure used to request dials to the peer associated with a
// worker loop
type dialRequest struct {
@ -61,15 +58,14 @@ type addrDial struct {
conn *Conn
// err is the err on dialing the address
err error
// requests is the list of pendRequests interested in this dial
// the value in the slice is the request number assigned to this request by the dialWorker
requests []int
// dialed indicates whether we have triggered the dial to the address
dialed bool
// createdAt is the time this struct was created
createdAt time.Time
// dialRankingDelay is the delay in dialing this address introduced by the ranking logic
dialRankingDelay time.Duration
// expectedTCPUpgradeTime is the expected time by which security upgrade will complete
expectedTCPUpgradeTime time.Time
}

// dialWorker synchronises concurrent dials to a peer. It ensures that we make at most one dial to a
@ -79,17 +75,13 @@ type dialWorker struct {
peer peer.ID
// reqch is used to send dial requests to the worker. close reqch to end the worker loop
reqch <-chan dialRequest
// reqno is the request number used to track different dialRequests for a peer.
// Each incoming request is assigned a reqno. This reqno is used in pendingRequests and in
// addrDial objects in trackedDials to track this request
reqno int
// pendingRequests maps reqno to the pendRequest object for a dialRequest
pendingRequests map[int]*pendRequest
// trackedDials tracks dials to the peers addresses. An entry here is used to ensure that
// pendingRequests is the set of pendingRequests
pendingRequests map[*pendRequest]struct{}
// trackedDials tracks dials to the peer's addresses. An entry here is used to ensure that
// we dial an address at most once
trackedDials map[string]*addrDial
// resch is used to receive response for dials to the peers addresses.
resch chan dialResult
resch chan tpt.DialUpdate

connected bool // true when a connection has been successfully established

@ -106,9 +98,9 @@ func newDialWorker(s *Swarm, p peer.ID, reqch <-chan dialRequest, cl Clock) *dia
s: s,
peer: p,
reqch: reqch,
pendingRequests: make(map[int]*pendRequest),
pendingRequests: make(map[*pendRequest]struct{}),
trackedDials: make(map[string]*addrDial),
resch: make(chan dialResult),
resch: make(chan tpt.DialUpdate),
cl: cl,
}
}
@ -128,6 +120,8 @@ func (w *dialWorker) loop() {
startTime := w.cl.Now()
// dialTimer is the dialTimer used to trigger dials
dialTimer := w.cl.InstantTimer(startTime.Add(math.MaxInt64))
defer dialTimer.Stop()

timerRunning := true
// scheduleNextDial updates timer for triggering the next dial
scheduleNextDial := func() {
@ -135,12 +129,18 @@ func (w *dialWorker) loop() {
<-dialTimer.Ch()
}
timerRunning = false
if dq.len() > 0 {
if dq.Len() > 0 {
if dialsInFlight == 0 && !w.connected {
// if there are no dials in flight, trigger the next dials immediately
dialTimer.Reset(startTime)
} else {
dialTimer.Reset(startTime.Add(dq.top().Delay))
resetTime := startTime.Add(dq.top().Delay)
for _, ad := range w.trackedDials {
if !ad.expectedTCPUpgradeTime.IsZero() && ad.expectedTCPUpgradeTime.After(resetTime) {
resetTime = ad.expectedTCPUpgradeTime
}
}
dialTimer.Reset(resetTime)
}
timerRunning = true
}
@ -171,15 +171,20 @@ loop:
// Enqueue the peer's addresses relevant to this request in dq and
// track dials to the addresses relevant to this request.

c, err := w.s.bestAcceptableConnToPeer(req.ctx, w.peer)
if c != nil || err != nil {
req.resch <- dialResponse{conn: c, err: err}
c := w.s.bestAcceptableConnToPeer(req.ctx, w.peer)
if c != nil {
req.resch <- dialResponse{conn: c}
continue loop
}

addrs, err := w.s.addrsForDial(req.ctx, w.peer)
addrs, addrErrs, err := w.s.addrsForDial(req.ctx, w.peer)
if err != nil {
req.resch <- dialResponse{err: err}
req.resch <- dialResponse{
err: &DialError{
Peer: w.peer,
DialErrors: addrErrs,
Cause: err,
}}
continue loop
}

@ -191,8 +196,8 @@ loop:
// create the pending request object
pr := &pendRequest{
req: req,
err: &DialError{Peer: w.peer},
addrs: make(map[string]struct{}, len(addrRanking)),
err: &DialError{Peer: w.peer, DialErrors: addrErrs},
}
for _, adelay := range addrRanking {
pr.addrs[string(adelay.Addr.Bytes())] = struct{}{}
@ -233,14 +238,13 @@ loop:

if len(todial) == 0 && len(tojoin) == 0 {
// all request applicable addrs have been dialed, we must have errored
pr.err.Cause = ErrAllDialsFailed
req.resch <- dialResponse{err: pr.err}
continue loop
}

// The request has some pending or new dials. We assign this request a request number.
// This value of w.reqno is used to track this request in all the structures
w.reqno++
w.pendingRequests[w.reqno] = pr
// The request has some pending or new dials
w.pendingRequests[pr] = struct{}{}

for _, ad := range tojoin {
if !ad.dialed {
@ -258,7 +262,6 @@ loop:
}
}
// add the request to the addrDial
ad.requests = append(ad.requests, w.reqno)
}

if len(todial) > 0 {
@ -268,7 +271,6 @@ loop:
w.trackedDials[string(a.Bytes())] = &addrDial{
addr: a,
ctx: req.ctx,
requests: []int{w.reqno},
createdAt: now,
}
dq.Add(network.AddrDelay{Addr: a, Delay: addrDelay[string(a.Bytes())]})
@ -313,16 +315,29 @@ loop:
// Update all requests waiting on this address. On success, complete the request.
// On error, record the error

dialsInFlight--
ad, ok := w.trackedDials[string(res.Addr.Bytes())]
if !ok {
log.Errorf("SWARM BUG: no entry for address %s in trackedDials", res.Addr)
if res.Conn != nil {
res.Conn.Close()
}
dialsInFlight--
continue
}

// TCP Connection has been established. Wait for connection upgrade on this address
// before making new dials.
if res.Kind == tpt.UpdateKindHandshakeProgressed {
// Only wait for public addresses to complete dialing since private dials
// are quick any way
if manet.IsPublicAddr(res.Addr) {
ad.expectedTCPUpgradeTime = w.cl.Now().Add(PublicTCPDelay)
}
scheduleNextDial()
continue
}
dialsInFlight--
ad.expectedTCPUpgradeTime = time.Time{}
if res.Conn != nil {
// we got a connection, add it to the swarm
conn, err := w.s.addConn(res.Conn, network.DirOutbound)
@ -333,20 +348,14 @@ loop:
continue loop
}

// request succeeded, respond to all pending requests
for _, reqno := range ad.requests {
pr, ok := w.pendingRequests[reqno]
if !ok {
// some other dial for this request succeeded before this one
continue
}
for pr := range w.pendingRequests {
if _, ok := pr.addrs[string(ad.addr.Bytes())]; ok {
pr.req.resch <- dialResponse{conn: conn}
delete(w.pendingRequests, reqno)
delete(w.pendingRequests, pr)
}
}

ad.conn = conn
ad.requests = nil

if !w.connected {
w.connected = true
if w.s.metricsTracer != nil {
@ -380,32 +389,26 @@ loop:
// dispatches an error to a specific addr dial
func (w *dialWorker) dispatchError(ad *addrDial, err error) {
ad.err = err
for _, reqno := range ad.requests {
pr, ok := w.pendingRequests[reqno]
if !ok {
// some other dial for this request succeeded before this one
continue
}

for pr := range w.pendingRequests {
// accumulate the error
if _, ok := pr.addrs[string(ad.addr.Bytes())]; ok {
pr.err.recordErr(ad.addr, err)

delete(pr.addrs, string(ad.addr.Bytes()))
if len(pr.addrs) == 0 {
// all addrs have erred, dispatch dial error
// but first do a last one check in case an acceptable connection has landed from
// a simultaneous dial that started later and added new acceptable addrs
c, _ := w.s.bestAcceptableConnToPeer(pr.req.ctx, w.peer)
c := w.s.bestAcceptableConnToPeer(pr.req.ctx, w.peer)
if c != nil {
pr.req.resch <- dialResponse{conn: c}
} else {
pr.err.Cause = ErrAllDialsFailed
pr.req.resch <- dialResponse{err: pr.err}
}
delete(w.pendingRequests, reqno)
delete(w.pendingRequests, pr)
}
}
}

ad.requests = nil

// if it was a backoff, clear the address dial so that it doesn't inhibit new dial requests.
// this is necessary to support active listen scenarios, where a new dial comes in while
@ -439,7 +442,7 @@ func newDialQueue() *dialQueue {
// Add adds adelay to the queue. If another element exists in the queue with
// the same address, it replaces that element.
func (dq *dialQueue) Add(adelay network.AddrDelay) {
for i := 0; i < dq.len(); i++ {
for i := 0; i < dq.Len(); i++ {
if dq.q[i].Addr.Equal(adelay.Addr) {
if dq.q[i].Delay == adelay.Delay {
// existing element is the same. nothing to do
@ -452,7 +455,7 @@ func (dq *dialQueue) Add(adelay network.AddrDelay) {
}
}

for i := 0; i < dq.len(); i++ {
for i := 0; i < dq.Len(); i++ {
if dq.q[i].Delay > adelay.Delay {
dq.q = append(dq.q, network.AddrDelay{}) // extend the slice
copy(dq.q[i+1:], dq.q[i:])
@ -465,13 +468,13 @@ func (dq *dialQueue) Add(adelay network.AddrDelay) {

// NextBatch returns all the elements in the queue with the highest priority
func (dq *dialQueue) NextBatch() []network.AddrDelay {
if dq.len() == 0 {
if dq.Len() == 0 {
return nil
}

// i is the index of the second highest priority element
var i int
for i = 0; i < dq.len(); i++ {
for i = 0; i < dq.Len(); i++ {
if dq.q[i].Delay != dq.q[0].Delay {
break
}
@ -486,7 +489,7 @@ func (dq *dialQueue) top() network.AddrDelay {
return dq.q[0]
}

// len returns the number of elements in the queue
func (dq *dialQueue) len() int {
// Len returns the number of elements in the queue
func (dq *dialQueue) Len() int {
return len(dq.q)
}
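The dialQueue used by the worker loop above is just a slice kept sorted by delay. A compact re-implementation of the same idea, with plain strings instead of multiaddrs, is sketched below; it is an illustration under those simplifying assumptions, not the vendored type.

package main

import (
	"fmt"
	"time"
)

type addrDelay struct {
	Addr  string
	Delay time.Duration
}

type dialQueue struct{ q []addrDelay }

// Add inserts ad keeping the queue sorted by Delay, replacing any existing
// entry for the same address.
func (dq *dialQueue) Add(ad addrDelay) {
	for i := 0; i < dq.Len(); i++ {
		if dq.q[i].Addr == ad.Addr {
			dq.q = append(dq.q[:i], dq.q[i+1:]...)
			break
		}
	}
	for i := 0; i < dq.Len(); i++ {
		if dq.q[i].Delay > ad.Delay {
			dq.q = append(dq.q, addrDelay{}) // extend the slice by one
			copy(dq.q[i+1:], dq.q[i:])       // shift the tail right
			dq.q[i] = ad
			return
		}
	}
	dq.q = append(dq.q, ad)
}

// NextBatch pops every entry that shares the smallest delay.
func (dq *dialQueue) NextBatch() []addrDelay {
	if dq.Len() == 0 {
		return nil
	}
	i := 0
	for ; i < dq.Len(); i++ {
		if dq.q[i].Delay != dq.q[0].Delay {
			break
		}
	}
	batch := dq.q[:i]
	dq.q = dq.q[i:]
	return batch
}

func (dq *dialQueue) Len() int { return len(dq.q) }

func main() {
	dq := &dialQueue{}
	dq.Add(addrDelay{"tcp-1", 250 * time.Millisecond})
	dq.Add(addrDelay{"quic-1", 0})
	dq.Add(addrDelay{"quic-2", 0})
	fmt.Println(dq.NextBatch()) // both zero-delay entries come out together
}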
@ -13,17 +13,11 @@ import (
ma "github.com/multiformats/go-multiaddr"
)

type dialResult struct {
Conn transport.CapableConn
Addr ma.Multiaddr
Err error
}

type dialJob struct {
addr ma.Multiaddr
peer peer.ID
ctx context.Context
resp chan dialResult
resp chan transport.DialUpdate
timeout time.Duration
}

@ -45,7 +39,7 @@ type dialLimiter struct {
waitingOnPeerLimit map[peer.ID][]*dialJob
}

type dialfunc func(context.Context, peer.ID, ma.Multiaddr) (transport.CapableConn, error)
type dialfunc func(context.Context, peer.ID, ma.Multiaddr, chan<- transport.DialUpdate) (transport.CapableConn, error)

func newDialLimiter(df dialfunc) *dialLimiter {
fd := ConcurrentFdDials
@ -216,9 +210,13 @@ func (dl *dialLimiter) executeDial(j *dialJob) {
dctx, cancel := context.WithTimeout(j.ctx, j.timeout)
defer cancel()

con, err := dl.dialFunc(dctx, j.peer, j.addr)
con, err := dl.dialFunc(dctx, j.peer, j.addr, j.resp)
kind := transport.UpdateKindDialSuccessful
if err != nil {
kind = transport.UpdateKindDialFailed
}
select {
case j.resp <- dialResult{Conn: con, Addr: j.addr, Err: err}:
case j.resp <- transport.DialUpdate{Kind: kind, Conn: con, Addr: j.addr, Err: err}:
case <-j.ctx.Done():
if con != nil {
con.Close()
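The limiter change above replaces the plain dialResult with an update carrying a Kind, so one channel can deliver both intermediate progress events and the final outcome. A hedged sketch of that pattern with invented types (not the go-libp2p transport API):

package main

import (
	"errors"
	"fmt"
)

type updateKind int

const (
	kindHandshakeProgressed updateKind = iota
	kindDialFailed
	kindDialSuccessful
)

type dialUpdate struct {
	Kind updateKind
	Err  error
}

// report classifies a finished dial the same way the limiter does:
// success unless an error occurred.
func report(ch chan<- dialUpdate, err error) {
	kind := kindDialSuccessful
	if err != nil {
		kind = kindDialFailed
	}
	ch <- dialUpdate{Kind: kind, Err: err}
}

func main() {
	ch := make(chan dialUpdate, 1)
	report(ch, errors.New("connection refused"))
	fmt.Println((<-ch).Kind == kindDialFailed) // true
}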
@ -17,6 +17,7 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/peerstore"
"github.com/libp2p/go-libp2p/core/transport"
"golang.org/x/exp/slices"

logging "github.com/ipfs/go-log/v2"
ma "github.com/multiformats/go-multiaddr"
@ -136,8 +137,8 @@ func WithIPv6BlackHoleConfig(enabled bool, n, min int) Option {
// communication. The Chan sends/receives Messages, which note the
// destination or source Peer.
type Swarm struct {
nextConnID uint64 // guarded by atomic
nextStreamID uint64 // guarded by atomic
nextConnID atomic.Uint64
nextStreamID atomic.Uint64

// Close refcount. This allows us to fully wait for the swarm to be torn
// down before continuing.
@ -172,6 +173,11 @@ type Swarm struct {
m map[network.Notifiee]struct{}
}

directConnNotifs struct {
sync.Mutex
m map[peer.ID][]chan struct{}
}

transports struct {
sync.RWMutex
m map[int]transport.Transport
@ -231,6 +237,7 @@ func NewSwarm(local peer.ID, peers peerstore.Peerstore, eventBus event.Bus, opts
s.listeners.m = make(map[transport.Listener]struct{})
s.transports.m = make(map[int]transport.Transport)
s.notifs.m = make(map[network.Notifiee]struct{})
s.directConnNotifs.m = make(map[peer.ID][]chan struct{})

for _, opt := range opts {
if err := opt(s); err != nil {
@ -343,7 +350,7 @@ func (s *Swarm) addConn(tc transport.CapableConn, dir network.Direction) (*Conn,
conn: tc,
swarm: s,
stat: stat,
id: atomic.AddUint64(&s.nextConnID, 1),
id: s.nextConnID.Add(1),
}

// we ONLY check upgraded connections here so we can send them a Disconnect message.
@ -353,7 +360,7 @@ func (s *Swarm) addConn(tc transport.CapableConn, dir network.Direction) (*Conn,
// TODO Send disconnect with reason here
err := tc.Close()
if err != nil {
log.Warnf("failed to close connection with peer %s and addr %s; err: %s", p.Pretty(), addr, err)
log.Warnf("failed to close connection with peer %s and addr %s; err: %s", p, addr, err)
}
return nil, ErrGaterDisallowedConnection
}
@ -390,6 +397,19 @@ func (s *Swarm) addConn(tc transport.CapableConn, dir network.Direction) (*Conn,
c.notifyLk.Lock()
s.conns.Unlock()

// Notify goroutines waiting for a direct connection
if !c.Stat().Transient {
// Go routines interested in waiting for direct connection first acquire this lock
// and then acquire s.conns.RLock. Do not acquire this lock before conns.Unlock to
// prevent deadlock.
s.directConnNotifs.Lock()
for _, ch := range s.directConnNotifs.m[p] {
close(ch)
}
delete(s.directConnNotifs.m, p)
s.directConnNotifs.Unlock()
}

// Emit event after releasing `s.conns` lock so that a consumer can still
// use swarm methods that need the `s.conns` lock.
if isFirstConnection {
@ -429,54 +449,110 @@ func (s *Swarm) StreamHandler() network.StreamHandler {

// NewStream creates a new stream on any available connection to peer, dialing
// if necessary.
// Use network.WithUseTransient to open a stream over a transient(relayed)
// connection.
func (s *Swarm) NewStream(ctx context.Context, p peer.ID) (network.Stream, error) {
log.Debugf("[%s] opening stream to peer [%s]", s.local, p)

// Algorithm:
// 1. Find the best connection, otherwise, dial.
// 2. Try opening a stream.
// 3. If the underlying connection is, in fact, closed, close the outer
// 2. If the best connection is transient, wait for a direct conn via conn
// reversal or hole punching.
// 3. Try opening a stream.
// 4. If the underlying connection is, in fact, closed, close the outer
// connection and try again. We do this in case we have a closed
// connection but don't notice it until we actually try to open a
// stream.
//
// Note: We only dial once.
//
// TODO: Try all connections even if we get an error opening a stream on
// a non-closed connection.
dials := 0
numDials := 0
for {
// will prefer direct connections over relayed connections for opening streams
c, err := s.bestAcceptableConnToPeer(ctx, p)
if err != nil {
return nil, err
}

c := s.bestConnToPeer(p)
if c == nil {
if nodial, _ := network.GetNoDial(ctx); nodial {
return nil, network.ErrNoConn
}

if dials >= DialAttempts {
if nodial, _ := network.GetNoDial(ctx); !nodial {
numDials++
if numDials > DialAttempts {
return nil, errors.New("max dial attempts exceeded")
}
dials++

var err error
c, err = s.dialPeer(ctx, p)
if err != nil {
return nil, err
}
} else {
return nil, network.ErrNoConn
}
}

s, err := c.NewStream(ctx)
useTransient, _ := network.GetUseTransient(ctx)
if !useTransient && c.Stat().Transient {
var err error
c, err = s.waitForDirectConn(ctx, p)
if err != nil {
return nil, err
}
}

str, err := c.NewStream(ctx)
if err != nil {
if c.conn.IsClosed() {
continue
}
return nil, err
}
return s, nil
return str, nil
}
}

// waitForDirectConn waits for a direct connection established through hole punching or connection reversal.
func (s *Swarm) waitForDirectConn(ctx context.Context, p peer.ID) (*Conn, error) {
s.directConnNotifs.Lock()
c := s.bestConnToPeer(p)
if c == nil {
s.directConnNotifs.Unlock()
return nil, network.ErrNoConn
} else if !c.Stat().Transient {
s.directConnNotifs.Unlock()
return c, nil
}

// Wait for transient connection to upgrade to a direct connection either by
// connection reversal or hole punching.
ch := make(chan struct{})
s.directConnNotifs.m[p] = append(s.directConnNotifs.m[p], ch)
s.directConnNotifs.Unlock()

// apply the DialPeer timeout
ctx, cancel := context.WithTimeout(ctx, network.GetDialPeerTimeout(ctx))
defer cancel()

// Wait for notification.
select {
case <-ctx.Done():
// Remove ourselves from the notification list
s.directConnNotifs.Lock()
defer s.directConnNotifs.Unlock()

s.directConnNotifs.m[p] = slices.DeleteFunc(
s.directConnNotifs.m[p],
func(c chan struct{}) bool { return c == ch },
)
if len(s.directConnNotifs.m[p]) == 0 {
delete(s.directConnNotifs.m, p)
}
return nil, ctx.Err()
case <-ch:
// We do not need to remove ourselves from the list here as the notifier
// clears the map entry
c := s.bestConnToPeer(p)
if c == nil {
return nil, network.ErrNoConn
}
if c.Stat().Transient {
return nil, network.ErrTransientConn
}
return c, nil
}
}

@ -548,26 +624,17 @@ func (s *Swarm) bestConnToPeer(p peer.ID) *Conn {
return best
}

// - Returns the best "acceptable" connection, if available.
// - Returns nothing if no such connection exists, but if we should try dialing anyways.
// - Returns an error if no such connection exists, but we should not try dialing.
func (s *Swarm) bestAcceptableConnToPeer(ctx context.Context, p peer.ID) (*Conn, error) {
// bestAcceptableConnToPeer returns the best acceptable connection, considering the passed in ctx.
// If network.WithForceDirectDial is used, it only returns a direct connections, ignoring
// any transient (relayed) connections to the peer.
func (s *Swarm) bestAcceptableConnToPeer(ctx context.Context, p peer.ID) *Conn {
conn := s.bestConnToPeer(p)
if conn == nil {
return nil, nil
}

forceDirect, _ := network.GetForceDirectDial(ctx)
if forceDirect && !isDirectConn(conn) {
return nil, nil
return nil
}

useTransient, _ := network.GetUseTransient(ctx)
if useTransient || !conn.Stat().Transient {
return conn, nil
}

return nil, network.ErrTransientConn
return conn
}

func isDirectConn(c *Conn) bool {
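The directConnNotifs machinery introduced above is a classic "register a channel, close it on the event" notifier: waiters append a channel under a lock, and whoever observes a direct connection closes every registered channel. A compact sketch of that pattern, independent of the swarm types (all names invented):

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type notifier struct {
	mu sync.Mutex
	m  map[string][]chan struct{}
}

func newNotifier() *notifier { return &notifier{m: make(map[string][]chan struct{})} }

// wait registers interest in key and blocks until notify(key) runs or ctx expires.
func (n *notifier) wait(ctx context.Context, key string) error {
	ch := make(chan struct{})
	n.mu.Lock()
	n.m[key] = append(n.m[key], ch)
	n.mu.Unlock()
	select {
	case <-ch:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// notify closes every registered channel for key and clears the entry.
func (n *notifier) notify(key string) {
	n.mu.Lock()
	for _, ch := range n.m[key] {
		close(ch)
	}
	delete(n.m, key)
	n.mu.Unlock()
}

func main() {
	n := newNotifier()
	go func() { time.Sleep(10 * time.Millisecond); n.notify("peer-1") }()
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(n.wait(ctx, "peer-1")) // <nil>
}

The vendored code goes one step further than this sketch: on timeout it also removes the abandoned channel from the slice (via slices.DeleteFunc) so stale waiters do not accumulate in the map.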
@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"sync"
"sync/atomic"
"time"

ic "github.com/libp2p/go-libp2p/core/crypto"
@ -49,7 +48,7 @@ func (c *Conn) IsClosed() bool {

func (c *Conn) ID() string {
// format: <first 10 chars of peer id>-<global conn ordinal>
return fmt.Sprintf("%s-%d", c.RemotePeer().Pretty()[0:10], c.id)
return fmt.Sprintf("%s-%d", c.RemotePeer().String()[:10], c.id)
}

// Close closes this connection.
@ -137,6 +136,7 @@ func (c *Conn) start() {
if h := c.swarm.StreamHandler(); h != nil {
h(s)
}
s.completeAcceptStreamGoroutine()
}()
}
}()
@ -147,9 +147,9 @@ func (c *Conn) String() string {
"<swarm.Conn[%T] %s (%s) <-> %s (%s)>",
c.conn.Transport(),
c.conn.LocalMultiaddr(),
c.conn.LocalPeer().Pretty(),
c.conn.LocalPeer(),
c.conn.RemoteMultiaddr(),
c.conn.RemotePeer().Pretty(),
c.conn.RemotePeer(),
)
}

@ -238,7 +238,8 @@ func (c *Conn) addStream(ts network.MuxedStream, dir network.Direction, scope ne
Direction: dir,
Opened: time.Now(),
},
id: atomic.AddUint64(&c.swarm.nextStreamID, 1),
id: c.swarm.nextStreamID.Add(1),
acceptStreamGoroutineCompleted: dir != network.DirInbound,
}
c.stat.NumStreams++
c.streams.m[s] = struct{}{}
@ -14,8 +14,10 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/peerstore"
"github.com/libp2p/go-libp2p/core/transport"

ma "github.com/multiformats/go-multiaddr"
madns "github.com/multiformats/go-multiaddr-dns"
mafmt "github.com/multiformats/go-multiaddr-fmt"
manet "github.com/multiformats/go-multiaddr/net"
)

@ -65,6 +67,19 @@ var (
ErrGaterDisallowedConnection = errors.New("gater disallows connection to peer")
)

// ErrQUICDraft29 wraps ErrNoTransport and provide a more meaningful error message
var ErrQUICDraft29 errQUICDraft29

type errQUICDraft29 struct{}

func (errQUICDraft29) Error() string {
return "QUIC draft-29 has been removed, QUIC (RFC 9000) is accessible with /quic-v1"
}

func (errQUICDraft29) Unwrap() error {
return ErrNoTransport
}

// DialAttempts governs how many times a goroutine will try to dial a given peer.
// Note: this is down to one, as we have _too many dials_ atm. To add back in,
// add loop back in Dial(.)
@ -201,7 +216,8 @@ func (db *DialBackoff) cleanup() {
}
}

// DialPeer connects to a peer.
// DialPeer connects to a peer. Use network.WithForceDirectDial to force a
// direct connection.
//
// The idea is that the client of Swarm does not need to know what network
// the connection will happen over. Swarm can use whichever it choses.
@ -231,15 +247,14 @@ func (s *Swarm) dialPeer(ctx context.Context, p peer.ID) (*Conn, error) {
return nil, ErrDialToSelf
}

// check if we already have an open (usable) connection first, or can't have a usable
// connection.
conn, err := s.bestAcceptableConnToPeer(ctx, p)
if conn != nil || err != nil {
return conn, err
// check if we already have an open (usable) connection.
conn := s.bestAcceptableConnToPeer(ctx, p)
if conn != nil {
return conn, nil
}

if s.gater != nil && !s.gater.InterceptPeerDial(p) {
log.Debugf("gater disallowed outbound connection to peer %s", p.Pretty())
log.Debugf("gater disallowed outbound connection to peer %s", p)
return nil, &DialError{Peer: p, Cause: ErrGaterDisallowedConnection}
}

@ -280,68 +295,47 @@ func (s *Swarm) dialWorkerLoop(p peer.ID, reqch <-chan dialRequest) {
w.loop()
}

func (s *Swarm) addrsForDial(ctx context.Context, p peer.ID) ([]ma.Multiaddr, error) {
func (s *Swarm) addrsForDial(ctx context.Context, p peer.ID) (goodAddrs []ma.Multiaddr, addrErrs []TransportError, err error) {
peerAddrs := s.peers.Addrs(p)
if len(peerAddrs) == 0 {
return nil, ErrNoAddresses
}

peerAddrsAfterTransportResolved := make([]ma.Multiaddr, 0, len(peerAddrs))
for _, a := range peerAddrs {
tpt := s.TransportForDialing(a)
resolver, ok := tpt.(transport.Resolver)
if ok {
resolvedAddrs, err := resolver.Resolve(ctx, a)
if err != nil {
log.Warnf("Failed to resolve multiaddr %s by transport %v: %v", a, tpt, err)
continue
}
peerAddrsAfterTransportResolved = append(peerAddrsAfterTransportResolved, resolvedAddrs...)
} else {
peerAddrsAfterTransportResolved = append(peerAddrsAfterTransportResolved, a)
}
return nil, nil, ErrNoAddresses
}

// Resolve dns or dnsaddrs
resolved, err := s.resolveAddrs(ctx, peer.AddrInfo{
ID: p,
Addrs: peerAddrsAfterTransportResolved,
})
resolved, err := s.resolveAddrs(ctx, peer.AddrInfo{ID: p, Addrs: peerAddrs})
if err != nil {
return nil, err
return nil, nil, err
}

goodAddrs := s.filterKnownUndialables(p, resolved)
goodAddrs = ma.Unique(resolved)
goodAddrs, addrErrs = s.filterKnownUndialables(p, goodAddrs)
if forceDirect, _ := network.GetForceDirectDial(ctx); forceDirect {
goodAddrs = ma.FilterAddrs(goodAddrs, s.nonProxyAddr)
}
goodAddrs = ma.Unique(goodAddrs)

if len(goodAddrs) == 0 {
return nil, ErrNoGoodAddresses
return nil, addrErrs, ErrNoGoodAddresses
}

s.peers.AddAddrs(p, goodAddrs, peerstore.TempAddrTTL)

return goodAddrs, nil
return goodAddrs, addrErrs, nil
}

func (s *Swarm) resolveAddrs(ctx context.Context, pi peer.AddrInfo) ([]ma.Multiaddr, error) {
proto := ma.ProtocolWithCode(ma.P_P2P).Name
p2paddr, err := ma.NewMultiaddr("/" + proto + "/" + pi.ID.Pretty())
p2paddr, err := ma.NewMultiaddr("/" + ma.ProtocolWithCode(ma.P_P2P).Name + "/" + pi.ID.String())
if err != nil {
return nil, err
}

resolveSteps := 0

var resolveSteps int
// Recursively resolve all addrs.
//
// While the toResolve list is non-empty:
// * Pop an address off.
// * If the address is fully resolved, add it to the resolved list.
// * Otherwise, resolve it and add the results to the "to resolve" list.
toResolve := append(([]ma.Multiaddr)(nil), pi.Addrs...)
toResolve := append([]ma.Multiaddr{}, pi.Addrs...)
resolved := make([]ma.Multiaddr, 0, len(pi.Addrs))
for len(toResolve) > 0 {
// pop the last addr off.
@ -368,6 +362,26 @@ func (s *Swarm) resolveAddrs(ctx context.Context, pi peer.AddrInfo) ([]ma.Multia
continue
}

tpt := s.TransportForDialing(addr)
resolver, ok := tpt.(transport.Resolver)
if ok {
resolvedAddrs, err := resolver.Resolve(ctx, addr)
if err != nil {
log.Warnf("Failed to resolve multiaddr %s by transport %v: %v", addr, tpt, err)
continue
}
var added bool
for _, a := range resolvedAddrs {
if !addr.Equal(a) {
toResolve = append(toResolve, a)
added = true
}
}
if added {
continue
}
}

// otherwise, resolve it
reqaddr := addr.Encapsulate(p2paddr)
resaddrs, err := s.maResolver.Resolve(ctx, reqaddr)
@ -388,7 +402,7 @@ func (s *Swarm) resolveAddrs(ctx context.Context, pi peer.AddrInfo) ([]ma.Multia
return resolved, nil
}

func (s *Swarm) dialNextAddr(ctx context.Context, p peer.ID, addr ma.Multiaddr, resch chan dialResult) error {
func (s *Swarm) dialNextAddr(ctx context.Context, p peer.ID, addr ma.Multiaddr, resch chan transport.DialUpdate) error {
// check the dial backoff
if forceDirect, _ := network.GetForceDirectDial(ctx); !forceDirect {
if s.backf.Backoff(p, addr) {
@ -402,23 +416,20 @@ func (s *Swarm) dialNextAddr(ctx context.Context, p peer.ID, addr ma.Multiaddr,
return nil
}

func (s *Swarm) canDial(addr ma.Multiaddr) bool {
t := s.TransportForDialing(addr)
return t != nil && t.CanDial(addr)
}

func (s *Swarm) nonProxyAddr(addr ma.Multiaddr) bool {
t := s.TransportForDialing(addr)
return !t.Proxy()
}

var quicDraft29DialMatcher = mafmt.And(mafmt.IP, mafmt.Base(ma.P_UDP), mafmt.Base(ma.P_QUIC))

// filterKnownUndialables takes a list of multiaddrs, and removes those
// that we definitely don't want to dial: addresses configured to be blocked,
// IPv6 link-local addresses, addresses without a dial-capable transport,
// addresses that we know to be our own, and addresses with a better tranport
// available. This is an optimization to avoid wasting time on dials that we
// know are going to fail or for which we have a better alternative.
func (s *Swarm) filterKnownUndialables(p peer.ID, addrs []ma.Multiaddr) []ma.Multiaddr {
func (s *Swarm) filterKnownUndialables(p peer.ID, addrs []ma.Multiaddr) (goodAddrs []ma.Multiaddr, addrErrs []TransportError) {
lisAddrs, _ := s.InterfaceListenAddresses()
var ourAddrs []ma.Multiaddr
for _, addr := range lisAddrs {
@ -431,35 +442,71 @@ func (s *Swarm) filterKnownUndialables(p peer.ID, addrs []ma.Mul
})
}

// The order of these two filters is important. If we can only dial /webtransport,
// we don't want to filter /webtransport addresses out because the peer had a /quic-v1
// address
addrErrs = make([]TransportError, 0, len(addrs))

// filter addresses we cannot dial
addrs = ma.FilterAddrs(addrs, s.canDial)
// The order of checking for transport and filtering low priority addrs is important. If we
// can only dial /webtransport, we don't want to filter /webtransport addresses out because
// the peer had a /quic-v1 address

// filter addresses with no transport
addrs = ma.FilterAddrs(addrs, func(a ma.Multiaddr) bool {
if s.TransportForDialing(a) == nil {
e := ErrNoTransport
// We used to support QUIC draft-29 for a long time.
// Provide a more useful error when attempting to dial a QUIC draft-29 address.
if quicDraft29DialMatcher.Matches(a) {
e = ErrQUICDraft29
}
addrErrs = append(addrErrs, TransportError{Address: a, Cause: e})
return false
}
return true
})

// filter low priority addresses among the addresses we can dial
// We don't return an error for these addresses
addrs = filterLowPriorityAddresses(addrs)

// remove black holed addrs
addrs = s.bhd.FilterAddrs(addrs)
addrs, blackHoledAddrs := s.bhd.FilterAddrs(addrs)
for _, a := range blackHoledAddrs {
addrErrs = append(addrErrs, TransportError{Address: a, Cause: ErrDialRefusedBlackHole})
}

return ma.FilterAddrs(addrs,
func(addr ma.Multiaddr) bool { return !ma.Contains(ourAddrs, addr) },
// Linux and BSD treat an unspecified address when dialing as a localhost address.
// Windows doesn't support this. We filter all such addresses out because peers
// listening on unspecified addresses will advertise more specific addresses.
// https://unix.stackexchange.com/a/419881
// https://superuser.com/a/1755455
func(addr ma.Multiaddr) bool {
return !manet.IsIPUnspecified(addr)
},
func(addr ma.Multiaddr) bool {
if ma.Contains(ourAddrs, addr) {
addrErrs = append(addrErrs, TransportError{Address: addr, Cause: ErrDialToSelf})
return false
}
return true
},
// TODO: Consider allowing link-local addresses
func(addr ma.Multiaddr) bool { return !manet.IsIP6LinkLocal(addr) },
func(addr ma.Multiaddr) bool {
return s.gater == nil || s.gater.InterceptAddrDial(p, addr)
if s.gater != nil && !s.gater.InterceptAddrDial(p, addr) {
addrErrs = append(addrErrs, TransportError{Address: addr, Cause: ErrGaterDisallowedConnection})
return false
}
return true
},
)
), addrErrs
}

// limitedDial will start a dial to the given peer when
// it is able, respecting the various different types of rate
// limiting that occur without using extra goroutines per addr
func (s *Swarm) limitedDial(ctx context.Context, p peer.ID, a ma.Multiaddr, resp chan dialResult) {
func (s *Swarm) limitedDial(ctx context.Context, p peer.ID, a ma.Multiaddr, resp chan transport.DialUpdate) {
timeout := s.dialTimeout
if lowTimeoutFilters.AddrBlocked(a) && s.dialTimeoutLocal < s.dialTimeout {
if manet.IsPrivateAddr(a) && s.dialTimeoutLocal < s.dialTimeout {
timeout = s.dialTimeoutLocal
}
s.limiter.AddDialJob(&dialJob{
@ -472,7 +519,7 @@ func (s *Swarm) limitedDial(ctx context.Context, p peer.ID, a ma.Multiaddr, resp
}

// dialAddr is the actual dial for an addr, indirectly invoked through the limiter
func (s *Swarm) dialAddr(ctx context.Context, p peer.ID, addr ma.Multiaddr) (transport.CapableConn, error) {
func (s *Swarm) dialAddr(ctx context.Context, p peer.ID, addr ma.Multiaddr, updCh chan<- transport.DialUpdate) (transport.CapableConn, error) {
// Just to double check. Costs nothing.
if s.local == p {
return nil, ErrDialToSelf
@ -490,7 +537,13 @@ func (s *Swarm) dialAddr(ctx context.Context, p peer.ID, addr ma.Multiaddr) (tra
}

start := time.Now()
connC, err := tpt.Dial(ctx, addr, p)
var connC transport.CapableConn
var err error
if du, ok := tpt.(transport.DialUpdater); ok {
connC, err = du.DialWithUpdates(ctx, addr, p, updCh)
} else {
connC, err = tpt.Dial(ctx, addr, p)
}

// We're recording any error as a failure here.
// Notably, this also applies to cancelations (i.e. if another dial attempt was faster).
@ -499,7 +552,7 @@ func (s *Swarm) dialAddr(ctx context.Context, p peer.ID, addr ma.Multiaddr) (tra

if err != nil {
if s.metricsTracer != nil {
s.metricsTracer.FailedDialing(addr, err)
s.metricsTracer.FailedDialing(addr, err, context.Cause(ctx))
}
return nil, err
}
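The new filterKnownUndialables returns a TransportError for every address it drops instead of silently discarding it, so callers can report why a dial never happened. The filter-and-record shape can be shown on its own; the predicate, error values and address strings below are invented for illustration.

package main

import (
	"errors"
	"fmt"
	"strings"
)

type addrError struct {
	Address string
	Cause   error
}

var errNoTransport = errors.New("no transport for address")

// filterWithErrors keeps the addresses that pass keep and records a cause for
// every address it rejects, mirroring the goodAddrs/addrErrs split above.
func filterWithErrors(addrs []string, keep func(string) bool, cause error) (good []string, addrErrs []addrError) {
	for _, a := range addrs {
		if keep(a) {
			good = append(good, a)
			continue
		}
		addrErrs = append(addrErrs, addrError{Address: a, Cause: cause})
	}
	return good, addrErrs
}

func main() {
	addrs := []string{"/ip4/1.2.3.4/tcp/1", "/ip4/1.2.3.4/udp/1/quic"}
	good, bad := filterWithErrors(addrs, func(a string) bool {
		return !strings.HasSuffix(a, "/quic") // pretend the draft-29 QUIC codec is unsupported
	}, errNoTransport)
	fmt.Println(good, bad)
}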
@ -128,7 +128,7 @@ type MetricsTracer interface {
OpenedConnection(network.Direction, crypto.PubKey, network.ConnectionState, ma.Multiaddr)
ClosedConnection(network.Direction, time.Duration, network.ConnectionState, ma.Multiaddr)
CompletedHandshake(time.Duration, network.ConnectionState, ma.Multiaddr)
FailedDialing(ma.Multiaddr, error)
FailedDialing(ma.Multiaddr, error, error)
DialCompleted(success bool, totalDials int)
DialRankingDelay(d time.Duration)
UpdatedBlackHoleFilterState(name string, state blackHoleState, nextProbeAfter int, successFraction float64)
@ -216,18 +216,28 @@ func (m *metricsTracer) CompletedHandshake(t time.Duration, cs network.Connectio
connHandshakeLatency.WithLabelValues(*tags...).Observe(t.Seconds())
}

func (m *metricsTracer) FailedDialing(addr ma.Multiaddr, err error) {
func (m *metricsTracer) FailedDialing(addr ma.Multiaddr, dialErr error, cause error) {
transport := metricshelper.GetTransport(addr)
e := "other"
if errors.Is(err, context.Canceled) {
e = "canceled"
} else if errors.Is(err, context.DeadlineExceeded) {
// dial deadline exceeded or the the parent contexts deadline exceeded
if errors.Is(dialErr, context.DeadlineExceeded) || errors.Is(cause, context.DeadlineExceeded) {
e = "deadline"
} else if errors.Is(dialErr, context.Canceled) {
// dial was cancelled.
if errors.Is(cause, context.Canceled) {
// parent context was canceled
e = "application canceled"
} else if errors.Is(cause, errConcurrentDialSuccessful) {
e = "canceled: concurrent dial successful"
} else {
nerr, ok := err.(net.Error)
// something else
e = "canceled: other"
}
} else {
nerr, ok := dialErr.(net.Error)
if ok && nerr.Timeout() {
e = "timeout"
} else if strings.Contains(err.Error(), "connect: connection refused") {
} else if strings.Contains(dialErr.Error(), "connect: connection refused") {
e = "connection refused"
}
}
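The reworked FailedDialing distinguishes the dial error itself from the context cancellation cause. A standalone sketch of that classification using only the standard library is given below; the label strings follow the diff, but the function and its use are illustrative, not the vendored metrics code.

package main

import (
	"context"
	"errors"
	"fmt"
	"net"
)

var errConcurrentDialSuccessful = errors.New("concurrent dial successful")

// classify maps a dial error plus the context cause to a metrics label,
// following the same precedence as the diff: deadline first, then
// cancellation split by its cause, then network timeout.
func classify(dialErr, cause error) string {
	switch {
	case errors.Is(dialErr, context.DeadlineExceeded) || errors.Is(cause, context.DeadlineExceeded):
		return "deadline"
	case errors.Is(dialErr, context.Canceled):
		switch {
		case errors.Is(cause, context.Canceled):
			return "application canceled"
		case errors.Is(cause, errConcurrentDialSuccessful):
			return "canceled: concurrent dial successful"
		default:
			return "canceled: other"
		}
	default:
		var nerr net.Error
		if errors.As(dialErr, &nerr) && nerr.Timeout() {
			return "timeout"
		}
		return "other"
	}
}

func main() {
	fmt.Println(classify(context.Canceled, errConcurrentDialSuccessful))
}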
@ -22,7 +22,10 @@ type Stream struct {
conn *Conn
scope network.StreamManagementScope

closeOnce sync.Once
closeMx sync.Mutex
isClosed bool
// acceptStreamGoroutineCompleted indicates whether the goroutine handling the incoming stream has exited
acceptStreamGoroutineCompleted bool

protocol atomic.Pointer[protocol.ID]

@ -76,7 +79,7 @@ func (s *Stream) Write(p []byte) (int, error) {
// resources.
func (s *Stream) Close() error {
err := s.stream.Close()
s.closeOnce.Do(s.remove)
s.closeAndRemoveStream()
return err
}

@ -84,10 +87,25 @@ func (s *Stream) Close() error {
// associated resources.
func (s *Stream) Reset() error {
err := s.stream.Reset()
s.closeOnce.Do(s.remove)
s.closeAndRemoveStream()
return err
}

func (s *Stream) closeAndRemoveStream() {
s.closeMx.Lock()
defer s.closeMx.Unlock()
if s.isClosed {
return
}
s.isClosed = true
// We don't want to keep swarm from closing till the stream handler has exited
s.conn.swarm.refs.Done()
// Cleanup the stream from connection only after the stream handler has completed
if s.acceptStreamGoroutineCompleted {
s.conn.removeStream(s)
}
}

// CloseWrite closes the stream for writing, flushing all data and sending an EOF.
// This function does not free resources, call Close or Reset when done with the
// stream.
@ -101,9 +119,16 @@ func (s *Stream) CloseRead() error {
return s.stream.CloseRead()
}

func (s *Stream) remove() {
func (s *Stream) completeAcceptStreamGoroutine() {
s.closeMx.Lock()
defer s.closeMx.Unlock()
if s.acceptStreamGoroutineCompleted {
return
}
s.acceptStreamGoroutineCompleted = true
if s.isClosed {
s.conn.removeStream(s)
s.conn.swarm.refs.Done()
}
}

// Protocol returns the protocol negotiated on this stream (if set).
@ -152,7 +152,8 @@ func (u *upgrader) upgrade(ctx context.Context, t transport.Transport, maconn ma
return nil, ipnet.ErrNotInPrivateNetwork
}

sconn, security, server, err := u.setupSecurity(ctx, conn, p, dir)
isServer := dir == network.DirInbound
sconn, security, err := u.setupSecurity(ctx, conn, p, isServer)
if err != nil {
conn.Close()
return nil, fmt.Errorf("failed to negotiate security protocol: %w", err)
@ -179,7 +180,7 @@ func (u *upgrader) upgrade(ctx context.Context, t transport.Transport, maconn ma
}
}

muxer, smconn, err := u.setupMuxer(ctx, sconn, server, connScope.PeerScope())
muxer, smconn, err := u.setupMuxer(ctx, sconn, isServer, connScope.PeerScope())
if err != nil {
sconn.Close()
return nil, fmt.Errorf("failed to negotiate stream multiplexer: %w", err)
@ -199,20 +200,17 @@ func (u *upgrader) upgrade(ctx context.Context, t transport.Transport, maconn ma
return tc, nil
}

func (u *upgrader) setupSecurity(ctx context.Context, conn net.Conn, p peer.ID, dir network.Direction) (sec.SecureConn, protocol.ID, bool, error) {
isServer := dir == network.DirInbound
var st sec.SecureTransport
var err error
st, isServer, err = u.negotiateSecurity(ctx, conn, isServer)
func (u *upgrader) setupSecurity(ctx context.Context, conn net.Conn, p peer.ID, isServer bool) (sec.SecureConn, protocol.ID, error) {
st, err := u.negotiateSecurity(ctx, conn, isServer)
if err != nil {
return nil, "", false, err
return nil, "", err
}
if isServer {
sconn, err := st.SecureInbound(ctx, conn, p)
return sconn, st.ID(), true, err
return sconn, st.ID(), err
}
sconn, err := st.SecureOutbound(ctx, conn, p)
return sconn, st.ID(), false, err
return sconn, st.ID(), err
}

func (u *upgrader) negotiateMuxer(nc net.Conn, isServer bool) (*StreamMuxer, error) {
@ -308,10 +306,9 @@ func (u *upgrader) getSecurityByID(id protocol.ID) sec.SecureTransport {
return nil
}

func (u *upgrader) negotiateSecurity(ctx context.Context, insecure net.Conn, server bool) (sec.SecureTransport, bool, error) {
func (u *upgrader) negotiateSecurity(ctx context.Context, insecure net.Conn, server bool) (sec.SecureTransport, error) {
type result struct {
proto protocol.ID
iamserver bool
err error
}

@ -319,30 +316,28 @@ func (u *upgrader) negotiateSecurity(ctx context.Context, insecure net.Conn, ser
go func() {
if server {
var r result
r.iamserver = true
r.proto, _, r.err = u.securityMuxer.Negotiate(insecure)
done <- r
return
}
var r result
r.proto, r.iamserver, r.err = mss.SelectWithSimopenOrFail(u.securityIDs, insecure)
r.proto, r.err = mss.SelectOneOf(u.securityIDs, insecure)
done <- r
}()

select {
case r := <-done:
if r.err != nil {
return nil, false, r.err
return nil, r.err
}
if s := u.getSecurityByID(r.proto); s != nil {
return s, r.iamserver, nil
return s, nil
}
return nil, false, fmt.Errorf("selected unknown security transport: %s", r.proto)
return nil, fmt.Errorf("selected unknown security transport: %s", r.proto)
case <-ctx.Done():
// We *must* do this. We have outstanding work on the connection
// and it's no longer safe to use.
// We *must* do this. We have outstanding work on the connection, and it's no longer safe to use.
insecure.Close()
<-done // wait to stop using the connection.
return nil, false, ctx.Err()
return nil, ctx.Err()
}
}
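negotiateSecurity races the protocol negotiation against ctx.Done and, crucially, closes the raw connection before returning on cancellation so the negotiating goroutine can unblock. A stripped-down sketch of that shape follows; the negotiate callback and the pipe-based usage are stand-ins, not the multistream-select API.

package main

import (
	"context"
	"fmt"
	"io"
	"net"
	"time"
)

// negotiateWithContext runs negotiate in a goroutine and honours ctx. On
// cancellation it must close the connection and wait for the goroutine,
// because the connection is no longer safe to use once negotiation may have
// written to it.
func negotiateWithContext(ctx context.Context, conn net.Conn, negotiate func(io.ReadWriter) (string, error)) (string, error) {
	type result struct {
		proto string
		err   error
	}
	done := make(chan result, 1)
	go func() {
		proto, err := negotiate(conn)
		done <- result{proto, err}
	}()
	select {
	case r := <-done:
		return r.proto, r.err
	case <-ctx.Done():
		conn.Close()
		<-done // wait for the goroutine to stop using conn
		return "", ctx.Err()
	}
}

func main() {
	a, b := net.Pipe()
	defer b.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	_, err := negotiateWithContext(ctx, a, func(rw io.ReadWriter) (string, error) {
		buf := make([]byte, 1) // blocks: the other side never writes
		_, rerr := rw.Read(buf)
		return "", rerr
	})
	fmt.Println(err) // context deadline exceeded
}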
@ -74,7 +74,10 @@ func (c *constraints) AddReservation(p peer.ID, a ma.Multiaddr) error {

var asnReservations []time.Time
var asn string
if ip.To4() == nil {
// Only public addresses have an ASN. Skip checking ASN for private addresses as
// initialising the ASN store is a costly operation. Skipping this check reduces a lot of
// flakiness in tests
if ip.To4() == nil && manet.IsPublicAddr(a) {
asn, _ = asnutil.Store.AsnForIPv6(ip)
if asn != "" {
asnReservations = c.asns[asn]
@ -556,7 +556,7 @@ func readAllIDMessages(r pbio.Reader, finalMsg proto.Message) error {

func (ids *idService) updateSnapshot() (updated bool) {
addrs := ids.Host.Addrs()
slices.SortFunc(addrs, func(a, b ma.Multiaddr) bool { return bytes.Compare(a.Bytes(), b.Bytes()) == -1 })
slices.SortFunc(addrs, func(a, b ma.Multiaddr) int { return bytes.Compare(a.Bytes(), b.Bytes()) })
protos := ids.Host.Mux().Protocols()
slices.Sort(protos)
snapshot := identifySnapshot{