package swarm

import (
	"context"
	"errors"
	"fmt"
	"io"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/libp2p/go-libp2p/core/connmgr"
	"github.com/libp2p/go-libp2p/core/event"
	"github.com/libp2p/go-libp2p/core/metrics"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/peerstore"
	"github.com/libp2p/go-libp2p/core/transport"

	"golang.org/x/exp/slices"

	logging "github.com/ipfs/go-log/v2"
	ma "github.com/multiformats/go-multiaddr"
	madns "github.com/multiformats/go-multiaddr-dns"
)

const (
	defaultDialTimeout = 15 * time.Second

	// defaultDialTimeoutLocal is the maximum duration a Dial to a local network
	// address is allowed to take.
	// This includes the time between dialing the raw network connection,
	// protocol selection, as well as the handshake, if applicable.
	defaultDialTimeoutLocal = 5 * time.Second
)

var log = logging.Logger("swarm2")

// ErrSwarmClosed is returned when one attempts to operate on a closed swarm.
var ErrSwarmClosed = errors.New("swarm closed")

// ErrAddrFiltered is returned when trying to register a connection to a
// filtered address. You shouldn't see this error unless some underlying
// transport is misbehaving.
var ErrAddrFiltered = errors.New("address filtered")

// ErrDialTimeout is returned when a dial times out due to the global timeout.
var ErrDialTimeout = errors.New("dial timed out")

type Option func(*Swarm) error

// WithConnectionGater sets a connection gater.
func WithConnectionGater(gater connmgr.ConnectionGater) Option {
	return func(s *Swarm) error {
		s.gater = gater
		return nil
	}
}

// WithMultiaddrResolver sets a custom multiaddress resolver.
func WithMultiaddrResolver(maResolver *madns.Resolver) Option {
	return func(s *Swarm) error {
		s.maResolver = maResolver
		return nil
	}
}

// WithMetrics sets a metrics reporter.
func WithMetrics(reporter metrics.Reporter) Option {
	return func(s *Swarm) error {
		s.bwc = reporter
		return nil
	}
}

// WithMetricsTracer sets a MetricsTracer for swarm-level metrics.
func WithMetricsTracer(t MetricsTracer) Option {
	return func(s *Swarm) error {
		s.metricsTracer = t
		return nil
	}
}

// WithDialTimeout overrides the default dial timeout.
func WithDialTimeout(t time.Duration) Option {
	return func(s *Swarm) error {
		s.dialTimeout = t
		return nil
	}
}

// WithDialTimeoutLocal overrides the default dial timeout for local network addresses.
func WithDialTimeoutLocal(t time.Duration) Option {
	return func(s *Swarm) error {
		s.dialTimeoutLocal = t
		return nil
	}
}

// WithResourceManager sets the network.ResourceManager used by the swarm.
func WithResourceManager(m network.ResourceManager) Option {
	return func(s *Swarm) error {
		s.rcmgr = m
		return nil
	}
}

// WithDialRanker configures swarm to use d as the DialRanker.
func WithDialRanker(d network.DialRanker) Option {
	return func(s *Swarm) error {
		if d == nil {
			return errors.New("swarm: dial ranker cannot be nil")
		}
		s.dialRanker = d
		return nil
	}
}

// WithUDPBlackHoleConfig configures UDP black hole detection.
// enabled turns detection on or off, n is the size of the sliding window used
// to evaluate the black hole state, and min is the minimum number of successes
// out of n required to not block requests.
func WithUDPBlackHoleConfig(enabled bool, n, min int) Option {
	return func(s *Swarm) error {
		s.udpBlackHoleConfig = blackHoleConfig{Enabled: enabled, N: n, MinSuccesses: min}
		return nil
	}
}

// WithIPv6BlackHoleConfig configures IPv6 black hole detection.
// enabled turns detection on or off, n is the size of the sliding window used
// to evaluate the black hole state, and min is the minimum number of successes
// out of n required to not block requests.
func WithIPv6BlackHoleConfig(enabled bool, n, min int) Option {
	return func(s *Swarm) error {
		s.ipv6BlackHoleConfig = blackHoleConfig{Enabled: enabled, N: n, MinSuccesses: min}
		return nil
	}
}
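
// Usage note (illustrative sketch, not part of the package API; the concrete
// values below are hypothetical): options are plain functions that NewSwarm
// applies in order, so they compose freely:
//
//	sw, err := NewSwarm(pid, ps, bus,
//		WithDialTimeout(10*time.Second),
//		WithUDPBlackHoleConfig(true, 100, 5),
//		WithConnectionGater(gater),
//	)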

// Swarm is a connection muxer, allowing connections to other peers to be
// opened and closed, while multiplexing many streams over each connection.
type Swarm struct {
	nextConnID   atomic.Uint64
	nextStreamID atomic.Uint64

	// Close refcount. This allows us to fully wait for the swarm to be torn
	// down before continuing.
	refs sync.WaitGroup

	emitter event.Emitter

	rcmgr network.ResourceManager

	local peer.ID
	peers peerstore.Peerstore

	dialTimeout      time.Duration
	dialTimeoutLocal time.Duration

	conns struct {
		sync.RWMutex
		m map[peer.ID][]*Conn
	}

	listeners struct {
		sync.RWMutex

		ifaceListenAddres []ma.Multiaddr
		cacheEOL          time.Time

		m map[transport.Listener]struct{}
	}

	notifs struct {
		sync.RWMutex
		m map[network.Notifiee]struct{}
	}

	directConnNotifs struct {
		sync.Mutex
		m map[peer.ID][]chan struct{}
	}

	transports struct {
		sync.RWMutex
		m map[int]transport.Transport
	}

	maResolver *madns.Resolver

	// stream handlers
	streamh atomic.Pointer[network.StreamHandler]

	// dialing helpers
	dsync   *dialSync
	backf   DialBackoff
	limiter *dialLimiter
	gater   connmgr.ConnectionGater

	closeOnce sync.Once
	ctx       context.Context // is canceled when Close is called
	ctxCancel context.CancelFunc

	bwc           metrics.Reporter
	metricsTracer MetricsTracer

	dialRanker network.DialRanker

	udpBlackHoleConfig  blackHoleConfig
	ipv6BlackHoleConfig blackHoleConfig
	bhd                 *blackHoleDetector

	connectednessEventEmitter *connectednessEventEmitter
}

// NewSwarm constructs a Swarm.
func NewSwarm(local peer.ID, peers peerstore.Peerstore, eventBus event.Bus, opts ...Option) (*Swarm, error) {
	emitter, err := eventBus.Emitter(new(event.EvtPeerConnectednessChanged))
	if err != nil {
		return nil, err
	}
	ctx, cancel := context.WithCancel(context.Background())
	s := &Swarm{
		local:            local,
		peers:            peers,
		emitter:          emitter,
		ctx:              ctx,
		ctxCancel:        cancel,
		dialTimeout:      defaultDialTimeout,
		dialTimeoutLocal: defaultDialTimeoutLocal,
		maResolver:       madns.DefaultResolver,
		dialRanker:       DefaultDialRanker,

		// A black hole is a binary property. On a network, if UDP dials are blocked or there is
		// no IPv6 connectivity, all dials will fail. So a low success rate of 5 out of 100 dials
		// is good enough.
		udpBlackHoleConfig:  blackHoleConfig{Enabled: true, N: 100, MinSuccesses: 5},
		ipv6BlackHoleConfig: blackHoleConfig{Enabled: true, N: 100, MinSuccesses: 5},
	}

	s.conns.m = make(map[peer.ID][]*Conn)
	s.listeners.m = make(map[transport.Listener]struct{})
	s.transports.m = make(map[int]transport.Transport)
	s.notifs.m = make(map[network.Notifiee]struct{})
	s.directConnNotifs.m = make(map[peer.ID][]chan struct{})
	s.connectednessEventEmitter = newConnectednessEventEmitter(s.Connectedness, emitter)

	for _, opt := range opts {
		if err := opt(s); err != nil {
			return nil, err
		}
	}
	if s.rcmgr == nil {
		s.rcmgr = &network.NullResourceManager{}
	}

	s.dsync = newDialSync(s.dialWorkerLoop)
	s.limiter = newDialLimiter(s.dialAddr)
	s.backf.init(s.ctx)
	s.bhd = newBlackHoleDetector(s.udpBlackHoleConfig, s.ipv6BlackHoleConfig, s.metricsTracer)
	return s, nil
}
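
// Illustrative construction sketch. It assumes go-libp2p's eventbus and
// pstoremem helper packages (import paths may vary across versions):
//
//	ps, _ := pstoremem.NewPeerstore()
//	sw, err := NewSwarm(localID, ps, eventbus.NewBus(),
//		WithDialTimeout(10*time.Second))
//	if err != nil {
//		// handle error
//	}
//	defer sw.Close()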

func (s *Swarm) Close() error {
	s.closeOnce.Do(s.close)
	return nil
}

// Done returns a channel that is closed when the swarm is closed.
func (s *Swarm) Done() <-chan struct{} {
	return s.ctx.Done()
}
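
// Illustrative sketch: Done lets background workers tie their lifetime to the
// swarm's (the task channel and handler are hypothetical):
//
//	go func() {
//		for {
//			select {
//			case <-sw.Done():
//				return // swarm closed; stop working
//			case t := <-tasks:
//				handle(t)
//			}
//		}
//	}()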

func (s *Swarm) close() {
	s.ctxCancel()

	// Prevents new connections and/or listeners from being added to the swarm.
	s.listeners.Lock()
	listeners := s.listeners.m
	s.listeners.m = nil
	s.listeners.Unlock()

	s.conns.Lock()
	conns := s.conns.m
	s.conns.m = nil
	s.conns.Unlock()

	// Lots of goroutines, but we might as well do this in parallel. We want to shut down as fast as
	// possible.
	s.refs.Add(len(listeners))
	for l := range listeners {
		go func(l transport.Listener) {
			defer s.refs.Done()
			if err := l.Close(); err != nil && err != transport.ErrListenerClosed {
				log.Errorf("error when shutting down listener: %s", err)
			}
		}(l)
	}

	for _, cs := range conns {
		for _, c := range cs {
			go func(c *Conn) {
				if err := c.Close(); err != nil {
					log.Errorf("error when shutting down connection: %s", err)
				}
			}(c)
		}
	}

	// Wait for everything to finish.
	s.refs.Wait()
	s.connectednessEventEmitter.Close()
	s.emitter.Close()

	// Now close out any transports (if necessary). Do this after closing
	// all connections/listeners.
	s.transports.Lock()
	transports := s.transports.m
	s.transports.m = nil
	s.transports.Unlock()

	// Dedup transports that may be listening on multiple protocols
	transportsToClose := make(map[transport.Transport]struct{}, len(transports))
	for _, t := range transports {
		transportsToClose[t] = struct{}{}
	}

	var wg sync.WaitGroup
	for t := range transportsToClose {
		if closer, ok := t.(io.Closer); ok {
			wg.Add(1)
			go func(c io.Closer) {
				defer wg.Done()
				if err := c.Close(); err != nil {
					log.Errorf("error when closing down transport %T: %s", c, err)
				}
			}(closer)
		}
	}
	wg.Wait()
}

func (s *Swarm) addConn(tc transport.CapableConn, dir network.Direction) (*Conn, error) {
	var (
		p    = tc.RemotePeer()
		addr = tc.RemoteMultiaddr()
	)

	// create the Stat object, initializing with the underlying connection Stat if available
	var stat network.ConnStats
	if cs, ok := tc.(network.ConnStat); ok {
		stat = cs.Stat()
	}
	stat.Direction = dir
	stat.Opened = time.Now()
	isLimited := stat.Limited

	// Wrap and register the connection.
	c := &Conn{
		conn:  tc,
		swarm: s,
		stat:  stat,
		id:    s.nextConnID.Add(1),
	}

	// We ONLY check upgraded connections here so we can send them a Disconnect message.
	// If we checked in the Upgrader instead, we would not be able to do this.
	if s.gater != nil {
		if allow, _ := s.gater.InterceptUpgraded(c); !allow {
			// TODO Send disconnect with reason here
			err := tc.Close()
			if err != nil {
				log.Warnf("failed to close connection with peer %s and addr %s; err: %s", p, addr, err)
			}
			return nil, ErrGaterDisallowedConnection
		}
	}

	// Add the public key.
	if pk := tc.RemotePublicKey(); pk != nil {
		s.peers.AddPubKey(p, pk)
	}

	// Clear any backoffs
	s.backf.Clear(p)

	// Finally, add the peer.
	s.conns.Lock()
	// Check if we're still online
	if s.conns.m == nil {
		s.conns.Unlock()
		tc.Close()
		return nil, ErrSwarmClosed
	}

	c.streams.m = make(map[*Stream]struct{})
	s.conns.m[p] = append(s.conns.m[p], c)

	// Add two swarm refs:
	// * One will be decremented after the close notifications fire in Conn.doClose
	// * The other will be decremented when Conn.start exits.
	s.refs.Add(2)

	// Take the notification lock before releasing the conns lock to block
	// Disconnect notifications until after the Connect notifications are done.
	// This lock also ensures that swarm.refs.Wait() exits after we have
	// enqueued the peer connectedness changed notification.
	// TODO: Fix this fragility by taking a swarm ref for dial worker loop
	c.notifyLk.Lock()
	s.conns.Unlock()

	s.connectednessEventEmitter.AddConn(p)

	if !isLimited {
		// Notify goroutines waiting for a direct connection.
		//
		// Goroutines interested in waiting for a direct connection first acquire this lock
		// and then acquire s.conns.RLock. Do not acquire this lock before conns.Unlock to
		// prevent deadlock.
		s.directConnNotifs.Lock()
		for _, ch := range s.directConnNotifs.m[p] {
			close(ch)
		}
		delete(s.directConnNotifs.m, p)
		s.directConnNotifs.Unlock()
	}
	s.notifyAll(func(f network.Notifiee) {
		f.Connected(s, c)
	})
	c.notifyLk.Unlock()

	c.start()
	return c, nil
}

// Peerstore returns this swarm's internal Peerstore.
func (s *Swarm) Peerstore() peerstore.Peerstore {
	return s.peers
}

// SetStreamHandler assigns the handler for new streams.
func (s *Swarm) SetStreamHandler(handler network.StreamHandler) {
	s.streamh.Store(&handler)
}

// StreamHandler gets the handler for new streams.
func (s *Swarm) StreamHandler() network.StreamHandler {
	handler := s.streamh.Load()
	if handler == nil {
		return nil
	}
	return *handler
}

// NewStream creates a new stream on any available connection to peer, dialing
// if necessary.
// Use network.WithAllowLimitedConn to open a stream over a limited (relayed)
// connection.
func (s *Swarm) NewStream(ctx context.Context, p peer.ID) (network.Stream, error) {
	log.Debugf("[%s] opening stream to peer [%s]", s.local, p)

	// Algorithm:
	// 1. Find the best connection, otherwise, dial.
	// 2. If the best connection is limited, wait for a direct conn via conn
	//    reversal or hole punching.
	// 3. Try opening a stream.
	// 4. If the underlying connection is, in fact, closed, close the outer
	//    connection and try again. We do this in case we have a closed
	//    connection but don't notice it until we actually try to open a
	//    stream.
	//
	// TODO: Try all connections even if we get an error opening a stream on
	// a non-closed connection.
	numDials := 0
	for {
		c := s.bestConnToPeer(p)
		if c == nil {
			if nodial, _ := network.GetNoDial(ctx); !nodial {
				numDials++
				if numDials > DialAttempts {
					return nil, errors.New("max dial attempts exceeded")
				}
				var err error
				c, err = s.dialPeer(ctx, p)
				if err != nil {
					return nil, err
				}
			} else {
				return nil, network.ErrNoConn
			}
		}

		limitedAllowed, _ := network.GetAllowLimitedConn(ctx)
		if !limitedAllowed && c.Stat().Limited {
			var err error
			c, err = s.waitForDirectConn(ctx, p)
			if err != nil {
				return nil, err
			}
		}

		str, err := c.NewStream(ctx)
		if err != nil {
			if c.conn.IsClosed() {
				continue
			}
			return nil, err
		}
		return str, nil
	}
}
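
// Illustrative sketch of the context knobs NewStream honors (the helpers are
// from go-libp2p's network package):
//
//	// Fail with network.ErrNoConn instead of dialing:
//	str, err := sw.NewStream(network.WithNoDial(ctx, "use existing conn"), p)
//
//	// Permit the stream to run over a limited (relayed) connection:
//	str, err = sw.NewStream(network.WithAllowLimitedConn(ctx, "identify"), p)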

// waitForDirectConn waits for a direct connection established through hole punching or connection reversal.
func (s *Swarm) waitForDirectConn(ctx context.Context, p peer.ID) (*Conn, error) {
	s.directConnNotifs.Lock()
	c := s.bestConnToPeer(p)
	if c == nil {
		s.directConnNotifs.Unlock()
		return nil, network.ErrNoConn
	} else if !c.Stat().Limited {
		s.directConnNotifs.Unlock()
		return c, nil
	}

	// Wait for the limited connection to upgrade to a direct connection either by
	// connection reversal or hole punching.
	ch := make(chan struct{})
	s.directConnNotifs.m[p] = append(s.directConnNotifs.m[p], ch)
	s.directConnNotifs.Unlock()

	// apply the DialPeer timeout
	ctx, cancel := context.WithTimeout(ctx, network.GetDialPeerTimeout(ctx))
	defer cancel()

	// Wait for notification.
	select {
	case <-ctx.Done():
		// Remove ourselves from the notification list
		s.directConnNotifs.Lock()
		defer s.directConnNotifs.Unlock()

		s.directConnNotifs.m[p] = slices.DeleteFunc(
			s.directConnNotifs.m[p],
			func(c chan struct{}) bool { return c == ch },
		)
		if len(s.directConnNotifs.m[p]) == 0 {
			delete(s.directConnNotifs.m, p)
		}
		return nil, ctx.Err()
	case <-ch:
		// We do not need to remove ourselves from the list here, as the notifier
		// clears the map entry.
		c := s.bestConnToPeer(p)
		if c == nil {
			return nil, network.ErrNoConn
		}
		if c.Stat().Limited {
			return nil, network.ErrLimitedConn
		}
		return c, nil
	}
}

// ConnsToPeer returns all the live connections to peer.
func (s *Swarm) ConnsToPeer(p peer.ID) []network.Conn {
	// TODO: Consider sorting the connection list best to worst. Currently,
	// it's sorted oldest to newest.
	s.conns.RLock()
	defer s.conns.RUnlock()
	conns := s.conns.m[p]
	output := make([]network.Conn, len(conns))
	for i, c := range conns {
		output[i] = c
	}
	return output
}

func isBetterConn(a, b *Conn) bool {
	// If one is limited and not the other, prefer the unlimited connection.
	aLimited := a.Stat().Limited
	bLimited := b.Stat().Limited
	if aLimited != bLimited {
		return !aLimited
	}

	// If one is direct and not the other, prefer the direct connection.
	aDirect := isDirectConn(a)
	bDirect := isDirectConn(b)
	if aDirect != bDirect {
		return aDirect
	}

	// Otherwise, prefer the connection with more open streams.
	a.streams.Lock()
	aLen := len(a.streams.m)
	a.streams.Unlock()

	b.streams.Lock()
	bLen := len(b.streams.m)
	b.streams.Unlock()

	if aLen != bLen {
		return aLen > bLen
	}

	// finally, pick the last connection.
	return true
}

// bestConnToPeer returns the best connection to peer.
func (s *Swarm) bestConnToPeer(p peer.ID) *Conn {
	// TODO: Prefer some transports over others.
	// For now, prefers direct connections over relayed connections.
	// For tie-breaking, select the newest non-closed connection with the most streams.
	s.conns.RLock()
	defer s.conns.RUnlock()

	var best *Conn
	for _, c := range s.conns.m[p] {
		if c.conn.IsClosed() {
			// We *will* garbage collect this soon anyway.
			continue
		}
		if best == nil || isBetterConn(c, best) {
			best = c
		}
	}
	return best
}

// bestAcceptableConnToPeer returns the best acceptable connection, considering the passed in ctx.
// If network.WithForceDirectDial is used, it only returns a direct connection, ignoring
// any limited (relayed) connections to the peer.
func (s *Swarm) bestAcceptableConnToPeer(ctx context.Context, p peer.ID) *Conn {
	conn := s.bestConnToPeer(p)

	forceDirect, _ := network.GetForceDirectDial(ctx)
	if forceDirect && !isDirectConn(conn) {
		return nil
	}
	return conn
}

func isDirectConn(c *Conn) bool {
	return c != nil && !c.conn.Transport().Proxy()
}
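
// Illustrative sketch (helper from the network package): marking a context
// with WithForceDirectDial makes connection selection here ignore limited
// (relayed) connections entirely:
//
//	ctx = network.WithForceDirectDial(ctx, "require direct path")
//	// bestAcceptableConnToPeer now returns nil unless a direct conn exists.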

// Connectedness returns our "connectedness" state with the given peer.
//
// To check if we have an open connection, use `s.Connectedness(p) ==
// network.Connected`.
func (s *Swarm) Connectedness(p peer.ID) network.Connectedness {
	s.conns.RLock()
	defer s.conns.RUnlock()

	return s.connectednessUnlocked(p)
}

// connectednessUnlocked returns the connectedness of a peer.
func (s *Swarm) connectednessUnlocked(p peer.ID) network.Connectedness {
	var haveLimited bool
	for _, c := range s.conns.m[p] {
		if c.IsClosed() {
			// These will be garbage collected soon
			continue
		}
		if c.Stat().Limited {
			haveLimited = true
		} else {
			return network.Connected
		}
	}
	if haveLimited {
		return network.Limited
	}
	return network.NotConnected
}
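
// Illustrative sketch: distinguishing a usable direct connection from a
// relay-only one (the constants are from the network package):
//
//	switch sw.Connectedness(p) {
//	case network.Connected:
//		// at least one non-limited connection is open
//	case network.Limited:
//		// only limited (relayed) connections are open
//	case network.NotConnected:
//		// no live connections
//	}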

// Conns returns a slice of all connections.
func (s *Swarm) Conns() []network.Conn {
	s.conns.RLock()
	defer s.conns.RUnlock()

	conns := make([]network.Conn, 0, len(s.conns.m))
	for _, cs := range s.conns.m {
		for _, c := range cs {
			conns = append(conns, c)
		}
	}
	return conns
}

// ClosePeer closes all connections to the given peer.
func (s *Swarm) ClosePeer(p peer.ID) error {
	conns := s.ConnsToPeer(p)
	switch len(conns) {
	case 0:
		return nil
	case 1:
		return conns[0].Close()
	default:
		errCh := make(chan error)
		for _, c := range conns {
			go func(c network.Conn) {
				errCh <- c.Close()
			}(c)
		}

		var errs []string
		for range conns {
			err := <-errCh
			if err != nil {
				errs = append(errs, err.Error())
			}
		}
		if len(errs) > 0 {
			return fmt.Errorf("when disconnecting from peer %s: %s", p, strings.Join(errs, ", "))
		}
		return nil
	}
}

// Peers returns a copy of the set of peers swarm is connected to.
func (s *Swarm) Peers() []peer.ID {
	s.conns.RLock()
	defer s.conns.RUnlock()
	peers := make([]peer.ID, 0, len(s.conns.m))
	for p := range s.conns.m {
		peers = append(peers, p)
	}

	return peers
}

// LocalPeer returns the local peer the swarm is associated with.
func (s *Swarm) LocalPeer() peer.ID {
	return s.local
}

// Backoff returns the DialBackoff object for this swarm.
func (s *Swarm) Backoff() *DialBackoff {
	return &s.backf
}

// notifyAll sends a signal to all Notifiees.
func (s *Swarm) notifyAll(notify func(network.Notifiee)) {
	s.notifs.RLock()
	for f := range s.notifs.m {
		notify(f)
	}
	s.notifs.RUnlock()
}

// Notify signs up Notifiee to receive signals when events happen.
func (s *Swarm) Notify(f network.Notifiee) {
	s.notifs.Lock()
	s.notifs.m[f] = struct{}{}
	s.notifs.Unlock()
}

// StopNotify unregisters Notifiee from receiving signals.
func (s *Swarm) StopNotify(f network.Notifiee) {
	s.notifs.Lock()
	delete(s.notifs.m, f)
	s.notifs.Unlock()
}
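
// Illustrative sketch: network.NotifyBundle implements network.Notifiee from
// optional callbacks, which keeps registration lightweight:
//
//	sw.Notify(&network.NotifyBundle{
//		ConnectedF: func(_ network.Network, c network.Conn) {
//			log.Debugf("connected to %s", c.RemotePeer())
//		},
//		DisconnectedF: func(_ network.Network, c network.Conn) {
//			log.Debugf("disconnected from %s", c.RemotePeer())
//		},
//	})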

func (s *Swarm) removeConn(c *Conn) {
	p := c.RemotePeer()

	s.conns.Lock()
	cs := s.conns.m[p]
	for i, ci := range cs {
		if ci == c {
			// NOTE: We're intentionally preserving order.
			// This way, connections to a peer are always
			// sorted oldest to newest.
			copy(cs[i:], cs[i+1:])
			cs[len(cs)-1] = nil
			s.conns.m[p] = cs[:len(cs)-1]
			break
		}
	}
	if len(s.conns.m[p]) == 0 {
		delete(s.conns.m, p)
	}
	s.conns.Unlock()
}

// String returns a string representation of Network.
func (s *Swarm) String() string {
	return fmt.Sprintf("<Swarm %s>", s.LocalPeer())
}

func (s *Swarm) ResourceManager() network.ResourceManager {
	return s.rcmgr
}

// Swarm is a Network.
var _ network.Network = (*Swarm)(nil)
var _ transport.TransportNetwork = (*Swarm)(nil)

type connWithMetrics struct {
	transport.CapableConn
	opened        time.Time
	dir           network.Direction
	metricsTracer MetricsTracer
}

func wrapWithMetrics(capableConn transport.CapableConn, metricsTracer MetricsTracer, opened time.Time, dir network.Direction) connWithMetrics {
	c := connWithMetrics{CapableConn: capableConn, opened: opened, dir: dir, metricsTracer: metricsTracer}
	c.metricsTracer.OpenedConnection(c.dir, capableConn.RemotePublicKey(), capableConn.ConnState(), capableConn.LocalMultiaddr())
	return c
}

func (c connWithMetrics) completedHandshake() {
	c.metricsTracer.CompletedHandshake(time.Since(c.opened), c.ConnState(), c.LocalMultiaddr())
}

func (c connWithMetrics) Close() error {
	c.metricsTracer.ClosedConnection(c.dir, time.Since(c.opened), c.ConnState(), c.LocalMultiaddr())
	return c.CapableConn.Close()
}

func (c connWithMetrics) Stat() network.ConnStats {
	if cs, ok := c.CapableConn.(network.ConnStat); ok {
		return cs.Stat()
	}
	return network.ConnStats{}
}

var _ network.ConnStat = connWithMetrics{}