Merge pull request #278 from libp2p/remove-goprocess

stop using goprocess to control teardown
Marten Seemann 2021-09-08 17:32:14 +01:00 committed by GitHub
commit a93170bad5
12 changed files with 102 additions and 202 deletions
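
At its core, the change replaces the swarm's goprocess.Process with a context.Context that is canceled when Close is called, plus a sync.Once that makes Close idempotent. Reduced to a minimal, self-contained sketch of that pattern (the service type and helpers are illustrative, not the swarm's actual API):

```go
package teardown

import (
	"context"
	"sync"
)

// service stands in for the Swarm: it owns a context that is canceled
// exactly once, on the first call to Close.
type service struct {
	ctx       context.Context // canceled when Close is called
	ctxCancel context.CancelFunc
	closeOnce sync.Once
}

func newService() *service {
	ctx, cancel := context.WithCancel(context.Background())
	return &service{ctx: ctx, ctxCancel: cancel}
}

// Close is safe to call any number of times; teardown runs only once.
func (s *service) Close() error {
	s.closeOnce.Do(s.close)
	return nil
}

func (s *service) close() {
	s.ctxCancel()
	// ... stop listeners, close connections, wait for them (see below) ...
}
```

Anything that previously selected on proc.Closing() can now select on ctx.Done() instead.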

View File

@@ -7,21 +7,21 @@ import (
"testing"
"time"
addrutil "github.com/libp2p/go-addr-util"
. "github.com/libp2p/go-libp2p-swarm"
addrutil "github.com/libp2p/go-addr-util"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/libp2p/go-libp2p-core/peerstore"
"github.com/libp2p/go-libp2p-core/transport"
testutil "github.com/libp2p/go-libp2p-core/test"
"github.com/libp2p/go-libp2p-core/transport"
swarmt "github.com/libp2p/go-libp2p-swarm/testing"
"github.com/libp2p/go-libp2p-testing/ci"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
. "github.com/libp2p/go-libp2p-swarm"
"github.com/stretchr/testify/require"
)
func init() {
@@ -36,50 +36,37 @@ func closeSwarms(swarms []*Swarm) {
func TestBasicDialPeer(t *testing.T) {
t.Parallel()
ctx := context.Background()
swarms := makeSwarms(ctx, t, 2)
swarms := makeSwarms(t, 2)
defer closeSwarms(swarms)
s1 := swarms[0]
s2 := swarms[1]
s1.Peerstore().AddAddrs(s2.LocalPeer(), s2.ListenAddresses(), peerstore.PermanentAddrTTL)
c, err := s1.DialPeer(ctx, s2.LocalPeer())
if err != nil {
t.Fatal(err)
}
s, err := c.NewStream(ctx)
if err != nil {
t.Fatal(err)
}
c, err := s1.DialPeer(context.Background(), s2.LocalPeer())
require.NoError(t, err)
s, err := c.NewStream(context.Background())
require.NoError(t, err)
s.Close()
}
func TestDialWithNoListeners(t *testing.T) {
t.Parallel()
ctx := context.Background()
s1 := makeDialOnlySwarm(ctx, t)
swarms := makeSwarms(ctx, t, 1)
s1 := makeDialOnlySwarm(t)
swarms := makeSwarms(t, 1)
defer closeSwarms(swarms)
s2 := swarms[0]
s1.Peerstore().AddAddrs(s2.LocalPeer(), s2.ListenAddresses(), peerstore.PermanentAddrTTL)
c, err := s1.DialPeer(ctx, s2.LocalPeer())
if err != nil {
t.Fatal(err)
}
s, err := c.NewStream(ctx)
if err != nil {
t.Fatal(err)
}
c, err := s1.DialPeer(context.Background(), s2.LocalPeer())
require.NoError(t, err)
s, err := c.NewStream(context.Background())
require.NoError(t, err)
s.Close()
}
@@ -104,7 +91,7 @@ func TestSimultDials(t *testing.T) {
t.Parallel()
ctx := context.Background()
swarms := makeSwarms(ctx, t, 2, swarmt.OptDisableReuseport)
swarms := makeSwarms(t, 2, swarmt.OptDisableReuseport)
// connect everyone
{
@@ -175,7 +162,7 @@ func TestDialWait(t *testing.T) {
t.Parallel()
ctx := context.Background()
swarms := makeSwarms(ctx, t, 1)
swarms := makeSwarms(t, 1)
s1 := swarms[0]
defer s1.Close()
@@ -215,7 +202,7 @@ func TestDialBackoff(t *testing.T) {
t.Parallel()
ctx := context.Background()
swarms := makeSwarms(ctx, t, 2)
swarms := makeSwarms(t, 2)
s1 := swarms[0]
s2 := swarms[1]
defer s1.Close()
@@ -422,7 +409,7 @@ func TestDialBackoffClears(t *testing.T) {
t.Parallel()
ctx := context.Background()
swarms := makeSwarms(ctx, t, 2)
swarms := makeSwarms(t, 2)
s1 := swarms[0]
s2 := swarms[1]
defer s1.Close()
@@ -491,7 +478,7 @@ func TestDialPeerFailed(t *testing.T) {
t.Parallel()
ctx := context.Background()
swarms := makeSwarms(ctx, t, 2)
swarms := makeSwarms(t, 2)
defer closeSwarms(swarms)
testedSwarm, targetSwarm := swarms[0], swarms[1]
@@ -530,7 +517,7 @@ func TestDialPeerFailed(t *testing.T) {
func TestDialExistingConnection(t *testing.T) {
ctx := context.Background()
swarms := makeSwarms(ctx, t, 2)
swarms := makeSwarms(t, 2)
defer closeSwarms(swarms)
s1 := swarms[0]
s2 := swarms[1]
@@ -574,7 +561,7 @@ func TestDialSimultaneousJoin(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
swarms := makeSwarms(ctx, t, 2)
swarms := makeSwarms(t, 2)
s1 := swarms[0]
s2 := swarms[1]
defer s1.Close()
@@ -676,12 +663,10 @@ func TestDialSelf(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
swarms := makeSwarms(ctx, t, 2)
swarms := makeSwarms(t, 2)
s1 := swarms[0]
defer s1.Close()
_, err := s1.DialPeer(ctx, s1.LocalPeer())
if err != ErrDialToSelf {
t.Fatal("expected error from self dial")
}
require.ErrorIs(t, err, ErrDialToSelf, "expected error from self dial")
}
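
One subtle improvement in the rewritten self-dial assertion: require.ErrorIs uses errors.Is, so it still matches if ErrDialToSelf ever arrives wrapped, whereas the old err != ErrDialToSelf comparison only recognized the bare sentinel. A self-contained illustration (errDialToSelf is a hypothetical stand-in for the real sentinel):

```go
package swarm_test

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

// errDialToSelf is a stand-in for the package's ErrDialToSelf sentinel.
var errDialToSelf = errors.New("dial to self attempted")

func TestErrorIsUnwraps(t *testing.T) {
	err := fmt.Errorf("dial failed: %w", errDialToSelf)
	require.ErrorIs(t, err, errDialToSelf) // passes: errors.Is unwraps
	require.False(t, err == errDialToSelf) // a direct comparison misses the wrapped sentinel
}
```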

View File

@@ -15,7 +15,7 @@ import (
func TestPeers(t *testing.T) {
ctx := context.Background()
swarms := makeSwarms(ctx, t, 2)
swarms := makeSwarms(t, 2)
s1 := swarms[0]
s2 := swarms[1]

View File

@@ -18,11 +18,8 @@ import (
)
func TestSimultOpen(t *testing.T) {
t.Parallel()
ctx := context.Background()
swarms := makeSwarms(ctx, t, 2, swarmt.OptDisableReuseport)
swarms := makeSwarms(t, 2, swarmt.OptDisableReuseport)
// connect everyone
{
@@ -32,7 +29,7 @@ func TestSimultOpen(t *testing.T) {
// copy for other peer
log.Debugf("TestSimultOpen: connecting: %s --> %s (%s)", s.LocalPeer(), dst, addr)
s.Peerstore().AddAddr(dst, addr, peerstore.PermanentAddrTTL)
if _, err := s.DialPeer(ctx, dst); err != nil {
if _, err := s.DialPeer(context.Background(), dst); err != nil {
t.Error("error swarm dialing to peer", err)
}
}

View File

@@ -18,8 +18,6 @@ import (
"github.com/libp2p/go-libp2p-core/transport"
logging "github.com/ipfs/go-log"
"github.com/jbenet/goprocess"
goprocessctx "github.com/jbenet/goprocess/context"
ma "github.com/multiformats/go-multiaddr"
)
@@ -92,9 +90,11 @@ type Swarm struct {
limiter *dialLimiter
gater connmgr.ConnectionGater
proc goprocess.Process
ctx context.Context
bwc metrics.Reporter
closeOnce sync.Once
ctx context.Context // is canceled when Close is called
ctxCancel context.CancelFunc
bwc metrics.Reporter
}
// NewSwarm constructs a Swarm.
@@ -103,11 +103,14 @@ type Swarm struct {
// `extra` interface{} parameter facilitates the future migration. Supported
// elements are:
// - connmgr.ConnectionGater
func NewSwarm(ctx context.Context, local peer.ID, peers peerstore.Peerstore, bwc metrics.Reporter, extra ...interface{}) *Swarm {
func NewSwarm(local peer.ID, peers peerstore.Peerstore, bwc metrics.Reporter, extra ...interface{}) *Swarm {
ctx, cancel := context.WithCancel(context.Background())
s := &Swarm{
local: local,
peers: peers,
bwc: bwc,
local: local,
peers: peers,
bwc: bwc,
ctx: ctx,
ctxCancel: cancel,
}
s.conns.m = make(map[peer.ID][]*Conn)
@@ -124,25 +127,19 @@ func NewSwarm(ctx context.Context, local peer.ID, peers peerstore.Peerstore, bwc
s.dsync = newDialSync(s.dialWorkerLoop)
s.limiter = newDialLimiter(s.dialAddr)
s.proc = goprocessctx.WithContext(ctx)
s.ctx = goprocessctx.OnClosingContext(s.proc)
s.backf.init(s.ctx)
// Set teardown after setting the context/process so we don't start the
// teardown process early.
s.proc.SetTeardown(s.teardown)
return s
}
func (s *Swarm) teardown() error {
// Wait for the context to be canceled.
// This allows other parts of the swarm to detect that we're shutting
// down.
<-s.ctx.Done()
func (s *Swarm) Close() error {
s.closeOnce.Do(s.close)
return nil
}
func (s *Swarm) close() {
s.ctxCancel()
// Prevents new connections and/or listeners from being added to the swarm.
s.listeners.Lock()
listeners := s.listeners.m
s.listeners.m = nil
@@ -197,13 +194,6 @@ func (s *Swarm) teardown() error {
}
}
wg.Wait()
return nil
}
// Process returns the Process of the swarm
func (s *Swarm) Process() goprocess.Process {
return s.proc
}
func (s *Swarm) addConn(tc transport.CapableConn, dir network.Direction) (*Conn, error) {
@@ -293,16 +283,6 @@ func (s *Swarm) Peerstore() peerstore.Peerstore {
return s.peers
}
// Context returns the context of the swarm
func (s *Swarm) Context() context.Context {
return s.ctx
}
// Close stops the Swarm.
func (s *Swarm) Close() error {
return s.proc.Close()
}
// TODO: We probably don't need the conn handlers.
// SetConnHandler assigns the handler for new connections.
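
Note that the new close waits on a sync.WaitGroup before returning, so Close is synchronous: when it returns, listeners and connections are fully torn down. Expanding the close stub from the sketch at the top of this page (snapshotConns is a hypothetical helper that copies the connection list under the lock):

```go
func (s *service) close() {
	s.ctxCancel() // signal shutdown to everything watching s.ctx

	var wg sync.WaitGroup
	for _, c := range s.snapshotConns() { // hypothetical: conns copied under the lock
		wg.Add(1)
		go func(c interface{ Close() error }) {
			defer wg.Done()
			c.Close()
		}(c)
	}
	wg.Wait() // Close returns only after every teardown goroutine finishes
}
```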

View File

@@ -6,6 +6,7 @@ import (
"github.com/libp2p/go-libp2p-core/peerstore"
"github.com/libp2p/go-libp2p-core/test"
"github.com/stretchr/testify/require"
ma "github.com/multiformats/go-multiaddr"
@@ -13,7 +14,6 @@ import (
)
func TestDialBadAddrs(t *testing.T) {
m := func(s string) ma.Multiaddr {
maddr, err := ma.NewMultiaddr(s)
if err != nil {
@@ -22,13 +22,12 @@ func TestDialBadAddrs(t *testing.T) {
return maddr
}
ctx := context.Background()
s := makeSwarms(ctx, t, 1)[0]
s := makeSwarms(t, 1)[0]
test := func(a ma.Multiaddr) {
p := test.RandPeerIDFatal(t)
s.Peerstore().AddAddr(p, a, peerstore.PermanentAddrTTL)
if _, err := s.DialPeer(ctx, p); err == nil {
if _, err := s.DialPeer(context.Background(), p); err == nil {
t.Errorf("swarm should not dial: %s", p)
}
}
@@ -39,19 +38,13 @@ func TestDialBadAddrs(t *testing.T) {
}
func TestAddrRace(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
s := makeSwarms(ctx, t, 1)[0]
s := makeSwarms(t, 1)[0]
defer s.Close()
a1, err := s.InterfaceListenAddresses()
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
a2, err := s.InterfaceListenAddresses()
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
if len(a1) > 0 && len(a2) > 0 && &a1[0] == &a2[0] {
t.Fatal("got the exact same address set twice; this could lead to data races")
@@ -59,15 +52,8 @@ func TestAddrRace(t *testing.T) {
}
func TestAddressesWithoutListening(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
s := swarmt.GenSwarm(t, ctx, swarmt.OptDialOnly)
s := swarmt.GenSwarm(t, swarmt.OptDialOnly)
a1, err := s.InterfaceListenAddresses()
if err != nil {
t.Fatal(err)
}
if len(a1) != 0 {
t.Fatalf("expected to be listening on no addresses, was listening on %d", len(a1))
}
require.NoError(t, err)
require.Empty(t, a1, "expected to be listening on no addresses")
}

View File

@@ -46,7 +46,7 @@ func (s *Swarm) AddListenAddr(a ma.Multiaddr) error {
//
// Distinguish between these two cases to avoid confusing users.
select {
case <-s.proc.Closing():
case <-s.ctx.Done():
return ErrSwarmClosed
default:
return ErrNoTransport
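
The hunk above shows the general replacement for goprocess's Closing() channel: a non-blocking select on the canceled context. Factored into a helper on the illustrative service type from the first sketch:

```go
// closed reports whether Close has been called, without blocking.
// It plays the role the goprocess Closing() channel used to play.
func (s *service) closed() bool {
	select {
	case <-s.ctx.Done():
		return true
	default:
		return false
	}
}
```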

View File

@@ -7,6 +7,8 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/libp2p/go-libp2p-core/network"
. "github.com/libp2p/go-libp2p-swarm/testing"
@@ -15,19 +17,16 @@ import (
// TestConnectednessCorrect starts a few networks, connects a few
// and tests Connectedness value is correct.
func TestConnectednessCorrect(t *testing.T) {
ctx := context.Background()
nets := make([]network.Network, 4)
for i := 0; i < 4; i++ {
nets[i] = GenSwarm(t, ctx)
nets[i] = GenSwarm(t)
}
// connect 0-1, 0-2, 0-3, 1-2, 2-3
dial := func(a, b network.Network) {
DivulgeAddresses(b, a)
if _, err := a.DialPeer(ctx, b.LocalPeer()); err != nil {
if _, err := a.DialPeer(context.Background(), b.LocalPeer()); err != nil {
t.Fatalf("Failed to dial: %s", err)
}
}
@@ -54,33 +53,17 @@ func TestConnectednessCorrect(t *testing.T) {
expectConnectedness(t, nets[0], nets[2], network.NotConnected)
expectConnectedness(t, nets[1], nets[3], network.NotConnected)
if len(nets[0].Peers()) != 2 {
t.Fatal("expected net 0 to have two peers")
}
if len(nets[2].Peers()) != 2 {
t.Fatal("expected net 2 to have two peers")
}
if len(nets[1].ConnsToPeer(nets[3].LocalPeer())) != 0 {
t.Fatal("net 1 should have no connections to net 3")
}
if err := nets[2].ClosePeer(nets[1].LocalPeer()); err != nil {
t.Fatal(err)
}
require.Len(t, nets[0].Peers(), 2, "expected net 0 to have two peers")
require.Len(t, nets[2].Peers(), 2, "expected net 2 to have two peers")
require.Empty(t, nets[1].ConnsToPeer(nets[3].LocalPeer()), "net 1 should have no connections to net 3")
require.NoError(t, nets[2].ClosePeer(nets[1].LocalPeer()))
time.Sleep(time.Millisecond * 50)
expectConnectedness(t, nets[2], nets[1], network.NotConnected)
for _, n := range nets {
n.Close()
}
for _, n := range nets {
<-n.Process().Closed()
}
}
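
Because close blocks on wg.Wait, Close is now synchronous, which is why the loop that waited on <-n.Process().Closed() could be deleted outright: once Close returns, teardown has already finished, so shutting the test networks down is just:

```go
for _, n := range nets {
	n.Close() // blocks until teardown completes; no extra wait needed
}
```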
func expectConnectedness(t *testing.T, a, b network.Network, expected network.Connectedness) {
@@ -113,7 +96,7 @@ func TestNetworkOpenStream(t *testing.T) {
nets := make([]network.Network, 4)
for i := 0; i < 4; i++ {
nets[i] = GenSwarm(t, ctx)
nets[i] = GenSwarm(t)
}
dial := func(a, b network.Network) {

View File

@@ -5,6 +5,8 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
@@ -18,8 +20,7 @@ func TestNotifications(t *testing.T) {
notifiees := make([]*netNotifiee, swarmSize)
ctx := context.Background()
swarms := makeSwarms(ctx, t, swarmSize)
swarms := makeSwarms(t, swarmSize)
defer func() {
for i, s := range swarms {
select {
@@ -27,10 +28,7 @@ func TestNotifications(t *testing.T) {
t.Error("should not have been closed")
default:
}
err := s.Close()
if err != nil {
t.Error(err)
}
require.NoError(t, s.Close())
select {
case <-notifiees[i].listenClose:
default:
@@ -48,7 +46,7 @@ func TestNotifications(t *testing.T) {
notifiees[i] = n
}
connectSwarms(t, ctx, swarms)
connectSwarms(t, context.Background(), swarms)
time.Sleep(50 * time.Millisecond)
// should've gotten 5 by now.

View File

@@ -58,27 +58,23 @@ func EchoStreamHandler(stream network.Stream) {
}()
}
func makeDialOnlySwarm(ctx context.Context, t *testing.T) *Swarm {
swarm := GenSwarm(t, ctx, OptDialOnly)
func makeDialOnlySwarm(t *testing.T) *Swarm {
swarm := GenSwarm(t, OptDialOnly)
swarm.SetStreamHandler(EchoStreamHandler)
return swarm
}
func makeSwarms(ctx context.Context, t *testing.T, num int, opts ...Option) []*Swarm {
func makeSwarms(t *testing.T, num int, opts ...Option) []*Swarm {
swarms := make([]*Swarm, 0, num)
for i := 0; i < num; i++ {
swarm := GenSwarm(t, ctx, opts...)
swarm := GenSwarm(t, opts...)
swarm.SetStreamHandler(EchoStreamHandler)
swarms = append(swarms, swarm)
}
return swarms
}
func connectSwarms(t *testing.T, ctx context.Context, swarms []*Swarm) {
var wg sync.WaitGroup
connect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) {
// TODO: make a DialAddr func.
@@ -104,13 +100,10 @@ func connectSwarms(t *testing.T, ctx context.Context, swarms []*Swarm) {
}
func SubtestSwarm(t *testing.T, SwarmNum int, MsgNum int) {
// t.Skip("skipping for another test")
ctx := context.Background()
swarms := makeSwarms(ctx, t, SwarmNum, OptDisableReuseport)
swarms := makeSwarms(t, SwarmNum, OptDisableReuseport)
// connect everyone
connectSwarms(t, ctx, swarms)
connectSwarms(t, context.Background(), swarms)
// ping/pong
for _, s1 := range swarms {
@@ -118,7 +111,7 @@ func SubtestSwarm(t *testing.T, SwarmNum int, MsgNum int) {
log.Debugf("%s ping pong round", s1.LocalPeer())
log.Debugf("-------------------------------------------------------")
_, cancel := context.WithCancel(ctx)
_, cancel := context.WithCancel(context.Background())
got := map[peer.ID]int{}
errChan := make(chan error, MsgNum*len(swarms))
streamChan := make(chan network.Stream, MsgNum)
@@ -132,7 +125,7 @@ func SubtestSwarm(t *testing.T, SwarmNum int, MsgNum int) {
defer wg.Done()
// first, one stream per peer (nice)
stream, err := s1.NewStream(ctx, p)
stream, err := s1.NewStream(context.Background(), p)
if err != nil {
errChan <- err
return
@@ -253,7 +246,7 @@ func TestConnHandler(t *testing.T) {
t.Parallel()
ctx := context.Background()
swarms := makeSwarms(ctx, t, 5)
swarms := makeSwarms(t, 5)
gotconn := make(chan struct{}, 10)
swarms[0].SetConnHandler(func(conn network.Conn) {
@@ -387,8 +380,8 @@ func TestConnectionGating(t *testing.T) {
p2Gater = tc.p2Gater(p2Gater)
}
sw1 := GenSwarm(t, ctx, OptConnGater(p1Gater), optTransport)
sw2 := GenSwarm(t, ctx, OptConnGater(p2Gater), optTransport)
sw1 := GenSwarm(t, OptConnGater(p1Gater), optTransport)
sw2 := GenSwarm(t, OptConnGater(p2Gater), optTransport)
p1 := sw1.LocalPeer()
p2 := sw2.LocalPeer()
@@ -408,10 +401,9 @@ func TestConnectionGating(t *testing.T) {
}
func TestNoDial(t *testing.T) {
ctx := context.Background()
swarms := makeSwarms(ctx, t, 2)
swarms := makeSwarms(t, 2)
_, err := swarms[0].NewStream(network.WithNoDial(ctx, "swarm test"), swarms[1].LocalPeer())
_, err := swarms[0].NewStream(network.WithNoDial(context.Background(), "swarm test"), swarms[1].LocalPeer())
if err != network.ErrNoConn {
t.Fatal("should have failed with ErrNoConn")
}
@@ -419,36 +411,29 @@ func TestNoDial(t *testing.T) {
func TestCloseWithOpenStreams(t *testing.T) {
ctx := context.Background()
swarms := makeSwarms(ctx, t, 2)
swarms := makeSwarms(t, 2)
connectSwarms(t, ctx, swarms)
s, err := swarms[0].NewStream(ctx, swarms[1].LocalPeer())
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
defer s.Close()
// close swarm before stream.
err = swarms[0].Close()
if err != nil {
t.Fatal(err)
}
require.NoError(t, swarms[0].Close())
}
func TestTypedNilConn(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
s := GenSwarm(t, ctx)
s := GenSwarm(t)
defer s.Close()
// We can't dial ourselves.
c, err := s.DialPeer(ctx, s.LocalPeer())
c, err := s.DialPeer(context.Background(), s.LocalPeer())
require.Error(t, err)
// If we fail to dial, the connection should be nil.
require.True(t, c == nil)
require.Nil(t, c)
}
func TestPreventDialListenAddr(t *testing.T) {
s := GenSwarm(t, context.Background(), OptDialOnly)
s := GenSwarm(t, OptDialOnly)
if err := s.Listen(ma.StringCast("/ip4/0.0.0.0/udp/0/quic")); err != nil {
t.Fatal(err)
}

View File

@@ -1,7 +1,6 @@
package testing
import (
"context"
"testing"
csms "github.com/libp2p/go-conn-security-multistream"
@@ -22,7 +21,6 @@ import (
msmux "github.com/libp2p/go-stream-muxer-multistream"
"github.com/libp2p/go-tcp-transport"
"github.com/jbenet/goprocess"
ma "github.com/multiformats/go-multiaddr"
)
@@ -89,7 +87,7 @@ func GenUpgrader(n *swarm.Swarm) *tptu.Upgrader {
}
// GenSwarm generates a new test swarm.
func GenSwarm(t *testing.T, ctx context.Context, opts ...Option) *swarm.Swarm {
func GenSwarm(t *testing.T, opts ...Option) *swarm.Swarm {
var cfg config
for _, o := range opts {
o(t, &cfg)
@@ -113,11 +111,9 @@ func GenSwarm(t *testing.T, ctx context.Context, opts ...Option) *swarm.Swarm {
ps := pstoremem.NewPeerstore()
ps.AddPubKey(p.ID, p.PubKey)
ps.AddPrivKey(p.ID, p.PrivKey)
s := swarm.NewSwarm(ctx, p.ID, ps, metrics.NewBandwidthCounter(), cfg.connectionGater)
t.Cleanup(func() { ps.Close() })
// Call AddChildNoWait because we can't call AddChild after the process
// may have been closed (e.g., if the context was canceled).
s.Process().AddChildNoWait(goprocess.WithTeardown(ps.Close))
s := swarm.NewSwarm(p.ID, ps, metrics.NewBandwidthCounter(), cfg.connectionGater)
upgrader := GenUpgrader(s)
upgrader.ConnGater = cfg.connectionGater
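
With the goprocess tree gone, the test helper can no longer hang the peerstore's teardown off a child process; it registers it with t.Cleanup instead, which the testing package runs automatically (in LIFO order) when the test and its subtests finish. A minimal sketch of that idiom (fakeStore is a hypothetical stand-in for the peerstore):

```go
package testing_test

import (
	"io"
	"testing"
)

// fakeStore stands in for the peerstore; illustrative only.
type fakeStore struct{}

func (*fakeStore) Close() error { return nil }

// genStore mirrors the GenSwarm change above: the store's Close is
// registered with t.Cleanup instead of a goprocess child teardown.
func genStore(t *testing.T) io.Closer {
	ps := &fakeStore{}
	t.Cleanup(func() { ps.Close() })
	return ps
}
```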

View File

@@ -1,14 +1,13 @@
package testing
import (
"context"
"testing"
"github.com/stretchr/testify/require"
)
func TestGenSwarm(t *testing.T) {
swarm := GenSwarm(t, context.Background())
swarm := GenSwarm(t)
require.NoError(t, swarm.Close())
GenUpgrader(swarm)
}

View File

@@ -9,7 +9,10 @@ import (
"github.com/libp2p/go-libp2p-core/peer"
"github.com/libp2p/go-libp2p-core/transport"
ma "github.com/multiformats/go-multiaddr"
"github.com/stretchr/testify/require"
)
type dummyTransport struct {
@@ -43,34 +46,22 @@ func (dt *dummyTransport) Close() error {
}
func TestUselessTransport(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
s := swarmt.GenSwarm(t, ctx)
err := s.AddTransport(new(dummyTransport))
if err == nil {
t.Fatal("adding a transport that supports no protocols should have failed")
}
s := swarmt.GenSwarm(t)
require.Error(t, s.AddTransport(new(dummyTransport)), "adding a transport that supports no protocols should have failed")
}
func TestTransportClose(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
s := swarmt.GenSwarm(t, ctx)
s := swarmt.GenSwarm(t)
tpt := &dummyTransport{protocols: []int{1}}
if err := s.AddTransport(tpt); err != nil {
t.Fatal(err)
}
require.NoError(t, s.AddTransport(tpt))
_ = s.Close()
if !tpt.closed {
t.Fatal("expected transport to be closed")
}
}
func TestTransportAfterClose(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
s := swarmt.GenSwarm(t, ctx)
s := swarmt.GenSwarm(t)
s.Close()
tpt := &dummyTransport{protocols: []int{1}}