identify: refactor sending of Identify pushes (#1984)
* identify: cache the snapshot * identify: refactor sending of Identify pushes * identify: fix concurrency when sending pushes * identify: fix timestamp handling * identify: remove unneeded pushSemaphore * identify: improve logging * identify: use a sequence number instead of a timestamp * identify: start with an empty snapshot * identify: wait until we've actually finished setting up
This commit is contained in:
parent
ad2f6d0223
commit
235f25b487
|
@ -181,7 +181,7 @@ func NewHost(n network.Network, opts *HostOpts) (*BasicHost, error) {
|
|||
|
||||
h.updateLocalIpAddr()
|
||||
|
||||
if h.emitters.evtLocalProtocolsUpdated, err = h.eventbus.Emitter(&event.EvtLocalProtocolsUpdated{}); err != nil {
|
||||
if h.emitters.evtLocalProtocolsUpdated, err = h.eventbus.Emitter(&event.EvtLocalProtocolsUpdated{}, eventbus.Stateful); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if h.emitters.evtLocalAddrsUpdated, err = h.eventbus.Emitter(&event.EvtLocalAddressesUpdated{}, eventbus.Stateful); err != nil {
|
||||
|
@ -367,6 +367,7 @@ func (h *BasicHost) updateLocalIpAddr() {
|
|||
func (h *BasicHost) Start() {
|
||||
h.psManager.Start()
|
||||
h.refCount.Add(1)
|
||||
h.ids.Start()
|
||||
go h.background()
|
||||
}
|
||||
|
||||
|
|
|
@ -176,7 +176,7 @@ func TestHostAddrsFactory(t *testing.T) {
|
|||
|
||||
addrs := h.Addrs()
|
||||
if len(addrs) != 1 {
|
||||
t.Fatalf("expected 1 addr, got %d", len(addrs))
|
||||
t.Fatalf("expected 1 addr, got %+v", addrs)
|
||||
}
|
||||
if !addrs[0].Equal(maddr) {
|
||||
t.Fatalf("expected %s, got %s", maddr.String(), addrs[0].String())
|
||||
|
@ -245,8 +245,10 @@ func getHostPair(t *testing.T) (host.Host, host.Host) {
|
|||
|
||||
h1, err := NewHost(swarmt.GenSwarm(t), nil)
|
||||
require.NoError(t, err)
|
||||
h1.Start()
|
||||
h2, err := NewHost(swarmt.GenSwarm(t), nil)
|
||||
require.NoError(t, err)
|
||||
h2.Start()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
defer cancel()
|
||||
|
@ -342,14 +344,12 @@ func TestHostProtoMismatch(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHostProtoPreknowledge(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
h1, err := NewHost(swarmt.GenSwarm(t), nil)
|
||||
require.NoError(t, err)
|
||||
defer h1.Close()
|
||||
|
||||
h2, err := NewHost(swarmt.GenSwarm(t), nil)
|
||||
require.NoError(t, err)
|
||||
defer h1.Close()
|
||||
defer h2.Close()
|
||||
|
||||
conn := make(chan protocol.ID)
|
||||
|
@ -362,8 +362,11 @@ func TestHostProtoPreknowledge(t *testing.T) {
|
|||
// Prevent pushing identify information so this test actually _uses_ the super protocol.
|
||||
h1.RemoveStreamHandler(identify.IDPush)
|
||||
|
||||
h1.Start()
|
||||
h2.Start()
|
||||
|
||||
h2pi := h2.Peerstore().PeerInfo(h2.ID())
|
||||
require.NoError(t, h1.Connect(ctx, h2pi))
|
||||
require.NoError(t, h1.Connect(context.Background(), h2pi))
|
||||
|
||||
// wait for identify handshake to finish completely
|
||||
select {
|
||||
|
@ -380,12 +383,12 @@ func TestHostProtoPreknowledge(t *testing.T) {
|
|||
|
||||
h2.SetStreamHandler("/foo", handler)
|
||||
|
||||
s, err := h1.NewStream(ctx, h2.ID(), "/foo", "/bar", "/super")
|
||||
s, err := h1.NewStream(context.Background(), h2.ID(), "/foo", "/bar", "/super")
|
||||
require.NoError(t, err)
|
||||
|
||||
select {
|
||||
case p := <-conn:
|
||||
t.Fatal("shouldnt have gotten connection yet, we should have a lazy stream: ", p)
|
||||
t.Fatal("shouldn't have gotten connection yet, we should have a lazy stream: ", p)
|
||||
case <-time.After(time.Millisecond * 50):
|
||||
}
|
||||
|
||||
|
@ -532,7 +535,6 @@ func TestAddrChangeImmediatelyIfAddressNonEmpty(t *testing.T) {
|
|||
return taddrs
|
||||
}})
|
||||
require.NoError(t, err)
|
||||
h.Start()
|
||||
defer h.Close()
|
||||
|
||||
sub, err := h.EventBus().Subscribe(&event.EvtLocalAddressesUpdated{})
|
||||
|
@ -541,6 +543,7 @@ func TestAddrChangeImmediatelyIfAddressNonEmpty(t *testing.T) {
|
|||
t.Error(err)
|
||||
}
|
||||
defer sub.Close()
|
||||
h.Start()
|
||||
|
||||
expected := event.EvtLocalAddressesUpdated{
|
||||
Diffs: true,
|
||||
|
|
|
@ -528,7 +528,7 @@ func TestLimitedStreams(t *testing.T) {
|
|||
}
|
||||
|
||||
wg.Wait()
|
||||
if !within(time.Since(before), time.Second*2, time.Second) {
|
||||
if !within(time.Since(before), time.Second*5/2, time.Second) {
|
||||
t.Fatal("Expected 2ish seconds but got ", time.Since(before))
|
||||
}
|
||||
}
|
||||
|
|
|
@ -7,6 +7,10 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/p2p/transport/tcp"
|
||||
|
||||
"github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto"
|
||||
|
||||
"github.com/libp2p/go-libp2p"
|
||||
"github.com/libp2p/go-libp2p-testing/race"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
|
@ -68,6 +72,8 @@ var _ identify.IDService = &mockIDService{}
|
|||
func newMockIDService(t *testing.T, h host.Host) identify.IDService {
|
||||
ids, err := identify.NewIDService(h)
|
||||
require.NoError(t, err)
|
||||
ids.Start()
|
||||
t.Cleanup(func() { ids.Close() })
|
||||
return &mockIDService{IDService: ids}
|
||||
}
|
||||
|
||||
|
@ -448,10 +454,23 @@ func makeRelayedHosts(t *testing.T, h1opt, h2opt []holepunch.Option, addHolePunc
|
|||
libp2p.ResourceManager(&network.NullResourceManager{}),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = relayv1.NewRelay(relay)
|
||||
require.NoError(t, err)
|
||||
|
||||
// make sure the relay service is started and advertised by Identify
|
||||
h, err := libp2p.New(
|
||||
libp2p.NoListenAddrs,
|
||||
libp2p.Transport(tcp.NewTCPTransport),
|
||||
libp2p.DisableRelay(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
defer h.Close()
|
||||
require.NoError(t, h.Connect(context.Background(), peer.AddrInfo{ID: relay.ID(), Addrs: relay.Addrs()}))
|
||||
require.Eventually(t, func() bool {
|
||||
supported, err := h.Peerstore().SupportsProtocols(relay.ID(), proto.ProtoIDv2Hop, relayv1.ProtoID)
|
||||
return err == nil && len(supported) > 0
|
||||
}, 3*time.Second, 100*time.Millisecond)
|
||||
|
||||
h2 = mkHostWithStaticAutoRelay(t, relay)
|
||||
if addHolePuncher {
|
||||
hps = addHolePunchService(t, h2, h2opt...)
|
||||
|
|
|
@ -30,9 +30,13 @@ import (
|
|||
|
||||
var log = logging.Logger("net/identify")
|
||||
|
||||
// ID is the protocol.ID of version 1.0.0 of the identify
|
||||
// service.
|
||||
const ID = "/ipfs/id/1.0.0"
|
||||
const (
|
||||
// ID is the protocol.ID of version 1.0.0 of the identify service.
|
||||
ID = "/ipfs/id/1.0.0"
|
||||
// IDPush is the protocol.ID of the Identify push protocol.
|
||||
// It sends full identify messages containing the current state of the peer.
|
||||
IDPush = "/ipfs/id/push/1.0.0"
|
||||
)
|
||||
|
||||
const DefaultProtocolVersion = "ipfs/0.1.0"
|
||||
|
||||
|
@ -51,13 +55,11 @@ const (
|
|||
|
||||
var defaultUserAgent = "github.com/libp2p/go-libp2p"
|
||||
|
||||
type addPeerHandlerReq struct {
|
||||
rp peer.ID
|
||||
resp chan *peerHandler
|
||||
}
|
||||
|
||||
type rmPeerHandlerReq struct {
|
||||
p peer.ID
|
||||
type identifySnapshot struct {
|
||||
seq uint64
|
||||
protocols []protocol.ID
|
||||
addrs []ma.Multiaddr
|
||||
record *record.Envelope
|
||||
}
|
||||
|
||||
type IDService interface {
|
||||
|
@ -75,22 +77,45 @@ type IDService interface {
|
|||
// ObservedAddrsFor returns the addresses peers have reported we've dialed from,
|
||||
// for a specific local address.
|
||||
ObservedAddrsFor(local ma.Multiaddr) []ma.Multiaddr
|
||||
Start()
|
||||
io.Closer
|
||||
}
|
||||
|
||||
type identifyPushSupport uint8
|
||||
|
||||
const (
|
||||
identifyPushSupportUnknown identifyPushSupport = iota
|
||||
identifyPushSupported
|
||||
identifyPushUnsupported
|
||||
)
|
||||
|
||||
type entry struct {
|
||||
// The IdentifyWaitChan is created when IdentifyWait is called for the first time.
|
||||
// IdentifyWait closes this channel when the Identify request completes, or when it fails.
|
||||
IdentifyWaitChan chan struct{}
|
||||
|
||||
// PushSupport saves our knowledge about the peer's support of the Identify Push protocol.
|
||||
// Before the identify request returns, we don't know yet if the peer supports Identify Push.
|
||||
PushSupport identifyPushSupport
|
||||
// Sequence is the sequence number of the last snapshot we sent to this peer.
|
||||
Sequence uint64
|
||||
}
|
||||
|
||||
// idService is a structure that implements ProtocolIdentify.
|
||||
// It is a trivial service that gives the other peer some
|
||||
// useful information about the local peer. A sort of hello.
|
||||
//
|
||||
// The idService sends:
|
||||
// - Our IPFS Protocol Version
|
||||
// - Our IPFS Agent Version
|
||||
// - Our libp2p Protocol Version
|
||||
// - Our libp2p Agent Version
|
||||
// - Our public Listen Addresses
|
||||
type idService struct {
|
||||
Host host.Host
|
||||
UserAgent string
|
||||
ProtocolVersion string
|
||||
|
||||
setupCompleted chan struct{} // is closed when Start has finished setting up
|
||||
|
||||
ctx context.Context
|
||||
ctxCancel context.CancelFunc
|
||||
// track resources that need to be shut down before we shut down
|
||||
|
@ -98,9 +123,13 @@ type idService struct {
|
|||
|
||||
disableSignedPeerRecord bool
|
||||
|
||||
// Identified connections (finished and in progress).
|
||||
connsMu sync.RWMutex
|
||||
conns map[network.Conn]chan struct{}
|
||||
// The conns map contains all connections we're currently handling.
|
||||
// Connections are inserted as soon as they're available in the swarm, and - crucially -
|
||||
// before any stream can be opened or accepted on that connection.
|
||||
// Connections are removed from the map when the connection disconnects.
|
||||
// It is therefore safe to assume that a connection was (recently) closed if there's no entry in this map.
|
||||
conns map[network.Conn]entry
|
||||
|
||||
addrMu sync.Mutex
|
||||
|
||||
|
@ -113,12 +142,10 @@ type idService struct {
|
|||
evtPeerIdentificationFailed event.Emitter
|
||||
}
|
||||
|
||||
addPeerHandlerCh chan addPeerHandlerReq
|
||||
rmPeerHandlerCh chan rmPeerHandlerReq
|
||||
|
||||
// pushSemaphore limits the push concurrency to avoid storms
|
||||
// that clog the transient scope.
|
||||
pushSemaphore chan struct{}
|
||||
currentSnapshot struct {
|
||||
sync.Mutex
|
||||
snapshot identifySnapshot
|
||||
}
|
||||
}
|
||||
|
||||
// NewIDService constructs a new *idService and activates it by
|
||||
|
@ -139,21 +166,17 @@ func NewIDService(h host.Host, opts ...Option) (*idService, error) {
|
|||
protocolVersion = cfg.protocolVersion
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
s := &idService{
|
||||
Host: h,
|
||||
UserAgent: userAgent,
|
||||
ProtocolVersion: protocolVersion,
|
||||
|
||||
conns: make(map[network.Conn]chan struct{}),
|
||||
|
||||
Host: h,
|
||||
UserAgent: userAgent,
|
||||
ProtocolVersion: protocolVersion,
|
||||
ctx: ctx,
|
||||
ctxCancel: cancel,
|
||||
conns: make(map[network.Conn]entry),
|
||||
disableSignedPeerRecord: cfg.disableSignedPeerRecord,
|
||||
|
||||
addPeerHandlerCh: make(chan addPeerHandlerReq),
|
||||
rmPeerHandlerCh: make(chan rmPeerHandlerReq),
|
||||
|
||||
pushSemaphore: make(chan struct{}, maxPushConcurrency),
|
||||
setupCompleted: make(chan struct{}),
|
||||
}
|
||||
s.ctx, s.ctxCancel = context.WithCancel(context.Background())
|
||||
|
||||
observedAddrs, err := NewObservedAddrManager(h)
|
||||
if err != nil {
|
||||
|
@ -161,9 +184,6 @@ func NewIDService(h host.Host, opts ...Option) (*idService, error) {
|
|||
}
|
||||
s.observedAddrs = observedAddrs
|
||||
|
||||
s.refCount.Add(1)
|
||||
go s.loop()
|
||||
|
||||
s.emitters.evtPeerProtocolsUpdated, err = h.EventBus().Emitter(&event.EvtPeerProtocolsUpdated{})
|
||||
if err != nil {
|
||||
log.Warnf("identify service not emitting peer protocol updates; err: %s", err)
|
||||
|
@ -176,19 +196,23 @@ func NewIDService(h host.Host, opts ...Option) (*idService, error) {
|
|||
if err != nil {
|
||||
log.Warnf("identify service not emitting identification failed events; err: %s", err)
|
||||
}
|
||||
|
||||
// register protocols that do not depend on peer records.
|
||||
h.SetStreamHandler(ID, s.sendIdentifyResp)
|
||||
h.SetStreamHandler(IDPush, s.pushHandler)
|
||||
|
||||
h.Network().Notify((*netNotifiee)(s))
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (ids *idService) loop() {
|
||||
func (ids *idService) Start() {
|
||||
ids.Host.Network().Notify((*netNotifiee)(ids))
|
||||
ids.Host.SetStreamHandler(ID, ids.handleIdentifyRequest)
|
||||
ids.Host.SetStreamHandler(IDPush, ids.handlePush)
|
||||
ids.updateSnapshot()
|
||||
close(ids.setupCompleted)
|
||||
|
||||
ids.refCount.Add(1)
|
||||
go ids.loop(ids.ctx)
|
||||
}
|
||||
|
||||
func (ids *idService) loop(ctx context.Context) {
|
||||
defer ids.refCount.Done()
|
||||
|
||||
phs := make(map[peer.ID]*peerHandler)
|
||||
sub, err := ids.Host.EventBus().Subscribe(
|
||||
[]any{&event.EvtLocalProtocolsUpdated{}, &event.EvtLocalAddressesUpdated{}},
|
||||
eventbus.BufSize(256),
|
||||
|
@ -198,93 +222,93 @@ func (ids *idService) loop() {
|
|||
log.Errorf("failed to subscribe to events on the bus, err=%s", err)
|
||||
return
|
||||
}
|
||||
defer sub.Close()
|
||||
|
||||
phClosedCh := make(chan peer.ID)
|
||||
// Send pushes from a separate Go routine.
|
||||
// That way, we can end up with
|
||||
// * this Go routine busy looping over all peers in sendPushes
|
||||
// * another push being queued in the triggerPush channel
|
||||
triggerPush := make(chan struct{}, 1)
|
||||
ids.refCount.Add(1)
|
||||
go func() {
|
||||
defer ids.refCount.Done()
|
||||
|
||||
defer func() {
|
||||
sub.Close()
|
||||
// The context will cancel the workers. Now, wait for them to
|
||||
// exit.
|
||||
for range phs {
|
||||
<-phClosedCh
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-triggerPush:
|
||||
ids.sendPushes(ctx)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Use a fresh context for the handlers. Otherwise, they'll get canceled
|
||||
// before we're ready to shutdown and they'll have "stopped" without us
|
||||
// _calling_ stop.
|
||||
handlerCtx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
for {
|
||||
select {
|
||||
case addReq := <-ids.addPeerHandlerCh:
|
||||
rp := addReq.rp
|
||||
ph, ok := phs[rp]
|
||||
if !ok && ids.Host.Network().Connectedness(rp) == network.Connected {
|
||||
ph = newPeerHandler(rp, ids)
|
||||
ph.start(handlerCtx, func() { phClosedCh <- rp })
|
||||
phs[rp] = ph
|
||||
case <-sub.Out():
|
||||
ids.updateSnapshot()
|
||||
select {
|
||||
case triggerPush <- struct{}{}:
|
||||
default: // we already have one more push queued, no need to queue another one
|
||||
}
|
||||
addReq.resp <- ph
|
||||
case rmReq := <-ids.rmPeerHandlerCh:
|
||||
rp := rmReq.p
|
||||
if ids.Host.Network().Connectedness(rp) != network.Connected {
|
||||
// before we remove the peerhandler, we should ensure that it will not send any
|
||||
// more messages. Otherwise, we might create a new handler and the Identify response
|
||||
// synchronized with the new handler might be overwritten by a message sent by this "old" handler.
|
||||
ph, ok := phs[rp]
|
||||
if !ok {
|
||||
// move on, move on, there's nothing to see here.
|
||||
continue
|
||||
}
|
||||
// This is idempotent if already stopped.
|
||||
ph.stop()
|
||||
}
|
||||
|
||||
case rp := <-phClosedCh:
|
||||
ph := phs[rp]
|
||||
|
||||
// If we are connected to the peer, it means that we got a connection from the peer
|
||||
// before we could finish removing it's handler on the previous disconnection.
|
||||
// If we delete the handler, we wont be able to push updates to it
|
||||
// till we see a new connection. So, we should restart the handler.
|
||||
// The fact that we got the handler on this channel means that it's context and handler
|
||||
// have completed because we write the handler to this chanel only after it closed.
|
||||
if ids.Host.Network().Connectedness(rp) == network.Connected {
|
||||
ph.start(handlerCtx, func() { phClosedCh <- rp })
|
||||
} else {
|
||||
delete(phs, rp)
|
||||
}
|
||||
|
||||
case e, more := <-sub.Out():
|
||||
if !more {
|
||||
return
|
||||
}
|
||||
switch e.(type) {
|
||||
case event.EvtLocalAddressesUpdated:
|
||||
for pid := range phs {
|
||||
select {
|
||||
case phs[pid].pushCh <- struct{}{}:
|
||||
default:
|
||||
log.Debugf("dropping addr updated message for %s as buffer full", pid)
|
||||
}
|
||||
}
|
||||
case event.EvtLocalProtocolsUpdated:
|
||||
for pid := range phs {
|
||||
select {
|
||||
case phs[pid].pushCh <- struct{}{}:
|
||||
default:
|
||||
log.Debugf("dropping protocol updated message for %s as buffer full", pid)
|
||||
}
|
||||
}
|
||||
}
|
||||
case <-ids.ctx.Done():
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ids *idService) sendPushes(ctx context.Context) {
|
||||
ids.connsMu.RLock()
|
||||
conns := make([]network.Conn, 0, len(ids.conns))
|
||||
for c, e := range ids.conns {
|
||||
// Push even if we don't know if push is supported.
|
||||
// This will be only the case while the IdentifyWaitChan call is in flight.
|
||||
if e.PushSupport == identifyPushSupported || e.PushSupport == identifyPushSupportUnknown {
|
||||
conns = append(conns, c)
|
||||
}
|
||||
}
|
||||
ids.connsMu.RUnlock()
|
||||
|
||||
sem := make(chan struct{}, maxPushConcurrency)
|
||||
var wg sync.WaitGroup
|
||||
for _, c := range conns {
|
||||
// check if the connection is still alive
|
||||
ids.connsMu.RLock()
|
||||
e, ok := ids.conns[c]
|
||||
ids.connsMu.RUnlock()
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
// check if we already sent the current snapshot to this peer
|
||||
ids.currentSnapshot.Lock()
|
||||
snapshot := ids.currentSnapshot.snapshot
|
||||
ids.currentSnapshot.Unlock()
|
||||
if e.Sequence >= snapshot.seq {
|
||||
log.Debugw("already sent this snapshot to peer", "peer", c.RemotePeer(), "seq", snapshot.seq)
|
||||
continue
|
||||
}
|
||||
// we haven't, send it now
|
||||
sem <- struct{}{}
|
||||
wg.Add(1)
|
||||
go func(c network.Conn) {
|
||||
defer wg.Done()
|
||||
defer func() { <-sem }()
|
||||
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
str, err := ids.Host.NewStream(ctx, c.RemotePeer(), IDPush)
|
||||
if err != nil { // connection might have been closed recently
|
||||
return
|
||||
}
|
||||
// TODO: find out if the peer supports push if we didn't have any information about push support
|
||||
if err := ids.sendIdentifyResp(str); err != nil {
|
||||
log.Debugw("failed to send identify push", "peer", c.RemotePeer(), "error", err)
|
||||
return
|
||||
}
|
||||
}(c)
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// Close shuts down the idService
|
||||
func (ids *idService) Close() error {
|
||||
ids.ctxCancel()
|
||||
|
@ -301,58 +325,56 @@ func (ids *idService) ObservedAddrsFor(local ma.Multiaddr) []ma.Multiaddr {
|
|||
return ids.observedAddrs.AddrsFor(local)
|
||||
}
|
||||
|
||||
// IdentifyConn runs the Identify protocol on a connection.
|
||||
// It returns when we've received the peer's Identify message (or the request fails).
|
||||
// If successful, the peer store will contain the peer's addresses and supported protocols.
|
||||
func (ids *idService) IdentifyConn(c network.Conn) {
|
||||
<-ids.IdentifyWait(c)
|
||||
}
|
||||
|
||||
// IdentifyWait runs the Identify protocol on a connection.
|
||||
// It doesn't block and returns a channel that is closed when we receive
|
||||
// the peer's Identify message (or the request fails).
|
||||
// If successful, the peer store will contain the peer's addresses and supported protocols.
|
||||
func (ids *idService) IdentifyWait(c network.Conn) <-chan struct{} {
|
||||
ids.connsMu.RLock()
|
||||
wait, found := ids.conns[c]
|
||||
ids.connsMu.RUnlock()
|
||||
|
||||
if found {
|
||||
return wait
|
||||
}
|
||||
|
||||
ids.connsMu.Lock()
|
||||
defer ids.connsMu.Unlock()
|
||||
|
||||
wait, found = ids.conns[c]
|
||||
if !found {
|
||||
wait = make(chan struct{})
|
||||
ids.conns[c] = wait
|
||||
|
||||
// Spawn an identify. The connection may actually be closed
|
||||
// already, but that doesn't really matter. We'll fail to open a
|
||||
// stream then forget the connection.
|
||||
go func() {
|
||||
defer close(wait)
|
||||
if err := ids.identifyConn(c); err != nil {
|
||||
log.Warnf("failed to identify %s: %s", c.RemotePeer(), err)
|
||||
ids.emitters.evtPeerIdentificationFailed.Emit(event.EvtPeerIdentificationFailed{Peer: c.RemotePeer(), Reason: err})
|
||||
return
|
||||
}
|
||||
ids.emitters.evtPeerIdentificationCompleted.Emit(event.EvtPeerIdentificationCompleted{Peer: c.RemotePeer()})
|
||||
}()
|
||||
e, found := ids.conns[c]
|
||||
if !found { // No entry found. Connection was most likely closed (and removed from this map) recently.
|
||||
ch := make(chan struct{})
|
||||
close(ch)
|
||||
return ch
|
||||
}
|
||||
|
||||
return wait
|
||||
}
|
||||
if e.IdentifyWaitChan != nil {
|
||||
return e.IdentifyWaitChan
|
||||
}
|
||||
// First call to IdentifyWait for this connection. Create the channel.
|
||||
e.IdentifyWaitChan = make(chan struct{})
|
||||
ids.conns[c] = e
|
||||
|
||||
func (ids *idService) removeConn(c network.Conn) {
|
||||
ids.connsMu.Lock()
|
||||
delete(ids.conns, c)
|
||||
ids.connsMu.Unlock()
|
||||
// Spawn an identify. The connection may actually be closed
|
||||
// already, but that doesn't really matter. We'll fail to open a
|
||||
// stream then forget the connection.
|
||||
go func() {
|
||||
defer close(e.IdentifyWaitChan)
|
||||
if err := ids.identifyConn(c); err != nil {
|
||||
log.Warnf("failed to identify %s: %s", c.RemotePeer(), err)
|
||||
ids.emitters.evtPeerIdentificationFailed.Emit(event.EvtPeerIdentificationFailed{Peer: c.RemotePeer(), Reason: err})
|
||||
return
|
||||
}
|
||||
|
||||
ids.emitters.evtPeerIdentificationCompleted.Emit(event.EvtPeerIdentificationCompleted{Peer: c.RemotePeer()})
|
||||
}()
|
||||
|
||||
return e.IdentifyWaitChan
|
||||
}
|
||||
|
||||
func (ids *idService) identifyConn(c network.Conn) error {
|
||||
s, err := c.NewStream(network.WithUseTransient(context.TODO(), "identify"))
|
||||
if err != nil {
|
||||
log.Debugw("error opening identify stream", "error", err)
|
||||
|
||||
// We usually do this on disconnect, but we may have already
|
||||
// processed the disconnect event.
|
||||
ids.removeConn(c)
|
||||
log.Debugw("error opening identify stream", "peer", c.RemotePeer(), "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -371,39 +393,43 @@ func (ids *idService) identifyConn(c network.Conn) error {
|
|||
return ids.handleIdentifyResponse(s, false)
|
||||
}
|
||||
|
||||
func (ids *idService) sendIdentifyResp(s network.Stream) {
|
||||
if err := s.Scope().SetService(ServiceName); err != nil {
|
||||
log.Warnf("error attaching stream to identify service: %s", err)
|
||||
s.Reset()
|
||||
return
|
||||
}
|
||||
// handlePush handles incoming identify push streams
|
||||
func (ids *idService) handlePush(s network.Stream) {
|
||||
ids.handleIdentifyResponse(s, true)
|
||||
}
|
||||
|
||||
func (ids *idService) handleIdentifyRequest(s network.Stream) {
|
||||
_ = ids.sendIdentifyResp(s)
|
||||
}
|
||||
|
||||
func (ids *idService) sendIdentifyResp(s network.Stream) error {
|
||||
if err := s.Scope().SetService(ServiceName); err != nil {
|
||||
s.Reset()
|
||||
return fmt.Errorf("failed to attaching stream to identify service: %w", err)
|
||||
}
|
||||
defer s.Close()
|
||||
|
||||
c := s.Conn()
|
||||
|
||||
phCh := make(chan *peerHandler, 1)
|
||||
select {
|
||||
case ids.addPeerHandlerCh <- addPeerHandlerReq{c.RemotePeer(), phCh}:
|
||||
case <-ids.ctx.Done():
|
||||
return
|
||||
ids.currentSnapshot.Lock()
|
||||
snapshot := ids.currentSnapshot.snapshot
|
||||
ids.currentSnapshot.Unlock()
|
||||
log.Debugf("%s sending message to %s %s", ID, s.Conn().RemotePeer(), s.Conn().RemoteMultiaddr())
|
||||
if err := ids.writeChunkedIdentifyMsg(s, &snapshot); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var ph *peerHandler
|
||||
select {
|
||||
case ph = <-phCh:
|
||||
case <-ids.ctx.Done():
|
||||
return
|
||||
ids.connsMu.Lock()
|
||||
defer ids.connsMu.Unlock()
|
||||
e, ok := ids.conns[s.Conn()]
|
||||
// The connection might already have been closed.
|
||||
// We *should* receive the Connected notification from the swarm before we're able to accept the peer's
|
||||
// Identify stream, but if that for some reason doesn't work, we also wouldn't have a map entry here.
|
||||
// The only consequence would be that we send a spurious Push to that peer later.
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
if ph == nil {
|
||||
// Peer disconnected, abort.
|
||||
s.Reset()
|
||||
return
|
||||
}
|
||||
|
||||
ids.writeChunkedIdentifyMsg(c, s)
|
||||
log.Debugf("%s sent message to %s %s", ID, c.RemotePeer(), c.RemoteMultiaddr())
|
||||
e.Sequence = snapshot.seq
|
||||
ids.conns[s.Conn()] = e
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ids *idService) handleIdentifyResponse(s network.Stream, isPush bool) error {
|
||||
|
@ -439,6 +465,19 @@ func (ids *idService) handleIdentifyResponse(s network.Stream, isPush bool) erro
|
|||
|
||||
ids.consumeMessage(mes, c, isPush)
|
||||
|
||||
ids.connsMu.Lock()
|
||||
defer ids.connsMu.Unlock()
|
||||
e, ok := ids.conns[c]
|
||||
if !ok { // might already have disconnected
|
||||
return nil
|
||||
}
|
||||
sup, err := ids.Host.Peerstore().SupportsProtocols(c.RemotePeer(), IDPush)
|
||||
if supportsIdentifyPush := err == nil && len(sup) > 0; supportsIdentifyPush {
|
||||
e.PushSupport = identifyPushSupported
|
||||
} else {
|
||||
e.PushSupport = identifyPushUnsupported
|
||||
}
|
||||
ids.conns[c] = e
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -458,20 +497,29 @@ func readAllIDMessages(r pbio.Reader, finalMsg proto.Message) error {
|
|||
return fmt.Errorf("too many parts")
|
||||
}
|
||||
|
||||
func (ids *idService) getSnapshot() *identifySnapshot {
|
||||
snapshot := new(identifySnapshot)
|
||||
func (ids *idService) updateSnapshot() {
|
||||
snapshot := identifySnapshot{
|
||||
addrs: ids.Host.Addrs(),
|
||||
protocols: ids.Host.Mux().Protocols(),
|
||||
}
|
||||
if !ids.disableSignedPeerRecord {
|
||||
if cab, ok := peerstore.GetCertifiedAddrBook(ids.Host.Peerstore()); ok {
|
||||
snapshot.record = cab.GetPeerRecord(ids.Host.ID())
|
||||
}
|
||||
}
|
||||
snapshot.addrs = ids.Host.Addrs()
|
||||
snapshot.protocols = ids.Host.Mux().Protocols()
|
||||
return snapshot
|
||||
|
||||
ids.currentSnapshot.Lock()
|
||||
snapshot.seq = ids.currentSnapshot.snapshot.seq + 1
|
||||
ids.currentSnapshot.snapshot = snapshot
|
||||
ids.currentSnapshot.Unlock()
|
||||
|
||||
log.Debugw("updating snapshot", "seq", snapshot.seq, "addrs", snapshot.addrs)
|
||||
}
|
||||
|
||||
func (ids *idService) writeChunkedIdentifyMsg(c network.Conn, s network.Stream) error {
|
||||
snapshot := ids.getSnapshot()
|
||||
func (ids *idService) writeChunkedIdentifyMsg(s network.Stream, snapshot *identifySnapshot) error {
|
||||
c := s.Conn()
|
||||
log.Debugw("sending snapshot", "seq", snapshot.seq, "protocols", snapshot.protocols, "addrs", snapshot.addrs)
|
||||
|
||||
mes := ids.createBaseIdentifyResponse(c, snapshot)
|
||||
sr := ids.getSignedRecord(snapshot)
|
||||
mes.SignedPeerRecord = sr
|
||||
|
@ -480,21 +528,16 @@ func (ids *idService) writeChunkedIdentifyMsg(c network.Conn, s network.Stream)
|
|||
if sr == nil || proto.Size(mes) <= legacyIDSize {
|
||||
return writer.WriteMsg(mes)
|
||||
}
|
||||
|
||||
mes.SignedPeerRecord = nil
|
||||
if err := writer.WriteMsg(mes); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// then write just the signed record
|
||||
m := &pb.Identify{SignedPeerRecord: sr}
|
||||
err := writer.WriteMsg(m)
|
||||
return err
|
||||
return writer.WriteMsg(&pb.Identify{SignedPeerRecord: sr})
|
||||
}
|
||||
|
||||
func (ids *idService) createBaseIdentifyResponse(
|
||||
conn network.Conn,
|
||||
snapshot *identifySnapshot,
|
||||
) *pb.Identify {
|
||||
func (ids *idService) createBaseIdentifyResponse(conn network.Conn, snapshot *identifySnapshot) *pb.Identify {
|
||||
mes := &pb.Identify{}
|
||||
|
||||
remoteAddr := conn.RemoteMultiaddr()
|
||||
|
@ -805,38 +848,41 @@ func signedPeerRecordFromMessage(msg *pb.Identify) (*record.Envelope, error) {
|
|||
return env, err
|
||||
}
|
||||
|
||||
// netNotifiee defines methods to be used with the IpfsDHT
|
||||
// netNotifiee defines methods to be used with the swarm
|
||||
type netNotifiee idService
|
||||
|
||||
func (nn *netNotifiee) IDService() *idService {
|
||||
return (*idService)(nn)
|
||||
}
|
||||
|
||||
func (nn *netNotifiee) Connected(n network.Network, v network.Conn) {
|
||||
nn.IDService().IdentifyWait(v)
|
||||
func (nn *netNotifiee) Connected(_ network.Network, c network.Conn) {
|
||||
// We rely on this notification being received before we receive any incoming streams on the connection.
|
||||
// The swarm implementation guarantees this.
|
||||
ids := nn.IDService()
|
||||
|
||||
<-ids.setupCompleted
|
||||
|
||||
ids.connsMu.Lock()
|
||||
ids.conns[c] = entry{}
|
||||
ids.connsMu.Unlock()
|
||||
|
||||
nn.IDService().IdentifyWait(c)
|
||||
}
|
||||
|
||||
func (nn *netNotifiee) Disconnected(n network.Network, v network.Conn) {
|
||||
func (nn *netNotifiee) Disconnected(_ network.Network, c network.Conn) {
|
||||
ids := nn.IDService()
|
||||
|
||||
// Stop tracking the connection.
|
||||
ids.removeConn(v)
|
||||
|
||||
// undo the setting of addresses to peer.ConnectedAddrTTL we did
|
||||
ids.addrMu.Lock()
|
||||
defer ids.addrMu.Unlock()
|
||||
|
||||
if ids.Host.Network().Connectedness(v.RemotePeer()) != network.Connected {
|
||||
// consider removing the peer handler for this
|
||||
select {
|
||||
case ids.rmPeerHandlerCh <- rmPeerHandlerReq{v.RemotePeer()}:
|
||||
case <-ids.ctx.Done():
|
||||
return
|
||||
}
|
||||
ids.connsMu.Lock()
|
||||
delete(ids.conns, c)
|
||||
ids.connsMu.Unlock()
|
||||
|
||||
if ids.Host.Network().Connectedness(c.RemotePeer()) != network.Connected {
|
||||
// Last disconnect.
|
||||
ps := ids.Host.Peerstore()
|
||||
ps.UpdateAddrs(v.RemotePeer(), peerstore.ConnectedAddrTTL, peerstore.RecentlyConnectedAddrTTL)
|
||||
// Undo the setting of addresses to peer.ConnectedAddrTTL we did
|
||||
ids.addrMu.Lock()
|
||||
defer ids.addrMu.Unlock()
|
||||
ids.Host.Peerstore().UpdateAddrs(c.RemotePeer(), peerstore.ConnectedAddrTTL, peerstore.RecentlyConnectedAddrTTL)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -25,6 +25,7 @@ func TestFastDisconnect(t *testing.T) {
|
|||
ids, err := NewIDService(target)
|
||||
require.NoError(t, err)
|
||||
defer ids.Close()
|
||||
ids.Start()
|
||||
|
||||
sync := make(chan struct{})
|
||||
target.SetStreamHandler(ID, func(s network.Stream) {
|
||||
|
@ -50,7 +51,7 @@ func TestFastDisconnect(t *testing.T) {
|
|||
// This should not block indefinitely, or panic, or anything like that.
|
||||
//
|
||||
// However, if we have a bug, that _could_ happen.
|
||||
ids.sendIdentifyResp(s)
|
||||
ids.handleIdentifyRequest(s)
|
||||
|
||||
// Ok, allow the outer test to continue.
|
||||
select {
|
||||
|
|
|
@ -1,14 +0,0 @@
|
|||
package identify
|
||||
|
||||
import (
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
)
|
||||
|
||||
// IDPush is the protocol.ID of the Identify push protocol.
|
||||
// It sends full identify messages containing the current state of the peer.
|
||||
const IDPush = "/ipfs/id/push/1.0.0"
|
||||
|
||||
// pushHandler handles incoming identify push streams. The behaviour is identical to the ordinary identify protocol.
|
||||
func (ids *idService) pushHandler(s network.Stream) {
|
||||
ids.handleIdentifyResponse(s, true)
|
||||
}
|
|
@ -165,10 +165,12 @@ func TestIDService(t *testing.T) {
|
|||
ids1, err := identify.NewIDService(h1)
|
||||
require.NoError(t, err)
|
||||
defer ids1.Close()
|
||||
ids1.Start()
|
||||
|
||||
ids2, err := identify.NewIDService(h2)
|
||||
require.NoError(t, err)
|
||||
defer ids2.Close()
|
||||
ids2.Start()
|
||||
|
||||
sub, err := ids1.Host.EventBus().Subscribe(new(event.EvtPeerIdentificationCompleted))
|
||||
if err != nil {
|
||||
|
@ -322,12 +324,15 @@ func TestLocalhostAddrFiltering(t *testing.T) {
|
|||
|
||||
ids1, err := identify.NewIDService(p1)
|
||||
require.NoError(t, err)
|
||||
ids1.Start()
|
||||
|
||||
ids2, err := identify.NewIDService(p2)
|
||||
require.NoError(t, err)
|
||||
ids2.Start()
|
||||
|
||||
ids3, err := identify.NewIDService(p3)
|
||||
require.NoError(t, err)
|
||||
ids3.Start()
|
||||
|
||||
defer func() {
|
||||
ids1.Close()
|
||||
|
@ -360,6 +365,7 @@ func TestLocalhostAddrFiltering(t *testing.T) {
|
|||
|
||||
// TestIdentifyPushWhileIdentifyingConn tests that the host waits to push updates if an identify is ongoing.
|
||||
func TestIdentifyPushWhileIdentifyingConn(t *testing.T) {
|
||||
t.Skip()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
|
@ -367,12 +373,16 @@ func TestIdentifyPushWhileIdentifyingConn(t *testing.T) {
|
|||
h2 := blhost.NewBlankHost(swarmt.GenSwarm(t))
|
||||
defer h2.Close()
|
||||
defer h1.Close()
|
||||
t.Log("h1:", h1.ID())
|
||||
t.Log("h2:", h2.ID())
|
||||
|
||||
ids1, err := identify.NewIDService(h1)
|
||||
require.NoError(t, err)
|
||||
ids1.Start()
|
||||
|
||||
ids2, err := identify.NewIDService(h2)
|
||||
require.NoError(t, err)
|
||||
ids2.Start()
|
||||
|
||||
defer ids1.Close()
|
||||
defer ids2.Close()
|
||||
|
@ -440,11 +450,13 @@ func TestIdentifyPushOnAddrChange(t *testing.T) {
|
|||
|
||||
ids1, err := identify.NewIDService(h1)
|
||||
require.NoError(t, err)
|
||||
defer ids1.Close()
|
||||
ids1.Start()
|
||||
|
||||
ids2, err := identify.NewIDService(h2)
|
||||
require.NoError(t, err)
|
||||
|
||||
defer ids1.Close()
|
||||
defer ids2.Close()
|
||||
ids2.Start()
|
||||
|
||||
testKnowsAddrs(t, h1, h2p, []ma.Multiaddr{}) // nothing
|
||||
testKnowsAddrs(t, h2, h1p, []ma.Multiaddr{}) // nothing
|
||||
|
@ -568,14 +580,13 @@ func TestSendPush(t *testing.T) {
|
|||
|
||||
ids1, err := identify.NewIDService(h1)
|
||||
require.NoError(t, err)
|
||||
defer ids1.Close()
|
||||
ids1.Start()
|
||||
|
||||
ids2, err := identify.NewIDService(h2)
|
||||
require.NoError(t, err)
|
||||
|
||||
defer func() {
|
||||
ids1.Close()
|
||||
ids2.Close()
|
||||
}()
|
||||
defer ids2.Close()
|
||||
ids2.Start()
|
||||
|
||||
err = h1.Connect(ctx, peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()})
|
||||
require.NoError(t, err)
|
||||
|
@ -624,10 +635,12 @@ func TestLargeIdentifyMessage(t *testing.T) {
|
|||
ids1, err := identify.NewIDService(h1)
|
||||
require.NoError(t, err)
|
||||
defer ids1.Close()
|
||||
ids1.Start()
|
||||
|
||||
ids2, err := identify.NewIDService(h2)
|
||||
require.NoError(t, err)
|
||||
defer ids2.Close()
|
||||
ids2.Start()
|
||||
|
||||
sub, err := ids1.Host.EventBus().Subscribe(new(event.EvtPeerIdentificationCompleted))
|
||||
require.NoError(t, err)
|
||||
|
@ -729,12 +742,13 @@ func TestLargePushMessage(t *testing.T) {
|
|||
|
||||
ids1, err := identify.NewIDService(h1)
|
||||
require.NoError(t, err)
|
||||
defer ids1.Close()
|
||||
ids1.Start()
|
||||
|
||||
ids2, err := identify.NewIDService(h2)
|
||||
require.NoError(t, err)
|
||||
|
||||
defer ids1.Close()
|
||||
defer ids2.Close()
|
||||
ids2.Start()
|
||||
|
||||
testKnowsAddrs(t, h1, h2p, []ma.Multiaddr{}) // nothing
|
||||
testKnowsAddrs(t, h2, h1p, []ma.Multiaddr{}) // nothing
|
||||
|
@ -805,12 +819,14 @@ func TestIdentifyResponseReadTimeout(t *testing.T) {
|
|||
h2p := h2.ID()
|
||||
ids1, err := identify.NewIDService(h1)
|
||||
require.NoError(t, err)
|
||||
defer ids1.Close()
|
||||
ids1.Start()
|
||||
|
||||
ids2, err := identify.NewIDService(h2)
|
||||
require.NoError(t, err)
|
||||
|
||||
defer ids1.Close()
|
||||
defer ids2.Close()
|
||||
ids2.Start()
|
||||
|
||||
// remote stream handler will just hang and not send back an identify response
|
||||
h2.SetStreamHandler(identify.ID, func(s network.Stream) {
|
||||
time.Sleep(100 * time.Second)
|
||||
|
@ -851,12 +867,13 @@ func TestIncomingIDStreamsTimeout(t *testing.T) {
|
|||
|
||||
ids1, err := identify.NewIDService(h1)
|
||||
require.NoError(t, err)
|
||||
defer ids1.Close()
|
||||
ids1.Start()
|
||||
|
||||
ids2, err := identify.NewIDService(h2)
|
||||
require.NoError(t, err)
|
||||
|
||||
defer ids1.Close()
|
||||
defer ids2.Close()
|
||||
ids2.Start()
|
||||
|
||||
h2p := h2.ID()
|
||||
h2pi := h2.Peerstore().PeerInfo(h2p)
|
||||
|
|
|
@ -1,131 +0,0 @@
|
|||
package identify
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/libp2p/go-libp2p/core/record"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
var errProtocolNotSupported = errors.New("protocol not supported")
|
||||
|
||||
type identifySnapshot struct {
|
||||
protocols []protocol.ID
|
||||
addrs []ma.Multiaddr
|
||||
record *record.Envelope
|
||||
}
|
||||
|
||||
type peerHandler struct {
|
||||
ids *idService
|
||||
|
||||
cancel context.CancelFunc
|
||||
|
||||
pid peer.ID
|
||||
|
||||
pushCh chan struct{}
|
||||
}
|
||||
|
||||
func newPeerHandler(pid peer.ID, ids *idService) *peerHandler {
|
||||
return &peerHandler{
|
||||
ids: ids,
|
||||
pid: pid,
|
||||
pushCh: make(chan struct{}, 1),
|
||||
}
|
||||
}
|
||||
|
||||
// start starts a handler. This may only be called on a stopped handler, and must
|
||||
// not be called concurrently with start/stop.
|
||||
//
|
||||
// This may _not_ be called on a _canceled_ handler. I.e., a handler where the
|
||||
// passed in context expired.
|
||||
func (ph *peerHandler) start(ctx context.Context, onExit func()) {
|
||||
if ph.cancel != nil {
|
||||
// If this happens, we have a bug. It means we tried to start
|
||||
// before we stopped.
|
||||
panic("peer handler already running")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
ph.cancel = cancel
|
||||
|
||||
go func() {
|
||||
ph.loop(ctx)
|
||||
onExit()
|
||||
}()
|
||||
}
|
||||
|
||||
// stop stops a handler. This may not be called concurrently with any
|
||||
// other calls to stop/start.
|
||||
func (ph *peerHandler) stop() error {
|
||||
if ph.cancel != nil {
|
||||
ph.cancel()
|
||||
ph.cancel = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// per peer loop for pushing updates
|
||||
func (ph *peerHandler) loop(ctx context.Context) {
|
||||
for {
|
||||
select {
|
||||
// our listen addresses have changed, send an IDPush.
|
||||
case <-ph.pushCh:
|
||||
if err := ph.sendPush(ctx); err != nil {
|
||||
log.Warnw("failed to send Identify Push", "peer", ph.pid, "error", err)
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ph *peerHandler) sendPush(ctx context.Context) error {
|
||||
dp, err := ph.openStream(ctx, IDPush)
|
||||
if err == errProtocolNotSupported {
|
||||
log.Debugw("not sending push as peer does not support protocol", "peer", ph.pid)
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open push stream: %w", err)
|
||||
}
|
||||
defer dp.Close()
|
||||
|
||||
if err := ph.ids.writeChunkedIdentifyMsg(dp.Conn(), dp); err != nil {
|
||||
_ = dp.Reset()
|
||||
return fmt.Errorf("failed to send push message: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ph *peerHandler) openStream(ctx context.Context, proto protocol.ID) (network.Stream, error) {
|
||||
// wait for the other peer to send us an Identify response on "all" connections we have with it
|
||||
// so we can look at it's supported protocols and avoid a multistream-select roundtrip to negotiate the protocol
|
||||
// if we know for a fact that it doesn't support the protocol.
|
||||
conns := ph.ids.Host.Network().ConnsToPeer(ph.pid)
|
||||
for _, c := range conns {
|
||||
select {
|
||||
case <-ph.ids.IdentifyWait(c):
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
if sup, err := ph.ids.Host.Peerstore().SupportsProtocols(ph.pid, proto); err != nil || len(sup) == 0 {
|
||||
return nil, errProtocolNotSupported
|
||||
}
|
||||
ph.ids.pushSemaphore <- struct{}{}
|
||||
defer func() {
|
||||
<-ph.ids.pushSemaphore
|
||||
}()
|
||||
|
||||
// negotiate a stream without opening a new connection as we "should" already have a connection.
|
||||
ctx, cancel := context.WithTimeout(network.WithNoDial(ctx, "should already have connection"), 30*time.Second)
|
||||
defer cancel()
|
||||
return ph.ids.Host.NewStream(ctx, ph.pid, proto)
|
||||
}
|
|
@ -1,41 +0,0 @@
|
|||
package identify
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
blhost "github.com/libp2p/go-libp2p/p2p/host/blank"
|
||||
swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestHandlerClose(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
h1 := blhost.NewBlankHost(swarmt.GenSwarm(t))
|
||||
defer h1.Close()
|
||||
ids1, err := NewIDService(h1)
|
||||
require.NoError(t, err)
|
||||
ph := newPeerHandler(h1.ID(), ids1)
|
||||
closedCh := make(chan struct{}, 2)
|
||||
ph.start(ctx, func() {
|
||||
closedCh <- struct{}{}
|
||||
})
|
||||
|
||||
require.NoError(t, ph.stop())
|
||||
select {
|
||||
case <-closedCh:
|
||||
case <-time.After(time.Second):
|
||||
t.Fatal("expected the handler to close")
|
||||
}
|
||||
|
||||
require.NoError(t, ph.stop())
|
||||
select {
|
||||
case <-closedCh:
|
||||
t.Fatal("expected only one close event")
|
||||
case <-time.After(10 * time.Millisecond):
|
||||
}
|
||||
}
|
|
@ -11,7 +11,6 @@ import (
|
|||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
|
||||
swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
|
||||
|
@ -60,17 +59,11 @@ func TestReconnect5(t *testing.T) {
|
|||
}
|
||||
|
||||
func runRound(t *testing.T, hosts []host.Host) {
|
||||
for _, h := range hosts {
|
||||
h.SetStreamHandler(protocol.TestingID, EchoStreamHandler)
|
||||
}
|
||||
|
||||
// connect all hosts
|
||||
for _, h1 := range hosts {
|
||||
h1.SetStreamHandler(protocol.TestingID, EchoStreamHandler)
|
||||
|
||||
for _, h2 := range hosts {
|
||||
if h1.ID() >= h2.ID() {
|
||||
continue
|
||||
}
|
||||
require.NoError(t, h1.Connect(context.Background(), peer.AddrInfo{ID: h2.ID(), Addrs: h2.Peerstore().Addrs(h2.ID())}))
|
||||
h1.Peerstore().AddAddrs(h2.ID(), h2.Addrs(), time.Hour)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -107,9 +100,6 @@ func runRound(t *testing.T, hosts []host.Host) {
|
|||
// close connection
|
||||
cs := h1.Network().Conns()
|
||||
for _, c := range cs {
|
||||
if c.LocalPeer() > c.RemotePeer() {
|
||||
continue
|
||||
}
|
||||
c.Close()
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue