package identify

import (
	"context"
	"fmt"
	"io"
	"sync"
	"time"

	"github.com/libp2p/go-libp2p-core/crypto"
	"github.com/libp2p/go-libp2p-core/event"
	"github.com/libp2p/go-libp2p-core/host"
	"github.com/libp2p/go-libp2p-core/network"
	"github.com/libp2p/go-libp2p-core/peer"
	"github.com/libp2p/go-libp2p-core/peerstore"
	"github.com/libp2p/go-libp2p-core/record"

	"github.com/libp2p/go-eventbus"
	"github.com/libp2p/go-msgio/protoio"

	pb "github.com/libp2p/go-libp2p/p2p/protocol/identify/pb"

	ma "github.com/multiformats/go-multiaddr"
	manet "github.com/multiformats/go-multiaddr/net"
	msmux "github.com/multiformats/go-multistream"

	"github.com/gogo/protobuf/proto"
	logging "github.com/ipfs/go-log/v2"
)

var log = logging.Logger("net/identify")

// ID is the protocol.ID of version 1.0.0 of the identify service.
const ID = "/ipfs/id/1.0.0"

// LibP2PVersion holds the current protocol version for a client running this code.
// TODO(jbenet): fix the versioning mess.
// XXX: Don't change this till 2020. You'll break all go-ipfs versions prior to
// 0.4.17 which asserted an exact version match.
const LibP2PVersion = "ipfs/0.1.0"

// ServiceName is the service name attached to identify streams
// (see the Scope().SetService calls below).
const ServiceName = "libp2p.identify"

const maxPushConcurrency = 32

// StreamReadTimeout is the read timeout on all incoming Identify family streams.
var StreamReadTimeout = 60 * time.Second

var (
	legacyIDSize     = 2 * 1024 // 2K
	signedIDSize     = 8 * 1024 // 8K
	maxMessages      = 10
	defaultUserAgent = "github.com/libp2p/go-libp2p"
)

type addPeerHandlerReq struct {
	rp   peer.ID
	resp chan *peerHandler
}

type rmPeerHandlerReq struct {
	p peer.ID
}

// IDService exposes the identify service to consumers of this package.
type IDService interface {
	// IdentifyConn synchronously triggers an identify request on the connection and
	// waits for it to complete. If the connection is being identified by another
	// caller, this call will wait. If the connection has already been identified,
	// it will return immediately.
	IdentifyConn(network.Conn)
	// IdentifyWait triggers an identify (if the connection has not already been
	// identified) and returns a channel that is closed when the identify protocol
	// completes.
	IdentifyWait(network.Conn) <-chan struct{}
	// OwnObservedAddrs returns the addresses peers have reported we've dialed from.
	OwnObservedAddrs() []ma.Multiaddr
	// ObservedAddrsFor returns the addresses peers have reported we've dialed from,
	// for a specific local address.
	ObservedAddrsFor(local ma.Multiaddr) []ma.Multiaddr
	io.Closer
}
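
// A minimal usage sketch (h is a previously constructed host.Host and conn is
// any of its network.Conn values; both are illustrative assumptions, not
// defined in this file):
//
//	ids, err := NewIDService(h)
//	if err != nil {
//		// handle error
//	}
//	defer ids.Close()
//	// Block until the remote peer on conn has been identified.
//	ids.IdentifyConn(conn)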

// idService is a structure that implements ProtocolIdentify.
// It is a trivial service that gives the other peer some
// useful information about the local peer. A sort of hello.
//
// The idService sends:
//   - Our IPFS Protocol Version
//   - Our IPFS Agent Version
//   - Our public Listen Addresses
type idService struct {
	Host      host.Host
	UserAgent string

	ctx       context.Context
	ctxCancel context.CancelFunc
	// track resources that need to be shut down before we shut down
	refCount sync.WaitGroup

	disableSignedPeerRecord bool

	// Identified connections (finished and in progress).
	connsMu sync.RWMutex
	conns   map[network.Conn]chan struct{}

	addrMu sync.Mutex

	// our own observed addresses.
	observedAddrs *ObservedAddrManager

	emitters struct {
		evtPeerProtocolsUpdated        event.Emitter
		evtPeerIdentificationCompleted event.Emitter
		evtPeerIdentificationFailed    event.Emitter
	}

	addPeerHandlerCh chan addPeerHandlerReq
	rmPeerHandlerCh  chan rmPeerHandlerReq

	// pushSemaphore limits the push/delta concurrency to avoid storms
	// that clog the transient scope.
	pushSemaphore chan struct{}
}

// NewIDService constructs a new *idService and activates it by
// attaching its stream handler to the given host.Host.
func NewIDService(h host.Host, opts ...Option) (*idService, error) {
	var cfg config
	for _, opt := range opts {
		opt(&cfg)
	}

	userAgent := defaultUserAgent
	if cfg.userAgent != "" {
		userAgent = cfg.userAgent
	}

	s := &idService{
		Host:      h,
		UserAgent: userAgent,

		conns: make(map[network.Conn]chan struct{}),

		disableSignedPeerRecord: cfg.disableSignedPeerRecord,

		addPeerHandlerCh: make(chan addPeerHandlerReq),
		rmPeerHandlerCh:  make(chan rmPeerHandlerReq),

		pushSemaphore: make(chan struct{}, maxPushConcurrency),
	}
	s.ctx, s.ctxCancel = context.WithCancel(context.Background())

	observedAddrs, err := NewObservedAddrManager(h)
	if err != nil {
		return nil, fmt.Errorf("failed to create observed address manager: %s", err)
	}
	s.observedAddrs = observedAddrs

	// handle local protocol handler updates, and push deltas to peers.
	s.refCount.Add(1)
	go s.loop()

	s.emitters.evtPeerProtocolsUpdated, err = h.EventBus().Emitter(&event.EvtPeerProtocolsUpdated{})
	if err != nil {
		log.Warnf("identify service not emitting peer protocol updates; err: %s", err)
	}
	s.emitters.evtPeerIdentificationCompleted, err = h.EventBus().Emitter(&event.EvtPeerIdentificationCompleted{})
	if err != nil {
		log.Warnf("identify service not emitting identification completed events; err: %s", err)
	}
	s.emitters.evtPeerIdentificationFailed, err = h.EventBus().Emitter(&event.EvtPeerIdentificationFailed{})
	if err != nil {
		log.Warnf("identify service not emitting identification failed events; err: %s", err)
	}

	// register protocols that do not depend on peer records.
	h.SetStreamHandler(IDDelta, s.deltaHandler)
	h.SetStreamHandler(ID, s.sendIdentifyResp)
	h.SetStreamHandler(IDPush, s.pushHandler)

	h.Network().Notify((*netNotifiee)(s))
	return s, nil
}
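
// A construction sketch with a functional option (this assumes the package's
// UserAgent Option helper; the version string is made up for illustration):
//
//	ids, err := NewIDService(h, UserAgent("my-app/1.2.3"))
//	if err != nil {
//		// handle error
//	}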

// loop owns the peerHandler map, servicing add/remove requests and fanning
// out local address and protocol updates to the per-peer handlers.
func (ids *idService) loop() {
	defer ids.refCount.Done()

	phs := make(map[peer.ID]*peerHandler)
	sub, err := ids.Host.EventBus().Subscribe([]interface{}{&event.EvtLocalProtocolsUpdated{},
		&event.EvtLocalAddressesUpdated{}}, eventbus.BufSize(256))
	if err != nil {
		log.Errorf("failed to subscribe to events on the bus, err=%s", err)
		return
	}

	phClosedCh := make(chan peer.ID)

	defer func() {
		sub.Close()
		// The context will cancel the workers. Now, wait for them to
		// exit.
		for range phs {
			<-phClosedCh
		}
	}()

	// Use a fresh context for the handlers. Otherwise, they'll get canceled
	// before we're ready to shutdown and they'll have "stopped" without us
	// _calling_ stop.
	handlerCtx, cancel := context.WithCancel(context.Background())
	defer cancel()

	for {
		select {
		case addReq := <-ids.addPeerHandlerCh:
			rp := addReq.rp
			ph, ok := phs[rp]
			if !ok && ids.Host.Network().Connectedness(rp) == network.Connected {
				ph = newPeerHandler(rp, ids)
				ph.start(handlerCtx, func() { phClosedCh <- rp })
				phs[rp] = ph
			}
			addReq.resp <- ph

		case rmReq := <-ids.rmPeerHandlerCh:
			rp := rmReq.p
			if ids.Host.Network().Connectedness(rp) != network.Connected {
				// Before we remove the peerHandler, we should ensure that it will not
				// send any more messages. Otherwise, we might create a new handler and
				// the Identify response synchronized with the new handler might be
				// overwritten by a message sent by this "old" handler.
				ph, ok := phs[rp]
				if !ok {
					// move on, move on, there's nothing to see here.
					continue
				}
				// This is idempotent if already stopped.
				ph.stop()
			}

		case rp := <-phClosedCh:
			ph := phs[rp]

			// If we are connected to the peer, it means that we got a connection from the peer
			// before we could finish removing its handler on the previous disconnection.
			// If we delete the handler, we won't be able to push updates to it
			// until we see a new connection. So, we should restart the handler.
			// The fact that we got the handler on this channel means that its context and handler
			// have completed, because we write the handler to this channel only after it has closed.
			if ids.Host.Network().Connectedness(rp) == network.Connected {
				ph.start(handlerCtx, func() { phClosedCh <- rp })
			} else {
				delete(phs, rp)
			}

		case e, more := <-sub.Out():
			if !more {
				return
			}
			switch e.(type) {
			case event.EvtLocalAddressesUpdated:
				for pid := range phs {
					select {
					case phs[pid].pushCh <- struct{}{}:
					default:
						log.Debugf("dropping addr updated message for %s as buffer is full", pid.Pretty())
					}
				}

			case event.EvtLocalProtocolsUpdated:
				for pid := range phs {
					select {
					case phs[pid].deltaCh <- struct{}{}:
					default:
						log.Debugf("dropping protocol updated message for %s as buffer is full", pid.Pretty())
					}
				}
			}

		case <-ids.ctx.Done():
			return
		}
	}
}

// Close shuts down the idService.
func (ids *idService) Close() error {
	ids.ctxCancel()
	ids.observedAddrs.Close()
	ids.refCount.Wait()
	return nil
}

func (ids *idService) OwnObservedAddrs() []ma.Multiaddr {
	return ids.observedAddrs.Addrs()
}

func (ids *idService) ObservedAddrsFor(local ma.Multiaddr) []ma.Multiaddr {
	return ids.observedAddrs.AddrsFor(local)
}

func (ids *idService) IdentifyConn(c network.Conn) {
	<-ids.IdentifyWait(c)
}

func (ids *idService) IdentifyWait(c network.Conn) <-chan struct{} {
	ids.connsMu.RLock()
	wait, found := ids.conns[c]
	ids.connsMu.RUnlock()

	if found {
		return wait
	}

	ids.connsMu.Lock()
	defer ids.connsMu.Unlock()

	wait, found = ids.conns[c]
	if !found {
		wait = make(chan struct{})
		ids.conns[c] = wait

		// Spawn an identify. The connection may actually be closed
		// already, but that doesn't really matter. We'll fail to open a
		// stream then forget the connection.
		go func() {
			defer close(wait)
			if err := ids.identifyConn(c); err != nil {
				log.Warnf("failed to identify %s: %s", c.RemotePeer(), err)
				ids.emitters.evtPeerIdentificationFailed.Emit(event.EvtPeerIdentificationFailed{Peer: c.RemotePeer(), Reason: err})
				return
			}
			ids.emitters.evtPeerIdentificationCompleted.Emit(event.EvtPeerIdentificationCompleted{Peer: c.RemotePeer()})
		}()
	}

	return wait
}
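
// Note on the locking above: IdentifyWait deliberately re-checks the conns
// map after trading the read lock for the write lock. Another goroutine may
// have registered the same connection in between, and only the first caller
// should spawn identifyConn, so the double-checked pattern keeps the identify
// goroutine from being started twice for one connection.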

func (ids *idService) removeConn(c network.Conn) {
	ids.connsMu.Lock()
	delete(ids.conns, c)
	ids.connsMu.Unlock()
}

func (ids *idService) identifyConn(c network.Conn) error {
	s, err := c.NewStream(network.WithUseTransient(context.TODO(), "identify"))
	if err != nil {
		log.Debugw("error opening identify stream", "error", err)

		// We usually do this on disconnect, but we may have already
		// processed the disconnect event.
		ids.removeConn(c)
		return err
	}

	if err := s.SetProtocol(ID); err != nil {
		log.Warnf("error setting identify protocol for stream: %s", err)
		s.Reset()
	}

	// ok give the response to our handler.
	if err := msmux.SelectProtoOrFail(ID, s); err != nil {
		log.Infow("failed to negotiate identify protocol with peer", "peer", c.RemotePeer(), "error", err)
		s.Reset()
		return err
	}

	return ids.handleIdentifyResponse(s)
}

func (ids *idService) sendIdentifyResp(s network.Stream) {
	if err := s.Scope().SetService(ServiceName); err != nil {
		log.Warnf("error attaching stream to identify service: %s", err)
		s.Reset()
		return
	}

	defer s.Close()

	c := s.Conn()

	phCh := make(chan *peerHandler, 1)
	select {
	case ids.addPeerHandlerCh <- addPeerHandlerReq{c.RemotePeer(), phCh}:
	case <-ids.ctx.Done():
		return
	}

	var ph *peerHandler
	select {
	case ph = <-phCh:
	case <-ids.ctx.Done():
		return
	}

	if ph == nil {
		// Peer disconnected, abort.
		s.Reset()
		return
	}

	ph.snapshotMu.RLock()
	snapshot := ph.snapshot
	ph.snapshotMu.RUnlock()
	ids.writeChunkedIdentifyMsg(c, snapshot, s)
	log.Debugf("%s sent message to %s %s", ID, c.RemotePeer(), c.RemoteMultiaddr())
}

func (ids *idService) handleIdentifyResponse(s network.Stream) error {
	if err := s.Scope().SetService(ServiceName); err != nil {
		log.Warnf("error attaching stream to identify service: %s", err)
		s.Reset()
		return err
	}

	if err := s.Scope().ReserveMemory(signedIDSize, network.ReservationPriorityAlways); err != nil {
		log.Warnf("error reserving memory for identify stream: %s", err)
		s.Reset()
		return err
	}
	defer s.Scope().ReleaseMemory(signedIDSize)

	_ = s.SetReadDeadline(time.Now().Add(StreamReadTimeout))

	c := s.Conn()

	r := protoio.NewDelimitedReader(s, signedIDSize)
	mes := &pb.Identify{}

	if err := readAllIDMessages(r, mes); err != nil {
		log.Warn("error reading identify message: ", err)
		s.Reset()
		return err
	}

	defer s.Close()

	log.Debugf("%s received message from %s %s", s.Protocol(), c.RemotePeer(), c.RemoteMultiaddr())

	ids.consumeMessage(mes, c)

	return nil
}

func readAllIDMessages(r protoio.Reader, finalMsg proto.Message) error {
	mes := &pb.Identify{}
	for i := 0; i < maxMessages; i++ {
		switch err := r.ReadMsg(mes); err {
		case io.EOF:
			return nil
		case nil:
			proto.Merge(finalMsg, mes)
		default:
			return err
		}
	}

	return fmt.Errorf("too many parts")
}
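
// proto.Merge appends repeated fields and overwrites scalar ones, so merging
// each chunk into finalMsg above reassembles a chunked identify response: for
// example, a first message carrying ListenAddrs and a second one carrying
// only SignedPeerRecord combine into a single complete pb.Identify.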

func (ids *idService) getSnapshot() *identifySnapshot {
	snapshot := new(identifySnapshot)
	if !ids.disableSignedPeerRecord {
		if cab, ok := peerstore.GetCertifiedAddrBook(ids.Host.Peerstore()); ok {
			snapshot.record = cab.GetPeerRecord(ids.Host.ID())
		}
	}
	snapshot.addrs = ids.Host.Addrs()
	snapshot.protocols = ids.Host.Mux().Protocols()
	return snapshot
}

func (ids *idService) writeChunkedIdentifyMsg(c network.Conn, snapshot *identifySnapshot, s network.Stream) error {
	mes := ids.createBaseIdentifyResponse(c, snapshot)
	sr := ids.getSignedRecord(snapshot)
	mes.SignedPeerRecord = sr
	writer := protoio.NewDelimitedWriter(s)

	if sr == nil || proto.Size(mes) <= legacyIDSize {
		return writer.WriteMsg(mes)
	}
	mes.SignedPeerRecord = nil
	if err := writer.WriteMsg(mes); err != nil {
		return err
	}

	// then write just the signed record
	m := &pb.Identify{SignedPeerRecord: sr}
	err := writer.WriteMsg(m)
	return err
}
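
// The chunking above is a backwards-compatibility measure: older peers read
// identify messages with a 2K limit (legacyIDSize). When the signed peer
// record would push the response past that limit, it is written as a second
// delimited message instead, which readAllIDMessages on the receiving side
// merges back into one message.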

func (ids *idService) createBaseIdentifyResponse(
	conn network.Conn,
	snapshot *identifySnapshot,
) *pb.Identify {
	mes := &pb.Identify{}

	remoteAddr := conn.RemoteMultiaddr()
	localAddr := conn.LocalMultiaddr()

	// set protocols this node is currently handling
	mes.Protocols = snapshot.protocols

	// observed address so other side is informed of their
	// "public" address, at least in relation to us.
	mes.ObservedAddr = remoteAddr.Bytes()

	// populate unsigned addresses.
	// peers that do not yet support signed addresses will need this.
	// Note: LocalMultiaddr is sometimes 0.0.0.0
	viaLoopback := manet.IsIPLoopback(localAddr) || manet.IsIPLoopback(remoteAddr)
	mes.ListenAddrs = make([][]byte, 0, len(snapshot.addrs))
	for _, addr := range snapshot.addrs {
		if !viaLoopback && manet.IsIPLoopback(addr) {
			continue
		}
		mes.ListenAddrs = append(mes.ListenAddrs, addr.Bytes())
	}

	// set our public key
	ownKey := ids.Host.Peerstore().PubKey(ids.Host.ID())

	// check if we even have a public key.
	if ownKey == nil {
		// public key is nil. We are either using insecure transport or something erratic happened.
		// check if we're even operating in "secure mode"
		if ids.Host.Peerstore().PrivKey(ids.Host.ID()) != nil {
			// private key is present. But NO public key. Something bad happened.
			log.Errorf("did not have own public key in Peerstore")
		}
		// if neither key is present, it is safe to assume that we are using an insecure transport.
	} else {
		// public key is present. Safe to proceed.
		if kb, err := crypto.MarshalPublicKey(ownKey); err != nil {
			log.Errorf("failed to convert key to bytes")
		} else {
			mes.PublicKey = kb
		}
	}

	// set protocol versions
	pv := LibP2PVersion
	av := ids.UserAgent
	mes.ProtocolVersion = &pv
	mes.AgentVersion = &av

	return mes
}

func (ids *idService) getSignedRecord(snapshot *identifySnapshot) []byte {
	if ids.disableSignedPeerRecord || snapshot.record == nil {
		return nil
	}

	recBytes, err := snapshot.record.Marshal()
	if err != nil {
		log.Errorw("failed to marshal signed record", "err", err)
		return nil
	}

	return recBytes
}

func (ids *idService) consumeMessage(mes *pb.Identify, c network.Conn) {
	p := c.RemotePeer()

	// mes.Protocols
	ids.Host.Peerstore().SetProtocols(p, mes.Protocols...)

	// mes.ObservedAddr
	ids.consumeObservedAddress(mes.GetObservedAddr(), c)

	// mes.ListenAddrs
	laddrs := mes.GetListenAddrs()
	lmaddrs := make([]ma.Multiaddr, 0, len(laddrs))
	for _, addr := range laddrs {
		maddr, err := ma.NewMultiaddrBytes(addr)
		if err != nil {
			log.Debugf("%s failed to parse multiaddr from %s %s", ID,
				p, c.RemoteMultiaddr())
			continue
		}
		lmaddrs = append(lmaddrs, maddr)
	}

	// NOTE: Do not add `c.RemoteMultiaddr()` to the peerstore if the remote
	// peer doesn't tell us to do so. Otherwise, we'll advertise it.
	//
	// This can cause an "addr-splosion" issue where the network will slowly
	// gossip and collect observed but unadvertised addresses. Given a NAT
	// that picks random source ports, this can cause DHT nodes to collect
	// many undialable addresses for other peers.

	// add certified addresses for the peer, if they sent us a signed peer record;
	// otherwise use the unsigned addresses.
	signedPeerRecord, err := signedPeerRecordFromMessage(mes)
	if err != nil {
		log.Errorf("error getting peer record from Identify message: %v", err)
	}

	// Extend the TTLs on the known (probably) good addresses.
	// Taking the lock ensures that we don't concurrently process a disconnect.
	ids.addrMu.Lock()
	ttl := peerstore.RecentlyConnectedAddrTTL
	if ids.Host.Network().Connectedness(p) == network.Connected {
		ttl = peerstore.ConnectedAddrTTL
	}

	// Downgrade connected and recently connected addrs to a temporary TTL.
	for _, ttl := range []time.Duration{
		peerstore.RecentlyConnectedAddrTTL,
		peerstore.ConnectedAddrTTL,
	} {
		ids.Host.Peerstore().UpdateAddrs(p, ttl, peerstore.TempAddrTTL)
	}

	// add signed addrs if we have them and the peerstore supports them
	cab, ok := peerstore.GetCertifiedAddrBook(ids.Host.Peerstore())
	if ok && signedPeerRecord != nil {
		_, addErr := cab.ConsumePeerRecord(signedPeerRecord, ttl)
		if addErr != nil {
			log.Debugf("error adding signed addrs to peerstore: %v", addErr)
		}
	} else {
		ids.Host.Peerstore().AddAddrs(p, lmaddrs, ttl)
	}

	// Finally, expire all temporary addrs.
	ids.Host.Peerstore().UpdateAddrs(p, peerstore.TempAddrTTL, 0)
	ids.addrMu.Unlock()

	log.Debugf("%s received listen addrs for %s: %s", c.LocalPeer(), c.RemotePeer(), lmaddrs)

	// get protocol versions
	pv := mes.GetProtocolVersion()
	av := mes.GetAgentVersion()

	ids.Host.Peerstore().Put(p, "ProtocolVersion", pv)
	ids.Host.Peerstore().Put(p, "AgentVersion", av)

	// get the key from the other side. we may not have it (no-auth transport)
	ids.consumeReceivedPubKey(c, mes.PublicKey)
}
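
// The TTL handling in consumeMessage is effectively a mark-and-sweep: every
// previously known connected/recently-connected address is first demoted to
// TempAddrTTL, the freshly learned addresses are (re-)added with the real
// TTL, and whatever is still sitting at TempAddrTTL afterwards is expired.
// Addresses the peer no longer advertises are dropped without ever leaving
// the peer address-less in the peerstore.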

func (ids *idService) consumeReceivedPubKey(c network.Conn, kb []byte) {
	lp := c.LocalPeer()
	rp := c.RemotePeer()

	if kb == nil {
		log.Debugf("%s did not receive public key for remote peer: %s", lp, rp)
		return
	}

	newKey, err := crypto.UnmarshalPublicKey(kb)
	if err != nil {
		log.Warnf("%s cannot unmarshal key from remote peer: %s, %s", lp, rp, err)
		return
	}

	// verify key matches peer.ID
	np, err := peer.IDFromPublicKey(newKey)
	if err != nil {
		log.Debugf("%s cannot get peer.ID from key of remote peer: %s, %s", lp, rp, err)
		return
	}

	if np != rp {
		// if the newKey's peer.ID does not match known peer.ID...

		if rp == "" && np != "" {
			// if the remote peer.ID is empty, then use the new, sent key.
			err := ids.Host.Peerstore().AddPubKey(rp, newKey)
			if err != nil {
				log.Debugf("%s could not add key for %s to peerstore: %s", lp, rp, err)
			}

		} else {
			// we have a known peer.ID for the remote and it does not match the sent key... error.
			log.Errorf("%s received key for remote peer %s mismatch: %s", lp, rp, np)
		}
		return
	}

	currKey := ids.Host.Peerstore().PubKey(rp)
	if currKey == nil {
		// no key? no auth transport. set this one.
		err := ids.Host.Peerstore().AddPubKey(rp, newKey)
		if err != nil {
			log.Debugf("%s could not add key for %s to peerstore: %s", lp, rp, err)
		}
		return
	}

	// ok, we have a local key, we should verify they match.
	if currKey.Equals(newKey) {
		return // ok great. we're done.
	}

	// weird, got a different key... but the different key MATCHES the peer.ID.
	// this is odd. let's log an error and investigate. this should basically never happen
	// and it means we have something funky going on and possibly a bug.
	log.Errorf("%s identify got a different key for: %s", lp, rp)

	// okay... does ours NOT match the remote peer.ID?
	cp, err := peer.IDFromPublicKey(currKey)
	if err != nil {
		log.Errorf("%s cannot get peer.ID from local key of remote peer: %s, %s", lp, rp, err)
		return
	}
	if cp != rp {
		log.Errorf("%s local key for remote peer %s yields different peer.ID: %s", lp, rp, cp)
		return
	}

	// okay... curr key DOES NOT match new key. both match peer.ID. wat?
	log.Errorf("%s local key and received key for %s do not match, but match peer.ID", lp, rp)
}

// HasConsistentTransport returns true if the address 'a' shares a
// protocol set with any address in the green set. This is used
// to check if a given address might be one of the addresses a peer is
// listening on.
func HasConsistentTransport(a ma.Multiaddr, green []ma.Multiaddr) bool {
	protosMatch := func(a, b []ma.Protocol) bool {
		if len(a) != len(b) {
			return false
		}

		for i, p := range a {
			if b[i].Code != p.Code {
				return false
			}
		}
		return true
	}

	protos := a.Protocols()

	for _, ga := range green {
		if protosMatch(protos, ga.Protocols()) {
			return true
		}
	}

	return false
}
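
// For example (addresses made up for illustration):
//
//	green := []ma.Multiaddr{ma.StringCast("/ip4/1.2.3.4/tcp/1234")}
//	HasConsistentTransport(ma.StringCast("/ip4/8.8.8.8/tcp/80"), green) // true: both are ip4/tcp
//	HasConsistentTransport(ma.StringCast("/ip4/8.8.8.8/udp/80"), green) // false: udp does not match tcp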

func (ids *idService) consumeObservedAddress(observed []byte, c network.Conn) {
	if observed == nil {
		return
	}

	maddr, err := ma.NewMultiaddrBytes(observed)
	if err != nil {
		log.Debugf("error parsing received observed addr for %s: %s", c, err)
		return
	}

	ids.observedAddrs.Record(c, maddr)
}

func signedPeerRecordFromMessage(msg *pb.Identify) (*record.Envelope, error) {
	if len(msg.SignedPeerRecord) == 0 {
		return nil, nil
	}
	env, _, err := record.ConsumeEnvelope(msg.SignedPeerRecord, peer.PeerRecordEnvelopeDomain)
	return env, err
}

// netNotifiee implements network.Notifiee so the identify service can react
// to connection events on the host's network.
type netNotifiee idService

func (nn *netNotifiee) IDService() *idService {
	return (*idService)(nn)
}

func (nn *netNotifiee) Connected(n network.Network, v network.Conn) {
	nn.IDService().IdentifyWait(v)
}

func (nn *netNotifiee) Disconnected(n network.Network, v network.Conn) {
	ids := nn.IDService()

	// Stop tracking the connection.
	ids.removeConn(v)

	// undo the setting of addresses to peerstore.ConnectedAddrTTL we did
	ids.addrMu.Lock()
	defer ids.addrMu.Unlock()

	if ids.Host.Network().Connectedness(v.RemotePeer()) != network.Connected {
		// consider removing the peer handler for this
		select {
		case ids.rmPeerHandlerCh <- rmPeerHandlerReq{v.RemotePeer()}:
		case <-ids.ctx.Done():
			return
		}

		// Last disconnect.
		ps := ids.Host.Peerstore()
		ps.UpdateAddrs(v.RemotePeer(), peerstore.ConnectedAddrTTL, peerstore.RecentlyConnectedAddrTTL)
	}
}

func (nn *netNotifiee) Listen(n network.Network, a ma.Multiaddr)      {}
func (nn *netNotifiee) ListenClose(n network.Network, a ma.Multiaddr) {}