package identify

import (
	"context"
	"fmt"
	"io"
	"sync"
	"time"

	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/event"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/peerstore"
	"github.com/libp2p/go-libp2p/core/protocol"
	"github.com/libp2p/go-libp2p/core/record"
	"github.com/libp2p/go-libp2p/p2p/host/eventbus"
	"github.com/libp2p/go-libp2p/p2p/protocol/identify/pb"

	logging "github.com/ipfs/go-log/v2"
	"github.com/libp2p/go-msgio/pbio"
	ma "github.com/multiformats/go-multiaddr"
	manet "github.com/multiformats/go-multiaddr/net"
	msmux "github.com/multiformats/go-multistream"
	"google.golang.org/protobuf/proto"
)

//go:generate protoc --proto_path=$PWD:$PWD/../../.. --go_out=. --go_opt=Mpb/identify.proto=./pb pb/identify.proto

var log = logging.Logger("net/identify")

const (
	// ID is the protocol.ID of version 1.0.0 of the identify service.
	ID = "/ipfs/id/1.0.0"

	// IDPush is the protocol.ID of the Identify push protocol.
	// It sends full identify messages containing the current state of the peer.
	IDPush = "/ipfs/id/push/1.0.0"
)

const DefaultProtocolVersion = "ipfs/0.1.0"

const ServiceName = "libp2p.identify"
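
// maxPushConcurrency bounds the number of concurrent identify-push streams
// opened by sendPushes.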
const maxPushConcurrency = 32

// StreamReadTimeout is the read timeout on all incoming Identify family streams.
var StreamReadTimeout = 60 * time.Second

const (
	legacyIDSize = 2 * 1024 // 2 KiB
	signedIDSize = 8 * 1024 // 8 KiB
	maxMessages  = 10
)

var defaultUserAgent = "github.com/libp2p/go-libp2p"

type identifySnapshot struct {
	seq       uint64
	protocols []protocol.ID
	addrs     []ma.Multiaddr
	record    *record.Envelope
}

type IDService interface {
	// IdentifyConn synchronously triggers an identify request on the connection and
	// waits for it to complete. If the connection is being identified by another
	// caller, this call will wait. If the connection has already been identified,
	// it will return immediately.
	IdentifyConn(network.Conn)
	// IdentifyWait triggers an identify (if the connection has not already been
	// identified) and returns a channel that is closed when the identify protocol
	// completes.
	IdentifyWait(network.Conn) <-chan struct{}
	// OwnObservedAddrs returns the addresses peers have reported we've dialed from.
	OwnObservedAddrs() []ma.Multiaddr
	// ObservedAddrsFor returns the addresses peers have reported we've dialed from,
	// for a specific local address.
	ObservedAddrsFor(local ma.Multiaddr) []ma.Multiaddr
	Start()

	io.Closer
}
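
// A minimal usage sketch (hypothetical; assumes a host.Host h and an already
// established network.Conn c, with error handling elided):
//
//	ids, _ := NewIDService(h)
//	ids.Start()
//	<-ids.IdentifyWait(c) // returns once c has been identified (or identify failed)
//	fmt.Println(ids.OwnObservedAddrs())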

type identifyPushSupport uint8

const (
	identifyPushSupportUnknown identifyPushSupport = iota
	identifyPushSupported
	identifyPushUnsupported
)

type entry struct {
	// The IdentifyWaitChan is created when IdentifyWait is called for the first time.
	// IdentifyWait closes this channel when the Identify request completes, or when it fails.
	IdentifyWaitChan chan struct{}

	// PushSupport saves our knowledge about the peer's support of the Identify Push protocol.
	// Before the identify request returns, we don't know yet if the peer supports Identify Push.
	PushSupport identifyPushSupport

	// Sequence is the sequence number of the last snapshot we sent to this peer.
	Sequence uint64
}

// idService is a structure that implements ProtocolIdentify.
// It is a trivial service that gives the other peer some
// useful information about the local peer. A sort of hello.
//
// The idService sends:
//   - Our libp2p Protocol Version
//   - Our libp2p Agent Version
//   - Our public Listen Addresses
type idService struct {
	Host            host.Host
	UserAgent       string
	ProtocolVersion string

	metricsTracer MetricsTracer

	setupCompleted chan struct{} // is closed when Start has finished setting up
	ctx            context.Context
	ctxCancel      context.CancelFunc
	// track resources that need to be shut down before we shut down
	refCount sync.WaitGroup

	disableSignedPeerRecord bool

	connsMu sync.RWMutex
	// The conns map contains all connections we're currently handling.
	// Connections are inserted as soon as they're available in the swarm, and - crucially -
	// before any stream can be opened or accepted on that connection.
	// Connections are removed from the map when the connection disconnects.
	// It is therefore safe to assume that a connection was (recently) closed if there's no entry in this map.
	conns map[network.Conn]entry

	addrMu sync.Mutex

	// our own observed addresses.
	observedAddrs *ObservedAddrManager

	emitters struct {
		evtPeerProtocolsUpdated        event.Emitter
		evtPeerIdentificationCompleted event.Emitter
		evtPeerIdentificationFailed    event.Emitter
	}

	currentSnapshot struct {
		sync.Mutex
		snapshot identifySnapshot
	}
}

// NewIDService constructs a new *idService and activates it by
// attaching its stream handler to the given host.Host.
func NewIDService(h host.Host, opts ...Option) (*idService, error) {
	var cfg config
	for _, opt := range opts {
		opt(&cfg)
	}

	userAgent := defaultUserAgent
	if cfg.userAgent != "" {
		userAgent = cfg.userAgent
	}

	protocolVersion := DefaultProtocolVersion
	if cfg.protocolVersion != "" {
		protocolVersion = cfg.protocolVersion
	}

	ctx, cancel := context.WithCancel(context.Background())
	s := &idService{
		Host:                    h,
		UserAgent:               userAgent,
		ProtocolVersion:         protocolVersion,
		ctx:                     ctx,
		ctxCancel:               cancel,
		conns:                   make(map[network.Conn]entry),
		disableSignedPeerRecord: cfg.disableSignedPeerRecord,
		setupCompleted:          make(chan struct{}),
		metricsTracer:           cfg.metricsTracer,
	}

	observedAddrs, err := NewObservedAddrManager(h)
	if err != nil {
		return nil, fmt.Errorf("failed to create observed address manager: %s", err)
	}
	s.observedAddrs = observedAddrs

	s.emitters.evtPeerProtocolsUpdated, err = h.EventBus().Emitter(&event.EvtPeerProtocolsUpdated{})
	if err != nil {
		log.Warnf("identify service not emitting peer protocol updates; err: %s", err)
	}
	s.emitters.evtPeerIdentificationCompleted, err = h.EventBus().Emitter(&event.EvtPeerIdentificationCompleted{})
	if err != nil {
		log.Warnf("identify service not emitting identification completed events; err: %s", err)
	}
	s.emitters.evtPeerIdentificationFailed, err = h.EventBus().Emitter(&event.EvtPeerIdentificationFailed{})
	if err != nil {
		log.Warnf("identify service not emitting identification failed events; err: %s", err)
	}
	return s, nil
}

func (ids *idService) Start() {
	ids.Host.Network().Notify((*netNotifiee)(ids))
	ids.Host.SetStreamHandler(ID, ids.handleIdentifyRequest)
	ids.Host.SetStreamHandler(IDPush, ids.handlePush)
	ids.updateSnapshot()
	close(ids.setupCompleted)

	ids.refCount.Add(1)
	go ids.loop(ids.ctx)
}

func (ids *idService) loop(ctx context.Context) {
	defer ids.refCount.Done()

	sub, err := ids.Host.EventBus().Subscribe(
		[]any{&event.EvtLocalProtocolsUpdated{}, &event.EvtLocalAddressesUpdated{}},
		eventbus.BufSize(256),
		eventbus.Name("identify (loop)"),
	)
	if err != nil {
		log.Errorf("failed to subscribe to events on the bus, err=%s", err)
		return
	}
	defer sub.Close()

	// Send pushes from a separate goroutine.
	// That way, we can end up with
	// * this goroutine busy looping over all peers in sendPushes
	// * another push being queued in the triggerPush channel
	triggerPush := make(chan struct{}, 1)
	ids.refCount.Add(1)
	go func() {
		defer ids.refCount.Done()

		for {
			select {
			case <-ctx.Done():
				return
			case <-triggerPush:
				ids.sendPushes(ctx)
			}
		}
	}()

	for {
		select {
		case e, ok := <-sub.Out():
			if !ok {
				return
			}
			if ids.metricsTracer != nil {
				ids.metricsTracer.TriggeredPushes(e)
			}
			ids.updateSnapshot()
			select {
			case triggerPush <- struct{}{}:
			default: // we already have one more push queued, no need to queue another one
			}
		case <-ctx.Done():
			return
		}
	}
}

func (ids *idService) sendPushes(ctx context.Context) {
	ids.connsMu.RLock()
	conns := make([]network.Conn, 0, len(ids.conns))
	for c, e := range ids.conns {
		// Push even if we don't know if push is supported.
		// This will only be the case while the Identify request is in flight.
		if e.PushSupport == identifyPushSupported || e.PushSupport == identifyPushSupportUnknown {
			conns = append(conns, c)
		}
	}
	ids.connsMu.RUnlock()
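
	// A buffered channel serves as a counting semaphore: each push acquires a
	// slot before spawning its goroutine, capping concurrency at maxPushConcurrency.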
	sem := make(chan struct{}, maxPushConcurrency)
	var wg sync.WaitGroup
	for _, c := range conns {
		// check if the connection is still alive
		ids.connsMu.RLock()
		e, ok := ids.conns[c]
		ids.connsMu.RUnlock()
		if !ok {
			continue
		}
		// check if we already sent the current snapshot to this peer
		ids.currentSnapshot.Lock()
		snapshot := ids.currentSnapshot.snapshot
		ids.currentSnapshot.Unlock()
		if e.Sequence >= snapshot.seq {
			log.Debugw("already sent this snapshot to peer", "peer", c.RemotePeer(), "seq", snapshot.seq)
			continue
		}
		// we haven't, send it now
		sem <- struct{}{}
		wg.Add(1)
		go func(c network.Conn) {
			defer wg.Done()
			defer func() { <-sem }()
			ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
			defer cancel()
			str, err := ids.Host.NewStream(ctx, c.RemotePeer(), IDPush)
			if err != nil { // connection might have been closed recently
				return
			}
			// TODO: find out if the peer supports push if we didn't have any information about push support
			if err := ids.sendIdentifyResp(str, true); err != nil {
				log.Debugw("failed to send identify push", "peer", c.RemotePeer(), "error", err)
				return
			}
		}(c)
	}
	wg.Wait()
}

// Close shuts down the idService
func (ids *idService) Close() error {
	ids.ctxCancel()
	ids.observedAddrs.Close()
	ids.refCount.Wait()
	return nil
}

func (ids *idService) OwnObservedAddrs() []ma.Multiaddr {
	return ids.observedAddrs.Addrs()
}

func (ids *idService) ObservedAddrsFor(local ma.Multiaddr) []ma.Multiaddr {
	return ids.observedAddrs.AddrsFor(local)
}

// IdentifyConn runs the Identify protocol on a connection.
// It returns when we've received the peer's Identify message (or the request fails).
// If successful, the peer store will contain the peer's addresses and supported protocols.
func (ids *idService) IdentifyConn(c network.Conn) {
	<-ids.IdentifyWait(c)
}

// IdentifyWait runs the Identify protocol on a connection.
// It doesn't block and returns a channel that is closed when we receive
// the peer's Identify message (or the request fails).
// If successful, the peer store will contain the peer's addresses and supported protocols.
func (ids *idService) IdentifyWait(c network.Conn) <-chan struct{} {
	ids.connsMu.Lock()
	defer ids.connsMu.Unlock()

	e, found := ids.conns[c]
	if !found { // No entry found. Connection was most likely closed (and removed from this map) recently.
		ch := make(chan struct{})
		close(ch)
		return ch
	}

	if e.IdentifyWaitChan != nil {
		return e.IdentifyWaitChan
	}
	// First call to IdentifyWait for this connection. Create the channel.
	e.IdentifyWaitChan = make(chan struct{})
	ids.conns[c] = e

	// Spawn an identify. The connection may actually be closed
	// already, but that doesn't really matter. We'll fail to open a
	// stream and then forget the connection.
	go func() {
		defer close(e.IdentifyWaitChan)
		if err := ids.identifyConn(c); err != nil {
			log.Warnf("failed to identify %s: %s", c.RemotePeer(), err)
			ids.emitters.evtPeerIdentificationFailed.Emit(event.EvtPeerIdentificationFailed{Peer: c.RemotePeer(), Reason: err})
			return
		}

		ids.emitters.evtPeerIdentificationCompleted.Emit(event.EvtPeerIdentificationCompleted{Peer: c.RemotePeer()})
	}()

	return e.IdentifyWaitChan
}

func (ids *idService) identifyConn(c network.Conn) error {
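	// Mark the stream as usable on transient (e.g. relay-limited) connections:
	// identify needs to run before we know anything else about the connection.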
	s, err := c.NewStream(network.WithUseTransient(context.TODO(), "identify"))
	if err != nil {
		log.Debugw("error opening identify stream", "peer", c.RemotePeer(), "error", err)
		return err
	}

	if err := s.SetProtocol(ID); err != nil {
		log.Warnf("error setting identify protocol for stream: %s", err)
		s.Reset()
		return err
	}

	// negotiate the identify protocol, then hand the response to our handler.
	if err := msmux.SelectProtoOrFail(ID, s); err != nil {
		log.Infow("failed to negotiate identify protocol with peer", "peer", c.RemotePeer(), "error", err)
		s.Reset()
		return err
	}

	return ids.handleIdentifyResponse(s, false)
}

// handlePush handles incoming identify push streams
func (ids *idService) handlePush(s network.Stream) {
	ids.handleIdentifyResponse(s, true)
}

func (ids *idService) handleIdentifyRequest(s network.Stream) {
	_ = ids.sendIdentifyResp(s, false)
}

func (ids *idService) sendIdentifyResp(s network.Stream, isPush bool) error {
	if err := s.Scope().SetService(ServiceName); err != nil {
		s.Reset()
		return fmt.Errorf("failed to attach stream to identify service: %w", err)
	}
	defer s.Close()

	ids.currentSnapshot.Lock()
	snapshot := ids.currentSnapshot.snapshot
	ids.currentSnapshot.Unlock()

	log.Debugw("sending snapshot", "seq", snapshot.seq, "protocols", snapshot.protocols, "addrs", snapshot.addrs)

	mes := ids.createBaseIdentifyResponse(s.Conn(), &snapshot)
	mes.SignedPeerRecord = ids.getSignedRecord(&snapshot)

	log.Debugf("%s sending message to %s %s", ID, s.Conn().RemotePeer(), s.Conn().RemoteMultiaddr())
	if err := ids.writeChunkedIdentifyMsg(s, mes); err != nil {
		return err
	}

	if ids.metricsTracer != nil {
		ids.metricsTracer.IdentifySent(isPush, len(mes.Protocols), len(mes.ListenAddrs))
	}

	ids.connsMu.Lock()
	defer ids.connsMu.Unlock()
	e, ok := ids.conns[s.Conn()]
	// The connection might already have been closed.
	// We *should* receive the Connected notification from the swarm before we're able to accept the peer's
	// Identify stream, but if that for some reason doesn't work, we also wouldn't have a map entry here.
	// The only consequence would be that we send a spurious Push to that peer later.
	if !ok {
		return nil
	}
	e.Sequence = snapshot.seq
	ids.conns[s.Conn()] = e
	return nil
}

func (ids *idService) handleIdentifyResponse(s network.Stream, isPush bool) error {
	if err := s.Scope().SetService(ServiceName); err != nil {
		log.Warnf("error attaching stream to identify service: %s", err)
		s.Reset()
		return err
	}

	if err := s.Scope().ReserveMemory(signedIDSize, network.ReservationPriorityAlways); err != nil {
		log.Warnf("error reserving memory for identify stream: %s", err)
		s.Reset()
		return err
	}
	defer s.Scope().ReleaseMemory(signedIDSize)

	_ = s.SetReadDeadline(time.Now().Add(StreamReadTimeout))

	c := s.Conn()

	r := pbio.NewDelimitedReader(s, signedIDSize)
	mes := &pb.Identify{}

	if err := readAllIDMessages(r, mes); err != nil {
		log.Warn("error reading identify message: ", err)
		s.Reset()
		return err
	}

	defer s.Close()

	log.Debugf("%s received message from %s %s", s.Protocol(), c.RemotePeer(), c.RemoteMultiaddr())

	ids.consumeMessage(mes, c, isPush)

	if ids.metricsTracer != nil {
		ids.metricsTracer.IdentifyReceived(isPush, len(mes.Protocols), len(mes.ListenAddrs))
	}

	ids.connsMu.Lock()
	defer ids.connsMu.Unlock()
	e, ok := ids.conns[c]
	if !ok { // might already have disconnected
		return nil
	}
	sup, err := ids.Host.Peerstore().SupportsProtocols(c.RemotePeer(), IDPush)
	if supportsIdentifyPush := err == nil && len(sup) > 0; supportsIdentifyPush {
		e.PushSupport = identifyPushSupported
	} else {
		e.PushSupport = identifyPushUnsupported
	}

	if ids.metricsTracer != nil {
		ids.metricsTracer.ConnPushSupport(e.PushSupport)
	}

	ids.conns[c] = e
	return nil
}

func readAllIDMessages(r pbio.Reader, finalMsg proto.Message) error {
	mes := &pb.Identify{}
	for i := 0; i < maxMessages; i++ {
		switch err := r.ReadMsg(mes); err {
		case io.EOF:
			return nil
		case nil:
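			// proto.Merge appends repeated fields and overwrites populated
			// scalar fields, so successive chunks accumulate into finalMsg.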
			proto.Merge(finalMsg, mes)
		default:
			return err
		}
	}

	return fmt.Errorf("too many parts")
}

func (ids *idService) updateSnapshot() {
	snapshot := identifySnapshot{
		addrs:     ids.Host.Addrs(),
		protocols: ids.Host.Mux().Protocols(),
	}
	if !ids.disableSignedPeerRecord {
		if cab, ok := peerstore.GetCertifiedAddrBook(ids.Host.Peerstore()); ok {
			snapshot.record = cab.GetPeerRecord(ids.Host.ID())
		}
	}
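
	// Sequence numbers increase strictly monotonically, which lets sendPushes
	// tell (via entry.Sequence) which peers have already seen this snapshot.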
	ids.currentSnapshot.Lock()
	snapshot.seq = ids.currentSnapshot.snapshot.seq + 1
	ids.currentSnapshot.snapshot = snapshot
	ids.currentSnapshot.Unlock()

	log.Debugw("updating snapshot", "seq", snapshot.seq, "addrs", snapshot.addrs)
}

func (ids *idService) writeChunkedIdentifyMsg(s network.Stream, mes *pb.Identify) error {
	writer := pbio.NewDelimitedWriter(s)

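	// If there is no signed record, or the whole response fits within
	// legacyIDSize (the limit older peers read per message), send a single
	// message. Otherwise, split the signed peer record off into a second
	// message so legacy peers still receive the base response.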
	if mes.SignedPeerRecord == nil || proto.Size(mes) <= legacyIDSize {
		return writer.WriteMsg(mes)
	}

	sr := mes.SignedPeerRecord
	mes.SignedPeerRecord = nil
	if err := writer.WriteMsg(mes); err != nil {
		return err
	}
	// then write just the signed record
	return writer.WriteMsg(&pb.Identify{SignedPeerRecord: sr})
}

func (ids *idService) createBaseIdentifyResponse(conn network.Conn, snapshot *identifySnapshot) *pb.Identify {
	mes := &pb.Identify{}

	remoteAddr := conn.RemoteMultiaddr()
	localAddr := conn.LocalMultiaddr()

	// set protocols this node is currently handling
	mes.Protocols = protocol.ConvertToStrings(snapshot.protocols)

	// observed address so other side is informed of their
	// "public" address, at least in relation to us.
	mes.ObservedAddr = remoteAddr.Bytes()

	// populate unsigned addresses.
	// peers that do not yet support signed addresses will need this.
	// Note: LocalMultiaddr is sometimes 0.0.0.0
	viaLoopback := manet.IsIPLoopback(localAddr) || manet.IsIPLoopback(remoteAddr)
	mes.ListenAddrs = make([][]byte, 0, len(snapshot.addrs))
	for _, addr := range snapshot.addrs {
		if !viaLoopback && manet.IsIPLoopback(addr) {
			continue
		}
		mes.ListenAddrs = append(mes.ListenAddrs, addr.Bytes())
	}
	// set our public key
	ownKey := ids.Host.Peerstore().PubKey(ids.Host.ID())

	// check if we even have a public key.
	if ownKey == nil {
		// public key is nil. We are either using an insecure transport or something erratic happened.
		// check if we're even operating in "secure mode"
		if ids.Host.Peerstore().PrivKey(ids.Host.ID()) != nil {
			// private key is present. But NO public key. Something bad happened.
			log.Errorf("did not have own public key in Peerstore")
		}
		// if neither key is present, it is safe to assume that we are using an insecure transport.
	} else {
		// public key is present. Safe to proceed.
		if kb, err := crypto.MarshalPublicKey(ownKey); err != nil {
			log.Errorf("failed to convert key to bytes: %s", err)
		} else {
			mes.PublicKey = kb
		}
	}

	// set protocol versions
	mes.ProtocolVersion = &ids.ProtocolVersion
	mes.AgentVersion = &ids.UserAgent

	return mes
}

func (ids *idService) getSignedRecord(snapshot *identifySnapshot) []byte {
	if ids.disableSignedPeerRecord || snapshot.record == nil {
		return nil
	}

	recBytes, err := snapshot.record.Marshal()
	if err != nil {
		log.Errorw("failed to marshal signed record", "err", err)
		return nil
	}

	return recBytes
}

// diff takes two slices of protocol.IDs (a and b) and computes which elements were added and removed in b.
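// For example, with a=[A B] and b=[B C], it returns added=[C], removed=[A].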
func diff(a, b []protocol.ID) (added, removed []protocol.ID) {
	// This is O(n^2), but it's fine because the slices are small.
	for _, x := range b {
		var found bool
		for _, y := range a {
			if x == y {
				found = true
				break
			}
		}
		if !found {
			added = append(added, x)
		}
	}
	for _, x := range a {
		var found bool
		for _, y := range b {
			if x == y {
				found = true
				break
			}
		}
		if !found {
			removed = append(removed, x)
		}
	}
	return
}

func (ids *idService) consumeMessage(mes *pb.Identify, c network.Conn, isPush bool) {
	p := c.RemotePeer()

	supported, _ := ids.Host.Peerstore().GetProtocols(p)
	mesProtocols := protocol.ConvertFromStrings(mes.Protocols)
	added, removed := diff(supported, mesProtocols)
	ids.Host.Peerstore().SetProtocols(p, mesProtocols...)
	if isPush {
		ids.emitters.evtPeerProtocolsUpdated.Emit(event.EvtPeerProtocolsUpdated{
			Peer:    p,
			Added:   added,
			Removed: removed,
		})
	}

	// mes.ObservedAddr
	ids.consumeObservedAddress(mes.GetObservedAddr(), c)

	// mes.ListenAddrs
	laddrs := mes.GetListenAddrs()
	lmaddrs := make([]ma.Multiaddr, 0, len(laddrs))
	for _, addr := range laddrs {
		maddr, err := ma.NewMultiaddrBytes(addr)
		if err != nil {
			log.Debugf("%s failed to parse multiaddr from %s %s", ID,
				p, c.RemoteMultiaddr())
			continue
		}
		lmaddrs = append(lmaddrs, maddr)
	}

	// NOTE: Do not add `c.RemoteMultiaddr()` to the peerstore if the remote
	// peer doesn't tell us to do so. Otherwise, we'll advertise it.
	//
	// This can cause an "addr-splosion" issue where the network will slowly
	// gossip and collect observed but unadvertised addresses. Given a NAT
	// that picks random source ports, this can cause DHT nodes to collect
	// many undialable addresses for other peers.

	// add certified addresses for the peer, if they sent us a signed peer record
	// otherwise use the unsigned addresses.
	signedPeerRecord, err := signedPeerRecordFromMessage(mes)
	if err != nil {
		log.Errorf("error getting peer record from Identify message: %v", err)
	}

	// Extend the TTLs on the known (probably) good addresses.
	// Taking the lock ensures that we don't concurrently process a disconnect.
	ids.addrMu.Lock()
	ttl := peerstore.RecentlyConnectedAddrTTL
	if ids.Host.Network().Connectedness(p) == network.Connected {
		ttl = peerstore.ConnectedAddrTTL
	}
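
	// The peer's addresses are refreshed in three steps:
	// 1. downgrade all currently known addrs to a short, temporary TTL,
	// 2. (re-)add the addrs we just received with the proper TTL,
	// 3. expire whatever is still left at the temporary TTL.
	// The net effect is that addrs the peer no longer announces are dropped.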
	// Downgrade connected and recently connected addrs to a temporary TTL.
	for _, ttl := range []time.Duration{
		peerstore.RecentlyConnectedAddrTTL,
		peerstore.ConnectedAddrTTL,
	} {
		ids.Host.Peerstore().UpdateAddrs(p, ttl, peerstore.TempAddrTTL)
	}

	// add signed addrs if we have them and the peerstore supports them
	cab, ok := peerstore.GetCertifiedAddrBook(ids.Host.Peerstore())
	if ok && signedPeerRecord != nil {
		_, addErr := cab.ConsumePeerRecord(signedPeerRecord, ttl)
		if addErr != nil {
			log.Debugf("error adding signed addrs to peerstore: %v", addErr)
		}
	} else {
		ids.Host.Peerstore().AddAddrs(p, lmaddrs, ttl)
	}

	// Finally, expire all temporary addrs.
	ids.Host.Peerstore().UpdateAddrs(p, peerstore.TempAddrTTL, 0)
	ids.addrMu.Unlock()

	log.Debugf("%s received listen addrs for %s: %s", c.LocalPeer(), c.RemotePeer(), lmaddrs)

	// get protocol versions
	pv := mes.GetProtocolVersion()
	av := mes.GetAgentVersion()

	ids.Host.Peerstore().Put(p, "ProtocolVersion", pv)
	ids.Host.Peerstore().Put(p, "AgentVersion", av)

	// get the key from the other side. we may not have it (no-auth transport)
	ids.consumeReceivedPubKey(c, mes.PublicKey)
}

func (ids *idService) consumeReceivedPubKey(c network.Conn, kb []byte) {
	lp := c.LocalPeer()
	rp := c.RemotePeer()

	if kb == nil {
		log.Debugf("%s did not receive public key for remote peer: %s", lp, rp)
		return
	}

	newKey, err := crypto.UnmarshalPublicKey(kb)
	if err != nil {
		log.Warnf("%s cannot unmarshal key from remote peer: %s, %s", lp, rp, err)
		return
	}

	// verify key matches peer.ID
	np, err := peer.IDFromPublicKey(newKey)
	if err != nil {
		log.Debugf("%s cannot get peer.ID from key of remote peer: %s, %s", lp, rp, err)
		return
	}

	if np != rp {
		// if the newKey's peer.ID does not match known peer.ID...

		if rp == "" && np != "" {
			// if the remote peer.ID is empty, then use the newly received key.
			err := ids.Host.Peerstore().AddPubKey(rp, newKey)
			if err != nil {
				log.Debugf("%s could not add key for %s to peerstore: %s", lp, rp, err)
			}

		} else {
			// we have a peer.ID for the remote peer and it does not match the received key... error.
			log.Errorf("%s received key for remote peer %s mismatch: %s", lp, rp, np)
		}
		return
	}

	currKey := ids.Host.Peerstore().PubKey(rp)
	if currKey == nil {
		// no key? no auth transport. set this one.
		err := ids.Host.Peerstore().AddPubKey(rp, newKey)
		if err != nil {
			log.Debugf("%s could not add key for %s to peerstore: %s", lp, rp, err)
		}
		return
	}

	// ok, we have a local key, we should verify they match.
	if currKey.Equals(newKey) {
		return // ok great. we're done.
	}

	// weird, got a different key... but the different key MATCHES the peer.ID.
	// this is odd. let's log an error and investigate. this should basically never happen
	// and it means we have something funky going on and possibly a bug.
	log.Errorf("%s identify got a different key for: %s", lp, rp)

	// okay... does ours NOT match the remote peer.ID?
	cp, err := peer.IDFromPublicKey(currKey)
	if err != nil {
		log.Errorf("%s cannot get peer.ID from local key of remote peer: %s, %s", lp, rp, err)
		return
	}
	if cp != rp {
		log.Errorf("%s local key for remote peer %s yields different peer.ID: %s", lp, rp, cp)
		return
	}

	// okay... curr key DOES NOT match new key. both match peer.ID. wat?
	log.Errorf("%s local key and received key for %s do not match, but match peer.ID", lp, rp)
}

// HasConsistentTransport returns true if the address 'a' shares a
// protocol set with any address in the green set. This is used
// to check if a given address might be one of the addresses a peer is
// listening on.
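// For example, /ip4/1.2.3.4/tcp/1234 is consistent with
// /ip4/5.6.7.8/tcp/80, but not with /ip4/5.6.7.8/udp/80/quic-v1.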
func HasConsistentTransport(a ma.Multiaddr, green []ma.Multiaddr) bool {
	protosMatch := func(a, b []ma.Protocol) bool {
		if len(a) != len(b) {
			return false
		}

		for i, p := range a {
			if b[i].Code != p.Code {
				return false
			}
		}
		return true
	}

	protos := a.Protocols()

	for _, ga := range green {
		if protosMatch(protos, ga.Protocols()) {
			return true
		}
	}

	return false
}

func (ids *idService) consumeObservedAddress(observed []byte, c network.Conn) {
	if observed == nil {
		return
	}

	maddr, err := ma.NewMultiaddrBytes(observed)
	if err != nil {
		log.Debugf("error parsing received observed addr for %s: %s", c, err)
		return
	}

	ids.observedAddrs.Record(c, maddr)
}

func signedPeerRecordFromMessage(msg *pb.Identify) (*record.Envelope, error) {
	if len(msg.SignedPeerRecord) == 0 {
		return nil, nil
	}
	env, _, err := record.ConsumeEnvelope(msg.SignedPeerRecord, peer.PeerRecordEnvelopeDomain)
	return env, err
}

// netNotifiee defines methods to be used with the swarm
type netNotifiee idService

func (nn *netNotifiee) IDService() *idService {
	return (*idService)(nn)
}

func (nn *netNotifiee) Connected(_ network.Network, c network.Conn) {
	// We rely on this notification being received before we receive any incoming streams on the connection.
	// The swarm implementation guarantees this.
	ids := nn.IDService()
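
	// Block until Start has finished setting up: the notifiee is registered
	// before the stream handlers, so a Connected notification can arrive
	// before setup is complete.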
	<-ids.setupCompleted

	ids.connsMu.Lock()
	ids.conns[c] = entry{}
	ids.connsMu.Unlock()

	nn.IDService().IdentifyWait(c)
}

func (nn *netNotifiee) Disconnected(_ network.Network, c network.Conn) {
	ids := nn.IDService()

	// Stop tracking the connection.
	ids.connsMu.Lock()
	delete(ids.conns, c)
	ids.connsMu.Unlock()

	if ids.Host.Network().Connectedness(c.RemotePeer()) != network.Connected {
		// Last disconnect.
		// Undo the setting of addresses to peerstore.ConnectedAddrTTL we did in consumeMessage.
		ids.addrMu.Lock()
		defer ids.addrMu.Unlock()
		ids.Host.Peerstore().UpdateAddrs(c.RemotePeer(), peerstore.ConnectedAddrTTL, peerstore.RecentlyConnectedAddrTTL)
	}
}

func (nn *netNotifiee) Listen(n network.Network, a ma.Multiaddr) {}
func (nn *netNotifiee) ListenClose(n network.Network, a ma.Multiaddr) {}