// status-go/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id.go

package identify

import (
	"context"
	"sync"
	"time"

	ic "github.com/libp2p/go-libp2p-core/crypto"
	"github.com/libp2p/go-libp2p-core/helpers"
	"github.com/libp2p/go-libp2p-core/host"
	"github.com/libp2p/go-libp2p-core/network"
	"github.com/libp2p/go-libp2p-core/peer"
	"github.com/libp2p/go-libp2p-core/peerstore"

	pb "github.com/libp2p/go-libp2p/p2p/protocol/identify/pb"

	ggio "github.com/gogo/protobuf/io"
	logging "github.com/ipfs/go-log"
	lgbl "github.com/libp2p/go-libp2p-loggables"

	ma "github.com/multiformats/go-multiaddr"
	msmux "github.com/multiformats/go-multistream"
)
var log = logging.Logger("net/identify")

// ID is the protocol.ID of the Identify Service.
const ID = "/ipfs/id/1.0.0"

// IDPush is the protocol.ID of the Identify push protocol.
const IDPush = "/ipfs/id/push/1.0.0"

// LibP2PVersion holds the current protocol version for a client running this code
// TODO(jbenet): fix the versioning mess.
const LibP2PVersion = "ipfs/0.1.0"

var ClientVersion = "go-libp2p/3.3.4"

// transientTTL is a short ttl for invalidated previously connected addrs
const transientTTL = 10 * time.Second
// IDService is a structure that implements ProtocolIdentify.
// It is a trivial service that gives the other peer some
// useful information about the local peer. A sort of hello.
//
// The IDService sends:
// * Our IPFS Protocol Version
// * Our IPFS Agent Version
// * Our public Listen Addresses
type IDService struct {
	Host host.Host

	ctx context.Context

	// connections undergoing identification
	// for wait purposes
	currid map[network.Conn]chan struct{}
	currmu sync.RWMutex

	addrMu sync.Mutex

	// our own observed addresses.
	// TODO: instead of expiring, remove these when we disconnect
	observedAddrs *ObservedAddrSet
}
// NewIDService constructs a new *IDService and activates it by
// attaching its stream handler to the given host.Host.
func NewIDService(ctx context.Context, h host.Host) *IDService {
	s := &IDService{
		Host:          h,
		ctx:           ctx,
		currid:        make(map[network.Conn]chan struct{}),
		observedAddrs: NewObservedAddrSet(ctx),
	}
	h.SetStreamHandler(ID, s.requestHandler)
	h.SetStreamHandler(IDPush, s.pushHandler)
	h.Network().Notify((*netNotifiee)(s))
	return s
}
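// Illustrative usage sketch (not part of the vendored source; the host value
// `h` and its construction are assumed, any host.Host implementation works):
//
//	ids := NewIDService(context.Background(), h)
//	for _, c := range h.Network().Conns() {
//		go ids.IdentifyConn(c) // identify connections that already exist
//	}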
// OwnObservedAddrs returns the addresses peers have reported we've dialed from
func (ids *IDService) OwnObservedAddrs() []ma.Multiaddr {
	return ids.observedAddrs.Addrs()
}

func (ids *IDService) ObservedAddrsFor(local ma.Multiaddr) []ma.Multiaddr {
	return ids.observedAddrs.AddrsFor(local)
}
func (ids *IDService) IdentifyConn(c network.Conn) {
	ids.currmu.Lock()
	if wait, found := ids.currid[c]; found {
		ids.currmu.Unlock()
		log.Debugf("IdentifyConn called twice on: %s", c)
		<-wait // already identifying it. wait for it.
		return
	}
	ch := make(chan struct{})
	ids.currid[c] = ch
	ids.currmu.Unlock()

	defer func() {
		close(ch)
		ids.currmu.Lock()
		delete(ids.currid, c)
		ids.currmu.Unlock()
	}()

	s, err := c.NewStream()
	if err != nil {
		log.Debugf("error opening initial stream for %s: %s", ID, err)
		log.Event(context.TODO(), "IdentifyOpenFailed", c.RemotePeer())
		c.Close()
		return
	}
	s.SetProtocol(ID)

	// ok give the response to our handler.
	if err := msmux.SelectProtoOrFail(ID, s); err != nil {
		log.Event(context.TODO(), "IdentifyOpenFailed", c.RemotePeer(), logging.Metadata{"error": err})
		s.Reset()
		return
	}

	ids.responseHandler(s)
}
func (ids *IDService) requestHandler(s network.Stream) {
	defer helpers.FullClose(s)
	c := s.Conn()

	w := ggio.NewDelimitedWriter(s)
	mes := pb.Identify{}
	ids.populateMessage(&mes, s.Conn())
	w.WriteMsg(&mes)

	log.Debugf("%s sent message to %s %s", ID,
		c.RemotePeer(), c.RemoteMultiaddr())
}
func (ids *IDService) responseHandler(s network.Stream) {
	c := s.Conn()

	r := ggio.NewDelimitedReader(s, 2048)
	mes := pb.Identify{}
	if err := r.ReadMsg(&mes); err != nil {
		log.Warning("error reading identify message: ", err)
		s.Reset()
		return
	}
	ids.consumeMessage(&mes, c)
	log.Debugf("%s received message from %s %s", ID,
		c.RemotePeer(), c.RemoteMultiaddr())

	go helpers.FullClose(s)
}
func (ids *IDService) pushHandler(s network.Stream) {
	ids.responseHandler(s)
}

func (ids *IDService) Push() {
	var wg sync.WaitGroup

	ctx, cancel := context.WithTimeout(ids.ctx, 30*time.Second)
	ctx = network.WithNoDial(ctx, "identify push")

	for _, p := range ids.Host.Network().Peers() {
		wg.Add(1)
		go func(p peer.ID) {
			defer wg.Done()

			s, err := ids.Host.NewStream(ctx, p, IDPush)
			if err != nil {
				log.Debugf("error opening push stream to %s: %s", p, err.Error())
				return
			}

			rch := make(chan struct{}, 1)
			go func() {
				ids.requestHandler(s)
				rch <- struct{}{}
			}()

			select {
			case <-rch:
			case <-ctx.Done():
				// this is taking too long, abort!
				s.Reset()
			}
		}(p)
	}

	// this supervisory goroutine is necessary to cancel the context
	go func() {
		wg.Wait()
		cancel()
	}()
}
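// Illustrative sketch (an assumption, not shown in this file): Push is meant
// to be called when locally advertised state changes after the initial
// identify exchange, e.g. when a new listen address is added, so that peers
// we are already connected to receive a fresh pb.Identify over IDPush.
// The address value `newAddr` below is hypothetical.
//
//	if err := ids.Host.Network().Listen(newAddr); err == nil {
//		ids.Push() // re-announce our addresses and protocols to connected peers
//	}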
func (ids *IDService) populateMessage(mes *pb.Identify, c network.Conn) {
	// set protocols this node is currently handling
	protos := ids.Host.Mux().Protocols()
	mes.Protocols = make([]string, len(protos))
	for i, p := range protos {
		mes.Protocols[i] = string(p)
	}

	// observed address so other side is informed of their
	// "public" address, at least in relation to us.
	mes.ObservedAddr = c.RemoteMultiaddr().Bytes()

	// set listen addrs, get our latest addrs from Host.
	laddrs := ids.Host.Addrs()
	mes.ListenAddrs = make([][]byte, len(laddrs))
	for i, addr := range laddrs {
		mes.ListenAddrs[i] = addr.Bytes()
	}
	log.Debugf("%s sent listen addrs to %s: %s", c.LocalPeer(), c.RemotePeer(), laddrs)

	// set our public key
	ownKey := ids.Host.Peerstore().PubKey(ids.Host.ID())

	// check if we even have a public key.
	if ownKey == nil {
		// public key is nil. We are either using insecure transport or something erratic happened.
		// check if we're even operating in "secure mode"
		if ids.Host.Peerstore().PrivKey(ids.Host.ID()) != nil {
			// private key is present. But NO public key. Something bad happened.
			log.Errorf("did not have own public key in Peerstore")
		}
		// if neither key is present, it is safe to assume that we are using an insecure transport.
	} else {
		// public key is present. Safe to proceed.
		if kb, err := ownKey.Bytes(); err != nil {
			log.Errorf("failed to convert key to bytes")
		} else {
			mes.PublicKey = kb
		}
	}

	// set protocol versions
	pv := LibP2PVersion
	av := ClientVersion
	mes.ProtocolVersion = &pv
	mes.AgentVersion = &av
}
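// For orientation, the populated message for a node running this code looks
// roughly like the sketch below (values other than the two version strings
// depend on the host's keys and addresses):
//
//	ProtocolVersion: "ipfs/0.1.0"      (LibP2PVersion)
//	AgentVersion:    "go-libp2p/3.3.4" (ClientVersion)
//	PublicKey:       marshalled public key bytes, if the peerstore has one
//	ListenAddrs:     Host.Addrs(), each encoded with Multiaddr.Bytes()
//	ObservedAddr:    the connection's RemoteMultiaddr(), as bytes
//	Protocols:       Host.Mux().Protocols()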
func (ids *IDService) consumeMessage(mes *pb.Identify, c network.Conn) {
	p := c.RemotePeer()

	// mes.Protocols
	ids.Host.Peerstore().SetProtocols(p, mes.Protocols...)

	// mes.ObservedAddr
	ids.consumeObservedAddress(mes.GetObservedAddr(), c)

	// mes.ListenAddrs
	laddrs := mes.GetListenAddrs()
	lmaddrs := make([]ma.Multiaddr, 0, len(laddrs))
	for _, addr := range laddrs {
		maddr, err := ma.NewMultiaddrBytes(addr)
		if err != nil {
			log.Debugf("%s failed to parse multiaddr from %s %s", ID,
				p, c.RemoteMultiaddr())
			continue
		}
		lmaddrs = append(lmaddrs, maddr)
	}

	// NOTE: Do not add `c.RemoteMultiaddr()` to the peerstore if the remote
	// peer doesn't tell us to do so. Otherwise, we'll advertise it.
	//
	// This can cause an "addr-splosion" issue where the network will slowly
	// gossip and collect observed but unadvertised addresses. Given a NAT
	// that picks random source ports, this can cause DHT nodes to collect
	// many undialable addresses for other peers.

	// Extend the TTLs on the known (probably) good addresses.
	// Taking the lock ensures that we don't concurrently process a disconnect.
	ids.addrMu.Lock()
	switch ids.Host.Network().Connectedness(p) {
	case network.Connected:
		// invalidate previous addrs -- we use a transient ttl instead of 0 to ensure there
		// is no period of having no good addrs whatsoever
		ids.Host.Peerstore().UpdateAddrs(p, peerstore.ConnectedAddrTTL, transientTTL)
		ids.Host.Peerstore().AddAddrs(p, lmaddrs, peerstore.ConnectedAddrTTL)
	default:
		ids.Host.Peerstore().UpdateAddrs(p, peerstore.ConnectedAddrTTL, transientTTL)
		ids.Host.Peerstore().AddAddrs(p, lmaddrs, peerstore.RecentlyConnectedAddrTTL)
	}
	ids.addrMu.Unlock()

	log.Debugf("%s received listen addrs for %s: %s", c.LocalPeer(), c.RemotePeer(), lmaddrs)

	// get protocol versions
	pv := mes.GetProtocolVersion()
	av := mes.GetAgentVersion()
	ids.Host.Peerstore().Put(p, "ProtocolVersion", pv)
	ids.Host.Peerstore().Put(p, "AgentVersion", av)

	// get the key from the other side. we may not have it (no-auth transport)
	ids.consumeReceivedPubKey(c, mes.PublicKey)
}
func (ids *IDService) consumeReceivedPubKey(c network.Conn, kb []byte) {
	lp := c.LocalPeer()
	rp := c.RemotePeer()

	if kb == nil {
		log.Debugf("%s did not receive public key for remote peer: %s", lp, rp)
		return
	}

	newKey, err := ic.UnmarshalPublicKey(kb)
	if err != nil {
		log.Warningf("%s cannot unmarshal key from remote peer: %s, %s", lp, rp, err)
		return
	}

	// verify key matches peer.ID
	np, err := peer.IDFromPublicKey(newKey)
	if err != nil {
		log.Debugf("%s cannot get peer.ID from key of remote peer: %s, %s", lp, rp, err)
		return
	}

	if np != rp {
		// if the newKey's peer.ID does not match known peer.ID...
		if rp == "" && np != "" {
			// if local peerid is empty, then use the new, sent key.
			err := ids.Host.Peerstore().AddPubKey(rp, newKey)
			if err != nil {
				log.Debugf("%s could not add key for %s to peerstore: %s", lp, rp, err)
			}
		} else {
			// we have a local peer.ID and it does not match the sent key... error.
			log.Errorf("%s received key for remote peer %s mismatch: %s", lp, rp, np)
		}
		return
	}

	currKey := ids.Host.Peerstore().PubKey(rp)
	if currKey == nil {
		// no key? no auth transport. set this one.
		err := ids.Host.Peerstore().AddPubKey(rp, newKey)
		if err != nil {
			log.Debugf("%s could not add key for %s to peerstore: %s", lp, rp, err)
		}
		return
	}

	// ok, we have a local key, we should verify they match.
	if currKey.Equals(newKey) {
		return // ok great. we're done.
	}

	// weird, got a different key... but the different key MATCHES the peer.ID.
	// this is odd. let's log error and investigate. this should basically never happen
	// and it means we have something funky going on and possibly a bug.
	log.Errorf("%s identify got a different key for: %s", lp, rp)

	// okay... does ours NOT match the remote peer.ID?
	cp, err := peer.IDFromPublicKey(currKey)
	if err != nil {
		log.Errorf("%s cannot get peer.ID from local key of remote peer: %s, %s", lp, rp, err)
		return
	}
	if cp != rp {
		log.Errorf("%s local key for remote peer %s yields different peer.ID: %s", lp, rp, cp)
		return
	}

	// okay... curr key DOES NOT match new key. both match peer.ID. wat?
	log.Errorf("%s local key and received key for %s do not match, but match peer.ID", lp, rp)
}
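// The consistency check above boils down to re-deriving a peer.ID from the
// received key and comparing it with the known ID before the key is stored.
// A minimal sketch of that core step (assuming `kb` holds the received key
// bytes and `rp` is the remote peer.ID):
//
//	pk, err := ic.UnmarshalPublicKey(kb)
//	if err != nil {
//		return // undecodable key, nothing to verify
//	}
//	if derived, err := peer.IDFromPublicKey(pk); err != nil || derived != rp {
//		return // key does not belong to this peer; do not store it
//	}
//	ids.Host.Peerstore().AddPubKey(rp, pk)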
// HasConsistentTransport returns true if the address 'a' shares a
// protocol set with any address in the green set. This is used
// to check if a given address might be one of the addresses a peer is
// listening on.
func HasConsistentTransport(a ma.Multiaddr, green []ma.Multiaddr) bool {
	protosMatch := func(a, b []ma.Protocol) bool {
		if len(a) != len(b) {
			return false
		}

		for i, p := range a {
			if b[i].Code != p.Code {
				return false
			}
		}
		return true
	}

	protos := a.Protocols()

	for _, ga := range green {
		if protosMatch(protos, ga.Protocols()) {
			return true
		}
	}

	return false
}
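// For example (an illustrative sketch, not taken from this file): with a
// green set containing /ip4/1.2.3.4/tcp/4001, any ip4+tcp address is
// considered consistent regardless of host or port, while a quic address is
// not, because its protocol sequence differs.
//
//	green := []ma.Multiaddr{ma.StringCast("/ip4/1.2.3.4/tcp/4001")}
//	HasConsistentTransport(ma.StringCast("/ip4/10.0.0.1/tcp/9999"), green)     // true
//	HasConsistentTransport(ma.StringCast("/ip4/1.2.3.4/udp/4001/quic"), green) // false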
// IdentifyWait returns a channel which will be closed once
// "ProtocolIdentify" (handshake3) finishes on given conn.
// This happens async so the connection can start to be used
// even if handshake3 knowledge is not necessary.
// Users **MUST** call IdentifyWait _after_ IdentifyConn
func (ids *IDService) IdentifyWait(c network.Conn) <-chan struct{} {
	ids.currmu.Lock()
	ch, found := ids.currid[c]
	ids.currmu.Unlock()

	if found {
		return ch
	}

	// if not found, it means we are already done identifying it, or
	// haven't even started. either way, return a new channel closed.
	ch = make(chan struct{})
	close(ch)
	return ch
}
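// Typical caller pattern (a sketch; the connection `c` is assumed to come
// from a connected-notification or similar hook). Identification has to be
// started with IdentifyConn before IdentifyWait is consulted, otherwise the
// returned channel is already closed and the peerstore may still be empty.
//
//	// where the connection is first seen:
//	go ids.IdentifyConn(c)
//
//	// in a caller that needs identify results, after the above has run:
//	<-ids.IdentifyWait(c)
//	protos, _ := ids.Host.Peerstore().GetProtocols(c.RemotePeer())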
func (ids *IDService) consumeObservedAddress(observed []byte, c network.Conn) {
	if observed == nil {
		return
	}

	maddr, err := ma.NewMultiaddrBytes(observed)
	if err != nil {
		log.Debugf("error parsing received observed addr for %s: %s", c, err)
		return
	}

	// we should only use ObservedAddr when our connection's LocalAddr is one
	// of our ListenAddrs. If we Dial out using an ephemeral addr, knowing that
	// address's external mapping is not very useful because the port will not be
	// the same as the listen addr.
	ifaceaddrs, err := ids.Host.Network().InterfaceListenAddresses()
	if err != nil {
		log.Infof("failed to get interface listen addrs: %s", err)
		return
	}

	log.Debugf("identify identifying observed multiaddr: %s %s", c.LocalMultiaddr(), ifaceaddrs)
	if !addrInAddrs(c.LocalMultiaddr(), ifaceaddrs) && !addrInAddrs(c.LocalMultiaddr(), ids.Host.Network().ListenAddresses()) {
		// not in our list
		return
	}

	if !HasConsistentTransport(maddr, ids.Host.Addrs()) {
		log.Debugf("ignoring observed multiaddr that doesn't match the transports of any addresses we're announcing: %s", c.RemoteMultiaddr())
		return
	}

	// ok! we have the observed version of one of our ListenAddresses!
	log.Debugf("added own observed listen addr: %s --> %s", c.LocalMultiaddr(), maddr)
	ids.observedAddrs.Add(maddr, c.LocalMultiaddr(), c.RemoteMultiaddr(),
		c.Stat().Direction)
}
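// Concrete example of the filtering above (a sketch with made-up addresses):
// if this host listens on /ip4/0.0.0.0/tcp/4001 and the remote peer reports
// observing us at /ip4/203.0.113.7/tcp/4001, the address is recorded because
// the connection's local address is one of our listen addresses and ip4+tcp
// matches a transport we announce. An observation made over an ephemeral
// outbound dial address fails the addrInAddrs check and is dropped.
//
//	observed := ma.StringCast("/ip4/203.0.113.7/tcp/4001")
//	listen, _ := ids.Host.Network().InterfaceListenAddresses()
//	usable := addrInAddrs(c.LocalMultiaddr(), listen) &&
//		HasConsistentTransport(observed, ids.Host.Addrs())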
func addrInAddrs(a ma.Multiaddr, as []ma.Multiaddr) bool {
	for _, b := range as {
		if a.Equal(b) {
			return true
		}
	}
	return false
}
// netNotifiee defines methods to be used with the IDService
type netNotifiee IDService

func (nn *netNotifiee) IDService() *IDService {
	return (*IDService)(nn)
}

func (nn *netNotifiee) Connected(n network.Network, v network.Conn) {
	// TODO: deprecate the setConnHandler hook, and kick off
	// identification here.
}

func (nn *netNotifiee) Disconnected(n network.Network, v network.Conn) {
	// undo the setting of addresses to peer.ConnectedAddrTTL we did
	ids := nn.IDService()
	ids.addrMu.Lock()
	defer ids.addrMu.Unlock()

	if ids.Host.Network().Connectedness(v.RemotePeer()) != network.Connected {
		// Last disconnect.
		ps := ids.Host.Peerstore()
		ps.UpdateAddrs(v.RemotePeer(), peerstore.ConnectedAddrTTL, peerstore.RecentlyConnectedAddrTTL)
	}
}

func (nn *netNotifiee) OpenedStream(n network.Network, v network.Stream) {}
func (nn *netNotifiee) ClosedStream(n network.Network, v network.Stream) {}
func (nn *netNotifiee) Listen(n network.Network, a ma.Multiaddr)         {}
func (nn *netNotifiee) ListenClose(n network.Network, a ma.Multiaddr)    {}

func logProtocolMismatchDisconnect(c network.Conn, protocol, agent string) {
	lm := make(lgbl.DeferredMap)
	lm["remotePeer"] = func() interface{} { return c.RemotePeer().Pretty() }
	lm["remoteAddr"] = func() interface{} { return c.RemoteMultiaddr().String() }
	lm["protocolVersion"] = protocol
	lm["agentVersion"] = agent

	log.Event(context.TODO(), "IdentifyProtocolMismatch", lm)
	log.Debugf("IdentifyProtocolMismatch %s %s %s (disconnected)", c.RemotePeer(), protocol, agent)
}