package peermanager

import (
	"context"
	"errors"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/p2p/enr"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/event"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/peerstore"
	"github.com/libp2p/go-libp2p/core/protocol"
	ma "github.com/multiformats/go-multiaddr"

	"github.com/waku-org/go-waku/logging"
	"github.com/waku-org/go-waku/waku/v2/discv5"
	wps "github.com/waku-org/go-waku/waku/v2/peerstore"
	waku_proto "github.com/waku-org/go-waku/waku/v2/protocol"
	wenr "github.com/waku-org/go-waku/waku/v2/protocol/enr"
	"github.com/waku-org/go-waku/waku/v2/protocol/metadata"
	"github.com/waku-org/go-waku/waku/v2/protocol/relay"
	"github.com/waku-org/go-waku/waku/v2/service"

	"go.uber.org/zap"
)

type TopicHealth int
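
// Health levels as evaluated by checkAndUpdateTopicHealth: UnHealthy means the topic has no
// connected peer with an acceptable score, MinimallyHealthy means it has at least one but
// fewer than GossipSubDMin such peers, and SufficientlyHealthy means GossipSubDMin or more.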
const (
	UnHealthy           = iota
	MinimallyHealthy    = 1
	SufficientlyHealthy = 2
)

func (t TopicHealth) String() string {
	switch t {
	case UnHealthy:
		return "UnHealthy"
	case MinimallyHealthy:
		return "MinimallyHealthy"
	case SufficientlyHealthy:
		return "SufficientlyHealthy"
	default:
		return ""
	}
}

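// TopicHealthStatus is sent on TopicHealthNotifCh whenever a subscribed topic's health changes.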
type TopicHealthStatus struct {
	Topic  string
	Health TopicHealth
}

// NodeTopicDetails stores pubSubTopic related data like topicHandle for the node.
type NodeTopicDetails struct {
	topic        *pubsub.Topic
	healthStatus TopicHealth
}

// WakuProtoInfo holds protocol specific info
// To be used at a later stage to set various config such as criteria for peer management specific to each Waku protocol
// This should make peer-manager agnostic to protocol
type WakuProtoInfo struct {
	waku2ENRBitField uint8
}

// PeerManager applies various controls and manages connections towards peers.
type PeerManager struct {
	peerConnector          *PeerConnectionStrategy
	metadata               *metadata.WakuMetadata
	maxPeers               int
	maxRelayPeers          int
	logger                 *zap.Logger
	InRelayPeersTarget     int
	OutRelayPeersTarget    int
	host                   host.Host
	serviceSlots           *ServiceSlots
	ctx                    context.Context
	sub                    event.Subscription
	topicMutex             sync.RWMutex
	subRelayTopics         map[string]*NodeTopicDetails
	discoveryService       *discv5.DiscoveryV5
	wakuprotoToENRFieldMap map[protocol.ID]WakuProtoInfo
	TopicHealthNotifCh     chan<- TopicHealthStatus
	rttCache               *FastestPeerSelector
}

// PeerSelection provides various options based on which Peer is selected from a list of peers.
type PeerSelection int

const (
	Automatic PeerSelection = iota
	LowestRTT
)

// ErrNoPeersAvailable is emitted when no suitable peers are found for
// some protocol
var ErrNoPeersAvailable = errors.New("no suitable peers found")

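// peerConnectivityLoopSecs is the interval at which the connectivity loop re-checks relay
// connections; maxConnsToPeerRatio caps the peerstore size relative to maxConnections when
// maxPeers is unset or lower than maxConnections (see NewPeerManager).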
const (
	peerConnectivityLoopSecs = 15
	maxConnsToPeerRatio      = 5
)

// 80% relay peers 20% service peers
func relayAndServicePeers(maxConnections int) (int, int) {
	return maxConnections - maxConnections/5, maxConnections / 5
}

// 66% inRelayPeers 33% outRelayPeers
func inAndOutRelayPeers(relayPeers int) (int, int) {
	outRelayPeers := relayPeers / 3
	// Enforce a minimum number of outbound relay connections.
	const minOutRelayConns = 10
	if outRelayPeers < minOutRelayConns {
		outRelayPeers = minOutRelayConns
	}
	return relayPeers - outRelayPeers, outRelayPeers
}
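
// For example, with maxConnections = 30, relayAndServicePeers returns (24, 6) and
// inAndOutRelayPeers(24) returns (14, 10), since outbound relay connections are floored
// at minOutRelayConns.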

// checkAndUpdateTopicHealth finds health of specified topic and updates and notifies of the same.
// Also returns the healthyPeerCount
func (pm *PeerManager) checkAndUpdateTopicHealth(topic *NodeTopicDetails) int {
	healthyPeerCount := 0
	for _, p := range topic.topic.ListPeers() {
		if pm.host.Network().Connectedness(p) == network.Connected {
			pThreshold, err := pm.host.Peerstore().(wps.WakuPeerstore).Score(p)
			if err == nil {
				if pThreshold < relay.PeerPublishThreshold {
					pm.logger.Debug("peer score below publish threshold", logging.HostID("peer", p), zap.Float64("score", pThreshold))
				} else {
					healthyPeerCount++
				}
			} else {
				if errors.Is(err, peerstore.ErrNotFound) {
					// For now considering peer as healthy if we can't fetch score.
					healthyPeerCount++
					pm.logger.Debug("peer score is not available yet", logging.HostID("peer", p))
				} else {
					pm.logger.Warn("failed to fetch peer score", zap.Error(err), logging.HostID("peer", p))
				}
			}
		}
	}
	//Update topic's health
	oldHealth := topic.healthStatus
	if healthyPeerCount < 1 { //Ideally this check should be done with minPeersForRelay, but leaving it as is for now.
		topic.healthStatus = UnHealthy
	} else if healthyPeerCount < waku_proto.GossipSubDMin {
		topic.healthStatus = MinimallyHealthy
	} else {
		topic.healthStatus = SufficientlyHealthy
	}

	if oldHealth != topic.healthStatus {
		//Health has changed, notify of the same.
		pm.logger.Debug("topic health has changed", zap.String("pubsubtopic", topic.topic.String()), zap.Stringer("health", topic.healthStatus))
		pm.TopicHealthNotifCh <- TopicHealthStatus{topic.topic.String(), topic.healthStatus}
	}
	return healthyPeerCount
}

// TopicHealth can be used to fetch health of a specific pubsubTopic.
// Returns error if topic is not found.
func (pm *PeerManager) TopicHealth(pubsubTopic string) (TopicHealth, error) {
	pm.topicMutex.RLock()
	defer pm.topicMutex.RUnlock()

	topicDetails, ok := pm.subRelayTopics[pubsubTopic]
	if !ok {
		return UnHealthy, errors.New("topic not found")
	}
	return topicDetails.healthStatus, nil
}

// NewPeerManager creates a new peerManager instance.
func NewPeerManager(maxConnections int, maxPeers int, metadata *metadata.WakuMetadata, logger *zap.Logger) *PeerManager {

	maxRelayPeers, _ := relayAndServicePeers(maxConnections)
	inRelayPeersTarget, outRelayPeersTarget := inAndOutRelayPeers(maxRelayPeers)

	if maxPeers == 0 || maxConnections > maxPeers {
		maxPeers = maxConnsToPeerRatio * maxConnections
	}

	pm := &PeerManager{
		logger:                 logger.Named("peer-manager"),
		metadata:               metadata,
		maxRelayPeers:          maxRelayPeers,
		InRelayPeersTarget:     inRelayPeersTarget,
		OutRelayPeersTarget:    outRelayPeersTarget,
		serviceSlots:           NewServiceSlot(),
		subRelayTopics:         make(map[string]*NodeTopicDetails),
		maxPeers:               maxPeers,
		wakuprotoToENRFieldMap: map[protocol.ID]WakuProtoInfo{},
		rttCache:               NewFastestPeerSelector(logger),
	}
	logger.Info("PeerManager init values", zap.Int("maxConnections", maxConnections),
		zap.Int("maxRelayPeers", maxRelayPeers),
		zap.Int("outRelayPeersTarget", outRelayPeersTarget),
		zap.Int("inRelayPeersTarget", pm.InRelayPeersTarget),
		zap.Int("maxPeers", maxPeers))

	return pm
}

// SetDiscv5 sets the discoveryv5 service to be used for peer discovery.
func (pm *PeerManager) SetDiscv5(discv5 *discv5.DiscoveryV5) {
	pm.discoveryService = discv5
}

// SetHost sets the host to be used in order to access the peerStore.
func (pm *PeerManager) SetHost(host host.Host) {
	pm.host = host
	pm.rttCache.SetHost(host)
}

// SetPeerConnector sets the peer connector to be used for establishing relay connections.
func (pm *PeerManager) SetPeerConnector(pc *PeerConnectionStrategy) {
	pm.peerConnector = pc
}

// Start starts the processing to be done by peer manager.
func (pm *PeerManager) Start(ctx context.Context) {
	pm.RegisterWakuProtocol(relay.WakuRelayID_v200, relay.WakuRelayENRField)

	pm.ctx = ctx
	if pm.sub != nil {
		go pm.peerEventLoop(ctx)
	}
	go pm.connectivityLoop(ctx)
}

// This is a connectivity loop, which currently checks and prunes inbound connections.
func (pm *PeerManager) connectivityLoop(ctx context.Context) {
	pm.connectToRelayPeers()
	t := time.NewTicker(peerConnectivityLoopSecs * time.Second)
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			pm.connectToRelayPeers()
		}
	}
}

// GroupPeersByDirection returns all the connected peers in peer store grouped by inbound or outbound direction
func (pm *PeerManager) GroupPeersByDirection(specificPeers ...peer.ID) (inPeers peer.IDSlice, outPeers peer.IDSlice, err error) {
	if len(specificPeers) == 0 {
		specificPeers = pm.host.Network().Peers()
	}

	for _, p := range specificPeers {
		direction, err := pm.host.Peerstore().(wps.WakuPeerstore).Direction(p)
		if err == nil {
			if direction == network.DirInbound {
				inPeers = append(inPeers, p)
			} else if direction == network.DirOutbound {
				outPeers = append(outPeers, p)
			}
		} else {
			pm.logger.Error("failed to retrieve peer direction",
				logging.HostID("peerID", p), zap.Error(err))
		}
	}
	return inPeers, outPeers, nil
}

// getRelayPeers - Returns list of in and out peers supporting WakuRelayProtocol within specificPeers.
// If specificPeers is empty, it checks within all peers in peerStore.
func (pm *PeerManager) getRelayPeers(specificPeers ...peer.ID) (inRelayPeers peer.IDSlice, outRelayPeers peer.IDSlice) {
	//Group peers by their connected direction inbound or outbound.
	inPeers, outPeers, err := pm.GroupPeersByDirection(specificPeers...)
	if err != nil {
		return
	}
	pm.logger.Debug("number of peers connected", zap.Int("inPeers", inPeers.Len()),
		zap.Int("outPeers", outPeers.Len()))

	//Need to filter peers to check if they support relay
	if inPeers.Len() != 0 {
		inRelayPeers, _ = pm.FilterPeersByProto(inPeers, nil, relay.WakuRelayID_v200)
	}
	if outPeers.Len() != 0 {
		outRelayPeers, _ = pm.FilterPeersByProto(outPeers, nil, relay.WakuRelayID_v200)
	}
	return
}

// ensureMinRelayConnsPerTopic makes sure there are a minimum of D connections per pubsubTopic.
// If not it will look into peerStore to initiate more connections.
// If the peerStore doesn't have enough peers, it waits for discv5 to find more and tries again in the next cycle.
func (pm *PeerManager) ensureMinRelayConnsPerTopic() {
	pm.topicMutex.RLock()
	defer pm.topicMutex.RUnlock()
	for topicStr, topicInst := range pm.subRelayTopics {

		// @cammellos reported that ListPeers returned an invalid number of
		// peers. This will ensure that the peers returned by this function
		// match those peers that are currently connected

		curPeerLen := pm.checkAndUpdateTopicHealth(topicInst)
		if curPeerLen < waku_proto.GossipSubDMin {
			pm.logger.Debug("subscribed topic is not sufficiently healthy, initiating more connections to maintain health",
				zap.String("pubSubTopic", topicStr), zap.Int("connectedPeerCount", curPeerLen),
				zap.Int("optimumPeers", waku_proto.GossipSubDMin))
			//Find not connected peers.
			notConnectedPeers := pm.getNotConnectedPers(topicStr)
			if notConnectedPeers.Len() == 0 {
				pm.logger.Debug("could not find any peers in peerstore to connect to, discovering more", zap.String("pubSubTopic", topicStr))
				go pm.discoverPeersByPubsubTopics([]string{topicStr}, relay.WakuRelayID_v200, pm.ctx, 2)
				continue
			}
			pm.logger.Debug("connecting to eligible peers in peerstore", zap.String("pubSubTopic", topicStr))
			//Connect to eligible peers.
			numPeersToConnect := waku_proto.GossipSubDMin - curPeerLen

			if numPeersToConnect > notConnectedPeers.Len() {
				numPeersToConnect = notConnectedPeers.Len()
			}
			pm.connectToPeers(notConnectedPeers[0:numPeersToConnect])
		}
	}
}

// connectToRelayPeers ensures minimum D connections are there for each pubSubTopic.
// If not, initiates connections to additional peers.
// It also checks for incoming relay connections and prunes once they cross inRelayTarget
func (pm *PeerManager) connectToRelayPeers() {
	//Check for out peer connections and connect to more peers.
	pm.ensureMinRelayConnsPerTopic()

	inRelayPeers, outRelayPeers := pm.getRelayPeers()
	pm.logger.Debug("number of relay peers connected",
		zap.Int("in", inRelayPeers.Len()),
		zap.Int("out", outRelayPeers.Len()))
	if inRelayPeers.Len() > 0 &&
		inRelayPeers.Len() > pm.InRelayPeersTarget {
		pm.pruneInRelayConns(inRelayPeers)
	}
}

// connectToPeers connects to peers provided in the list if the addresses have not expired.
func (pm *PeerManager) connectToPeers(peers peer.IDSlice) {
	for _, peerID := range peers {
		peerData := AddrInfoToPeerData(wps.PeerManager, peerID, pm.host)
		if peerData == nil {
			continue
		}
		pm.peerConnector.PushToChan(*peerData)
	}
}

// getNotConnectedPers returns peers for a pubSubTopic that are not connected.
func (pm *PeerManager) getNotConnectedPers(pubsubTopic string) (notConnectedPeers peer.IDSlice) {
	var peerList peer.IDSlice
	if pubsubTopic == "" {
		peerList = pm.host.Peerstore().Peers()
	} else {
		peerList = pm.host.Peerstore().(*wps.WakuPeerstoreImpl).PeersByPubSubTopic(pubsubTopic)
	}
	for _, peerID := range peerList {
		if pm.host.Network().Connectedness(peerID) != network.Connected {
			notConnectedPeers = append(notConnectedPeers, peerID)
		}
	}
	return
}

// pruneInRelayConns prunes any incoming relay connections exceeding the derived InRelayPeersTarget
func (pm *PeerManager) pruneInRelayConns(inRelayPeers peer.IDSlice) {

	//Start disconnecting peers, based on what?
	//For now no preference is used
	//TODO: Need to have more intelligent way of doing this, maybe peer scores.
	//TODO: Keep optimalPeersRequired for a pubSubTopic in mind while pruning connections to peers.
	pm.logger.Info("peer connections exceed target relay peers, hence pruning",
		zap.Int("cnt", inRelayPeers.Len()), zap.Int("target", pm.InRelayPeersTarget))
	for pruningStartIndex := pm.InRelayPeersTarget; pruningStartIndex < inRelayPeers.Len(); pruningStartIndex++ {
		p := inRelayPeers[pruningStartIndex]
		err := pm.host.Network().ClosePeer(p)
		if err != nil {
			pm.logger.Warn("failed to disconnect connection towards peer",
				logging.HostID("peerID", p), zap.Error(err))
			continue
		}
		pm.logger.Debug("successfully disconnected connection towards peer",
			logging.HostID("peerID", p))
	}
}
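
// processPeerENR derives the peer's pubsub topics from the relay shards in its ENR and
// returns the Waku protocols advertised by the ENR's bitfield, registering any service
// protocols into the service slots.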
func (pm *PeerManager) processPeerENR(p *service.PeerData) []protocol.ID {
	shards, err := wenr.RelaySharding(p.ENR.Record())
	if err != nil {
		pm.logger.Error("could not derive relayShards from ENR", zap.Error(err),
			logging.HostID("peer", p.AddrInfo.ID), zap.String("enr", p.ENR.String()))
	} else {
		if shards != nil {
			p.PubsubTopics = make([]string, 0)
			topics := shards.Topics()
			for _, topic := range topics {
				topicStr := topic.String()
				p.PubsubTopics = append(p.PubsubTopics, topicStr)
			}
		} else {
			pm.logger.Debug("ENR doesn't have relay shards", logging.HostID("peer", p.AddrInfo.ID))
		}
	}
	supportedProtos := []protocol.ID{}
	//Identify and specify protocols supported by the peer based on the discovered peer's ENR
	var enrField wenr.WakuEnrBitfield
	if err := p.ENR.Record().Load(enr.WithEntry(wenr.WakuENRField, &enrField)); err == nil {
		for proto, protoENR := range pm.wakuprotoToENRFieldMap {
			protoENRField := protoENR.waku2ENRBitField
			if protoENRField&enrField != 0 {
				supportedProtos = append(supportedProtos, proto)
				//Add Service peers to serviceSlots.
				pm.addPeerToServiceSlot(proto, p.AddrInfo.ID)
			}
		}
	}
	return supportedProtos
}

// AddDiscoveredPeer adds a dynamically discovered peer to the peerStore.
// Note that these peers will not be set in service-slots.
func (pm *PeerManager) AddDiscoveredPeer(p service.PeerData, connectNow bool) {
	//Doing this check again inside addPeer, in order to avoid additional complexity of rollingBack other changes.
	if pm.maxPeers <= pm.host.Peerstore().Peers().Len() {
		return
	}
	//Check if the peer is already present, if so skip adding
	_, err := pm.host.Peerstore().(wps.WakuPeerstore).Origin(p.AddrInfo.ID)
	if err == nil {
		//Add addresses if existing addresses have expired
		existingAddrs := pm.host.Peerstore().Addrs(p.AddrInfo.ID)
		if len(existingAddrs) == 0 {
			pm.host.Peerstore().AddAddrs(p.AddrInfo.ID, p.AddrInfo.Addrs, peerstore.AddressTTL)
		}
		enr, err := pm.host.Peerstore().(wps.WakuPeerstore).ENR(p.AddrInfo.ID)
		// Verifying if the enr record is more recent (DiscV5 and peer exchange can return peers already seen)
		if err == nil {
			if p.ENR != nil {
				if enr.Record().Seq() >= p.ENR.Seq() {
					return
				}
				//Peer is already in peer-store but stored ENR is older than discovered one.
				pm.logger.Info("peer already found in peerstore, but re-adding it as ENR sequence is higher than locally stored",
					logging.HostID("peer", p.AddrInfo.ID), zap.Uint64("newENRSeq", p.ENR.Seq()), zap.Uint64("storedENRSeq", enr.Record().Seq()))
			} else {
				pm.logger.Info("peer already found in peerstore, but no new ENR", logging.HostID("peer", p.AddrInfo.ID))
			}
		} else {
			//Peer is in peer-store but it doesn't have an enr
			pm.logger.Info("peer already found in peerstore, but doesn't have an ENR record, re-adding",
				logging.HostID("peer", p.AddrInfo.ID))
		}
	}

	supportedProtos := []protocol.ID{}
	if len(p.PubsubTopics) == 0 && p.ENR != nil {
		// Try to fetch shard info and supported protocols from ENR to arrive at pubSub topics.
		supportedProtos = pm.processPeerENR(&p)
	}

	_ = pm.addPeer(p.AddrInfo.ID, p.AddrInfo.Addrs, p.Origin, p.PubsubTopics, supportedProtos...)

	if p.ENR != nil {
		err := pm.host.Peerstore().(wps.WakuPeerstore).SetENR(p.AddrInfo.ID, p.ENR)
		if err != nil {
			pm.logger.Error("could not store enr", zap.Error(err),
				logging.HostID("peer", p.AddrInfo.ID), zap.String("enr", p.ENR.String()))
		}
	}
	if connectNow {
		pm.logger.Debug("connecting now to discovered peer", logging.HostID("peer", p.AddrInfo.ID))
		go pm.peerConnector.PushToChan(p)
	}
}

// addPeer adds peer to the peerStore.
// It also sets additional metadata such as origin and supported protocols
func (pm *PeerManager) addPeer(ID peer.ID, addrs []ma.Multiaddr, origin wps.Origin, pubSubTopics []string, protocols ...protocol.ID) error {
	if pm.maxPeers <= pm.host.Peerstore().Peers().Len() {
		pm.logger.Error("could not add peer as peer store capacity is reached", logging.HostID("peer", ID), zap.Int("capacity", pm.maxPeers))
		return errors.New("peer store capacity reached")
	}
	pm.logger.Info("adding peer to peerstore", logging.HostID("peer", ID))
	if origin == wps.Static {
		pm.host.Peerstore().AddAddrs(ID, addrs, peerstore.PermanentAddrTTL)
	} else {
		//Need to re-evaluate the address expiry
		// For now expiring them with default addressTTL which is an hour.
		pm.host.Peerstore().AddAddrs(ID, addrs, peerstore.AddressTTL)
	}
	err := pm.host.Peerstore().(wps.WakuPeerstore).SetOrigin(ID, origin)
	if err != nil {
		pm.logger.Error("could not set origin", zap.Error(err), logging.HostID("peer", ID))
		return err
	}

	if len(protocols) > 0 {
		err = pm.host.Peerstore().AddProtocols(ID, protocols...)
		if err != nil {
			pm.logger.Error("could not set protocols", zap.Error(err), logging.HostID("peer", ID))
			return err
		}
	}
	if len(pubSubTopics) == 0 {
		// Probably the peer is discovered via DNSDiscovery (for which we don't have pubSubTopic info)
		//If pubSubTopic and enr is empty or no shard info in ENR, then set to defaultPubSubTopic
		pubSubTopics = []string{relay.DefaultWakuTopic}
	}
	err = pm.host.Peerstore().(wps.WakuPeerstore).SetPubSubTopics(ID, pubSubTopics)
	if err != nil {
		pm.logger.Error("could not store pubSubTopic", zap.Error(err),
			logging.HostID("peer", ID), zap.Strings("topics", pubSubTopics))
	}
	return nil
}
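
// AddrInfoToPeerData builds a PeerData for the given peer from the addresses stored in the
// host's peerstore. If the addresses have expired, the peer is removed from the peerstore
// and nil is returned.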
func AddrInfoToPeerData(origin wps.Origin, peerID peer.ID, host host.Host, pubsubTopics ...string) *service.PeerData {
	addrs := host.Peerstore().Addrs(peerID)
	if len(addrs) == 0 {
		//Addresses expired, remove peer from peerStore
		host.Peerstore().RemovePeer(peerID)
		return nil
	}
	return &service.PeerData{
		Origin: origin,
		AddrInfo: peer.AddrInfo{
			ID:    peerID,
			Addrs: addrs,
		},
		PubsubTopics: pubsubTopics,
	}
}

// AddPeer adds peer to the peerStore and also to service slots
func (pm *PeerManager) AddPeer(address ma.Multiaddr, origin wps.Origin, pubsubTopics []string, protocols ...protocol.ID) (*service.PeerData, error) {
	//Assuming all addresses have peerId
	info, err := peer.AddrInfoFromP2pAddr(address)
	if err != nil {
		return nil, err
	}

	//Add Service peers to serviceSlots.
	for _, proto := range protocols {
		pm.addPeerToServiceSlot(proto, info.ID)
	}

	//Add to the peer-store
	err = pm.addPeer(info.ID, info.Addrs, origin, pubsubTopics, protocols...)
	if err != nil {
		return nil, err
	}

	pData := &service.PeerData{
		Origin: origin,
		AddrInfo: peer.AddrInfo{
			ID:    info.ID,
			Addrs: info.Addrs,
		},
		PubsubTopics: pubsubTopics,
	}

	return pData, nil
}

// Connect establishes a connection to the given peer by handing it to the peer connector.
func (pm *PeerManager) Connect(pData *service.PeerData) {
	go pm.peerConnector.PushToChan(*pData)
}
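
// A typical flow (sketch; someProtocolID is a placeholder for an actual service protocol ID):
//
//	pData, err := pm.AddPeer(addr, wps.Static, []string{relay.DefaultWakuTopic}, someProtocolID)
//	if err == nil {
//		pm.Connect(pData)
//	}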

// RemovePeer deletes peer from the peerStore after disconnecting it.
// It also removes the peer from serviceSlot.
func (pm *PeerManager) RemovePeer(peerID peer.ID) {
	pm.host.Peerstore().RemovePeer(peerID)
	//Search if this peer is in serviceSlot and if so, remove it from there
	// TODO:Add another peer which is statically configured to the serviceSlot.
	pm.serviceSlots.removePeer(peerID)
}

// addPeerToServiceSlot adds a peerID to serviceSlot.
// Adding to peerStore is expected to be already done by caller.
// If relay proto is passed, it is not added to serviceSlot.
func (pm *PeerManager) addPeerToServiceSlot(proto protocol.ID, peerID peer.ID) {
	if proto == relay.WakuRelayID_v200 {
		pm.logger.Debug("cannot add Relay peer to service peer slots")
		return
	}

	//For now adding the peer to serviceSlot which means the latest added peer would be given priority.
	//TODO: Ideally we should sort the peers per service and return best peer based on peer score or RTT etc.
	pm.logger.Info("adding peer to service slots", logging.HostID("peer", peerID),
		zap.String("service", string(proto)))
	// getPeers returns nil for WakuRelayIDv200 protocol, but we don't run this ServiceSlot code for WakuRelayIDv200 protocol
	pm.serviceSlots.getPeers(proto).add(peerID)
}