mirror of https://github.com/status-im/go-waku.git
feat: use CommonService in peerConnector (#737)
* feat(CommonService): add channel and use commonService in discv5
* fix: add mutex to PushToChan
* fix: remove generic functionality
* feat: use CommonService in peerConnector
* fix: remove generic functionality
* nit: add error log
parent be4601e8f1
commit a650469fae
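To make the refactor easier to follow, here is a minimal sketch (not part of the commit) of the lifecycle pattern it introduces: a discovery component embeds *CommonDiscoveryService and lets it own the context, wait group, and output channel instead of keeping its own cancel/wg/dialCh fields. The sketch assumes it sits in the same package as the CommonDiscoveryService and PeerData types exercised in the hunks below and imports only context and time; exampleDiscoverer and its ticker loop are hypothetical.

// exampleDiscoverer is a hypothetical component following the pattern applied
// by this commit; it is not part of go-waku.
type exampleDiscoverer struct {
	*CommonDiscoveryService // provides Start/Stop, Context(), WaitGroup(), PushToChan(), GetListeningChan()
}

func newExampleDiscoverer() *exampleDiscoverer {
	return &exampleDiscoverer{CommonDiscoveryService: NewCommonDiscoveryService()}
}

// Start delegates lifecycle handling to the embedded service, which runs the
// supplied start callback once its context and channel are set up.
func (d *exampleDiscoverer) Start(ctx context.Context) error {
	return d.CommonDiscoveryService.Start(ctx, d.start)
}

func (d *exampleDiscoverer) start() error {
	d.WaitGroup().Add(1)
	go d.loop()
	return nil
}

func (d *exampleDiscoverer) loop() {
	defer d.WaitGroup().Done()
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-d.Context().Done(): // shared context replaces a local cancel field
			return
		case <-ticker.C:
			// A producer hands discovered peers to consumers via PushToChan;
			// it returns false once the service is stopped, so bail out then
			// (the rendezvous hunk below adds an error log for this case).
			if !d.PushToChan(PeerData{}) { // a real discoverer would fill Origin and AddrInfo
				return
			}
		}
	}
}

// Stop tears down the goroutines through the shared helper.
func (d *exampleDiscoverer) Stop() {
	d.CommonDiscoveryService.Stop(func() {})
}

Consumers such as the peer connector then read from GetListeningChan() instead of a private dialCh, as the dialPeers hunk below shows.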
@@ -7,6 +7,7 @@ import (
 	"errors"
 	"math/rand"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/libp2p/go-libp2p/core/host"
@@ -17,8 +18,6 @@ import (
 	"github.com/waku-org/go-waku/logging"
 	wps "github.com/waku-org/go-waku/waku/v2/peerstore"
 
-	"sync/atomic"
-
 	"go.uber.org/zap"
 
 	lru "github.com/hashicorp/golang-lru"
@@ -27,22 +26,17 @@ import (
 // PeerConnectionStrategy is a utility to connect to peers,
 // but only if we have not recently tried connecting to them already
 type PeerConnectionStrategy struct {
-	sync.RWMutex
-
-	cache *lru.TwoQueueCache
-	host host.Host
-	pm *PeerManager
-	cancel context.CancelFunc
-
-	paused atomic.Bool
-
-	wg sync.WaitGroup
-	dialTimeout time.Duration
-	dialCh chan peer.AddrInfo
+	mux sync.Mutex
+	cache *lru.TwoQueueCache
+	host host.Host
+	pm *PeerManager
+
+	paused atomic.Bool
+	dialTimeout time.Duration
+	*CommonDiscoveryService
 	subscriptions []<-chan PeerData
 
 	backoff backoff.BackoffFactory
-	mux sync.Mutex
 	logger *zap.Logger
 }
 
@@ -69,12 +63,12 @@ func NewPeerConnectionStrategy(pm *PeerManager,
 	}
 	//
 	pc := &PeerConnectionStrategy{
 		cache: cache,
-		wg: sync.WaitGroup{},
-		dialTimeout: dialTimeout,
+		dialTimeout: dialTimeout,
+		CommonDiscoveryService: NewCommonDiscoveryService(),
 		pm: pm,
 		backoff: getBackOff(),
 		logger: logger.Named("discovery-connector"),
 	}
 	pm.SetPeerConnector(pc)
 	return pc, nil
@@ -87,36 +81,40 @@ type connCacheData struct {
 
 // Subscribe receives channels on which discovered peers should be pushed
 func (c *PeerConnectionStrategy) Subscribe(ctx context.Context, ch <-chan PeerData) {
-	if c.cancel != nil {
-		c.wg.Add(1)
-		go func() {
-			defer c.wg.Done()
-			c.consumeSubscription(ctx, ch)
-		}()
-	} else {
+	// if not running yet, store the subscription and return
+	if err := c.ErrOnNotRunning(); err != nil {
+		c.mux.Lock()
 		c.subscriptions = append(c.subscriptions, ch)
+		c.mux.Unlock()
+		return
 	}
+	// if running start a goroutine to consume the subscription
+	c.WaitGroup().Add(1)
+	go func() {
+		defer c.WaitGroup().Done()
+		c.consumeSubscription(ch)
+	}()
 }
 
-func (c *PeerConnectionStrategy) consumeSubscription(ctx context.Context, ch <-chan PeerData) {
+func (c *PeerConnectionStrategy) consumeSubscription(ch <-chan PeerData) {
 	for {
 		// for returning from the loop when peerConnector is paused.
 		select {
-		case <-ctx.Done():
+		case <-c.Context().Done():
 			return
 		default:
 		}
 		//
 		if !c.isPaused() {
 			select {
-			case <-ctx.Done():
+			case <-c.Context().Done():
 				return
 			case p, ok := <-ch:
 				if !ok {
 					return
 				}
 				c.pm.AddDiscoveredPeer(p)
-				c.publishWork(ctx, p.AddrInfo)
+				c.PushToChan(p)
 			case <-time.After(1 * time.Second):
 				// This timeout is to not lock the goroutine
 				break
@@ -135,48 +133,36 @@ func (c *PeerConnectionStrategy) SetHost(h host.Host) {
 // Start attempts to connect to the peers passed in by peerCh.
 // Will not connect to peers if they are within the backoff period.
 func (c *PeerConnectionStrategy) Start(ctx context.Context) error {
-	if c.cancel != nil {
-		return errors.New("already started")
-	}
-
-	ctx, cancel := context.WithCancel(ctx)
-	c.cancel = cancel
-	c.dialCh = make(chan peer.AddrInfo)
+	return c.CommonDiscoveryService.Start(ctx, c.start)
 
-	c.wg.Add(2)
-	go c.shouldDialPeers(ctx)
-	go c.dialPeers(ctx)
+}
+func (c *PeerConnectionStrategy) start() error {
+	c.WaitGroup().Add(2)
+	go c.shouldDialPeers()
+	go c.dialPeers()
 
-	c.consumeSubscriptions(ctx)
+	c.consumeSubscriptions()
 
 	return nil
 }
 
 // Stop terminates the peer-connector
 func (c *PeerConnectionStrategy) Stop() {
-	if c.cancel == nil {
-		return
-	}
-
-	c.cancel()
-	c.cancel = nil
-	c.wg.Wait()
-
-	close(c.dialCh)
+	c.CommonDiscoveryService.Stop(func() {})
 }
 
 func (c *PeerConnectionStrategy) isPaused() bool {
 	return c.paused.Load()
 }
 
-func (c *PeerConnectionStrategy) shouldDialPeers(ctx context.Context) {
-	defer c.wg.Done()
+func (c *PeerConnectionStrategy) shouldDialPeers() {
+	defer c.WaitGroup().Done()
 
 	ticker := time.NewTicker(1 * time.Second)
 	defer ticker.Stop()
 	for {
 		select {
-		case <-ctx.Done():
+		case <-c.Context().Done():
 			return
 		case <-ticker.C:
 			_, outRelayPeers := c.pm.getRelayPeers()
@@ -186,25 +172,17 @@ func (c *PeerConnectionStrategy) shouldDialPeers(ctx context.Context) {
 }
 
 // it might happen Subscribe is called before peerConnector has started so store these subscriptions in subscriptions array and custom after c.cancel is set.
-func (c *PeerConnectionStrategy) consumeSubscriptions(ctx context.Context) {
+func (c *PeerConnectionStrategy) consumeSubscriptions() {
 	for _, subs := range c.subscriptions {
-		c.wg.Add(1)
+		c.WaitGroup().Add(1)
 		go func(s <-chan PeerData) {
-			defer c.wg.Done()
-			c.consumeSubscription(ctx, s)
+			defer c.WaitGroup().Done()
+			c.consumeSubscription(s)
 		}(subs)
 	}
 	c.subscriptions = nil
 }
 
-func (c *PeerConnectionStrategy) publishWork(ctx context.Context, p peer.AddrInfo) {
-	select {
-	case c.dialCh <- p:
-	case <-ctx.Done():
-		return
-	}
-}
-
 const maxActiveDials = 5
 
 // c.cache is thread safe
@@ -230,8 +208,8 @@ func (c *PeerConnectionStrategy) canDialPeer(pi peer.AddrInfo) bool {
 	return true
 }
 
-func (c *PeerConnectionStrategy) dialPeers(ctx context.Context) {
-	defer c.wg.Done()
+func (c *PeerConnectionStrategy) dialPeers() {
+	defer c.WaitGroup().Done()
 
 	maxGoRoutines := c.pm.OutRelayPeersTarget
 	if maxGoRoutines > maxActiveDials {
|
||||||
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case pi, ok := <-c.dialCh:
|
case pd, ok := <-c.GetListeningChan():
|
||||||
if !ok {
|
if !ok {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
addrInfo := pd.AddrInfo
|
||||||
|
|
||||||
if pi.ID == c.host.ID() || pi.ID == "" ||
|
if addrInfo.ID == c.host.ID() || addrInfo.ID == "" ||
|
||||||
c.host.Network().Connectedness(pi.ID) == network.Connected {
|
c.host.Network().Connectedness(addrInfo.ID) == network.Connected {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if c.canDialPeer(pi) {
|
if c.canDialPeer(addrInfo) {
|
||||||
sem <- struct{}{}
|
sem <- struct{}{}
|
||||||
c.wg.Add(1)
|
c.WaitGroup().Add(1)
|
||||||
go c.dialPeer(ctx, pi, sem)
|
go c.dialPeer(addrInfo, sem)
|
||||||
}
|
}
|
||||||
case <-ctx.Done():
|
case <-c.Context().Done():
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *PeerConnectionStrategy) dialPeer(ctx context.Context, pi peer.AddrInfo, sem chan struct{}) {
|
func (c *PeerConnectionStrategy) dialPeer(pi peer.AddrInfo, sem chan struct{}) {
|
||||||
defer c.wg.Done()
|
defer c.WaitGroup().Done()
|
||||||
ctx, cancel := context.WithTimeout(ctx, c.dialTimeout)
|
ctx, cancel := context.WithTimeout(c.Context(), c.dialTimeout)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
err := c.host.Connect(ctx, pi)
|
err := c.host.Connect(ctx, pi)
|
||||||
if err != nil && !errors.Is(err, context.Canceled) {
|
if err != nil && !errors.Is(err, context.Canceled) {
|
||||||
|
|
|
@@ -172,13 +172,19 @@ func (pm *PeerManager) connectToRelayPeers() {
 	} //Else: Should we raise some sort of unhealthy event??
 }
 
+func addrInfoToPeerData(origin wps.Origin, peerID peer.ID, host host.Host) PeerData {
+	return PeerData{
+		Origin: origin,
+		AddrInfo: peer.AddrInfo{
+			ID:    peerID,
+			Addrs: host.Peerstore().Addrs(peerID),
+		},
+	}
+}
 func (pm *PeerManager) connectToPeers(peers peer.IDSlice) {
 	for _, peerID := range peers {
-		peerInfo := peer.AddrInfo{
-			ID:    peerID,
-			Addrs: pm.host.Peerstore().Addrs(peerID),
-		}
-		pm.peerConnector.publishWork(pm.ctx, peerInfo)
+		peerData := addrInfoToPeerData(wps.PeerManager, peerID, pm.host)
+		pm.peerConnector.PushToChan(peerData)
 	}
 }
 
@@ -110,6 +110,7 @@ func (r *Rendezvous) DiscoverWithNamespace(ctx context.Context, namespace string
 				PubSubTopics: []string{namespace},
 			}
 			if !r.PushToChan(peer) {
+				r.log.Error("could push to closed channel/context completed")
 				return
 			}
 		}