2021-12-08 14:21:30 +00:00
|
|
|
package node
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/libp2p/go-libp2p-core/network"
|
|
|
|
"github.com/libp2p/go-libp2p-core/peer"
|
|
|
|
"github.com/libp2p/go-libp2p/p2p/protocol/ping"
|
2022-05-27 13:25:06 +00:00
|
|
|
"github.com/status-im/go-waku/logging"
|
|
|
|
"go.uber.org/zap"
|
2021-12-08 14:21:30 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// maxAllowedPingFailures is the number of consecutive failed pings tolerated
// for a peer; once exceeded (see pingPeer), the connection to that peer is
// closed and the failure counter is reset.
const maxAllowedPingFailures = 2
|
|
|
|
// maxPublishAttempt caps how many times a publish is retried — presumably
// consumed by publish/retry logic elsewhere in this package; it is not
// referenced in this file's keep-alive code (TODO confirm usage site).
const maxPublishAttempt = 5
|
|
|
|
|
|
|
|
// startKeepAlive creates a go routine that periodically pings connected peers.
|
|
|
|
// This is necessary because TCP connections are automatically closed due to inactivity,
|
|
|
|
// and doing a ping will avoid this (with a small bandwidth cost)
|
|
|
|
func (w *WakuNode) startKeepAlive(t time.Duration) {
|
|
|
|
go func() {
|
|
|
|
defer w.wg.Done()
|
2022-05-27 13:25:06 +00:00
|
|
|
w.log.Info("setting up ping protocol", zap.Duration("duration", t))
|
2021-12-08 14:21:30 +00:00
|
|
|
ticker := time.NewTicker(t)
|
|
|
|
defer ticker.Stop()
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-ticker.C:
|
|
|
|
// Compared to Network's peers collection,
|
|
|
|
// Peerstore contains all peers ever connected to,
|
|
|
|
// thus if a host goes down and back again,
|
|
|
|
// pinging a peer will trigger identification process,
|
|
|
|
// which is not possible when iterating
|
|
|
|
// through Network's peer collection, as it will be empty
|
|
|
|
for _, p := range w.host.Peerstore().Peers() {
|
|
|
|
if p != w.host.ID() {
|
|
|
|
w.wg.Add(1)
|
|
|
|
go w.pingPeer(p)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
case <-w.quit:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (w *WakuNode) pingPeer(peer peer.ID) {
|
|
|
|
w.keepAliveMutex.Lock()
|
|
|
|
defer w.keepAliveMutex.Unlock()
|
|
|
|
defer w.wg.Done()
|
|
|
|
|
|
|
|
ctx, cancel := context.WithTimeout(w.ctx, 3*time.Second)
|
|
|
|
defer cancel()
|
|
|
|
|
2022-05-27 13:25:06 +00:00
|
|
|
logger := w.log.With(logging.HostID("peer", peer))
|
|
|
|
logger.Debug("pinging")
|
2021-12-08 14:21:30 +00:00
|
|
|
pr := ping.Ping(ctx, w.host, peer)
|
|
|
|
select {
|
|
|
|
case res := <-pr:
|
|
|
|
if res.Error != nil {
|
|
|
|
w.keepAliveFails[peer]++
|
2022-05-27 13:25:06 +00:00
|
|
|
logger.Debug("could not ping", zap.Error(res.Error))
|
2021-12-08 14:21:30 +00:00
|
|
|
} else {
|
|
|
|
w.keepAliveFails[peer] = 0
|
|
|
|
}
|
|
|
|
case <-ctx.Done():
|
|
|
|
w.keepAliveFails[peer]++
|
2022-05-27 13:25:06 +00:00
|
|
|
logger.Debug("could not ping (context done)", zap.Error(ctx.Err()))
|
2021-12-08 14:21:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if w.keepAliveFails[peer] > maxAllowedPingFailures && w.host.Network().Connectedness(peer) == network.Connected {
|
2022-05-27 13:25:06 +00:00
|
|
|
logger.Info("disconnecting peer")
|
2021-12-08 14:21:30 +00:00
|
|
|
if err := w.host.Network().ClosePeer(peer); err != nil {
|
2022-05-27 13:25:06 +00:00
|
|
|
logger.Debug("closing conn to peer", zap.Error(err))
|
2021-12-08 14:21:30 +00:00
|
|
|
}
|
|
|
|
w.keepAliveFails[peer] = 0
|
|
|
|
}
|
|
|
|
}
|