bump go-waku

This commit is contained in:
parent 973dee9003
commit 368510dc0a

go.mod (2 changed lines)
@@ -291,3 +291,5 @@ require (
modernc.org/sqlite v1.14.2-0.20211125151325-d4ed92c0a70f // indirect
zombiezen.com/go/sqlite v0.8.0 // indirect
)

replace github.com/waku-org/go-waku => github.com/waku-org/go-waku v0.8.1-0.20240923214107-798c9c5d819a
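
Note: a replace directive takes precedence over the require entry, so builds and the vendored tree now resolve go-waku to the 20240923 pseudo-version even though vendor/modules.txt (further down in this diff) still records the 20240926 pseudo-version as the required one. A minimal sketch of the two go.mod stanzas that interact here; the require entry is inferred from the vendor/modules.txt hunk below, other entries are elided:

require (
	github.com/waku-org/go-waku v0.8.1-0.20240926162117-12abd041d699
	// ... other requirements unchanged ...
)

replace github.com/waku-org/go-waku => github.com/waku-org/go-waku v0.8.1-0.20240923214107-798c9c5d819a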

go.sum (4 changed lines)

@@ -2136,8 +2136,8 @@ github.com/waku-org/go-libp2p-pubsub v0.12.0-gowaku.0.20240823143342-b0f2429ca27
github.com/waku-org/go-libp2p-pubsub v0.12.0-gowaku.0.20240823143342-b0f2429ca27f/go.mod h1:Oi0zw9aw8/Y5GC99zt+Ef2gYAl+0nZlwdJonDyOz/sE=
github.com/waku-org/go-libp2p-rendezvous v0.0.0-20240110193335-a67d1cc760a0 h1:R4YYx2QamhBRl/moIxkDCNW+OP7AHbyWLBygDc/xIMo=
github.com/waku-org/go-libp2p-rendezvous v0.0.0-20240110193335-a67d1cc760a0/go.mod h1:EhZP9fee0DYjKH/IOQvoNSy1tSHp2iZadsHGphcAJgY=
github.com/waku-org/go-waku v0.8.1-0.20240926162117-12abd041d699 h1:Pr+YZX3wSv/DK6GLPY6yFDL6L8KxoIUYuTSH2bgKacA=
github.com/waku-org/go-waku v0.8.1-0.20240926162117-12abd041d699/go.mod h1:1BRnyg2mQ2aBNLTBaPq6vEvobzywGykPOhGQFbHGf74=
github.com/waku-org/go-waku v0.8.1-0.20240923214107-798c9c5d819a h1:aPT10FgDIUdsnAqy9y5Vzng/dqcr2Qyz1sXOyB7T6ik=
github.com/waku-org/go-waku v0.8.1-0.20240923214107-798c9c5d819a/go.mod h1:VNbVmh5UYg3vIvhGV4hCw8QEykq3RScDACo2Y2dIFfg=
github.com/waku-org/go-zerokit-rln v0.1.14-0.20240102145250-fa738c0bdf59 h1:jisj+OCI6QydLtFq3Pyhu49wl9ytPN7oAHjMfepHDrA=
github.com/waku-org/go-zerokit-rln v0.1.14-0.20240102145250-fa738c0bdf59/go.mod h1:1PdBdPzyTaKt3VnpAHk3zj+r9dXPFOr3IHZP9nFle6E=
github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230916172309-ee0ee61dde2b h1:KgZVhsLkxsj5gb/FfndSCQu6VYwALrCOgYI3poR95yE=

@@ -14,7 +14,6 @@ import (
"github.com/waku-org/go-waku/waku/v2/protocol/legacy_store/pb"
wpb "github.com/waku-org/go-waku/waku/v2/protocol/pb"
"github.com/waku-org/go-waku/waku/v2/timesource"
"github.com/waku-org/go-waku/waku/v2/utils"
"go.uber.org/zap"
"google.golang.org/protobuf/proto"
)
@@ -187,7 +186,6 @@ func (d *DBStore) Start(ctx context.Context, timesource timesource.Timesource) e
}

func (d *DBStore) updateMetrics(ctx context.Context) {
defer utils.LogOnPanic()
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
defer d.wg.Done()
@@ -253,7 +251,6 @@ func (d *DBStore) getDeleteOldRowsQuery() string {
}

func (d *DBStore) checkForOlderRecords(ctx context.Context, t time.Duration) {
defer utils.LogOnPanic()
defer d.wg.Done()

ticker := time.NewTicker(t)

@@ -11,7 +11,6 @@ import (
"github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/protocol/filter"
"github.com/waku-org/go-waku/waku/v2/protocol/subscription"
"github.com/waku-org/go-waku/waku/v2/utils"
"go.uber.org/zap"
)

@@ -99,7 +98,6 @@ func Subscribe(ctx context.Context, wf *filter.WakuFilterLightNode, contentFilte
}

func (apiSub *Sub) Unsubscribe(contentFilter protocol.ContentFilter) {
defer utils.LogOnPanic()
_, err := apiSub.wf.Unsubscribe(apiSub.ctx, contentFilter)
//Not reading result unless we want to do specific error handling?
if err != nil {
@@ -108,7 +106,6 @@ func (apiSub *Sub) Unsubscribe(contentFilter protocol.ContentFilter) {
}

func (apiSub *Sub) subscriptionLoop(batchInterval time.Duration) {
defer utils.LogOnPanic()
ticker := time.NewTicker(batchInterval)
defer ticker.Stop()
for {
@@ -216,14 +213,12 @@ func (apiSub *Sub) multiplex(subs []*subscription.SubscriptionDetails) {
for _, subDetails := range subs {
apiSub.subs[subDetails.ID] = subDetails
go func(subDetails *subscription.SubscriptionDetails) {
defer utils.LogOnPanic()
apiSub.log.Debug("new multiplex", zap.String("sub-id", subDetails.ID))
for env := range subDetails.C {
apiSub.DataCh <- env
}
}(subDetails)
go func(subDetails *subscription.SubscriptionDetails) {
defer utils.LogOnPanic()
select {
case <-apiSub.ctx.Done():
return

@@ -13,7 +13,6 @@ import (
"github.com/waku-org/go-waku/waku/v2/onlinechecker"
"github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/protocol/filter"
"github.com/waku-org/go-waku/waku/v2/utils"
)

// Methods on FilterManager just aggregate filters from application and subscribe to them
@@ -88,7 +87,6 @@ func NewFilterManager(ctx context.Context, logger *zap.Logger, minPeersPerFilter
}

func (mgr *FilterManager) startFilterSubLoop() {
defer utils.LogOnPanic()
ticker := time.NewTicker(mgr.filterSubBatchDuration)
defer ticker.Stop()
for {
@@ -159,7 +157,6 @@ func (mgr *FilterManager) SubscribeFilter(filterID string, cf protocol.ContentFi
}

func (mgr *FilterManager) subscribeAndRunLoop(f filterConfig) {
defer utils.LogOnPanic()
ctx, cancel := context.WithCancel(mgr.ctx)
config := FilterConfig{MaxPeers: mgr.minPeersPerFilter}
sub, err := Subscribe(ctx, mgr.node, f.contentFilter, config, mgr.logger, mgr.params)

@@ -15,7 +15,6 @@ import (
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
"github.com/waku-org/go-waku/waku/v2/protocol/store"
"github.com/waku-org/go-waku/waku/v2/timesource"
"github.com/waku-org/go-waku/waku/v2/utils"
"go.uber.org/zap"
"google.golang.org/protobuf/proto"
)
@@ -103,7 +102,6 @@ func (m *MissingMessageVerifier) Start(ctx context.Context) {
m.C = c

go func() {
defer utils.LogOnPanic()
t := time.NewTicker(m.params.interval)
defer t.Stop()

@@ -125,7 +123,6 @@ func (m *MissingMessageVerifier) Start(ctx context.Context) {
default:
semaphore <- struct{}{}
go func(interest criteriaInterest) {
defer utils.LogOnPanic()
m.fetchHistory(c, interest)
<-semaphore
}(interest)
@@ -279,7 +276,6 @@ func (m *MissingMessageVerifier) fetchMessagesBatch(c chan<- *protocol.Envelope,

wg.Add(1)
go func(messageHashes []pb.MessageHash) {
defer utils.LogOnPanic()
defer wg.Wait()

result, err := m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (*store.Result, error) {

@@ -14,7 +14,6 @@ import (
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
"github.com/waku-org/go-waku/waku/v2/protocol/store"
"github.com/waku-org/go-waku/waku/v2/timesource"
"github.com/waku-org/go-waku/waku/v2/utils"
"go.uber.org/zap"
)

@@ -146,7 +145,6 @@ func (m *MessageSentCheck) SetStorePeerID(peerID peer.ID) {

// Start checks if the tracked outgoing messages are stored periodically
func (m *MessageSentCheck) Start() {
defer utils.LogOnPanic()
ticker := time.NewTicker(m.hashQueryInterval)
defer ticker.Stop()
for {

@@ -6,7 +6,6 @@ import (
"sync"

"github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/utils"
)

// MessagePriority determines the ordering for the message priority queue
@@ -183,7 +182,6 @@ func (m *MessageQueue) Pop(ctx context.Context) <-chan *protocol.Envelope {
ch := make(chan *protocol.Envelope)

go func() {
defer utils.LogOnPanic()
defer close(ch)

select {

@@ -172,7 +172,6 @@ func (d *DiscoveryV5) listen(ctx context.Context) error {
if d.NAT != nil && !d.udpAddr.IP.IsLoopback() {
d.WaitGroup().Add(1)
go func() {
defer utils.LogOnPanic()
defer d.WaitGroup().Done()
nat.Map(d.NAT, ctx.Done(), "udp", d.udpAddr.Port, d.udpAddr.Port, "go-waku discv5 discovery")
}()
@@ -218,7 +217,6 @@ func (d *DiscoveryV5) start() error {
if d.params.autoFindPeers {
d.WaitGroup().Add(1)
go func() {
defer utils.LogOnPanic()
defer d.WaitGroup().Done()
d.runDiscoveryV5Loop(d.Context())
}()

@@ -6,7 +6,6 @@ import (

"github.com/libp2p/go-libp2p/core/peer"
"github.com/waku-org/go-waku/waku/v2/service"
"github.com/waku-org/go-waku/waku/v2/utils"
)

// TestPeerDiscoverer is mock peer discoverer for testing
@@ -27,7 +26,6 @@ func NewTestPeerDiscoverer() *TestPeerDiscoverer {
// Subscribe is for subscribing to peer discoverer
func (t *TestPeerDiscoverer) Subscribe(ctx context.Context, ch <-chan service.PeerData) {
go func() {
defer utils.LogOnPanic()
for {
select {
case <-ctx.Done():

@@ -12,7 +12,6 @@ import (
"go.uber.org/zap"

wps "github.com/waku-org/go-waku/waku/v2/peerstore"
"github.com/waku-org/go-waku/waku/v2/utils"
)

// PeerStatis is a map of peer IDs to supported protocols
@@ -102,7 +101,6 @@ func (c ConnectionNotifier) Close() {
}

func (w *WakuNode) connectednessListener(ctx context.Context) {
defer utils.LogOnPanic()
defer w.wg.Done()

for {

@@ -12,7 +12,6 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/protocol/ping"
"github.com/waku-org/go-waku/logging"
"github.com/waku-org/go-waku/waku/v2/utils"
"go.uber.org/zap"
"golang.org/x/exp/maps"
)
@@ -41,7 +40,6 @@ func disconnectAllPeers(host host.Host, logger *zap.Logger) {
// This is necessary because TCP connections are automatically closed due to inactivity,
// and doing a ping will avoid this (with a small bandwidth cost)
func (w *WakuNode) startKeepAlive(ctx context.Context, randomPeersPingDuration time.Duration, allPeersPingDuration time.Duration) {
defer utils.LogOnPanic()
defer w.wg.Done()

if !w.opts.enableRelay {
@@ -170,7 +168,6 @@ func (w *WakuNode) startKeepAlive(ctx context.Context, randomPeersPingDuration t
}

func (w *WakuNode) pingPeer(ctx context.Context, wg *sync.WaitGroup, peerID peer.ID, resultChan chan bool) {
defer utils.LogOnPanic()
defer wg.Done()

logger := w.log.With(logging.HostID("peer", peerID))
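
The keep-alive comment in the hunk above explains the intent: idle TCP connections get torn down, and a periodic ping keeps them alive at a small bandwidth cost. A minimal self-contained sketch of that idea using go-libp2p's ping service follows; the interval, timeout, and function names are illustrative and not taken from go-waku, only the ping.Ping library call is real:

// keepalive_sketch.go - illustrative only; not part of go-waku.
package main

import (
	"context"
	"time"

	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/p2p/protocol/ping"
)

// keepAlive pings every currently connected peer once per interval so that
// otherwise idle connections keep carrying a little traffic.
func keepAlive(ctx context.Context, h host.Host, interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			for _, p := range h.Network().Peers() {
				pingCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
				// ping.Ping keeps pinging until its context is cancelled;
				// reading a single result is enough for a keep-alive.
				res := <-ping.Ping(pingCtx, h, p)
				cancel()
				_ = res // a real implementation would inspect res.RTT / res.Error
			}
		}
	}
}

func main() {
	h, err := libp2p.New()
	if err != nil {
		panic(err)
	}
	defer h.Close()

	go keepAlive(context.Background(), h, time.Minute)
	select {} // block forever; a real node would wire this into its lifecycle
}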

@@ -15,7 +15,6 @@ import (
"github.com/waku-org/go-waku/waku/v2/protocol"
wenr "github.com/waku-org/go-waku/waku/v2/protocol/enr"
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
"github.com/waku-org/go-waku/waku/v2/utils"
"go.uber.org/zap"
)

@@ -359,7 +358,6 @@ func (w *WakuNode) watchTopicShards(ctx context.Context) error {
}

go func() {
defer utils.LogOnPanic()
defer evtRelaySubscribed.Close()
defer evtRelayUnsubscribed.Close()

@@ -413,7 +411,6 @@ func (w *WakuNode) registerAndMonitorReachability(ctx context.Context) {
}
w.wg.Add(1)
go func() {
defer utils.LogOnPanic()
defer myEventSub.Close()
defer w.wg.Done()


@@ -3,7 +3,6 @@ package node
import (
"fmt"

"github.com/libp2p/go-libp2p/core/metrics"
"github.com/libp2p/go-libp2p/p2p/metricshelper"
"github.com/prometheus/client_golang/prometheus"
)
@@ -34,20 +33,11 @@ var peerStoreSize = prometheus.NewGauge(
Help: "Size of Peer Store",
})

var bandwidthTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "libp2p_network_bytes_total",
Help: "Bandwidth usage total",
},
[]string{"direction"},
)

var collectors = []prometheus.Collector{
gitVersion,
peerDials,
connectedPeers,
peerStoreSize,
bandwidthTotal,
}

// Metrics exposes the functions required to update prometheus metrics for the waku node
@@ -57,7 +47,6 @@ type Metrics interface {
RecordPeerConnected()
RecordPeerDisconnected()
SetPeerStoreSize(int)
RecordBandwidth(metrics.Stats)
}

type metricsImpl struct {
@@ -95,9 +84,3 @@ func (m *metricsImpl) RecordPeerDisconnected() {
func (m *metricsImpl) SetPeerStoreSize(size int) {
peerStoreSize.Set(float64(size))
}

func (m *metricsImpl) RecordBandwidth(stats metrics.Stats) {
bandwidthTotal.WithLabelValues("in").Add(float64(stats.TotalIn))
bandwidthTotal.WithLabelValues("out").Add(float64(stats.TotalOut))

}

@@ -18,7 +18,6 @@ import (

"github.com/libp2p/go-libp2p/core/event"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/metrics"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/peerstore"
@@ -85,12 +84,11 @@ type RLNRelay interface {
}

type WakuNode struct {
host host.Host
opts *WakuNodeParameters
log *zap.Logger
timesource timesource.Timesource
metrics Metrics
bandwidthCounter *metrics.BandwidthCounter
host host.Host
opts *WakuNodeParameters
log *zap.Logger
timesource timesource.Timesource
metrics Metrics

peerstore peerstore.Peerstore
peerConnector *peermanager.PeerConnectionStrategy
@@ -195,10 +193,8 @@ func New(opts ...WakuNodeOption) (*WakuNode, error) {
w.wakuFlag = enr.NewWakuEnrBitfield(w.opts.enableLightPush, w.opts.enableFilterFullNode, w.opts.enableStore, w.opts.enableRelay)
w.circuitRelayNodes = make(chan peer.AddrInfo)
w.metrics = newMetrics(params.prometheusReg)
w.metrics.RecordVersion(Version, GitCommit)

w.bandwidthCounter = metrics.NewBandwidthCounter()
params.libP2POpts = append(params.libP2POpts, libp2p.BandwidthReporter(w.bandwidthCounter))
w.metrics.RecordVersion(Version, GitCommit)

// Setup peerstore wrapper
if params.peerstore != nil {
@@ -218,7 +214,6 @@ func New(opts ...WakuNodeOption) (*WakuNode, error) {
func(ctx context.Context, numPeers int) <-chan peer.AddrInfo {
r := make(chan peer.AddrInfo)
go func() {
defer utils.LogOnPanic()
defer close(r)
for ; numPeers != 0; numPeers-- {
select {
@@ -313,7 +308,6 @@ func New(opts ...WakuNodeOption) (*WakuNode, error) {
}

func (w *WakuNode) watchMultiaddressChanges(ctx context.Context) {
defer utils.LogOnPanic()
defer w.wg.Done()

addrsSet := utils.MultiAddrSet(w.ListenAddresses()...)
@@ -364,23 +358,6 @@ func (w *WakuNode) Start(ctx context.Context) error {

w.host = host

// Bandwidth reporter created for comparing IDONTWANT performance
go func() {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()

for {
select {
case <-ctx.Done():
return
case <-ticker.C:
totals := w.bandwidthCounter.GetBandwidthTotals()
w.bandwidthCounter.Reset()
w.metrics.RecordBandwidth(totals)
}
}
}()

if w.addressChangesSub, err = host.EventBus().Subscribe(new(event.EvtLocalAddressesUpdated)); err != nil {
return err
}
@@ -573,7 +550,6 @@ func (w *WakuNode) ID() string {
}

func (w *WakuNode) watchENRChanges(ctx context.Context) {
defer utils.LogOnPanic()
defer w.wg.Done()

var prevNodeVal string
@@ -911,7 +887,6 @@ func (w *WakuNode) PeersByContentTopic(contentTopic string) peer.IDSlice {
}

func (w *WakuNode) findRelayNodes(ctx context.Context) {
defer utils.LogOnPanic()
defer w.wg.Done()

// Feed peers more often right after the bootstrap, then backoff

@@ -9,7 +9,6 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/waku-org/go-waku/waku/v2/utils"
"go.uber.org/zap"
)

@@ -78,7 +77,6 @@ func (c *ConnectionGater) InterceptUpgraded(_ network.Conn) (allow bool, reason

// NotifyDisconnect is called when a connection disconnects.
func (c *ConnectionGater) NotifyDisconnect(addr multiaddr.Multiaddr) {
defer utils.LogOnPanic()
ip, err := manet.ToIP(addr)
if err != nil {
return

vendor/github.com/waku-org/go-waku/waku/v2/peermanager/fastest_peer_selector.go (generated, vendored; 3 changed lines)

@@ -12,7 +12,6 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/protocol/ping"
"github.com/waku-org/go-waku/logging"
"github.com/waku-org/go-waku/waku/v2/utils"
"go.uber.org/zap"
)

@@ -70,11 +69,9 @@ func (r *FastestPeerSelector) FastestPeer(ctx context.Context, peers peer.IDSlic
pinged := make(map[peer.ID]struct{})

go func() {
defer utils.LogOnPanic()
// Ping any peer with no latency recorded
for peerToPing := range pingCh {
go func(p peer.ID) {
defer utils.LogOnPanic()
defer wg.Done()
rtt := time.Hour
result, err := r.PingPeer(ctx, p)

@@ -18,7 +18,6 @@ import (
"github.com/waku-org/go-waku/waku/v2/onlinechecker"
wps "github.com/waku-org/go-waku/waku/v2/peerstore"
"github.com/waku-org/go-waku/waku/v2/service"
"github.com/waku-org/go-waku/waku/v2/utils"

"go.uber.org/zap"

@@ -104,7 +103,6 @@ func (c *PeerConnectionStrategy) Subscribe(ctx context.Context, ch <-chan servic
// if running start a goroutine to consume the subscription
c.WaitGroup().Add(1)
go func() {
defer utils.LogOnPanic()
defer c.WaitGroup().Done()
c.consumeSubscription(subscription{ctx, ch})
}()
@@ -188,7 +186,6 @@ func (c *PeerConnectionStrategy) consumeSubscriptions() {
for _, subs := range c.subscriptions {
c.WaitGroup().Add(1)
go func(s subscription) {
defer utils.LogOnPanic()
defer c.WaitGroup().Done()
c.consumeSubscription(s)
}(subs)
@@ -236,7 +233,6 @@ func (c *PeerConnectionStrategy) addConnectionBackoff(peerID peer.ID) {
}

func (c *PeerConnectionStrategy) dialPeers() {
defer utils.LogOnPanic()
defer c.WaitGroup().Done()

maxGoRoutines := c.pm.OutPeersTarget
@@ -276,7 +272,6 @@ func (c *PeerConnectionStrategy) dialPeers() {
}

func (c *PeerConnectionStrategy) dialPeer(pi peer.AddrInfo, sem chan struct{}) {
defer utils.LogOnPanic()
defer c.WaitGroup().Done()
ctx, cancel := context.WithTimeout(c.Context(), c.dialTimeout)
defer cancel()

@@ -11,7 +11,6 @@ import (
wenr "github.com/waku-org/go-waku/waku/v2/protocol/enr"
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
"github.com/waku-org/go-waku/waku/v2/service"
"github.com/waku-org/go-waku/waku/v2/utils"
"go.uber.org/zap"
)

@@ -104,7 +103,6 @@ func (pm *PeerManager) discoverOnDemand(cluster uint16,
}

func (pm *PeerManager) discoverPeersByPubsubTopics(pubsubTopics []string, proto protocol.ID, ctx context.Context, maxCount int) {
defer utils.LogOnPanic()
shardsInfo, err := waku_proto.TopicsToRelayShards(pubsubTopics...)
if err != nil {
pm.logger.Error("failed to convert pubsub topic to shard", zap.Strings("topics", pubsubTopics), zap.Error(err))

@@ -262,7 +262,6 @@ func (pm *PeerManager) Start(ctx context.Context) {
}

func (pm *PeerManager) peerStoreLoop(ctx context.Context) {
defer utils.LogOnPanic()
t := time.NewTicker(prunePeerStoreInterval)
defer t.Stop()
for {
@@ -364,7 +363,6 @@ func (pm *PeerManager) prunePeerStore() {

// This is a connectivity loop, which currently checks and prunes inbound connections.
func (pm *PeerManager) connectivityLoop(ctx context.Context) {
defer utils.LogOnPanic()
pm.connectToPeers()
t := time.NewTicker(peerConnectivityLoopSecs * time.Second)
defer t.Stop()

@@ -12,7 +12,6 @@ import (
wps "github.com/waku-org/go-waku/waku/v2/peerstore"
waku_proto "github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
"github.com/waku-org/go-waku/waku/v2/utils"
"go.uber.org/zap"
"golang.org/x/exp/maps"
)
@@ -163,7 +162,6 @@ func (pm *PeerManager) handlerPeerTopicEvent(peerEvt relay.EvtPeerTopic) {
}

func (pm *PeerManager) peerEventLoop(ctx context.Context) {
defer utils.LogOnPanic()
defer pm.sub.Close()
for {
select {

@@ -28,7 +28,6 @@ import (
"github.com/waku-org/go-waku/waku/v2/protocol/subscription"
"github.com/waku-org/go-waku/waku/v2/service"
"github.com/waku-org/go-waku/waku/v2/timesource"
"github.com/waku-org/go-waku/waku/v2/utils"
"go.uber.org/zap"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
@@ -128,7 +127,6 @@ func (wf *WakuFilterLightNode) Stop() {
wf.h.RemoveStreamHandler(FilterPushID_v20beta1)
if wf.subscriptions.Count() > 0 {
go func() {
defer utils.LogOnPanic()
defer func() {
_ = recover()
}()
@@ -416,7 +414,6 @@ func (wf *WakuFilterLightNode) Subscribe(ctx context.Context, contentFilter prot
for i, peerID := range selectedPeers {
wg.Add(1)
go func(index int, ID peer.ID) {
defer utils.LogOnPanic()
defer wg.Done()
err := wf.request(
reqCtx,
@@ -568,7 +565,6 @@ func (wf *WakuFilterLightNode) Unsubscribe(ctx context.Context, contentFilter pr
// send unsubscribe request to all the peers
for peerID := range peers {
go func(peerID peer.ID) {
defer utils.LogOnPanic()
defer func() {
if params.wg != nil {
params.wg.Done()
@@ -691,7 +687,6 @@ func (wf *WakuFilterLightNode) unsubscribeAll(ctx context.Context, opts ...Filte
}
for peerId := range peers {
go func(peerID peer.ID) {
defer utils.LogOnPanic()
defer func() {
if params.wg != nil {
params.wg.Done()

vendor/github.com/waku-org/go-waku/waku/v2/protocol/filter/filter_health_check.go (generated, vendored; 3 changed lines)

@@ -5,7 +5,6 @@ import (
"time"

"github.com/libp2p/go-libp2p/core/peer"
"github.com/waku-org/go-waku/waku/v2/utils"
"go.uber.org/zap"
)

@@ -20,7 +19,6 @@ func (wf *WakuFilterLightNode) PingPeers() {
}

func (wf *WakuFilterLightNode) PingPeer(peer peer.ID) {
defer utils.LogOnPanic()
ctxWithTimeout, cancel := context.WithTimeout(wf.CommonService.Context(), PingTimeout)
defer cancel()
err := wf.Ping(ctxWithTimeout, peer)
@@ -43,7 +41,6 @@ func (wf *WakuFilterLightNode) PingPeer(peer peer.ID) {
}

func (wf *WakuFilterLightNode) FilterHealthCheckLoop() {
defer utils.LogOnPanic()
defer wf.WaitGroup().Done()
ticker := time.NewTicker(wf.peerPingInterval)
defer ticker.Stop()

@@ -218,7 +218,6 @@ func (wf *WakuFilterFullNode) unsubscribeAll(ctx context.Context, stream network
}

func (wf *WakuFilterFullNode) filterListener(ctx context.Context) {
defer utils.LogOnPanic()
defer wf.WaitGroup().Done()

// This function is invoked for each message received
@@ -240,7 +239,6 @@ func (wf *WakuFilterFullNode) filterListener(ctx context.Context) {
logger.Debug("pushing message to light node")
wf.WaitGroup().Add(1)
go func(subscriber peer.ID) {
defer utils.LogOnPanic()
defer wf.WaitGroup().Done()
start := time.Now()
err := wf.pushMessage(ctx, logger, subscriber, envelope)

@@ -10,7 +10,6 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/utils"
)

type PeerSet map[peer.ID]struct{}
@@ -189,7 +188,6 @@ func (sub *SubscribersMap) Items(pubsubTopic string, contentTopic string) <-chan
key := getKey(pubsubTopic, contentTopic)

f := func() {
defer utils.LogOnPanic()
sub.RLock()
defer sub.RUnlock()

@@ -238,7 +236,6 @@ func (sub *SubscribersMap) Refresh(peerID peer.ID) {
}

func (sub *SubscribersMap) cleanUp(ctx context.Context, cleanupInterval time.Duration) {
defer utils.LogOnPanic()
t := time.NewTicker(cleanupInterval)
defer t.Stop()


@@ -21,7 +21,6 @@ import (
wpb "github.com/waku-org/go-waku/waku/v2/protocol/pb"
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
"github.com/waku-org/go-waku/waku/v2/timesource"
"github.com/waku-org/go-waku/waku/v2/utils"
)

func findMessages(query *pb.HistoryQuery, msgProvider MessageProvider) ([]*wpb.WakuMessage, *pb.PagingInfo, error) {
@@ -160,11 +159,9 @@ func (store *WakuStore) storeMessage(env *protocol.Envelope) error {
}

func (store *WakuStore) storeIncomingMessages(ctx context.Context) {
defer utils.LogOnPanic()
defer store.wg.Done()
for envelope := range store.MsgC.Ch {
go func(env *protocol.Envelope) {
defer utils.LogOnPanic()
_ = store.storeMessage(env)
}(envelope)
}

vendor/github.com/waku-org/go-waku/waku/v2/protocol/lightpush/waku_lightpush.go (generated, vendored; 1 changed line)

@@ -334,7 +334,6 @@ func (wakuLP *WakuLightPush) Publish(ctx context.Context, message *wpb.WakuMessa
for i, peerID := range params.selectedPeers {
wg.Add(1)
go func(index int, id peer.ID) {
defer utils.LogOnPanic()
paramsValue := *params
paramsValue.requestID = protocol.GenerateRequestID()
defer wg.Done()

@@ -20,7 +20,6 @@ import (
"github.com/waku-org/go-waku/waku/v2/protocol/enr"
"github.com/waku-org/go-waku/waku/v2/protocol/metadata/pb"
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
"github.com/waku-org/go-waku/waku/v2/utils"
"go.uber.org/zap"
)

@@ -226,7 +225,6 @@ func (wakuM *WakuMetadata) disconnectPeer(peerID peer.ID, reason error) {
// Connected is called when a connection is opened
func (wakuM *WakuMetadata) Connected(n network.Network, cc network.Conn) {
go func() {
defer utils.LogOnPanic()
wakuM.log.Debug("peer connected", zap.Stringer("peer", cc.RemotePeer()))
// Metadata verification is done only if a clusterID is specified
if wakuM.clusterID == 0 {

@@ -16,7 +16,6 @@ import (
wenr "github.com/waku-org/go-waku/waku/v2/protocol/enr"
"github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange/pb"
"github.com/waku-org/go-waku/waku/v2/service"
"github.com/waku-org/go-waku/waku/v2/utils"
"go.uber.org/zap"
)

@@ -155,7 +154,6 @@ func (wakuPX *WakuPeerExchange) handleResponse(ctx context.Context, response *pb
wakuPX.log.Info("connecting to newly discovered peers", zap.Int("count", len(discoveredPeers)))
wakuPX.WaitGroup().Add(1)
go func() {
defer utils.LogOnPanic()
defer wakuPX.WaitGroup().Done()

peerCh := make(chan service.PeerData)

@@ -21,7 +21,6 @@ import (
wenr "github.com/waku-org/go-waku/waku/v2/protocol/enr"
"github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange/pb"
"github.com/waku-org/go-waku/waku/v2/service"
"github.com/waku-org/go-waku/waku/v2/utils"
"go.uber.org/zap"
"golang.org/x/time/rate"
)
@@ -224,7 +223,6 @@ func (wakuPX *WakuPeerExchange) iterate(ctx context.Context) error {
}

func (wakuPX *WakuPeerExchange) runPeerExchangeDiscv5Loop(ctx context.Context) {
defer utils.LogOnPanic()
defer wakuPX.WaitGroup().Done()

// Runs a discv5 loop adding new peers to the px peer cache

@@ -7,7 +7,6 @@ import (
"sync/atomic"

"github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/utils"
)

type BroadcasterParameters struct {
@@ -175,7 +174,6 @@ func (b *broadcaster) Start(ctx context.Context) error {
}

func (b *broadcaster) run(ctx context.Context) {
defer utils.LogOnPanic()
for {
select {
case <-ctx.Done():

@@ -5,7 +5,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/waku-org/go-waku/logging"
waku_proto "github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/utils"
"go.uber.org/zap"
)

@@ -57,7 +56,6 @@ func newMetrics(reg prometheus.Registerer, logger *zap.Logger) Metrics {
// RecordMessage is used to increase the counter for the number of messages received via waku relay
func (m *metricsImpl) RecordMessage(envelope *waku_proto.Envelope) {
go func() {
defer utils.LogOnPanic()
payloadSizeInBytes := len(envelope.Message().Payload)
payloadSizeInKb := float64(payloadSizeInBytes) / 1000
messageSize.Observe(payloadSizeInKb)

@@ -8,7 +8,6 @@ import (
"github.com/libp2p/go-libp2p/core/event"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/waku-org/go-waku/logging"
"github.com/waku-org/go-waku/waku/v2/utils"
"go.uber.org/zap"
)

@@ -52,7 +51,6 @@ func (w *WakuRelay) addPeerTopicEventListener(topic *pubsub.Topic) (*pubsub.Topi
}

func (w *WakuRelay) topicEventPoll(topic string, handler *pubsub.TopicEventHandler) {
defer utils.LogOnPanic()
defer w.WaitGroup().Done()
for {
evt, err := handler.NextPeerEvent(w.Context())

@@ -439,7 +439,6 @@ func (w *WakuRelay) subscribe(ctx context.Context, contentFilter waku_proto.Cont

subscriptions = append(subscriptions, subscription)
go func() {
defer utils.LogOnPanic()
<-ctx.Done()
subscription.Unsubscribe()
}()
@@ -534,7 +533,6 @@ func (w *WakuRelay) unsubscribeFromPubsubTopic(topicData *pubsubTopicSubscriptio
}

func (w *WakuRelay) pubsubTopicMsgHandler(sub *pubsub.Subscription) {
defer utils.LogOnPanic()
defer w.WaitGroup().Done()

for {

@@ -15,7 +15,6 @@ import (
"github.com/waku-org/go-waku/waku/v2/protocol/rln/contracts"
"github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager"
"github.com/waku-org/go-waku/waku/v2/protocol/rln/web3"
"github.com/waku-org/go-waku/waku/v2/utils"
"github.com/waku-org/go-zerokit-rln/rln"
"go.uber.org/zap"
)
@@ -121,7 +120,6 @@ func (mf *MembershipFetcher) loadOldEvents(ctx context.Context, fromBlock, toBlo
}

func (mf *MembershipFetcher) watchNewEvents(ctx context.Context, fromBlock uint64, handler RegistrationEventHandler, errCh chan<- error) {
defer utils.LogOnPanic()
defer mf.wg.Done()

// Watch for new events

@@ -7,7 +7,6 @@ import (
"sync"
"time"

"github.com/waku-org/go-waku/waku/v2/utils"
"github.com/waku-org/go-zerokit-rln/rln"
"go.uber.org/zap"
)
@@ -90,7 +89,6 @@ func (n *NullifierLog) HasDuplicate(proofMD rln.ProofMetadata) (bool, error) {

// cleanup cleans up the log every time there are more than MaxEpochGap epochs stored in it
func (n *NullifierLog) cleanup(ctx context.Context) {
defer utils.LogOnPanic()
t := time.NewTicker(1 * time.Minute) // TODO: tune this
defer t.Stop()


@@ -13,7 +13,6 @@ import (

"github.com/libp2p/go-libp2p/core/peer"
dbi "github.com/waku-org/go-libp2p-rendezvous/db"
"github.com/waku-org/go-waku/waku/v2/utils"
"go.uber.org/zap"
)

@@ -316,7 +315,6 @@ func (db *DB) ValidCookie(ns string, cookie []byte) bool {
}

func (db *DB) background(ctx context.Context) {
defer utils.LogOnPanic()
for {
db.cleanupExpired()


@@ -11,7 +11,6 @@ import (
"github.com/waku-org/go-waku/waku/v2/peerstore"
"github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/service"
"github.com/waku-org/go-waku/waku/v2/utils"
"go.uber.org/zap"
)

@@ -143,7 +142,6 @@ func (r *Rendezvous) Register(ctx context.Context, rendezvousPoints []*Rendezvou

// RegisterShard registers the node in the rendezvous points using a shard as namespace
func (r *Rendezvous) RegisterShard(ctx context.Context, cluster uint16, shard uint16, rendezvousPoints []*RendezvousPoint) {
defer utils.LogOnPanic()
namespace := ShardToNamespace(cluster, shard)
r.RegisterWithNamespace(ctx, namespace, rendezvousPoints)
}
@@ -160,7 +158,6 @@ func (r *Rendezvous) RegisterWithNamespace(ctx context.Context, namespace string
for _, m := range rendezvousPoints {
r.WaitGroup().Add(1)
go func(m *RendezvousPoint) {
defer utils.LogOnPanic()
r.WaitGroup().Done()

rendezvousClient := rvs.NewRendezvousClient(r.host, m.id)

@@ -7,7 +7,6 @@ import (
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/libp2p/go-libp2p/core/peer"
wps "github.com/waku-org/go-waku/waku/v2/peerstore"
"github.com/waku-org/go-waku/waku/v2/utils"
)

// PeerData contains information about a peer useful in establishing connections with it.
@@ -59,7 +58,6 @@ func (sp *CommonDiscoveryService) GetListeningChan() <-chan PeerData {
return sp.channel
}
func (sp *CommonDiscoveryService) PushToChan(data PeerData) bool {
defer utils.LogOnPanic()
if err := sp.ErrOnNotRunning(); err != nil {
return false
}

@@ -9,7 +9,6 @@ import (
"time"

"github.com/beevik/ntp"
"github.com/waku-org/go-waku/waku/v2/utils"
"go.uber.org/zap"
)

@@ -70,7 +69,6 @@ func computeOffset(timeQuery ntpQuery, servers []string, allowedFailures int) (t
responses := make(chan queryResponse, len(servers))
for _, server := range servers {
go func(server string) {
defer utils.LogOnPanic()
response, err := timeQuery(server, ntp.QueryOptions{
Timeout: DefaultRPCTimeout,
})
@@ -174,7 +172,6 @@ func (s *NTPTimeSource) runPeriodically(ctx context.Context, fn func() error) er
// we try to do it synchronously so that user can have reliable messages right away
s.wg.Add(1)
go func() {
defer utils.LogOnPanic()
for {
select {
case <-time.After(period):

@@ -1,7 +1,6 @@
package utils

import (
"runtime/debug"
"strings"

logging "github.com/ipfs/go-log/v2"
@@ -82,10 +81,3 @@ func InitLogger(encoding string, output string, name string, level zapcore.Level

log = logging.Logger(name).Desugar()
}

func LogOnPanic() {
if err := recover(); err != nil {
Logger().Error("panic in goroutine", zap.Any("error", err), zap.String("stacktrace", string(debug.Stack())))
panic(err)
}
}
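
For reference, the helper touched in this hunk is what appears throughout the diff above as `defer utils.LogOnPanic()` at the top of each goroutine: it logs the panic with a stack trace and then re-panics so the failure is still fatal. A minimal standalone sketch of the same pattern; the logger setup and the worker body are illustrative, only the recover/log/re-panic shape mirrors the vendored helper:

package main

import (
	"runtime/debug"

	"go.uber.org/zap"
)

var logger, _ = zap.NewDevelopment()

// logOnPanic mirrors the vendored helper: log the panic with a stack trace,
// then re-panic so the process still crashes loudly.
func logOnPanic() {
	if err := recover(); err != nil {
		logger.Error("panic in goroutine",
			zap.Any("error", err),
			zap.String("stacktrace", string(debug.Stack())))
		panic(err)
	}
}

func main() {
	done := make(chan struct{})
	go func() {
		defer logOnPanic() // same call shape as `defer utils.LogOnPanic()` in the diff
		defer close(done)
		// ... goroutine work that might panic ...
	}()
	<-done
}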

@@ -1015,7 +1015,7 @@ github.com/waku-org/go-discover/discover/v5wire
github.com/waku-org/go-libp2p-rendezvous
github.com/waku-org/go-libp2p-rendezvous/db
github.com/waku-org/go-libp2p-rendezvous/pb
# github.com/waku-org/go-waku v0.8.1-0.20240926162117-12abd041d699
# github.com/waku-org/go-waku v0.8.1-0.20240926162117-12abd041d699 => github.com/waku-org/go-waku v0.8.1-0.20240923214107-798c9c5d819a
## explicit; go 1.21
github.com/waku-org/go-waku/logging
github.com/waku-org/go-waku/tests
@@ -1424,3 +1424,4 @@ zombiezen.com/go/sqlite/sqlitex
# github.com/docker/docker => github.com/docker/engine v1.4.2-0.20190717161051-705d9623b7c1
# github.com/nfnt/resize => github.com/status-im/resize v0.0.0-20201215164250-7c6d9f0d3088
# github.com/forPelevin/gomoji => github.com/status-im/gomoji v1.1.3-0.20220213022530-e5ac4a8732d4
# github.com/waku-org/go-waku => github.com/waku-org/go-waku v0.8.1-0.20240923214107-798c9c5d819a