Refactor filter health checks; add debugging logs

parent 9d374bcadc, commit 356b2f5ca3

Makefile | 1
@@ -35,6 +35,7 @@ GIT_COMMIT = $(shell git rev-parse --short HEAD)
AUTHOR ?= $(shell git config user.email || echo $$USER)

ENABLE_METRICS ?= true
BUILD_TAGS ?= gowaku_no_rln
BUILD_FLAGS ?= $(shell echo "-ldflags='\
-X github.com/status-im/status-go/params.Version=$(RELEASE_TAG:v%=%) \
-X github.com/status-im/status-go/params.GitCommit=$(GIT_COMMIT) \
@@ -162,11 +162,11 @@ func (w *gethWakuV2Wrapper) UnsubscribeMany(ids []string) error {

func (w *gethWakuV2Wrapper) createFilterWrapper(id string, keyAsym *ecdsa.PrivateKey, keySym []byte, pow float64, pubsubTopic string, topics [][]byte) (types.Filter, error) {
return NewWakuV2FilterWrapper(&wakucommon.Filter{
KeyAsym: keyAsym,
KeySym: keySym,
Topics: topics,
PubsubTopic: pubsubTopic,
Messages: wakucommon.NewMemoryMessageStore(),
KeyAsym: keyAsym,
KeySym: keySym,
ContentTopics: wakucommon.NewTopicSetFromBytes(topics),
PubsubTopic: pubsubTopic,
Messages: wakucommon.NewMemoryMessageStore(),
}, id), nil
}
go.mod | 10

@@ -84,7 +84,7 @@ require (
github.com/mutecomm/go-sqlcipher/v4 v4.4.2
github.com/schollz/peerdiscovery v1.7.0
github.com/siphiuel/lc-proxy-wrapper v0.0.0-20230516150924-246507cee8c7
github.com/waku-org/go-waku v0.7.1-0.20230907093131-092811658ea3
github.com/waku-org/go-waku v0.8.1-0.20230930175749-dcc828749f67
github.com/wk8/go-ordered-map/v2 v2.1.7
github.com/yeqown/go-qrcode/v2 v2.2.1
github.com/yeqown/go-qrcode/writer/standard v1.2.1

@@ -257,10 +257,10 @@ require (
github.com/urfave/cli/v2 v2.24.4 // indirect
github.com/waku-org/go-discover v0.0.0-20221209174356-61c833f34d98 // indirect
github.com/waku-org/go-libp2p-rendezvous v0.0.0-20230628220917-7b4e5ae4c0e7 // indirect
github.com/waku-org/go-zerokit-rln v0.1.14-0.20230905214645-ca686a02e816 // indirect
github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230905213302-1d6d18a03e7c // indirect
github.com/waku-org/go-zerokit-rln-arm v0.0.0-20230905183322-05f4cda61468 // indirect
github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230905182930-2b11e72ef866 // indirect
github.com/waku-org/go-zerokit-rln v0.1.14-0.20230916173259-d284a3d8f2fd // indirect
github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230916172309-ee0ee61dde2b // indirect
github.com/waku-org/go-zerokit-rln-arm v0.0.0-20230916171929-1dd9494ff065 // indirect
github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230916171518-2a77c3734dd1 // indirect
github.com/wk8/go-ordered-map v1.0.0 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
go.sum | 20

@@ -2094,16 +2094,16 @@ github.com/waku-org/go-discover v0.0.0-20221209174356-61c833f34d98 h1:xwY0kW5XZF
github.com/waku-org/go-discover v0.0.0-20221209174356-61c833f34d98/go.mod h1:eBHgM6T4EG0RZzxpxKy+rGz/6Dw2Nd8DWxS0lm9ESDw=
github.com/waku-org/go-libp2p-rendezvous v0.0.0-20230628220917-7b4e5ae4c0e7 h1:0e1h+p84yBp0IN7AqgbZlV7lgFBjm214lgSOE7CeJmE=
github.com/waku-org/go-libp2p-rendezvous v0.0.0-20230628220917-7b4e5ae4c0e7/go.mod h1:pFvOZ9YTFsW0o5zJW7a0B5tr1owAijRWJctXJ2toL04=
github.com/waku-org/go-waku v0.7.1-0.20230907093131-092811658ea3 h1:lwXUUy6XWnWr/svnQG30H/FlWKOvPAGjAFn3pwwjWbY=
github.com/waku-org/go-waku v0.7.1-0.20230907093131-092811658ea3/go.mod h1:HW6QoUlzw3tLUbLzhHCGCEVIFcAWIjqCF6+JU0pSyus=
github.com/waku-org/go-zerokit-rln v0.1.14-0.20230905214645-ca686a02e816 h1:M5skPFmapY5i5a9jSiGWft9PZMiQr2nCi8uzJc2IfBI=
github.com/waku-org/go-zerokit-rln v0.1.14-0.20230905214645-ca686a02e816/go.mod h1:zc3FBSLP6vy2sOjAnqIju3yKLRq1WkcxsS1Lh9w0CuA=
github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230905213302-1d6d18a03e7c h1:aDn17iEMrdXeQ6dp+Cv3ywJYStkomkvKWv8I00iy79c=
github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230905213302-1d6d18a03e7c/go.mod h1:KYykqtdApHVYZ3G0spwMnoxc5jH5eI3jyO9SwsSfi48=
github.com/waku-org/go-zerokit-rln-arm v0.0.0-20230905183322-05f4cda61468 h1:yNRDUyWJu/wHEPLps5D/Zce24mu/5ax2u1pXsMwRPbg=
github.com/waku-org/go-zerokit-rln-arm v0.0.0-20230905183322-05f4cda61468/go.mod h1:7cSGUoGVIla1IpnChrLbkVjkYgdOcr7rcifEfh4ReR4=
github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230905182930-2b11e72ef866 h1:dURzhyGtPrpmBJcnY4hpY83dW81cZimkZ8U+S89ANd0=
github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230905182930-2b11e72ef866/go.mod h1:+LeEYoW5/uBUTVjtBGLEVCUe9mOYAlu5ZPkIxLOSr5Y=
github.com/waku-org/go-waku v0.8.1-0.20230930175749-dcc828749f67 h1:EL0KljfCIFPXbY1IfT0JjVIjJekuF951ys1WL2WnWyM=
github.com/waku-org/go-waku v0.8.1-0.20230930175749-dcc828749f67/go.mod h1:MnMLFtym7XUt+GNN4zTkjm5NJCsm7TERLWVPOV/Ct6w=
github.com/waku-org/go-zerokit-rln v0.1.14-0.20230916173259-d284a3d8f2fd h1:cu7CsUo7BK6ac/v193RIaqAzUcmpa6MNY4xYW9AenQI=
github.com/waku-org/go-zerokit-rln v0.1.14-0.20230916173259-d284a3d8f2fd/go.mod h1:1PdBdPzyTaKt3VnpAHk3zj+r9dXPFOr3IHZP9nFle6E=
github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230916172309-ee0ee61dde2b h1:KgZVhsLkxsj5gb/FfndSCQu6VYwALrCOgYI3poR95yE=
github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230916172309-ee0ee61dde2b/go.mod h1:KYykqtdApHVYZ3G0spwMnoxc5jH5eI3jyO9SwsSfi48=
github.com/waku-org/go-zerokit-rln-arm v0.0.0-20230916171929-1dd9494ff065 h1:Sd7QD/1Yo2o2M1MY49F8Zr4KNBPUEK5cz5HoXQVJbrs=
github.com/waku-org/go-zerokit-rln-arm v0.0.0-20230916171929-1dd9494ff065/go.mod h1:7cSGUoGVIla1IpnChrLbkVjkYgdOcr7rcifEfh4ReR4=
github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230916171518-2a77c3734dd1 h1:4HSdWMFMufpRo3ECTX6BrvA+VzKhXZf7mS0rTa5cCWU=
github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230916171518-2a77c3734dd1/go.mod h1:+LeEYoW5/uBUTVjtBGLEVCUe9mOYAlu5ZPkIxLOSr5Y=
github.com/wealdtech/go-ens/v3 v3.5.0 h1:Huc9GxBgiGweCOGTYomvsg07K2QggAqZpZ5SuiZdC8o=
github.com/wealdtech/go-ens/v3 v3.5.0/go.mod h1:bVuYoWYEEeEu7Zy95rIMjPR34QFJarxt8p84ywSo0YM=
github.com/wealdtech/go-multicodec v1.4.0 h1:iq5PgxwssxnXGGPTIK1srvt6U5bJwIp7k6kBrudIWxg=
@@ -68,7 +68,7 @@ func HostID(key string, id peer.ID) zapcore.Field {
return zap.Stringer(key, hostID(id))
}

func (id hostID) String() string { return peer.Encode(peer.ID(id)) }
func (id hostID) String() string { return peer.ID(id).String() }

// Time - Waku uses Nanosecond Unix Time
type timestamp int64
@@ -410,6 +410,10 @@ func (d *DBStore) prepareQuerySQL(query *pb.HistoryQuery) (string, []interface{}
paramCnt++

sqlQuery += fmt.Sprintf("LIMIT $%d", paramCnt)
// Always search for _max page size_ + 1. If the extra row does not exist, do not return pagination info.
pageSize := query.PagingInfo.PageSize + 1
parameters = append(parameters, pageSize)

sqlQuery = fmt.Sprintf(sqlQuery, conditionStr, orderDirection, orderDirection, orderDirection, orderDirection)
d.log.Info(fmt.Sprintf("sqlQuery: %s", sqlQuery))

@@ -434,10 +438,7 @@ func (d *DBStore) Query(query *pb.HistoryQuery) (*pb.Index, []StoredMessage, err
return nil, nil, err
}
defer stmt.Close()
pageSize := query.PagingInfo.PageSize + 1

parameters = append(parameters, pageSize)

//
measurementStart := time.Now()
rows, err := stmt.Query(parameters...)
if err != nil {

@@ -458,6 +459,7 @@ func (d *DBStore) Query(query *pb.HistoryQuery) (*pb.Index, []StoredMessage, err

var cursor *pb.Index
if len(result) != 0 {
// since there are more rows than pagingInfo.PageSize, we need to return a cursor, for pagination
if len(result) > int(query.PagingInfo.PageSize) {
result = result[0:query.PagingInfo.PageSize]
lastMsgIdx := len(result) - 1
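For context on the hunks above: the store now asks the database for one row more than the requested page size, and that extra row is what decides whether a pagination cursor is returned. A minimal generic sketch of that decision, written here for illustration only and not taken from the vendored code:

// paginate mirrors the "page size + 1" trick used by the query above: the extra
// row, if present, signals that a cursor for the next page should be returned.
func paginate[T any](rows []T, pageSize int) (page []T, hasMore bool) {
	if len(rows) > pageSize {
		return rows[:pageSize], true
	}
	return rows, false
}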
@@ -6,8 +6,6 @@ import (
"errors"
"fmt"
"net"
"sync"
"sync/atomic"
"time"

"github.com/libp2p/go-libp2p/core/host"
@@ -34,23 +32,20 @@ type PeerConnector interface {
}

type DiscoveryV5 struct {
params *discV5Parameters
host host.Host
config discover.Config
udpAddr *net.UDPAddr
listener *discover.UDPv5
localnode *enode.LocalNode
metrics Metrics
peerChannel *peerChannel
params *discV5Parameters
host host.Host
config discover.Config
udpAddr *net.UDPAddr
listener *discover.UDPv5
localnode *enode.LocalNode
metrics Metrics

peerConnector PeerConnector
NAT nat.Interface

log *zap.Logger

started atomic.Bool
cancel context.CancelFunc
wg *sync.WaitGroup
*peermanager.CommonDiscoveryService
}

type discV5Parameters struct {

@@ -76,6 +71,7 @@ func WithAutoUpdate(autoUpdate bool) DiscoveryV5Option {
}
}

// WithBootnodes is an option used to specify the bootstrap nodes to use with DiscV5
func WithBootnodes(bootnodes []*enode.Node) DiscoveryV5Option {
return func(params *discV5Parameters) {
params.bootnodes = bootnodes

@@ -106,6 +102,7 @@ func WithAutoFindPeers(find bool) DiscoveryV5Option {
}
}

// DefaultOptions contains the default list of options used when setting up DiscoveryV5
func DefaultOptions() []DiscoveryV5Option {
return []DiscoveryV5Option{
WithUDPPort(9000),
@@ -124,19 +121,18 @@ func NewDiscoveryV5(priv *ecdsa.PrivateKey, localnode *enode.LocalNode, peerConn

logger := log.Named("discv5")

var NAT nat.Interface = nil
var NAT nat.Interface
if params.advertiseAddr == nil {
NAT = nat.Any()
}

return &DiscoveryV5{
params: params,
peerConnector: peerConnector,
NAT: NAT,
wg: &sync.WaitGroup{},
peerChannel: &peerChannel{},
localnode: localnode,
metrics: newMetrics(reg),
params: params,
peerConnector: peerConnector,
NAT: NAT,
CommonDiscoveryService: peermanager.NewCommonDiscoveryService(),
localnode: localnode,
metrics: newMetrics(reg),
config: discover.Config{
PrivateKey: priv,
Bootnodes: params.bootnodes,

@@ -165,9 +161,9 @@ func (d *DiscoveryV5) listen(ctx context.Context) error {
d.udpAddr = conn.LocalAddr().(*net.UDPAddr)

if d.NAT != nil && !d.udpAddr.IP.IsLoopback() {
d.wg.Add(1)
d.WaitGroup().Add(1)
go func() {
defer d.wg.Done()
defer d.WaitGroup().Done()
nat.Map(d.NAT, ctx.Done(), "udp", d.udpAddr.Port, d.udpAddr.Port, "go-waku discv5 discovery")
}()
@@ -195,80 +191,31 @@ func (d *DiscoveryV5) SetHost(h host.Host) {
d.host = h
}

type peerChannel struct {
mutex sync.Mutex
channel chan peermanager.PeerData
started bool
ctx context.Context
}

func (p *peerChannel) Start(ctx context.Context) {
p.mutex.Lock()
defer p.mutex.Unlock()
p.started = true
p.ctx = ctx
p.channel = make(chan peermanager.PeerData)
}

func (p *peerChannel) Stop() {
p.mutex.Lock()
defer p.mutex.Unlock()
if !p.started {
return
}
p.started = false
close(p.channel)
}

func (p *peerChannel) Subscribe() chan peermanager.PeerData {
return p.channel
}

func (p *peerChannel) Publish(peer peermanager.PeerData) bool {
p.mutex.Lock()
defer p.mutex.Unlock()
if !p.started {
return false
}
select {
case p.channel <- peer:
case <-p.ctx.Done():
return false
}
return true
}

// only works if the discovery v5 hasn't been started yet.
func (d *DiscoveryV5) Start(ctx context.Context) error {
// compare and swap sets the discovery v5 to `started` state
// and prevents multiple calls to the start method by being atomic.
if !d.started.CompareAndSwap(false, true) {
return nil
}
return d.CommonDiscoveryService.Start(ctx, d.start)
}

ctx, cancel := context.WithCancel(ctx)
d.cancel = cancel
func (d *DiscoveryV5) start() error {
d.peerConnector.Subscribe(d.Context(), d.GetListeningChan())

d.peerChannel.Start(ctx)
d.peerConnector.Subscribe(ctx, d.peerChannel.Subscribe())

err := d.listen(ctx)
err := d.listen(d.Context())
if err != nil {
return err
}

if d.params.autoFindPeers {
d.wg.Add(1)
d.WaitGroup().Add(1)
go func() {
defer d.wg.Done()
d.runDiscoveryV5Loop(ctx)
defer d.WaitGroup().Done()
d.runDiscoveryV5Loop(d.Context())
}()
}

return nil
}

// SetBootnodes is used to setup the bootstrap nodes to use for discovering new peers
func (d *DiscoveryV5) SetBootnodes(nodes []*enode.Node) error {
if d.listener == nil {
return ErrNoDiscV5Listener
@@ -277,30 +224,22 @@ func (d *DiscoveryV5) SetBootnodes(nodes []*enode.Node) error {
return d.listener.SetFallbackNodes(nodes)
}

// Stop is a function that stops the execution of DiscV5.
// only works if the discovery v5 is in running state
// so we can assume that cancel method is set
func (d *DiscoveryV5) Stop() {
if !d.started.CompareAndSwap(true, false) { // if Discoveryv5 is running, set started to false
return
}

d.cancel()

if d.listener != nil {
d.listener.Close()
d.listener = nil
d.log.Info("stopped Discovery V5")
}

d.wg.Wait()

defer func() {
if r := recover(); r != nil {
d.log.Info("recovering from panic and quitting")
}
}()

d.peerChannel.Stop()
d.CommonDiscoveryService.Stop(func() {
if d.listener != nil {
d.listener.Close()
d.listener = nil
d.log.Info("stopped Discovery V5")
}
})
}

/*

@@ -491,7 +430,7 @@ func (d *DiscoveryV5) peerLoop(ctx context.Context) error {
ENR: n,
}

if d.peerChannel.Publish(peer) {
if d.PushToChan(peer) {
d.log.Debug("published peer into peer channel", logging.HostID("peerID", peer.AddrInfo.ID))
} else {
d.log.Debug("could not publish peer into peer channel", logging.HostID("peerID", peer.AddrInfo.ID))

@@ -523,7 +462,3 @@ restartLoop:
}
d.log.Warn("Discv5 loop stopped")
}

func (d *DiscoveryV5) IsStarted() bool {
return d.started.Load()
}
@@ -19,7 +19,7 @@ func FilterPredicate(predicate func(*enode.Node) bool) Predicate {
}

// FilterShard creates a Predicate that filters nodes that belong to a specific shard
func FilterShard(iterator enode.Iterator, cluster, index uint16) Predicate {
func FilterShard(cluster, index uint16) Predicate {
return func(iterator enode.Iterator) enode.Iterator {
predicate := func(node *enode.Node) bool {
rs, err := wenr.RelaySharding(node.Record())

@@ -33,7 +33,7 @@ func FilterShard(iterator enode.Iterator, cluster, index uint16) Predicate {
}

// FilterCapabilities creates a Predicate to filter nodes that support specific protocols
func FilterCapabilities(iterator enode.Iterator, flags wenr.WakuEnrBitfield) Predicate {
func FilterCapabilities(flags wenr.WakuEnrBitfield) Predicate {
return func(iterator enode.Iterator) enode.Iterator {
predicate := func(node *enode.Node) bool {
enrField := new(wenr.WakuEnrBitfield)
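The two predicates above no longer receive an iterator; each now returns a closure that wraps one. A minimal sketch of how they appear to compose, assuming Predicate is func(enode.Iterator) enode.Iterator as the returned closures suggest; the cluster and shard values are examples only:

var it enode.Iterator          // iterator obtained from discv5 elsewhere
var flags wenr.WakuEnrBitfield // capability bits the caller requires
// wrap the raw iterator: first keep nodes in cluster 16 / shard 1, then keep
// only those advertising the required capabilities
it = FilterCapabilities(flags)(FilterShard(16, 1)(it))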
@@ -17,10 +17,10 @@ type dnsDiscoveryParameters struct {
nameserver string
}

type DnsDiscoveryOption func(*dnsDiscoveryParameters)
type DNSDiscoveryOption func(*dnsDiscoveryParameters)

// WithNameserver is a DnsDiscoveryOption that configures the nameserver to use
func WithNameserver(nameserver string) DnsDiscoveryOption {
func WithNameserver(nameserver string) DNSDiscoveryOption {
return func(params *dnsDiscoveryParameters) {
params.nameserver = nameserver
}

@@ -32,7 +32,7 @@ type DiscoveredNode struct {
ENR *enode.Node
}

var metrics Metrics = nil
var metrics Metrics

// SetPrometheusRegisterer is used to setup a custom prometheus registerer for metrics
func SetPrometheusRegisterer(reg prometheus.Registerer, logger *zap.Logger) {

@@ -44,7 +44,7 @@ func init() {
}

// RetrieveNodes returns a list of multiaddress given a url to a DNS discoverable ENR tree
func RetrieveNodes(ctx context.Context, url string, opts ...DnsDiscoveryOption) ([]DiscoveredNode, error) {
func RetrieveNodes(ctx context.Context, url string, opts ...DNSDiscoveryOption) ([]DiscoveredNode, error) {
var discoveredNodes []DiscoveredNode

params := new(dnsDiscoveryParameters)
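With the rename from DnsDiscoveryOption to DNSDiscoveryOption, a caller would look roughly like the sketch below; the package alias, the ENR tree URL and the nameserver are placeholders, not values taken from this change:

nodes, err := dnsdisc.RetrieveNodes(ctx, "enrtree://EXAMPLE@nodes.example.org",
	dnsdisc.WithNameserver("1.1.1.1"))
if err != nil {
	return err
}
for _, n := range nodes {
	fmt.Println(n.ENR) // DiscoveredNode carries the node's ENR, as shown above
}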
@@ -10,6 +10,7 @@ var sha256Pool = sync.Pool{New: func() interface{} {
return sha256.New()
}}

// SHA256 generates the SHA256 hash from the input data
func SHA256(data ...[]byte) []byte {
h, ok := sha256Pool.Get().(hash.Hash)
if !ok {
@@ -16,7 +16,7 @@ import (
"go.uber.org/zap"
)

func (w *WakuNode) updateLocalNode(localnode *enode.LocalNode, multiaddrs []ma.Multiaddr, ipAddr *net.TCPAddr, udpPort uint, wakuFlags wenr.WakuEnrBitfield, advertiseAddr []ma.Multiaddr, shouldAutoUpdate bool, log *zap.Logger) error {
func (w *WakuNode) updateLocalNode(localnode *enode.LocalNode, multiaddrs []ma.Multiaddr, ipAddr *net.TCPAddr, udpPort uint, wakuFlags wenr.WakuEnrBitfield, advertiseAddr []ma.Multiaddr, shouldAutoUpdate bool) error {
var options []wenr.ENROption
options = append(options, wenr.WithUDPPort(udpPort))
options = append(options, wenr.WithWakuBitfield(wakuFlags))

@@ -268,7 +268,7 @@ func (w *WakuNode) setupENR(ctx context.Context, addrs []ma.Multiaddr) error {
return err
}

err = w.updateLocalNode(w.localNode, multiaddresses, ipAddr, w.opts.udpPort, w.wakuFlag, w.opts.advertiseAddrs, w.opts.discV5autoUpdate, w.log)
err = w.updateLocalNode(w.localNode, multiaddresses, ipAddr, w.opts.udpPort, w.wakuFlag, w.opts.advertiseAddrs, w.opts.discV5autoUpdate)
if err != nil {
w.log.Error("updating localnode ENR record", zap.Error(err))
return err

@@ -281,6 +281,8 @@ func (w *WakuNode) setupENR(ctx context.Context, addrs []ma.Multiaddr) error {
}
}

w.enrChangeCh <- struct{}{}

return nil

}
@@ -10,7 +10,6 @@ import (
backoffv4 "github.com/cenkalti/backoff/v4"
golog "github.com/ipfs/go-log/v2"
"github.com/libp2p/go-libp2p"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"go.uber.org/zap"

"github.com/ethereum/go-ethereum/crypto"

@@ -32,6 +31,7 @@ import (
"github.com/waku-org/go-waku/waku/v2/discv5"
"github.com/waku-org/go-waku/waku/v2/peermanager"
wps "github.com/waku-org/go-waku/waku/v2/peerstore"
wakuprotocol "github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/protocol/enr"
"github.com/waku-org/go-waku/waku/v2/protocol/filter"
"github.com/waku-org/go-waku/waku/v2/protocol/legacy_filter"

@@ -66,15 +66,16 @@ type IdentityCredential = struct {
IDCommitment byte32 `json:"idCommitment"`
}

type SpamHandler = func(message *pb.WakuMessage) error
type SpamHandler = func(message *pb.WakuMessage, topic string) error

type RLNRelay interface {
IdentityCredential() (IdentityCredential, error)
MembershipIndex() uint
AppendRLNProof(msg *pb.WakuMessage, senderEpochTime time.Time) error
Validator(spamHandler SpamHandler) func(ctx context.Context, peerID peer.ID, message *pubsub.Message) bool
Validator(spamHandler SpamHandler) func(ctx context.Context, message *pb.WakuMessage, topic string) bool
Start(ctx context.Context) error
Stop() error
IsReady(ctx context.Context) (bool, error)
}

type WakuNode struct {
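SpamHandler and the RLN Validator now receive the pubsub topic alongside the message. A hedged sketch of a handler matching the new alias, assuming it is referenced from the node package shown above:

var spamHandler node.SpamHandler = func(message *pb.WakuMessage, topic string) error {
	// the topic argument is new in this signature; log it for debugging
	log.Printf("spam message on pubsub topic %s, content topic %s", topic, message.ContentTopic)
	return nil
}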
@@ -236,7 +237,8 @@ func New(opts ...WakuNodeOption) (*WakuNode, error) {
}()
return r
},
autorelay.WithMinInterval(2*time.Second),
autorelay.WithMinInterval(params.circuitRelayMinInterval),
autorelay.WithBootDelay(params.circuitRelayBootDelay),
))

if params.enableNTP {

@@ -251,7 +253,7 @@ func New(opts ...WakuNodeOption) (*WakuNode, error) {
}

//Initialize peer manager.
w.peermanager = peermanager.NewPeerManager(w.opts.maxPeerConnections, w.log)
w.peermanager = peermanager.NewPeerManager(w.opts.maxPeerConnections, w.opts.peerStoreCapacity, w.log)

w.peerConnector, err = peermanager.NewPeerConnectionStrategy(w.peermanager, discoveryConnectTimeout, w.log)
if err != nil {

@@ -272,6 +274,8 @@ func New(opts ...WakuNodeOption) (*WakuNode, error) {

w.rendezvous = rendezvous.NewRendezvous(w.opts.rendezvousDB, w.peerConnector, w.log)

w.relay = relay.NewWakuRelay(w.bcaster, w.opts.minRelayPeersToPublish, w.timesource, w.opts.prometheusReg, w.log, w.opts.pubsubOpts...)

if w.opts.enableRelay {
err = w.setupRLNRelay()
if err != nil {

@@ -279,7 +283,6 @@ func New(opts ...WakuNodeOption) (*WakuNode, error) {
}
}

w.relay = relay.NewWakuRelay(w.bcaster, w.opts.minRelayPeersToPublish, w.timesource, w.opts.prometheusReg, w.log, w.opts.pubsubOpts...)
w.legacyFilter = legacy_filter.NewWakuFilter(w.bcaster, w.opts.isLegacyFilterFullNode, w.timesource, w.opts.prometheusReg, w.log, w.opts.legacyFilterOpts...)
w.filterFullNode = filter.NewWakuFilterFullNode(w.timesource, w.opts.prometheusReg, w.log, w.opts.filterOpts...)
w.filterLightNode = filter.NewWakuFilterLightNode(w.bcaster, w.peermanager, w.timesource, w.opts.prometheusReg, w.log)

@@ -310,7 +313,6 @@ func (w *WakuNode) watchMultiaddressChanges(ctx context.Context) {
return
case <-first:
w.log.Info("listening", logging.MultiAddrs("multiaddr", addrs...))
w.enrChangeCh <- struct{}{}
case <-w.addressChangesSub.Out():
newAddrs := w.ListenAddresses()
diff := false

@@ -327,8 +329,10 @@ func (w *WakuNode) watchMultiaddressChanges(ctx context.Context) {
if diff {
addrs = newAddrs
w.log.Info("listening addresses update received", logging.MultiAddrs("multiaddr", addrs...))
_ = w.setupENR(ctx, addrs)
w.enrChangeCh <- struct{}{}
err := w.setupENR(ctx, addrs)
if err != nil {
w.log.Warn("could not update ENR", zap.Error(err))
}
}
}
}

@@ -417,6 +421,10 @@ func (w *WakuNode) Start(ctx context.Context) error {
if err != nil {
return err
}
err = w.peermanager.SubscribeToRelayEvtBus(w.relay.(*relay.WakuRelay).Events())
if err != nil {
return err
}
w.peermanager.Start(ctx)
w.registerAndMonitorReachability(ctx)
}
@@ -687,20 +695,22 @@ func (w *WakuNode) startStore(ctx context.Context, sub relay.Subscription) error
}

// AddPeer is used to add a peer and the protocols it support to the node peerstore
func (w *WakuNode) AddPeer(address ma.Multiaddr, origin wps.Origin, protocols ...protocol.ID) (peer.ID, error) {
return w.peermanager.AddPeer(address, origin, protocols...)
// TODO: Need to update this for autosharding, to only take contentTopics and optional pubSubTopics or provide an alternate API only for contentTopics.
func (w *WakuNode) AddPeer(address ma.Multiaddr, origin wps.Origin, pubSubTopics []string, protocols ...protocol.ID) (peer.ID, error) {
return w.peermanager.AddPeer(address, origin, pubSubTopics, protocols...)
}

// AddDiscoveredPeer to add a discovered peer to the node peerStore
func (w *WakuNode) AddDiscoveredPeer(ID peer.ID, addrs []ma.Multiaddr, origin wps.Origin) {
func (w *WakuNode) AddDiscoveredPeer(ID peer.ID, addrs []ma.Multiaddr, origin wps.Origin, pubsubTopics []string, connectNow bool) {
p := peermanager.PeerData{
Origin: origin,
AddrInfo: peer.AddrInfo{
ID: ID,
Addrs: addrs,
},
PubSubTopics: pubsubTopics,
}
w.peermanager.AddDiscoveredPeer(p)
w.peermanager.AddDiscoveredPeer(p, connectNow)
}

// DialPeerWithMultiAddress is used to connect to a peer using a multiaddress

@@ -745,12 +755,12 @@ func (w *WakuNode) connect(ctx context.Context, info peer.AddrInfo) error {
// host.Connect adds the addresses with a TempAddressTTL
// however, identify will filter out all non IP addresses
// and expire all temporary addrs. So in the meantime, let's
// store dns4 addresses with a connectedAddressTTL, otherwise
// store dns4 addresses with a RecentlyConnectedAddrTTL, otherwise
// it will have trouble with the status fleet circuit relay addresses
// See https://github.com/libp2p/go-libp2p/issues/2550
_, err := addr.ValueForProtocol(ma.P_DNS4)
if err == nil {
w.host.Peerstore().AddAddrs(info.ID, info.Addrs, peerstore.ConnectedAddrTTL)
w.host.Peerstore().AddAddrs(info.ID, info.Addrs, peerstore.RecentlyConnectedAddrTTL)
}
}

@@ -837,6 +847,21 @@ func (w *WakuNode) Peers() ([]*Peer, error) {
return peers, nil
}

// PeersByShard filters peers based on shard information following static sharding
func (w *WakuNode) PeersByStaticShard(cluster uint16, shard uint16) peer.IDSlice {
pTopic := wakuprotocol.NewStaticShardingPubsubTopic(cluster, shard).String()
return w.peerstore.(wps.WakuPeerstore).PeersByPubSubTopic(pTopic)
}

// PeersByContentTopics filters peers based on contentTopic
func (w *WakuNode) PeersByContentTopic(contentTopic string) peer.IDSlice {
pTopic, err := wakuprotocol.GetPubSubTopicFromContentTopic(contentTopic)
if err != nil {
return nil
}
return w.peerstore.(wps.WakuPeerstore).PeersByPubSubTopic(pTopic)
}

func (w *WakuNode) findRelayNodes(ctx context.Context) {
defer w.wg.Done()
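A sketch of the updated peer APIs above: AddPeer now also takes the pubsub topics a peer serves, and peers can later be looked up by content topic. The multiaddress, origin and topic strings below are illustrative placeholders only:

var origin wps.Origin // an origin value defined by the peerstore package
addr, _ := ma.NewMultiaddr("/ip4/192.0.2.1/tcp/60000")
peerID, err := wakuNode.AddPeer(addr, origin, []string{"/waku/2/rs/16/1"})
if err != nil {
	return err
}
_ = peerID
// later, find peers able to serve a given content topic
peers := wakuNode.PeersByContentTopic("/example-app/1/chat/proto")
_ = peers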
@@ -1,10 +1,11 @@
//go:build !gowaku_rln
// +build !gowaku_rln
//go:build gowaku_no_rln
// +build gowaku_no_rln

package node

import "context"

// RLNRelay is used to access any operation related to Waku RLN protocol
func (w *WakuNode) RLNRelay() RLNRelay {
return nil
}
@@ -1,5 +1,5 @@
//go:build gowaku_rln
// +build gowaku_rln
//go:build !gowaku_no_rln
// +build !gowaku_no_rln

package node

@@ -8,8 +8,8 @@ import (
"context"
"errors"

pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/waku-org/go-waku/waku/v2/protocol/rln"
"github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager"
"github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/dynamic"
"github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/static"
"github.com/waku-org/go-waku/waku/v2/protocol/rln/keystore"
@@ -23,31 +23,48 @@ func (w *WakuNode) RLNRelay() RLNRelay {

func (w *WakuNode) setupRLNRelay() error {
var err error
var groupManager rln.GroupManager

if !w.opts.enableRLN {
return nil
}

if !w.opts.enableRelay {
return errors.New("rln requires relay")
}

var groupManager group_manager.GroupManager

rlnInstance, rootTracker, err := rln.GetRLNInstanceAndRootTracker(w.opts.rlnTreePath)
if err != nil {
return err
}
if !w.opts.rlnRelayDynamic {
w.log.Info("setting up waku-rln-relay in off-chain mode")

index := uint(0)
if w.opts.rlnRelayMemIndex != nil {
index = *w.opts.rlnRelayMemIndex
}

// set up rln relay inputs
groupKeys, idCredential, err := static.Setup(w.opts.rlnRelayMemIndex)
groupKeys, idCredential, err := static.Setup(index)
if err != nil {
return err
}

groupManager, err = static.NewStaticGroupManager(groupKeys, idCredential, w.opts.rlnRelayMemIndex, w.log)
groupManager, err = static.NewStaticGroupManager(groupKeys, idCredential, index, rlnInstance, rootTracker, w.log)
if err != nil {
return err
}
} else {
w.log.Info("setting up waku-rln-relay in on-chain mode")

appKeystore, err := keystore.New(w.opts.keystorePath, dynamic.RLNAppInfo, w.log)
if err != nil {
return err
var appKeystore *keystore.AppKeystore
if w.opts.keystorePath != "" {
appKeystore, err = keystore.New(w.opts.keystorePath, dynamic.RLNAppInfo, w.log)
if err != nil {
return err
}
}

groupManager, err = dynamic.NewDynamicGroupManager(
@@ -57,6 +74,8 @@ func (w *WakuNode) setupRLNRelay() error {
appKeystore,
w.opts.keystorePassword,
w.opts.prometheusReg,
rlnInstance,
rootTracker,
w.log,
)
if err != nil {

@@ -64,15 +83,15 @@ func (w *WakuNode) setupRLNRelay() error {
}
}

rlnRelay, err := rln.New(groupManager, w.opts.rlnTreePath, w.timesource, w.opts.prometheusReg, w.log)
if err != nil {
return err
}
rlnRelay := rln.New(group_manager.Details{
GroupManager: groupManager,
RootTracker: rootTracker,
RLN: rlnInstance,
}, w.timesource, w.opts.prometheusReg, w.log)

w.rlnRelay = rlnRelay

// Adding RLN as a default validator
w.opts.pubsubOpts = append(w.opts.pubsubOpts, pubsub.WithDefaultValidator(rlnRelay.Validator(w.opts.rlnSpamHandler)))
w.Relay().RegisterDefaultValidator(w.rlnRelay.Validator(w.opts.rlnSpamHandler))

return nil
}
@@ -55,6 +55,9 @@ type WakuNodeParameters struct {
peerstore peerstore.Peerstore
prometheusReg prometheus.Registerer

circuitRelayMinInterval time.Duration
circuitRelayBootDelay time.Duration

enableNTP bool
ntpURLs []string

@@ -85,6 +88,7 @@ type WakuNodeParameters struct {
rendezvousDB *rendezvous.DB

maxPeerConnections int
peerStoreCapacity int

enableDiscV5 bool
udpPort uint

@@ -94,9 +98,9 @@ type WakuNodeParameters struct {
enablePeerExchange bool

enableRLN bool
rlnRelayMemIndex uint
rlnRelayMemIndex *uint
rlnRelayDynamic bool
rlnSpamHandler func(message *pb.WakuMessage) error
rlnSpamHandler func(message *pb.WakuMessage, topic string) error
rlnETHClientAddress string
keystorePath string
keystorePassword string

@@ -119,6 +123,7 @@ type WakuNodeOption func(*WakuNodeParameters) error
var DefaultWakuNodeOptions = []WakuNodeOption{
WithPrometheusRegisterer(prometheus.NewRegistry()),
WithMaxPeerConnections(50),
WithCircuitRelayParams(2*time.Second, 3*time.Minute),
}

// MultiAddresses return the list of multiaddresses configured in the node

@@ -171,8 +176,8 @@ func WithPrometheusRegisterer(reg prometheus.Registerer) WakuNodeOption {
}
}

// WithDns4Domain is a WakuNodeOption that adds a custom domain name to listen
func WithDns4Domain(dns4Domain string) WakuNodeOption {
// WithDNS4Domain is a WakuNodeOption that adds a custom domain name to listen
func WithDNS4Domain(dns4Domain string) WakuNodeOption {
return func(params *WakuNodeParameters) error {
params.dns4Domain = dns4Domain
previousAddrFactory := params.addressFactory
@@ -190,8 +195,11 @@ func WithDns4Domain(dns4Domain string) WakuNodeOption {

if params.enableWS || params.enableWSS {
if params.enableWSS {
// WSS is deprecated in https://github.com/multiformats/multiaddr/pull/109
wss, _ := multiaddr.NewMultiaddr(fmt.Sprintf("/tcp/%d/wss", params.wssPort))
addresses = append(addresses, hostAddrMA.Encapsulate(wss))
tlsws, _ := multiaddr.NewMultiaddr(fmt.Sprintf("/tcp/%d/tls/ws", params.wssPort))
addresses = append(addresses, hostAddrMA.Encapsulate(tlsws))
} else {
ws, _ := multiaddr.NewMultiaddr(fmt.Sprintf("/tcp/%d/ws", params.wsPort))
addresses = append(addresses, hostAddrMA.Encapsulate(ws))

@@ -200,9 +208,9 @@ func WithDns4Domain(dns4Domain string) WakuNodeOption {

if previousAddrFactory != nil {
return previousAddrFactory(addresses)
} else {
return addresses
}

return addresses
}

return nil

@@ -349,6 +357,13 @@ func WithMaxPeerConnections(maxPeers int) WakuNodeOption {
}
}

func WithPeerStoreCapacity(capacity int) WakuNodeOption {
return func(params *WakuNodeParameters) error {
params.peerStoreCapacity = capacity
return nil
}
}

// WithDiscoveryV5 is a WakuOption used to enable DiscV5 peer discovery
func WithDiscoveryV5(udpPort uint, bootnodes []*enode.Node, autoUpdate bool) WakuNodeOption {
return func(params *WakuNodeParameters) error {

@@ -514,6 +529,14 @@ func WithSecureWebsockets(address string, port int, certPath string, keyPath str
}
}

func WithCircuitRelayParams(minInterval time.Duration, bootDelay time.Duration) WakuNodeOption {
return func(params *WakuNodeParameters) error {
params.circuitRelayBootDelay = bootDelay
params.circuitRelayMinInterval = minInterval
return nil
}
}

// Default options used in the libp2p node
var DefaultLibP2POptions = []libp2p.Option{
libp2p.ChainOptions(
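Putting the new and renamed options together, constructing a node could look like the sketch below; every concrete value is an example, not something prescribed by this change:

wakuNode, err := node.New(
	node.WithMaxPeerConnections(50),
	node.WithPeerStoreCapacity(300),                            // new option added above
	node.WithCircuitRelayParams(2*time.Second, 3*time.Minute),  // matches the new defaults
	node.WithDNS4Domain("waku.example.org"),                    // renamed from WithDns4Domain
)
if err != nil {
	return err
}
_ = wakuNode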
@@ -1,5 +1,5 @@
//go:build gowaku_rln
// +build gowaku_rln
//go:build !gowaku_no_rln
// +build !gowaku_no_rln

package node

@@ -10,8 +10,7 @@ import (
)

// WithStaticRLNRelay enables the Waku V2 RLN protocol in offchain mode
// Requires the `gowaku_rln` build constrain (or the env variable RLN=true if building go-waku)
func WithStaticRLNRelay(memberIndex r.MembershipIndex, spamHandler rln.SpamHandler) WakuNodeOption {
func WithStaticRLNRelay(memberIndex *r.MembershipIndex, spamHandler rln.SpamHandler) WakuNodeOption {
return func(params *WakuNodeParameters) error {
params.enableRLN = true
params.rlnRelayDynamic = false

@@ -22,8 +21,7 @@ func WithStaticRLNRelay(memberIndex r.MembershipIndex, spamHandl
}

// WithDynamicRLNRelay enables the Waku V2 RLN protocol in onchain mode.
// Requires the `gowaku_rln` build constrain (or the env variable RLN=true if building go-waku)
func WithDynamicRLNRelay(keystorePath string, keystorePassword string, treePath string, membershipContract common.Address, membershipIndex uint, spamHandler rln.SpamHandler, ethClientAddress string) WakuNodeOption {
func WithDynamicRLNRelay(keystorePath string, keystorePassword string, treePath string, membershipContract common.Address, membershipIndex *uint, spamHandler rln.SpamHandler, ethClientAddress string) WakuNodeOption {
return func(params *WakuNodeParameters) error {
params.enableRLN = true
params.rlnRelayDynamic = true
@@ -72,16 +72,15 @@ func (payload Payload) Encode(version uint32) ([]byte, error) {
encoded, err := encryptSymmetric(data, payload.Key.SymKey)
if err != nil {
return nil, fmt.Errorf("couldn't encrypt using symmetric key: %w", err)
} else {
return encoded, nil
}

return encoded, nil
case Asymmetric:
encoded, err := encryptAsymmetric(data, &payload.Key.PubKey)
if err != nil {
return nil, fmt.Errorf("couldn't encrypt using asymmetric key: %w", err)
} else {
return encoded, nil
}
return encoded, nil
case None:
return nil, errors.New("non supported KeyKind")
}
vendor/github.com/waku-org/go-waku/waku/v2/peermanager/common_discovery_service.go | 81 (generated, vendored, new file)

@@ -0,0 +1,81 @@
package peermanager

import (
"context"
"sync"

"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/libp2p/go-libp2p/core/peer"
wps "github.com/waku-org/go-waku/waku/v2/peerstore"
"github.com/waku-org/go-waku/waku/v2/protocol"
)

// PeerData contains information about a peer useful in establishing connections with it.
type PeerData struct {
Origin wps.Origin
AddrInfo peer.AddrInfo
ENR *enode.Node
PubSubTopics []string
}

type CommonDiscoveryService struct {
commonService *protocol.CommonService
channel chan PeerData
}

func NewCommonDiscoveryService() *CommonDiscoveryService {
return &CommonDiscoveryService{
commonService: protocol.NewCommonService(),
}
}

func (sp *CommonDiscoveryService) Start(ctx context.Context, fn func() error) error {
return sp.commonService.Start(ctx, func() error {
// currently is used in discv5,peerConnector,rendevzous for returning new discovered Peers to peerConnector for connecting with them
// mutex protection for this operation
sp.channel = make(chan PeerData)
return fn()
})
}

func (sp *CommonDiscoveryService) Stop(stopFn func()) {
sp.commonService.Stop(func() {
stopFn()
sp.WaitGroup().Wait() // waitgroup is waited here so that channel can be closed after all the go rountines have stopped in service.
// there is a wait in the CommonService too
close(sp.channel)
})
}
func (sp *CommonDiscoveryService) GetListeningChan() <-chan PeerData {
return sp.channel
}
func (sp *CommonDiscoveryService) PushToChan(data PeerData) bool {
sp.RLock()
defer sp.RUnlock()
if err := sp.ErrOnNotRunning(); err != nil {
return false
}
select {
case sp.channel <- data:
return true
case <-sp.Context().Done():
return false
}
}

func (sp *CommonDiscoveryService) RLock() {
sp.commonService.RLock()
}
func (sp *CommonDiscoveryService) RUnlock() {
sp.commonService.RUnlock()
}

func (sp *CommonDiscoveryService) Context() context.Context {
return sp.commonService.Context()
}
func (sp *CommonDiscoveryService) ErrOnNotRunning() error {
return sp.commonService.ErrOnNotRunning()
}
func (sp *CommonDiscoveryService) WaitGroup() *sync.WaitGroup {
return sp.commonService.WaitGroup()
}
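The new CommonDiscoveryService centralizes the start/stop/channel plumbing that discv5 and the peer connector previously duplicated. A hedged sketch of how a discovery component can embed it, mirroring the changes in this commit (the myDiscoverer type is hypothetical):

type myDiscoverer struct {
	*CommonDiscoveryService
}

func (d *myDiscoverer) Start(ctx context.Context) error {
	return d.CommonDiscoveryService.Start(ctx, func() error {
		d.WaitGroup().Add(1)
		go func() {
			defer d.WaitGroup().Done()
			// discovered peers are handed to whoever consumes GetListeningChan()
			d.PushToChan(PeerData{})
		}()
		return nil
	})
}

func (d *myDiscoverer) Stop() {
	// waits for the goroutines above, then closes the channel
	d.CommonDiscoveryService.Stop(func() {})
}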
vendor/github.com/waku-org/go-waku/waku/v2/peermanager/mock_peer_discoverer.go | 13 (generated, vendored)

@@ -25,10 +25,15 @@ func NewTestPeerDiscoverer() *TestPeerDiscoverer {
// Subscribe is for subscribing to peer discoverer
func (t *TestPeerDiscoverer) Subscribe(ctx context.Context, ch <-chan PeerData) {
go func() {
for p := range ch {
t.Lock()
t.peerMap[p.AddrInfo.ID] = struct{}{}
t.Unlock()
for {
select {
case <-ctx.Done():
return
case p := <-ch:
t.Lock()
t.peerMap[p.AddrInfo.ID] = struct{}{}
t.Unlock()
}
}
}()
}
@@ -7,9 +7,9 @@ import (
"errors"
"math/rand"
"sync"
"sync/atomic"
"time"

"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"

@@ -17,40 +17,27 @@ import (
"github.com/libp2p/go-libp2p/p2p/discovery/backoff"
"github.com/waku-org/go-waku/logging"
wps "github.com/waku-org/go-waku/waku/v2/peerstore"

"sync/atomic"
waku_proto "github.com/waku-org/go-waku/waku/v2/protocol"

"go.uber.org/zap"

lru "github.com/hashicorp/golang-lru"
)

// PeerData contains information about a peer useful in establishing connections with it.
type PeerData struct {
Origin wps.Origin
AddrInfo peer.AddrInfo
ENR *enode.Node
}

// PeerConnectionStrategy is a utility to connect to peers,
// but only if we have not recently tried connecting to them already
type PeerConnectionStrategy struct {
sync.RWMutex
mux sync.Mutex
cache *lru.TwoQueueCache
host host.Host
pm *PeerManager

cache *lru.TwoQueueCache
host host.Host
pm *PeerManager
cancel context.CancelFunc

paused atomic.Bool

wg sync.WaitGroup
dialTimeout time.Duration
dialCh chan peer.AddrInfo
paused atomic.Bool
dialTimeout time.Duration
*CommonDiscoveryService
subscriptions []<-chan PeerData

backoff backoff.BackoffFactory
mux sync.Mutex
logger *zap.Logger
}
@@ -77,12 +64,12 @@ func NewPeerConnectionStrategy(pm *PeerManager,
}
//
pc := &PeerConnectionStrategy{
cache: cache,
wg: sync.WaitGroup{},
dialTimeout: dialTimeout,
pm: pm,
backoff: getBackOff(),
logger: logger.Named("discovery-connector"),
cache: cache,
dialTimeout: dialTimeout,
CommonDiscoveryService: NewCommonDiscoveryService(),
pm: pm,
backoff: getBackOff(),
logger: logger.Named("discovery-connector"),
}
pm.SetPeerConnector(pc)
return pc, nil

@@ -95,36 +82,46 @@ type connCacheData struct {

// Subscribe receives channels on which discovered peers should be pushed
func (c *PeerConnectionStrategy) Subscribe(ctx context.Context, ch <-chan PeerData) {
if c.cancel != nil {
c.wg.Add(1)
go func() {
defer c.wg.Done()
c.consumeSubscription(ctx, ch)
}()
} else {
// if not running yet, store the subscription and return
if err := c.ErrOnNotRunning(); err != nil {
c.mux.Lock()
c.subscriptions = append(c.subscriptions, ch)
c.mux.Unlock()
return
}
// if running start a goroutine to consume the subscription
c.WaitGroup().Add(1)
go func() {
defer c.WaitGroup().Done()
c.consumeSubscription(ch)
}()
}

func (c *PeerConnectionStrategy) consumeSubscription(ctx context.Context, ch <-chan PeerData) {
func (c *PeerConnectionStrategy) consumeSubscription(ch <-chan PeerData) {
for {
// for returning from the loop when peerConnector is paused.
select {
case <-ctx.Done():
case <-c.Context().Done():
return
default:
}
//
if !c.isPaused() {
select {
case <-ctx.Done():
case <-c.Context().Done():
return
case p, ok := <-ch:
if !ok {
return
}
c.pm.AddDiscoveredPeer(p)
c.publishWork(ctx, p.AddrInfo)
triggerImmediateConnection := false
//Not connecting to peer as soon as it is discovered,
// rather expecting this to be pushed from PeerManager based on the need.
if len(c.host.Network().Peers()) < waku_proto.GossipSubOptimalFullMeshSize {
triggerImmediateConnection = true
}
c.pm.AddDiscoveredPeer(p, triggerImmediateConnection)

case <-time.After(1 * time.Second):
// This timeout is to not lock the goroutine
break
@@ -143,76 +140,40 @@ func (c *PeerConnectionStrategy) SetHost(h host.Host) {
// Start attempts to connect to the peers passed in by peerCh.
// Will not connect to peers if they are within the backoff period.
func (c *PeerConnectionStrategy) Start(ctx context.Context) error {
if c.cancel != nil {
return errors.New("already started")
}
return c.CommonDiscoveryService.Start(ctx, c.start)

ctx, cancel := context.WithCancel(ctx)
c.cancel = cancel
c.dialCh = make(chan peer.AddrInfo)
}
func (c *PeerConnectionStrategy) start() error {
c.WaitGroup().Add(1)

c.wg.Add(2)
go c.shouldDialPeers(ctx)
go c.dialPeers(ctx)
go c.dialPeers()

c.consumeSubscriptions(ctx)
c.consumeSubscriptions()

return nil
}

// Stop terminates the peer-connector
func (c *PeerConnectionStrategy) Stop() {
if c.cancel == nil {
return
}

c.cancel()
c.cancel = nil
c.wg.Wait()

close(c.dialCh)
c.CommonDiscoveryService.Stop(func() {})
}

func (c *PeerConnectionStrategy) isPaused() bool {
return c.paused.Load()
}

func (c *PeerConnectionStrategy) shouldDialPeers(ctx context.Context) {
defer c.wg.Done()

ticker := time.NewTicker(1 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
_, outRelayPeers := c.pm.getRelayPeers()
c.paused.Store(outRelayPeers.Len() >= c.pm.OutRelayPeersTarget) // pause if no of OutPeers more than or eq to target
}
}
}

// it might happen Subscribe is called before peerConnector has started so store these subscriptions in subscriptions array and custom after c.cancel is set.
func (c *PeerConnectionStrategy) consumeSubscriptions(ctx context.Context) {
func (c *PeerConnectionStrategy) consumeSubscriptions() {
for _, subs := range c.subscriptions {
c.wg.Add(1)
c.WaitGroup().Add(1)
go func(s <-chan PeerData) {
defer c.wg.Done()
c.consumeSubscription(ctx, s)
defer c.WaitGroup().Done()
c.consumeSubscription(s)
}(subs)
}
c.subscriptions = nil
}

func (c *PeerConnectionStrategy) publishWork(ctx context.Context, p peer.AddrInfo) {
select {
case c.dialCh <- p:
case <-ctx.Done():
return
}
}

const maxActiveDials = 5

// c.cache is thread safe
@@ -238,8 +199,8 @@ func (c *PeerConnectionStrategy) canDialPeer(pi peer.AddrInfo) bool {
return true
}

func (c *PeerConnectionStrategy) dialPeers(ctx context.Context) {
defer c.wg.Done()
func (c *PeerConnectionStrategy) dialPeers() {
defer c.WaitGroup().Done()

maxGoRoutines := c.pm.OutRelayPeersTarget
if maxGoRoutines > maxActiveDials {

@@ -250,30 +211,31 @@ func (c *PeerConnectionStrategy) dialPeers(ctx context.Context) {

for {
select {
case pi, ok := <-c.dialCh:
case pd, ok := <-c.GetListeningChan():
if !ok {
return
}
addrInfo := pd.AddrInfo

if pi.ID == c.host.ID() || pi.ID == "" ||
c.host.Network().Connectedness(pi.ID) == network.Connected {
if addrInfo.ID == c.host.ID() || addrInfo.ID == "" ||
c.host.Network().Connectedness(addrInfo.ID) == network.Connected {
continue
}

if c.canDialPeer(pi) {
if c.canDialPeer(addrInfo) {
sem <- struct{}{}
c.wg.Add(1)
go c.dialPeer(ctx, pi, sem)
c.WaitGroup().Add(1)
go c.dialPeer(addrInfo, sem)
}
case <-ctx.Done():
case <-c.Context().Done():
return
}
}
}

func (c *PeerConnectionStrategy) dialPeer(ctx context.Context, pi peer.AddrInfo, sem chan struct{}) {
defer c.wg.Done()
ctx, cancel := context.WithTimeout(ctx, c.dialTimeout)
func (c *PeerConnectionStrategy) dialPeer(pi peer.AddrInfo, sem chan struct{}) {
defer c.WaitGroup().Done()
ctx, cancel := context.WithTimeout(c.Context(), c.dialTimeout)
defer cancel()
err := c.host.Connect(ctx, pi)
if err != nil && !errors.Is(err, context.Canceled) {
@@ -2,8 +2,12 @@ package peermanager

import (
"context"
"errors"
"sync"
"time"

pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/event"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"

@@ -12,18 +16,23 @@ import (
ma "github.com/multiformats/go-multiaddr"
"github.com/waku-org/go-waku/logging"
wps "github.com/waku-org/go-waku/waku/v2/peerstore"
waku_proto "github.com/waku-org/go-waku/waku/v2/protocol"
wenr "github.com/waku-org/go-waku/waku/v2/protocol/enr"
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
"github.com/waku-org/go-waku/waku/v2/utils"

"go.uber.org/zap"
)

// WakuRelayIDv200 is protocol ID for Waku v2 relay protocol
// TODO: Move all the protocol IDs to a common location.
const WakuRelayIDv200 = protocol.ID("/vac/waku/relay/2.0.0")
// NodeTopicDetails stores pubSubTopic related data like topicHandle for the node.
type NodeTopicDetails struct {
topic *pubsub.Topic
}

// PeerManager applies various controls and manage connections towards peers.
type PeerManager struct {
peerConnector *PeerConnectionStrategy
maxPeers int
maxRelayPeers int
logger *zap.Logger
InRelayPeersTarget int

@@ -31,9 +40,13 @@ type PeerManager struct {
host host.Host
serviceSlots *ServiceSlots
ctx context.Context
sub event.Subscription
topicMutex sync.RWMutex
subRelayTopics map[string]*NodeTopicDetails
}

const peerConnectivityLoopSecs = 15
const maxConnsToPeerRatio = 5

// 80% relay peers 20% service peers
func relayAndServicePeers(maxConnections int) (int, int) {
@@ -52,22 +65,29 @@ func inAndOutRelayPeers(relayPeers int) (int, int) {
}

// NewPeerManager creates a new peerManager instance.
func NewPeerManager(maxConnections int, logger *zap.Logger) *PeerManager {
func NewPeerManager(maxConnections int, maxPeers int, logger *zap.Logger) *PeerManager {

maxRelayPeers, _ := relayAndServicePeers(maxConnections)
inRelayPeersTarget, outRelayPeersTarget := inAndOutRelayPeers(maxRelayPeers)

if maxPeers == 0 || maxConnections > maxPeers {
maxPeers = maxConnsToPeerRatio * maxConnections
}

pm := &PeerManager{
logger: logger.Named("peer-manager"),
maxRelayPeers: maxRelayPeers,
InRelayPeersTarget: inRelayPeersTarget,
OutRelayPeersTarget: outRelayPeersTarget,
serviceSlots: NewServiceSlot(),
subRelayTopics: make(map[string]*NodeTopicDetails),
maxPeers: maxPeers,
}
logger.Info("PeerManager init values", zap.Int("maxConnections", maxConnections),
zap.Int("maxRelayPeers", maxRelayPeers),
zap.Int("outRelayPeersTarget", outRelayPeersTarget),
zap.Int("inRelayPeersTarget", pm.InRelayPeersTarget))
zap.Int("inRelayPeersTarget", pm.InRelayPeersTarget),
zap.Int("maxPeers", maxPeers))

return pm
}

@@ -85,11 +105,15 @@ func (pm *PeerManager) SetPeerConnector(pc *PeerConnectionStrategy) {
// Start starts the processing to be done by peer manager.
func (pm *PeerManager) Start(ctx context.Context) {
pm.ctx = ctx
if pm.sub != nil {
go pm.peerEventLoop(ctx)
}
go pm.connectivityLoop(ctx)
}

// This is a connectivity loop, which currently checks and prunes inbound connections.
func (pm *PeerManager) connectivityLoop(ctx context.Context) {
pm.connectToRelayPeers()
t := time.NewTicker(peerConnectivityLoopSecs * time.Second)
defer t.Stop()
for {

@@ -103,10 +127,12 @@ func (pm *PeerManager) connectivityLoop(ctx context.Context) {
}

// GroupPeersByDirection returns all the connected peers in peer store grouped by Inbound or outBound direction
func (pm *PeerManager) GroupPeersByDirection() (inPeers peer.IDSlice, outPeers peer.IDSlice, err error) {
peers := pm.host.Network().Peers()
func (pm *PeerManager) GroupPeersByDirection(specificPeers ...peer.ID) (inPeers peer.IDSlice, outPeers peer.IDSlice, err error) {
if len(specificPeers) == 0 {
specificPeers = pm.host.Network().Peers()
}

for _, p := range peers {
for _, p := range specificPeers {
direction, err := pm.host.Peerstore().(wps.WakuPeerstore).Direction(p)
if err == nil {
if direction == network.DirInbound {

@@ -122,9 +148,11 @@ func (pm *PeerManager) GroupPeersByDirection() (inPeers peer.IDSlice, outPeers p
return inPeers, outPeers, nil
}

func (pm *PeerManager) getRelayPeers() (inRelayPeers peer.IDSlice, outRelayPeers peer.IDSlice) {
// getRelayPeers - Returns list of in and out peers supporting WakuRelayProtocol within specifiedPeers.
// If specifiedPeers is empty, it checks within all peers in peerStore.
func (pm *PeerManager) getRelayPeers(specificPeers ...peer.ID) (inRelayPeers peer.IDSlice, outRelayPeers peer.IDSlice) {
//Group peers by their connected direction inbound or outbound.
inPeers, outPeers, err := pm.GroupPeersByDirection()
inPeers, outPeers, err := pm.GroupPeersByDirection(specificPeers...)
if err != nil {
return
}
@ -133,59 +161,99 @@ func (pm *PeerManager) getRelayPeers() (inRelayPeers peer.IDSlice, outRelayPeers
|
|||
|
||||
//Need to filter peers to check if they support relay
|
||||
if inPeers.Len() != 0 {
|
||||
inRelayPeers, _ = utils.FilterPeersByProto(pm.host, inPeers, WakuRelayIDv200)
|
||||
inRelayPeers, _ = utils.FilterPeersByProto(pm.host, inPeers, relay.WakuRelayID_v200)
|
||||
}
|
||||
if outPeers.Len() != 0 {
|
||||
outRelayPeers, _ = utils.FilterPeersByProto(pm.host, outPeers, WakuRelayIDv200)
|
||||
outRelayPeers, _ = utils.FilterPeersByProto(pm.host, outPeers, relay.WakuRelayID_v200)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (pm *PeerManager) connectToRelayPeers() {
|
||||
// ensureMinRelayConnsPerTopic makes sure there are min of D conns per pubsubTopic.
|
||||
// If not it will look into peerStore to initiate more connections.
|
||||
// If peerStore doesn't have enough peers, will wait for discv5 to find more and try in next cycle
|
||||
func (pm *PeerManager) ensureMinRelayConnsPerTopic() {
|
||||
pm.topicMutex.RLock()
|
||||
defer pm.topicMutex.RUnlock()
|
||||
for topicStr, topicInst := range pm.subRelayTopics {
|
||||
curPeers := topicInst.topic.ListPeers()
|
||||
curPeerLen := len(curPeers)
|
||||
if curPeerLen < waku_proto.GossipSubOptimalFullMeshSize {
|
||||
pm.logger.Info("Subscribed topic is unhealthy, initiating more connections to maintain health",
|
||||
zap.String("pubSubTopic", topicStr), zap.Int("connectedPeerCount", curPeerLen),
|
||||
zap.Int("optimumPeers", waku_proto.GossipSubOptimalFullMeshSize))
|
||||
//Find not connected peers.
|
||||
notConnectedPeers := pm.getNotConnectedPers(topicStr)
|
||||
if notConnectedPeers.Len() == 0 {
|
||||
//TODO: Trigger on-demand discovery for this topic.
|
||||
continue
|
||||
}
|
||||
//Connect to eligible peers.
|
||||
numPeersToConnect := waku_proto.GossipSubOptimalFullMeshSize - curPeerLen
|
||||
|
||||
if numPeersToConnect > notConnectedPeers.Len() {
|
||||
numPeersToConnect = notConnectedPeers.Len()
|
||||
}
|
||||
pm.connectToPeers(notConnectedPeers[0:numPeersToConnect])
|
||||
}
|
||||
}
|
||||
}
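The health criterion used in the loop above can be read as a simple predicate; this hypothetical helper (not part of the diff) states it explicitly, with D = GossipSubOptimalFullMeshSize.
// Hypothetical helper, equivalent to the check in ensureMinRelayConnsPerTopic.
func isTopicHealthy(topicInst *NodeTopicDetails) bool {
	return len(topicInst.topic.ListPeers()) >= waku_proto.GossipSubOptimalFullMeshSize
}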
|
||||
|
||||
// connectToRelayPeers ensures minimum D connections are there for each pubSubTopic.
|
||||
// If not, initiates connections to additional peers.
|
||||
// It also checks incoming relay connections and prunes them once they cross InRelayPeersTarget
|
||||
func (pm *PeerManager) connectToRelayPeers() {
|
||||
//Check for out peer connections and connect to more peers.
|
||||
pm.ensureMinRelayConnsPerTopic()
|
||||
|
||||
inRelayPeers, outRelayPeers := pm.getRelayPeers()
|
||||
pm.logger.Info("Number of Relay peers connected", zap.Int("inRelayPeers", inRelayPeers.Len()),
|
||||
zap.Int("outRelayPeers", outRelayPeers.Len()))
|
||||
pm.logger.Info("number of relay peers connected",
|
||||
zap.Int("in", inRelayPeers.Len()),
|
||||
zap.Int("out", outRelayPeers.Len()))
|
||||
if inRelayPeers.Len() > 0 &&
|
||||
inRelayPeers.Len() > pm.InRelayPeersTarget {
|
||||
pm.pruneInRelayConns(inRelayPeers)
|
||||
}
|
||||
|
||||
if outRelayPeers.Len() > pm.OutRelayPeersTarget {
|
||||
return
|
||||
}
|
||||
totalRelayPeers := inRelayPeers.Len() + outRelayPeers.Len()
|
||||
// Establish additional connections if the number of connected peers is less than the target.
|
||||
//What if the not-connected peers in the peerstore are not relay peers?
|
||||
if totalRelayPeers < pm.maxRelayPeers {
|
||||
//Find not connected peers.
|
||||
notConnectedPeers := pm.getNotConnectedPers()
|
||||
if notConnectedPeers.Len() == 0 {
|
||||
return
|
||||
}
|
||||
//Connect to eligible peers.
|
||||
numPeersToConnect := pm.maxRelayPeers - totalRelayPeers
|
||||
|
||||
if numPeersToConnect > notConnectedPeers.Len() {
|
||||
numPeersToConnect = notConnectedPeers.Len()
|
||||
}
|
||||
pm.connectToPeers(notConnectedPeers[0:numPeersToConnect])
|
||||
} //Else: Should we raise some sort of unhealthy event??
|
||||
}
|
||||
|
||||
// addrInfoToPeerData returns the PeerData (including address info) for a peer
|
||||
// If addresses are expired, it removes the peer from host peerStore and returns nil.
|
||||
func addrInfoToPeerData(origin wps.Origin, peerID peer.ID, host host.Host) *PeerData {
|
||||
addrs := host.Peerstore().Addrs(peerID)
|
||||
if len(addrs) == 0 {
|
||||
//Addresses expired, remove peer from peerStore
|
||||
host.Peerstore().RemovePeer(peerID)
|
||||
return nil
|
||||
}
|
||||
return &PeerData{
|
||||
Origin: origin,
|
||||
AddrInfo: peer.AddrInfo{
|
||||
ID: peerID,
|
||||
Addrs: addrs,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// connectToPeers connects to peers provided in the list if the addresses have not expired.
|
||||
func (pm *PeerManager) connectToPeers(peers peer.IDSlice) {
|
||||
for _, peerID := range peers {
|
||||
peerInfo := peer.AddrInfo{
|
||||
ID: peerID,
|
||||
Addrs: pm.host.Peerstore().Addrs(peerID),
|
||||
peerData := addrInfoToPeerData(wps.PeerManager, peerID, pm.host)
|
||||
if peerData == nil {
|
||||
continue
|
||||
}
|
||||
pm.peerConnector.publishWork(pm.ctx, peerInfo)
|
||||
pm.peerConnector.PushToChan(*peerData)
|
||||
}
|
||||
}
|
||||
|
||||
func (pm *PeerManager) getNotConnectedPers() (notConnectedPeers peer.IDSlice) {
|
||||
for _, peerID := range pm.host.Peerstore().Peers() {
|
||||
// getNotConnectedPers returns peers for a pubSubTopic that are not connected.
|
||||
func (pm *PeerManager) getNotConnectedPers(pubsubTopic string) (notConnectedPeers peer.IDSlice) {
|
||||
var peerList peer.IDSlice
|
||||
if pubsubTopic == "" {
|
||||
peerList = pm.host.Peerstore().Peers()
|
||||
} else {
|
||||
peerList = pm.host.Peerstore().(*wps.WakuPeerstoreImpl).PeersByPubSubTopic(pubsubTopic)
|
||||
}
|
||||
for _, peerID := range peerList {
|
||||
if pm.host.Network().Connectedness(peerID) != network.Connected {
|
||||
notConnectedPeers = append(notConnectedPeers, peerID)
|
||||
}
|
||||
|
@@ -193,13 +261,15 @@ func (pm *PeerManager) getNotConnectedPers() (notConnectedPeers peer.IDSlice) {
|
|||
return
|
||||
}
|
||||
|
||||
// pruneInRelayConns prunes any incoming relay connections exceeding the derived InRelayPeersTarget
|
||||
func (pm *PeerManager) pruneInRelayConns(inRelayPeers peer.IDSlice) {
|
||||
|
||||
//Start disconnecting peers, based on what?
|
||||
//For now, just disconnect most recently connected peers
|
||||
//For now no preference is used
|
||||
//TODO: Need to have more intelligent way of doing this, maybe peer scores.
|
||||
pm.logger.Info("Number of in peer connections exceed targer relay peers, hence pruning",
|
||||
zap.Int("inRelayPeers", inRelayPeers.Len()), zap.Int("inRelayPeersTarget", pm.InRelayPeersTarget))
|
||||
//TODO: Keep optimalPeersRequired for a pubSubTopic in mind while pruning connections to peers.
|
||||
pm.logger.Info("peer connections exceed target relay peers, hence pruning",
|
||||
zap.Int("cnt", inRelayPeers.Len()), zap.Int("target", pm.InRelayPeersTarget))
|
||||
for pruningStartIndex := pm.InRelayPeersTarget; pruningStartIndex < inRelayPeers.Len(); pruningStartIndex++ {
|
||||
p := inRelayPeers[pruningStartIndex]
|
||||
err := pm.host.Network().ClosePeer(p)
|
||||
|
@@ -215,9 +285,38 @@ func (pm *PeerManager) pruneInRelayConns(inRelayPeers peer.IDSlice) {
|
|||
// AddDiscoveredPeer to add dynamically discovered peers.
|
||||
// Note that these peers will not be set in service-slots.
|
||||
// TODO: It may be good to set service-slots based on the services supported in the ENR
|
||||
func (pm *PeerManager) AddDiscoveredPeer(p PeerData) {
|
||||
func (pm *PeerManager) AddDiscoveredPeer(p PeerData, connectNow bool) {
|
||||
//Doing this check again inside addPeer, in order to avoid the additional complexity of rolling back other changes.
|
||||
if pm.maxPeers <= pm.host.Peerstore().Peers().Len() {
|
||||
return
|
||||
}
|
||||
//Check if the peer is already present, if so skip adding
|
||||
_, err := pm.host.Peerstore().(wps.WakuPeerstore).Origin(p.AddrInfo.ID)
|
||||
if err == nil {
|
||||
pm.logger.Debug("Found discovered peer already in peerStore", logging.HostID("peer", p.AddrInfo.ID))
|
||||
return
|
||||
}
|
||||
// Try to fetch shard info from ENR to arrive at pubSub topics.
|
||||
if len(p.PubSubTopics) == 0 && p.ENR != nil {
|
||||
shards, err := wenr.RelaySharding(p.ENR.Record())
|
||||
if err != nil {
|
||||
pm.logger.Error("Could not derive relayShards from ENR", zap.Error(err),
|
||||
logging.HostID("peer", p.AddrInfo.ID), zap.String("enr", p.ENR.String()))
|
||||
} else {
|
||||
if shards != nil {
|
||||
p.PubSubTopics = make([]string, 0)
|
||||
topics := shards.Topics()
|
||||
for _, topic := range topics {
|
||||
topicStr := topic.String()
|
||||
p.PubSubTopics = append(p.PubSubTopics, topicStr)
|
||||
}
|
||||
} else {
|
||||
pm.logger.Debug("ENR doesn't have relay shards", logging.HostID("peer", p.AddrInfo.ID))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_ = pm.addPeer(p.AddrInfo.ID, p.AddrInfo.Addrs, p.Origin)
|
||||
_ = pm.addPeer(p.AddrInfo.ID, p.AddrInfo.Addrs, p.Origin, p.PubSubTopics)
|
||||
|
||||
if p.ENR != nil {
|
||||
err := pm.host.Peerstore().(wps.WakuPeerstore).SetENR(p.AddrInfo.ID, p.ENR)
|
||||
|
@@ -226,13 +325,25 @@ func (pm *PeerManager) AddDiscoveredPeer(p PeerData) {
|
|||
logging.HostID("peer", p.AddrInfo.ID), zap.String("enr", p.ENR.String()))
|
||||
}
|
||||
}
|
||||
if connectNow {
|
||||
pm.peerConnector.PushToChan(p)
|
||||
}
|
||||
}
|
||||
|
||||
// addPeer adds peer to only the peerStore.
|
||||
// It also sets additional metadata such as origin, ENR and supported protocols
|
||||
func (pm *PeerManager) addPeer(ID peer.ID, addrs []ma.Multiaddr, origin wps.Origin, protocols ...protocol.ID) error {
|
||||
func (pm *PeerManager) addPeer(ID peer.ID, addrs []ma.Multiaddr, origin wps.Origin, pubSubTopics []string, protocols ...protocol.ID) error {
|
||||
if pm.maxPeers <= pm.host.Peerstore().Peers().Len() {
|
||||
return errors.New("peer store capacity reached")
|
||||
}
|
||||
pm.logger.Info("adding peer to peerstore", logging.HostID("peer", ID))
|
||||
pm.host.Peerstore().AddAddrs(ID, addrs, peerstore.AddressTTL)
|
||||
if origin == wps.Static {
|
||||
pm.host.Peerstore().AddAddrs(ID, addrs, peerstore.PermanentAddrTTL)
|
||||
} else {
|
||||
//Need to re-evaluate the address expiry
|
||||
// For now expiring them with default addressTTL which is an hour.
|
||||
pm.host.Peerstore().AddAddrs(ID, addrs, peerstore.AddressTTL)
|
||||
}
|
||||
err := pm.host.Peerstore().(wps.WakuPeerstore).SetOrigin(ID, origin)
|
||||
if err != nil {
|
||||
pm.logger.Error("could not set origin", zap.Error(err), logging.HostID("peer", ID))
|
||||
|
@@ -245,11 +356,21 @@ func (pm *PeerManager) addPeer(ID peer.ID, addrs []ma.Multiaddr, origin wps.Orig
|
|||
return err
|
||||
}
|
||||
}
|
||||
if len(pubSubTopics) == 0 {
|
||||
// Probably the peer is discovered via DNSDiscovery (for which we don't have pubSubTopic info)
|
||||
//If pubSubTopic and ENR are empty, or the ENR has no shard info, then set to the default pubsub topic
|
||||
pubSubTopics = []string{relay.DefaultWakuTopic}
|
||||
}
|
||||
err = pm.host.Peerstore().(wps.WakuPeerstore).SetPubSubTopics(ID, pubSubTopics)
|
||||
if err != nil {
|
||||
pm.logger.Error("could not store pubSubTopic", zap.Error(err),
|
||||
logging.HostID("peer", ID), zap.Strings("topics", pubSubTopics))
|
||||
}
|
||||
return nil
|
||||
}
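A hedged caller-side sketch of the exported AddPeer wrapper defined just below; the address variable is a placeholder and must contain a /p2p/<peer-id> component.
// staticNodeAddress is a hypothetical multiaddress string supplied by configuration.
addr, err := ma.NewMultiaddr(staticNodeAddress)
if err == nil {
	// nil pubSubTopics makes addPeer fall back to relay.DefaultWakuTopic, as described above.
	if peerID, err := pm.AddPeer(addr, wps.Static, nil); err == nil {
		pm.logger.Debug("added static peer", logging.HostID("peer", peerID))
	}
}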
|
||||
|
||||
// AddPeer adds peer to the peerStore and also to service slots
|
||||
func (pm *PeerManager) AddPeer(address ma.Multiaddr, origin wps.Origin, protocols ...protocol.ID) (peer.ID, error) {
|
||||
func (pm *PeerManager) AddPeer(address ma.Multiaddr, origin wps.Origin, pubSubTopics []string, protocols ...protocol.ID) (peer.ID, error) {
|
||||
//Assuming all addresses have peerId
|
||||
info, err := peer.AddrInfoFromP2pAddr(address)
|
||||
if err != nil {
|
||||
|
@@ -262,7 +383,7 @@ func (pm *PeerManager) AddPeer(address ma.Multiaddr, origin wps.Origin, protocol
|
|||
}
|
||||
|
||||
//Add to the peer-store
|
||||
err = pm.addPeer(info.ID, info.Addrs, origin, protocols...)
|
||||
err = pm.addPeer(info.ID, info.Addrs, origin, pubSubTopics, protocols...)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
@@ -283,7 +404,7 @@ func (pm *PeerManager) RemovePeer(peerID peer.ID) {
|
|||
// Adding to peerStore is expected to be already done by caller.
|
||||
// If relay proto is passed, it is not added to serviceSlot.
|
||||
func (pm *PeerManager) addPeerToServiceSlot(proto protocol.ID, peerID peer.ID) {
|
||||
if proto == WakuRelayIDv200 {
|
||||
if proto == relay.WakuRelayID_v200 {
|
||||
pm.logger.Warn("Cannot add Relay peer to service peer slots")
|
||||
return
|
||||
}
|
||||
|
@@ -296,22 +417,32 @@ func (pm *PeerManager) addPeerToServiceSlot(proto protocol.ID, peerID peer.ID) {
|
|||
pm.serviceSlots.getPeers(proto).add(peerID)
|
||||
}
|
||||
|
||||
// SelectPeerByContentTopic is used to return a random peer that supports a given protocol for given contentTopic.
|
||||
// If a list of specific peers is passed, the peer will be chosen from that list assuming
|
||||
// it supports the chosen protocol and contentTopic; otherwise it will choose a peer from the service slot.
|
||||
// If a peer cannot be found in the service slot, a peer will be selected from node peerstore
|
||||
func (pm *PeerManager) SelectPeerByContentTopic(proto protocol.ID, contentTopic string, specificPeers ...peer.ID) (peer.ID, error) {
|
||||
pubsubTopic, err := waku_proto.GetPubSubTopicFromContentTopic(contentTopic)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return pm.SelectPeer(proto, pubsubTopic, specificPeers...)
|
||||
}
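Illustrative call (the protocol ID and content topic are placeholders): the content topic is mapped to its pubsub topic via autosharding before peer selection.
// proto is whichever service protocol.ID the caller needs; the content topic must follow
// the /app/version/name/encoding layout so that autosharding can derive its pubsub topic.
peerID, err := pm.SelectPeerByContentTopic(proto, "/app/1/chat/proto")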
|
||||
|
||||
// SelectPeer is used to return a random peer that supports a given protocol.
|
||||
// If a list of specific peers is passed, the peer will be chosen from that list assuming
|
||||
// it supports the chosen protocol; otherwise it will choose a peer from the service slot.
|
||||
// If a peer cannot be found in the service slot, a peer will be selected from node peerstore
|
||||
func (pm *PeerManager) SelectPeer(proto protocol.ID, specificPeers []peer.ID, logger *zap.Logger) (peer.ID, error) {
|
||||
// if pubSubTopic is specified, the peer is selected from the list of peers that support that pubSubTopic
|
||||
func (pm *PeerManager) SelectPeer(proto protocol.ID, pubSubTopic string, specificPeers ...peer.ID) (peer.ID, error) {
|
||||
// @TODO We need to be more strategic about which peers we dial. Right now we just set one on the service.
|
||||
// Ideally depending on the query and our set of peers we take a subset of ideal peers.
|
||||
// This will require us to check for various factors such as:
|
||||
// - which topics they track
|
||||
// - latency?
|
||||
|
||||
//Try to fetch from serviceSlot
|
||||
if slot := pm.serviceSlots.getPeers(proto); slot != nil {
|
||||
if peerID, err := slot.getRandom(); err == nil {
|
||||
return peerID, nil
|
||||
}
|
||||
if peerID := pm.selectServicePeer(proto, pubSubTopic, specificPeers...); peerID != nil {
|
||||
return *peerID, nil
|
||||
}
|
||||
|
||||
// if not found in serviceSlots or proto == WakuRelayIDv200
|
||||
|
@@ -319,5 +450,36 @@ func (pm *PeerManager) SelectPeer(proto protocol.ID, specificPeers []peer.ID, lo
|
|||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if pubSubTopic != "" {
|
||||
filteredPeers = pm.host.Peerstore().(wps.WakuPeerstore).PeersByPubSubTopic(pubSubTopic, filteredPeers...)
|
||||
}
|
||||
return utils.SelectRandomPeer(filteredPeers, pm.logger)
|
||||
}
|
||||
|
||||
func (pm *PeerManager) selectServicePeer(proto protocol.ID, pubSubTopic string, specificPeers ...peer.ID) (peerIDPtr *peer.ID) {
|
||||
peerIDPtr = nil
|
||||
|
||||
//Try to fetch from serviceSlot
|
||||
if slot := pm.serviceSlots.getPeers(proto); slot != nil {
|
||||
if pubSubTopic == "" {
|
||||
if peerID, err := slot.getRandom(); err == nil {
|
||||
peerIDPtr = &peerID
|
||||
} else {
|
||||
pm.logger.Debug("could not retrieve random peer from slot", zap.Error(err))
|
||||
}
|
||||
} else { //PubsubTopic based selection
|
||||
keys := make([]peer.ID, 0, len(slot.m))
|
||||
for i := range slot.m {
|
||||
keys = append(keys, i)
|
||||
}
|
||||
selectedPeers := pm.host.Peerstore().(wps.WakuPeerstore).PeersByPubSubTopic(pubSubTopic, keys...)
|
||||
peerID, err := utils.SelectRandomPeer(selectedPeers, pm.logger)
|
||||
if err == nil {
|
||||
peerIDPtr = &peerID
|
||||
} else {
|
||||
pm.logger.Debug("could not select random peer", zap.Error(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
|
|
@@ -5,6 +5,7 @@ import (
|
|||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
|
||||
"github.com/waku-org/go-waku/waku/v2/utils"
|
||||
)
|
||||
|
||||
|
@@ -57,7 +58,7 @@ func NewServiceSlot() *ServiceSlots {
|
|||
// getPeers for getting all the peers for a given protocol
|
||||
// since peerMap is only used in peerManager that's why it is unexported
|
||||
func (slots *ServiceSlots) getPeers(proto protocol.ID) *peerMap {
|
||||
if proto == WakuRelayIDv200 {
|
||||
if proto == relay.WakuRelayID_v200 {
|
||||
return nil
|
||||
}
|
||||
slots.mu.Lock()
|
||||
|
|
166
vendor/github.com/waku-org/go-waku/waku/v2/peermanager/topic_event_handler.go
generated
vendored
Normal file
166
vendor/github.com/waku-org/go-waku/waku/v2/peermanager/topic_event_handler.go
generated
vendored
Normal file
|
@@ -0,0 +1,166 @@
|
|||
package peermanager
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/event"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/waku-org/go-waku/logging"
|
||||
wps "github.com/waku-org/go-waku/waku/v2/peerstore"
|
||||
waku_proto "github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
func (pm *PeerManager) SubscribeToRelayEvtBus(bus event.Bus) error {
|
||||
var err error
|
||||
pm.sub, err = bus.Subscribe([]interface{}{new(relay.EvtPeerTopic), new(relay.EvtRelaySubscribed), new(relay.EvtRelayUnsubscribed)})
|
||||
return err
|
||||
}
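A minimal wiring sketch; the Events() accessor on the relay is an assumption here, not something shown in this diff.
// Assumption: wakuRelay exposes its libp2p event bus, e.g. via an Events() accessor.
if err := pm.SubscribeToRelayEvtBus(wakuRelay.Events()); err != nil {
	return err
}
pm.Start(ctx) // runs peerEventLoop (below) plus the connectivity loop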
|
||||
|
||||
func (pm *PeerManager) handleNewRelayTopicSubscription(pubsubTopic string, topicInst *pubsub.Topic) {
|
||||
pm.logger.Info("handleNewRelayTopicSubscription", zap.String("pubSubTopic", pubsubTopic))
|
||||
pm.topicMutex.Lock()
|
||||
defer pm.topicMutex.Unlock()
|
||||
|
||||
_, ok := pm.subRelayTopics[pubsubTopic]
|
||||
if ok {
|
||||
//Nothing to be done, as we are already subscribed to this topic.
|
||||
return
|
||||
}
|
||||
pm.subRelayTopics[pubsubTopic] = &NodeTopicDetails{topicInst}
|
||||
//Check how many connected relay peers subscribe to this topic; if fewer than D, find peers in the peerstore and connect.
|
||||
//If no peers in peerStore, trigger discovery for this topic?
|
||||
relevantPeersForPubSubTopic := pm.host.Peerstore().(*wps.WakuPeerstoreImpl).PeersByPubSubTopic(pubsubTopic)
|
||||
var notConnectedPeers peer.IDSlice
|
||||
connectedPeers := 0
|
||||
for _, peer := range relevantPeersForPubSubTopic {
|
||||
if pm.host.Network().Connectedness(peer) == network.Connected {
|
||||
connectedPeers++
|
||||
} else {
|
||||
notConnectedPeers = append(notConnectedPeers, peer)
|
||||
}
|
||||
}
|
||||
|
||||
if connectedPeers >= waku_proto.GossipSubOptimalFullMeshSize { //TODO: Use a config rather than hard-coding.
|
||||
// Should we use optimal number or define some sort of a config for the node to choose from?
|
||||
// A desktop node may choose this to be 4-6, whereas a service node may choose this to be 8-12 based on resources it has
|
||||
// or bandwidth it can support.
|
||||
// Should we link this to bandwidth management somehow or just depend on some sort of config profile?
|
||||
pm.logger.Info("Optimal required relay peers for new pubSubTopic are already connected ", zap.String("pubSubTopic", pubsubTopic),
|
||||
zap.Int("connectedPeerCount", connectedPeers))
|
||||
return
|
||||
}
|
||||
triggerDiscovery := false
|
||||
if notConnectedPeers.Len() > 0 {
|
||||
numPeersToConnect := notConnectedPeers.Len() - connectedPeers
|
||||
if numPeersToConnect < 0 {
|
||||
numPeersToConnect = notConnectedPeers.Len()
|
||||
} else if numPeersToConnect-connectedPeers > waku_proto.GossipSubOptimalFullMeshSize {
|
||||
numPeersToConnect = waku_proto.GossipSubOptimalFullMeshSize - connectedPeers
|
||||
}
|
||||
if numPeersToConnect+connectedPeers < waku_proto.GossipSubOptimalFullMeshSize {
|
||||
triggerDiscovery = true
|
||||
}
|
||||
//For now all peers are being given same priority,
|
||||
// Later we may want to choose peers that have more shards in common over others.
|
||||
pm.connectToPeers(notConnectedPeers[0:numPeersToConnect])
|
||||
} else {
|
||||
triggerDiscovery = true
|
||||
}
|
||||
|
||||
if triggerDiscovery {
|
||||
//TODO: Initiate on-demand discovery for this pubSubTopic.
|
||||
// Use peer-exchange and rendezvous?
|
||||
//Should we query discoverycache to find out if there are any more peers before triggering discovery?
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (pm *PeerManager) handleNewRelayTopicUnSubscription(pubsubTopic string) {
|
||||
pm.logger.Info("handleNewRelayTopicUnSubscription", zap.String("pubSubTopic", pubsubTopic))
|
||||
pm.topicMutex.Lock()
|
||||
defer pm.topicMutex.Unlock()
|
||||
_, ok := pm.subRelayTopics[pubsubTopic]
|
||||
if !ok {
|
||||
//Nothing to be done, as we are already unsubscribed from this topic.
|
||||
return
|
||||
}
|
||||
delete(pm.subRelayTopics, pubsubTopic)
|
||||
|
||||
//If there are peers only subscribed to this topic, disconnect them.
|
||||
relevantPeersForPubSubTopic := pm.host.Peerstore().(*wps.WakuPeerstoreImpl).PeersByPubSubTopic(pubsubTopic)
|
||||
for _, peer := range relevantPeersForPubSubTopic {
|
||||
if pm.host.Network().Connectedness(peer) == network.Connected {
|
||||
peerTopics, err := pm.host.Peerstore().(*wps.WakuPeerstoreImpl).PubSubTopics(peer)
|
||||
if err != nil {
|
||||
pm.logger.Error("Could not retrieve pubsub topics for peer", zap.Error(err),
|
||||
logging.HostID("peerID", peer))
|
||||
continue
|
||||
}
|
||||
if len(peerTopics) == 1 && peerTopics[0] == pubsubTopic {
|
||||
err := pm.host.Network().ClosePeer(peer)
|
||||
if err != nil {
|
||||
pm.logger.Warn("Failed to disconnect connection towards peer",
|
||||
logging.HostID("peerID", peer))
|
||||
continue
|
||||
}
|
||||
pm.logger.Debug("Successfully disconnected connection towards peer",
|
||||
logging.HostID("peerID", peer))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pm *PeerManager) handlerPeerTopicEvent(peerEvt relay.EvtPeerTopic) {
|
||||
wps := pm.host.Peerstore().(*wps.WakuPeerstoreImpl)
|
||||
peerID := peerEvt.PeerID
|
||||
if peerEvt.State == relay.PEER_JOINED {
|
||||
err := wps.AddPubSubTopic(peerID, peerEvt.PubsubTopic)
|
||||
if err != nil {
|
||||
pm.logger.Error("failed to add pubSubTopic for peer",
|
||||
logging.HostID("peerID", peerID), zap.String("topic", peerEvt.PubsubTopic), zap.Error(err))
|
||||
}
|
||||
} else if peerEvt.State == relay.PEER_LEFT {
|
||||
err := wps.RemovePubSubTopic(peerID, peerEvt.PubsubTopic)
|
||||
if err != nil {
|
||||
pm.logger.Error("failed to remove pubSubTopic for peer",
|
||||
logging.HostID("peerID", peerID), zap.Error(err))
|
||||
}
|
||||
} else {
|
||||
pm.logger.Error("unknown peer event received", zap.Int("eventState", int(peerEvt.State)))
|
||||
}
|
||||
}
|
||||
|
||||
func (pm *PeerManager) peerEventLoop(ctx context.Context) {
|
||||
defer pm.sub.Close()
|
||||
for {
|
||||
select {
|
||||
case e := <-pm.sub.Out():
|
||||
switch e := e.(type) {
|
||||
case relay.EvtPeerTopic:
|
||||
{
|
||||
peerEvt := (relay.EvtPeerTopic)(e)
|
||||
pm.handlerPeerTopicEvent(peerEvt)
|
||||
}
|
||||
case relay.EvtRelaySubscribed:
|
||||
{
|
||||
eventDetails := (relay.EvtRelaySubscribed)(e)
|
||||
pm.handleNewRelayTopicSubscription(eventDetails.Topic, eventDetails.TopicInst)
|
||||
}
|
||||
case relay.EvtRelayUnsubscribed:
|
||||
{
|
||||
eventDetails := (relay.EvtRelayUnsubscribed)(e)
|
||||
pm.handleNewRelayTopicUnSubscription(eventDetails.Topic)
|
||||
}
|
||||
default:
|
||||
pm.logger.Error("unsupported event type", zap.Any("eventType", e))
|
||||
}
|
||||
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,6 +1,7 @@
|
|||
package peerstore
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
|
@@ -18,13 +19,15 @@ const (
|
|||
Discv5
|
||||
Static
|
||||
PeerExchange
|
||||
DnsDiscovery
|
||||
DNSDiscovery
|
||||
Rendezvous
|
||||
PeerManager
|
||||
)
|
||||
|
||||
const peerOrigin = "origin"
|
||||
const peerENR = "enr"
|
||||
const peerDirection = "direction"
|
||||
const peerPubSubTopics = "pubSubTopics"
|
||||
|
||||
// ConnectionFailures contains connection failure information towards all peers
|
||||
type ConnectionFailures struct {
|
||||
|
@@ -51,6 +54,12 @@ type WakuPeerstore interface {
|
|||
|
||||
SetDirection(p peer.ID, direction network.Direction) error
|
||||
Direction(p peer.ID) (network.Direction, error)
|
||||
|
||||
AddPubSubTopic(p peer.ID, topic string) error
|
||||
RemovePubSubTopic(p peer.ID, topic string) error
|
||||
PubSubTopics(p peer.ID) ([]string, error)
|
||||
SetPubSubTopics(p peer.ID, topics []string) error
|
||||
PeersByPubSubTopic(pubSubTopic string, specificPeers ...peer.ID) peer.IDSlice
|
||||
}
|
||||
|
||||
// NewWakuPeerstore creates a new WakuPeerStore object
|
||||
|
@@ -139,3 +148,81 @@ func (ps *WakuPeerstoreImpl) Direction(p peer.ID) (network.Direction, error) {
|
|||
|
||||
return result.(network.Direction), nil
|
||||
}
|
||||
|
||||
// AddPubSubTopic adds a new pubSubTopic for a peer
|
||||
func (ps *WakuPeerstoreImpl) AddPubSubTopic(p peer.ID, topic string) error {
|
||||
existingTopics, err := ps.PubSubTopics(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, t := range existingTopics {
|
||||
if t == topic {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
existingTopics = append(existingTopics, topic)
|
||||
return ps.peerStore.Put(p, peerPubSubTopics, existingTopics)
|
||||
}
|
||||
|
||||
// RemovePubSubTopic removes a pubSubTopic from the peer
|
||||
func (ps *WakuPeerstoreImpl) RemovePubSubTopic(p peer.ID, topic string) error {
|
||||
existingTopics, err := ps.PubSubTopics(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(existingTopics) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := range existingTopics {
|
||||
if existingTopics[i] == topic {
|
||||
existingTopics = append(existingTopics[:i], existingTopics[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
err = ps.SetPubSubTopics(p, existingTopics)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetPubSubTopics sets pubSubTopics for a peer; it also overrides any existing ones that were set previously.
|
||||
func (ps *WakuPeerstoreImpl) SetPubSubTopics(p peer.ID, topics []string) error {
|
||||
return ps.peerStore.Put(p, peerPubSubTopics, topics)
|
||||
}
|
||||
|
||||
// PubSubTopics fetches list of pubSubTopics for a peer
|
||||
func (ps *WakuPeerstoreImpl) PubSubTopics(p peer.ID) ([]string, error) {
|
||||
result, err := ps.peerStore.Get(p, peerPubSubTopics)
|
||||
if err != nil {
|
||||
if errors.Is(err, peerstore.ErrNotFound) {
|
||||
return nil, nil
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return result.([]string), nil
|
||||
}
|
||||
|
||||
// PeersByPubSubTopic returns the list of peers by pubSubTopic
|
||||
// If specificPeers are listed, filtering is done over them; otherwise over all peers in the peerstore
|
||||
func (ps *WakuPeerstoreImpl) PeersByPubSubTopic(pubSubTopic string, specificPeers ...peer.ID) peer.IDSlice {
|
||||
if specificPeers == nil {
|
||||
specificPeers = ps.Peers()
|
||||
}
|
||||
var result peer.IDSlice
|
||||
for _, p := range specificPeers {
|
||||
topics, err := ps.PubSubTopics(p)
|
||||
if err == nil {
|
||||
for _, topic := range topics {
|
||||
if topic == pubSubTopic {
|
||||
result = append(result, p)
|
||||
}
|
||||
}
|
||||
} //Note: skipping a peer in case of an error as there would be others available.
|
||||
}
|
||||
return result
|
||||
}
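Illustrative use of the new topic bookkeeping (the host, peer ID and shard name below are placeholders):
// h is a libp2p host.Host whose peerstore was built with NewWakuPeerstore.
ps := h.Peerstore().(*WakuPeerstoreImpl)
_ = ps.AddPubSubTopic(p, "/waku/2/rs/16/32")              // record that peer p serves this shard
peersOnShard := ps.PeersByPubSubTopic("/waku/2/rs/16/32") // everyone known to serve it
topics, _ := ps.PubSubTopics(p)                           // all shards recorded for p
_, _ = peersOnShard, topics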
|
||||
|
|
73
vendor/github.com/waku-org/go-waku/waku/v2/protocol/common_service.go
generated
vendored
Normal file
73
vendor/github.com/waku-org/go-waku/waku/v2/protocol/common_service.go
generated
vendored
Normal file
|
@@ -0,0 +1,73 @@
|
|||
package protocol
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// CommonService is a common layout for all services that require mutex protection and a guarantee that all running goroutines will be finished before Stop finishes execution. This guarantee comes from the WaitGroup: use CommonService.WaitGroup() in every goroutine that should finish by the end of the Stop function.
|
||||
type CommonService struct {
|
||||
sync.RWMutex
|
||||
cancel context.CancelFunc
|
||||
ctx context.Context
|
||||
wg sync.WaitGroup
|
||||
started bool
|
||||
}
|
||||
|
||||
func NewCommonService() *CommonService {
|
||||
return &CommonService{
|
||||
wg: sync.WaitGroup{},
|
||||
RWMutex: sync.RWMutex{},
|
||||
}
|
||||
}
|
||||
|
||||
// mutex protected start function
|
||||
// creates internal context over provided context and runs fn safely
|
||||
// fn is the function to be executed to start the protocol
|
||||
func (sp *CommonService) Start(ctx context.Context, fn func() error) error {
|
||||
sp.Lock()
|
||||
defer sp.Unlock()
|
||||
if sp.started {
|
||||
return ErrAlreadyStarted
|
||||
}
|
||||
sp.started = true
|
||||
sp.ctx, sp.cancel = context.WithCancel(ctx)
|
||||
if err := fn(); err != nil {
|
||||
sp.started = false
|
||||
sp.cancel()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var ErrAlreadyStarted = errors.New("already started")
|
||||
var ErrNotStarted = errors.New("not started")
|
||||
|
||||
// mutex protected stop function
|
||||
func (sp *CommonService) Stop(fn func()) {
|
||||
sp.Lock()
|
||||
defer sp.Unlock()
|
||||
if !sp.started {
|
||||
return
|
||||
}
|
||||
sp.cancel()
|
||||
fn()
|
||||
sp.wg.Wait()
|
||||
sp.started = false
|
||||
}
|
||||
|
||||
// This is not a mutex-protected function; it is up to the caller to use it in a mutex-protected context
|
||||
func (sp *CommonService) ErrOnNotRunning() error {
|
||||
if !sp.started {
|
||||
return ErrNotStarted
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sp *CommonService) Context() context.Context {
|
||||
return sp.ctx
|
||||
}
|
||||
func (sp *CommonService) WaitGroup() *sync.WaitGroup {
|
||||
return &sp.wg
|
||||
}
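A minimal sketch of the intended embedding pattern (the type and its goroutine are illustrative only):
type exampleProtocol struct {
	*CommonService
}

func (e *exampleProtocol) Start(ctx context.Context) error {
	return e.CommonService.Start(ctx, func() error {
		e.WaitGroup().Add(1)
		go func() {
			defer e.WaitGroup().Done()
			<-e.Context().Done() // runs until Stop cancels the internal context
		}()
		return nil
	})
}

func (e *exampleProtocol) Stop() {
	e.CommonService.Stop(func() {}) // waits on the WaitGroup before returning
}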
|
30
vendor/github.com/waku-org/go-waku/waku/v2/protocol/content_filter.go
generated
vendored
Normal file
30
vendor/github.com/waku-org/go-waku/waku/v2/protocol/content_filter.go
generated
vendored
Normal file
|
@@ -0,0 +1,30 @@
|
|||
package protocol
|
||||
|
||||
import "golang.org/x/exp/maps"
|
||||
|
||||
type ContentTopicSet map[string]struct{}
|
||||
|
||||
func NewContentTopicSet(contentTopics ...string) ContentTopicSet {
|
||||
s := make(ContentTopicSet, len(contentTopics))
|
||||
for _, ct := range contentTopics {
|
||||
s[ct] = struct{}{}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// ContentFilter is used to specify the filter to be applied for a FilterNode.
|
||||
// Topic means pubSubTopic - optional when using contentTopics that follow Auto sharding, mandatory in case of named or static sharding.
|
||||
// ContentTopics - Specify the list of content topics to be filtered under a pubSubTopic (for named and static sharding), or a list of contentTopics (in case of Auto sharding)
|
||||
// If pubSub topic is not specified, then content-topics are used to derive the shard and corresponding pubSubTopic using autosharding algorithm
|
||||
type ContentFilter struct {
|
||||
PubsubTopic string
|
||||
ContentTopics ContentTopicSet
|
||||
}
|
||||
|
||||
func (cf ContentFilter) ContentTopicsList() []string {
|
||||
return maps.Keys(cf.ContentTopics)
|
||||
}
|
||||
|
||||
func NewContentFilter(pubsubTopic string, contentTopics ...string) ContentFilter {
|
||||
return ContentFilter{pubsubTopic, NewContentTopicSet(contentTopics...)}
|
||||
}
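For example (topic values are placeholders):
// Static/named sharding: the pubsub topic is given explicitly.
cfStatic := NewContentFilter("/waku/2/rs/16/32", "/app/1/chat/proto")
// Auto sharding: the pubsub topic is left empty and derived later from the content topics.
cfAuto := NewContentFilter("", "/app/1/chat/proto", "/app/1/media/proto")
_, _ = cfStatic.ContentTopicsList(), cfAuto.ContentTopicsList()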
|
|
@@ -28,7 +28,7 @@ type WakuEnrBitfield = uint8
|
|||
|
||||
// NewWakuEnrBitfield creates a WakuEnrBitField whose value will depend on which protocols are enabled in the node
|
||||
func NewWakuEnrBitfield(lightpush, filter, store, relay bool) WakuEnrBitfield {
|
||||
var v uint8 = 0
|
||||
var v uint8
|
||||
|
||||
if lightpush {
|
||||
v |= (1 << 3)
|
||||
|
@ -91,10 +91,9 @@ func Multiaddress(node *enode.Node) (peer.ID, []multiaddr.Multiaddr, error) {
|
|||
if err := node.Record().Load(enr.WithEntry(MultiaddrENRField, &multiaddrRaw)); err != nil {
|
||||
if !enr.IsNotFound(err) {
|
||||
return "", nil, err
|
||||
} else {
|
||||
// No multiaddr entry on enr
|
||||
return peerID, result, nil
|
||||
}
|
||||
// No multiaddr entry on enr
|
||||
return peerID, result, nil
|
||||
}
|
||||
|
||||
if len(multiaddrRaw) < 2 {
|
||||
|
|
|
@@ -35,15 +35,14 @@ func WithMultiaddress(multiaddrs ...multiaddr.Multiaddr) ENROption {
|
|||
failedOnceWritingENR := false
|
||||
couldWriteENRatLeastOnce := false
|
||||
successIdx := -1
|
||||
for i := len(multiaddrs) - 1; i >= 0; i-- {
|
||||
for i := len(multiaddrs); i > 0; i-- {
|
||||
err = writeMultiaddressField(localnode, multiaddrs[0:i])
|
||||
if err == nil {
|
||||
couldWriteENRatLeastOnce = true
|
||||
successIdx = i
|
||||
break
|
||||
} else {
|
||||
failedOnceWritingENR = true
|
||||
}
|
||||
failedOnceWritingENR = true
|
||||
}
|
||||
|
||||
if failedOnceWritingENR && couldWriteENRatLeastOnce {
|
||||
|
|
|
@@ -37,9 +37,9 @@ func WithWakuRelaySharding(rs protocol.RelayShards) ENROption {
|
|||
return func(localnode *enode.LocalNode) error {
|
||||
if len(rs.Indices) >= 64 {
|
||||
return WithWakuRelayShardingBitVector(rs)(localnode)
|
||||
} else {
|
||||
return WithWakuRelayShardingIndicesList(rs)(localnode)
|
||||
}
|
||||
|
||||
return WithWakuRelayShardingIndicesList(rs)(localnode)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -20,12 +20,12 @@ type Envelope struct {
|
|||
// as well as generating a hash based on the bytes that compose the message
|
||||
func NewEnvelope(msg *wpb.WakuMessage, receiverTime int64, pubSubTopic string) *Envelope {
|
||||
messageHash := msg.Hash(pubSubTopic)
|
||||
hash := hash.SHA256([]byte(msg.ContentTopic), msg.Payload)
|
||||
digest := hash.SHA256([]byte(msg.ContentTopic), msg.Payload)
|
||||
return &Envelope{
|
||||
msg: msg,
|
||||
hash: messageHash,
|
||||
index: &pb.Index{
|
||||
Digest: hash[:],
|
||||
Digest: digest[:],
|
||||
ReceiverTime: receiverTime,
|
||||
SenderTime: msg.Timestamp,
|
||||
PubsubTopic: pubSubTopic,
|
||||
|
@@ -48,6 +48,6 @@ func (e *Envelope) Hash() []byte {
|
|||
return e.hash
|
||||
}
|
||||
|
||||
func (env *Envelope) Index() *pb.Index {
|
||||
return env.index
|
||||
func (e *Envelope) Index() *pb.Index {
|
||||
return e.index
|
||||
}
|
||||
|
|
|
@@ -7,7 +7,7 @@ import (
|
|||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"sync"
|
||||
"strings"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
|
@@ -21,6 +21,7 @@ import (
|
|||
"github.com/waku-org/go-waku/waku/v2/protocol/filter/pb"
|
||||
wpb "github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/subscription"
|
||||
"github.com/waku-org/go-waku/waku/v2/timesource"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
@@ -34,35 +35,23 @@ var (
|
|||
)
|
||||
|
||||
type WakuFilterLightNode struct {
|
||||
sync.RWMutex
|
||||
started bool
|
||||
|
||||
cancel context.CancelFunc
|
||||
ctx context.Context
|
||||
*protocol.CommonService
|
||||
h host.Host
|
||||
broadcaster relay.Broadcaster
|
||||
broadcaster relay.Broadcaster //TODO: Move the broadcast functionality outside of relay client to a higher SDK layer.
|
||||
timesource timesource.Timesource
|
||||
metrics Metrics
|
||||
wg *sync.WaitGroup
|
||||
log *zap.Logger
|
||||
subscriptions *SubscriptionsMap
|
||||
subscriptions *subscription.SubscriptionsMap
|
||||
pm *peermanager.PeerManager
|
||||
}
|
||||
|
||||
type ContentFilter struct {
|
||||
Topic string
|
||||
ContentTopics []string
|
||||
}
|
||||
|
||||
type WakuFilterPushResult struct {
|
||||
Err error
|
||||
PeerID peer.ID
|
||||
}
|
||||
|
||||
var errNotStarted = errors.New("not started")
|
||||
var errAlreadyStarted = errors.New("already started")
|
||||
|
||||
// NewWakuFilterLightnode returns a new instance of Waku Filter struct setup according to the chosen parameter and options
|
||||
// Note that broadcaster is optional.
|
||||
// Takes an optional peermanager if WakuFilterLightnode is being created along with WakuNode.
|
||||
// If using libp2p host, then pass peermanager as nil
|
||||
func NewWakuFilterLightNode(broadcaster relay.Broadcaster, pm *peermanager.PeerManager,
|
||||
|
@@ -71,8 +60,8 @@ func NewWakuFilterLightNode(broadcaster relay.Broadcaster, pm *peermanager.PeerM
|
|||
wf.log = log.Named("filterv2-lightnode")
|
||||
wf.broadcaster = broadcaster
|
||||
wf.timesource = timesource
|
||||
wf.wg = &sync.WaitGroup{}
|
||||
wf.pm = pm
|
||||
wf.CommonService = protocol.NewCommonService()
|
||||
wf.metrics = newMetrics(reg)
|
||||
|
||||
return wf
|
||||
|
@@ -84,66 +73,42 @@ func (wf *WakuFilterLightNode) SetHost(h host.Host) {
|
|||
}
|
||||
|
||||
func (wf *WakuFilterLightNode) Start(ctx context.Context) error {
|
||||
wf.Lock()
|
||||
defer wf.Unlock()
|
||||
return wf.CommonService.Start(ctx, wf.start)
|
||||
|
||||
if wf.started {
|
||||
return errAlreadyStarted
|
||||
}
|
||||
}
|
||||
|
||||
wf.wg.Wait() // Wait for any goroutines to stop
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
wf.cancel = cancel
|
||||
wf.ctx = ctx
|
||||
wf.subscriptions = NewSubscriptionMap(wf.log)
|
||||
wf.started = true
|
||||
|
||||
wf.h.SetStreamHandlerMatch(FilterPushID_v20beta1, protocol.PrefixTextMatch(string(FilterPushID_v20beta1)), wf.onRequest(ctx))
|
||||
func (wf *WakuFilterLightNode) start() error {
|
||||
wf.subscriptions = subscription.NewSubscriptionMap(wf.log)
|
||||
wf.h.SetStreamHandlerMatch(FilterPushID_v20beta1, protocol.PrefixTextMatch(string(FilterPushID_v20beta1)), wf.onRequest(wf.Context()))
|
||||
|
||||
wf.log.Info("filter-push protocol started")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop unmounts the filter protocol
|
||||
func (wf *WakuFilterLightNode) Stop() {
|
||||
wf.Lock()
|
||||
defer wf.Unlock()
|
||||
|
||||
if !wf.started {
|
||||
return
|
||||
}
|
||||
|
||||
wf.cancel()
|
||||
|
||||
wf.h.RemoveStreamHandler(FilterPushID_v20beta1)
|
||||
|
||||
res, err := wf.unsubscribeAll(wf.ctx)
|
||||
if err != nil {
|
||||
wf.log.Warn("unsubscribing from full nodes", zap.Error(err))
|
||||
}
|
||||
|
||||
for r := range res {
|
||||
if r.Err != nil {
|
||||
wf.log.Warn("unsubscribing from full nodes", zap.Error(r.Err), logging.HostID("peerID", r.PeerID))
|
||||
wf.CommonService.Stop(func() {
|
||||
wf.h.RemoveStreamHandler(FilterPushID_v20beta1)
|
||||
res, err := wf.unsubscribeAll(wf.Context())
|
||||
if err != nil {
|
||||
wf.log.Warn("unsubscribing from full nodes", zap.Error(err))
|
||||
}
|
||||
|
||||
}
|
||||
for r := range res {
|
||||
if r.Err != nil {
|
||||
wf.log.Warn("unsubscribing from full nodes", zap.Error(r.Err), logging.HostID("peerID", r.PeerID))
|
||||
}
|
||||
|
||||
wf.subscriptions.Clear()
|
||||
|
||||
wf.started = false
|
||||
wf.cancel = nil
|
||||
|
||||
wf.wg.Wait()
|
||||
}
|
||||
//
|
||||
wf.subscriptions.Clear()
|
||||
})
|
||||
}
|
||||
|
||||
func (wf *WakuFilterLightNode) onRequest(ctx context.Context) func(s network.Stream) {
|
||||
return func(s network.Stream) {
|
||||
defer s.Close()
|
||||
logger := wf.log.With(logging.HostID("peer", s.Conn().RemotePeer()))
|
||||
|
||||
if !wf.subscriptions.IsSubscribedTo(s.Conn().RemotePeer()) {
|
||||
logger.Warn("received message push from unknown peer", logging.HostID("peerID", s.Conn().RemotePeer()))
|
||||
wf.metrics.RecordError(unknownPeerMessagePush)
|
||||
|
@@ -159,16 +124,29 @@ func (wf *WakuFilterLightNode) onRequest(ctx context.Context) func(s network.Str
|
|||
wf.metrics.RecordError(decodeRPCFailure)
|
||||
return
|
||||
}
|
||||
|
||||
if !wf.subscriptions.Has(s.Conn().RemotePeer(), messagePush.PubsubTopic, messagePush.WakuMessage.ContentTopic) {
|
||||
logger.Warn("received messagepush with invalid subscription parameters", logging.HostID("peerID", s.Conn().RemotePeer()), zap.String("topic", messagePush.PubsubTopic), zap.String("contentTopic", messagePush.WakuMessage.ContentTopic))
|
||||
pubSubTopic := ""
|
||||
//For now returning failure, this will get addressed with autosharding changes for filter.
|
||||
if messagePush.PubsubTopic == nil {
|
||||
pubSubTopic, err = protocol.GetPubSubTopicFromContentTopic(messagePush.WakuMessage.ContentTopic)
|
||||
if err != nil {
|
||||
logger.Error("could not derive pubSubTopic from contentTopic", zap.Error(err))
|
||||
wf.metrics.RecordError(decodeRPCFailure)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
pubSubTopic = *messagePush.PubsubTopic
|
||||
}
|
||||
if !wf.subscriptions.Has(s.Conn().RemotePeer(), protocol.NewContentFilter(pubSubTopic, messagePush.WakuMessage.ContentTopic)) {
|
||||
logger.Warn("received messagepush with invalid subscription parameters",
|
||||
logging.HostID("peerID", s.Conn().RemotePeer()), zap.String("topic", pubSubTopic),
|
||||
zap.String("contentTopic", messagePush.WakuMessage.ContentTopic))
|
||||
wf.metrics.RecordError(invalidSubscriptionMessage)
|
||||
return
|
||||
}
|
||||
|
||||
wf.metrics.RecordMessage()
|
||||
|
||||
wf.notify(s.Conn().RemotePeer(), messagePush.PubsubTopic, messagePush.WakuMessage)
|
||||
wf.notify(s.Conn().RemotePeer(), pubSubTopic, messagePush.WakuMessage)
|
||||
|
||||
logger.Info("received message push")
|
||||
}
|
||||
|
@@ -177,14 +155,16 @@ func (wf *WakuFilterLightNode) onRequest(ctx context.Context) func(s network.Str
|
|||
func (wf *WakuFilterLightNode) notify(remotePeerID peer.ID, pubsubTopic string, msg *wpb.WakuMessage) {
|
||||
envelope := protocol.NewEnvelope(msg, wf.timesource.Now().UnixNano(), pubsubTopic)
|
||||
|
||||
// Broadcasting message so it's stored
|
||||
wf.broadcaster.Submit(envelope)
|
||||
|
||||
if wf.broadcaster != nil {
|
||||
// Broadcasting message so it's stored
|
||||
wf.broadcaster.Submit(envelope)
|
||||
}
|
||||
// Notify filter subscribers
|
||||
wf.subscriptions.Notify(remotePeerID, envelope)
|
||||
}
|
||||
|
||||
func (wf *WakuFilterLightNode) request(ctx context.Context, params *FilterSubscribeParameters, reqType pb.FilterSubscribeRequest_FilterSubscribeType, contentFilter ContentFilter) error {
|
||||
func (wf *WakuFilterLightNode) request(ctx context.Context, params *FilterSubscribeParameters,
|
||||
reqType pb.FilterSubscribeRequest_FilterSubscribeType, contentFilter protocol.ContentFilter) error {
|
||||
conn, err := wf.h.NewStream(ctx, params.selectedPeer, FilterSubscribeID_v20beta1)
|
||||
if err != nil {
|
||||
wf.metrics.RecordError(dialFailure)
|
||||
|
@@ -198,8 +178,8 @@ func (wf *WakuFilterLightNode) request(ctx context.Context, params *FilterSubscr
|
|||
request := &pb.FilterSubscribeRequest{
|
||||
RequestId: hex.EncodeToString(params.requestID),
|
||||
FilterSubscribeType: reqType,
|
||||
PubsubTopic: contentFilter.Topic,
|
||||
ContentTopics: contentFilter.ContentTopics,
|
||||
PubsubTopic: &contentFilter.PubsubTopic,
|
||||
ContentTopics: contentFilter.ContentTopicsList(),
|
||||
}
|
||||
|
||||
wf.log.Debug("sending FilterSubscribeRequest", zap.Stringer("request", request))
|
||||
|
@@ -217,7 +197,6 @@ func (wf *WakuFilterLightNode) request(ctx context.Context, params *FilterSubscr
|
|||
wf.metrics.RecordError(decodeRPCFailure)
|
||||
return err
|
||||
}
|
||||
|
||||
if filterSubscribeResponse.RequestId != request.RequestId {
|
||||
wf.log.Error("requestID mismatch", zap.String("expected", request.RequestId), zap.String("received", filterSubscribeResponse.RequestId))
|
||||
wf.metrics.RecordError(requestIDMismatch)
|
||||
|
@@ -234,17 +213,38 @@ func (wf *WakuFilterLightNode) request(ctx context.Context, params *FilterSubscr
|
|||
return nil
|
||||
}
|
||||
|
||||
// This function converts a contentFilter into a map of pubSubTopics and corresponding contentTopics
|
||||
func contentFilterToPubSubTopicMap(contentFilter protocol.ContentFilter) (map[string][]string, error) {
|
||||
pubSubTopicMap := make(map[string][]string)
|
||||
|
||||
if contentFilter.PubsubTopic != "" {
|
||||
pubSubTopicMap[contentFilter.PubsubTopic] = contentFilter.ContentTopicsList()
|
||||
} else {
|
||||
//Parse the content-Topics to figure out shards.
|
||||
for _, cTopicString := range contentFilter.ContentTopicsList() {
|
||||
pTopicStr, err := protocol.GetPubSubTopicFromContentTopic(cTopicString)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, ok := pubSubTopicMap[pTopicStr]
|
||||
if !ok {
|
||||
pubSubTopicMap[pTopicStr] = []string{}
|
||||
}
|
||||
pubSubTopicMap[pTopicStr] = append(pubSubTopicMap[pTopicStr], cTopicString)
|
||||
}
|
||||
}
|
||||
return pubSubTopicMap, nil
|
||||
}
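For instance (topic strings are placeholders, and assumed to parse for autosharding), a filter without a pubsub topic is fanned out per derived shard:
cf := protocol.NewContentFilter("", "/app/1/chat/proto", "/app/1/media/proto")
pubSubTopicMap, err := contentFilterToPubSubTopicMap(cf)
if err == nil {
	for pTopic, cTopics := range pubSubTopicMap {
		_ = pTopic  // one FilterSubscribeRequest is sent per pubsub topic...
		_ = cTopics // ...carrying only the content topics that map to it
	}
}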
|
||||
|
||||
// Subscribe sets up a subscription to receive messages that match a specific content filter
|
||||
func (wf *WakuFilterLightNode) Subscribe(ctx context.Context, contentFilter ContentFilter, opts ...FilterSubscribeOption) (*SubscriptionDetails, error) {
|
||||
// If contentTopics passed result in different pubSub topics (due to Auto/Static sharding), then multiple subscription requests are sent to the peer.
|
||||
// This may change if Filterv2 protocol is updated to handle such a scenario in a single request.
|
||||
// Note: In case of partial failure, results are returned for successful subscriptions along with error indicating failed contentTopics.
|
||||
func (wf *WakuFilterLightNode) Subscribe(ctx context.Context, contentFilter protocol.ContentFilter, opts ...FilterSubscribeOption) ([]*subscription.SubscriptionDetails, error) {
|
||||
wf.RLock()
|
||||
defer wf.RUnlock()
|
||||
|
||||
if !wf.started {
|
||||
return nil, errNotStarted
|
||||
}
|
||||
|
||||
if contentFilter.Topic == "" {
|
||||
return nil, errors.New("topic is required")
|
||||
if err := wf.ErrOnNotRunning(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(contentFilter.ContentTopics) == 0 {
|
||||
|
@@ -271,32 +271,49 @@ func (wf *WakuFilterLightNode) Subscribe(ctx context.Context, contentFilter Cont
|
|||
return nil, ErrNoPeersAvailable
|
||||
}
|
||||
|
||||
err := wf.request(ctx, params, pb.FilterSubscribeRequest_SUBSCRIBE, contentFilter)
|
||||
pubSubTopicMap, err := contentFilterToPubSubTopicMap(contentFilter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
failedContentTopics := []string{}
|
||||
subscriptions := make([]*subscription.SubscriptionDetails, 0)
|
||||
for pubSubTopic, cTopics := range pubSubTopicMap {
|
||||
var cFilter protocol.ContentFilter
|
||||
cFilter.PubsubTopic = pubSubTopic
|
||||
cFilter.ContentTopics = protocol.NewContentTopicSet(cTopics...)
|
||||
err := wf.request(ctx, params, pb.FilterSubscribeRequest_SUBSCRIBE, cFilter)
|
||||
if err != nil {
|
||||
wf.log.Error("Failed to subscribe", zap.String("pubSubTopic", pubSubTopic), zap.Strings("contentTopics", cTopics),
|
||||
zap.Error(err))
|
||||
failedContentTopics = append(failedContentTopics, cTopics...)
|
||||
}
|
||||
subscriptions = append(subscriptions, wf.subscriptions.NewSubscription(params.selectedPeer, cFilter))
|
||||
}
|
||||
|
||||
return wf.subscriptions.NewSubscription(params.selectedPeer, contentFilter.Topic, contentFilter.ContentTopics), nil
|
||||
if len(failedContentTopics) > 0 {
|
||||
return subscriptions, fmt.Errorf("subscriptions failed for contentTopics: %s", strings.Join(failedContentTopics, ","))
|
||||
} else {
|
||||
return subscriptions, nil
|
||||
}
|
||||
}
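A hedged caller-side sketch of the new return shape (lightNode and the topic are placeholders; peer-selection options omitted):
// lightNode is a started *WakuFilterLightNode held by the caller.
subs, err := lightNode.Subscribe(ctx, protocol.NewContentFilter("", "/app/1/chat/proto"))
if err != nil {
	// Partial failure: entries in subs may still be live for the content topics that succeeded.
	fmt.Println("subscribe:", err)
}
fmt.Println("active filter subscriptions:", len(subs))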
|
||||
|
||||
// FilterSubscription is used to obtain an object from which you could receive messages received via filter protocol
|
||||
func (wf *WakuFilterLightNode) FilterSubscription(peerID peer.ID, contentFilter ContentFilter) (*SubscriptionDetails, error) {
|
||||
func (wf *WakuFilterLightNode) FilterSubscription(peerID peer.ID, contentFilter protocol.ContentFilter) (*subscription.SubscriptionDetails, error) {
|
||||
wf.RLock()
|
||||
defer wf.RUnlock()
|
||||
|
||||
if !wf.started {
|
||||
return nil, errNotStarted
|
||||
if err := wf.ErrOnNotRunning(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !wf.subscriptions.Has(peerID, contentFilter.Topic, contentFilter.ContentTopics...) {
|
||||
if !wf.subscriptions.Has(peerID, contentFilter) {
|
||||
return nil, errors.New("subscription does not exist")
|
||||
}
|
||||
|
||||
return wf.subscriptions.NewSubscription(peerID, contentFilter.Topic, contentFilter.ContentTopics), nil
|
||||
return wf.subscriptions.NewSubscription(peerID, contentFilter), nil
|
||||
}
|
||||
|
||||
func (wf *WakuFilterLightNode) getUnsubscribeParameters(opts ...FilterUnsubscribeOption) (*FilterUnsubscribeParameters, error) {
|
||||
params := new(FilterUnsubscribeParameters)
|
||||
func (wf *WakuFilterLightNode) getUnsubscribeParameters(opts ...FilterSubscribeOption) (*FilterSubscribeParameters, error) {
|
||||
params := new(FilterSubscribeParameters)
|
||||
params.log = wf.log
|
||||
opts = append(DefaultUnsubscribeOptions(), opts...)
|
||||
for _, opt := range opts {
|
||||
|
@@ -309,45 +326,42 @@ func (wf *WakuFilterLightNode) getUnsubscribeParameters(opts ...FilterUnsubscrib
|
|||
func (wf *WakuFilterLightNode) Ping(ctx context.Context, peerID peer.ID) error {
|
||||
wf.RLock()
|
||||
defer wf.RUnlock()
|
||||
|
||||
if !wf.started {
|
||||
return errNotStarted
|
||||
if err := wf.ErrOnNotRunning(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return wf.request(
|
||||
ctx,
|
||||
&FilterSubscribeParameters{selectedPeer: peerID},
|
||||
&FilterSubscribeParameters{selectedPeer: peerID, requestID: protocol.GenerateRequestID()},
|
||||
pb.FilterSubscribeRequest_SUBSCRIBER_PING,
|
||||
ContentFilter{})
|
||||
protocol.ContentFilter{})
|
||||
}
|
||||
|
||||
func (wf *WakuFilterLightNode) IsSubscriptionAlive(ctx context.Context, subscription *SubscriptionDetails) error {
|
||||
func (wf *WakuFilterLightNode) IsSubscriptionAlive(ctx context.Context, subscription *subscription.SubscriptionDetails) error {
|
||||
wf.RLock()
|
||||
defer wf.RUnlock()
|
||||
|
||||
if !wf.started {
|
||||
return errNotStarted
|
||||
if err := wf.ErrOnNotRunning(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return wf.Ping(ctx, subscription.PeerID)
|
||||
}
|
||||
|
||||
func (wf *WakuFilterLightNode) Subscriptions() []*SubscriptionDetails {
|
||||
func (wf *WakuFilterLightNode) Subscriptions() []*subscription.SubscriptionDetails {
|
||||
wf.RLock()
|
||||
defer wf.RUnlock()
|
||||
|
||||
if !wf.started {
|
||||
if err := wf.ErrOnNotRunning(); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
wf.subscriptions.RLock()
|
||||
defer wf.subscriptions.RUnlock()
|
||||
|
||||
var output []*SubscriptionDetails
|
||||
var output []*subscription.SubscriptionDetails
|
||||
|
||||
for _, peerSubscription := range wf.subscriptions.items {
|
||||
for _, subscriptionPerTopic := range peerSubscription.subscriptionsPerTopic {
|
||||
for _, subscriptionDetail := range subscriptionPerTopic {
|
||||
for _, peerSubscription := range wf.subscriptions.Items {
|
||||
for _, subscriptions := range peerSubscription.SubsPerPubsubTopic {
|
||||
for _, subscriptionDetail := range subscriptions {
|
||||
output = append(output, subscriptionDetail)
|
||||
}
|
||||
}
|
||||
|
@ -356,48 +370,40 @@ func (wf *WakuFilterLightNode) Subscriptions() []*SubscriptionDetails {
|
|||
return output
|
||||
}
|
||||
|
||||
func (wf *WakuFilterLightNode) cleanupSubscriptions(peerID peer.ID, contentFilter ContentFilter) {
|
||||
func (wf *WakuFilterLightNode) cleanupSubscriptions(peerID peer.ID, contentFilter protocol.ContentFilter) {
|
||||
wf.subscriptions.Lock()
|
||||
defer wf.subscriptions.Unlock()
|
||||
|
||||
peerSubscription, ok := wf.subscriptions.items[peerID]
|
||||
peerSubscription, ok := wf.subscriptions.Items[peerID]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
subscriptionDetailList, ok := peerSubscription.subscriptionsPerTopic[contentFilter.Topic]
|
||||
subscriptionDetailList, ok := peerSubscription.SubsPerPubsubTopic[contentFilter.PubsubTopic]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
for subscriptionDetailID, subscriptionDetail := range subscriptionDetailList {
|
||||
subscriptionDetail.Remove(contentFilter.ContentTopics...)
|
||||
if len(subscriptionDetail.ContentTopics) == 0 {
|
||||
subscriptionDetail.Remove(contentFilter.ContentTopicsList()...)
|
||||
if len(subscriptionDetail.ContentFilter.ContentTopics) == 0 {
|
||||
delete(subscriptionDetailList, subscriptionDetailID)
|
||||
} else {
|
||||
subscriptionDetailList[subscriptionDetailID] = subscriptionDetail
|
||||
subscriptionDetail.CloseC()
|
||||
}
|
||||
}
|
||||
|
||||
if len(subscriptionDetailList) == 0 {
|
||||
delete(wf.subscriptions.items[peerID].subscriptionsPerTopic, contentFilter.Topic)
|
||||
} else {
|
||||
wf.subscriptions.items[peerID].subscriptionsPerTopic[contentFilter.Topic] = subscriptionDetailList
|
||||
delete(wf.subscriptions.Items[peerID].SubsPerPubsubTopic, contentFilter.PubsubTopic)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Unsubscribe is used to stop receiving messages from a peer that match a content filter
|
||||
func (wf *WakuFilterLightNode) Unsubscribe(ctx context.Context, contentFilter ContentFilter, opts ...FilterUnsubscribeOption) (<-chan WakuFilterPushResult, error) {
|
||||
func (wf *WakuFilterLightNode) Unsubscribe(ctx context.Context, contentFilter protocol.ContentFilter, opts ...FilterSubscribeOption) (<-chan WakuFilterPushResult, error) {
|
||||
wf.RLock()
|
||||
defer wf.RUnlock()
|
||||
|
||||
if !wf.started {
|
||||
return nil, errNotStarted
|
||||
}
|
||||
|
||||
if contentFilter.Topic == "" {
|
||||
return nil, errors.New("topic is required")
|
||||
if err := wf.ErrOnNotRunning(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(contentFilter.ContentTopics) == 0 {
|
||||
|
@@ -413,57 +419,49 @@ func (wf *WakuFilterLightNode) Unsubscribe(ctx context.Context, contentFilter Co
|
|||
return nil, err
|
||||
}
|
||||
|
||||
resultChan := make(chan WakuFilterPushResult, len(wf.subscriptions.items))
|
||||
for peerID := range wf.subscriptions.items {
|
||||
if params.selectedPeer != "" && peerID != params.selectedPeer {
|
||||
continue
|
||||
}
|
||||
pubSubTopicMap, err := contentFilterToPubSubTopicMap(contentFilter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resultChan := make(chan WakuFilterPushResult, len(wf.subscriptions.Items))
|
||||
for pTopic, cTopics := range pubSubTopicMap {
|
||||
cFilter := protocol.NewContentFilter(pTopic, cTopics...)
|
||||
for peerID := range wf.subscriptions.Items {
|
||||
if params.selectedPeer != "" && peerID != params.selectedPeer {
|
||||
continue
|
||||
}
|
||||
|
||||
subscriptions, ok := wf.subscriptions.items[peerID]
|
||||
if !ok || subscriptions == nil {
|
||||
continue
|
||||
}
|
||||
subscriptions, ok := wf.subscriptions.Items[peerID]
|
||||
if !ok || subscriptions == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
wf.cleanupSubscriptions(peerID, contentFilter)
|
||||
if len(subscriptions.subscriptionsPerTopic) == 0 {
|
||||
delete(wf.subscriptions.items, peerID)
|
||||
}
|
||||
|
||||
if params.wg != nil {
|
||||
params.wg.Add(1)
|
||||
}
|
||||
|
||||
go func(peerID peer.ID) {
|
||||
defer func() {
|
||||
if params.wg != nil {
|
||||
params.wg.Done()
|
||||
}
|
||||
}()
|
||||
|
||||
err := wf.request(
|
||||
ctx,
|
||||
&FilterSubscribeParameters{selectedPeer: peerID, requestID: params.requestID},
|
||||
pb.FilterSubscribeRequest_UNSUBSCRIBE,
|
||||
contentFilter)
|
||||
if err != nil {
|
||||
ferr, ok := err.(*FilterError)
|
||||
if ok && ferr.Code == http.StatusNotFound {
|
||||
wf.log.Warn("peer does not have a subscription", logging.HostID("peerID", peerID), zap.Error(err))
|
||||
} else {
|
||||
wf.log.Error("could not unsubscribe from peer", logging.HostID("peerID", peerID), zap.Error(err))
|
||||
return
|
||||
}
|
||||
wf.cleanupSubscriptions(peerID, cFilter)
|
||||
if len(subscriptions.SubsPerPubsubTopic) == 0 {
|
||||
delete(wf.subscriptions.Items, peerID)
|
||||
}
|
||||
|
||||
if params.wg != nil {
|
||||
resultChan <- WakuFilterPushResult{
|
||||
Err: err,
|
||||
PeerID: peerID,
|
||||
}
|
||||
params.wg.Add(1)
|
||||
}
|
||||
}(peerID)
|
||||
}
|
||||
|
||||
go func(peerID peer.ID) {
|
||||
defer func() {
|
||||
if params.wg != nil {
|
||||
params.wg.Done()
|
||||
}
|
||||
}()
|
||||
err := wf.unsubscribeFromServer(ctx, &FilterSubscribeParameters{selectedPeer: peerID, requestID: params.requestID}, cFilter)
|
||||
|
||||
if params.wg != nil {
|
||||
resultChan <- WakuFilterPushResult{
|
||||
Err: err,
|
||||
PeerID: peerID,
|
||||
}
|
||||
}
|
||||
}(peerID)
|
||||
}
|
||||
}
|
||||
if params.wg != nil {
|
||||
params.wg.Wait()
|
||||
}
|
||||
|
@@ -473,26 +471,55 @@ func (wf *WakuFilterLightNode) Unsubscribe(ctx context.Context, contentFilter Co
|
|||
return resultChan, nil
|
||||
}
|
||||
|
||||
// Unsubscribe is used to stop receiving messages from a peer that match a content filter
|
||||
func (wf *WakuFilterLightNode) UnsubscribeWithSubscription(ctx context.Context, sub *SubscriptionDetails, opts ...FilterUnsubscribeOption) (<-chan WakuFilterPushResult, error) {
|
||||
// UnsubscribeWithSubscription is used to close a particular subscription
|
||||
// If there are no more subscriptions matching the passed [peer, contentFilter] pair,
|
||||
// server unsubscribe is also performed
|
||||
func (wf *WakuFilterLightNode) UnsubscribeWithSubscription(ctx context.Context, sub *subscription.SubscriptionDetails,
|
||||
opts ...FilterSubscribeOption) (<-chan WakuFilterPushResult, error) {
|
||||
wf.RLock()
|
||||
defer wf.RUnlock()
|
||||
|
||||
if !wf.started {
|
||||
return nil, errNotStarted
|
||||
if err := wf.ErrOnNotRunning(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var contentTopics []string
|
||||
for k := range sub.ContentTopics {
|
||||
contentTopics = append(contentTopics, k)
|
||||
params, err := wf.getUnsubscribeParameters(opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
opts = append(opts, Peer(sub.PeerID))
|
||||
// Close this sub
|
||||
sub.Close()
|
||||
|
||||
resultChan := make(chan WakuFilterPushResult, 1)
|
||||
|
||||
if !wf.subscriptions.Has(sub.PeerID, sub.ContentFilter) {
|
||||
// Last sub for this [peer, contentFilter] pair
|
||||
err = wf.unsubscribeFromServer(ctx, &FilterSubscribeParameters{selectedPeer: sub.PeerID, requestID: params.requestID}, sub.ContentFilter)
|
||||
resultChan <- WakuFilterPushResult{
|
||||
Err: err,
|
||||
PeerID: sub.PeerID,
|
||||
}
|
||||
}
|
||||
close(resultChan)
|
||||
return resultChan, err
|
||||
|
||||
return wf.Unsubscribe(ctx, ContentFilter{Topic: sub.PubsubTopic, ContentTopics: contentTopics}, opts...)
|
||||
}
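
A sketch of how a client might drive the new UnsubscribeWithSubscription from outside the package. The SubscriptionDetails import path is assumed from the vendored layout (waku/v2/protocol/subscription); variable names and logging are illustrative only.

package main

import (
	"context"
	"log"

	"github.com/waku-org/go-waku/waku/v2/protocol/filter"
	"github.com/waku-org/go-waku/waku/v2/protocol/subscription"
)

// closeFilterSubscription closes one subscription; per the comment above,
// the server-side unsubscribe only happens when this was the last
// subscription for the [peer, contentFilter] pair.
func closeFilterSubscription(ctx context.Context, wf *filter.WakuFilterLightNode, sub *subscription.SubscriptionDetails) {
	resultCh, err := wf.UnsubscribeWithSubscription(ctx, sub)
	if err != nil {
		log.Printf("unsubscribe failed: %v", err)
		return
	}
	// The result channel is buffered and closed by the callee, so the loop ends.
	for res := range resultCh {
		if res.Err != nil {
			log.Printf("server unsubscribe for peer %s failed: %v", res.PeerID, res.Err)
		}
	}
}
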
|
||||
|
||||
func (wf *WakuFilterLightNode) unsubscribeAll(ctx context.Context, opts ...FilterUnsubscribeOption) (<-chan WakuFilterPushResult, error) {
|
||||
func (wf *WakuFilterLightNode) unsubscribeFromServer(ctx context.Context, params *FilterSubscribeParameters, cFilter protocol.ContentFilter) error {
|
||||
err := wf.request(ctx, params, pb.FilterSubscribeRequest_UNSUBSCRIBE, cFilter)
|
||||
if err != nil {
|
||||
ferr, ok := err.(*FilterError)
|
||||
if ok && ferr.Code == http.StatusNotFound {
|
||||
wf.log.Warn("peer does not have a subscription", logging.HostID("peerID", params.selectedPeer), zap.Error(err))
|
||||
} else {
|
||||
wf.log.Error("could not unsubscribe from peer", logging.HostID("peerID", params.selectedPeer), zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (wf *WakuFilterLightNode) unsubscribeAll(ctx context.Context, opts ...FilterSubscribeOption) (<-chan WakuFilterPushResult, error) {
|
||||
params, err := wf.getUnsubscribeParameters(opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -501,14 +528,14 @@ func (wf *WakuFilterLightNode) unsubscribeAll(ctx context.Context, opts ...Filte
|
|||
wf.subscriptions.Lock()
|
||||
defer wf.subscriptions.Unlock()
|
||||
|
||||
resultChan := make(chan WakuFilterPushResult, len(wf.subscriptions.items))
|
||||
resultChan := make(chan WakuFilterPushResult, len(wf.subscriptions.Items))
|
||||
|
||||
for peerID := range wf.subscriptions.items {
|
||||
for peerID := range wf.subscriptions.Items {
|
||||
if params.selectedPeer != "" && peerID != params.selectedPeer {
|
||||
continue
|
||||
}
|
||||
|
||||
delete(wf.subscriptions.items, peerID)
|
||||
delete(wf.subscriptions.Items, peerID)
|
||||
|
||||
if params.wg != nil {
|
||||
params.wg.Add(1)
|
||||
|
@ -525,7 +552,7 @@ func (wf *WakuFilterLightNode) unsubscribeAll(ctx context.Context, opts ...Filte
|
|||
ctx,
|
||||
&FilterSubscribeParameters{selectedPeer: peerID, requestID: params.requestID},
|
||||
pb.FilterSubscribeRequest_UNSUBSCRIBE_ALL,
|
||||
ContentFilter{})
|
||||
protocol.ContentFilter{})
|
||||
if err != nil {
|
||||
wf.log.Error("could not unsubscribe from peer", logging.HostID("peerID", peerID), zap.Error(err))
|
||||
}
|
||||
|
@ -548,12 +575,11 @@ func (wf *WakuFilterLightNode) unsubscribeAll(ctx context.Context, opts ...Filte
|
|||
}
|
||||
|
||||
// UnsubscribeAll is used to stop receiving messages from peer(s). It does not close subscriptions
|
||||
func (wf *WakuFilterLightNode) UnsubscribeAll(ctx context.Context, opts ...FilterUnsubscribeOption) (<-chan WakuFilterPushResult, error) {
|
||||
func (wf *WakuFilterLightNode) UnsubscribeAll(ctx context.Context, opts ...FilterSubscribeOption) (<-chan WakuFilterPushResult, error) {
|
||||
wf.RLock()
|
||||
defer wf.RUnlock()
|
||||
|
||||
if !wf.started {
|
||||
return nil, errNotStarted
|
||||
if err := wf.ErrOnNotRunning(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return wf.unsubscribeAll(ctx, opts...)
|
||||
|
|
|
@ -15,18 +15,16 @@ import (
|
|||
|
||||
type (
|
||||
FilterSubscribeParameters struct {
|
||||
host host.Host
|
||||
selectedPeer peer.ID
|
||||
pm *peermanager.PeerManager
|
||||
requestID []byte
|
||||
log *zap.Logger
|
||||
}
|
||||
|
||||
FilterUnsubscribeParameters struct {
|
||||
// Subscribe-specific
|
||||
host host.Host
|
||||
pm *peermanager.PeerManager
|
||||
|
||||
// Unsubscribe-specific
|
||||
unsubscribeAll bool
|
||||
selectedPeer peer.ID
|
||||
requestID []byte
|
||||
log *zap.Logger
|
||||
wg *sync.WaitGroup
|
||||
}
|
||||
|
||||
|
@ -37,8 +35,7 @@ type (
|
|||
|
||||
Option func(*FilterParameters)
|
||||
|
||||
FilterSubscribeOption func(*FilterSubscribeParameters)
|
||||
FilterUnsubscribeOption func(*FilterUnsubscribeParameters)
|
||||
FilterSubscribeOption func(*FilterSubscribeParameters)
|
||||
)
|
||||
|
||||
func WithTimeout(timeout time.Duration) Option {
|
||||
|
@ -63,7 +60,7 @@ func WithAutomaticPeerSelection(fromThesePeers ...peer.ID) FilterSubscribeOption
|
|||
if params.pm == nil {
|
||||
p, err = utils.SelectPeer(params.host, FilterSubscribeID_v20beta1, fromThesePeers, params.log)
|
||||
} else {
|
||||
p, err = params.pm.SelectPeer(FilterSubscribeID_v20beta1, fromThesePeers, params.log)
|
||||
p, err = params.pm.SelectPeer(FilterSubscribeID_v20beta1, "", fromThesePeers...)
|
||||
}
|
||||
if err == nil {
|
||||
params.selectedPeer = p
|
||||
|
@ -89,7 +86,7 @@ func WithFastestPeerSelection(ctx context.Context, fromThesePeers ...peer.ID) Fi
|
|||
}
|
||||
|
||||
// WithRequestID is an option to set a specific request ID to be used when
|
||||
// creating a filter subscription
|
||||
// creating/removing a filter subscription
|
||||
func WithRequestID(requestID []byte) FilterSubscribeOption {
|
||||
return func(params *FilterSubscribeParameters) {
|
||||
params.requestID = requestID
|
||||
|
@ -100,7 +97,7 @@ func WithRequestID(requestID []byte) FilterSubscribeOption {
|
|||
// when creating a filter subscription
|
||||
func WithAutomaticRequestID() FilterSubscribeOption {
|
||||
return func(params *FilterSubscribeParameters) {
|
||||
params.requestID = protocol.GenerateRequestId()
|
||||
params.requestID = protocol.GenerateRequestID()
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -111,51 +108,31 @@ func DefaultSubscriptionOptions() []FilterSubscribeOption {
|
|||
}
|
||||
}
|
||||
|
||||
func UnsubscribeAll() FilterUnsubscribeOption {
|
||||
return func(params *FilterUnsubscribeParameters) {
|
||||
func UnsubscribeAll() FilterSubscribeOption {
|
||||
return func(params *FilterSubscribeParameters) {
|
||||
params.unsubscribeAll = true
|
||||
}
|
||||
}
|
||||
|
||||
func Peer(p peer.ID) FilterUnsubscribeOption {
|
||||
return func(params *FilterUnsubscribeParameters) {
|
||||
params.selectedPeer = p
|
||||
}
|
||||
}
|
||||
|
||||
// RequestID is an option to set a specific request ID to be used when
|
||||
// removing a subscription from a filter node
|
||||
func RequestID(requestID []byte) FilterUnsubscribeOption {
|
||||
return func(params *FilterUnsubscribeParameters) {
|
||||
params.requestID = requestID
|
||||
}
|
||||
}
|
||||
|
||||
func AutomaticRequestId() FilterUnsubscribeOption {
|
||||
return func(params *FilterUnsubscribeParameters) {
|
||||
params.requestID = protocol.GenerateRequestId()
|
||||
}
|
||||
}
|
||||
|
||||
// WithWaitGroup allos specigying a waitgroup to wait until all
|
||||
// WithWaitGroup allows specifying a waitgroup to wait until all
|
||||
// unsubscribe requests are complete before the function is complete
|
||||
func WithWaitGroup(wg *sync.WaitGroup) FilterUnsubscribeOption {
|
||||
return func(params *FilterUnsubscribeParameters) {
|
||||
func WithWaitGroup(wg *sync.WaitGroup) FilterSubscribeOption {
|
||||
return func(params *FilterSubscribeParameters) {
|
||||
params.wg = wg
|
||||
}
|
||||
}
|
||||
|
||||
// DontWait is used to fire and forget an unsubscription, and don't
|
||||
// care about the results of it
|
||||
func DontWait() FilterUnsubscribeOption {
|
||||
return func(params *FilterUnsubscribeParameters) {
|
||||
func DontWait() FilterSubscribeOption {
|
||||
return func(params *FilterSubscribeParameters) {
|
||||
params.wg = nil
|
||||
}
|
||||
}
|
||||
|
||||
func DefaultUnsubscribeOptions() []FilterUnsubscribeOption {
|
||||
return []FilterUnsubscribeOption{
|
||||
AutomaticRequestId(),
|
||||
func DefaultUnsubscribeOptions() []FilterSubscribeOption {
|
||||
return []FilterSubscribeOption{
|
||||
WithAutomaticRequestID(),
|
||||
WithWaitGroup(&sync.WaitGroup{}),
|
||||
}
|
||||
}
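
With FilterUnsubscribeOption folded into FilterSubscribeOption, subscribe and unsubscribe calls now share one option type. A small usage sketch, assuming an already started light node; the option and method names are the ones introduced above.

package main

import (
	"context"

	"github.com/waku-org/go-waku/waku/v2/protocol/filter"
)

// dropAllSubscriptions removes every server-side subscription held by the
// light node. DontWait makes it fire-and-forget; passing
// filter.DefaultUnsubscribeOptions()... instead would wait for and report
// the per-peer results.
func dropAllSubscriptions(ctx context.Context, wf *filter.WakuFilterLightNode) error {
	_, err := wf.UnsubscribeAll(ctx, filter.DontWait())
	return err
}
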
74
vendor/github.com/waku-org/go-waku/waku/v2/protocol/filter/pb/waku_filter_v2.pb.go
generated
vendored
|
@ -1,7 +1,7 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.26.0
|
||||
// protoc v3.21.12
|
||||
// protoc-gen-go v1.31.0
|
||||
// protoc v4.23.4
|
||||
// source: waku_filter_v2.proto
|
||||
|
||||
// 12/WAKU2-FILTER rfc: https://rfc.vac.dev/spec/12/
|
||||
|
@ -84,7 +84,7 @@ type FilterSubscribeRequest struct {
|
|||
RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
|
||||
FilterSubscribeType FilterSubscribeRequest_FilterSubscribeType `protobuf:"varint,2,opt,name=filter_subscribe_type,json=filterSubscribeType,proto3,enum=pb.FilterSubscribeRequest_FilterSubscribeType" json:"filter_subscribe_type,omitempty"`
|
||||
// Filter criteria
|
||||
PubsubTopic string `protobuf:"bytes,10,opt,name=pubsub_topic,json=pubsubTopic,proto3" json:"pubsub_topic,omitempty"`
|
||||
PubsubTopic *string `protobuf:"bytes,10,opt,name=pubsub_topic,json=pubsubTopic,proto3,oneof" json:"pubsub_topic,omitempty"`
|
||||
ContentTopics []string `protobuf:"bytes,11,rep,name=content_topics,json=contentTopics,proto3" json:"content_topics,omitempty"`
|
||||
}
|
||||
|
||||
|
@ -135,8 +135,8 @@ func (x *FilterSubscribeRequest) GetFilterSubscribeType() FilterSubscribeRequest
|
|||
}
|
||||
|
||||
func (x *FilterSubscribeRequest) GetPubsubTopic() string {
|
||||
if x != nil {
|
||||
return x.PubsubTopic
|
||||
if x != nil && x.PubsubTopic != nil {
|
||||
return *x.PubsubTopic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
@ -218,7 +218,7 @@ type MessagePushV2 struct {
|
|||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
WakuMessage *pb.WakuMessage `protobuf:"bytes,1,opt,name=waku_message,json=wakuMessage,proto3" json:"waku_message,omitempty"`
|
||||
PubsubTopic string `protobuf:"bytes,2,opt,name=pubsub_topic,json=pubsubTopic,proto3" json:"pubsub_topic,omitempty"`
|
||||
PubsubTopic *string `protobuf:"bytes,2,opt,name=pubsub_topic,json=pubsubTopic,proto3,oneof" json:"pubsub_topic,omitempty"`
|
||||
}
|
||||
|
||||
func (x *MessagePushV2) Reset() {
|
||||
|
@ -261,8 +261,8 @@ func (x *MessagePushV2) GetWakuMessage() *pb.WakuMessage {
|
|||
}
|
||||
|
||||
func (x *MessagePushV2) GetPubsubTopic() string {
|
||||
if x != nil {
|
||||
return x.PubsubTopic
|
||||
if x != nil && x.PubsubTopic != nil {
|
||||
return *x.PubsubTopic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
@ -272,7 +272,7 @@ var File_waku_filter_v2_proto protoreflect.FileDescriptor
|
|||
var file_waku_filter_v2_proto_rawDesc = []byte{
|
||||
0x0a, 0x14, 0x77, 0x61, 0x6b, 0x75, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x32,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x12, 0x77, 0x61, 0x6b, 0x75,
|
||||
0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc6,
|
||||
0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xdc,
|
||||
0x02, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69,
|
||||
0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72,
|
||||
|
@ -282,33 +282,35 @@ var file_waku_filter_v2_proto_rawDesc = []byte{
|
|||
0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72,
|
||||
0x69, 0x62, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53,
|
||||
0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c,
|
||||
0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x26, 0x0a, 0x0c,
|
||||
0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x0a, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x0b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12,
|
||||
0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63,
|
||||
0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
|
||||
0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x22, 0x5f, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72,
|
||||
0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x13, 0x0a,
|
||||
0x0f, 0x53, 0x55, 0x42, 0x53, 0x43, 0x52, 0x49, 0x42, 0x45, 0x52, 0x5f, 0x50, 0x49, 0x4e, 0x47,
|
||||
0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x55, 0x42, 0x53, 0x43, 0x52, 0x49, 0x42, 0x45, 0x10,
|
||||
0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x55, 0x42, 0x53, 0x43, 0x52, 0x49, 0x42, 0x45,
|
||||
0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x53, 0x55, 0x42, 0x53, 0x43, 0x52, 0x49, 0x42,
|
||||
0x45, 0x5f, 0x41, 0x4c, 0x4c, 0x10, 0x03, 0x22, 0x7a, 0x0a, 0x17, 0x46, 0x69, 0x6c, 0x74, 0x65,
|
||||
0x72, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49,
|
||||
0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65,
|
||||
0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f,
|
||||
0x64, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x64, 0x65, 0x73,
|
||||
0x63, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44,
|
||||
0x65, 0x73, 0x63, 0x22, 0x66, 0x0a, 0x0d, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x50, 0x75,
|
||||
0x73, 0x68, 0x56, 0x32, 0x12, 0x32, 0x0a, 0x0c, 0x77, 0x61, 0x6b, 0x75, 0x5f, 0x6d, 0x65, 0x73,
|
||||
0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x62, 0x2e,
|
||||
0x57, 0x61, 0x6b, 0x75, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x77, 0x61, 0x6b,
|
||||
0x75, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x75, 0x62, 0x73,
|
||||
0x75, 0x62, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
|
||||
0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x33,
|
||||
0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x54, 0x6f, 0x70, 0x69,
|
||||
0x63, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f,
|
||||
0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x6f,
|
||||
0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x22, 0x5f, 0x0a, 0x13, 0x46,
|
||||
0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x79,
|
||||
0x70, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x55, 0x42, 0x53, 0x43, 0x52, 0x49, 0x42, 0x45, 0x52,
|
||||
0x5f, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x55, 0x42, 0x53, 0x43,
|
||||
0x52, 0x49, 0x42, 0x45, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x55, 0x42, 0x53,
|
||||
0x43, 0x52, 0x49, 0x42, 0x45, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x53, 0x55, 0x42,
|
||||
0x53, 0x43, 0x52, 0x49, 0x42, 0x45, 0x5f, 0x41, 0x4c, 0x4c, 0x10, 0x03, 0x42, 0x0f, 0x0a, 0x0d,
|
||||
0x5f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x7a, 0x0a,
|
||||
0x17, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65,
|
||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75,
|
||||
0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74,
|
||||
0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74,
|
||||
0x75, 0x73, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73,
|
||||
0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x73, 0x63, 0x22, 0x7c, 0x0a, 0x0d, 0x4d, 0x65, 0x73,
|
||||
0x73, 0x61, 0x67, 0x65, 0x50, 0x75, 0x73, 0x68, 0x56, 0x32, 0x12, 0x32, 0x0a, 0x0c, 0x77, 0x61,
|
||||
0x6b, 0x75, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
|
||||
0x32, 0x0f, 0x2e, 0x70, 0x62, 0x2e, 0x57, 0x61, 0x6b, 0x75, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
|
||||
0x65, 0x52, 0x0b, 0x77, 0x61, 0x6b, 0x75, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x26,
|
||||
0x0a, 0x0c, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02,
|
||||
0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x54, 0x6f,
|
||||
0x70, 0x69, 0x63, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x70, 0x75, 0x62, 0x73, 0x75,
|
||||
0x62, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
|
@ -385,6 +387,8 @@ func file_waku_filter_v2_proto_init() {
|
|||
}
|
||||
}
|
||||
}
|
||||
file_waku_filter_v2_proto_msgTypes[0].OneofWrappers = []interface{}{}
|
||||
file_waku_filter_v2_proto_msgTypes[2].OneofWrappers = []interface{}{}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
4
vendor/github.com/waku-org/go-waku/waku/v2/protocol/filter/pb/waku_filter_v2.proto
generated
vendored
|
@ -19,7 +19,7 @@ message FilterSubscribeRequest {
|
|||
FilterSubscribeType filter_subscribe_type = 2;
|
||||
|
||||
// Filter criteria
|
||||
string pubsub_topic = 10;
|
||||
optional string pubsub_topic = 10;
|
||||
repeated string content_topics = 11;
|
||||
}
|
||||
|
||||
|
@ -32,5 +32,5 @@ message FilterSubscribeResponse {
|
|||
// Protocol identifier: /vac/waku/filter-push/2.0.0-beta1
|
||||
message MessagePushV2 {
|
||||
WakuMessage waku_message = 1;
|
||||
string pubsub_topic = 2;
|
||||
optional string pubsub_topic = 2;
|
||||
}
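
Marking pubsub_topic as optional turns the generated Go field into a *string, which is what lets the full node distinguish an unset topic (nil) from an explicitly empty one. A short sketch with the generated types; the topic values are examples only.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"

	"github.com/waku-org/go-waku/waku/v2/protocol/filter/pb"
)

func main() {
	req := &pb.FilterSubscribeRequest{
		FilterSubscribeType: pb.FilterSubscribeRequest_SUBSCRIBE,
		PubsubTopic:         proto.String("/waku/2/rs/16/32"), // optional field -> *string
		ContentTopics:       []string{"/app/1/chat/proto"},
	}

	// The generated getter dereferences the pointer safely; the pointer itself
	// is what the server now checks with `request.PubsubTopic == nil`.
	fmt.Println(req.GetPubsubTopic(), req.PubsubTopic != nil)
}
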
|
|
@ -6,7 +6,6 @@ import (
|
|||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
|
@ -31,13 +30,11 @@ const peerHasNoSubscription = "peer has no subscriptions"
|
|||
|
||||
type (
|
||||
WakuFilterFullNode struct {
|
||||
cancel context.CancelFunc
|
||||
h host.Host
|
||||
msgSub relay.Subscription
|
||||
metrics Metrics
|
||||
wg *sync.WaitGroup
|
||||
log *zap.Logger
|
||||
|
||||
*protocol.CommonService
|
||||
subscriptions *SubscribersMap
|
||||
|
||||
maxSubscriptions int
|
||||
|
@ -56,7 +53,7 @@ func NewWakuFilterFullNode(timesource timesource.Timesource, reg prometheus.Regi
|
|||
opt(params)
|
||||
}
|
||||
|
||||
wf.wg = &sync.WaitGroup{}
|
||||
wf.CommonService = protocol.NewCommonService()
|
||||
wf.metrics = newMetrics(reg)
|
||||
wf.subscriptions = NewSubscribersMap(params.Timeout)
|
||||
wf.maxSubscriptions = params.MaxSubscribers
|
||||
|
@ -70,19 +67,19 @@ func (wf *WakuFilterFullNode) SetHost(h host.Host) {
|
|||
}
|
||||
|
||||
func (wf *WakuFilterFullNode) Start(ctx context.Context, sub relay.Subscription) error {
|
||||
wf.wg.Wait() // Wait for any goroutines to stop
|
||||
return wf.CommonService.Start(ctx, func() error {
|
||||
return wf.start(sub)
|
||||
})
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
func (wf *WakuFilterFullNode) start(sub relay.Subscription) error {
|
||||
wf.h.SetStreamHandlerMatch(FilterSubscribeID_v20beta1, protocol.PrefixTextMatch(string(FilterSubscribeID_v20beta1)), wf.onRequest(wf.Context()))
|
||||
|
||||
wf.h.SetStreamHandlerMatch(FilterSubscribeID_v20beta1, protocol.PrefixTextMatch(string(FilterSubscribeID_v20beta1)), wf.onRequest(ctx))
|
||||
|
||||
wf.cancel = cancel
|
||||
wf.msgSub = sub
|
||||
wf.wg.Add(1)
|
||||
go wf.filterListener(ctx)
|
||||
wf.WaitGroup().Add(1)
|
||||
go wf.filterListener(wf.Context())
|
||||
|
||||
wf.log.Info("filter-subscriber protocol started")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -107,13 +104,13 @@ func (wf *WakuFilterFullNode) onRequest(ctx context.Context) func(s network.Stre
|
|||
|
||||
switch subscribeRequest.FilterSubscribeType {
|
||||
case pb.FilterSubscribeRequest_SUBSCRIBE:
|
||||
wf.subscribe(ctx, s, logger, subscribeRequest)
|
||||
wf.subscribe(ctx, s, subscribeRequest)
|
||||
case pb.FilterSubscribeRequest_SUBSCRIBER_PING:
|
||||
wf.ping(ctx, s, logger, subscribeRequest)
|
||||
wf.ping(ctx, s, subscribeRequest)
|
||||
case pb.FilterSubscribeRequest_UNSUBSCRIBE:
|
||||
wf.unsubscribe(ctx, s, logger, subscribeRequest)
|
||||
wf.unsubscribe(ctx, s, subscribeRequest)
|
||||
case pb.FilterSubscribeRequest_UNSUBSCRIBE_ALL:
|
||||
wf.unsubscribeAll(ctx, s, logger, subscribeRequest)
|
||||
wf.unsubscribeAll(ctx, s, subscribeRequest)
|
||||
}
|
||||
|
||||
wf.metrics.RecordRequest(subscribeRequest.FilterSubscribeType.String(), time.Since(start))
|
||||
|
@ -142,7 +139,7 @@ func (wf *WakuFilterFullNode) reply(ctx context.Context, s network.Stream, reque
|
|||
}
|
||||
}
|
||||
|
||||
func (wf *WakuFilterFullNode) ping(ctx context.Context, s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequest) {
|
||||
func (wf *WakuFilterFullNode) ping(ctx context.Context, s network.Stream, request *pb.FilterSubscribeRequest) {
|
||||
exists := wf.subscriptions.Has(s.Conn().RemotePeer())
|
||||
|
||||
if exists {
|
||||
|
@ -152,8 +149,8 @@ func (wf *WakuFilterFullNode) ping(ctx context.Context, s network.Stream, logger
|
|||
}
|
||||
}
|
||||
|
||||
func (wf *WakuFilterFullNode) subscribe(ctx context.Context, s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequest) {
|
||||
if request.PubsubTopic == "" {
|
||||
func (wf *WakuFilterFullNode) subscribe(ctx context.Context, s network.Stream, request *pb.FilterSubscribeRequest) {
|
||||
if request.PubsubTopic == nil {
|
||||
wf.reply(ctx, s, request, http.StatusBadRequest, "pubsubtopic can't be empty")
|
||||
return
|
||||
}
|
||||
|
@ -186,14 +183,14 @@ func (wf *WakuFilterFullNode) subscribe(ctx context.Context, s network.Stream, l
|
|||
}
|
||||
}
|
||||
|
||||
wf.subscriptions.Set(peerID, request.PubsubTopic, request.ContentTopics)
|
||||
wf.subscriptions.Set(peerID, *request.PubsubTopic, request.ContentTopics)
|
||||
|
||||
wf.metrics.RecordSubscriptions(wf.subscriptions.Count())
|
||||
wf.reply(ctx, s, request, http.StatusOK)
|
||||
}
|
||||
|
||||
func (wf *WakuFilterFullNode) unsubscribe(ctx context.Context, s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequest) {
|
||||
if request.PubsubTopic == "" {
|
||||
func (wf *WakuFilterFullNode) unsubscribe(ctx context.Context, s network.Stream, request *pb.FilterSubscribeRequest) {
|
||||
if request.PubsubTopic == nil {
|
||||
wf.reply(ctx, s, request, http.StatusBadRequest, "pubsubtopic can't be empty")
|
||||
return
|
||||
}
|
||||
|
@ -207,7 +204,7 @@ func (wf *WakuFilterFullNode) unsubscribe(ctx context.Context, s network.Stream,
|
|||
wf.reply(ctx, s, request, http.StatusBadRequest, fmt.Sprintf("exceeds maximum content topics: %d", MaxContentTopicsPerRequest))
|
||||
}
|
||||
|
||||
err := wf.subscriptions.Delete(s.Conn().RemotePeer(), request.PubsubTopic, request.ContentTopics)
|
||||
err := wf.subscriptions.Delete(s.Conn().RemotePeer(), *request.PubsubTopic, request.ContentTopics)
|
||||
if err != nil {
|
||||
wf.reply(ctx, s, request, http.StatusNotFound, peerHasNoSubscription)
|
||||
} else {
|
||||
|
@ -216,7 +213,7 @@ func (wf *WakuFilterFullNode) unsubscribe(ctx context.Context, s network.Stream,
|
|||
}
|
||||
}
|
||||
|
||||
func (wf *WakuFilterFullNode) unsubscribeAll(ctx context.Context, s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequest) {
|
||||
func (wf *WakuFilterFullNode) unsubscribeAll(ctx context.Context, s network.Stream, request *pb.FilterSubscribeRequest) {
|
||||
err := wf.subscriptions.DeleteAll(s.Conn().RemotePeer())
|
||||
if err != nil {
|
||||
wf.reply(ctx, s, request, http.StatusNotFound, peerHasNoSubscription)
|
||||
|
@ -227,7 +224,7 @@ func (wf *WakuFilterFullNode) unsubscribeAll(ctx context.Context, s network.Stre
|
|||
}
|
||||
|
||||
func (wf *WakuFilterFullNode) filterListener(ctx context.Context) {
|
||||
defer wf.wg.Done()
|
||||
defer wf.WaitGroup().Done()
|
||||
|
||||
// This function is invoked for each message received
|
||||
// on the full node in context of Waku2-Filter
|
||||
|
@ -243,9 +240,9 @@ func (wf *WakuFilterFullNode) filterListener(ctx context.Context) {
|
|||
subscriber := subscriber // https://golang.org/doc/faq#closures_and_goroutines
|
||||
// Do a message push to light node
|
||||
logger.Info("pushing message to light node")
|
||||
wf.wg.Add(1)
|
||||
wf.WaitGroup().Add(1)
|
||||
go func(subscriber peer.ID) {
|
||||
defer wf.wg.Done()
|
||||
defer wf.WaitGroup().Done()
|
||||
start := time.Now()
|
||||
err := wf.pushMessage(ctx, subscriber, envelope)
|
||||
if err != nil {
|
||||
|
@ -273,9 +270,9 @@ func (wf *WakuFilterFullNode) pushMessage(ctx context.Context, peerID peer.ID, e
|
|||
zap.String("pubsubTopic", env.PubsubTopic()),
|
||||
zap.String("contentTopic", env.Message().ContentTopic),
|
||||
)
|
||||
|
||||
pubSubTopic := env.PubsubTopic()
|
||||
messagePush := &pb.MessagePushV2{
|
||||
PubsubTopic: env.PubsubTopic(),
|
||||
PubsubTopic: &pubSubTopic,
|
||||
WakuMessage: env.Message(),
|
||||
}
|
||||
|
||||
|
@ -317,15 +314,8 @@ func (wf *WakuFilterFullNode) pushMessage(ctx context.Context, peerID peer.ID, e
|
|||
|
||||
// Stop unmounts the filter protocol
|
||||
func (wf *WakuFilterFullNode) Stop() {
|
||||
if wf.cancel == nil {
|
||||
return
|
||||
}
|
||||
|
||||
wf.h.RemoveStreamHandler(FilterSubscribeID_v20beta1)
|
||||
|
||||
wf.cancel()
|
||||
|
||||
wf.msgSub.Unsubscribe()
|
||||
|
||||
wf.wg.Wait()
|
||||
wf.CommonService.Stop(func() {
|
||||
wf.h.RemoveStreamHandler(FilterSubscribeID_v20beta1)
|
||||
wf.msgSub.Unsubscribe()
|
||||
})
|
||||
}
|
||||
|
|
|
@ -8,15 +8,14 @@ import (
|
|||
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
)
|
||||
|
||||
var ErrNotFound = errors.New("not found")
|
||||
|
||||
type ContentTopicSet map[string]struct{}
|
||||
|
||||
type PeerSet map[peer.ID]struct{}
|
||||
|
||||
type PubsubTopics map[string]ContentTopicSet // pubsubTopic => contentTopics
|
||||
type PubsubTopics map[string]protocol.ContentTopicSet // pubsubTopic => contentTopics
|
||||
|
||||
var errNotFound = errors.New("not found")
|
||||
|
||||
type SubscribersMap struct {
|
||||
sync.RWMutex
|
||||
|
@ -57,7 +56,7 @@ func (sub *SubscribersMap) Set(peerID peer.ID, pubsubTopic string, contentTopics
|
|||
|
||||
contentTopicsMap, ok := pubsubTopicMap[pubsubTopic]
|
||||
if !ok {
|
||||
contentTopicsMap = make(ContentTopicSet)
|
||||
contentTopicsMap = make(protocol.ContentTopicSet)
|
||||
}
|
||||
|
||||
for _, c := range contentTopics {
|
||||
|
@ -98,12 +97,12 @@ func (sub *SubscribersMap) Delete(peerID peer.ID, pubsubTopic string, contentTop
|
|||
|
||||
pubsubTopicMap, ok := sub.items[peerID]
|
||||
if !ok {
|
||||
return ErrNotFound
|
||||
return errNotFound
|
||||
}
|
||||
|
||||
contentTopicsMap, ok := pubsubTopicMap[pubsubTopic]
|
||||
if !ok {
|
||||
return ErrNotFound
|
||||
return errNotFound
|
||||
}
|
||||
|
||||
// Removing content topics individually
|
||||
|
@ -132,7 +131,7 @@ func (sub *SubscribersMap) Delete(peerID peer.ID, pubsubTopic string, contentTop
|
|||
func (sub *SubscribersMap) deleteAll(peerID peer.ID) error {
|
||||
pubsubTopicMap, ok := sub.items[peerID]
|
||||
if !ok {
|
||||
return ErrNotFound
|
||||
return errNotFound
|
||||
}
|
||||
|
||||
for pubsubTopic, contentTopicsMap := range pubsubTopicMap {
|
||||
|
|
|
@ -155,9 +155,9 @@ func (sub *Subscribers) RemoveContentFilters(peerID peer.ID, requestID string, c
|
|||
|
||||
// make sure we delete the subscriber
|
||||
// if no more content filters left
|
||||
for _, peerId := range peerIdsToRemove {
|
||||
for _, peerID := range peerIdsToRemove {
|
||||
for i, s := range sub.subscribers {
|
||||
if s.peer == peerId && s.requestID == requestID {
|
||||
if s.peer == peerID && s.requestID == requestID {
|
||||
l := len(sub.subscribers) - 1
|
||||
sub.subscribers[i] = sub.subscribers[l]
|
||||
sub.subscribers = sub.subscribers[:l]
68
vendor/github.com/waku-org/go-waku/waku/v2/protocol/legacy_filter/waku_filter.go
generated
vendored
|
@ -5,7 +5,6 @@ import (
|
|||
"encoding/hex"
|
||||
"errors"
|
||||
"math"
|
||||
"sync"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
|
@ -47,12 +46,11 @@ type (
|
|||
}
|
||||
|
||||
WakuFilter struct {
|
||||
cancel context.CancelFunc
|
||||
*protocol.CommonService
|
||||
h host.Host
|
||||
isFullNode bool
|
||||
msgSub relay.Subscription
|
||||
metrics Metrics
|
||||
wg *sync.WaitGroup
|
||||
log *zap.Logger
|
||||
|
||||
filters *FilterMap
|
||||
|
@ -75,8 +73,8 @@ func NewWakuFilter(broadcaster relay.Broadcaster, isFullNode bool, timesource ti
|
|||
opt(params)
|
||||
}
|
||||
|
||||
wf.wg = &sync.WaitGroup{}
|
||||
wf.isFullNode = isFullNode
|
||||
wf.CommonService = protocol.NewCommonService()
|
||||
wf.filters = NewFilterMap(broadcaster, timesource)
|
||||
wf.subscribers = NewSubscribers(params.Timeout)
|
||||
wf.metrics = newMetrics(reg)
|
||||
|
@ -90,23 +88,19 @@ func (wf *WakuFilter) SetHost(h host.Host) {
|
|||
}
|
||||
|
||||
func (wf *WakuFilter) Start(ctx context.Context, sub relay.Subscription) error {
|
||||
wf.wg.Wait() // Wait for any goroutines to stop
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
|
||||
wf.h.SetStreamHandlerMatch(FilterID_v20beta1, protocol.PrefixTextMatch(string(FilterID_v20beta1)), wf.onRequest(ctx))
|
||||
|
||||
wf.cancel = cancel
|
||||
wf.msgSub = sub
|
||||
|
||||
wf.wg.Add(1)
|
||||
go wf.filterListener(ctx)
|
||||
|
||||
wf.log.Info("filter protocol started")
|
||||
|
||||
return nil
|
||||
return wf.CommonService.Start(ctx, func() error {
|
||||
return wf.start(sub)
|
||||
})
|
||||
}
|
||||
|
||||
func (wf *WakuFilter) start(sub relay.Subscription) error {
|
||||
wf.h.SetStreamHandlerMatch(FilterID_v20beta1, protocol.PrefixTextMatch(string(FilterID_v20beta1)), wf.onRequest(wf.Context()))
|
||||
wf.msgSub = sub
|
||||
wf.WaitGroup().Add(1)
|
||||
go wf.filterListener(wf.Context())
|
||||
wf.log.Info("filter protocol started")
|
||||
return nil
|
||||
}
|
||||
func (wf *WakuFilter) onRequest(ctx context.Context) func(s network.Stream) {
|
||||
return func(s network.Stream) {
|
||||
defer s.Close()
|
||||
|
@ -143,13 +137,13 @@ func (wf *WakuFilter) onRequest(ctx context.Context) func(s network.Stream) {
|
|||
subscriber.filter.Topic = relay.DefaultWakuTopic
|
||||
}
|
||||
|
||||
len := wf.subscribers.Append(subscriber)
|
||||
subscribersLen := wf.subscribers.Append(subscriber)
|
||||
|
||||
logger.Info("adding subscriber")
|
||||
wf.metrics.RecordSubscribers(len)
|
||||
wf.metrics.RecordSubscribers(subscribersLen)
|
||||
} else {
|
||||
peerId := s.Conn().RemotePeer()
|
||||
wf.subscribers.RemoveContentFilters(peerId, filterRPCRequest.RequestId, filterRPCRequest.Request.ContentFilters)
|
||||
peerID := s.Conn().RemotePeer()
|
||||
wf.subscribers.RemoveContentFilters(peerID, filterRPCRequest.RequestId, filterRPCRequest.Request.ContentFilters)
|
||||
|
||||
logger.Info("removing subscriber")
|
||||
wf.metrics.RecordSubscribers(wf.subscribers.Length())
|
||||
|
@ -188,7 +182,7 @@ func (wf *WakuFilter) pushMessage(ctx context.Context, subscriber Subscriber, ms
|
|||
}
|
||||
|
||||
func (wf *WakuFilter) filterListener(ctx context.Context) {
|
||||
defer wf.wg.Done()
|
||||
defer wf.WaitGroup().Done()
|
||||
|
||||
// This function is invoked for each message received
|
||||
// on the full node in context of Waku2-Filter
|
||||
|
@ -270,7 +264,7 @@ func (wf *WakuFilter) requestSubscription(ctx context.Context, filter ContentFil
|
|||
defer conn.Close()
|
||||
|
||||
// This is the only successful path to subscription
|
||||
requestID := hex.EncodeToString(protocol.GenerateRequestId())
|
||||
requestID := hex.EncodeToString(protocol.GenerateRequestID())
|
||||
|
||||
writer := pbio.NewDelimitedWriter(conn)
|
||||
filterRPC := &pb.FilterRPC{RequestId: requestID, Request: request}
|
||||
|
@ -301,7 +295,7 @@ func (wf *WakuFilter) Unsubscribe(ctx context.Context, contentFilter ContentFilt
|
|||
defer conn.Close()
|
||||
|
||||
// This is the only successful path to subscription
|
||||
id := protocol.GenerateRequestId()
|
||||
id := protocol.GenerateRequestID()
|
||||
|
||||
var contentFilters []*pb.FilterRequest_ContentFilter
|
||||
for _, ct := range contentFilter.ContentTopics {
|
||||
|
@ -327,19 +321,13 @@ func (wf *WakuFilter) Unsubscribe(ctx context.Context, contentFilter ContentFilt
|
|||
|
||||
// Stop unmounts the filter protocol
|
||||
func (wf *WakuFilter) Stop() {
|
||||
if wf.cancel == nil {
|
||||
return
|
||||
}
|
||||
wf.CommonService.Stop(func() {
|
||||
wf.msgSub.Unsubscribe()
|
||||
|
||||
wf.cancel()
|
||||
|
||||
wf.msgSub.Unsubscribe()
|
||||
|
||||
wf.h.RemoveStreamHandler(FilterID_v20beta1)
|
||||
wf.filters.RemoveAll()
|
||||
wf.subscribers.Clear()
|
||||
|
||||
wf.wg.Wait()
|
||||
wf.h.RemoveStreamHandler(FilterID_v20beta1)
|
||||
wf.filters.RemoveAll()
|
||||
wf.subscribers.Clear()
|
||||
})
|
||||
}
|
||||
|
||||
// Subscribe setups a subscription to receive messages that match a specific content filter
|
||||
|
@ -444,8 +432,8 @@ func (wf *WakuFilter) UnsubscribeFilter(ctx context.Context, cf ContentFilter) e
|
|||
}
|
||||
}
|
||||
|
||||
for rId := range idsToRemove {
|
||||
wf.filters.Delete(rId)
|
||||
for rID := range idsToRemove {
|
||||
wf.filters.Delete(rID)
|
||||
}
|
||||
|
||||
return nil
57
vendor/github.com/waku-org/go-waku/waku/v2/protocol/lightpush/waku_lightpush.go
generated
vendored
|
@ -25,7 +25,7 @@ const LightPushID_v20beta1 = libp2pProtocol.ID("/vac/waku/lightpush/2.0.0-beta1"
|
|||
|
||||
var (
|
||||
ErrNoPeersAvailable = errors.New("no suitable remote peers")
|
||||
ErrInvalidId = errors.New("invalid request id")
|
||||
ErrInvalidID = errors.New("invalid request id")
|
||||
)
|
||||
|
||||
// WakuLightPush is the implementation of the Waku LightPush protocol
|
||||
|
@ -72,8 +72,8 @@ func (wakuLP *WakuLightPush) Start(ctx context.Context) error {
|
|||
}
|
||||
|
||||
// relayIsNotAvailable determines if this node supports relaying messages for other lightpush clients
|
||||
func (wakuLp *WakuLightPush) relayIsNotAvailable() bool {
|
||||
return wakuLp.relay == nil
|
||||
func (wakuLP *WakuLightPush) relayIsNotAvailable() bool {
|
||||
return wakuLP.relay == nil
|
||||
}
|
||||
|
||||
func (wakuLP *WakuLightPush) onRequest(ctx context.Context) func(s network.Stream) {
|
||||
|
@ -144,15 +144,10 @@ func (wakuLP *WakuLightPush) onRequest(ctx context.Context) func(s network.Strea
|
|||
}
|
||||
}
|
||||
|
||||
func (wakuLP *WakuLightPush) request(ctx context.Context, req *pb.PushRequest, opts ...Option) (*pb.PushResponse, error) {
|
||||
params := new(lightPushParameters)
|
||||
params.host = wakuLP.h
|
||||
params.log = wakuLP.log
|
||||
params.pm = wakuLP.pm
|
||||
|
||||
optList := append(DefaultOptions(wakuLP.h), opts...)
|
||||
for _, opt := range optList {
|
||||
opt(params)
|
||||
// request sends a message via lightPush protocol to either a specified peer or peer that is selected.
|
||||
func (wakuLP *WakuLightPush) request(ctx context.Context, req *pb.PushRequest, params *lightPushParameters) (*pb.PushResponse, error) {
|
||||
if params == nil {
|
||||
return nil, errors.New("lightpush params are mandatory")
|
||||
}
|
||||
|
||||
if params.selectedPeer == "" {
|
||||
|
@ -161,7 +156,7 @@ func (wakuLP *WakuLightPush) request(ctx context.Context, req *pb.PushRequest, o
|
|||
}
|
||||
|
||||
if len(params.requestID) == 0 {
|
||||
return nil, ErrInvalidId
|
||||
return nil, ErrInvalidID
|
||||
}
|
||||
|
||||
logger := wakuLP.log.With(logging.HostID("peer", params.selectedPeer))
|
||||
|
@ -215,31 +210,49 @@ func (wakuLP *WakuLightPush) Stop() {
|
|||
wakuLP.h.RemoveStreamHandler(LightPushID_v20beta1)
|
||||
}
|
||||
|
||||
// PublishToTopic is used to broadcast a WakuMessage to a pubsub topic via lightpush protocol
|
||||
func (wakuLP *WakuLightPush) PublishToTopic(ctx context.Context, message *wpb.WakuMessage, topic string, opts ...Option) ([]byte, error) {
|
||||
// Optional PublishToTopic is used to broadcast a WakuMessage to a pubsub topic via lightpush protocol
|
||||
// If pubSubTopic is not provided, then contentTopic is use to derive the relevant pubSubTopic via autosharding.
|
||||
func (wakuLP *WakuLightPush) PublishToTopic(ctx context.Context, message *wpb.WakuMessage, opts ...Option) ([]byte, error) {
|
||||
if message == nil {
|
||||
return nil, errors.New("message can't be null")
|
||||
}
|
||||
params := new(lightPushParameters)
|
||||
params.host = wakuLP.h
|
||||
params.log = wakuLP.log
|
||||
params.pm = wakuLP.pm
|
||||
|
||||
optList := append(DefaultOptions(wakuLP.h), opts...)
|
||||
for _, opt := range optList {
|
||||
opt(params)
|
||||
}
|
||||
|
||||
if params.pubsubTopic == "" {
|
||||
var err error
|
||||
params.pubsubTopic, err = protocol.GetPubSubTopicFromContentTopic(message.ContentTopic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
req := new(pb.PushRequest)
|
||||
req.Message = message
|
||||
req.PubsubTopic = topic
|
||||
req.PubsubTopic = params.pubsubTopic
|
||||
|
||||
response, err := wakuLP.request(ctx, req, opts...)
|
||||
response, err := wakuLP.request(ctx, req, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if response.IsSuccess {
|
||||
hash := message.Hash(topic)
|
||||
hash := message.Hash(params.pubsubTopic)
|
||||
wakuLP.log.Info("waku.lightpush published", logging.HexString("hash", hash))
|
||||
return hash, nil
|
||||
} else {
|
||||
return nil, errors.New(response.Info)
|
||||
}
|
||||
|
||||
return nil, errors.New(response.Info)
|
||||
}
|
||||
|
||||
// Publish is used to broadcast a WakuMessage to the default waku pubsub topic via lightpush protocol
|
||||
// Publish is used to broadcast a WakuMessage to the pubSubTopic (which is derived from the contentTopic) via lightpush protocol
|
||||
// If auto-sharding is not to be used, then PublishToTopic API should be used
|
||||
func (wakuLP *WakuLightPush) Publish(ctx context.Context, message *wpb.WakuMessage, opts ...Option) ([]byte, error) {
|
||||
return wakuLP.PublishToTopic(ctx, message, relay.DefaultWakuTopic, opts...)
|
||||
return wakuLP.PublishToTopic(ctx, message, opts...)
|
||||
}
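
A publish-side sketch of the new call shape, assuming a started WakuLightPush client; the topic and content-topic strings are examples. Without WithPubSubTopic the pubsub topic is derived from the content topic via GetPubSubTopicFromContentTopic, as shown above.

package main

import (
	"context"
	"fmt"

	"github.com/waku-org/go-waku/waku/v2/protocol/lightpush"
	wpb "github.com/waku-org/go-waku/waku/v2/protocol/pb"
)

func publishExamples(ctx context.Context, lp *lightpush.WakuLightPush) error {
	msg := &wpb.WakuMessage{
		Payload:      []byte("hello"),
		ContentTopic: "/toychat/2/huilong/proto",
	}

	// Pin the pubsub topic explicitly.
	hash, err := lp.Publish(ctx, msg, lightpush.WithPubSubTopic("/waku/2/rs/16/32"))
	if err != nil {
		return err
	}
	fmt.Printf("published %x\n", hash)

	// Or omit it and rely on autosharding from the content topic.
	_, err = lp.Publish(ctx, msg)
	return err
}
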
11
vendor/github.com/waku-org/go-waku/waku/v2/protocol/lightpush/waku_lightpush_option.go
generated
vendored
|
@ -17,6 +17,7 @@ type lightPushParameters struct {
|
|||
requestID []byte
|
||||
pm *peermanager.PeerManager
|
||||
log *zap.Logger
|
||||
pubsubTopic string
|
||||
}
|
||||
|
||||
// Option is the type of options accepted when performing LightPush protocol requests
|
||||
|
@ -40,7 +41,7 @@ func WithAutomaticPeerSelection(fromThesePeers ...peer.ID) Option {
|
|||
if params.pm == nil {
|
||||
p, err = utils.SelectPeer(params.host, LightPushID_v20beta1, fromThesePeers, params.log)
|
||||
} else {
|
||||
p, err = params.pm.SelectPeer(LightPushID_v20beta1, fromThesePeers, params.log)
|
||||
p, err = params.pm.SelectPeer(LightPushID_v20beta1, "", fromThesePeers...)
|
||||
}
|
||||
if err == nil {
|
||||
params.selectedPeer = p
|
||||
|
@ -50,6 +51,12 @@ func WithAutomaticPeerSelection(fromThesePeers ...peer.ID) Option {
|
|||
}
|
||||
}
|
||||
|
||||
func WithPubSubTopic(pubsubTopic string) Option {
|
||||
return func(params *lightPushParameters) {
|
||||
params.pubsubTopic = pubsubTopic
|
||||
}
|
||||
}
|
||||
|
||||
// WithFastestPeerSelection is an option used to select a peer from the peer store
|
||||
// with the lowest ping. If a list of specific peers is passed, the peer will be chosen
|
||||
// from that list assuming it supports the chosen protocol, otherwise it will chose a peer
|
||||
|
@ -77,7 +84,7 @@ func WithRequestID(requestID []byte) Option {
|
|||
// when publishing a message
|
||||
func WithAutomaticRequestID() Option {
|
||||
return func(params *lightPushParameters) {
|
||||
params.requestID = protocol.GenerateRequestId()
|
||||
params.requestID = protocol.GenerateRequestID()
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -100,9 +100,9 @@ func (wakuPX *WakuPeerExchange) handleResponse(ctx context.Context, response *pb
|
|||
|
||||
if len(discoveredPeers) != 0 {
|
||||
wakuPX.log.Info("connecting to newly discovered peers", zap.Int("count", len(discoveredPeers)))
|
||||
wakuPX.wg.Add(1)
|
||||
wakuPX.WaitGroup().Add(1)
|
||||
go func() {
|
||||
defer wakuPX.wg.Done()
|
||||
defer wakuPX.WaitGroup().Done()
|
||||
|
||||
peerCh := make(chan peermanager.PeerData)
|
||||
defer close(peerCh)
|
||||
|
|
|
@ -5,7 +5,6 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
|
@ -28,7 +27,7 @@ const MaxCacheSize = 1000
|
|||
|
||||
var (
|
||||
ErrNoPeersAvailable = errors.New("no suitable remote peers")
|
||||
ErrInvalidId = errors.New("invalid request id")
|
||||
ErrInvalidID = errors.New("invalid request id")
|
||||
)
|
||||
|
||||
// PeerConnector will subscribe to a channel containing the information for all peers found by this discovery protocol
|
||||
|
@ -43,9 +42,8 @@ type WakuPeerExchange struct {
|
|||
metrics Metrics
|
||||
log *zap.Logger
|
||||
|
||||
cancel context.CancelFunc
|
||||
*protocol.CommonService
|
||||
|
||||
wg sync.WaitGroup
|
||||
peerConnector PeerConnector
|
||||
enrCache *enrCache
|
||||
}
|
||||
|
@ -65,35 +63,31 @@ func NewWakuPeerExchange(disc *discv5.DiscoveryV5, peerConnector PeerConnector,
|
|||
wakuPX.enrCache = newEnrCache
|
||||
wakuPX.peerConnector = peerConnector
|
||||
wakuPX.pm = pm
|
||||
wakuPX.CommonService = protocol.NewCommonService()
|
||||
|
||||
return wakuPX, nil
|
||||
}
|
||||
|
||||
// Sets the host to be able to mount or consume a protocol
|
||||
// SetHost sets the host to be able to mount or consume a protocol
|
||||
func (wakuPX *WakuPeerExchange) SetHost(h host.Host) {
|
||||
wakuPX.h = h
|
||||
}
|
||||
|
||||
// Start inits the peer exchange protocol
|
||||
func (wakuPX *WakuPeerExchange) Start(ctx context.Context) error {
|
||||
if wakuPX.cancel != nil {
|
||||
return errors.New("peer exchange already started")
|
||||
}
|
||||
return wakuPX.CommonService.Start(ctx, wakuPX.start)
|
||||
}
|
||||
|
||||
wakuPX.wg.Wait() // Waiting for any go routines to stop
|
||||
func (wakuPX *WakuPeerExchange) start() error {
|
||||
wakuPX.h.SetStreamHandlerMatch(PeerExchangeID_v20alpha1, protocol.PrefixTextMatch(string(PeerExchangeID_v20alpha1)), wakuPX.onRequest())
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
wakuPX.cancel = cancel
|
||||
|
||||
wakuPX.h.SetStreamHandlerMatch(PeerExchangeID_v20alpha1, protocol.PrefixTextMatch(string(PeerExchangeID_v20alpha1)), wakuPX.onRequest(ctx))
|
||||
wakuPX.WaitGroup().Add(1)
|
||||
go wakuPX.runPeerExchangeDiscv5Loop(wakuPX.Context())
|
||||
wakuPX.log.Info("Peer exchange protocol started")
|
||||
|
||||
wakuPX.wg.Add(1)
|
||||
go wakuPX.runPeerExchangeDiscv5Loop(ctx)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (wakuPX *WakuPeerExchange) onRequest(ctx context.Context) func(s network.Stream) {
|
||||
func (wakuPX *WakuPeerExchange) onRequest() func(s network.Stream) {
|
||||
return func(s network.Stream) {
|
||||
defer s.Close()
|
||||
logger := wakuPX.log.With(logging.HostID("peer", s.Conn().RemotePeer()))
|
||||
|
@ -133,12 +127,9 @@ func (wakuPX *WakuPeerExchange) onRequest(ctx context.Context) func(s network.St
|
|||
|
||||
// Stop unmounts the peer exchange protocol
|
||||
func (wakuPX *WakuPeerExchange) Stop() {
|
||||
if wakuPX.cancel == nil {
|
||||
return
|
||||
}
|
||||
wakuPX.h.RemoveStreamHandler(PeerExchangeID_v20alpha1)
|
||||
wakuPX.cancel()
|
||||
wakuPX.wg.Wait()
|
||||
wakuPX.CommonService.Stop(func() {
|
||||
wakuPX.h.RemoveStreamHandler(PeerExchangeID_v20alpha1)
|
||||
})
|
||||
}
|
||||
|
||||
func (wakuPX *WakuPeerExchange) iterate(ctx context.Context) error {
|
||||
|
@ -173,7 +164,7 @@ func (wakuPX *WakuPeerExchange) iterate(ctx context.Context) error {
|
|||
}
|
||||
|
||||
func (wakuPX *WakuPeerExchange) runPeerExchangeDiscv5Loop(ctx context.Context) {
|
||||
defer wakuPX.wg.Done()
|
||||
defer wakuPX.WaitGroup().Done()
|
||||
|
||||
// Runs a discv5 loop adding new peers to the px peer cache
|
||||
if wakuPX.disc == nil {
|
||||
|
|
|
@ -37,7 +37,7 @@ func WithAutomaticPeerSelection(fromThesePeers ...peer.ID) PeerExchangeOption {
|
|||
if params.pm == nil {
|
||||
p, err = utils.SelectPeer(params.host, PeerExchangeID_v20alpha1, fromThesePeers, params.log)
|
||||
} else {
|
||||
p, err = params.pm.SelectPeer(PeerExchangeID_v20alpha1, fromThesePeers, params.log)
|
||||
p, err = params.pm.SelectPeer(PeerExchangeID_v20alpha1, "", fromThesePeers...)
|
||||
}
|
||||
if err == nil {
|
||||
params.selectedPeer = p
|
||||
|
|
|
@ -7,15 +7,23 @@ import (
|
|||
"strings"
|
||||
)
|
||||
|
||||
// Waku2PubsubTopicPrefix is the expected prefix to be used for pubsub topics
|
||||
const Waku2PubsubTopicPrefix = "/waku/2"
|
||||
|
||||
// StaticShardingPubsubTopicPrefix is the expected prefix to be used for static sharding pubsub topics
|
||||
const StaticShardingPubsubTopicPrefix = Waku2PubsubTopicPrefix + "/rs"
|
||||
|
||||
// ErrInvalidStructure indicates that the pubsub topic is malformed
|
||||
var ErrInvalidStructure = errors.New("invalid topic structure")
|
||||
|
||||
// ErrInvalidTopicPrefix indicates that the pubsub topic is missing the prefix /waku/2
|
||||
var ErrInvalidTopicPrefix = errors.New("must start with " + Waku2PubsubTopicPrefix)
|
||||
var ErrMissingTopicName = errors.New("missing topic-name")
|
||||
var ErrInvalidShardedTopicPrefix = errors.New("must start with " + StaticShardingPubsubTopicPrefix)
|
||||
var ErrMissingClusterIndex = errors.New("missing shard_cluster_index")
|
||||
var ErrMissingShardNumber = errors.New("missing shard_number")
|
||||
|
||||
// ErrInvalidNumberFormat indicates that a number exceeds the allowed range
|
||||
var ErrInvalidNumberFormat = errors.New("only 2^16 numbers are allowed")
|
||||
|
||||
// NamespacedPubsubTopicKind used to represent kind of NamespacedPubsubTopicKind
|
||||
|
@ -107,7 +115,7 @@ func (s StaticShardingPubsubTopic) Cluster() uint16 {
|
|||
return s.cluster
|
||||
}
|
||||
|
||||
// Cluster returns the shard number
|
||||
// Shard returns the shard number
|
||||
func (s StaticShardingPubsubTopic) Shard() uint16 {
|
||||
return s.shard
|
||||
}
|
||||
|
@ -174,14 +182,14 @@ func ToShardedPubsubTopic(topic string) (NamespacedPubsubTopic, error) {
|
|||
return nil, err
|
||||
}
|
||||
return s, nil
|
||||
} else {
|
||||
s := NamedShardingPubsubTopic{}
|
||||
err := s.Parse(topic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
s := NamedShardingPubsubTopic{}
|
||||
err := s.Parse(topic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s, nil
|
||||
}
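
A parsing sketch for the static sharding form this function accepts, assuming the helpers sit in the waku/v2/protocol package as the vendored path suggests; the type assertion mirrors the Cluster()/Shard() accessors shown above.

package main

import (
	"fmt"

	"github.com/waku-org/go-waku/waku/v2/protocol"
)

func main() {
	// "/waku/2/rs/<cluster>/<shard>" is the StaticShardingPubsubTopicPrefix form.
	nt, err := protocol.ToShardedPubsubTopic("/waku/2/rs/16/32")
	if err != nil {
		panic(err)
	}
	if st, ok := nt.(protocol.StaticShardingPubsubTopic); ok {
		fmt.Println(st.Cluster(), st.Shard()) // 16 32
	}
}
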
|
||||
|
||||
// DefaultPubsubTopic is the default pubSub topic used in waku
|
||||
|
|
|
@ -75,15 +75,15 @@ func (s *chStore) broadcast(ctx context.Context, m *protocol.Envelope) {
|
|||
}
|
||||
}
|
||||
|
||||
func (b *chStore) close() {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
for _, chans := range b.topicToChans {
|
||||
func (s *chStore) close() {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
for _, chans := range s.topicToChans {
|
||||
for _, ch := range chans {
|
||||
close(ch)
|
||||
}
|
||||
}
|
||||
b.topicToChans = nil
|
||||
s.topicToChans = nil
|
||||
}
|
||||
|
||||
// Broadcaster is used to create a fanout for an envelope that will be received by any subscriber interested in the topic of the message
|
||||
|
|
|
@ -10,14 +10,14 @@ import (
|
|||
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/crypto/secp256k1"
|
||||
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
proto "google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/waku-org/go-waku/waku/v2/hash"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/timesource"
|
||||
"go.uber.org/zap"
|
||||
proto "google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func msgHash(pubSubTopic string, msg *pb.WakuMessage) []byte {
|
||||
|
@ -38,6 +38,68 @@ func msgHash(pubSubTopic string, msg *pb.WakuMessage) []byte {
|
|||
)
|
||||
}
|
||||
|
||||
type validatorFn = func(ctx context.Context, msg *pb.WakuMessage, topic string) bool
|
||||
|
||||
func (w *WakuRelay) RegisterDefaultValidator(fn validatorFn) {
|
||||
w.topicValidatorMutex.Lock()
|
||||
defer w.topicValidatorMutex.Unlock()
|
||||
w.defaultTopicValidators = append(w.defaultTopicValidators, fn)
|
||||
}
|
||||
|
||||
func (w *WakuRelay) RegisterTopicValidator(topic string, fn validatorFn) {
|
||||
w.topicValidatorMutex.Lock()
|
||||
defer w.topicValidatorMutex.Unlock()
|
||||
|
||||
w.topicValidators[topic] = append(w.topicValidators[topic], fn)
|
||||
}
|
||||
|
||||
func (w *WakuRelay) RemoveTopicValidator(topic string) {
|
||||
w.topicValidatorMutex.Lock()
|
||||
defer w.topicValidatorMutex.Unlock()
|
||||
|
||||
delete(w.topicValidators, topic)
|
||||
}
|
||||
|
||||
func (w *WakuRelay) topicValidator(topic string) func(ctx context.Context, peerID peer.ID, message *pubsub.Message) bool {
|
||||
return func(ctx context.Context, peerID peer.ID, message *pubsub.Message) bool {
|
||||
msg := new(pb.WakuMessage)
|
||||
err := proto.Unmarshal(message.Data, msg)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
w.topicValidatorMutex.RLock()
|
||||
validators, exists := w.topicValidators[topic]
|
||||
validators = append(validators, w.defaultTopicValidators...)
|
||||
w.topicValidatorMutex.RUnlock()
|
||||
|
||||
if exists {
|
||||
for _, v := range validators {
|
||||
if !v(ctx, msg, topic) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// AddSignedTopicValidator registers a gossipsub validator for a topic which will check that messages Meta field contains a valid ECDSA signature for the specified pubsub topic. This is used as a DoS prevention mechanism
|
||||
func (w *WakuRelay) AddSignedTopicValidator(topic string, publicKey *ecdsa.PublicKey) error {
|
||||
w.log.Info("adding validator to signed topic", zap.String("topic", topic), zap.String("publicKey", hex.EncodeToString(elliptic.Marshal(publicKey.Curve, publicKey.X, publicKey.Y))))
|
||||
|
||||
fn := signedTopicBuilder(w.timesource, publicKey)
|
||||
|
||||
w.RegisterTopicValidator(topic, fn)
|
||||
|
||||
if !w.IsSubscribed(topic) {
|
||||
w.log.Warn("relay is not subscribed to signed topic", zap.String("topic", topic))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
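
With the validator registry above, applications can attach their own checks per pubsub topic instead of registering raw gossipsub validators. A sketch of a custom validator, assuming a constructed WakuRelay; the function signature is the validatorFn shape introduced in this change, and the size limit is an arbitrary example.

package main

import (
	"context"

	"github.com/waku-org/go-waku/waku/v2/protocol/pb"
	"github.com/waku-org/go-waku/waku/v2/protocol/relay"
)

// rejectOversized drops messages above an arbitrary payload size on one topic.
// It runs from the single gossipsub validator installed by topicValidator,
// together with any validators added via RegisterDefaultValidator.
func rejectOversized(w *relay.WakuRelay, topic string) {
	w.RegisterTopicValidator(topic, func(ctx context.Context, msg *pb.WakuMessage, pubsubTopic string) bool {
		return len(msg.Payload) <= 150*1024
	})
}
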
|
||||
|
||||
const messageWindowDuration = time.Minute * 5
|
||||
|
||||
func withinTimeWindow(t timesource.Timesource, msg *pb.WakuMessage) bool {
|
||||
|
@ -51,17 +113,9 @@ func withinTimeWindow(t timesource.Timesource, msg *pb.WakuMessage) bool {
|
|||
return now.Sub(msgTime).Abs() <= messageWindowDuration
|
||||
}
|
||||
|
||||
type validatorFn = func(ctx context.Context, peerID peer.ID, message *pubsub.Message) bool
|
||||
|
||||
func validatorFnBuilder(t timesource.Timesource, topic string, publicKey *ecdsa.PublicKey) (validatorFn, error) {
|
||||
func signedTopicBuilder(t timesource.Timesource, publicKey *ecdsa.PublicKey) validatorFn {
|
||||
publicKeyBytes := crypto.FromECDSAPub(publicKey)
|
||||
return func(ctx context.Context, peerID peer.ID, message *pubsub.Message) bool {
|
||||
msg := new(pb.WakuMessage)
|
||||
err := proto.Unmarshal(message.Data, msg)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return func(ctx context.Context, msg *pb.WakuMessage, topic string) bool {
|
||||
if !withinTimeWindow(t, msg) {
|
||||
return false
|
||||
}
|
||||
|
@ -70,28 +124,7 @@ func validatorFnBuilder(t timesource.Timesource, topic string, publicKey *ecdsa.
|
|||
signature := msg.Meta
|
||||
|
||||
return secp256k1.VerifySignature(publicKeyBytes, msgHash, signature)
|
||||
}, nil
|
||||
}
|
||||
|
||||
// AddSignedTopicValidator registers a gossipsub validator for a topic which will check that messages Meta field contains a valid ECDSA signature for the specified pubsub topic. This is used as a DoS prevention mechanism
|
||||
func (w *WakuRelay) AddSignedTopicValidator(topic string, publicKey *ecdsa.PublicKey) error {
|
||||
w.log.Info("adding validator to signed topic", zap.String("topic", topic), zap.String("publicKey", hex.EncodeToString(elliptic.Marshal(publicKey.Curve, publicKey.X, publicKey.Y))))
|
||||
|
||||
fn, err := validatorFnBuilder(w.timesource, topic, publicKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = w.pubsub.RegisterTopicValidator(topic, fn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !w.IsSubscribed(topic) {
|
||||
w.log.Warn("relay is not subscribed to signed topic", zap.String("topic", topic))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SignMessage adds an ECDSA signature to a WakuMessage as an opt-in mechanism for DoS prevention
|
||||
|
|
|
@ -49,25 +49,30 @@ type WakuRelay struct {
|
|||
|
||||
minPeersToPublish int
|
||||
|
||||
topicValidatorMutex sync.RWMutex
|
||||
topicValidators map[string][]validatorFn
|
||||
defaultTopicValidators []validatorFn
|
||||
|
||||
// TODO: convert to concurrent maps
|
||||
topicsMutex sync.Mutex
|
||||
topicsMutex sync.RWMutex
|
||||
wakuRelayTopics map[string]*pubsub.Topic
|
||||
relaySubs map[string]*pubsub.Subscription
|
||||
topicEvtHanders map[string]*pubsub.TopicEventHandler
|
||||
|
||||
events event.Bus
|
||||
emitters struct {
|
||||
EvtRelaySubscribed event.Emitter
|
||||
EvtRelayUnsubscribed event.Emitter
|
||||
EvtPeerTopic event.Emitter
|
||||
}
|
||||
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
wg sync.WaitGroup
|
||||
*waku_proto.CommonService
|
||||
}
|
||||
|
||||
// EvtRelaySubscribed is an event emitted when a new subscription to a pubsub topic is created
|
||||
type EvtRelaySubscribed struct {
|
||||
Topic string
|
||||
Topic string
|
||||
TopicInst *pubsub.Topic
|
||||
}
|
||||
|
||||
// EvtRelayUnsubscribed is an event emitted when a subscription to a pubsub topic is closed
|
||||
|
@ -75,7 +80,20 @@ type EvtRelayUnsubscribed struct {
|
|||
Topic string
|
||||
}
|
||||
|
||||
func msgIdFn(pmsg *pubsub_pb.Message) string {
|
||||
type PeerTopicState int
|
||||
|
||||
const (
|
||||
PEER_JOINED = iota
|
||||
PEER_LEFT
|
||||
)
|
||||
|
||||
type EvtPeerTopic struct {
|
||||
PubsubTopic string
|
||||
PeerID peer.ID
|
||||
State PeerTopicState
|
||||
}
|
||||
|
||||
func msgIDFn(pmsg *pubsub_pb.Message) string {
|
||||
return string(hash.SHA256(pmsg.Data))
|
||||
}
|
||||
|
||||
|
@ -85,9 +103,11 @@ func NewWakuRelay(bcaster Broadcaster, minPeersToPublish int, timesource timesou
|
|||
w.timesource = timesource
|
||||
w.wakuRelayTopics = make(map[string]*pubsub.Topic)
|
||||
w.relaySubs = make(map[string]*pubsub.Subscription)
|
||||
w.topicEvtHanders = make(map[string]*pubsub.TopicEventHandler)
|
||||
w.topicValidators = make(map[string][]validatorFn)
|
||||
w.bcaster = bcaster
|
||||
w.minPeersToPublish = minPeersToPublish
|
||||
w.wg = sync.WaitGroup{}
|
||||
w.CommonService = waku_proto.NewCommonService()
|
||||
w.log = log.Named("relay")
|
||||
w.events = eventbus.NewBus()
|
||||
w.metrics = newMetrics(reg, w.log)
|
||||
|
@ -96,11 +116,11 @@ func NewWakuRelay(bcaster Broadcaster, minPeersToPublish int, timesource timesou
|
|||
cfg.PruneBackoff = time.Minute
|
||||
cfg.UnsubscribeBackoff = 5 * time.Second
|
||||
cfg.GossipFactor = 0.25
|
||||
cfg.D = 6
|
||||
cfg.D = waku_proto.GossipSubOptimalFullMeshSize
|
||||
cfg.Dlo = 4
|
||||
cfg.Dhi = 12
|
||||
cfg.Dout = 3
|
||||
cfg.Dlazy = 6
|
||||
cfg.Dlazy = waku_proto.GossipSubOptimalFullMeshSize
|
||||
cfg.HeartbeatInterval = time.Second
|
||||
cfg.HistoryLength = 6
|
||||
cfg.HistoryGossip = 3
|
||||
|
@ -160,7 +180,7 @@ func NewWakuRelay(bcaster Broadcaster, minPeersToPublish int, timesource timesou
|
|||
w.opts = append([]pubsub.Option{
|
||||
pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign),
|
||||
pubsub.WithNoAuthor(),
|
||||
pubsub.WithMessageIdFn(msgIdFn),
|
||||
pubsub.WithMessageIdFn(msgIDFn),
|
||||
pubsub.WithGossipSubProtocols(
|
||||
[]protocol.ID{WakuRelayID_v200, pubsub.GossipSubID_v11, pubsub.GossipSubID_v10, pubsub.FloodSubID},
|
||||
func(feat pubsub.GossipSubFeature, proto protocol.ID) bool {
|
||||
|
@ -179,12 +199,6 @@ func NewWakuRelay(bcaster Broadcaster, minPeersToPublish int, timesource timesou
|
|||
pubsub.WithSeenMessagesTTL(2 * time.Minute),
|
||||
pubsub.WithPeerScore(w.peerScoreParams, w.peerScoreThresholds),
|
||||
pubsub.WithPeerScoreInspect(w.peerScoreInspector, 6*time.Second),
|
||||
// TODO: to improve - setup default validator only if no default validator has been set.
|
||||
pubsub.WithDefaultValidator(func(ctx context.Context, peerID peer.ID, message *pubsub.Message) bool {
|
||||
msg := new(pb.WakuMessage)
|
||||
err := proto.Unmarshal(message.Data, msg)
|
||||
return err == nil
|
||||
}),
|
||||
}, opts...)
|
||||
|
||||
return w
|
||||
|
@ -213,12 +227,11 @@ func (w *WakuRelay) SetHost(h host.Host) {
|
|||
|
||||
// Start initiates the WakuRelay protocol
|
||||
func (w *WakuRelay) Start(ctx context.Context) error {
|
||||
w.wg.Wait()
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
w.ctx = ctx // TODO: create worker for creating subscriptions instead of storing context
|
||||
w.cancel = cancel
|
||||
return w.CommonService.Start(ctx, w.start)
|
||||
}
|
||||
|
||||
ps, err := pubsub.NewGossipSub(ctx, w.host, w.opts...)
|
||||
func (w *WakuRelay) start() error {
|
||||
ps, err := pubsub.NewGossipSub(w.Context(), w.host, w.opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -233,6 +246,11 @@ func (w *WakuRelay) Start(ctx context.Context) error {
|
|||
return err
|
||||
}
|
||||
|
||||
w.emitters.EvtPeerTopic, err = w.events.Emitter(new(EvtPeerTopic))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.log.Info("Relay protocol started")
|
||||
return nil
|
||||
}
|
||||
|
@ -244,8 +262,8 @@ func (w *WakuRelay) PubSub() *pubsub.PubSub {
|
|||
|
||||
// Topics returns a list of all the pubsub topics currently subscribed to
|
||||
func (w *WakuRelay) Topics() []string {
|
||||
defer w.topicsMutex.Unlock()
|
||||
w.topicsMutex.Lock()
|
||||
defer w.topicsMutex.RUnlock()
|
||||
w.topicsMutex.RLock()
|
||||
|
||||
var result []string
|
||||
for topic := range w.relaySubs {
|
||||
|
@ -256,8 +274,8 @@ func (w *WakuRelay) Topics() []string {
|
|||
|
||||
// IsSubscribed indicates whether the node is subscribed to a pubsub topic or not
|
||||
func (w *WakuRelay) IsSubscribed(topic string) bool {
|
||||
defer w.topicsMutex.Unlock()
|
||||
w.topicsMutex.Lock()
|
||||
w.topicsMutex.RLock()
|
||||
defer w.topicsMutex.RUnlock()
|
||||
_, ok := w.relaySubs[topic]
|
||||
return ok
|
||||
}
|
||||
|
@ -273,6 +291,11 @@ func (w *WakuRelay) upsertTopic(topic string) (*pubsub.Topic, error) {
|
|||
|
||||
pubSubTopic, ok := w.wakuRelayTopics[topic]
|
||||
if !ok { // Joins topic if node hasn't joined yet
|
||||
err := w.pubsub.RegisterTopicValidator(topic, w.topicValidator(topic))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newTopic, err := w.pubsub.Join(string(topic))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -302,15 +325,20 @@ func (w *WakuRelay) subscribe(topic string) (subs *pubsub.Subscription, err erro
|
|||
return nil, err
|
||||
}
|
||||
|
||||
evtHandler, err := w.addPeerTopicEventListener(pubSubTopic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
w.topicEvtHanders[topic] = evtHandler
|
||||
w.relaySubs[topic] = sub
|
||||
|
||||
err = w.emitters.EvtRelaySubscribed.Emit(EvtRelaySubscribed{topic})
|
||||
err = w.emitters.EvtRelaySubscribed.Emit(EvtRelaySubscribed{topic, pubSubTopic})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if w.bcaster != nil {
|
||||
w.wg.Add(1)
|
||||
w.WaitGroup().Add(1)
|
||||
go w.subscribeToTopic(topic, sub)
|
||||
}
|
||||
w.log.Info("subscribing to topic", zap.String("topic", sub.Topic()))
|
||||
|
@ -364,15 +392,11 @@ func (w *WakuRelay) Publish(ctx context.Context, message *pb.WakuMessage) ([]byt
|
|||
|
||||
// Stop unmounts the relay protocol and stops all subscriptions
|
||||
func (w *WakuRelay) Stop() {
|
||||
if w.cancel == nil {
|
||||
return // Not started
|
||||
}
|
||||
|
||||
w.host.RemoveStreamHandler(WakuRelayID_v200)
|
||||
w.emitters.EvtRelaySubscribed.Close()
|
||||
w.emitters.EvtRelayUnsubscribed.Close()
|
||||
w.cancel()
|
||||
w.wg.Wait()
|
||||
w.CommonService.Stop(func() {
|
||||
w.host.RemoveStreamHandler(WakuRelayID_v200)
|
||||
w.emitters.EvtRelaySubscribed.Close()
|
||||
w.emitters.EvtRelayUnsubscribed.Close()
|
||||
})
|
||||
}
|
||||
|
||||
// EnoughPeersToPublish returns whether there are enough peers connected in the default waku pubsub topic
|
||||
|
@ -404,13 +428,16 @@ func (w *WakuRelay) SubscribeToTopic(ctx context.Context, topic string) (*Subscr
|
|||
return &subscription, nil
|
||||
}
|
||||
|
||||
// SubscribeToTopic returns a Subscription to receive messages from the default waku pubsub topic
|
||||
// Subscribe returns a Subscription to receive messages from the default waku pubsub topic
|
||||
func (w *WakuRelay) Subscribe(ctx context.Context) (*Subscription, error) {
|
||||
return w.SubscribeToTopic(ctx, DefaultWakuTopic)
|
||||
}
|
||||
|
||||
// Unsubscribe closes a subscription to a pubsub topic
|
||||
func (w *WakuRelay) Unsubscribe(ctx context.Context, topic string) error {
|
||||
w.topicsMutex.Lock()
|
||||
defer w.topicsMutex.Unlock()
|
||||
|
||||
sub, ok := w.relaySubs[topic]
|
||||
if !ok {
|
||||
return fmt.Errorf("not subscribed to topic")
|
||||
|
@ -420,12 +447,20 @@ func (w *WakuRelay) Unsubscribe(ctx context.Context, topic string) error {
|
|||
w.relaySubs[topic].Cancel()
|
||||
delete(w.relaySubs, topic)
|
||||
|
||||
evtHandler, ok := w.topicEvtHanders[topic]
|
||||
if ok {
|
||||
evtHandler.Cancel()
|
||||
delete(w.topicEvtHanders, topic)
|
||||
}
|
||||
|
||||
err := w.wakuRelayTopics[topic].Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
delete(w.wakuRelayTopics, topic)
|
||||
|
||||
w.RemoveTopicValidator(topic)
|
||||
|
||||
err = w.emitters.EvtRelayUnsubscribed.Emit(EvtRelayUnsubscribed{topic})
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -454,12 +489,12 @@ func (w *WakuRelay) nextMessage(ctx context.Context, sub *pubsub.Subscription) <
|
|||
}
|
||||
|
||||
func (w *WakuRelay) subscribeToTopic(pubsubTopic string, sub *pubsub.Subscription) {
|
||||
defer w.wg.Done()
|
||||
defer w.WaitGroup().Done()
|
||||
|
||||
subChannel := w.nextMessage(w.ctx, sub)
|
||||
subChannel := w.nextMessage(w.Context(), sub)
|
||||
for {
|
||||
select {
|
||||
case <-w.ctx.Done():
|
||||
case <-w.Context().Done():
|
||||
return
|
||||
// TODO: if there are no more relay subscriptions, close the pubsub subscription
|
||||
case msg, ok := <-subChannel:
|
||||
|
@ -493,3 +528,46 @@ func (w *WakuRelay) Params() pubsub.GossipSubParams {
|
|||
func (w *WakuRelay) Events() event.Bus {
|
||||
return w.events
|
||||
}
|
||||
|
||||
func (w *WakuRelay) addPeerTopicEventListener(topic *pubsub.Topic) (*pubsub.TopicEventHandler, error) {
|
||||
handler, err := topic.EventHandler()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
w.WaitGroup().Add(1)
|
||||
go w.topicEventPoll(topic.String(), handler)
|
||||
return handler, nil
|
||||
}
|
||||
|
||||
func (w *WakuRelay) topicEventPoll(topic string, handler *pubsub.TopicEventHandler) {
|
||||
defer w.WaitGroup().Done()
|
||||
for {
|
||||
evt, err := handler.NextPeerEvent(w.Context())
|
||||
if err != nil {
|
||||
if err == context.Canceled {
|
||||
break
|
||||
}
|
||||
w.log.Error("failed to get next peer event", zap.String("topic", topic), zap.Error(err))
|
||||
continue
|
||||
}
|
||||
if evt.Peer.Validate() != nil { // An empty peer event is returned when the passed-in context is done.
|
||||
break
|
||||
}
|
||||
if evt.Type == pubsub.PeerJoin {
|
||||
w.log.Debug("received a PeerJoin event", zap.String("topic", topic), logging.HostID("peerID", evt.Peer))
|
||||
err = w.emitters.EvtPeerTopic.Emit(EvtPeerTopic{PubsubTopic: topic, PeerID: evt.Peer, State: PEER_JOINED})
|
||||
if err != nil {
|
||||
w.log.Error("failed to emit PeerJoin", zap.String("topic", topic), zap.Error(err))
|
||||
}
|
||||
} else if evt.Type == pubsub.PeerLeave {
|
||||
w.log.Debug("received a PeerLeave event", zap.String("topic", topic), logging.HostID("peerID", evt.Peer))
|
||||
err = w.emitters.EvtPeerTopic.Emit(EvtPeerTopic{PubsubTopic: topic, PeerID: evt.Peer, State: PEER_LEFT})
|
||||
if err != nil {
|
||||
w.log.Error("failed to emit PeerLeave", zap.String("topic", topic), zap.Error(err))
|
||||
}
|
||||
} else {
|
||||
w.log.Error("unknown event type received", zap.String("topic", topic),
|
||||
zap.Int("eventType", int(evt.Type)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -18,9 +18,9 @@ var brHmacDrbgPool = sync.Pool{New: func() interface{} {
|
|||
return hmacdrbg.NewHmacDrbg(256, seed, nil)
|
||||
}}
|
||||
|
||||
// GenerateRequestId generates a random 32 byte slice that can be used for
|
||||
// GenerateRequestID generates a random 32 byte slice that can be used for
|
||||
// creating requests in the filter, store and lightpush protocols
|
||||
func GenerateRequestId() []byte {
|
||||
func GenerateRequestID() []byte {
|
||||
rng := brHmacDrbgPool.Get().(*hmacdrbg.HmacDrbg)
|
||||
defer brHmacDrbgPool.Put(rng)
|
||||
|
||||
|
|
|
@ -26,7 +26,7 @@ const acceptableRootWindowSize = 5
|
|||
|
||||
type RegistrationHandler = func(tx *types.Transaction)
|
||||
|
||||
type SpamHandler = func(message *pb.WakuMessage) error
|
||||
type SpamHandler = func(msg *pb.WakuMessage, topic string) error
|
||||
|
||||
func toRLNSignal(wakuMessage *pb.WakuMessage) []byte {
|
||||
if wakuMessage == nil {
|
||||
|
|
|
@ -2,27 +2,27 @@
|
|||
"language": "Solidity",
|
||||
"sources": {
|
||||
"WakuRln.sol": {
|
||||
"urls": ["./waku-rln-contract/contracts/WakuRln.sol"]
|
||||
"urls": ["../../../../../libs/waku-rln-contract/contracts/WakuRln.sol"]
|
||||
},
|
||||
"WakuRlnRegistry.sol": {
|
||||
"urls": ["./waku-rln-contract/contracts/WakuRlnRegistry.sol"]
|
||||
"urls": ["../../../../../libs/waku-rln-contract/contracts/WakuRlnRegistry.sol"]
|
||||
},
|
||||
"rln-contract/PoseidonHasher.sol": {
|
||||
"urls": [
|
||||
"./waku-rln-contract/lib/rln-contract/contracts/PoseidonHasher.sol"
|
||||
"../../../../../libs/waku-rln-contract/lib/rln-contract/contracts/PoseidonHasher.sol"
|
||||
]
|
||||
},
|
||||
"rln-contract/RlnBase.sol": {
|
||||
"urls": ["./waku-rln-contract/lib/rln-contract/contracts/RlnBase.sol"]
|
||||
"urls": ["../../../../../libs/waku-rln-contract/lib/rln-contract/contracts/RlnBase.sol"]
|
||||
},
|
||||
"rln-contract/IVerifier.sol": {
|
||||
"urls": ["./waku-rln-contract/lib/rln-contract/contracts/IVerifier.sol"]
|
||||
"urls": ["../../../../../libs/waku-rln-contract/lib/rln-contract/contracts/IVerifier.sol"]
|
||||
},
|
||||
"openzeppelin-contracts/contracts/access/Ownable.sol": {
|
||||
"urls": ["./waku-rln-contract/lib/openzeppelin-contracts/contracts/access/Ownable.sol"]
|
||||
"urls": ["../../../../../libs/waku-rln-contract/lib/openzeppelin-contracts/contracts/access/Ownable.sol"]
|
||||
},
|
||||
"openzeppelin-contracts/contracts/utils/Context.sol": {
|
||||
"urls": ["./waku-rln-contract/lib/openzeppelin-contracts/contracts/utils/Context.sol"]
|
||||
"urls": ["../../../../../libs/waku-rln-contract/lib/openzeppelin-contracts/contracts/utils/Context.sol"]
|
||||
}
|
||||
},
|
||||
"settings": {
|
||||
|
|
109
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/dynamic/dynamic.go
generated
vendored
|
@ -3,6 +3,7 @@ package dynamic
|
|||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"sync"
|
||||
"time"
|
||||
|
@ -28,28 +29,26 @@ var RLNAppInfo = keystore.AppInfo{
|
|||
}
|
||||
|
||||
type DynamicGroupManager struct {
|
||||
rln *rln.RLN
|
||||
log *zap.Logger
|
||||
MembershipFetcher
|
||||
metrics Metrics
|
||||
|
||||
cancel context.CancelFunc
|
||||
wg sync.WaitGroup
|
||||
|
||||
identityCredential *rln.IdentityCredential
|
||||
membershipIndex rln.MembershipIndex
|
||||
|
||||
web3Config *web3.Config
|
||||
lastBlockProcessed uint64
|
||||
lastBlockProcessedMutex sync.RWMutex
|
||||
lastBlockProcessed uint64
|
||||
|
||||
eventHandler RegistrationEventHandler
|
||||
|
||||
appKeystore *keystore.AppKeystore
|
||||
keystorePassword string
|
||||
|
||||
rootTracker *group_manager.MerkleRootTracker
|
||||
appKeystore *keystore.AppKeystore
|
||||
keystorePassword string
|
||||
membershipIndexToLoad *uint
|
||||
}
|
||||
|
||||
func handler(gm *DynamicGroupManager, events []*contracts.RLNMemberRegistered) error {
|
||||
func (gm *DynamicGroupManager) handler(events []*contracts.RLNMemberRegistered) error {
|
||||
gm.lastBlockProcessedMutex.Lock()
|
||||
defer gm.lastBlockProcessedMutex.Unlock()
|
||||
|
||||
toRemoveTable := om.New()
|
||||
toInsertTable := om.New()
|
||||
|
||||
|
@ -57,17 +56,17 @@ func handler(gm *DynamicGroupManager, events []*contracts.RLNMemberRegistered) e
|
|||
for _, event := range events {
|
||||
if event.Raw.Removed {
|
||||
var indexes []uint
|
||||
i_idx, ok := toRemoveTable.Get(event.Raw.BlockNumber)
|
||||
iIdx, ok := toRemoveTable.Get(event.Raw.BlockNumber)
|
||||
if ok {
|
||||
indexes = i_idx.([]uint)
|
||||
indexes = iIdx.([]uint)
|
||||
}
|
||||
indexes = append(indexes, uint(event.Index.Uint64()))
|
||||
toRemoveTable.Set(event.Raw.BlockNumber, indexes)
|
||||
} else {
|
||||
var eventsPerBlock []*contracts.RLNMemberRegistered
|
||||
i_evt, ok := toInsertTable.Get(event.Raw.BlockNumber)
|
||||
iEvt, ok := toInsertTable.Get(event.Raw.BlockNumber)
|
||||
if ok {
|
||||
eventsPerBlock = i_evt.([]*contracts.RLNMemberRegistered)
|
||||
eventsPerBlock = iEvt.([]*contracts.RLNMemberRegistered)
|
||||
}
|
||||
eventsPerBlock = append(eventsPerBlock, event)
|
||||
toInsertTable.Set(event.Raw.BlockNumber, eventsPerBlock)
|
||||
|
@ -88,8 +87,6 @@ func handler(gm *DynamicGroupManager, events []*contracts.RLNMemberRegistered) e
|
|||
return err
|
||||
}
|
||||
|
||||
gm.metrics.RecordRegisteredMembership(toInsertTable.Len() - toRemoveTable.Len())
|
||||
|
||||
gm.lastBlockProcessed = lastBlockProcessed
|
||||
err = gm.SetMetadata(RLNMetadata{
|
||||
LastProcessedBlock: gm.lastBlockProcessed,
|
||||
|
@ -101,7 +98,7 @@ func handler(gm *DynamicGroupManager, events []*contracts.RLNMemberRegistered) e
|
|||
// this is not a fatal error, hence we don't raise an exception
|
||||
gm.log.Warn("failed to persist rln metadata", zap.Error(err))
|
||||
} else {
|
||||
gm.log.Debug("rln metadata persisted", zap.Uint64("lastProcessedBlock", gm.lastBlockProcessed), zap.Uint64("chainID", gm.web3Config.ChainID.Uint64()), logging.HexBytes("contractAddress", gm.web3Config.RegistryContract.Address.Bytes()))
|
||||
gm.log.Debug("rln metadata persisted", zap.Uint64("lastBlockProcessed", gm.lastBlockProcessed), zap.Uint64("chainID", gm.web3Config.ChainID.Uint64()), logging.HexBytes("contractAddress", gm.web3Config.RegistryContract.Address.Bytes()))
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -112,34 +109,43 @@ type RegistrationHandler = func(tx *types.Transaction)
|
|||
func NewDynamicGroupManager(
|
||||
ethClientAddr string,
|
||||
memContractAddr common.Address,
|
||||
membershipIndex uint,
|
||||
membershipIndexToLoad *uint,
|
||||
appKeystore *keystore.AppKeystore,
|
||||
keystorePassword string,
|
||||
reg prometheus.Registerer,
|
||||
rlnInstance *rln.RLN,
|
||||
rootTracker *group_manager.MerkleRootTracker,
|
||||
log *zap.Logger,
|
||||
) (*DynamicGroupManager, error) {
|
||||
log = log.Named("rln-dynamic")
|
||||
|
||||
web3Config := web3.NewConfig(ethClientAddr, memContractAddr)
|
||||
return &DynamicGroupManager{
|
||||
membershipIndex: membershipIndex,
|
||||
web3Config: web3.NewConfig(ethClientAddr, memContractAddr),
|
||||
eventHandler: handler,
|
||||
appKeystore: appKeystore,
|
||||
keystorePassword: keystorePassword,
|
||||
log: log,
|
||||
metrics: newMetrics(reg),
|
||||
membershipIndexToLoad: membershipIndexToLoad,
|
||||
appKeystore: appKeystore,
|
||||
keystorePassword: keystorePassword,
|
||||
MembershipFetcher: NewMembershipFetcher(web3Config, rlnInstance, rootTracker, log),
|
||||
metrics: newMetrics(reg),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (gm *DynamicGroupManager) getMembershipFee(ctx context.Context) (*big.Int, error) {
|
||||
return gm.web3Config.RLNContract.MEMBERSHIPDEPOSIT(&bind.CallOpts{Context: ctx})
|
||||
fee, err := gm.web3Config.RLNContract.MEMBERSHIPDEPOSIT(&bind.CallOpts{Context: ctx})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not retrieve the membership fee from the contract: %w", err)
|
||||
}
|
||||
return fee, nil
|
||||
}
|
||||
|
||||
func (gm *DynamicGroupManager) memberExists(ctx context.Context, idCommitment rln.IDCommitment) (bool, error) {
|
||||
return gm.web3Config.RLNContract.MemberExists(&bind.CallOpts{Context: ctx}, rln.Bytes32ToBigInt(idCommitment))
|
||||
exists, err := gm.web3Config.RLNContract.MemberExists(&bind.CallOpts{Context: ctx}, rln.Bytes32ToBigInt(idCommitment))
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("could not check if credential exists in contract: %w", err)
|
||||
}
|
||||
return exists, nil
|
||||
}
|
||||
|
||||
func (gm *DynamicGroupManager) Start(ctx context.Context, rlnInstance *rln.RLN, rootTracker *group_manager.MerkleRootTracker) error {
|
||||
func (gm *DynamicGroupManager) Start(ctx context.Context) error {
|
||||
if gm.cancel != nil {
|
||||
return errors.New("already started")
|
||||
}
|
||||
|
@ -154,9 +160,6 @@ func (gm *DynamicGroupManager) Start(ctx context.Context, rlnInstance *rln.RLN,
|
|||
return err
|
||||
}
|
||||
|
||||
gm.rln = rlnInstance
|
||||
gm.rootTracker = rootTracker
|
||||
|
||||
// check if the contract exists by calling a static function
|
||||
_, err = gm.getMembershipFee(ctx)
|
||||
if err != nil {
|
||||
|
@ -168,19 +171,26 @@ func (gm *DynamicGroupManager) Start(ctx context.Context, rlnInstance *rln.RLN,
|
|||
return err
|
||||
}
|
||||
|
||||
if err = gm.HandleGroupUpdates(ctx, gm.eventHandler); err != nil {
|
||||
err = gm.MembershipFetcher.HandleGroupUpdates(ctx, gm.handler)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gm.metrics.RecordRegisteredMembership(gm.rln.LeavesSet())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gm *DynamicGroupManager) loadCredential(ctx context.Context) error {
|
||||
if gm.appKeystore == nil {
|
||||
gm.log.Warn("no credentials were loaded. Node will only validate messages, but wont be able to generate proofs and attach them to messages")
|
||||
return nil
|
||||
}
|
||||
start := time.Now()
|
||||
|
||||
credentials, err := gm.appKeystore.GetMembershipCredentials(
|
||||
gm.keystorePassword,
|
||||
gm.membershipIndex,
|
||||
gm.membershipIndexToLoad,
|
||||
keystore.NewMembershipContractInfo(gm.web3Config.ChainID, gm.web3Config.RegistryContract.Address))
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -201,6 +211,7 @@ func (gm *DynamicGroupManager) loadCredential(ctx context.Context) error {
|
|||
}
|
||||
|
||||
gm.identityCredential = credentials.IdentityCredential
|
||||
gm.membershipIndex = credentials.TreeIndex
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -231,6 +242,8 @@ func (gm *DynamicGroupManager) InsertMembers(toInsert *om.OrderedMap) error {
|
|||
}
|
||||
gm.metrics.RecordMembershipInsertionDuration(time.Since(start))
|
||||
|
||||
gm.metrics.RecordRegisteredMembership(gm.rln.LeavesSet())
|
||||
|
||||
_, err = gm.rootTracker.UpdateLatestRoot(pair.Key.(uint64))
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -278,9 +291,29 @@ func (gm *DynamicGroupManager) Stop() error {
|
|||
return err
|
||||
}
|
||||
|
||||
gm.web3Config.ETHClient.Close()
|
||||
|
||||
gm.wg.Wait()
|
||||
gm.MembershipFetcher.Stop()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gm *DynamicGroupManager) IsReady(ctx context.Context) (bool, error) {
|
||||
latestBlockNumber, err := gm.latestBlockNumber(ctx)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("could not retrieve latest block: %w", err)
|
||||
}
|
||||
|
||||
gm.lastBlockProcessedMutex.RLock()
|
||||
allBlocksProcessed := gm.lastBlockProcessed >= latestBlockNumber
|
||||
gm.lastBlockProcessedMutex.RUnlock()
|
||||
|
||||
if !allBlocksProcessed {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
syncProgress, err := gm.web3Config.ETHClient.SyncProgress(ctx)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("could not retrieve sync state: %w", err)
|
||||
}
|
||||
|
||||
return syncProgress == nil, nil // syncProgress only has a value while node is syncing
|
||||
}
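IsReady only reports true once every on-chain block has been processed and the eth client is no longer syncing, so it is suited to readiness probes. A sketch of polling it through the group_manager.GroupManager interface; the interval and function name are arbitrary choices, not part of the package.

package example

import (
	"context"
	"time"

	"github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager"
)

// waitUntilReady polls any GroupManager (static or dynamic) until it reports ready.
func waitUntilReady(ctx context.Context, gm group_manager.GroupManager) error {
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	for {
		ready, err := gm.IsReady(ctx)
		if err != nil {
			return err
		}
		if ready {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}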
|
||||
|
|
246
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/dynamic/membership_fetcher.go
generated
vendored
Normal file
|
@ -0,0 +1,246 @@
|
|||
package dynamic
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/rln/contracts"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/rln/web3"
|
||||
"github.com/waku-org/go-zerokit-rln/rln"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// RegistrationEventHandler is the handler type for batches of MemberRegistered events, matching the event/proc defined in the MembershipContract interface
|
||||
type RegistrationEventHandler = func([]*contracts.RLNMemberRegistered) error
|
||||
|
||||
// MembershipFetcher is used for getting MemberRegistered events from the eth rpc
|
||||
type MembershipFetcher struct {
|
||||
web3Config *web3.Config
|
||||
rln *rln.RLN
|
||||
log *zap.Logger
|
||||
rootTracker *group_manager.MerkleRootTracker
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
func NewMembershipFetcher(web3Config *web3.Config, rln *rln.RLN, rootTracker *group_manager.MerkleRootTracker, log *zap.Logger) MembershipFetcher {
|
||||
return MembershipFetcher{
|
||||
web3Config: web3Config,
|
||||
rln: rln,
|
||||
log: log,
|
||||
rootTracker: rootTracker,
|
||||
}
|
||||
}
|
||||
|
||||
// HandleGroupUpdates mounts the supplied handler for the registration events emitting from the membership contract
|
||||
// It connects to the eth client, subscribes to the `MemberRegistered` event emitted from the `MembershipContract`
|
||||
// and collects all the events, for every received event, it calls the `handler`
|
||||
func (mf *MembershipFetcher) HandleGroupUpdates(ctx context.Context, handler RegistrationEventHandler) error {
|
||||
fromBlock := mf.web3Config.RLNContract.DeployedBlockNumber
|
||||
metadata, err := mf.GetMetadata()
|
||||
if err != nil {
|
||||
mf.log.Warn("could not load last processed block from metadata. Starting onchain sync from deployment block", zap.Error(err), zap.Uint64("deploymentBlock", mf.web3Config.RLNContract.DeployedBlockNumber))
|
||||
} else {
|
||||
if mf.web3Config.ChainID.Cmp(metadata.ChainID) != 0 {
|
||||
return errors.New("persisted data: chain id mismatch")
|
||||
}
|
||||
|
||||
if !bytes.Equal(mf.web3Config.RegistryContract.Address.Bytes(), metadata.ContractAddress.Bytes()) {
|
||||
return errors.New("persisted data: contract address mismatch")
|
||||
}
|
||||
|
||||
fromBlock = metadata.LastProcessedBlock + 1
|
||||
mf.log.Info("resuming onchain sync", zap.Uint64("fromBlock", fromBlock))
|
||||
}
|
||||
|
||||
mf.rootTracker.SetValidRootsPerBlock(metadata.ValidRootsPerBlock)
|
||||
//
|
||||
latestBlockNumber, err := mf.latestBlockNumber(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//
|
||||
|
||||
mf.log.Info("loading old events...")
|
||||
t := time.Now()
|
||||
err = mf.loadOldEvents(ctx, fromBlock, latestBlockNumber, handler)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mf.log.Info("events loaded", zap.Duration("timeToLoad", time.Since(t)))
|
||||
|
||||
errCh := make(chan error)
|
||||
|
||||
mf.wg.Add(1)
|
||||
go mf.watchNewEvents(ctx, latestBlockNumber+1, handler, errCh) // we have already fetched the events up to latestBlockNumber in loadOldEvents
|
||||
return <-errCh
|
||||
}
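A sketch of wiring the fetcher to a handler: `mf` is assumed to have been built with NewMembershipFetcher elsewhere, and the handler below only logs the batches it receives; field usage is limited to what the events expose in this diff (Index and Raw.BlockNumber).

package example

import (
	"context"
	"log"

	"github.com/waku-org/go-waku/waku/v2/protocol/rln/contracts"
	"github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/dynamic"
)

// syncMemberships replays historical registrations and then watches new heads.
func syncMemberships(ctx context.Context, mf *dynamic.MembershipFetcher) error {
	handler := func(events []*contracts.RLNMemberRegistered) error {
		for _, ev := range events {
			log.Printf("member index %s registered in block %d", ev.Index, ev.Raw.BlockNumber)
		}
		return nil
	}
	// Historical events are processed first, then new block headers are watched;
	// the call returns once the head subscription reports its first result.
	return mf.HandleGroupUpdates(ctx, handler)
}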
|
||||
|
||||
func (mf *MembershipFetcher) loadOldEvents(ctx context.Context, fromBlock, toBlock uint64, handler RegistrationEventHandler) error {
|
||||
for ; fromBlock+maxBatchSize < toBlock; fromBlock += maxBatchSize + 1 { // check if the end of the batch is within the toBlock range
|
||||
t1 := time.Now()
|
||||
events, err := mf.getEvents(ctx, fromBlock, fromBlock+maxBatchSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t1Since := time.Since(t1)
|
||||
|
||||
t2 := time.Now()
|
||||
if err := handler(events); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mf.log.Info("fetching events", zap.Uint64("from", fromBlock), zap.Uint64("to", fromBlock+maxBatchSize), zap.Int("numEvents", len(events)), zap.Duration("timeToFetch", t1Since), zap.Duration("timeToProcess", time.Since(t2)))
|
||||
}
|
||||
|
||||
t1 := time.Now()
|
||||
events, err := mf.getEvents(ctx, fromBlock, toBlock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t1Since := time.Since(t1)
|
||||
|
||||
// process all the fetched events
|
||||
t2 := time.Now()
|
||||
err = handler(events)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mf.log.Info("fetching events", zap.Uint64("from", fromBlock), zap.Uint64("to", fromBlock+maxBatchSize), zap.Int("numEvents", len(events)), zap.Duration("timeToFetch", t1Since), zap.Duration("timeToProcess", time.Since(t2)))
|
||||
return nil
|
||||
}
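For reference, the loop above walks full windows of maxBatchSize+1 blocks and the tail range is fetched after the loop. batchRanges below is a hypothetical helper, not part of the package, that reproduces exactly that arithmetic.

// batchRanges lists the [from, to] ranges loadOldEvents fetches. For
// fromBlock=0, toBlock=10012 and maxBatchSize=5000 it yields
// [0,5000], [5001,10001], [10002,10012].
func batchRanges(fromBlock, toBlock, maxBatchSize uint64) [][2]uint64 {
	var ranges [][2]uint64
	for ; fromBlock+maxBatchSize < toBlock; fromBlock += maxBatchSize + 1 {
		ranges = append(ranges, [2]uint64{fromBlock, fromBlock + maxBatchSize})
	}
	return append(ranges, [2]uint64{fromBlock, toBlock})
}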
|
||||
|
||||
func (mf *MembershipFetcher) watchNewEvents(ctx context.Context, fromBlock uint64, handler RegistrationEventHandler, errCh chan<- error) {
|
||||
defer mf.wg.Done()
|
||||
|
||||
// Watch for new events
|
||||
firstErr := true
|
||||
headerCh := make(chan *types.Header)
|
||||
subs := event.Resubscribe(2*time.Second, func(ctx context.Context) (event.Subscription, error) {
|
||||
s, err := mf.web3Config.ETHClient.SubscribeNewHead(ctx, headerCh)
|
||||
if err != nil {
|
||||
if err == rpc.ErrNotificationsUnsupported {
|
||||
err = errors.New("notifications not supported. The node must support websockets")
|
||||
}
|
||||
mf.log.Error("subscribing to rln events", zap.Error(err))
|
||||
}
|
||||
if firstErr { // errCh can be closed only once
|
||||
errCh <- err
|
||||
close(errCh)
|
||||
firstErr = false
|
||||
}
|
||||
return s, err
|
||||
})
|
||||
|
||||
defer subs.Unsubscribe()
|
||||
defer close(headerCh)
|
||||
|
||||
for {
|
||||
select {
|
||||
case h := <-headerCh:
|
||||
toBlock := h.Number.Uint64()
|
||||
events, err := mf.getEvents(ctx, fromBlock, toBlock)
|
||||
if err != nil {
|
||||
mf.log.Error("obtaining rln events", zap.Error(err))
|
||||
} else {
|
||||
// update the last processed block
|
||||
fromBlock = toBlock + 1
|
||||
}
|
||||
|
||||
err = handler(events)
|
||||
if err != nil {
|
||||
mf.log.Error("processing rln log", zap.Error(err))
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case err := <-subs.Err():
|
||||
if err != nil {
|
||||
mf.log.Error("watching new events", zap.Error(err))
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const maxBatchSize = uint64(5000)
|
||||
|
||||
func tooMuchDataRequestedError(err error) bool {
|
||||
// this error is only infura specific (other providers might have different error messages)
|
||||
return err.Error() == "query returned more than 10000 results"
|
||||
}
|
||||
|
||||
func (mf *MembershipFetcher) latestBlockNumber(ctx context.Context) (uint64, error) {
|
||||
block, err := mf.web3Config.ETHClient.BlockByNumber(ctx, nil)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return block.Number().Uint64(), nil
|
||||
}
|
||||
|
||||
func (mf *MembershipFetcher) getEvents(ctx context.Context, fromBlock uint64, toBlock uint64) ([]*contracts.RLNMemberRegistered, error) {
|
||||
evts, err := mf.fetchEvents(ctx, fromBlock, toBlock)
|
||||
if err != nil {
|
||||
if tooMuchDataRequestedError(err) { // divide the range and try again
|
||||
mid := (fromBlock + toBlock) / 2
|
||||
firstHalfEvents, err := mf.getEvents(ctx, fromBlock, mid)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
secondHalfEvents, err := mf.getEvents(ctx, mid+1, toBlock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return append(firstHalfEvents, secondHalfEvents...), nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return evts, nil
|
||||
}
|
||||
|
||||
func (mf *MembershipFetcher) fetchEvents(ctx context.Context, from uint64, to uint64) ([]*contracts.RLNMemberRegistered, error) {
|
||||
logIterator, err := mf.web3Config.RLNContract.FilterMemberRegistered(&bind.FilterOpts{Start: from, End: &to, Context: ctx})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var results []*contracts.RLNMemberRegistered
|
||||
|
||||
for {
|
||||
if !logIterator.Next() {
|
||||
break
|
||||
}
|
||||
|
||||
if logIterator.Error() != nil {
|
||||
return nil, logIterator.Error()
|
||||
}
|
||||
|
||||
results = append(results, logIterator.Event)
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// GetMetadata retrieves metadata from the zerokit's RLN database
|
||||
func (mf *MembershipFetcher) GetMetadata() (RLNMetadata, error) {
|
||||
b, err := mf.rln.GetMetadata()
|
||||
if err != nil {
|
||||
return RLNMetadata{}, err
|
||||
}
|
||||
|
||||
return DeserializeMetadata(b)
|
||||
}
|
||||
|
||||
func (mf *MembershipFetcher) Stop() {
|
||||
mf.web3Config.ETHClient.Close()
|
||||
// wait for the watchNewEvents goroutine to finish
|
||||
mf.wg.Wait()
|
||||
}
|
100
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/dynamic/membership_fetcher.json
generated
vendored
Normal file
|
@ -0,0 +1,100 @@
|
|||
{
|
||||
"blocks": {
|
||||
"5": [
|
||||
{
|
||||
"address": "0x0000000000000000000000000000000000000000",
|
||||
"topics": [
|
||||
"MemberRegistered(uint256,uint256)"
|
||||
],
|
||||
"data": [
|
||||
"bigint:1",
|
||||
"bigint:1"
|
||||
]
|
||||
}
|
||||
],
|
||||
"5005": [
|
||||
{
|
||||
"address": "0x0000000000000000000000000000000000000000",
|
||||
"topics": [
|
||||
"MemberRegistered(uint256,uint256)"
|
||||
],
|
||||
"data": [
|
||||
"bigint:2",
|
||||
"bigint:2"
|
||||
]
|
||||
}
|
||||
],
|
||||
"5006": [
|
||||
{
|
||||
"address": "0x0000000000000000000000000000000000000000",
|
||||
"topics": [
|
||||
"MemberRegistered(uint256,uint256)"
|
||||
],
|
||||
"data": [
|
||||
"bigint:3",
|
||||
"bigint:3"
|
||||
]
|
||||
}
|
||||
],
|
||||
"5007": [
|
||||
{
|
||||
"address": "0x0000000000000000000000000000000000000000",
|
||||
"topics": [
|
||||
"MemberRegistered(uint256,uint256)"
|
||||
],
|
||||
"data": [
|
||||
"bigint:4",
|
||||
"bigint:4"
|
||||
]
|
||||
}
|
||||
],
|
||||
"10005": [
|
||||
{
|
||||
"address": "0x0000000000000000000000000000000000000000",
|
||||
"topics": [
|
||||
"MemberRegistered(uint256,uint256)"
|
||||
],
|
||||
"data": [
|
||||
"bigint:5",
|
||||
"bigint:5"
|
||||
]
|
||||
}
|
||||
],
|
||||
"10010": [
|
||||
{
|
||||
"address": "0x0000000000000000000000000000000000000000",
|
||||
"topics": [
|
||||
"MemberRegistered(uint256,uint256)"
|
||||
],
|
||||
"data": [
|
||||
"bigint:6",
|
||||
"bigint:6"
|
||||
]
|
||||
}
|
||||
],
|
||||
"10011": [
|
||||
{
|
||||
"address": "0x0000000000000000000000000000000000000000",
|
||||
"topics": [
|
||||
"MemberRegistered(uint256,uint256)"
|
||||
],
|
||||
"data": [
|
||||
"bigint:7",
|
||||
"bigint:7"
|
||||
]
|
||||
}
|
||||
],
|
||||
"10012": [
|
||||
{
|
||||
"address": "0x0000000000000000000000000000000000000000",
|
||||
"topics": [
|
||||
"MemberRegistered(uint256,uint256)"
|
||||
],
|
||||
"data": [
|
||||
"bigint:8",
|
||||
"bigint:8"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
|
@ -79,13 +79,3 @@ func (gm *DynamicGroupManager) SetMetadata(meta RLNMetadata) error {
|
|||
b := meta.Serialize()
|
||||
return gm.rln.SetMetadata(b)
|
||||
}
|
||||
|
||||
// GetMetadata retrieves metadata from the zerokit's RLN database
|
||||
func (gm *DynamicGroupManager) GetMetadata() (RLNMetadata, error) {
|
||||
b, err := gm.rln.GetMetadata()
|
||||
if err != nil {
|
||||
return RLNMetadata{}, err
|
||||
}
|
||||
|
||||
return DeserializeMetadata(b)
|
||||
}
|
||||
|
|
14
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/dynamic/metrics.go
generated
vendored
|
@ -7,8 +7,8 @@ import (
|
|||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
var numberRegisteredMemberships = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
var numberRegisteredMemberships = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "waku_rln_number_registered_memberships",
|
||||
Help: "number of registered and active rln memberships",
|
||||
})
|
||||
|
@ -33,7 +33,7 @@ var collectors = []prometheus.Collector{
|
|||
|
||||
// Metrics exposes the functions required to update prometheus metrics for lightpush protocol
|
||||
type Metrics interface {
|
||||
RecordRegisteredMembership(num int)
|
||||
RecordRegisteredMembership(num uint)
|
||||
RecordMembershipInsertionDuration(duration time.Duration)
|
||||
RecordMembershipCredentialsImportDuration(duration time.Duration)
|
||||
}
|
||||
|
@ -60,10 +60,6 @@ func (m *metricsImpl) RecordMembershipCredentialsImportDuration(duration time.Du
|
|||
}
|
||||
|
||||
// RecordRegisteredMembership records the number of registered memberships
|
||||
func (m *metricsImpl) RecordRegisteredMembership(num int) {
|
||||
if num < 0 {
|
||||
return
|
||||
}
|
||||
|
||||
numberRegisteredMemberships.Add(float64(num))
|
||||
func (m *metricsImpl) RecordRegisteredMembership(num uint) {
|
||||
numberRegisteredMemberships.Set(float64(num))
|
||||
}
|
||||
|
|
81
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/dynamic/mock_blockchain.go
generated
vendored
Normal file
|
@ -0,0 +1,81 @@
|
|||
package dynamic
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
)
|
||||
|
||||
// MockBlockChain is currently a chain of events for different block numbers
|
||||
// it is used internally by the mock client to return events for a given block number or range in the FilterLogs rpc call.
|
||||
type MockBlockChain struct {
|
||||
Blocks map[int64]*MockBlock `json:"blocks"`
|
||||
}
|
||||
|
||||
type MockBlock []MockEvent
|
||||
|
||||
func containsEntry[T common.Hash | common.Address](topics []T, topicA T) bool {
|
||||
for _, topic := range topics {
|
||||
if topic == topicA {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func Topic(topic string) common.Hash {
|
||||
return crypto.Keccak256Hash([]byte(topic))
|
||||
}
|
||||
func (b MockBlock) getLogs(blockNum uint64, addrs []common.Address, topicA []common.Hash) (txLogs []types.Log) {
|
||||
for ind, event := range b {
|
||||
txLog := event.GetLog()
|
||||
if containsEntry(addrs, txLog.Address) && (len(topicA) == 0 || containsEntry(topicA, txLog.Topics[0])) {
|
||||
txLog.BlockNumber = blockNum
|
||||
txLog.Index = uint(ind)
|
||||
txLogs = append(txLogs, txLog)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type MockEvent struct {
|
||||
Address common.Address `json:"address"`
|
||||
Topics []string `json:"topics"`
|
||||
Txhash common.Hash `json:"txhash"`
|
||||
Data []string `json:"data"`
|
||||
}
|
||||
|
||||
func (e MockEvent) GetLog() types.Log {
|
||||
topics := []common.Hash{Topic(e.Topics[0])}
|
||||
for _, topic := range e.Topics[1:] {
|
||||
topics = append(topics, parseData(topic))
|
||||
}
|
||||
//
|
||||
var data []byte
|
||||
for _, entry := range e.Data {
|
||||
data = append(data, parseData(entry).Bytes()...)
|
||||
}
|
||||
return types.Log{
|
||||
Address: e.Address,
|
||||
Topics: topics,
|
||||
TxHash: e.Txhash,
|
||||
Data: data,
|
||||
}
|
||||
}
|
||||
|
||||
func parseData(data string) common.Hash {
|
||||
splits := strings.Split(data, ":")
|
||||
switch splits[0] {
|
||||
case "bigint":
|
||||
bigInt, ok := new(big.Int).SetString(splits[1], 10)
|
||||
if !ok {
|
||||
panic("invalid big int")
|
||||
}
|
||||
return common.BytesToHash(bigInt.Bytes())
|
||||
default:
|
||||
panic("invalid data type")
|
||||
}
|
||||
}
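The fixture encodes every event argument as a "bigint:N" string, and parseData turns each one into a 32-byte word. A sketch, written as if inside this package and with purely illustrative values, of building one fixture event in code rather than JSON:

// exampleMockEvent builds the equivalent of one membership_fetcher.json entry.
func exampleMockEvent() types.Log {
	ev := MockEvent{
		Address: common.HexToAddress("0x0000000000000000000000000000000000000000"),
		Topics:  []string{"MemberRegistered(uint256,uint256)"},
		Data:    []string{"bigint:1", "bigint:1"},
	}
	// GetLog hashes the event signature into Topics[0] and concatenates the
	// two 32-byte words ("bigint:1", "bigint:1") into a 64-byte Data payload.
	return ev.GetLog()
}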
|
118
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/dynamic/mock_client.go
generated
vendored
Normal file
|
@ -0,0 +1,118 @@
|
|||
package dynamic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"math/big"
|
||||
"os"
|
||||
"sort"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethclient"
|
||||
)
|
||||
|
||||
type ErrCount struct {
|
||||
err error
|
||||
count int
|
||||
}
|
||||
|
||||
type MockClient struct {
|
||||
ethclient.Client
|
||||
blockChain MockBlockChain
|
||||
latestBlockNum atomic.Int64
|
||||
errOnBlock map[int64]*ErrCount
|
||||
}
|
||||
|
||||
func (c *MockClient) SetLatestBlockNumber(num int64) {
|
||||
c.latestBlockNum.Store(num)
|
||||
}
|
||||
|
||||
func (c *MockClient) Close() {
|
||||
|
||||
}
|
||||
func (c *MockClient) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) {
|
||||
return types.NewBlock(&types.Header{Number: big.NewInt(c.latestBlockNum.Load())}, nil, nil, nil, nil), nil
|
||||
}
|
||||
func NewMockClient(t *testing.T, blockFile string) *MockClient {
|
||||
blockChain := MockBlockChain{}
|
||||
data, err := os.ReadFile(blockFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := json.Unmarshal(data, &blockChain); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return &MockClient{blockChain: blockChain, errOnBlock: map[int64]*ErrCount{}}
|
||||
}
|
||||
|
||||
func (c *MockClient) SetErrorOnBlock(blockNum int64, err error, count int) {
|
||||
c.errOnBlock[blockNum] = &ErrCount{err: err, count: count}
|
||||
}
|
||||
|
||||
func (c *MockClient) getFromAndToRange(query ethereum.FilterQuery) (int64, int64) {
|
||||
var fromBlock int64
|
||||
if query.FromBlock == nil {
|
||||
fromBlock = 0
|
||||
} else {
|
||||
fromBlock = query.FromBlock.Int64()
|
||||
}
|
||||
|
||||
var toBlock int64
|
||||
if query.ToBlock == nil {
|
||||
toBlock = 0
|
||||
} else {
|
||||
toBlock = query.ToBlock.Int64()
|
||||
}
|
||||
return fromBlock, toBlock
|
||||
}
|
||||
func (c *MockClient) FilterLogs(ctx context.Context, query ethereum.FilterQuery) (allTxLogs []types.Log, err error) {
|
||||
fromBlock, toBlock := c.getFromAndToRange(query)
|
||||
for block, details := range c.blockChain.Blocks {
|
||||
if block >= fromBlock && block <= toBlock {
|
||||
if txLogs := details.getLogs(uint64(block), query.Addresses, query.Topics[0]); len(txLogs) != 0 {
|
||||
allTxLogs = append(allTxLogs, txLogs...)
|
||||
}
|
||||
if errCount, ok := c.errOnBlock[block]; ok && errCount.count != 0 {
|
||||
errCount.count--
|
||||
return nil, errCount.err
|
||||
}
|
||||
}
|
||||
}
|
||||
sort.Slice(allTxLogs, func(i, j int) bool {
|
||||
return allTxLogs[i].BlockNumber < allTxLogs[j].BlockNumber ||
|
||||
(allTxLogs[i].BlockNumber == allTxLogs[j].BlockNumber && allTxLogs[i].Index < allTxLogs[j].Index)
|
||||
})
|
||||
return allTxLogs, nil
|
||||
}
|
||||
|
||||
func (c *MockClient) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) {
|
||||
for {
|
||||
next := c.latestBlockNum.Load() + 1
|
||||
if c.blockChain.Blocks[next] != nil {
|
||||
ch <- &types.Header{Number: big.NewInt(next)}
|
||||
c.latestBlockNum.Store(next)
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
return testNoopSub{}, nil
|
||||
}
|
||||
|
||||
type testNoopSub struct {
|
||||
}
|
||||
|
||||
func (testNoopSub) Unsubscribe() {
|
||||
|
||||
}
|
||||
|
||||
// Err returns the subscription error channel. The error channel receives
|
||||
// a value if there is an issue with the subscription (e.g. the network connection
|
||||
// delivering the events has been closed). Only one value will ever be sent.
|
||||
// The error channel is closed by Unsubscribe.
|
||||
func (testNoopSub) Err() <-chan error {
|
||||
ch := make(chan error)
|
||||
return ch
|
||||
}
|
209
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/dynamic/web3.go
generated
vendored
|
@ -1,209 +0,0 @@
|
|||
package dynamic
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/rln/contracts"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// the types of inputs to this handler matches the MemberRegistered event/proc defined in the MembershipContract interface
|
||||
type RegistrationEventHandler = func(*DynamicGroupManager, []*contracts.RLNMemberRegistered) error
|
||||
|
||||
// HandleGroupUpdates mounts the supplied handler for the registration events emitting from the membership contract
|
||||
// It connects to the eth client, subscribes to the `MemberRegistered` event emitted from the `MembershipContract`
|
||||
// and collects all the events, for every received event, it calls the `handler`
|
||||
func (gm *DynamicGroupManager) HandleGroupUpdates(ctx context.Context, handler RegistrationEventHandler) error {
|
||||
fromBlock := gm.web3Config.RLNContract.DeployedBlockNumber
|
||||
metadata, err := gm.GetMetadata()
|
||||
if err != nil {
|
||||
gm.log.Warn("could not load last processed block from metadata. Starting onchain sync from deployment block", zap.Error(err), zap.Uint64("deploymentBlock", gm.web3Config.RLNContract.DeployedBlockNumber))
|
||||
} else {
|
||||
if gm.web3Config.ChainID.Cmp(metadata.ChainID) != 0 {
|
||||
return errors.New("persisted data: chain id mismatch")
|
||||
}
|
||||
|
||||
if !bytes.Equal(gm.web3Config.RegistryContract.Address.Bytes(), metadata.ContractAddress.Bytes()) {
|
||||
return errors.New("persisted data: contract address mismatch")
|
||||
}
|
||||
|
||||
fromBlock = metadata.LastProcessedBlock
|
||||
gm.log.Info("resuming onchain sync", zap.Uint64("fromBlock", fromBlock))
|
||||
}
|
||||
|
||||
gm.rootTracker.SetValidRootsPerBlock(metadata.ValidRootsPerBlock)
|
||||
|
||||
err = gm.loadOldEvents(ctx, fromBlock, handler)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
errCh := make(chan error)
|
||||
|
||||
gm.wg.Add(1)
|
||||
go gm.watchNewEvents(ctx, handler, gm.log, errCh)
|
||||
return <-errCh
|
||||
}
|
||||
|
||||
func (gm *DynamicGroupManager) loadOldEvents(ctx context.Context, fromBlock uint64, handler RegistrationEventHandler) error {
|
||||
events, err := gm.getEvents(ctx, fromBlock, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return handler(gm, events)
|
||||
}
|
||||
|
||||
func (gm *DynamicGroupManager) watchNewEvents(ctx context.Context, handler RegistrationEventHandler, log *zap.Logger, errCh chan<- error) {
|
||||
defer gm.wg.Done()
|
||||
|
||||
// Watch for new events
|
||||
firstErr := true
|
||||
headerCh := make(chan *types.Header)
|
||||
subs := event.Resubscribe(2*time.Second, func(ctx context.Context) (event.Subscription, error) {
|
||||
s, err := gm.web3Config.ETHClient.SubscribeNewHead(ctx, headerCh)
|
||||
if err != nil {
|
||||
if err == rpc.ErrNotificationsUnsupported {
|
||||
err = errors.New("notifications not supported. The node must support websockets")
|
||||
}
|
||||
if firstErr {
|
||||
errCh <- err
|
||||
}
|
||||
gm.log.Error("subscribing to rln events", zap.Error(err))
|
||||
}
|
||||
firstErr = false
|
||||
close(errCh)
|
||||
return s, err
|
||||
})
|
||||
|
||||
defer subs.Unsubscribe()
|
||||
defer close(headerCh)
|
||||
|
||||
for {
|
||||
select {
|
||||
case h := <-headerCh:
|
||||
blk := h.Number.Uint64()
|
||||
events, err := gm.getEvents(ctx, blk, &blk)
|
||||
if err != nil {
|
||||
gm.log.Error("obtaining rln events", zap.Error(err))
|
||||
}
|
||||
|
||||
err = handler(gm, events)
|
||||
if err != nil {
|
||||
gm.log.Error("processing rln log", zap.Error(err))
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case err := <-subs.Err():
|
||||
if err != nil {
|
||||
gm.log.Error("watching new events", zap.Error(err))
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const maxBatchSize = uint64(5000)
|
||||
const additiveFactorMultiplier = 0.10
|
||||
const multiplicativeDecreaseDivisor = 2
|
||||
|
||||
func tooMuchDataRequestedError(err error) bool {
|
||||
// this error is only infura specific (other providers might have different error messages)
|
||||
return err.Error() == "query returned more than 10000 results"
|
||||
}
|
||||
|
||||
func (gm *DynamicGroupManager) getEvents(ctx context.Context, from uint64, to *uint64) ([]*contracts.RLNMemberRegistered, error) {
|
||||
var results []*contracts.RLNMemberRegistered
|
||||
|
||||
// Adapted from prysm logic for fetching historical logs
|
||||
|
||||
toBlock := to
|
||||
if to == nil {
|
||||
block, err := gm.web3Config.ETHClient.BlockByNumber(ctx, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
blockNumber := block.Number().Uint64()
|
||||
toBlock = &blockNumber
|
||||
}
|
||||
|
||||
if from == *toBlock { // Only loading a single block
|
||||
return gm.fetchEvents(ctx, from, toBlock)
|
||||
}
|
||||
|
||||
// Fetching blocks in batches
|
||||
batchSize := maxBatchSize
|
||||
additiveFactor := uint64(float64(batchSize) * additiveFactorMultiplier)
|
||||
|
||||
currentBlockNum := from
|
||||
for currentBlockNum < *toBlock {
|
||||
start := currentBlockNum
|
||||
end := currentBlockNum + batchSize
|
||||
if end > *toBlock {
|
||||
end = *toBlock
|
||||
}
|
||||
|
||||
gm.log.Info("loading events...", zap.Uint64("fromBlock", start), zap.Uint64("toBlock", end))
|
||||
|
||||
evts, err := gm.fetchEvents(ctx, start, &end)
|
||||
if err != nil {
|
||||
if tooMuchDataRequestedError(err) {
|
||||
if batchSize == 0 {
|
||||
return nil, errors.New("batch size is zero")
|
||||
}
|
||||
|
||||
// multiplicative decrease
|
||||
batchSize = batchSize / multiplicativeDecreaseDivisor
|
||||
|
||||
gm.log.Warn("too many logs requested!, retrying with a smaller chunk size", zap.Uint64("batchSize", batchSize))
|
||||
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
results = append(results, evts...)
|
||||
|
||||
currentBlockNum = end
|
||||
|
||||
if batchSize < maxBatchSize {
|
||||
// update the batchSize with additive increase
|
||||
batchSize = batchSize + additiveFactor
|
||||
if batchSize > maxBatchSize {
|
||||
batchSize = maxBatchSize
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
func (gm *DynamicGroupManager) fetchEvents(ctx context.Context, from uint64, to *uint64) ([]*contracts.RLNMemberRegistered, error) {
|
||||
logIterator, err := gm.web3Config.RLNContract.FilterMemberRegistered(&bind.FilterOpts{Start: from, End: to, Context: ctx})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var results []*contracts.RLNMemberRegistered
|
||||
|
||||
for {
|
||||
if !logIterator.Next() {
|
||||
break
|
||||
}
|
||||
|
||||
if logIterator.Error() != nil {
|
||||
return nil, logIterator.Error()
|
||||
}
|
||||
|
||||
results = append(results, logIterator.Event)
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
21
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/group_manager.go
generated
vendored
|
@ -1 +1,22 @@
|
|||
package group_manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/waku-org/go-zerokit-rln/rln"
|
||||
)
|
||||
|
||||
type GroupManager interface {
|
||||
Start(ctx context.Context) error
|
||||
IdentityCredentials() (rln.IdentityCredential, error)
|
||||
MembershipIndex() rln.MembershipIndex
|
||||
Stop() error
|
||||
IsReady(ctx context.Context) (bool, error)
|
||||
}
|
||||
|
||||
type Details struct {
|
||||
GroupManager GroupManager
|
||||
RootTracker *MerkleRootTracker
|
||||
|
||||
RLN *rln.RLN
|
||||
}
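Any group manager only has to satisfy the five methods of the interface above. A no-op stub, written as if inside this package and purely illustrative, showing the minimal surface an implementation provides:

// noopGroupManager is an illustrative stub, not part of the package.
type noopGroupManager struct {
	cred  rln.IdentityCredential
	index rln.MembershipIndex
}

var _ GroupManager = (*noopGroupManager)(nil)

func (n *noopGroupManager) Start(ctx context.Context) error { return nil }
func (n *noopGroupManager) IdentityCredentials() (rln.IdentityCredential, error) {
	return n.cred, nil
}
func (n *noopGroupManager) MembershipIndex() rln.MembershipIndex      { return n.index }
func (n *noopGroupManager) Stop() error                               { return nil }
func (n *noopGroupManager) IsReady(ctx context.Context) (bool, error) { return true, nil }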
|
||||
|
|
13
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/static/static.go
generated
vendored
|
@ -25,6 +25,8 @@ func NewStaticGroupManager(
|
|||
group []rln.IDCommitment,
|
||||
identityCredential rln.IdentityCredential,
|
||||
index rln.MembershipIndex,
|
||||
rlnInstance *rln.RLN,
|
||||
rootTracker *group_manager.MerkleRootTracker,
|
||||
log *zap.Logger,
|
||||
) (*StaticGroupManager, error) {
|
||||
// check the peer's index and the inclusion of user's identity commitment in the group
|
||||
|
@ -37,15 +39,14 @@ func NewStaticGroupManager(
|
|||
group: group,
|
||||
identityCredential: &identityCredential,
|
||||
membershipIndex: index,
|
||||
rln: rlnInstance,
|
||||
rootTracker: rootTracker,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (gm *StaticGroupManager) Start(ctx context.Context, rlnInstance *rln.RLN, rootTracker *group_manager.MerkleRootTracker) error {
|
||||
func (gm *StaticGroupManager) Start(ctx context.Context) error {
|
||||
gm.log.Info("mounting rln-relay in off-chain/static mode")
|
||||
|
||||
gm.rln = rlnInstance
|
||||
gm.rootTracker = rootTracker
|
||||
|
||||
// add members to the Merkle tree
|
||||
|
||||
err := gm.insertMembers(gm.group)
|
||||
|
@ -94,3 +95,7 @@ func (gm *StaticGroupManager) Stop() error {
|
|||
// Do nothing
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gm *StaticGroupManager) IsReady(ctx context.Context) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
|
|
@ -15,21 +15,10 @@ import (
|
|||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// DefaultCredentialsFilename is the suggested default filename for the rln credentials keystore
|
||||
const DefaultCredentialsFilename = "./rlnKeystore.json"
|
||||
|
||||
// DefaultCredentialsPassword is the suggested default password for the rln credentials store
|
||||
const DefaultCredentialsPassword = "password"
|
||||
|
||||
// New creates a new instance of a rln credentials keystore
|
||||
func New(path string, appInfo AppInfo, logger *zap.Logger) (*AppKeystore, error) {
|
||||
logger = logger.Named("rln-keystore")
|
||||
|
||||
if path == "" {
|
||||
logger.Warn("keystore: no credentials path set, using default path", zap.String("path", DefaultCredentialsFilename))
|
||||
path = DefaultCredentialsFilename
|
||||
}
|
||||
|
||||
_, err := os.Stat(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
|
@ -81,10 +70,36 @@ func getKey(treeIndex rln.MembershipIndex, filterMembershipContract MembershipCo
|
|||
}
|
||||
|
||||
// GetMembershipCredentials decrypts and retrieves membership credentials from the keystore applying filters
|
||||
func (k *AppKeystore) GetMembershipCredentials(keystorePassword string, treeIndex rln.MembershipIndex, filterMembershipContract MembershipContractInfo) (*MembershipCredentials, error) {
|
||||
key, err := getKey(treeIndex, filterMembershipContract)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
func (k *AppKeystore) GetMembershipCredentials(keystorePassword string, index *rln.MembershipIndex, filterMembershipContract MembershipContractInfo) (*MembershipCredentials, error) {
|
||||
// If there is only one credential and the index to load is nil, load that one;
// if there is more than one, complain if the index to load is nil
|
||||
|
||||
var key Key
|
||||
var err error
|
||||
|
||||
if len(k.Credentials) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if len(k.Credentials) == 1 {
|
||||
// Only one credential, the tree index does not matter.
|
||||
k.logger.Warn("automatically loading the only credential found on the keystore")
|
||||
for k := range k.Credentials {
|
||||
key = k // Obtain the only credential key
|
||||
break
|
||||
}
|
||||
} else {
|
||||
treeIndex := uint(0)
|
||||
if index != nil {
|
||||
treeIndex = *index
|
||||
} else {
|
||||
return nil, errors.New("the index of the onchain commitment to use was not specified")
|
||||
}
|
||||
|
||||
key, err = getKey(treeIndex, filterMembershipContract)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
credential, ok := k.Credentials[key]
|
||||
|
@ -108,7 +123,7 @@ func (k *AppKeystore) GetMembershipCredentials(keystorePassword string, treeInde
|
|||
|
||||
// AddMembershipCredentials inserts a membership credential to the keystore matching the application, appIdentifier and version filters.
|
||||
func (k *AppKeystore) AddMembershipCredentials(newCredential MembershipCredentials, password string) error {
|
||||
credentials, err := k.GetMembershipCredentials(password, newCredential.TreeIndex, newCredential.MembershipContractInfo)
|
||||
credentials, err := k.GetMembershipCredentials(password, &newCredential.TreeIndex, newCredential.MembershipContractInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -118,7 +133,7 @@ func (k *AppKeystore) AddMembershipCredentials(newCredential MembershipCredentia
|
|||
return err
|
||||
}
|
||||
|
||||
if credentials != nil {
|
||||
if credentials != nil && credentials.TreeIndex == newCredential.TreeIndex && credentials.MembershipContractInfo.Equals(newCredential.MembershipContractInfo) {
|
||||
return errors.New("credential already present")
|
||||
}
|
||||
|
||||
|
|
120
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/nullifier_log.go
generated
vendored
Normal file
|
@ -0,0 +1,120 @@
|
|||
package rln
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/waku-org/go-zerokit-rln/rln"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// NullifierLog is the log of nullifiers and Shamir shares of the past messages grouped per epoch
|
||||
type NullifierLog struct {
|
||||
sync.RWMutex
|
||||
|
||||
log *zap.Logger
|
||||
nullifierLog map[rln.Nullifier][]rln.ProofMetadata // Might make sense to replace this map by a shrinkable map due to https://github.com/golang/go/issues/20135.
|
||||
nullifierQueue []rln.Nullifier
|
||||
}
|
||||
|
||||
// NewNullifierLog creates an instance of NullifierLog
|
||||
func NewNullifierLog(ctx context.Context, log *zap.Logger) *NullifierLog {
|
||||
result := &NullifierLog{
|
||||
nullifierLog: make(map[rln.Nullifier][]rln.ProofMetadata),
|
||||
log: log,
|
||||
}
|
||||
|
||||
go result.cleanup(ctx)
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
var errAlreadyExists = errors.New("proof already exists")
|
||||
|
||||
// Insert stores a proof in the nullifier log only if it doesn't exist already
|
||||
func (n *NullifierLog) Insert(proofMD rln.ProofMetadata) error {
|
||||
n.Lock()
|
||||
defer n.Unlock()
|
||||
|
||||
proofs, ok := n.nullifierLog[proofMD.ExternalNullifier]
|
||||
if ok {
|
||||
// check if an identical record exists
|
||||
for _, p := range proofs {
|
||||
if p.Equals(proofMD) {
|
||||
// TODO: slashing logic
|
||||
return errAlreadyExists
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
n.nullifierLog[proofMD.ExternalNullifier] = append(proofs, proofMD)
|
||||
n.nullifierQueue = append(n.nullifierQueue, proofMD.ExternalNullifier)
|
||||
return nil
|
||||
}
|
||||
|
||||
// HasDuplicate returns true if there is another message in the `nullifierLog` with the same
|
||||
// epoch and nullifier as `msg`'s epoch and nullifier but different Shamir secret shares
|
||||
// otherwise, returns false
|
||||
func (n *NullifierLog) HasDuplicate(proofMD rln.ProofMetadata) (bool, error) {
|
||||
n.RLock()
|
||||
defer n.RUnlock()
|
||||
|
||||
proofs, ok := n.nullifierLog[proofMD.ExternalNullifier]
|
||||
if !ok {
|
||||
// epoch does not exist
|
||||
return false, nil
|
||||
}
|
||||
|
||||
for _, p := range proofs {
|
||||
if p.Equals(proofMD) {
|
||||
// there is an identical record, ignore the msg
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// check for a message with the same nullifier but different secret shares
|
||||
matched := false
|
||||
for _, it := range proofs {
|
||||
if bytes.Equal(it.Nullifier[:], proofMD.Nullifier[:]) && (!bytes.Equal(it.ShareX[:], proofMD.ShareX[:]) || !bytes.Equal(it.ShareY[:], proofMD.ShareY[:])) {
|
||||
matched = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return matched, nil
|
||||
}
|
||||
|
||||
// cleanup cleans up the log every time there are more than MaxEpochGap epochs stored in it
|
||||
func (n *NullifierLog) cleanup(ctx context.Context) {
|
||||
t := time.NewTicker(1 * time.Minute) // TODO: tune this
|
||||
defer t.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
|
||||
case <-t.C:
|
||||
func() {
|
||||
n.Lock()
|
||||
defer n.Unlock()
|
||||
|
||||
if int64(len(n.nullifierQueue)) < maxEpochGap {
|
||||
return
|
||||
}
|
||||
|
||||
n.log.Debug("clearing epochs from the nullifier log", zap.Int64("count", maxEpochGap))
|
||||
|
||||
toDelete := n.nullifierQueue[0:maxEpochGap]
|
||||
for _, l := range toDelete {
|
||||
delete(n.nullifierLog, l)
|
||||
}
|
||||
n.nullifierQueue = n.nullifierQueue[maxEpochGap:]
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
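An illustrative use of the new NullifierLog type introduced above; the import alias, logger, and the source of proofMD are assumptions, and only methods shown in the new file (Insert, HasDuplicate) are exercised.

package main

import (
	"context"
	"errors"

	wakurln "github.com/waku-org/go-waku/waku/v2/protocol/rln"
	"github.com/waku-org/go-zerokit-rln/rln"
	"go.uber.org/zap"
)

func checkProof(ctx context.Context, logger *zap.Logger, proofMD rln.ProofMetadata) error {
	nlog := wakurln.NewNullifierLog(ctx, logger) // also starts the periodic cleanup goroutine

	// Detect rate-limit violations: same epoch nullifier, different Shamir shares.
	hasDup, err := nlog.HasDuplicate(proofMD)
	if err != nil {
		return err
	}
	if hasDup {
		return errors.New("spam: nullifier reused with different shares")
	}

	// Record the proof; an identical record makes Insert return an error.
	return nlog.Insert(proofMD)
}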
@ -1,17 +1,12 @@
|
|||
package rln
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/waku-org/go-waku/logging"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
|
@ -19,48 +14,26 @@ import (
|
|||
"github.com/waku-org/go-waku/waku/v2/timesource"
|
||||
"github.com/waku-org/go-zerokit-rln/rln"
|
||||
"go.uber.org/zap"
|
||||
proto "google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
type GroupManager interface {
|
||||
Start(ctx context.Context, rln *rln.RLN, rootTracker *group_manager.MerkleRootTracker) error
|
||||
IdentityCredentials() (rln.IdentityCredential, error)
|
||||
MembershipIndex() rln.MembershipIndex
|
||||
Stop() error
|
||||
}
|
||||
|
||||
type WakuRLNRelay struct {
|
||||
timesource timesource.Timesource
|
||||
metrics Metrics
|
||||
|
||||
groupManager GroupManager
|
||||
rootTracker *group_manager.MerkleRootTracker
|
||||
group_manager.Details
|
||||
|
||||
RLN *rln.RLN
|
||||
|
||||
// the log of nullifiers and Shamir shares of the past messages grouped per epoch
|
||||
nullifierLogLock sync.RWMutex
|
||||
nullifierLog map[rln.Nullifier][]rln.ProofMetadata
|
||||
nullifierLog *NullifierLog
|
||||
|
||||
log *zap.Logger
|
||||
}
|
||||
|
||||
const rlnDefaultTreePath = "./rln_tree.db"
|
||||
|
||||
func New(
|
||||
groupManager GroupManager,
|
||||
treePath string,
|
||||
timesource timesource.Timesource,
|
||||
reg prometheus.Registerer,
|
||||
log *zap.Logger) (*WakuRLNRelay, error) {
|
||||
|
||||
func GetRLNInstanceAndRootTracker(treePath string) (*rln.RLN, *group_manager.MerkleRootTracker, error) {
|
||||
if treePath == "" {
|
||||
treePath = rlnDefaultTreePath
|
||||
}
|
||||
|
||||
metrics := newMetrics(reg)
|
||||
|
||||
start := time.Now()
|
||||
rlnInstance, err := rln.NewWithConfig(rln.DefaultTreeDepth, &rln.TreeConfig{
|
||||
CacheCapacity: 15000,
|
||||
Mode: rln.HighThroughput,
|
||||
|
@ -69,31 +42,36 @@ func New(
|
|||
Path: treePath,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
metrics.RecordInstanceCreation(time.Since(start))
|
||||
|
||||
rootTracker, err := group_manager.NewMerkleRootTracker(acceptableRootWindowSize, rlnInstance)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
return rlnInstance, rootTracker, nil
|
||||
}
|
||||
func New(
|
||||
Details group_manager.Details,
|
||||
timesource timesource.Timesource,
|
||||
reg prometheus.Registerer,
|
||||
log *zap.Logger) *WakuRLNRelay {
|
||||
|
||||
// create the WakuRLNRelay
|
||||
rlnPeer := &WakuRLNRelay{
|
||||
RLN: rlnInstance,
|
||||
groupManager: groupManager,
|
||||
rootTracker: rootTracker,
|
||||
metrics: metrics,
|
||||
log: log,
|
||||
timesource: timesource,
|
||||
nullifierLog: make(map[rln.MerkleNode][]rln.ProofMetadata),
|
||||
Details: Details,
|
||||
metrics: newMetrics(reg),
|
||||
log: log,
|
||||
timesource: timesource,
|
||||
}
|
||||
|
||||
return rlnPeer, nil
|
||||
return rlnPeer
|
||||
}
|
||||
|
||||
func (rlnRelay *WakuRLNRelay) Start(ctx context.Context) error {
|
||||
err := rlnRelay.groupManager.Start(ctx, rlnRelay.RLN, rlnRelay.rootTracker)
|
||||
rlnRelay.nullifierLog = NewNullifierLog(ctx, rlnRelay.log)
|
||||
|
||||
err := rlnRelay.GroupManager.Start(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -105,66 +83,7 @@ func (rlnRelay *WakuRLNRelay) Start(ctx context.Context) error {
|
|||
|
||||
// Stop will stop any operation or goroutine started while using WakuRLNRelay
|
||||
func (rlnRelay *WakuRLNRelay) Stop() error {
|
||||
return rlnRelay.groupManager.Stop()
|
||||
}
|
||||
|
||||
func (rlnRelay *WakuRLNRelay) HasDuplicate(proofMD rln.ProofMetadata) (bool, error) {
|
||||
// returns true if there is another message in the `nullifierLog` of the `rlnPeer` with the same
|
||||
// epoch and nullifier as `msg`'s epoch and nullifier but different Shamir secret shares
|
||||
// otherwise, returns false
|
||||
|
||||
rlnRelay.nullifierLogLock.RLock()
|
||||
proofs, ok := rlnRelay.nullifierLog[proofMD.ExternalNullifier]
|
||||
rlnRelay.nullifierLogLock.RUnlock()
|
||||
|
||||
// check if the epoch exists
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
for _, p := range proofs {
|
||||
if p.Equals(proofMD) {
|
||||
// there is an identical record, ignore the msg
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// check for a message with the same nullifier but different secret shares
|
||||
matched := false
|
||||
for _, it := range proofs {
|
||||
if bytes.Equal(it.Nullifier[:], proofMD.Nullifier[:]) && (!bytes.Equal(it.ShareX[:], proofMD.ShareX[:]) || !bytes.Equal(it.ShareY[:], proofMD.ShareY[:])) {
|
||||
matched = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return matched, nil
|
||||
}
|
||||
|
||||
func (rlnRelay *WakuRLNRelay) updateLog(proofMD rln.ProofMetadata) (bool, error) {
|
||||
rlnRelay.nullifierLogLock.Lock()
|
||||
defer rlnRelay.nullifierLogLock.Unlock()
|
||||
proofs, ok := rlnRelay.nullifierLog[proofMD.ExternalNullifier]
|
||||
|
||||
// check if the epoch exists
|
||||
if !ok {
|
||||
rlnRelay.nullifierLog[proofMD.ExternalNullifier] = []rln.ProofMetadata{proofMD}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// check if an identical record exists
|
||||
for _, p := range proofs {
|
||||
if p.Equals(proofMD) {
|
||||
// TODO: slashing logic
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// add proofMD to the log
|
||||
proofs = append(proofs, proofMD)
|
||||
rlnRelay.nullifierLog[proofMD.ExternalNullifier] = proofs
|
||||
|
||||
return true, nil
|
||||
return rlnRelay.GroupManager.Stop()
|
||||
}
|
||||
|
||||
// ValidateMessage validates the supplied message based on the waku-rln-relay routing protocol i.e.,
|
||||
|
@ -173,7 +92,6 @@ func (rlnRelay *WakuRLNRelay) updateLog(proofMD rln.ProofMetadata) (bool, error)
|
|||
// the message's does not violate the rate limit
|
||||
// if `optionalTime` is supplied, then the current epoch is calculated based on that, otherwise the current time will be used
|
||||
func (rlnRelay *WakuRLNRelay) ValidateMessage(msg *pb.WakuMessage, optionalTime *time.Time) (messageValidationResult, error) {
|
||||
//
|
||||
if msg == nil {
|
||||
return validationError, errors.New("nil message")
|
||||
}
|
||||
|
@ -214,7 +132,7 @@ func (rlnRelay *WakuRLNRelay) ValidateMessage(msg *pb.WakuMessage, optionalTime
|
|||
return invalidMessage, nil
|
||||
}
|
||||
|
||||
if !(rlnRelay.rootTracker.ContainsRoot(msgProof.MerkleRoot)) {
|
||||
if !(rlnRelay.RootTracker.ContainsRoot(msgProof.MerkleRoot)) {
|
||||
rlnRelay.log.Debug("invalid message: unexpected root", logging.HexBytes("msgRoot", msg.RateLimitProof.MerkleRoot))
|
||||
rlnRelay.metrics.RecordInvalidMessage(invalidRoot)
|
||||
return invalidMessage, nil
|
||||
|
@ -237,7 +155,7 @@ func (rlnRelay *WakuRLNRelay) ValidateMessage(msg *pb.WakuMessage, optionalTime
|
|||
}
|
||||
|
||||
// check if double messaging has happened
|
||||
hasDup, err := rlnRelay.HasDuplicate(proofMD)
|
||||
hasDup, err := rlnRelay.nullifierLog.HasDuplicate(proofMD)
|
||||
if err != nil {
|
||||
rlnRelay.log.Debug("validation error", zap.Error(err))
|
||||
rlnRelay.metrics.RecordError(duplicateCheckErr)
|
||||
|
@ -249,10 +167,7 @@ func (rlnRelay *WakuRLNRelay) ValidateMessage(msg *pb.WakuMessage, optionalTime
|
|||
return spamMessage, nil
|
||||
}
|
||||
|
||||
// insert the message to the log
|
||||
// the result of `updateLog` is discarded because message insertion is guaranteed by the implementation i.e.,
|
||||
// it will never error out
|
||||
_, err = rlnRelay.updateLog(proofMD)
|
||||
err = rlnRelay.nullifierLog.Insert(proofMD)
|
||||
if err != nil {
|
||||
rlnRelay.log.Debug("could not insert proof into log")
|
||||
rlnRelay.metrics.RecordError(logInsertionErr)
|
||||
|
@ -261,7 +176,7 @@ func (rlnRelay *WakuRLNRelay) ValidateMessage(msg *pb.WakuMessage, optionalTime
|
|||
|
||||
rlnRelay.log.Debug("message is valid")
|
||||
|
||||
rootIndex := rlnRelay.rootTracker.IndexOf(msgProof.MerkleRoot)
|
||||
rootIndex := rlnRelay.RootTracker.IndexOf(msgProof.MerkleRoot)
|
||||
rlnRelay.metrics.RecordValidMessages(rootIndex)
|
||||
|
||||
return validMessage, nil
|
||||
|
@ -270,7 +185,7 @@ func (rlnRelay *WakuRLNRelay) ValidateMessage(msg *pb.WakuMessage, optionalTime
|
|||
func (rlnRelay *WakuRLNRelay) verifyProof(msg *pb.WakuMessage, proof *rln.RateLimitProof) (bool, error) {
|
||||
contentTopicBytes := []byte(msg.ContentTopic)
|
||||
input := append(msg.Payload, contentTopicBytes...)
|
||||
return rlnRelay.RLN.Verify(input, *proof, rlnRelay.rootTracker.Roots()...)
|
||||
return rlnRelay.RLN.Verify(input, *proof, rlnRelay.RootTracker.Roots()...)
|
||||
}
|
||||
|
||||
func (rlnRelay *WakuRLNRelay) AppendRLNProof(msg *pb.WakuMessage, senderEpochTime time.Time) error {
|
||||
|
@ -299,64 +214,61 @@ func (rlnRelay *WakuRLNRelay) AppendRLNProof(msg *pb.WakuMessage, senderEpochTim
|
|||
// Validator returns a validator for the waku messages.
|
||||
// The message validation logic is according to https://rfc.vac.dev/spec/17/
|
||||
func (rlnRelay *WakuRLNRelay) Validator(
|
||||
spamHandler SpamHandler) func(ctx context.Context, peerID peer.ID, message *pubsub.Message) bool {
|
||||
return func(ctx context.Context, peerID peer.ID, message *pubsub.Message) bool {
|
||||
rlnRelay.log.Debug("rln-relay topic validator called")
|
||||
spamHandler SpamHandler) func(ctx context.Context, msg *pb.WakuMessage, topic string) bool {
|
||||
return func(ctx context.Context, msg *pb.WakuMessage, topic string) bool {
|
||||
|
||||
hash := msg.Hash(topic)
|
||||
|
||||
log := rlnRelay.log.With(
|
||||
logging.HexBytes("hash", hash),
|
||||
zap.String("pubsubTopic", topic),
|
||||
zap.String("contentTopic", msg.ContentTopic),
|
||||
)
|
||||
|
||||
log.Debug("rln-relay topic validator called")
|
||||
|
||||
rlnRelay.metrics.RecordMessage()
|
||||
|
||||
wakuMessage := &pb.WakuMessage{}
|
||||
if err := proto.Unmarshal(message.Data, wakuMessage); err != nil {
|
||||
rlnRelay.log.Debug("could not unmarshal message")
|
||||
return true
|
||||
}
|
||||
|
||||
// validate the message
|
||||
validationRes, err := rlnRelay.ValidateMessage(wakuMessage, nil)
|
||||
validationRes, err := rlnRelay.ValidateMessage(msg, nil)
|
||||
if err != nil {
|
||||
rlnRelay.log.Debug("validating message", zap.Error(err))
|
||||
log.Debug("validating message", zap.Error(err))
|
||||
return false
|
||||
}
|
||||
|
||||
switch validationRes {
|
||||
case validMessage:
|
||||
rlnRelay.log.Debug("message verified",
|
||||
zap.String("id", hex.EncodeToString([]byte(message.ID))),
|
||||
)
|
||||
log.Debug("message verified")
|
||||
return true
|
||||
case invalidMessage:
|
||||
rlnRelay.log.Debug("message could not be verified",
|
||||
zap.String("id", hex.EncodeToString([]byte(message.ID))),
|
||||
)
|
||||
log.Debug("message could not be verified")
|
||||
return false
|
||||
case spamMessage:
|
||||
rlnRelay.log.Debug("spam message found",
|
||||
zap.String("id", hex.EncodeToString([]byte(message.ID))),
|
||||
)
|
||||
log.Debug("spam message found")
|
||||
|
||||
rlnRelay.metrics.RecordSpam(wakuMessage.ContentTopic)
|
||||
rlnRelay.metrics.RecordSpam(msg.ContentTopic)
|
||||
|
||||
if spamHandler != nil {
|
||||
if err := spamHandler(wakuMessage); err != nil {
|
||||
rlnRelay.log.Error("executing spam handler", zap.Error(err))
|
||||
if err := spamHandler(msg, topic); err != nil {
|
||||
log.Error("executing spam handler", zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
default:
|
||||
rlnRelay.log.Debug("unhandled validation result", zap.Int("validationResult", int(validationRes)))
|
||||
log.Debug("unhandled validation result", zap.Int("validationResult", int(validationRes)))
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rlnRelay *WakuRLNRelay) generateProof(input []byte, epoch rln.Epoch) (*pb.RateLimitProof, error) {
|
||||
identityCredentials, err := rlnRelay.groupManager.IdentityCredentials()
|
||||
identityCredentials, err := rlnRelay.GroupManager.IdentityCredentials()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
membershipIndex := rlnRelay.groupManager.MembershipIndex()
|
||||
membershipIndex := rlnRelay.GroupManager.MembershipIndex()
|
||||
|
||||
proof, err := rlnRelay.RLN.GenerateProof(input, identityCredentials, membershipIndex, epoch)
|
||||
if err != nil {
|
||||
|
@ -375,9 +287,14 @@ func (rlnRelay *WakuRLNRelay) generateProof(input []byte, epoch rln.Epoch) (*pb.
|
|||
}
|
||||
|
||||
func (rlnRelay *WakuRLNRelay) IdentityCredential() (rln.IdentityCredential, error) {
|
||||
return rlnRelay.groupManager.IdentityCredentials()
|
||||
return rlnRelay.GroupManager.IdentityCredentials()
|
||||
}
|
||||
|
||||
func (rlnRelay *WakuRLNRelay) MembershipIndex() uint {
|
||||
return rlnRelay.groupManager.MembershipIndex()
|
||||
return rlnRelay.GroupManager.MembershipIndex()
|
||||
}
|
||||
|
||||
// IsReady returns true if the RLN Relay protocol is ready to relay messages
|
||||
func (rlnRelay *WakuRLNRelay) IsReady(ctx context.Context) (bool, error) {
|
||||
return rlnRelay.GroupManager.IsReady(ctx)
|
||||
}
|
||||
|
|
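A sketch of the reshaped construction flow: the RLN instance and root tracker are now created separately and New no longer returns an error. How group_manager.Details is wired up, and any import path not shown in this diff, are assumptions.

package main

import (
	"context"

	"github.com/prometheus/client_golang/prometheus"
	wakurln "github.com/waku-org/go-waku/waku/v2/protocol/rln"
	"github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager"
	"github.com/waku-org/go-waku/waku/v2/timesource"
	"go.uber.org/zap"
)

func startRLNRelay(ctx context.Context, details group_manager.Details, ts timesource.Timesource, logger *zap.Logger) (*wakurln.WakuRLNRelay, error) {
	// Typically preceded by:
	//   rlnInstance, rootTracker, err := wakurln.GetRLNInstanceAndRootTracker(treePath)
	// whose results feed into the Details value (exact wiring is not shown in this diff).
	relay := wakurln.New(details, ts, prometheus.DefaultRegisterer, logger)

	// Start now creates the nullifier log bound to ctx before starting the group manager.
	if err := relay.Start(ctx); err != nil {
		return nil, err
	}
	return relay, nil
}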
|
@ -5,8 +5,10 @@ import (
|
|||
"errors"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethclient"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/rln/contracts"
|
||||
)
|
||||
|
@ -26,12 +28,22 @@ type RLNContract struct {
|
|||
DeployedBlockNumber uint64
|
||||
}
|
||||
|
||||
// EthClient is an interface for the ethclient.Client, so that we can pass a mock client for testing
|
||||
type EthClient interface {
|
||||
bind.ContractBackend
|
||||
SyncProgress(ctx context.Context) (*ethereum.SyncProgress, error)
|
||||
TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error)
|
||||
BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error)
|
||||
SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error)
|
||||
Close()
|
||||
}
|
||||
|
||||
// Config is a helper struct that contains attributes for interaction with RLN smart contracts
|
||||
type Config struct {
|
||||
configured bool
|
||||
|
||||
ETHClientAddress string
|
||||
ETHClient *ethclient.Client
|
||||
ETHClient EthClient
|
||||
ChainID *big.Int
|
||||
RegistryContract RegistryContract
|
||||
RLNContract RLNContract
|
||||
|
|
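A test-double sketch for the new EthClient interface; embedding bind.ContractBackend satisfies the methods not overridden here (calling them would panic on the nil interface), and the package name and return values are assumptions.

package web3_test

import (
	"context"
	"math/big"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

type mockEthClient struct {
	bind.ContractBackend // left nil; only the methods below are expected to be called
}

func (m mockEthClient) SyncProgress(ctx context.Context) (*ethereum.SyncProgress, error) {
	return nil, nil // a nil progress value means the node reports itself as synced
}

func (m mockEthClient) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
	return &types.Receipt{Status: types.ReceiptStatusSuccessful}, nil
}

func (m mockEthClient) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) {
	return types.NewBlockWithHeader(&types.Header{Number: number}), nil
}

func (m mockEthClient) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) {
	return nil, nil
}

func (m mockEthClient) Close() {}

A Config could then be built with ETHClient: mockEthClient{} in place of a real *ethclient.Client.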
|
@ -194,6 +194,7 @@ func (rs RelayShards) BitVector() []byte {
|
|||
return append(result, vec...)
|
||||
}
|
||||
|
||||
// Generate a RelayShards from a byte slice
|
||||
func FromBitVector(buf []byte) (RelayShards, error) {
|
||||
if len(buf) != 130 {
|
||||
return RelayShards{}, errors.New("invalid data: expected 130 bytes")
|
||||
|
@ -229,3 +230,13 @@ func GetShardFromContentTopic(topic ContentTopic, shardCount int) StaticSharding
|
|||
|
||||
return NewStaticShardingPubsubTopic(ClusterIndex, uint16(shard))
|
||||
}
|
||||
|
||||
func GetPubSubTopicFromContentTopic(cTopicString string) (string, error) {
|
||||
cTopic, err := StringToContentTopic(cTopicString)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("%s : %s", err.Error(), cTopicString)
|
||||
}
|
||||
pTopic := GetShardFromContentTopic(cTopic, GenerationZeroShardsCount)
|
||||
|
||||
return pTopic.String(), nil
|
||||
}
|
||||
|
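A short sketch of the new GetPubSubTopicFromContentTopic helper; the content topic string is only an example.

package main

import (
	"fmt"
	"log"

	"github.com/waku-org/go-waku/waku/v2/protocol"
)

func main() {
	// Any well-formed "/application/version/name/encoding" content topic works here.
	pubsubTopic, err := protocol.GetPubSubTopicFromContentTopic("/toy-chat/2/example/proto")
	if err != nil {
		log.Fatal(err) // returned when the string cannot be parsed as a content topic
	}
	// The result is the generation-zero static-sharding pubsub topic for that content topic.
	fmt.Println(pubsubTopic)
}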
|
18
vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/waku_store_client.go
generated
vendored
|
@ -31,7 +31,7 @@ type Result struct {
|
|||
store Store
|
||||
query *pb.HistoryQuery
|
||||
cursor *pb.Index
|
||||
peerId peer.ID
|
||||
peerID peer.ID
|
||||
}
|
||||
|
||||
func (r *Result) Cursor() *pb.Index {
|
||||
|
@ -43,7 +43,7 @@ func (r *Result) IsComplete() bool {
|
|||
}
|
||||
|
||||
func (r *Result) PeerID() peer.ID {
|
||||
return r.peerId
|
||||
return r.peerID
|
||||
}
|
||||
|
||||
func (r *Result) Query() *pb.HistoryQuery {
|
||||
|
@ -111,7 +111,7 @@ func WithAutomaticPeerSelection(fromThesePeers ...peer.ID) HistoryRequestOption
|
|||
if params.s.pm == nil {
|
||||
p, err = utils.SelectPeer(params.s.h, StoreID_v20beta4, fromThesePeers, params.s.log)
|
||||
} else {
|
||||
p, err = params.s.pm.SelectPeer(StoreID_v20beta4, fromThesePeers, params.s.log)
|
||||
p, err = params.s.pm.SelectPeer(StoreID_v20beta4, "", fromThesePeers...)
|
||||
}
|
||||
if err == nil {
|
||||
params.selectedPeer = p
|
||||
|
@ -148,7 +148,7 @@ func WithRequestID(requestID []byte) HistoryRequestOption {
|
|||
// when creating a store request
|
||||
func WithAutomaticRequestID() HistoryRequestOption {
|
||||
return func(params *HistoryRequestParameters) {
|
||||
params.requestID = protocol.GenerateRequestId()
|
||||
params.requestID = protocol.GenerateRequestID()
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -282,7 +282,7 @@ func (store *WakuStore) Query(ctx context.Context, query Query, opts ...HistoryR
|
|||
}
|
||||
|
||||
if len(params.requestID) == 0 {
|
||||
return nil, ErrInvalidId
|
||||
return nil, ErrInvalidID
|
||||
}
|
||||
|
||||
if params.cursor != nil {
|
||||
|
@ -321,7 +321,7 @@ func (store *WakuStore) Query(ctx context.Context, query Query, opts ...HistoryR
|
|||
store: store,
|
||||
Messages: response.Messages,
|
||||
query: q,
|
||||
peerId: params.selectedPeer,
|
||||
peerID: params.selectedPeer,
|
||||
}
|
||||
|
||||
if response.PagingInfo != nil {
|
||||
|
@ -379,7 +379,7 @@ func (store *WakuStore) Next(ctx context.Context, r *Result) (*Result, error) {
|
|||
Messages: []*wpb.WakuMessage{},
|
||||
cursor: nil,
|
||||
query: r.query,
|
||||
peerId: r.PeerID(),
|
||||
peerID: r.PeerID(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -400,7 +400,7 @@ func (store *WakuStore) Next(ctx context.Context, r *Result) (*Result, error) {
|
|||
},
|
||||
}
|
||||
|
||||
response, err := store.queryFrom(ctx, q, r.PeerID(), protocol.GenerateRequestId())
|
||||
response, err := store.queryFrom(ctx, q, r.PeerID(), protocol.GenerateRequestID())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -414,7 +414,7 @@ func (store *WakuStore) Next(ctx context.Context, r *Result) (*Result, error) {
|
|||
store: store,
|
||||
Messages: response.Messages,
|
||||
query: q,
|
||||
peerId: r.PeerID(),
|
||||
peerID: r.PeerID(),
|
||||
}
|
||||
|
||||
if response.PagingInfo != nil {
|
||||
|
|
|
@ -32,8 +32,8 @@ var (
|
|||
// that could be used to retrieve message history
|
||||
ErrNoPeersAvailable = errors.New("no suitable remote peers")
|
||||
|
||||
// ErrInvalidId is returned when no RequestID is given
|
||||
ErrInvalidId = errors.New("invalid request id")
|
||||
// ErrInvalidID is returned when no RequestID is given
|
||||
ErrInvalidID = errors.New("invalid request id")
|
||||
|
||||
// ErrFailedToResumeHistory is returned when the node attempted to retrieve historic
|
||||
// messages to fill its own message history but for some reason it failed
|
||||
|
|
4
vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/waku_store_protocol.go
generated
vendored
|
@ -243,7 +243,7 @@ func (store *WakuStore) queryLoop(ctx context.Context, query *pb.HistoryQuery, c
|
|||
for _, peer := range candidateList {
|
||||
func() {
|
||||
defer queryWg.Done()
|
||||
result, err := store.queryFrom(ctx, query, peer, protocol.GenerateRequestId())
|
||||
result, err := store.queryFrom(ctx, query, peer, protocol.GenerateRequestID())
|
||||
if err == nil {
|
||||
resultChan <- result
|
||||
return
|
||||
|
@ -298,7 +298,7 @@ func (store *WakuStore) Resume(ctx context.Context, pubsubTopic string, peerList
|
|||
return 0, err
|
||||
}
|
||||
|
||||
var offset int64 = int64(20 * time.Nanosecond)
|
||||
offset := int64(20 * time.Nanosecond)
|
||||
currentTime := store.timesource.Now().UnixNano() + offset
|
||||
lastSeenTime = max(lastSeenTime-offset, 0)
|
||||
|
||||
|
|
|
@ -1,13 +1,15 @@
|
|||
package filter
|
||||
package subscription
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
type SubscriptionDetails struct {
|
||||
|
@ -19,63 +21,60 @@ type SubscriptionDetails struct {
|
|||
once sync.Once
|
||||
|
||||
PeerID peer.ID
|
||||
PubsubTopic string
|
||||
ContentTopics map[string]struct{}
|
||||
ContentFilter protocol.ContentFilter
|
||||
C chan *protocol.Envelope
|
||||
}
|
||||
|
||||
// Map of SubscriptionDetails.ID to subscriptions
|
||||
type SubscriptionSet map[string]*SubscriptionDetails
|
||||
|
||||
type PeerSubscription struct {
|
||||
peerID peer.ID
|
||||
subscriptionsPerTopic map[string]SubscriptionSet
|
||||
PeerID peer.ID
|
||||
SubsPerPubsubTopic map[string]SubscriptionSet
|
||||
}
|
||||
|
||||
type SubscriptionsMap struct {
|
||||
sync.RWMutex
|
||||
logger *zap.Logger
|
||||
items map[peer.ID]*PeerSubscription
|
||||
Items map[peer.ID]*PeerSubscription
|
||||
}
|
||||
|
||||
var ErrNotFound = errors.New("not found")
|
||||
|
||||
func NewSubscriptionMap(logger *zap.Logger) *SubscriptionsMap {
|
||||
return &SubscriptionsMap{
|
||||
logger: logger.Named("subscription-map"),
|
||||
items: make(map[peer.ID]*PeerSubscription),
|
||||
Items: make(map[peer.ID]*PeerSubscription),
|
||||
}
|
||||
}
|
||||
|
||||
func (sub *SubscriptionsMap) NewSubscription(peerID peer.ID, topic string, contentTopics []string) *SubscriptionDetails {
|
||||
func (sub *SubscriptionsMap) NewSubscription(peerID peer.ID, cf protocol.ContentFilter) *SubscriptionDetails {
|
||||
sub.Lock()
|
||||
defer sub.Unlock()
|
||||
|
||||
peerSubscription, ok := sub.items[peerID]
|
||||
peerSubscription, ok := sub.Items[peerID]
|
||||
if !ok {
|
||||
peerSubscription = &PeerSubscription{
|
||||
peerID: peerID,
|
||||
subscriptionsPerTopic: make(map[string]SubscriptionSet),
|
||||
PeerID: peerID,
|
||||
SubsPerPubsubTopic: make(map[string]SubscriptionSet),
|
||||
}
|
||||
sub.items[peerID] = peerSubscription
|
||||
sub.Items[peerID] = peerSubscription
|
||||
}
|
||||
|
||||
_, ok = peerSubscription.subscriptionsPerTopic[topic]
|
||||
_, ok = peerSubscription.SubsPerPubsubTopic[cf.PubsubTopic]
|
||||
if !ok {
|
||||
peerSubscription.subscriptionsPerTopic[topic] = make(SubscriptionSet)
|
||||
peerSubscription.SubsPerPubsubTopic[cf.PubsubTopic] = make(SubscriptionSet)
|
||||
}
|
||||
|
||||
details := &SubscriptionDetails{
|
||||
ID: uuid.NewString(),
|
||||
mapRef: sub,
|
||||
PeerID: peerID,
|
||||
PubsubTopic: topic,
|
||||
C: make(chan *protocol.Envelope, 1024),
|
||||
ContentTopics: make(map[string]struct{}),
|
||||
ContentFilter: protocol.ContentFilter{PubsubTopic: cf.PubsubTopic, ContentTopics: maps.Clone(cf.ContentTopics)},
|
||||
}
|
||||
|
||||
for _, ct := range contentTopics {
|
||||
details.ContentTopics[ct] = struct{}{}
|
||||
}
|
||||
|
||||
sub.items[peerID].subscriptionsPerTopic[topic][details.ID] = details
|
||||
sub.Items[peerID].SubsPerPubsubTopic[cf.PubsubTopic][details.ID] = details
|
||||
|
||||
return details
|
||||
}
|
||||
|
@ -84,31 +83,32 @@ func (sub *SubscriptionsMap) IsSubscribedTo(peerID peer.ID) bool {
|
|||
sub.RLock()
|
||||
defer sub.RUnlock()
|
||||
|
||||
_, ok := sub.items[peerID]
|
||||
_, ok := sub.Items[peerID]
|
||||
return ok
|
||||
}
|
||||
|
||||
func (sub *SubscriptionsMap) Has(peerID peer.ID, topic string, contentTopics ...string) bool {
|
||||
// Check if we have subscriptions for all (pubsubTopic, contentTopics[i]) pairs provided
|
||||
func (sub *SubscriptionsMap) Has(peerID peer.ID, cf protocol.ContentFilter) bool {
|
||||
sub.RLock()
|
||||
defer sub.RUnlock()
|
||||
|
||||
// Check if peer exists
|
||||
peerSubscription, ok := sub.items[peerID]
|
||||
peerSubscription, ok := sub.Items[peerID]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
//TODO: Handle pubsubTopic as null
|
||||
// Check if pubsub topic exists
|
||||
subscriptions, ok := peerSubscription.subscriptionsPerTopic[topic]
|
||||
subscriptions, ok := peerSubscription.SubsPerPubsubTopic[cf.PubsubTopic]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check if the content topic exists within the list of subscriptions for this peer
|
||||
for _, ct := range contentTopics {
|
||||
for _, ct := range cf.ContentTopicsList() {
|
||||
found := false
|
||||
for _, subscription := range subscriptions {
|
||||
_, exists := subscription.ContentTopics[ct]
|
||||
_, exists := subscription.ContentFilter.ContentTopics[ct]
|
||||
if exists {
|
||||
found = true
|
||||
break
|
||||
|
@ -121,17 +121,16 @@ func (sub *SubscriptionsMap) Has(peerID peer.ID, topic string, contentTopics ...
|
|||
|
||||
return true
|
||||
}
|
||||
|
||||
func (sub *SubscriptionsMap) Delete(subscription *SubscriptionDetails) error {
|
||||
sub.Lock()
|
||||
defer sub.Unlock()
|
||||
|
||||
peerSubscription, ok := sub.items[subscription.PeerID]
|
||||
peerSubscription, ok := sub.Items[subscription.PeerID]
|
||||
if !ok {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
delete(peerSubscription.subscriptionsPerTopic[subscription.PubsubTopic], subscription.ID)
|
||||
delete(peerSubscription.SubsPerPubsubTopic[subscription.ContentFilter.PubsubTopic], subscription.ID)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -141,7 +140,7 @@ func (s *SubscriptionDetails) Add(contentTopics ...string) {
|
|||
defer s.Unlock()
|
||||
|
||||
for _, ct := range contentTopics {
|
||||
s.ContentTopics[ct] = struct{}{}
|
||||
s.ContentFilter.ContentTopics[ct] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -150,11 +149,11 @@ func (s *SubscriptionDetails) Remove(contentTopics ...string) {
|
|||
defer s.Unlock()
|
||||
|
||||
for _, ct := range contentTopics {
|
||||
delete(s.ContentTopics, ct)
|
||||
delete(s.ContentFilter.ContentTopics, ct)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SubscriptionDetails) closeC() {
|
||||
func (s *SubscriptionDetails) CloseC() {
|
||||
s.once.Do(func() {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
@ -165,7 +164,7 @@ func (s *SubscriptionDetails) closeC() {
|
|||
}
|
||||
|
||||
func (s *SubscriptionDetails) Close() error {
|
||||
s.closeC()
|
||||
s.CloseC()
|
||||
return s.mapRef.Delete(s)
|
||||
}
|
||||
|
||||
|
@ -178,28 +177,23 @@ func (s *SubscriptionDetails) Clone() *SubscriptionDetails {
|
|||
mapRef: s.mapRef,
|
||||
Closed: false,
|
||||
PeerID: s.PeerID,
|
||||
PubsubTopic: s.PubsubTopic,
|
||||
ContentTopics: make(map[string]struct{}),
|
||||
ContentFilter: protocol.ContentFilter{PubsubTopic: s.ContentFilter.PubsubTopic, ContentTopics: maps.Clone(s.ContentFilter.ContentTopics)},
|
||||
C: make(chan *protocol.Envelope),
|
||||
}
|
||||
|
||||
for k := range s.ContentTopics {
|
||||
result.ContentTopics[k] = struct{}{}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func (sub *SubscriptionsMap) clear() {
|
||||
for _, peerSubscription := range sub.items {
|
||||
for _, subscriptionSet := range peerSubscription.subscriptionsPerTopic {
|
||||
for _, peerSubscription := range sub.Items {
|
||||
for _, subscriptionSet := range peerSubscription.SubsPerPubsubTopic {
|
||||
for _, subscription := range subscriptionSet {
|
||||
subscription.closeC()
|
||||
subscription.CloseC()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sub.items = make(map[peer.ID]*PeerSubscription)
|
||||
sub.Items = make(map[peer.ID]*PeerSubscription)
|
||||
}
|
||||
|
||||
func (sub *SubscriptionsMap) Clear() {
|
||||
|
@ -212,7 +206,7 @@ func (sub *SubscriptionsMap) Notify(peerID peer.ID, envelope *protocol.Envelope)
|
|||
sub.RLock()
|
||||
defer sub.RUnlock()
|
||||
|
||||
subscriptions, ok := sub.items[peerID].subscriptionsPerTopic[envelope.PubsubTopic()]
|
||||
subscriptions, ok := sub.Items[peerID].SubsPerPubsubTopic[envelope.PubsubTopic()]
|
||||
if ok {
|
||||
iterateSubscriptionSet(sub.logger, subscriptions, envelope)
|
||||
}
|
||||
|
@ -224,7 +218,7 @@ func iterateSubscriptionSet(logger *zap.Logger, subscriptions SubscriptionSet, e
|
|||
subscription.RLock()
|
||||
defer subscription.RUnlock()
|
||||
|
||||
_, ok := subscription.ContentTopics[envelope.Message().ContentTopic]
|
||||
_, ok := subscription.ContentFilter.ContentTopics[envelope.Message().ContentTopic]
|
||||
if !ok { // only send the msg to subscriptions that have matching contentTopic
|
||||
return
|
||||
}
|
||||
|
@ -249,10 +243,10 @@ func (s *SubscriptionDetails) MarshalJSON() ([]byte, error) {
|
|||
|
||||
result := resultType{
|
||||
PeerID: s.PeerID.Pretty(),
|
||||
PubsubTopic: s.PubsubTopic,
|
||||
PubsubTopic: s.ContentFilter.PubsubTopic,
|
||||
}
|
||||
|
||||
for c := range s.ContentTopics {
|
||||
for c := range s.ContentFilter.ContentTopics {
|
||||
result.ContentTopics = append(result.ContentTopics, c)
|
||||
}
|
||||
|
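An illustrative use of the subscription package after the move to protocol.ContentFilter; the NewContentFilter constructor and the chosen content topic are assumptions not taken from this diff.

package main

import (
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/waku-org/go-waku/waku/v2/protocol"
	"github.com/waku-org/go-waku/waku/v2/protocol/relay"
	"github.com/waku-org/go-waku/waku/v2/protocol/subscription"
	"go.uber.org/zap"
)

func subscribe(peerID peer.ID, logger *zap.Logger) *subscription.SubscriptionDetails {
	subs := subscription.NewSubscriptionMap(logger)

	// Subscriptions are now keyed by a protocol.ContentFilter rather than a
	// (pubsubTopic, []contentTopic) pair; NewContentFilter is assumed to exist here.
	cf := protocol.NewContentFilter(relay.DefaultWakuTopic, "/app/1/chat/proto")

	details := subs.NewSubscription(peerID, cf)

	// Has reports whether every (pubsubTopic, contentTopic) pair in cf is covered.
	_ = subs.Has(peerID, cf)

	return details
}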
|
@ -6,6 +6,8 @@ import (
|
|||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
)
|
||||
|
||||
const GossipSubOptimalFullMeshSize = 6
|
||||
|
||||
// FulltextMatch is the default matching function used for checking if a peer
|
||||
// supports a protocol or not
|
||||
func FulltextMatch(expectedProtocol string) func(string) bool {
|
||||
|
|
|
@ -36,7 +36,7 @@ type DB struct {
|
|||
cancel func()
|
||||
}
|
||||
|
||||
func NewDB(ctx context.Context, db *sql.DB, logger *zap.Logger) *DB {
|
||||
func NewDB(db *sql.DB, logger *zap.Logger) *DB {
|
||||
rdb := &DB{
|
||||
db: db,
|
||||
logger: logger.Named("rendezvous/db"),
|
||||
|
|
|
@ -2,10 +2,8 @@ package rendezvous
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
|
@ -32,9 +30,8 @@ type Rendezvous struct {
|
|||
|
||||
peerConnector PeerConnector
|
||||
|
||||
log *zap.Logger
|
||||
wg sync.WaitGroup
|
||||
cancel context.CancelFunc
|
||||
log *zap.Logger
|
||||
*peermanager.CommonDiscoveryService
|
||||
}
|
||||
|
||||
// PeerConnector will subscribe to a channel containing the information for all peers found by this discovery protocol
|
||||
|
@ -46,9 +43,10 @@ type PeerConnector interface {
|
|||
func NewRendezvous(db *DB, peerConnector PeerConnector, log *zap.Logger) *Rendezvous {
|
||||
logger := log.Named("rendezvous")
|
||||
return &Rendezvous{
|
||||
db: db,
|
||||
peerConnector: peerConnector,
|
||||
log: logger,
|
||||
db: db,
|
||||
peerConnector: peerConnector,
|
||||
log: logger,
|
||||
CommonDiscoveryService: peermanager.NewCommonDiscoveryService(),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -58,17 +56,17 @@ func (r *Rendezvous) SetHost(h host.Host) {
|
|||
}
|
||||
|
||||
func (r *Rendezvous) Start(ctx context.Context) error {
|
||||
if r.cancel != nil {
|
||||
return errors.New("already started")
|
||||
return r.CommonDiscoveryService.Start(ctx, r.start)
|
||||
}
|
||||
|
||||
func (r *Rendezvous) start() error {
|
||||
if r.db != nil {
|
||||
if err := r.db.Start(r.Context()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
r.cancel = cancel
|
||||
|
||||
err := r.db.Start(ctx)
|
||||
if err != nil {
|
||||
cancel()
|
||||
return err
|
||||
if r.peerConnector != nil {
|
||||
r.peerConnector.Subscribe(r.Context(), r.GetListeningChan())
|
||||
}
|
||||
|
||||
r.rendezvousSvc = rvs.NewRendezvousService(r.host, r.db)
|
||||
|
@ -105,18 +103,15 @@ func (r *Rendezvous) DiscoverWithNamespace(ctx context.Context, namespace string
|
|||
if len(addrInfo) != 0 {
|
||||
rp.SetSuccess(cookie)
|
||||
|
||||
peerCh := make(chan peermanager.PeerData)
|
||||
defer close(peerCh)
|
||||
r.peerConnector.Subscribe(ctx, peerCh)
|
||||
for _, p := range addrInfo {
|
||||
peer := peermanager.PeerData{
|
||||
Origin: peerstore.Rendezvous,
|
||||
AddrInfo: p,
|
||||
Origin: peerstore.Rendezvous,
|
||||
AddrInfo: p,
|
||||
PubSubTopics: []string{namespace},
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
if !r.PushToChan(peer) {
|
||||
r.log.Error("could push to closed channel/context completed")
|
||||
return
|
||||
case peerCh <- peer:
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
@ -161,9 +156,9 @@ func (r *Rendezvous) RegisterRelayShards(ctx context.Context, rs protocol.RelayS
|
|||
// RegisterWithNamespace registers the node in the rendezvous point by using a specific namespace (usually a pubsub topic)
|
||||
func (r *Rendezvous) RegisterWithNamespace(ctx context.Context, namespace string, rendezvousPoints []*RendezvousPoint) {
|
||||
for _, m := range rendezvousPoints {
|
||||
r.wg.Add(1)
|
||||
r.WaitGroup().Add(1)
|
||||
go func(m *RendezvousPoint) {
|
||||
r.wg.Done()
|
||||
r.WaitGroup().Done()
|
||||
|
||||
rendezvousClient := rvs.NewRendezvousClient(r.host, m.id)
|
||||
retries := 0
|
||||
|
@ -186,14 +181,10 @@ func (r *Rendezvous) RegisterWithNamespace(ctx context.Context, namespace string
|
|||
}
|
||||
|
||||
func (r *Rendezvous) Stop() {
|
||||
if r.cancel == nil {
|
||||
return
|
||||
}
|
||||
|
||||
r.cancel()
|
||||
r.wg.Wait()
|
||||
r.host.RemoveStreamHandler(rvs.RendezvousProto)
|
||||
r.rendezvousSvc = nil
|
||||
r.CommonDiscoveryService.Stop(func() {
|
||||
r.host.RemoveStreamHandler(rvs.RendezvousProto)
|
||||
r.rendezvousSvc = nil
|
||||
})
|
||||
}
|
||||
|
||||
// ShardToNamespace translates a cluster and shard index into a rendezvous namespace
|
||||
|
|
|
@ -113,6 +113,7 @@ func computeOffset(timeQuery ntpQuery, servers []string, allowedFailures int) (t
|
|||
return offsets[mid], nil
|
||||
}
|
||||
|
||||
// NewNTPTimesource creates a timesource that uses NTP
|
||||
func NewNTPTimesource(ntpServers []string, log *zap.Logger) *NTPTimeSource {
|
||||
return &NTPTimeSource{
|
||||
servers: ntpServers,
|
||||
|
|
|
@ -16,7 +16,7 @@ func EcdsaPubKeyToSecp256k1PublicKey(pubKey *ecdsa.PublicKey) *crypto.Secp256k1P
|
|||
return (*crypto.Secp256k1PublicKey)(btcec.NewPublicKey(xFieldVal, yFieldVal))
|
||||
}
|
||||
|
||||
// EcdsaPubKeyToSecp256k1PublicKey converts an `ecdsa.PrivateKey` into a libp2p `crypto.Secp256k1PrivateKey“
|
||||
// EcdsaPrivKeyToSecp256k1PrivKey converts an `ecdsa.PrivateKey` into a libp2p `crypto.Secp256k1PrivateKey“
|
||||
func EcdsaPrivKeyToSecp256k1PrivKey(privKey *ecdsa.PrivateKey) *crypto.Secp256k1PrivateKey {
|
||||
privK, _ := btcec.PrivKeyFromBytes(privKey.D.Bytes())
|
||||
return (*crypto.Secp256k1PrivateKey)(privK)
|
||||
|
|
|
@ -8,7 +8,7 @@ import (
|
|||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var log *zap.Logger = nil
|
||||
var log *zap.Logger
|
||||
|
||||
// Logger creates a zap.Logger with some reasonable defaults
|
||||
func Logger() *zap.Logger {
|
||||
|
|
|
@ -12,7 +12,6 @@ import (
|
|||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/libp2p/go-libp2p/p2p/protocol/ping"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"github.com/waku-org/go-waku/logging"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
|
@ -20,6 +19,7 @@ import (
|
|||
// some protocol
|
||||
var ErrNoPeersAvailable = errors.New("no suitable peers found")
|
||||
|
||||
// GetPeerID is used to extract the peerID from a multiaddress
|
||||
func GetPeerID(m multiaddr.Multiaddr) (peer.ID, error) {
|
||||
peerIDStr, err := m.ValueForProtocol(multiaddr.P_P2P)
|
||||
if err != nil {
|
||||
|
@ -61,7 +61,6 @@ func SelectRandomPeer(peers peer.IDSlice, log *zap.Logger) (peer.ID, error) {
|
|||
if len(peers) >= 1 {
|
||||
peerID := peers[rand.Intn(len(peers))]
|
||||
// TODO: proper heuristic here that compares peer scores and selects "best" one. For now a random peer for the given protocol is returned
|
||||
log.Info("Got random peer from peerstore", logging.HostID("peer", peerID))
|
||||
return peerID, nil // nolint: gosec
|
||||
}
|
||||
|
||||
|
@ -72,7 +71,7 @@ func SelectRandomPeer(peers peer.IDSlice, log *zap.Logger) (peer.ID, error) {
|
|||
// Note: Use this method only if WakuNode is not being initialized, otherwise use peermanager.SelectPeer.
|
||||
// If a list of specific peers is passed, the peer will be chosen from that list assuming
|
||||
// it supports the chosen protocol, otherwise it will choose a peer from the node peerstore
|
||||
func SelectPeer(host host.Host, protocolId protocol.ID, specificPeers []peer.ID, log *zap.Logger) (peer.ID, error) {
|
||||
func SelectPeer(host host.Host, protocolID protocol.ID, specificPeers []peer.ID, log *zap.Logger) (peer.ID, error) {
|
||||
// @TODO We need to be more strategic about which peers we dial. Right now we just set one on the service.
|
||||
// Ideally depending on the query and our set of peers we take a subset of ideal peers.
|
||||
// This will require us to check for various factors such as:
|
||||
|
@ -80,7 +79,7 @@ func SelectPeer(host host.Host, protocolId protocol.ID, specificPeers []peer.ID,
|
|||
// - latency?
|
||||
// - default store peer?
|
||||
|
||||
peers, err := FilterPeersByProto(host, specificPeers, protocolId)
|
||||
peers, err := FilterPeersByProto(host, specificPeers, protocolID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
@ -96,7 +95,7 @@ type pingResult struct {
|
|||
// SelectPeerWithLowestRTT will select a peer that supports a specific protocol with the lowest reply time
|
||||
// If a list of specific peers is passed, the peer will be chosen from that list assuming
|
||||
// it supports the chosen protocol, otherwise it will choose a peer from the node peerstore
|
||||
func SelectPeerWithLowestRTT(ctx context.Context, host host.Host, protocolId protocol.ID, specificPeers []peer.ID, log *zap.Logger) (peer.ID, error) {
|
||||
func SelectPeerWithLowestRTT(ctx context.Context, host host.Host, protocolID protocol.ID, specificPeers []peer.ID, _ *zap.Logger) (peer.ID, error) {
|
||||
var peers peer.IDSlice
|
||||
|
||||
peerSet := specificPeers
|
||||
|
@ -105,7 +104,7 @@ func SelectPeerWithLowestRTT(ctx context.Context, host host.Host, protocolId pro
|
|||
}
|
||||
|
||||
for _, peer := range peerSet {
|
||||
protocols, err := host.Peerstore().SupportsProtocols(peer, protocolId)
|
||||
protocols, err := host.Peerstore().SupportsProtocols(peer, protocolID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
|
|
@ -19,7 +19,7 @@ type Timesource interface {
|
|||
func GetUnixEpoch(timesource ...Timesource) int64 {
|
||||
if len(timesource) != 0 {
|
||||
return GetUnixEpochFrom(timesource[0].Now())
|
||||
} else {
|
||||
return GetUnixEpochFrom(time.Now())
|
||||
}
|
||||
|
||||
return GetUnixEpochFrom(time.Now())
|
||||
}
|
||||
|
|
|
@ -42,6 +42,8 @@ bool set_leaf(struct RLN *ctx, uintptr_t index, const struct Buffer *input_buffe
|
|||
|
||||
bool get_leaf(struct RLN *ctx, uintptr_t index, struct Buffer *output_buffer);
|
||||
|
||||
uintptr_t leaves_set(struct RLN *ctx);
|
||||
|
||||
bool set_next_leaf(struct RLN *ctx, const struct Buffer *input_buffer);
|
||||
|
||||
bool set_leaves_from(struct RLN *ctx, uintptr_t index, const struct Buffer *input_buffer);
|
||||
|
|
|
@ -1,7 +1,12 @@
|
|||
package rln
|
||||
|
||||
import (
|
||||
_ "github.com/waku-org/go-zerokit-rln-apple/libs/aarch64-apple-darwin"
|
||||
_ "github.com/waku-org/go-zerokit-rln-apple/libs/x86_64-apple-darwin"
|
||||
)
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS:-lrln -ldl -lm
|
||||
#cgo LDFLAGS: -lrln -ldl -lm
|
||||
#cgo darwin,386,!ios LDFLAGS:-L${SRCDIR}/../libs/i686-apple-darwin
|
||||
#cgo darwin,arm64,!ios LDFLAGS:-L${SRCDIR}/../libs/aarch64-apple-darwin
|
||||
#cgo darwin,amd64,!ios LDFLAGS:-L${SRCDIR}/../libs/x86_64-apple-darwin
|
||||
|
|
|
@ -244,3 +244,7 @@ func (r *RLN) GetLeaf(index uint) ([]byte, error) {
|
|||
|
||||
return C.GoBytes(unsafe.Pointer(out.ptr), C.int(out.len)), nil
|
||||
}
|
||||
|
||||
func (r *RLN) LeavesSet() uint {
|
||||
return uint(C.leaves_set(r.ptr))
|
||||
}
|
||||
|
|
|
@ -42,6 +42,8 @@ bool set_leaf(struct RLN *ctx, uintptr_t index, const struct Buffer *input_buffe
|
|||
|
||||
bool get_leaf(struct RLN *ctx, uintptr_t index, struct Buffer *output_buffer);
|
||||
|
||||
uintptr_t leaves_set(struct RLN *ctx);
|
||||
|
||||
bool set_next_leaf(struct RLN *ctx, const struct Buffer *input_buffer);
|
||||
|
||||
bool set_leaves_from(struct RLN *ctx, uintptr_t index, const struct Buffer *input_buffer);
|
||||
|
|
|
@ -1,7 +1,17 @@
|
|||
package rln
|
||||
|
||||
import (
|
||||
_ "github.com/waku-org/go-zerokit-rln-arm/libs/aarch64-linux-android"
|
||||
_ "github.com/waku-org/go-zerokit-rln-arm/libs/aarch64-unknown-linux-gnu"
|
||||
_ "github.com/waku-org/go-zerokit-rln-arm/libs/arm-linux-androideabi"
|
||||
_ "github.com/waku-org/go-zerokit-rln-arm/libs/arm-unknown-linux-gnueabi"
|
||||
_ "github.com/waku-org/go-zerokit-rln-arm/libs/arm-unknown-linux-gnueabihf"
|
||||
_ "github.com/waku-org/go-zerokit-rln-arm/libs/armv7-linux-androideabi"
|
||||
_ "github.com/waku-org/go-zerokit-rln-arm/libs/armv7a-linux-androideabi"
|
||||
)
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS:-lrln -ldl -lm
|
||||
#cgo LDFLAGS: -lrln -ldl -lm
|
||||
#cgo linux,arm LDFLAGS:-L${SRCDIR}/../libs/armv7-linux-androideabi
|
||||
#cgo linux,arm64 LDFLAGS:-L${SRCDIR}/../libs/aarch64-unknown-linux-gnu
|
||||
|
||||
|
|
|
@ -244,3 +244,7 @@ func (r *RLN) GetLeaf(index uint) ([]byte, error) {
|
|||
|
||||
return C.GoBytes(unsafe.Pointer(out.ptr), C.int(out.len)), nil
|
||||
}
|
||||
|
||||
func (r *RLN) LeavesSet() uint {
|
||||
return uint(C.leaves_set(r.ptr))
|
||||
}
|
||||
|
|
|
@ -42,6 +42,8 @@ bool set_leaf(struct RLN *ctx, uintptr_t index, const struct Buffer *input_buffe
|
|||
|
||||
bool get_leaf(struct RLN *ctx, uintptr_t index, struct Buffer *output_buffer);
|
||||
|
||||
uintptr_t leaves_set(struct RLN *ctx);
|
||||
|
||||
bool set_next_leaf(struct RLN *ctx, const struct Buffer *input_buffer);
|
||||
|
||||
bool set_leaves_from(struct RLN *ctx, uintptr_t index, const struct Buffer *input_buffer);
|
||||
|
|
|
@ -1,7 +1,13 @@
|
|||
package rln
|
||||
|
||||
import (
|
||||
_ "github.com/waku-org/go-zerokit-rln-x86_64/libs/x86_64-pc-windows-gnu"
|
||||
_ "github.com/waku-org/go-zerokit-rln-x86_64/libs/x86_64-unknown-linux-gnu"
|
||||
_ "github.com/waku-org/go-zerokit-rln-x86_64/libs/x86_64-unknown-linux-musl"
|
||||
)
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS:-lrln -ldl -lm
|
||||
#cgo LDFLAGS: -lrln -ldl -lm
|
||||
#cgo linux,amd64,musl,!android LDFLAGS:-L${SRCDIR}/../libs/x86_64-unknown-linux-musl
|
||||
#cgo linux,amd64,!musl,!android LDFLAGS:-L${SRCDIR}/../libs/x86_64-unknown-linux-gnu
|
||||
#cgo windows,amd64 LDFLAGS:-L${SRCDIR}/../libs/x86_64-pc-windows-gnu -lrln -lm -lws2_32 -luserenv
|
||||
|
|
|
@ -244,3 +244,7 @@ func (r *RLN) GetLeaf(index uint) ([]byte, error) {
|
|||
|
||||
return C.GoBytes(unsafe.Pointer(out.ptr), C.int(out.len)), nil
|
||||
}
|
||||
|
||||
func (r *RLN) LeavesSet() uint {
|
||||
return uint(C.leaves_set(r.ptr))
|
||||
}
|
||||
|
|
|
@ -114,3 +114,7 @@ func (i RLNWrapper) GetMetadata() ([]byte, error) {
|
|||
func (i RLNWrapper) Flush() bool {
|
||||
return i.ffi.Flush()
|
||||
}
|
||||
|
||||
func (i RLNWrapper) LeavesSet() uint {
|
||||
return i.ffi.LeavesSet()
|
||||
}
|
||||
|
|
|
@ -113,3 +113,7 @@ func (i RLNWrapper) GetMetadata() ([]byte, error) {
|
|||
func (i RLNWrapper) Flush() bool {
|
||||
return i.ffi.Flush()
|
||||
}
|
||||
|
||||
func (i RLNWrapper) LeavesSet() uint {
|
||||
return i.ffi.LeavesSet()
|
||||
}
|
||||
|
|
|
@ -114,3 +114,7 @@ func (i RLNWrapper) GetMetadata() ([]byte, error) {
|
|||
func (i RLNWrapper) Flush() bool {
|
||||
return i.ffi.Flush()
|
||||
}
|
||||
|
||||
func (i RLNWrapper) LeavesSet() uint {
|
||||
return i.ffi.LeavesSet()
|
||||
}
|
||||
|
|
|
@ -484,3 +484,8 @@ func (r *RLN) Flush() error {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// LeavesSet indicates how many elements have been inserted in the merkle tree
|
||||
func (r *RLN) LeavesSet() uint {
|
||||
return r.w.LeavesSet()
|
||||
}
|
||||
|
|
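A small sketch of the new LeavesSet accessor added across the zerokit bindings; the RLN instance and logger are assumed to exist already.

package main

import (
	"github.com/waku-org/go-zerokit-rln/rln"
	"go.uber.org/zap"
)

// logTreeSize assumes rlnInstance was created elsewhere, e.g. via rln.NewWithConfig.
func logTreeSize(rlnInstance *rln.RLN, logger *zap.Logger) {
	// LeavesSet reports how many leaves have been inserted into the Merkle tree,
	// which is handy for tracking membership-sync progress.
	logger.Info("rln tree size", zap.Uint("leaves", rlnInstance.LeavesSet()))
}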
|
@ -0,0 +1,94 @@
|
|||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package maps defines various functions useful with maps of any type.
|
||||
package maps
|
||||
|
||||
// Keys returns the keys of the map m.
|
||||
// The keys will be in an indeterminate order.
|
||||
func Keys[M ~map[K]V, K comparable, V any](m M) []K {
|
||||
r := make([]K, 0, len(m))
|
||||
for k := range m {
|
||||
r = append(r, k)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// Values returns the values of the map m.
|
||||
// The values will be in an indeterminate order.
|
||||
func Values[M ~map[K]V, K comparable, V any](m M) []V {
|
||||
r := make([]V, 0, len(m))
|
||||
for _, v := range m {
|
||||
r = append(r, v)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// Equal reports whether two maps contain the same key/value pairs.
|
||||
// Values are compared using ==.
|
||||
func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool {
|
||||
if len(m1) != len(m2) {
|
||||
return false
|
||||
}
|
||||
for k, v1 := range m1 {
|
||||
if v2, ok := m2[k]; !ok || v1 != v2 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// EqualFunc is like Equal, but compares values using eq.
|
||||
// Keys are still compared with ==.
|
||||
func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool {
|
||||
if len(m1) != len(m2) {
|
||||
return false
|
||||
}
|
||||
for k, v1 := range m1 {
|
||||
if v2, ok := m2[k]; !ok || !eq(v1, v2) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Clear removes all entries from m, leaving it empty.
|
||||
func Clear[M ~map[K]V, K comparable, V any](m M) {
|
||||
for k := range m {
|
||||
delete(m, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Clone returns a copy of m. This is a shallow clone:
|
||||
// the new keys and values are set using ordinary assignment.
|
||||
func Clone[M ~map[K]V, K comparable, V any](m M) M {
|
||||
// Preserve nil in case it matters.
|
||||
if m == nil {
|
||||
return nil
|
||||
}
|
||||
r := make(M, len(m))
|
||||
for k, v := range m {
|
||||
r[k] = v
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// Copy copies all key/value pairs in src adding them to dst.
|
||||
// When a key in src is already present in dst,
|
||||
// the value in dst will be overwritten by the value associated
|
||||
// with the key in src.
|
||||
func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) {
|
||||
for k, v := range src {
|
||||
dst[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteFunc deletes any key/value pairs from m for which del returns true.
|
||||
func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) {
|
||||
for k, v := range m {
|
||||
if del(k, v) {
|
||||
delete(m, k)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1008,7 +1008,7 @@ github.com/waku-org/go-discover/discover/v5wire
|
|||
github.com/waku-org/go-libp2p-rendezvous
|
||||
github.com/waku-org/go-libp2p-rendezvous/db
|
||||
github.com/waku-org/go-libp2p-rendezvous/pb
|
||||
# github.com/waku-org/go-waku v0.7.1-0.20230907093131-092811658ea3
|
||||
# github.com/waku-org/go-waku v0.8.1-0.20230930175749-dcc828749f67
|
||||
## explicit; go 1.19
|
||||
github.com/waku-org/go-waku/logging
|
||||
github.com/waku-org/go-waku/waku/persistence
|
||||
|
@ -1040,21 +1040,34 @@ github.com/waku-org/go-waku/waku/v2/protocol/rln/keystore
|
|||
github.com/waku-org/go-waku/waku/v2/protocol/rln/web3
|
||||
github.com/waku-org/go-waku/waku/v2/protocol/store
|
||||
github.com/waku-org/go-waku/waku/v2/protocol/store/pb
|
||||
github.com/waku-org/go-waku/waku/v2/protocol/subscription
|
||||
github.com/waku-org/go-waku/waku/v2/rendezvous
|
||||
github.com/waku-org/go-waku/waku/v2/timesource
|
||||
github.com/waku-org/go-waku/waku/v2/utils
|
||||
# github.com/waku-org/go-zerokit-rln v0.1.14-0.20230905214645-ca686a02e816
|
||||
# github.com/waku-org/go-zerokit-rln v0.1.14-0.20230916173259-d284a3d8f2fd
|
||||
## explicit; go 1.18
|
||||
github.com/waku-org/go-zerokit-rln/rln
|
||||
github.com/waku-org/go-zerokit-rln/rln/link
|
||||
# github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230905213302-1d6d18a03e7c
|
||||
# github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230916172309-ee0ee61dde2b
|
||||
## explicit; go 1.19
|
||||
github.com/waku-org/go-zerokit-rln-apple/libs/aarch64-apple-darwin
|
||||
github.com/waku-org/go-zerokit-rln-apple/libs/x86_64-apple-darwin
|
||||
github.com/waku-org/go-zerokit-rln-apple/rln
|
||||
# github.com/waku-org/go-zerokit-rln-arm v0.0.0-20230905183322-05f4cda61468
|
||||
# github.com/waku-org/go-zerokit-rln-arm v0.0.0-20230916171929-1dd9494ff065
|
||||
## explicit; go 1.19
|
||||
github.com/waku-org/go-zerokit-rln-arm/libs/aarch64-linux-android
|
||||
github.com/waku-org/go-zerokit-rln-arm/libs/aarch64-unknown-linux-gnu
|
||||
github.com/waku-org/go-zerokit-rln-arm/libs/arm-linux-androideabi
|
||||
github.com/waku-org/go-zerokit-rln-arm/libs/arm-unknown-linux-gnueabi
|
||||
github.com/waku-org/go-zerokit-rln-arm/libs/arm-unknown-linux-gnueabihf
|
||||
github.com/waku-org/go-zerokit-rln-arm/libs/armv7-linux-androideabi
|
||||
github.com/waku-org/go-zerokit-rln-arm/libs/armv7a-linux-androideabi
|
||||
github.com/waku-org/go-zerokit-rln-arm/rln
|
||||
# github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230905182930-2b11e72ef866
|
||||
# github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230916171518-2a77c3734dd1
|
||||
## explicit; go 1.19
|
||||
github.com/waku-org/go-zerokit-rln-x86_64/libs/x86_64-pc-windows-gnu
|
||||
github.com/waku-org/go-zerokit-rln-x86_64/libs/x86_64-unknown-linux-gnu
|
||||
github.com/waku-org/go-zerokit-rln-x86_64/libs/x86_64-unknown-linux-musl
|
||||
github.com/waku-org/go-zerokit-rln-x86_64/rln
|
||||
# github.com/wealdtech/go-ens/v3 v3.5.0
|
||||
## explicit; go 1.12
|
||||
|
@ -1161,6 +1174,7 @@ golang.org/x/crypto/ssh/terminal
|
|||
# golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1
|
||||
## explicit; go 1.20
|
||||
golang.org/x/exp/constraints
|
||||
golang.org/x/exp/maps
|
||||
golang.org/x/exp/slices
|
||||
# golang.org/x/image v0.0.0-20210220032944-ac19c3e999fb
|
||||
## explicit; go 1.12
|
||||
|
|
|
@ -312,14 +312,11 @@ func (api *PublicWakuAPI) Messages(ctx context.Context, crit Criteria) (*rpc.Sub
|
|||
}
|
||||
|
||||
filter.PubsubTopic = crit.PubsubTopic
|
||||
|
||||
for _, bt := range crit.ContentTopics {
|
||||
filter.Topics = append(filter.Topics, bt[:])
|
||||
}
|
||||
filter.ContentTopics = common.NewTopicSet(crit.ContentTopics)
|
||||
|
||||
// listen for message that are encrypted with the given symmetric key
|
||||
if symKeyGiven {
|
||||
if len(filter.Topics) == 0 {
|
||||
if len(filter.ContentTopics) == 0 {
|
||||
return nil, ErrNoTopics
|
||||
}
|
||||
key, err := api.w.GetSymKey(crit.SymKeyID)
|
||||
|
@ -461,7 +458,6 @@ func (api *PublicWakuAPI) NewMessageFilter(req Criteria) (string, error) {
|
|||
src *ecdsa.PublicKey
|
||||
keySym []byte
|
||||
keyAsym *ecdsa.PrivateKey
|
||||
topics [][]byte
|
||||
|
||||
symKeyGiven = len(req.SymKeyID) > 0
|
||||
asymKeyGiven = len(req.PrivateKeyID) > 0
|
||||
|
@ -495,21 +491,13 @@ func (api *PublicWakuAPI) NewMessageFilter(req Criteria) (string, error) {
|
|||
}
|
||||
}
|
||||
|
||||
if len(req.ContentTopics) > 0 {
|
||||
topics = make([][]byte, len(req.ContentTopics))
|
||||
for i, topic := range req.ContentTopics {
|
||||
topics[i] = make([]byte, common.TopicLength)
|
||||
copy(topics[i], topic[:])
|
||||
}
|
||||
}
|
||||
|
||||
f := &common.Filter{
|
||||
Src: src,
|
||||
KeySym: keySym,
|
||||
KeyAsym: keyAsym,
|
||||
PubsubTopic: req.PubsubTopic,
|
||||
Topics: topics,
|
||||
Messages: common.NewMemoryMessageStore(),
|
||||
Src: src,
|
||||
KeySym: keySym,
|
||||
KeyAsym: keyAsym,
|
||||
PubsubTopic: req.PubsubTopic,
|
||||
ContentTopics: common.NewTopicSet(req.ContentTopics),
|
||||
Messages: common.NewMemoryMessageStore(),
|
||||
}
|
||||
|
||||
id, err := api.w.Subscribe(f)
|
||||
|
|
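A caller-side sketch of the migrated Filter construction in status-go, where ContentTopics replaces the raw Topics byte slices; the symmetric key and topic bytes are placeholders.

package main

import (
	"github.com/status-im/status-go/wakuv2/common"
	"github.com/waku-org/go-waku/waku/v2/protocol/relay"
)

// buildFilter shows the new shape only; installing it still goes through Subscribe.
func buildFilter(symKey []byte) *common.Filter {
	return &common.Filter{
		KeySym:      symKey,
		PubsubTopic: relay.DefaultWakuTopic,
		// ContentTopics is now a TopicSet built from TopicType values instead of [][]byte.
		ContentTopics: common.NewTopicSet([]common.TopicType{{0xde, 0xea, 0xbe, 0xef}}),
		Messages:      common.NewMemoryMessageStore(),
	}
}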
|
@ -19,11 +19,11 @@
|
|||
package wakuv2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
|
||||
"golang.org/x/exp/maps"
|
||||
|
||||
"github.com/status-im/status-go/wakuv2/common"
|
||||
)
|
||||
|
@ -43,12 +43,12 @@ func TestMultipleTopicCopyInNewMessageFilter(t *testing.T) {
|
|||
lastUsed: make(map[string]time.Time),
|
||||
}
|
||||
|
||||
t1 := [4]byte{0xde, 0xea, 0xbe, 0xef}
|
||||
t2 := [4]byte{0xca, 0xfe, 0xde, 0xca}
|
||||
t1 := common.TopicType([4]byte{0xde, 0xea, 0xbe, 0xef})
|
||||
t2 := common.TopicType([4]byte{0xca, 0xfe, 0xde, 0xca})
|
||||
|
||||
crit := Criteria{
|
||||
SymKeyID: keyID,
|
||||
ContentTopics: []common.TopicType{common.TopicType(t1), common.TopicType(t2)},
|
||||
ContentTopics: []common.TopicType{t1, t2},
|
||||
}
|
||||
|
||||
_, err = api.NewMessageFilter(crit)
|
||||
|
@ -59,10 +59,9 @@ func TestMultipleTopicCopyInNewMessageFilter(t *testing.T) {
|
|||
found := false
|
||||
candidates := w.filters.GetWatchersByTopic(relay.DefaultWakuTopic, t1)
|
||||
for _, f := range candidates {
|
||||
if len(f.Topics) == 2 {
|
||||
if bytes.Equal(f.Topics[0], t1[:]) && bytes.Equal(f.Topics[1], t2[:]) {
|
||||
found = true
|
||||
}
|
||||
if maps.Equal(f.ContentTopics, common.NewTopicSet(crit.ContentTopics)) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -24,6 +24,8 @@ import (
    "sync"

    "github.com/waku-org/go-waku/waku/v2/protocol/relay"
    "go.uber.org/zap"
    "golang.org/x/exp/maps"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/crypto"

@@ -32,13 +34,13 @@ import (

// Filter represents a Waku message filter
type Filter struct {
    Src         *ecdsa.PublicKey  // Sender of the message
    KeyAsym     *ecdsa.PrivateKey // Private Key of recipient
    KeySym      []byte            // Key associated with the Topic
    PubsubTopic string            // Pubsub topic used to filter messages with
    Topics      [][]byte          // ContentTopics to filter messages with
    SymKeyHash  common.Hash       // The Keccak256Hash of the symmetric key, needed for optimization
    id          string            // unique identifier
    Src           *ecdsa.PublicKey  // Sender of the message
    KeyAsym       *ecdsa.PrivateKey // Private Key of recipient
    KeySym        []byte            // Key associated with the Topic
    PubsubTopic   string            // Pubsub topic used to filter messages with
    ContentTopics TopicSet          // ContentTopics to filter messages with
    SymKeyHash    common.Hash       // The Keccak256Hash of the symmetric key, needed for optimization
    id            string            // unique identifier

    Messages MessageStore
}

@@ -49,20 +51,27 @@ type PubsubTopicToContentTopic = map[string]ContentTopicToFilter

// Filters represents a collection of filters
type Filters struct {
    // Map of random ID to Filter
    watchers map[string]*Filter

    topicMatcher     PubsubTopicToContentTopic // map a topic to the filters that are interested in being notified when a message matches that topic
    allTopicsMatcher map[*Filter]struct{}      // list all the filters that will be notified of a new message, no matter what its topic is
    // map a topic to the filters that are interested in being notified when a message matches that topic
    topicMatcher PubsubTopicToContentTopic

    mutex sync.RWMutex
    // list all the filters that will be notified of a new message, no matter what its topic is
    allTopicsMatcher map[*Filter]struct{}

    logger *zap.Logger

    sync.RWMutex
}

// NewFilters returns a newly created filter collection
func NewFilters() *Filters {
func NewFilters(logger *zap.Logger) *Filters {
    return &Filters{
        watchers:         make(map[string]*Filter),
        topicMatcher:     make(PubsubTopicToContentTopic),
        allTopicsMatcher: make(map[*Filter]struct{}),
        logger:           logger,
    }
}

@@ -77,8 +86,8 @@ func (fs *Filters) Install(watcher *Filter) (string, error) {
        return "", err
    }

    fs.mutex.Lock()
    defer fs.mutex.Unlock()
    fs.Lock()
    defer fs.Unlock()

    if fs.watchers[id] != nil {
        return "", fmt.Errorf("failed to generate unique ID")

@@ -89,19 +98,25 @@ func (fs *Filters) Install(watcher *Filter) (string, error) {
    }

    watcher.id = id

    fs.watchers[id] = watcher
    fs.addTopicMatcher(watcher)

    fs.logger.Debug("filters install", zap.String("id", id))
    return id, err
}

// Uninstall will remove a filter whose id has been specified from
// the filter collection
func (fs *Filters) Uninstall(id string) bool {
    fs.mutex.Lock()
    defer fs.mutex.Unlock()
    if fs.watchers[id] != nil {
        fs.removeFromTopicMatchers(fs.watchers[id])
    fs.Lock()
    defer fs.Unlock()
    watcher := fs.watchers[id]
    if watcher != nil {
        fs.removeFromTopicMatchers(watcher)
        delete(fs.watchers, id)

        fs.logger.Debug("filters uninstall", zap.String("id", id))
        return true
    }
    return false

@@ -109,8 +124,8 @@ func (fs *Filters) Uninstall(id string) bool {

func (fs *Filters) AllTopics() []TopicType {
    var topics []TopicType
    fs.mutex.Lock()
    defer fs.mutex.Unlock()
    fs.Lock()
    defer fs.Unlock()
    for _, topicsPerPubsubTopic := range fs.topicMatcher {
        for t := range topicsPerPubsubTopic {
            topics = append(topics, t)

@@ -124,7 +139,7 @@ func (fs *Filters) AllTopics() []TopicType {
// If the filter's Topics array is empty, it will be tried on every topic.
// Otherwise, it will be tried on the topics specified.
func (fs *Filters) addTopicMatcher(watcher *Filter) {
    if len(watcher.Topics) == 0 && (watcher.PubsubTopic == relay.DefaultWakuTopic || watcher.PubsubTopic == "") {
    if len(watcher.ContentTopics) == 0 && (watcher.PubsubTopic == relay.DefaultWakuTopic || watcher.PubsubTopic == "") {
        fs.allTopicsMatcher[watcher] = struct{}{}
    } else {
        filtersPerContentTopic, ok := fs.topicMatcher[watcher.PubsubTopic]

@@ -132,8 +147,7 @@ func (fs *Filters) addTopicMatcher(watcher *Filter) {
            filtersPerContentTopic = make(ContentTopicToFilter)
        }

        for _, t := range watcher.Topics {
            topic := BytesToTopic(t)
        for topic := range watcher.ContentTopics {
            if filtersPerContentTopic[topic] == nil {
                filtersPerContentTopic[topic] = make(FilterSet)
            }

@@ -153,8 +167,7 @@ func (fs *Filters) removeFromTopicMatchers(watcher *Filter) {
        return
    }

    for _, t := range watcher.Topics {
        topic := BytesToTopic(t)
    for topic := range watcher.ContentTopics {
        delete(filtersPerContentTopic[topic], watcher)
    }

@@ -182,18 +195,24 @@ func (fs *Filters) GetWatchersByTopic(pubsubTopic string, contentTopic TopicType

// Get returns a filter from the collection with a specific ID
func (fs *Filters) Get(id string) *Filter {
    fs.mutex.RLock()
    defer fs.mutex.RUnlock()
    fs.RLock()
    defer fs.RUnlock()
    return fs.watchers[id]
}

func (fs *Filters) GetFilters() map[string]*Filter {
    fs.RLock()
    defer fs.RUnlock()
    return maps.Clone(fs.watchers)
}

// NotifyWatchers notifies any filter that has declared interest
// for the envelope's topic.
func (fs *Filters) NotifyWatchers(recvMessage *ReceivedMessage) bool {
    var decodedMsg *ReceivedMessage

    fs.mutex.RLock()
    defer fs.mutex.RUnlock()
    fs.RLock()
    defer fs.RUnlock()

    var matched bool
    candidates := fs.GetWatchersByTopic(recvMessage.PubsubTopic, recvMessage.ContentTopic)
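A hedged usage sketch (not from the commit) of the refactored collection: NewFilters now takes a logger, Install indexes the watcher by (pubsub topic, content topic), and Uninstall tears that index down again. It assumes Install accepts a topics-only filter; key material and message delivery are omitted.

package main

import (
    "fmt"

    "github.com/waku-org/go-waku/waku/v2/protocol/relay"
    "go.uber.org/zap"

    "github.com/status-im/status-go/wakuv2/common"
)

func main() {
    filters := common.NewFilters(zap.NewNop())

    topic := common.TopicType([4]byte{0xde, 0xea, 0xbe, 0xef})
    id, err := filters.Install(&common.Filter{
        PubsubTopic:   relay.DefaultWakuTopic,
        ContentTopics: common.NewTopicSet([]common.TopicType{topic}),
        Messages:      common.NewMemoryMessageStore(),
    })
    if err != nil {
        panic(err)
    }

    // GetWatchersByTopic consults the (pubsub topic -> content topic) matcher.
    fmt.Println(len(filters.GetWatchersByTopic(relay.DefaultWakuTopic, topic))) // 1
    fmt.Println(filters.Uninstall(id))                                          // true
}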
@@ -30,6 +30,24 @@
// SHA3 hash of some arbitrary data given by the original author of the message.
type TopicType [TopicLength]byte

type TopicSet map[TopicType]struct{}

func NewTopicSet(topics []TopicType) TopicSet {
    s := make(TopicSet, len(topics))
    for _, t := range topics {
        s[t] = struct{}{}
    }
    return s
}

func NewTopicSetFromBytes(byteArrays [][]byte) TopicSet {
    topics := make([]TopicType, len(byteArrays))
    for i, byteArr := range byteArrays {
        topics[i] = BytesToTopic(byteArr)
    }
    return NewTopicSet(topics)
}

// BytesToTopic converts from the byte array representation of a topic
// into the TopicType type.
func BytesToTopic(b []byte) (t TopicType) {
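A short sketch (illustrative only, not from the commit) of the two constructors added above: NewTopicSetFromBytes converts each byte slice with BytesToTopic and the map keys then deduplicate repeated topics.

package main

import (
    "fmt"

    "github.com/status-im/status-go/wakuv2/common"
)

func main() {
    raw := [][]byte{
        {0xde, 0xea, 0xbe, 0xef},
        {0xde, 0xea, 0xbe, 0xef}, // duplicate on purpose
        {0xca, 0xfe, 0xde, 0xca},
    }

    set := common.NewTopicSetFromBytes(raw)
    _, ok := set[common.BytesToTopic([]byte{0xca, 0xfe, 0xde, 0xca})]
    fmt.Println(len(set), ok) // 2 true
}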
@@ -54,7 +54,7 @@ var DefaultConfig = Config{
    KeepAliveInterval: 10, // second
    DiscoveryLimit:    20,
    MinPeersForRelay:  1, // TODO: determine correct value with Vac team
    MinPeersForFilter: 1, // TODO: determine correct value with Vac team and via testing
    MinPeersForFilter: 2, // TODO: determine correct value with Vac team and via testing
    AutoUpdate:        false,
}
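Raising MinPeersForFilter from 1 to 2 gives every installed filter a redundant peer: the FilterManager added below keeps topping up subscriptions until that many are alive. A tiny sketch of the top-up rule applied in resubscribe (the helper name is illustrative, not from the commit):

package main

import "fmt"

// missingSubs mirrors the loop bound used in FilterManager.resubscribe:
// subscribe to (minPeers - live) additional peers, never a negative number.
func missingSubs(live, minPeers int) int {
    if live >= minPeers {
        return 0
    }
    return minPeers - live
}

func main() {
    fmt.Println(missingSubs(0, 2)) // 2 – fresh filter, needs two peers
    fmt.Println(missingSubs(1, 2)) // 1 – one peer dropped, top up by one
    fmt.Println(missingSubs(2, 2)) // 0 – healthy
}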
@ -0,0 +1,364 @@
|
|||
package wakuv2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"errors"
|
||||
"math/big"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
|
||||
"github.com/status-im/status-go/wakuv2/common"
|
||||
|
||||
node "github.com/waku-org/go-waku/waku/v2/node"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/filter"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/subscription"
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
const (
|
||||
FilterEventAdded = iota
|
||||
FilterEventRemoved
|
||||
FilterEventPingResult
|
||||
FilterEventSubscribeResult
|
||||
FilterEventUnsubscribeResult
|
||||
FilterEventGetStats
|
||||
)
|
||||
|
||||
const pingTimeout = 10 * time.Second
|
||||
|
||||
type FilterSubs map[string]subscription.SubscriptionSet
|
||||
|
||||
type FilterEvent struct {
|
||||
eventType int
|
||||
filterID string
|
||||
success bool
|
||||
peerID peer.ID
|
||||
tempID string
|
||||
sub *subscription.SubscriptionDetails
|
||||
ch chan FilterSubs
|
||||
}
|
||||
|
||||
// FilterManager maintains filter peer health.
//
// runFilterLoop is the main event loop.
//
// Filter Install/Uninstall events are pushed onto eventChan.
// Subscribe, UnsubscribeWithSubscription and IsSubscriptionAlive calls
// are invoked from goroutines and push their results onto eventChan.
//
// filterSubs is the map of filter IDs to subscriptions.
|
||||
type FilterManager struct {
|
||||
ctx context.Context
|
||||
filterSubs FilterSubs
|
||||
eventChan chan (FilterEvent)
|
||||
isFilterSubAlive func(sub *subscription.SubscriptionDetails) error
|
||||
getFilter func(string) *common.Filter
|
||||
onNewEnvelopes func(env *protocol.Envelope) error
|
||||
peers []peer.ID
|
||||
logger *zap.Logger
|
||||
settings settings
|
||||
node *node.WakuNode
|
||||
}
|
||||
|
||||
func newFilterManager(ctx context.Context, logger *zap.Logger, getFilterFn func(string) *common.Filter, settings settings, onNewEnvelopes func(env *protocol.Envelope) error, node *node.WakuNode) *FilterManager {
|
||||
// This fn is being mocked in test
|
||||
mgr := new(FilterManager)
|
||||
mgr.ctx = ctx
|
||||
mgr.logger = logger
|
||||
mgr.getFilter = getFilterFn
|
||||
mgr.onNewEnvelopes = onNewEnvelopes
|
||||
mgr.filterSubs = make(FilterSubs)
|
||||
mgr.eventChan = make(chan FilterEvent, 100)
|
||||
mgr.peers = make([]peer.ID, 0)
|
||||
mgr.settings = settings
|
||||
mgr.node = node
|
||||
mgr.isFilterSubAlive = func(sub *subscription.SubscriptionDetails) error {
|
||||
ctx, cancel := context.WithTimeout(ctx, pingTimeout)
|
||||
defer cancel()
|
||||
return mgr.node.FilterLightnode().IsSubscriptionAlive(ctx, sub)
|
||||
}
|
||||
|
||||
return mgr
|
||||
}
|
||||
|
||||
func (mgr *FilterManager) runFilterLoop(wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
// Use it to ping filter peer(s) periodically
|
||||
ticker := time.NewTicker(5 * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
// Populate filter peers initially
|
||||
mgr.peers = mgr.findFilterPeers() // ordered list of peers to select from
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-mgr.ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
mgr.peers = mgr.findFilterPeers()
|
||||
mgr.pingPeers()
|
||||
case ev := <-mgr.eventChan:
|
||||
mgr.processEvents(&ev)
|
||||
}
|
||||
}
|
||||
}
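All FilterManager state (filterSubs, peers) is touched only from the goroutine running runFilterLoop; subscribe, unsubscribe and ping workers report back through eventChan instead of sharing locks. A self-contained sketch of that actor-style loop, with illustrative names rather than the commit's types:

package main

import "fmt"

type event struct {
    kind string
    id   string
}

func main() {
    events := make(chan event, 10)
    done := make(chan map[string]int)

    go func() {
        subs := map[string]int{} // owned exclusively by this goroutine, no mutex needed
        for ev := range events {
            switch ev.kind {
            case "added":
                subs[ev.id]++
            case "removed":
                delete(subs, ev.id)
            }
        }
        done <- subs
    }()

    events <- event{kind: "added", id: "filter-1"}
    events <- event{kind: "added", id: "filter-2"}
    events <- event{kind: "removed", id: "filter-1"}
    close(events)

    fmt.Println(<-done) // map[filter-2:1]
}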
|
||||
|
||||
func (mgr *FilterManager) processEvents(ev *FilterEvent) {
|
||||
switch ev.eventType {
|
||||
|
||||
case FilterEventAdded:
|
||||
mgr.filterSubs[ev.filterID] = make(subscription.SubscriptionSet)
|
||||
mgr.resubscribe(ev.filterID)
|
||||
|
||||
case FilterEventRemoved:
|
||||
for _, sub := range mgr.filterSubs[ev.filterID] {
|
||||
if sub == nil {
|
||||
// Skip temp subs
|
||||
continue
|
||||
}
|
||||
go mgr.unsubscribeFromFilter(ev.filterID, sub)
|
||||
}
|
||||
delete(mgr.filterSubs, ev.filterID)
|
||||
|
||||
case FilterEventPingResult:
|
||||
if ev.success {
|
||||
break
|
||||
}
|
||||
// filterID field is only set when there are no subs to check for this filter,
|
||||
// therefore no particular peers that could be unreachable.
|
||||
if ev.filterID != "" {
|
||||
// Trigger full resubscribe, filter has no peers
|
||||
mgr.logger.Debug("filter has no subs", zap.String("filterId", ev.filterID))
|
||||
mgr.resubscribe(ev.filterID)
|
||||
break
|
||||
}
|
||||
// Remove peer from list
|
||||
for i, p := range mgr.peers {
|
||||
if ev.peerID == p {
|
||||
mgr.peers = append(mgr.peers[:i], mgr.peers[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
// Delete subs for removed peer
|
||||
for filterID, subs := range mgr.filterSubs {
|
||||
for _, sub := range subs {
|
||||
if sub == nil {
|
||||
// Skip temp subs
|
||||
continue
|
||||
}
|
||||
if sub.PeerID == ev.peerID {
|
||||
mgr.logger.Debug("filter sub is inactive", zap.String("filterId", filterID), zap.String("subID", sub.ID))
|
||||
delete(subs, sub.ID)
|
||||
go mgr.unsubscribeFromFilter(filterID, sub)
|
||||
}
|
||||
}
|
||||
mgr.resubscribe(filterID)
|
||||
}
|
||||
|
||||
case FilterEventSubscribeResult:
|
||||
subs, found := mgr.filterSubs[ev.filterID]
|
||||
if ev.success {
|
||||
if found {
|
||||
subs[ev.sub.ID] = ev.sub
|
||||
go mgr.runFilterSubscriptionLoop(ev.sub)
|
||||
} else {
|
||||
// We subscribed to a filter that is already uninstalled; invoke unsubscribe
|
||||
go mgr.unsubscribeFromFilter(ev.filterID, ev.sub)
|
||||
}
|
||||
}
|
||||
if found {
|
||||
// Delete temp subscription record
|
||||
delete(subs, ev.tempID)
|
||||
}
|
||||
|
||||
case FilterEventUnsubscribeResult:
|
||||
mgr.logger.Debug("filter event unsubscribe_result", zap.String("filterId", ev.filterID), zap.Stringer("peerID", ev.sub.PeerID))
|
||||
|
||||
case FilterEventGetStats:
|
||||
stats := make(FilterSubs)
|
||||
for id, subs := range mgr.filterSubs {
|
||||
stats[id] = make(subscription.SubscriptionSet)
|
||||
for subID, sub := range subs {
|
||||
if sub == nil {
|
||||
// Skip temp subs
|
||||
continue
|
||||
}
|
||||
|
||||
stats[id][subID] = sub
|
||||
}
|
||||
}
|
||||
ev.ch <- stats
|
||||
}
|
||||
}
|
||||
|
||||
func (mgr *FilterManager) subscribeToFilter(filterID string, peer peer.ID, tempID string) {
|
||||
|
||||
f := mgr.getFilter(filterID)
|
||||
if f == nil {
|
||||
mgr.logger.Error("filter subscribeToFilter: No filter found", zap.String("id", filterID))
|
||||
mgr.eventChan <- FilterEvent{eventType: FilterEventSubscribeResult, filterID: filterID, tempID: tempID, success: false}
|
||||
return
|
||||
}
|
||||
contentFilter := mgr.buildContentFilter(f.PubsubTopic, f.ContentTopics)
|
||||
mgr.logger.Debug("filter subscribe to filter node", zap.Stringer("peer", peer), zap.String("pubsubTopic", contentFilter.PubsubTopic), zap.Strings("contentTopics", contentFilter.ContentTopicsList()))
|
||||
ctx, cancel := context.WithTimeout(mgr.ctx, requestTimeout)
|
||||
defer cancel()
|
||||
|
||||
subDetails, err := mgr.node.FilterLightnode().Subscribe(ctx, contentFilter, filter.WithPeer(peer))
|
||||
var sub *subscription.SubscriptionDetails
|
||||
if err != nil {
|
||||
mgr.logger.Warn("filter could not add wakuv2 filter for peer", zap.String("filterId", filterID), zap.Stringer("peer", peer), zap.Error(err))
|
||||
} else {
|
||||
mgr.logger.Debug("filter subscription success", zap.String("filterId", filterID), zap.Stringer("peer", peer), zap.String("pubsubTopic", contentFilter.PubsubTopic), zap.Strings("contentTopics", contentFilter.ContentTopicsList()))
|
||||
sub = subDetails[0]
|
||||
}
|
||||
|
||||
success := err == nil
|
||||
mgr.eventChan <- FilterEvent{eventType: FilterEventSubscribeResult, filterID: filterID, tempID: tempID, sub: sub, success: success}
|
||||
}
|
||||
|
||||
func (mgr *FilterManager) unsubscribeFromFilter(filterID string, sub *subscription.SubscriptionDetails) {
|
||||
mgr.logger.Debug("filter unsubscribe from filter node", zap.String("filterId", filterID), zap.String("subId", sub.ID), zap.Stringer("peer", sub.PeerID))
|
||||
// Unsubscribe on light node
|
||||
ctx, cancel := context.WithTimeout(mgr.ctx, requestTimeout)
|
||||
defer cancel()
|
||||
_, err := mgr.node.FilterLightnode().UnsubscribeWithSubscription(ctx, sub)
|
||||
|
||||
if err != nil {
|
||||
mgr.logger.Warn("could not unsubscribe wakuv2 filter for peer", zap.String("filterId", filterID), zap.String("subId", sub.ID), zap.Error(err))
|
||||
}
|
||||
|
||||
success := err == nil
|
||||
mgr.eventChan <- FilterEvent{eventType: FilterEventUnsubscribeResult, filterID: filterID, success: success, sub: sub}
|
||||
}
|
||||
|
||||
// Check whether each of the installed filters
|
||||
// has enough alive subscriptions to peers
|
||||
func (mgr *FilterManager) pingPeers() {
|
||||
mgr.logger.Debug("filter pingPeers")
|
||||
|
||||
distinctPeers := make(map[peer.ID]struct{})
|
||||
for filterID, subs := range mgr.filterSubs {
|
||||
if len(subs) == 0 {
|
||||
// No subs found, trigger full resubscribe
|
||||
mgr.logger.Debug("filter ping peer no subs", zap.String("filterId", filterID))
|
||||
go func() {
|
||||
mgr.eventChan <- FilterEvent{eventType: FilterEventPingResult, filterID: filterID, success: false}
|
||||
}()
|
||||
continue
|
||||
}
|
||||
for _, sub := range subs {
|
||||
if sub == nil {
|
||||
// Skip temp subs
|
||||
continue
|
||||
}
|
||||
_, found := distinctPeers[sub.PeerID]
|
||||
if found {
|
||||
continue
|
||||
}
|
||||
distinctPeers[sub.PeerID] = struct{}{}
|
||||
mgr.logger.Debug("filter ping peer", zap.Stringer("peerId", sub.PeerID))
|
||||
go func(sub *subscription.SubscriptionDetails) {
|
||||
err := mgr.isFilterSubAlive(sub)
|
||||
alive := err == nil
|
||||
|
||||
if alive {
|
||||
mgr.logger.Debug("filter aliveness check succeeded", zap.Stringer("peerId", sub.PeerID))
|
||||
} else {
|
||||
mgr.logger.Debug("filter aliveness check failed", zap.Stringer("peerId", sub.PeerID), zap.Error(err))
|
||||
}
|
||||
mgr.eventChan <- FilterEvent{eventType: FilterEventPingResult, peerID: sub.PeerID, success: alive}
|
||||
}(sub)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (mgr *FilterManager) buildContentFilter(pubsubTopic string, contentTopicSet common.TopicSet) protocol.ContentFilter {
|
||||
contentTopics := make([]string, len(contentTopicSet))
|
||||
for i, ct := range maps.Keys(contentTopicSet) {
|
||||
contentTopics[i] = ct.ContentTopic()
|
||||
}
|
||||
|
||||
return protocol.ContentFilter{
|
||||
PubsubTopic: pubsubTopic,
|
||||
ContentTopics: protocol.NewContentTopicSet(contentTopics...),
|
||||
}
|
||||
}
|
||||
|
||||
// Find suitable peer(s)
|
||||
func (mgr *FilterManager) findFilterPeers() []peer.ID {
|
||||
allPeers := mgr.node.Host().Peerstore().Peers()
|
||||
|
||||
peers := make([]peer.ID, 0)
|
||||
for _, peer := range allPeers {
|
||||
protocols, err := mgr.node.Host().Peerstore().SupportsProtocols(peer, filter.FilterSubscribeID_v20beta1, relay.WakuRelayID_v200)
|
||||
if err != nil {
|
||||
mgr.logger.Debug("SupportsProtocols error", zap.Error(err))
|
||||
continue
|
||||
}
|
||||
|
||||
if len(protocols) == 2 {
|
||||
peers = append(peers, peer)
|
||||
}
|
||||
}
|
||||
|
||||
mgr.logger.Debug("Filtered peers", zap.Int("cnt", len(peers)))
|
||||
return peers
|
||||
}
|
||||
|
||||
func (mgr *FilterManager) findPeerCandidate() (peer.ID, error) {
|
||||
if len(mgr.peers) == 0 {
|
||||
return "", errors.New("filter could not select a suitable peer")
|
||||
}
|
||||
n, _ := rand.Int(rand.Reader, big.NewInt(int64(len(mgr.peers))))
|
||||
return mgr.peers[n.Int64()], nil
|
||||
}
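findPeerCandidate picks a peer uniformly at random using crypto/rand; note that the rand.Int error is discarded above. A stand-alone sketch of the same selection with the error handled (the helper name is illustrative, not from the commit):

package main

import (
    "crypto/rand"
    "errors"
    "fmt"
    "math/big"
)

// pickOne returns a uniformly random element, using crypto/rand like findPeerCandidate.
func pickOne(candidates []string) (string, error) {
    if len(candidates) == 0 {
        return "", errors.New("no candidates")
    }
    n, err := rand.Int(rand.Reader, big.NewInt(int64(len(candidates))))
    if err != nil {
        return "", err
    }
    return candidates[n.Int64()], nil
}

func main() {
    peer, err := pickOne([]string{"peerA", "peerB", "peerC"})
    fmt.Println(peer, err)
}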
|
||||
|
||||
func (mgr *FilterManager) resubscribe(filterID string) {
|
||||
subs, found := mgr.filterSubs[filterID]
|
||||
if !found {
|
||||
mgr.logger.Error("resubscribe filter not found", zap.String("filterId", filterID))
|
||||
return
|
||||
}
|
||||
mgr.logger.Debug("filter active subscriptions count:", zap.String("filterId", filterID), zap.Int("len", len(subs)))
|
||||
for i := len(subs); i < mgr.settings.MinPeersForFilter; i++ {
|
||||
mgr.logger.Debug("filter check not passed, try subscribing to peers", zap.String("filterId", filterID))
|
||||
peer, err := mgr.findPeerCandidate()
|
||||
|
||||
if err == nil {
|
||||
// Create sub placeholder in order to avoid potentially too many subs
|
||||
tempID := uuid.NewString()
|
||||
subs[tempID] = nil
|
||||
go mgr.subscribeToFilter(filterID, peer, tempID)
|
||||
} else {
|
||||
mgr.logger.Error("filter resubscribe findPeer error", zap.Error(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (mgr *FilterManager) runFilterSubscriptionLoop(sub *subscription.SubscriptionDetails) {
|
||||
for {
|
||||
select {
|
||||
case <-mgr.ctx.Done():
|
||||
return
|
||||
case env, ok := <-sub.C:
|
||||
if ok {
|
||||
err := (mgr.onNewEnvelopes)(env)
|
||||
if err != nil {
|
||||
mgr.logger.Error("OnNewEnvelopes error", zap.Error(err))
|
||||
}
|
||||
} else {
|
||||
mgr.logger.Debug("filter sub is closed", zap.String("id", sub.ID))
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
278 wakuv2/waku.go
@ -28,7 +28,6 @@ import (
|
|||
"math"
|
||||
"net"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
@ -57,7 +56,7 @@ import (
|
|||
"github.com/waku-org/go-waku/waku/v2/dnsdisc"
|
||||
wps "github.com/waku-org/go-waku/waku/v2/peerstore"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/filter"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/lightpush"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
|
||||
|
||||
|
@ -102,11 +101,8 @@ type Waku struct {
|
|||
dnsAddressCacheLock *sync.RWMutex // lock to handle access to the map
|
||||
|
||||
// Filter-related
|
||||
filters *common.Filters // Message filters installed with Subscribe function
|
||||
filterSubscriptions map[*common.Filter]map[string]*filter.SubscriptionDetails // wakuv2 filter subscription details
|
||||
|
||||
filterPeerDisconnectMap map[peer.ID]int64
|
||||
isFilterSubAlive func(sub *filter.SubscriptionDetails) error
|
||||
filters *common.Filters // Message filters installed with Subscribe function
|
||||
filterManager *FilterManager
|
||||
|
||||
privateKeys map[string]*ecdsa.PrivateKey // Private key storage
|
||||
symKeys map[string][]byte // Symmetric key storage
|
||||
|
@ -196,7 +192,7 @@ func New(nodeKey string, fleet string, cfg *Config, logger *zap.Logger, appDB *s
|
|||
}
|
||||
}
|
||||
|
||||
logger.Debug("starting wakuv2 with config", zap.Any("config", cfg))
|
||||
logger.Info("starting wakuv2 with config", zap.Any("config", cfg))
|
||||
|
||||
waku := &Waku{
|
||||
appDB: appDB,
|
||||
|
@ -213,8 +209,6 @@ func New(nodeKey string, fleet string, cfg *Config, logger *zap.Logger, appDB *s
|
|||
dnsAddressCache: make(map[string][]dnsdisc.DiscoveredNode),
|
||||
dnsAddressCacheLock: &sync.RWMutex{},
|
||||
storeMsgIDs: make(map[gethcommon.Hash]bool),
|
||||
filterPeerDisconnectMap: make(map[peer.ID]int64),
|
||||
filterSubscriptions: make(map[*common.Filter]map[string]*filter.SubscriptionDetails),
|
||||
timesource: ts,
|
||||
storeMsgIDsMu: sync.RWMutex{},
|
||||
logger: logger,
|
||||
|
@ -223,11 +217,6 @@ func New(nodeKey string, fleet string, cfg *Config, logger *zap.Logger, appDB *s
|
|||
onPeerStats: onPeerStats,
|
||||
}
|
||||
|
||||
// This fn is being mocked in test
|
||||
waku.isFilterSubAlive = func(sub *filter.SubscriptionDetails) error {
|
||||
return waku.node.FilterLightnode().IsSubscriptionAlive(waku.ctx, sub)
|
||||
}
|
||||
|
||||
waku.settings = settings{
|
||||
MaxMsgSize: cfg.MaxMessageSize,
|
||||
LightClient: cfg.LightClient,
|
||||
|
@ -239,7 +228,7 @@ func New(nodeKey string, fleet string, cfg *Config, logger *zap.Logger, appDB *s
|
|||
EnableDiscV5: cfg.EnableDiscV5,
|
||||
}
|
||||
|
||||
waku.filters = common.NewFilters()
|
||||
waku.filters = common.NewFilters(waku.logger)
|
||||
waku.bandwidthCounter = metrics.NewBandwidthCounter()
|
||||
|
||||
var privateKey *ecdsa.PrivateKey
|
||||
|
@ -389,7 +378,7 @@ func (w *Waku) dnsDiscover(ctx context.Context, enrtreeAddress string, apply fnA
|
|||
nameserver := w.settings.Nameserver
|
||||
w.settingsMu.RUnlock()
|
||||
|
||||
var opts []dnsdisc.DnsDiscoveryOption
|
||||
var opts []dnsdisc.DNSDiscoveryOption
|
||||
if nameserver != "" {
|
||||
opts = append(opts, dnsdisc.WithNameserver(nameserver))
|
||||
}
|
||||
|
@ -479,21 +468,21 @@ func (w *Waku) identifyAndConnect(ctx context.Context, isLightClient bool, ma mu
|
|||
if isLightClient {
|
||||
err = w.node.Host().Network().ClosePeer(peerInfo.ID)
|
||||
if err != nil {
|
||||
w.logger.Error("could not close connections to peer", zap.Any("peer", peerInfo.ID), zap.Error(err))
|
||||
w.logger.Error("could not close connections to peer", zap.Stringer("peer", peerInfo.ID), zap.Error(err))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
supportedProtocols, err := w.node.Host().Peerstore().SupportsProtocols(peerInfo.ID, relay.WakuRelayID_v200)
|
||||
if err != nil {
|
||||
w.logger.Error("could not obtain protocols", zap.Any("peer", peerInfo.ID), zap.Error(err))
|
||||
w.logger.Error("could not obtain protocols", zap.Stringer("peer", peerInfo.ID), zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
if len(supportedProtocols) == 0 {
|
||||
err = w.node.Host().Network().ClosePeer(peerInfo.ID)
|
||||
if err != nil {
|
||||
w.logger.Error("could not close connections to peer", zap.Any("peer", peerInfo.ID), zap.Error(err))
|
||||
w.logger.Error("could not close connections to peer", zap.Stringer("peer", peerInfo.ID), zap.Error(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -553,7 +542,7 @@ func (w *Waku) runPeerExchangeLoop() {
|
|||
case <-w.ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
w.logger.Debug("Running peer exchange loop")
|
||||
w.logger.Info("Running peer exchange loop")
|
||||
|
||||
connectedPeers := w.node.Host().Network().Peers()
|
||||
peersWithRelay := 0
|
||||
|
@ -644,13 +633,10 @@ func (w *Waku) subscribeToPubsubTopicWithWakuRelay(topic string, pubkey *ecdsa.P
|
|||
sub.Unsubscribe()
|
||||
return
|
||||
case env := <-sub.Ch:
|
||||
envelopeErrors, err := w.OnNewEnvelopes(env, common.RelayedMessageType)
|
||||
err := w.OnNewEnvelopes(env, common.RelayedMessageType)
|
||||
if err != nil {
|
||||
w.logger.Error("onNewEnvelope error", zap.Error(err))
|
||||
w.logger.Error("OnNewEnvelopes error", zap.Error(err))
|
||||
}
|
||||
// TODO: should these be handled?
|
||||
_ = envelopeErrors
|
||||
_ = err
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
@ -658,97 +644,6 @@ func (w *Waku) subscribeToPubsubTopicWithWakuRelay(topic string, pubkey *ecdsa.P
|
|||
return nil
|
||||
}
|
||||
|
||||
func (w *Waku) runFilterSubscriptionLoop(sub *filter.SubscriptionDetails) {
|
||||
for {
|
||||
select {
|
||||
case <-w.ctx.Done():
|
||||
return
|
||||
case env, ok := <-sub.C:
|
||||
if ok {
|
||||
envelopeErrors, err := w.OnNewEnvelopes(env, common.RelayedMessageType)
|
||||
// TODO: should these be handled?
|
||||
_ = envelopeErrors
|
||||
_ = err
|
||||
} else {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Waku) runFilterMsgLoop() {
|
||||
defer w.wg.Done()
|
||||
|
||||
if !w.settings.LightClient {
|
||||
return
|
||||
}
|
||||
|
||||
// Use it to ping filter peer(s) periodically
|
||||
ticker := time.NewTicker(time.Duration(w.cfg.KeepAliveInterval) * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-w.ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
for f, subMap := range w.filterSubscriptions {
|
||||
if len(subMap) == 0 {
|
||||
// All peers have disconnected on previous iteration,
|
||||
// attempt full reconnect
|
||||
err := w.subscribeToFilter(f)
|
||||
if err != nil {
|
||||
w.logger.Error("Failed to subscribe to filter")
|
||||
}
|
||||
break
|
||||
}
|
||||
for id, sub := range subMap {
|
||||
err := w.isFilterSubAlive(sub)
|
||||
if err != nil {
|
||||
// Unsubscribe on light node
|
||||
contentFilter := w.buildContentFilter(f.PubsubTopic, f.Topics)
|
||||
// TODO Better return value handling for WakuFilterPushResult
|
||||
_, err := w.node.FilterLightnode().Unsubscribe(w.ctx, contentFilter, filter.Peer(sub.PeerID))
|
||||
if err != nil {
|
||||
w.logger.Warn("could not unsubscribe wakuv2 filter for peer", zap.Any("peer", sub.PeerID))
|
||||
continue
|
||||
}
|
||||
|
||||
// Remove entry from maps
|
||||
w.filterPeerDisconnectMap[sub.PeerID] = time.Now().Unix()
|
||||
delete(subMap, id)
|
||||
|
||||
// Re-subscribe
|
||||
peers := w.findFilterPeers()
|
||||
if len(peers) > 0 && len(subMap) < w.settings.MinPeersForFilter {
|
||||
subDetails, err := w.node.FilterLightnode().Subscribe(w.ctx, contentFilter, filter.WithPeer(peers[0]))
|
||||
if err != nil {
|
||||
w.logger.Warn("could not add wakuv2 filter for peer", zap.Any("peer", peers[0]))
|
||||
break
|
||||
}
|
||||
|
||||
subMap[subDetails.ID] = subDetails
|
||||
go w.runFilterSubscriptionLoop(subDetails)
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
func (w *Waku) buildContentFilter(pubsubTopic string, topics [][]byte) filter.ContentFilter {
|
||||
contentFilter := filter.ContentFilter{
|
||||
Topic: pubsubTopic,
|
||||
}
|
||||
for _, topic := range topics {
|
||||
contentFilter.ContentTopics = append(contentFilter.ContentTopics, common.BytesToTopic(topic).ContentTopic())
|
||||
}
|
||||
|
||||
return contentFilter
|
||||
}
|
||||
|
||||
// MaxMessageSize returns the maximum accepted message size.
|
||||
func (w *Waku) MaxMessageSize() uint32 {
|
||||
w.settingsMu.RLock()
|
||||
|
@ -1038,43 +933,41 @@ func (w *Waku) Subscribe(f *common.Filter) (string, error) {
|
|||
f.PubsubTopic = relay.DefaultWakuTopic
|
||||
}
|
||||
|
||||
s, err := w.filters.Install(f)
|
||||
id, err := w.filters.Install(f)
|
||||
if err != nil {
|
||||
return s, err
|
||||
return id, err
|
||||
}
|
||||
|
||||
if w.settings.LightClient {
|
||||
go func() {
|
||||
ticker := time.NewTicker(1 * time.Second)
|
||||
for range ticker.C {
|
||||
err := w.subscribeToFilter(f)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
}()
|
||||
w.filterManager.eventChan <- FilterEvent{eventType: FilterEventAdded, filterID: id}
|
||||
}
|
||||
|
||||
return s, nil
|
||||
return id, nil
|
||||
}
|
||||
|
||||
// Unsubscribe removes an installed message handler.
|
||||
func (w *Waku) Unsubscribe(ctx context.Context, id string) error {
|
||||
f := w.filters.Get(id)
|
||||
if f != nil && w.settings.LightClient {
|
||||
contentFilter := w.buildContentFilter(f.PubsubTopic, f.Topics)
|
||||
if _, err := w.node.FilterLightnode().Unsubscribe(ctx, contentFilter); err != nil {
|
||||
return fmt.Errorf("failed to unsubscribe: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
ok := w.filters.Uninstall(id)
|
||||
if !ok {
|
||||
return fmt.Errorf("failed to unsubscribe: invalid ID '%s'", id)
|
||||
}
|
||||
|
||||
if w.settings.LightClient {
|
||||
w.filterManager.eventChan <- FilterEvent{eventType: FilterEventRemoved, filterID: id}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Used for testing
|
||||
func (w *Waku) getFilterStats() FilterSubs {
|
||||
ch := make(chan FilterSubs)
|
||||
w.filterManager.eventChan <- FilterEvent{eventType: FilterEventGetStats, ch: ch}
|
||||
stats := <-ch
|
||||
|
||||
return stats
|
||||
}
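getFilterStats uses the same event channel for synchronous queries: the request carries its own reply channel and the loop goroutine answers it, so the caller never reads filterSubs directly. A reduced sketch of that request/reply idiom, with illustrative types only:

package main

import "fmt"

type statsRequest struct {
    reply chan map[string]int
}

func main() {
    requests := make(chan statsRequest)

    go func() {
        state := map[string]int{"filter-1": 2} // owned by this goroutine
        for req := range requests {
            snapshot := make(map[string]int, len(state))
            for k, v := range state {
                snapshot[k] = v // hand out a copy, never the live map
            }
            req.reply <- snapshot
        }
    }()

    req := statsRequest{reply: make(chan map[string]int)}
    requests <- req
    fmt.Println(<-req.reply) // map[filter-1:2]
}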
|
||||
|
||||
// GetFilter returns the filter by id.
|
||||
func (w *Waku) GetFilter(id string) *common.Filter {
|
||||
return w.filters.Get(id)
|
||||
|
@ -1083,7 +976,7 @@ func (w *Waku) GetFilter(id string) *common.Filter {
|
|||
// Unsubscribe removes an installed message handler.
|
||||
func (w *Waku) UnsubscribeMany(ids []string) error {
|
||||
for _, id := range ids {
|
||||
w.logger.Debug("cleaning up filter", zap.String("id", id))
|
||||
w.logger.Info("cleaning up filter", zap.String("id", id))
|
||||
ok := w.filters.Uninstall(id)
|
||||
if !ok {
|
||||
w.logger.Warn("could not remove filter with id", zap.String("id", id))
|
||||
|
@ -1099,7 +992,7 @@ func (w *Waku) broadcast() {
|
|||
var err error
|
||||
if w.settings.LightClient {
|
||||
w.logger.Info("publishing message via lightpush", zap.String("envelopeHash", hexutil.Encode(envelope.Hash())), zap.String("pubsubTopic", envelope.PubsubTopic()))
|
||||
_, err = w.node.Lightpush().PublishToTopic(context.Background(), envelope.Message(), envelope.PubsubTopic())
|
||||
_, err = w.node.Lightpush().PublishToTopic(context.Background(), envelope.Message(), lightpush.WithPubSubTopic(envelope.PubsubTopic()))
|
||||
} else {
|
||||
w.logger.Info("publishing message via relay", zap.String("envelopeHash", hexutil.Encode(envelope.Hash())), zap.String("pubsubTopic", envelope.PubsubTopic()))
|
||||
_, err = w.node.Relay().PublishToTopic(context.Background(), envelope.Message(), envelope.PubsubTopic())
|
||||
|
@ -1184,7 +1077,7 @@ func (w *Waku) query(ctx context.Context, peerID peer.ID, pubsubTopic string, to
|
|||
}
|
||||
|
||||
func (w *Waku) Query(ctx context.Context, peerID peer.ID, pubsubTopic string, topics []common.TopicType, from uint64, to uint64, opts []store.HistoryRequestOption) (cursor *storepb.Index, err error) {
|
||||
requestID := protocol.GenerateRequestId()
|
||||
requestID := protocol.GenerateRequestID()
|
||||
opts = append(opts, store.WithRequestID(requestID))
|
||||
result, err := w.query(ctx, peerID, pubsubTopic, topics, from, to, opts)
|
||||
if err != nil {
|
||||
|
@ -1202,7 +1095,7 @@ func (w *Waku) Query(ctx context.Context, peerID peer.ID, pubsubTopic string, to
|
|||
|
||||
envelope := protocol.NewEnvelope(msg, msg.Timestamp, pubsubTopic)
|
||||
w.logger.Info("received waku2 store message", zap.Any("envelopeHash", hexutil.Encode(envelope.Hash())), zap.String("pubsubTopic", pubsubTopic))
|
||||
_, err = w.OnNewEnvelopes(envelope, common.StoreMessageType)
|
||||
err = w.OnNewEnvelopes(envelope, common.StoreMessageType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -1254,7 +1147,7 @@ func (w *Waku) Start() error {
|
|||
}
|
||||
}
|
||||
|
||||
w.wg.Add(3)
|
||||
w.wg.Add(2)
|
||||
|
||||
go func() {
|
||||
defer w.wg.Done()
|
||||
|
@ -1267,6 +1160,7 @@ func (w *Waku) Start() error {
|
|||
case c := <-w.connStatusChan:
|
||||
w.connStatusMu.Lock()
|
||||
latestConnStatus := formatConnStatus(w.node, c)
|
||||
w.logger.Debug("PeerStats", zap.Any("stats", latestConnStatus))
|
||||
for k, subs := range w.connStatusSubscriptions {
|
||||
if subs.Active() {
|
||||
subs.C <- latestConnStatus
|
||||
|
@ -1282,13 +1176,13 @@ func (w *Waku) Start() error {
|
|||
if w.cfg.EnableDiscV5 {
|
||||
// Restarting DiscV5
|
||||
if !latestConnStatus.IsOnline && isConnected {
|
||||
w.logger.Debug("Restarting DiscV5: offline and is connected")
|
||||
w.logger.Info("Restarting DiscV5: offline and is connected")
|
||||
isConnected = false
|
||||
w.node.DiscV5().Stop()
|
||||
} else if latestConnStatus.IsOnline && !isConnected {
|
||||
w.logger.Debug("Restarting DiscV5: online and is not connected")
|
||||
w.logger.Info("Restarting DiscV5: online and is not connected")
|
||||
isConnected = true
|
||||
if !w.node.DiscV5().IsStarted() {
|
||||
if w.node.DiscV5().ErrOnNotRunning() != nil {
|
||||
err := w.node.DiscV5().Start(ctx)
|
||||
if err != nil {
|
||||
w.logger.Error("Could not start DiscV5", zap.Error(err))
|
||||
|
@ -1302,7 +1196,19 @@ func (w *Waku) Start() error {
|
|||
|
||||
go w.telemetryBandwidthStats(w.cfg.TelemetryServerURL)
|
||||
go w.runPeerExchangeLoop()
|
||||
go w.runFilterMsgLoop()
|
||||
|
||||
if w.settings.LightClient {
|
||||
// Create FilterManager that will maintain peer connectivity
// for installed filters
|
||||
w.filterManager = newFilterManager(w.ctx, w.logger,
|
||||
func(id string) *common.Filter { return w.GetFilter(id) },
|
||||
w.settings,
|
||||
func(env *protocol.Envelope) error { return w.OnNewEnvelopes(env, common.RelayedMessageType) },
|
||||
w.node)
|
||||
|
||||
w.wg.Add(1)
|
||||
go w.filterManager.runFilterLoop(&w.wg)
|
||||
}
|
||||
|
||||
err = w.setupRelaySubscriptions()
|
||||
if err != nil {
|
||||
|
@ -1373,18 +1279,16 @@ func (w *Waku) Stop() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (w *Waku) OnNewEnvelopes(envelope *protocol.Envelope, msgType common.MessageType) ([]common.EnvelopeError, error) {
|
||||
func (w *Waku) OnNewEnvelopes(envelope *protocol.Envelope, msgType common.MessageType) error {
|
||||
if envelope == nil {
|
||||
return nil, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
recvMessage := common.NewReceivedMessage(envelope, msgType)
|
||||
if recvMessage == nil {
|
||||
return nil, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
envelopeErrors := make([]common.EnvelopeError, 0)
|
||||
|
||||
logger := w.logger.With(zap.String("hash", recvMessage.Hash().Hex()))
|
||||
|
||||
logger.Debug("received new envelope")
|
||||
|
@ -1399,10 +1303,10 @@ func (w *Waku) OnNewEnvelopes(envelope *protocol.Envelope, msgType common.Messag
|
|||
common.EnvelopesValidatedCounter.Inc()
|
||||
|
||||
if trouble {
|
||||
return envelopeErrors, errors.New("received invalid envelope")
|
||||
return errors.New("received invalid envelope")
|
||||
}
|
||||
|
||||
return envelopeErrors, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// addEnvelope adds an envelope to the envelope map, used for sending
|
||||
|
@ -1649,7 +1553,7 @@ func (w *Waku) restartDiscV5() error {
|
|||
return errors.New("failed to fetch bootnodes")
|
||||
}
|
||||
|
||||
if !w.node.DiscV5().IsStarted() {
|
||||
if w.node.DiscV5().ErrOnNotRunning() != nil {
|
||||
w.logger.Info("is not started restarting")
|
||||
err := w.node.DiscV5().Start(ctx)
|
||||
if err != nil {
|
||||
|
@ -1681,7 +1585,7 @@ func (w *Waku) AddStorePeer(address string) (peer.ID, error) {
|
|||
return "", err
|
||||
}
|
||||
|
||||
peerID, err := w.node.AddPeer(addr, wps.Static, store.StoreID_v20beta4)
|
||||
peerID, err := w.node.AddPeer(addr, wps.Static, []string{}, store.StoreID_v20beta4)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
@ -1698,7 +1602,7 @@ func (w *Waku) AddRelayPeer(address string) (peer.ID, error) {
|
|||
return "", err
|
||||
}
|
||||
|
||||
peerID, err := w.node.AddPeer(addr, wps.Static, relay.WakuRelayID_v200)
|
||||
peerID, err := w.node.AddPeer(addr, wps.Static, []string{}, relay.WakuRelayID_v200)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
@ -1798,65 +1702,3 @@ func formatConnStatus(wakuNode *node.WakuNode, c node.ConnStatus) types.ConnStat
|
|||
Peers: FormatPeerStats(wakuNode, c.Peers),
|
||||
}
|
||||
}
|
||||
|
||||
// Find suitable peer(s). For this we use a peerDisconnectMap, it works so that
|
||||
// peers that have been recently disconnected from have lower priority
|
||||
func (w *Waku) findFilterPeers() []peer.ID {
|
||||
|
||||
allPeers := w.node.Host().Peerstore().Peers()
|
||||
var peers peer.IDSlice
|
||||
for _, peer := range allPeers {
|
||||
protocols, err := w.node.Host().Peerstore().SupportsProtocols(peer, filter.FilterSubscribeID_v20beta1, relay.WakuRelayID_v200)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if len(protocols) == 2 {
|
||||
peers = append(peers, peer)
|
||||
}
|
||||
}
|
||||
|
||||
if len(peers) > 0 {
|
||||
sort.Slice(peers, func(i, j int) bool {
|
||||
// If element not found in map, [] operator will return 0
|
||||
return w.filterPeerDisconnectMap[peers[i]] < w.filterPeerDisconnectMap[peers[j]]
|
||||
})
|
||||
}
|
||||
|
||||
var peerLen = len(peers)
|
||||
if w.settings.MinPeersForFilter < peerLen {
|
||||
peerLen = w.settings.MinPeersForFilter
|
||||
}
|
||||
peers = peers[0:peerLen]
|
||||
return peers
|
||||
}
|
||||
|
||||
func (w *Waku) subscribeToFilter(f *common.Filter) error {
|
||||
peers := w.findFilterPeers()
|
||||
|
||||
if len(peers) > 0 {
|
||||
contentFilter := w.buildContentFilter(f.PubsubTopic, f.Topics)
|
||||
for i := 0; i < len(peers) && i < w.settings.MinPeersForFilter; i++ {
|
||||
subDetails, err := w.node.FilterLightnode().Subscribe(w.ctx, contentFilter, filter.WithPeer(peers[i]))
|
||||
if err != nil {
|
||||
w.logger.Warn("could not add wakuv2 filter for peer", zap.Stringer("peer", peers[i]))
|
||||
continue
|
||||
}
|
||||
|
||||
subMap := w.filterSubscriptions[f]
|
||||
if subMap == nil {
|
||||
subMap = make(map[string]*filter.SubscriptionDetails)
|
||||
w.filterSubscriptions[f] = subMap
|
||||
}
|
||||
subMap[subDetails.ID] = subDetails
|
||||
go w.runFilterSubscriptionLoop(subDetails)
|
||||
|
||||
w.logger.Info("wakuv2 filter subscription success", zap.Stringer("peer", peers[i]), zap.String("pubsubTopic", contentFilter.Topic), zap.Strings("contentTopics", contentFilter.ContentTopics))
|
||||
}
|
||||
|
||||
} else {
|
||||
return errors.New("could not select a suitable peer for filter")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
Some files were not shown because too many files have changed in this diff.