chore: bump go-waku to latest commit (#4000)
This includes the fix for the panic we saw with Relay connections in go-waku: waku-org/go-waku#713
parent 81b94b7a4e
commit 15369f26dd
go.mod (10 changes)

@@ -84,7 +84,7 @@ require (
 	github.com/mutecomm/go-sqlcipher/v4 v4.4.2
 	github.com/schollz/peerdiscovery v1.7.0
 	github.com/siphiuel/lc-proxy-wrapper v0.0.0-20230516150924-246507cee8c7
-	github.com/waku-org/go-waku v0.7.1-0.20230829115339-8ad08d6b0481
+	github.com/waku-org/go-waku v0.7.1-0.20230907093131-092811658ea3
 	github.com/wk8/go-ordered-map/v2 v2.1.7
 	github.com/yeqown/go-qrcode/v2 v2.2.1
 	github.com/yeqown/go-qrcode/writer/standard v1.2.1
@@ -257,10 +257,10 @@ require (
 	github.com/urfave/cli/v2 v2.24.4 // indirect
 	github.com/waku-org/go-discover v0.0.0-20221209174356-61c833f34d98 // indirect
 	github.com/waku-org/go-libp2p-rendezvous v0.0.0-20230628220917-7b4e5ae4c0e7 // indirect
-	github.com/waku-org/go-zerokit-rln v0.1.14-0.20230823150836-a706089284fe // indirect
-	github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230821155521-70f1ff564bae // indirect
-	github.com/waku-org/go-zerokit-rln-arm v0.0.0-20230807124929-ea702b1b4305 // indirect
-	github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230807124913-ea636e5b4005 // indirect
+	github.com/waku-org/go-zerokit-rln v0.1.14-0.20230905214645-ca686a02e816 // indirect
+	github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230905213302-1d6d18a03e7c // indirect
+	github.com/waku-org/go-zerokit-rln-arm v0.0.0-20230905183322-05f4cda61468 // indirect
+	github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230905182930-2b11e72ef866 // indirect
 	github.com/wk8/go-ordered-map v1.0.0 // indirect
 	github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
go.sum (20 changes)

@@ -2094,16 +2094,16 @@ github.com/waku-org/go-discover v0.0.0-20221209174356-61c833f34d98 h1:xwY0kW5XZF
 github.com/waku-org/go-discover v0.0.0-20221209174356-61c833f34d98/go.mod h1:eBHgM6T4EG0RZzxpxKy+rGz/6Dw2Nd8DWxS0lm9ESDw=
 github.com/waku-org/go-libp2p-rendezvous v0.0.0-20230628220917-7b4e5ae4c0e7 h1:0e1h+p84yBp0IN7AqgbZlV7lgFBjm214lgSOE7CeJmE=
 github.com/waku-org/go-libp2p-rendezvous v0.0.0-20230628220917-7b4e5ae4c0e7/go.mod h1:pFvOZ9YTFsW0o5zJW7a0B5tr1owAijRWJctXJ2toL04=
-github.com/waku-org/go-waku v0.7.1-0.20230829115339-8ad08d6b0481 h1:g+yGK715lZX7fqAjXDi3txAeEq6SjC0lyXliWe0emfA=
-github.com/waku-org/go-waku v0.7.1-0.20230829115339-8ad08d6b0481/go.mod h1:egMHn9evnHqNTzc7eeGBKxNfv31PleuDjgLUfPrj40o=
-github.com/waku-org/go-zerokit-rln v0.1.14-0.20230823150836-a706089284fe h1:t2KJU5HRgmRHo94cFwSa7BDwVioj+LCjJIK1H4p2lBA=
-github.com/waku-org/go-zerokit-rln v0.1.14-0.20230823150836-a706089284fe/go.mod h1:aAlHP2G8TiZX5nKvsPpnOL+IGLlkYA567h5xrGCz7s8=
-github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230821155521-70f1ff564bae h1:VXgstV6RFUs6L/x0Xad4s0BIJ8hVEv1pyrByYzvZdT0=
-github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230821155521-70f1ff564bae/go.mod h1:KYykqtdApHVYZ3G0spwMnoxc5jH5eI3jyO9SwsSfi48=
-github.com/waku-org/go-zerokit-rln-arm v0.0.0-20230807124929-ea702b1b4305 h1:33LEcvkC5eRdCIKt0bTG6G6DYZRNQGcpdoScA1ZFgRI=
-github.com/waku-org/go-zerokit-rln-arm v0.0.0-20230807124929-ea702b1b4305/go.mod h1:7cSGUoGVIla1IpnChrLbkVjkYgdOcr7rcifEfh4ReR4=
-github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230807124913-ea636e5b4005 h1:kJfvDpiZZGNTpHB7Mp4BBNj/hsG6UzMg84E+bl+n7Eo=
-github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230807124913-ea636e5b4005/go.mod h1:+LeEYoW5/uBUTVjtBGLEVCUe9mOYAlu5ZPkIxLOSr5Y=
+github.com/waku-org/go-waku v0.7.1-0.20230907093131-092811658ea3 h1:lwXUUy6XWnWr/svnQG30H/FlWKOvPAGjAFn3pwwjWbY=
+github.com/waku-org/go-waku v0.7.1-0.20230907093131-092811658ea3/go.mod h1:HW6QoUlzw3tLUbLzhHCGCEVIFcAWIjqCF6+JU0pSyus=
+github.com/waku-org/go-zerokit-rln v0.1.14-0.20230905214645-ca686a02e816 h1:M5skPFmapY5i5a9jSiGWft9PZMiQr2nCi8uzJc2IfBI=
+github.com/waku-org/go-zerokit-rln v0.1.14-0.20230905214645-ca686a02e816/go.mod h1:zc3FBSLP6vy2sOjAnqIju3yKLRq1WkcxsS1Lh9w0CuA=
+github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230905213302-1d6d18a03e7c h1:aDn17iEMrdXeQ6dp+Cv3ywJYStkomkvKWv8I00iy79c=
+github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230905213302-1d6d18a03e7c/go.mod h1:KYykqtdApHVYZ3G0spwMnoxc5jH5eI3jyO9SwsSfi48=
+github.com/waku-org/go-zerokit-rln-arm v0.0.0-20230905183322-05f4cda61468 h1:yNRDUyWJu/wHEPLps5D/Zce24mu/5ax2u1pXsMwRPbg=
+github.com/waku-org/go-zerokit-rln-arm v0.0.0-20230905183322-05f4cda61468/go.mod h1:7cSGUoGVIla1IpnChrLbkVjkYgdOcr7rcifEfh4ReR4=
+github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230905182930-2b11e72ef866 h1:dURzhyGtPrpmBJcnY4hpY83dW81cZimkZ8U+S89ANd0=
+github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230905182930-2b11e72ef866/go.mod h1:+LeEYoW5/uBUTVjtBGLEVCUe9mOYAlu5ZPkIxLOSr5Y=
 github.com/wealdtech/go-ens/v3 v3.5.0 h1:Huc9GxBgiGweCOGTYomvsg07K2QggAqZpZ5SuiZdC8o=
 github.com/wealdtech/go-ens/v3 v3.5.0/go.mod h1:bVuYoWYEEeEu7Zy95rIMjPR34QFJarxt8p84ywSo0YM=
 github.com/wealdtech/go-multicodec v1.4.0 h1:iq5PgxwssxnXGGPTIK1srvt6U5bJwIp7k6kBrudIWxg=
@@ -453,10 +453,17 @@ func (d *DiscoveryV5) peerLoop(ctx context.Context) error {
 	}

 	nodeRS, err := wenr.RelaySharding(n.Record())
-	if err != nil || nodeRS == nil {
+	if err != nil {
 		return false
 	}

+	if nodeRS == nil {
+		// TODO: Node has no shard registered.
+		// Since for now, status-go uses both mixed static and named shards, we assume the node is valid
+		// Once status-go uses only static shards, we can't return true anymore.
+		return true
+	}
+
 	if nodeRS.Cluster != localRS.Cluster {
 		return false
 	}
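For orientation, the relaxed check above reads as a standalone predicate. A minimal sketch, assuming the `wenr` and `protocol` packages already used in this diff (illustrative, not the verbatim function):

// acceptNode mirrors the new filter: reject unreadable ENRs, accept
// nodes that advertise no shard info, and otherwise require the same
// cluster as the local node.
func acceptNode(n *enode.Node, localRS *protocol.RelayShards) bool {
	nodeRS, err := wenr.RelaySharding(n.Record())
	if err != nil {
		return false
	}
	if nodeRS == nil {
		// No shard registered; assumed valid while status-go mixes
		// static and named shards (see the TODO above).
		return true
	}
	return nodeRS.Cluster == localRS.Cluster
}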
@@ -315,22 +315,17 @@ func (w *WakuNode) watchTopicShards(ctx context.Context) error {
 		if len(rs) > 0 {
 			if len(rs) > 1 {
-				w.log.Warn("could not set ENR shard info", zap.String("error", "use sharded topics within the same cluster"))
-				continue
-			}
-
-			tcount := 0
-			for _, r := range rs {
-				tcount += len(r.Indices)
-			}
-			if tcount != len(topics) {
-				w.log.Warn("could not set ENR shard info", zap.String("error", "can't use a mix of static shards and named shards"))
+				w.log.Warn("could not set ENR shard info", zap.String("error", "multiple clusters found, use sharded topics within the same cluster"))
 				continue
 			}
 		}

 		if len(rs) == 1 {
 			w.log.Info("updating advertised relay shards in ENR")
+			if len(rs[0].Indices) != len(topics) {
+				w.log.Warn("A mix of named and static shards found. ENR shard will contain only the following shards", zap.Any("shards", rs[0]))
+			}
+
 			err = wenr.Update(w.localNode, wenr.WithWakuRelaySharding(rs[0]))
 			if err != nil {
 				w.log.Warn("could not set ENR shard info", zap.Error(err))
@@ -22,7 +22,6 @@ import (
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/libp2p/go-libp2p/core/peerstore"
 	"github.com/libp2p/go-libp2p/core/protocol"
-	"github.com/libp2p/go-libp2p/p2p/discovery/backoff"
 	"github.com/libp2p/go-libp2p/p2p/host/autorelay"
 	"github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem"
 	"github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto"
@@ -71,7 +70,7 @@ type SpamHandler = func(message *pb.WakuMessage) error

 type RLNRelay interface {
 	IdentityCredential() (IdentityCredential, error)
-	MembershipIndex() (uint, error)
+	MembershipIndex() uint
 	AppendRLNProof(msg *pb.WakuMessage, senderEpochTime time.Time) error
 	Validator(spamHandler SpamHandler) func(ctx context.Context, peerID peer.ID, message *pubsub.Message) bool
 	Start(ctx context.Context) error
@@ -254,13 +253,7 @@ func New(opts ...WakuNodeOption) (*WakuNode, error) {
 	//Initialize peer manager.
 	w.peermanager = peermanager.NewPeerManager(w.opts.maxPeerConnections, w.log)

 	// Setup peer connection strategy
-	cacheSize := 600
-	rngSrc := rand.NewSource(rand.Int63())
-	minBackoff, maxBackoff := time.Minute, time.Hour
-	bkf := backoff.NewExponentialBackoff(minBackoff, maxBackoff, backoff.FullJitter, time.Second, 5.0, 0, rand.New(rngSrc))
-
-	w.peerConnector, err = peermanager.NewPeerConnectionStrategy(cacheSize, w.peermanager, discoveryConnectTimeout, bkf, w.log)
+	w.peerConnector, err = peermanager.NewPeerConnectionStrategy(w.peermanager, discoveryConnectTimeout, w.log)
 	if err != nil {
 		w.log.Error("creating peer connection strategy", zap.Error(err))
 	}
@@ -747,6 +740,20 @@ func (w *WakuNode) connect(ctx context.Context, info peer.AddrInfo) error {
 		return err
 	}

+	for _, addr := range info.Addrs {
+		// TODO: this is a temporary fix
+		// host.Connect adds the addresses with a TempAddressTTL
+		// however, identify will filter out all non IP addresses
+		// and expire all temporary addrs. So in the meantime, let's
+		// store dns4 addresses with a connectedAddressTTL, otherwise
+		// it will have trouble with the status fleet circuit relay addresses
+		// See https://github.com/libp2p/go-libp2p/issues/2550
+		_, err := addr.ValueForProtocol(ma.P_DNS4)
+		if err == nil {
+			w.host.Peerstore().AddAddrs(info.ID, info.Addrs, peerstore.ConnectedAddrTTL)
+		}
+	}
+
 	w.host.Peerstore().(wps.WakuPeerstore).ResetConnFailures(info)

 	w.metrics.RecordDial()
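A quick illustration of the dns4 detection used in that loop (the multiaddr value is hypothetical; `ma` is github.com/multiformats/go-multiaddr, as elsewhere in go-waku):

addr, _ := ma.NewMultiaddr("/dns4/example.com/tcp/30303") // hypothetical address
if _, err := addr.ValueForProtocol(ma.P_DNS4); err == nil {
	// dns4 address: re-add all of the peer's addresses with
	// peerstore.ConnectedAddrTTL so identify does not expire them
}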
@@ -12,6 +12,7 @@ import (
 	"github.com/waku-org/go-waku/waku/v2/protocol/rln"
 	"github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/dynamic"
 	"github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/static"
+	"github.com/waku-org/go-waku/waku/v2/protocol/rln/keystore"
 	r "github.com/waku-org/go-zerokit-rln/rln"
 )
@@ -44,14 +45,17 @@ func (w *WakuNode) setupRLNRelay() error {
 	} else {
 		w.log.Info("setting up waku-rln-relay in on-chain mode")

+		appKeystore, err := keystore.New(w.opts.keystorePath, dynamic.RLNAppInfo, w.log)
+		if err != nil {
+			return err
+		}
+
 		groupManager, err = dynamic.NewDynamicGroupManager(
 			w.opts.rlnETHClientAddress,
 			w.opts.rlnMembershipContractAddress,
 			w.opts.rlnRelayMemIndex,
-			w.opts.keystorePath,
+			appKeystore,
 			w.opts.keystorePassword,
-			w.opts.keystoreIndex,
 			true,
 			w.opts.prometheusReg,
 			w.log,
 		)
@@ -100,7 +100,6 @@ type WakuNodeParameters struct {
 	rlnETHClientAddress string
 	keystorePath string
 	keystorePassword string
-	keystoreIndex uint
 	rlnTreePath string
 	rlnMembershipContractAddress common.Address

@@ -23,17 +23,16 @@ func WithStaticRLNRelay(memberIndex r.MembershipIndex, spamHandler rln.SpamHandl

 // WithDynamicRLNRelay enables the Waku V2 RLN protocol in onchain mode.
 // Requires the `gowaku_rln` build constrain (or the env variable RLN=true if building go-waku)
-func WithDynamicRLNRelay(keystorePath string, keystorePassword string, keystoreIndex uint, treePath string, membershipContract common.Address, membershipGroupIndex uint, spamHandler rln.SpamHandler, ethClientAddress string) WakuNodeOption {
+func WithDynamicRLNRelay(keystorePath string, keystorePassword string, treePath string, membershipContract common.Address, membershipIndex uint, spamHandler rln.SpamHandler, ethClientAddress string) WakuNodeOption {
 	return func(params *WakuNodeParameters) error {
 		params.enableRLN = true
 		params.rlnRelayDynamic = true
 		params.keystorePassword = keystorePassword
 		params.keystorePath = keystorePath
-		params.keystoreIndex = keystoreIndex
 		params.rlnSpamHandler = spamHandler
 		params.rlnETHClientAddress = ethClientAddress
 		params.rlnMembershipContractAddress = membershipContract
-		params.rlnRelayMemIndex = membershipGroupIndex
+		params.rlnRelayMemIndex = membershipIndex
 		params.rlnTreePath = treePath
 		return nil
 	}
@@ -82,6 +82,7 @@ func (c *ConnectionGater) InterceptUpgraded(_ network.Conn) (allow bool, reason
 	return true, 0
 }

+// NotifyDisconnect is called when a connection disconnects.
 func (c *ConnectionGater) NotifyDisconnect(addr multiaddr.Multiaddr) {
 	ip, err := manet.ToIP(addr)
 	if err != nil {
@@ -111,16 +112,10 @@ func (c *ConnectionGater) validateInboundConn(addr multiaddr.Multiaddr) bool {
 	c.Lock()
 	defer c.Unlock()

-	currConnections, ok := c.limiter[ip.String()]
-	if !ok {
-		c.limiter[ip.String()] = 1
-		return true
-	} else {
-		if currConnections+1 > maxConnsPerIP {
-			return false
-		}
-
-		c.limiter[ip.String()]++
-	}
+	if currConnections := c.limiter[ip.String()]; currConnections+1 > maxConnsPerIP {
+		return false
+	}
+
+	c.limiter[ip.String()]++
 	return true
 }
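The rewritten branch collapses the old first-connection special case into a single path; a self-contained sketch of the counting logic (the cap value is assumed here for illustration):

limiter := map[string]int{}
const maxConnsPerIP = 10 // assumed cap for this sketch

allowInbound := func(ip string) bool {
	if limiter[ip]+1 > maxConnsPerIP {
		return false // the next connection would exceed the cap
	}
	limiter[ip]++ // count the accepted connection (map yields 0 for new IPs)
	return true
}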
vendor/github.com/waku-org/go-waku/waku/v2/peermanager/mock_peer_discoverer.go (new file, 56 lines, generated, vendored)

@@ -0,0 +1,56 @@
+package peermanager
+
+import (
+	"context"
+	"sync"
+
+	"github.com/libp2p/go-libp2p/core/peer"
+)
+
+// TestPeerDiscoverer is mock peer discoverer for testing
+type TestPeerDiscoverer struct {
+	sync.RWMutex
+	peerMap map[peer.ID]struct{}
+}
+
+// NewTestPeerDiscoverer is a constructor for TestPeerDiscoverer
+func NewTestPeerDiscoverer() *TestPeerDiscoverer {
+	result := &TestPeerDiscoverer{
+		peerMap: make(map[peer.ID]struct{}),
+	}
+
+	return result
+}
+
+// Subscribe is for subscribing to peer discoverer
+func (t *TestPeerDiscoverer) Subscribe(ctx context.Context, ch <-chan PeerData) {
+	go func() {
+		for p := range ch {
+			t.Lock()
+			t.peerMap[p.AddrInfo.ID] = struct{}{}
+			t.Unlock()
+		}
+	}()
+}
+
+// HasPeer is for checking if a peer is present in peer discoverer
+func (t *TestPeerDiscoverer) HasPeer(p peer.ID) bool {
+	t.RLock()
+	defer t.RUnlock()
+	_, ok := t.peerMap[p]
+	return ok
+}
+
+// PeerCount is for getting the number of peers in peer discoverer
+func (t *TestPeerDiscoverer) PeerCount() int {
+	t.RLock()
+	defer t.RUnlock()
+	return len(t.peerMap)
+}
+
+// Clear is for clearing the peer discoverer
+func (t *TestPeerDiscoverer) Clear() {
+	t.Lock()
+	defer t.Unlock()
+	t.peerMap = make(map[peer.ID]struct{})
+}
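A hypothetical test showing how the mock is meant to be driven (the peer ID and the sleep-based synchronization are illustrative only):

func TestDiscovererRecordsPeers(t *testing.T) {
	d := NewTestPeerDiscoverer()
	ch := make(chan PeerData, 1)
	d.Subscribe(context.Background(), ch)

	somePeer := peer.ID("peer-A") // hypothetical peer ID
	ch <- PeerData{AddrInfo: peer.AddrInfo{ID: somePeer}}
	close(ch)

	time.Sleep(10 * time.Millisecond) // crude sync with the subscriber goroutine
	if !d.HasPeer(somePeer) {
		t.Fatal("peer was not recorded")
	}
}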
@@ -5,6 +5,7 @@ package peermanager
 import (
 	"context"
 	"errors"
+	"math/rand"
 	"sync"
 	"time"

@@ -53,27 +54,34 @@ type PeerConnectionStrategy struct {
 	logger *zap.Logger
 }

+// backoff describes the strategy used to decide how long to backoff after previously attempting to connect to a peer
+func getBackOff() backoff.BackoffFactory {
+	rngSrc := rand.NewSource(rand.Int63())
+	minBackoff, maxBackoff := time.Minute, time.Hour
+	bkf := backoff.NewExponentialBackoff(minBackoff, maxBackoff, backoff.FullJitter, time.Second, 5.0, 0, rand.New(rngSrc))
+	return bkf
+}
+
 // NewPeerConnectionStrategy creates a utility to connect to peers,
 // but only if we have not recently tried connecting to them already.
 //
-// cacheSize is the size of a TwoQueueCache
 // dialTimeout is how long we attempt to connect to a peer before giving up
-// minPeers is the minimum number of peers that the node should have
-// backoff describes the strategy used to decide how long to backoff after previously attempting to connect to a peer
-func NewPeerConnectionStrategy(cacheSize int, pm *PeerManager,
-	dialTimeout time.Duration, backoff backoff.BackoffFactory,
-	logger *zap.Logger) (*PeerConnectionStrategy, error) {
-
+func NewPeerConnectionStrategy(pm *PeerManager,
+	dialTimeout time.Duration, logger *zap.Logger) (*PeerConnectionStrategy, error) {
+	// cacheSize is the size of a TwoQueueCache
+	cacheSize := 600
 	cache, err := lru.New2Q(cacheSize)
 	if err != nil {
 		return nil, err
 	}
+	//
 	pc := &PeerConnectionStrategy{
 		cache: cache,
 		wg: sync.WaitGroup{},
 		dialTimeout: dialTimeout,
 		pm: pm,
-		backoff: backoff,
+		backoff: getBackOff(),
 		logger: logger.Named("discovery-connector"),
 	}
 	pm.SetPeerConnector(pc)
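Spelled out, getBackOff builds the same factory that callers previously constructed by hand: exponential backoff from go-libp2p's discovery/backoff package, with a one-minute floor, one-hour cap, and full jitter. A sketch of how the factory is consumed (per-peer usage assumed):

bkf := getBackOff()
b := bkf() // one BackoffStrategy per peer
for i := 0; i < 3; i++ {
	wait := b.Delay() // grows roughly 5x per attempt, jittered, capped at 1h
	time.Sleep(wait)
	// ... retry dialing the peer ...
}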
@@ -17,54 +17,56 @@ import (
 	"go.uber.org/zap"
 )

-// TODO: Move all the protocol IDs to a common location.
+// WakuRelayIDv200 is protocol ID for Waku v2 relay protocol
+// TODO: Move all the protocol IDs to a common location.
 const WakuRelayIDv200 = protocol.ID("/vac/waku/relay/2.0.0")

 // PeerManager applies various controls and manage connections towards peers.
 type PeerManager struct {
+	peerConnector *PeerConnectionStrategy
 	maxConnections int
 	maxRelayPeers int
 	logger *zap.Logger
 	InRelayPeersTarget int
 	OutRelayPeersTarget int
 	host host.Host
-	serviceSlots map[protocol.ID][]peer.ID
+	serviceSlots *ServiceSlots
 	ctx context.Context
 }

-const maxRelayPeersShare = 5
-
-// const defaultMaxOutRelayPeersTarget = 10
-const outRelayPeersShare = 3
 const peerConnectivityLoopSecs = 15
-const minOutRelayConns = 10
+
+// 80% relay peers 20% service peers
+func relayAndServicePeers(maxConnections int) (int, int) {
+	return maxConnections - maxConnections/5, maxConnections / 5
+}
+
+// 66% inRelayPeers 33% outRelayPeers
+func inAndOutRelayPeers(relayPeers int) (int, int) {
+	outRelayPeers := relayPeers / 3
+	//
+	const minOutRelayConns = 10
+	if outRelayPeers < minOutRelayConns {
+		outRelayPeers = minOutRelayConns
+	}
+	return relayPeers - outRelayPeers, outRelayPeers
+}

 // NewPeerManager creates a new peerManager instance.
 func NewPeerManager(maxConnections int, logger *zap.Logger) *PeerManager {
-
-	maxRelayPeersValue := maxConnections - (maxConnections / maxRelayPeersShare)
-	outRelayPeersTargetValue := int(maxRelayPeersValue / outRelayPeersShare)
-	if outRelayPeersTargetValue < minOutRelayConns {
-		outRelayPeersTargetValue = minOutRelayConns
-	}
-	inRelayPeersTargetValue := maxRelayPeersValue - outRelayPeersTargetValue
-	if inRelayPeersTargetValue < 0 {
-		inRelayPeersTargetValue = 0
-	}
+	maxRelayPeers, _ := relayAndServicePeers(maxConnections)
+	inRelayPeersTarget, outRelayPeersTarget := inAndOutRelayPeers(maxRelayPeers)

 	pm := &PeerManager{
 		maxConnections: maxConnections,
 		logger: logger.Named("peer-manager"),
-		maxRelayPeers: maxRelayPeersValue,
-		InRelayPeersTarget: inRelayPeersTargetValue,
-		OutRelayPeersTarget: outRelayPeersTargetValue,
-		serviceSlots: make(map[protocol.ID][]peer.ID),
+		maxRelayPeers: maxRelayPeers,
+		InRelayPeersTarget: inRelayPeersTarget,
+		OutRelayPeersTarget: outRelayPeersTarget,
+		serviceSlots: NewServiceSlot(),
 	}
 	logger.Info("PeerManager init values", zap.Int("maxConnections", maxConnections),
-		zap.Int("maxRelayPeersValue", maxRelayPeersValue),
-		zap.Int("outRelayPeersTargetValue", outRelayPeersTargetValue),
+		zap.Int("maxRelayPeers", maxRelayPeers),
+		zap.Int("outRelayPeersTarget", outRelayPeersTarget),
 		zap.Int("inRelayPeersTarget", pm.InRelayPeersTarget))

 	return pm
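As a worked check of the new split (numbers hypothetical): with maxConnections = 50, relayAndServicePeers returns 40 relay and 10 service slots, and inAndOutRelayPeers splits the 40 into 13 outbound (40/3, above the minimum of 10) and 27 inbound:

maxRelayPeers, servicePeers := relayAndServicePeers(50) // 40, 10
inTarget, outTarget := inAndOutRelayPeers(maxRelayPeers) // 27, 13
_ = servicePeers
_, _ = inTarget, outTarget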
@@ -154,20 +156,20 @@ func (pm *PeerManager) connectToRelayPeers() {
 		return
 	}
 	totalRelayPeers := inRelayPeers.Len() + outRelayPeers.Len()
-	// Establish additional connections if there are peers.
+	// Establish additional connections connected peers are lesser than target.
 	//What if the not connected peers in peerstore are not relay peers???
-	if totalRelayPeers < pm.host.Peerstore().Peers().Len() {
+	if totalRelayPeers < pm.maxRelayPeers {
 		//Find not connected peers.
 		notConnectedPeers := pm.getNotConnectedPers()
-		//Figure out outside backoff peers.
-
+		if notConnectedPeers.Len() == 0 {
+			return
+		}
 		//Connect to eligible peers.
 		numPeersToConnect := pm.maxRelayPeers - totalRelayPeers

 		if numPeersToConnect > notConnectedPeers.Len() {
-			numPeersToConnect = notConnectedPeers.Len() - 1
+			numPeersToConnect = notConnectedPeers.Len()
 		}
 		pm.connectToPeers(notConnectedPeers[0:numPeersToConnect])
 	} //Else: Should we raise some sort of unhealthy event??
 }
@@ -256,7 +258,7 @@ func (pm *PeerManager) AddPeer(address ma.Multiaddr, origin wps.Origin, protocol

 	//Add Service peers to serviceSlots.
 	for _, proto := range protocols {
-		pm.AddPeerToServiceSlot(proto, info.ID)
+		pm.addPeerToServiceSlot(proto, info.ID)
 	}

 	//Add to the peer-store
@@ -274,19 +276,13 @@ func (pm *PeerManager) RemovePeer(peerID peer.ID) {
 	pm.host.Peerstore().RemovePeer(peerID)
 	//Search if this peer is in serviceSlot and if so, remove it from there
 	// TODO:Add another peer which is statically configured to the serviceSlot.
-	for proto, peers := range pm.serviceSlots {
-		for i, peer := range peers {
-			if peer == peerID {
-				pm.serviceSlots[proto][i] = ""
-			}
-		}
-	}
+	pm.serviceSlots.removePeer(peerID)
 }

-// AddPeerToServiceSlot adds a peerID to serviceSlot.
+// addPeerToServiceSlot adds a peerID to serviceSlot.
 // Adding to peerStore is expected to be already done by caller.
 // If relay proto is passed, it is not added to serviceSlot.
-func (pm *PeerManager) AddPeerToServiceSlot(proto protocol.ID, peerID peer.ID) {
+func (pm *PeerManager) addPeerToServiceSlot(proto protocol.ID, peerID peer.ID) {
 	if proto == WakuRelayIDv200 {
 		pm.logger.Warn("Cannot add Relay peer to service peer slots")
 		return
@@ -296,7 +292,8 @@ func (pm *PeerManager) AddPeerToServiceSlot(proto protocol.ID, peerID peer.ID) {
 	//TODO: Ideally we should sort the peers per service and return best peer based on peer score or RTT etc.
 	pm.logger.Info("Adding peer to service slots", logging.HostID("peer", peerID),
 		zap.String("service", string(proto)))
-	pm.serviceSlots[proto] = append(pm.serviceSlots[proto], peerID)
+	// getPeers returns nil for WakuRelayIDv200 protocol, but we don't run this ServiceSlot code for WakuRelayIDv200 protocol
+	pm.serviceSlots.getPeers(proto).add(peerID)
 }

 // SelectPeer is used to return a random peer that supports a given protocol.
@@ -310,19 +307,17 @@ func (pm *PeerManager) SelectPeer(proto protocol.ID, specificPeers []peer.ID, lo
 	// - which topics they track
 	// - latency?

+	//Try to fetch from serviceSlot
+	if slot := pm.serviceSlots.getPeers(proto); slot != nil {
+		if peerID, err := slot.getRandom(); err == nil {
+			return peerID, nil
+		}
+	}
+
+	// if not found in serviceSlots or proto == WakuRelayIDv200
 	filteredPeers, err := utils.FilterPeersByProto(pm.host, specificPeers, proto)
 	if err != nil {
 		return "", err
 	}
-	if proto == WakuRelayIDv200 {
-		return utils.SelectRandomPeer(filteredPeers, pm.logger)
-	}
-
-	//Try to fetch from serviceSlot
-	peerIDs, ok := pm.serviceSlots[proto]
-	if ok || len(peerIDs) > 0 {
-		filteredPeers = peerIDs
-	}
-
 	return utils.SelectRandomPeer(filteredPeers, pm.logger)
 }
vendor/github.com/waku-org/go-waku/waku/v2/peermanager/service_slot.go (new file, 78 lines, generated, vendored)

@@ -0,0 +1,78 @@
+package peermanager
+
+import (
+	"sync"
+
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/core/protocol"
+	"github.com/waku-org/go-waku/waku/v2/utils"
+)
+
+type peerMap struct {
+	mu sync.RWMutex
+	m map[peer.ID]struct{}
+}
+
+func newPeerMap() *peerMap {
+	return &peerMap{
+		m: map[peer.ID]struct{}{},
+	}
+}
+
+func (pm *peerMap) getRandom() (peer.ID, error) {
+	pm.mu.RLock()
+	defer pm.mu.RUnlock()
+	for pID := range pm.m {
+		return pID, nil
+	}
+	return "", utils.ErrNoPeersAvailable
+
+}
+
+func (pm *peerMap) remove(pID peer.ID) {
+	pm.mu.Lock()
+	defer pm.mu.Unlock()
+
+	delete(pm.m, pID)
+}
+func (pm *peerMap) add(pID peer.ID) {
+	pm.mu.Lock()
+	defer pm.mu.Unlock()
+	pm.m[pID] = struct{}{}
+}
+
+// ServiceSlots is for storing service slots for a given protocol topic
+type ServiceSlots struct {
+	mu sync.Mutex
+	m map[protocol.ID]*peerMap
+}
+
+// NewServiceSlot is a constructor for ServiceSlot
+func NewServiceSlot() *ServiceSlots {
+	return &ServiceSlots{
+		m: map[protocol.ID]*peerMap{},
+	}
+}
+
+// getPeers for getting all the peers for a given protocol
+// since peerMap is only used in peerManager that's why it is unexported
+func (slots *ServiceSlots) getPeers(proto protocol.ID) *peerMap {
+	if proto == WakuRelayIDv200 {
+		return nil
+	}
+	slots.mu.Lock()
+	defer slots.mu.Unlock()
+	if slots.m[proto] == nil {
+		slots.m[proto] = newPeerMap()
+	}
+	return slots.m[proto]
+}
+
+// RemovePeer for removing peer ID for a given protocol
+func (slots *ServiceSlots) removePeer(peerID peer.ID) {
+	slots.mu.Lock()
+	defer slots.mu.Unlock()
+	for _, m := range slots.m {
+		m.remove(peerID)
+	}
+}
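Within the peermanager package, the intended flow of the new type looks roughly like this (the protocol ID and peer ID are hypothetical; getPeers/add/getRandom are the unexported helpers above):

slots := NewServiceSlot()
storeProto := protocol.ID("/vac/waku/store/2.0.0-beta4")
peerA := peer.ID("peer-A") // hypothetical

slots.getPeers(storeProto).add(peerA) // register a service peer
if p, err := slots.getPeers(storeProto).getRandom(); err == nil {
	_ = p // a random service peer for the protocol
}
slots.removePeer(peerA) // drops peerA from every protocol's slot
// Note: getPeers(WakuRelayIDv200) returns nil by design; relay peers
// are never stored in service slots.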
@@ -34,6 +34,9 @@ var (
 )

 type WakuFilterLightNode struct {
+	sync.RWMutex
+	started bool
+
 	cancel context.CancelFunc
 	ctx context.Context
 	h host.Host
@@ -56,6 +59,9 @@ type WakuFilterPushResult struct {
 	PeerID peer.ID
 }

+var errNotStarted = errors.New("not started")
+var errAlreadyStarted = errors.New("already started")
+
 // NewWakuFilterLightnode returns a new instance of Waku Filter struct setup according to the chosen parameter and options
 // Takes an optional peermanager if WakuFilterLightnode is being created along with WakuNode.
 // If using libp2p host, then pass peermanager as nil
@@ -78,12 +84,20 @@ func (wf *WakuFilterLightNode) SetHost(h host.Host) {
 }

 func (wf *WakuFilterLightNode) Start(ctx context.Context) error {
+	wf.Lock()
+	defer wf.Unlock()
+
+	if wf.started {
+		return errAlreadyStarted
+	}
+
 	wf.wg.Wait() // Wait for any goroutines to stop

 	ctx, cancel := context.WithCancel(ctx)
 	wf.cancel = cancel
 	wf.ctx = ctx
 	wf.subscriptions = NewSubscriptionMap(wf.log)
+	wf.started = true

 	wf.h.SetStreamHandlerMatch(FilterPushID_v20beta1, protocol.PrefixTextMatch(string(FilterPushID_v20beta1)), wf.onRequest(ctx))
@@ -94,7 +108,10 @@ func (wf *WakuFilterLightNode) Start(ctx context.Context) error {

 // Stop unmounts the filter protocol
 func (wf *WakuFilterLightNode) Stop() {
-	if wf.cancel == nil {
+	wf.Lock()
+	defer wf.Unlock()
+
+	if !wf.started {
 		return
 	}

@@ -102,10 +119,23 @@ func (wf *WakuFilterLightNode) Stop() {

 	wf.h.RemoveStreamHandler(FilterPushID_v20beta1)

-	_, _ = wf.UnsubscribeAll(wf.ctx)
+	res, err := wf.unsubscribeAll(wf.ctx)
+	if err != nil {
+		wf.log.Warn("unsubscribing from full nodes", zap.Error(err))
+	}
+
+	for r := range res {
+		if r.Err != nil {
+			wf.log.Warn("unsubscribing from full nodes", zap.Error(r.Err), logging.HostID("peerID", r.PeerID))
+		}
+
+	}
+
 	wf.subscriptions.Clear()

+	wf.started = false
+	wf.cancel = nil
+
 	wf.wg.Wait()
 }
@@ -206,6 +236,13 @@ func (wf *WakuFilterLightNode) request(ctx context.Context, params *FilterSubscr

 // Subscribe setups a subscription to receive messages that match a specific content filter
 func (wf *WakuFilterLightNode) Subscribe(ctx context.Context, contentFilter ContentFilter, opts ...FilterSubscribeOption) (*SubscriptionDetails, error) {
+	wf.RLock()
+	defer wf.RUnlock()
+
+	if !wf.started {
+		return nil, errNotStarted
+	}
+
 	if contentFilter.Topic == "" {
 		return nil, errors.New("topic is required")
 	}
@@ -244,6 +281,13 @@ func (wf *WakuFilterLightNode) Subscribe(ctx context.Context, contentFilter Cont

 // FilterSubscription is used to obtain an object from which you could receive messages received via filter protocol
 func (wf *WakuFilterLightNode) FilterSubscription(peerID peer.ID, contentFilter ContentFilter) (*SubscriptionDetails, error) {
+	wf.RLock()
+	defer wf.RUnlock()
+
+	if !wf.started {
+		return nil, errNotStarted
+	}
+
 	if !wf.subscriptions.Has(peerID, contentFilter.Topic, contentFilter.ContentTopics...) {
 		return nil, errors.New("subscription does not exist")
 	}
@@ -263,6 +307,13 @@ func (wf *WakuFilterLightNode) getUnsubscribeParameters(opts ...FilterUnsubscrib
 }

 func (wf *WakuFilterLightNode) Ping(ctx context.Context, peerID peer.ID) error {
+	wf.RLock()
+	defer wf.RUnlock()
+
+	if !wf.started {
+		return errNotStarted
+	}
+
 	return wf.request(
 		ctx,
 		&FilterSubscribeParameters{selectedPeer: peerID},
@@ -271,10 +322,24 @@ func (wf *WakuFilterLightNode) Ping(ctx context.Context, peerID peer.ID) error {
 }

 func (wf *WakuFilterLightNode) IsSubscriptionAlive(ctx context.Context, subscription *SubscriptionDetails) error {
+	wf.RLock()
+	defer wf.RUnlock()
+
+	if !wf.started {
+		return errNotStarted
+	}
+
 	return wf.Ping(ctx, subscription.PeerID)
 }

 func (wf *WakuFilterLightNode) Subscriptions() []*SubscriptionDetails {
+	wf.RLock()
+	defer wf.RUnlock()
+
+	if !wf.started {
+		return nil
+	}
+
 	wf.subscriptions.RLock()
 	defer wf.subscriptions.RUnlock()

@@ -324,6 +389,13 @@ func (wf *WakuFilterLightNode) cleanupSubscriptions(peerID peer.ID, contentFilte

 // Unsubscribe is used to stop receiving messages from a peer that match a content filter
 func (wf *WakuFilterLightNode) Unsubscribe(ctx context.Context, contentFilter ContentFilter, opts ...FilterUnsubscribeOption) (<-chan WakuFilterPushResult, error) {
+	wf.RLock()
+	defer wf.RUnlock()
+
+	if !wf.started {
+		return nil, errNotStarted
+	}
+
 	if contentFilter.Topic == "" {
 		return nil, errors.New("topic is required")
 	}
@@ -341,17 +413,33 @@ func (wf *WakuFilterLightNode) Unsubscribe(ctx context.Context, contentFilter Co
 		return nil, err
 	}

-	localWg := sync.WaitGroup{}
 	resultChan := make(chan WakuFilterPushResult, len(wf.subscriptions.items))
-	var peersUnsubscribed []peer.ID
 	for peerID := range wf.subscriptions.items {
 		if params.selectedPeer != "" && peerID != params.selectedPeer {
 			continue
 		}
-		peersUnsubscribed = append(peersUnsubscribed, peerID)
-		localWg.Add(1)
+
+		subscriptions, ok := wf.subscriptions.items[peerID]
+		if !ok || subscriptions == nil {
+			continue
+		}
+
+		wf.cleanupSubscriptions(peerID, contentFilter)
+		if len(subscriptions.subscriptionsPerTopic) == 0 {
+			delete(wf.subscriptions.items, peerID)
+		}
+
+		if params.wg != nil {
+			params.wg.Add(1)
+		}
+
 		go func(peerID peer.ID) {
-			defer localWg.Done()
+			defer func() {
+				if params.wg != nil {
+					params.wg.Done()
+				}
+			}()
+
 			err := wf.request(
 				ctx,
 				&FilterSubscribeParameters{selectedPeer: peerID, requestID: params.requestID},
@@ -367,27 +455,33 @@ func (wf *WakuFilterLightNode) Unsubscribe(ctx context.Context, contentFilter Co
 				}
 			}

-			wf.cleanupSubscriptions(peerID, contentFilter)
-
-			resultChan <- WakuFilterPushResult{
-				Err: err,
-				PeerID: peerID,
+			if params.wg != nil {
+				resultChan <- WakuFilterPushResult{
+					Err: err,
+					PeerID: peerID,
+				}
 			}
 		}(peerID)
 	}

-	localWg.Wait()
-	close(resultChan)
-	for _, peerID := range peersUnsubscribed {
-		if wf.subscriptions != nil && wf.subscriptions.items != nil && wf.subscriptions.items[peerID] != nil && len(wf.subscriptions.items[peerID].subscriptionsPerTopic) == 0 {
-			delete(wf.subscriptions.items, peerID)
-		}
+	if params.wg != nil {
+		params.wg.Wait()
 	}

+	close(resultChan)
+
 	return resultChan, nil
 }

 // Unsubscribe is used to stop receiving messages from a peer that match a content filter
 func (wf *WakuFilterLightNode) UnsubscribeWithSubscription(ctx context.Context, sub *SubscriptionDetails, opts ...FilterUnsubscribeOption) (<-chan WakuFilterPushResult, error) {
+	wf.RLock()
+	defer wf.RUnlock()
+
+	if !wf.started {
+		return nil, errNotStarted
+	}
+
 	var contentTopics []string
 	for k := range sub.ContentTopics {
 		contentTopics = append(contentTopics, k)
@@ -398,8 +492,7 @@ func (wf *WakuFilterLightNode) UnsubscribeWithSubscription(ctx context.Context,
 	return wf.Unsubscribe(ctx, ContentFilter{Topic: sub.PubsubTopic, ContentTopics: contentTopics}, opts...)
 }

-// UnsubscribeAll is used to stop receiving messages from peer(s). It does not close subscriptions
-func (wf *WakuFilterLightNode) UnsubscribeAll(ctx context.Context, opts ...FilterUnsubscribeOption) (<-chan WakuFilterPushResult, error) {
+func (wf *WakuFilterLightNode) unsubscribeAll(ctx context.Context, opts ...FilterUnsubscribeOption) (<-chan WakuFilterPushResult, error) {
 	params, err := wf.getUnsubscribeParameters(opts...)
 	if err != nil {
 		return nil, err
@@ -408,19 +501,26 @@ func (wf *WakuFilterLightNode) unsubscribeAll(ctx context.Context, opts ...Filte
 	wf.subscriptions.Lock()
 	defer wf.subscriptions.Unlock()

-	localWg := sync.WaitGroup{}
 	resultChan := make(chan WakuFilterPushResult, len(wf.subscriptions.items))
-	var peersUnsubscribed []peer.ID

 	for peerID := range wf.subscriptions.items {
 		if params.selectedPeer != "" && peerID != params.selectedPeer {
 			continue
 		}
-		peersUnsubscribed = append(peersUnsubscribed, peerID)

-		localWg.Add(1)
+		delete(wf.subscriptions.items, peerID)
+
+		if params.wg != nil {
+			params.wg.Add(1)
+		}
+
 		go func(peerID peer.ID) {
-			defer localWg.Done()
+			defer func() {
+				if params.wg != nil {
+					params.wg.Done()
+				}
+			}()
+
 			err := wf.request(
 				ctx,
 				&FilterSubscribeParameters{selectedPeer: peerID, requestID: params.requestID},
@@ -429,17 +529,32 @@ func (wf *WakuFilterLightNode) unsubscribeAll(ctx context.Context, opts ...Filte
 			if err != nil {
 				wf.log.Error("could not unsubscribe from peer", logging.HostID("peerID", peerID), zap.Error(err))
 			}
-			resultChan <- WakuFilterPushResult{
-				Err: err,
-				PeerID: peerID,
+			if params.wg != nil {
+				resultChan <- WakuFilterPushResult{
+					Err: err,
+					PeerID: peerID,
+				}
 			}
 		}(peerID)
 	}

-	localWg.Wait()
-	close(resultChan)
-	for _, peerID := range peersUnsubscribed {
-		delete(wf.subscriptions.items, peerID)
+	if params.wg != nil {
+		params.wg.Wait()
 	}

+	close(resultChan)
+
 	return resultChan, nil
 }

+// UnsubscribeAll is used to stop receiving messages from peer(s). It does not close subscriptions
+func (wf *WakuFilterLightNode) UnsubscribeAll(ctx context.Context, opts ...FilterUnsubscribeOption) (<-chan WakuFilterPushResult, error) {
+	wf.RLock()
+	defer wf.RUnlock()
+
+	if !wf.started {
+		return nil, errNotStarted
+	}
+
+	return wf.unsubscribeAll(ctx, opts...)
+}
@@ -2,6 +2,7 @@ package filter

 import (
 	"context"
+	"sync"
 	"time"

 	"github.com/libp2p/go-libp2p/core/host"
@@ -26,6 +27,7 @@ type (
 		selectedPeer peer.ID
 		requestID []byte
 		log *zap.Logger
+		wg *sync.WaitGroup
 	}

 	FilterParameters struct {
@@ -135,9 +137,26 @@ func AutomaticRequestId() FilterUnsubscribeOption {
 	}
 }

+// WithWaitGroup allows specifying a waitgroup to wait until all
+// unsubscribe requests are complete before the function is complete
+func WithWaitGroup(wg *sync.WaitGroup) FilterUnsubscribeOption {
+	return func(params *FilterUnsubscribeParameters) {
+		params.wg = wg
+	}
+}
+
+// DontWait is used to fire and forget an unsubscription, and don't
+// care about the results of it
+func DontWait() FilterUnsubscribeOption {
+	return func(params *FilterUnsubscribeParameters) {
+		params.wg = nil
+	}
+}
+
 func DefaultUnsubscribeOptions() []FilterUnsubscribeOption {
 	return []FilterUnsubscribeOption{
 		AutomaticRequestId(),
+		WithWaitGroup(&sync.WaitGroup{}),
 	}
 }
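Hypothetical caller-side usage of the new options (lightNode and ctx assumed): the defaults add a WaitGroup so unsubscribing blocks until every peer answers, while DontWait fires and forgets:

// Wait for every peer's result (WithWaitGroup is applied by default):
res, err := lightNode.UnsubscribeAll(ctx)
if err == nil {
	for r := range res {
		if r.Err != nil {
			log.Printf("unsubscribe from %s failed: %v", r.PeerID, r.Err)
		}
	}
}

// Fire and forget; the result channel closes without entries:
_, _ = lightNode.UnsubscribeAll(ctx, filter.DontWait())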
@@ -267,7 +267,12 @@ func (wf *WakuFilterFullNode) filterListener(ctx context.Context) {
 }

 func (wf *WakuFilterFullNode) pushMessage(ctx context.Context, peerID peer.ID, env *protocol.Envelope) error {
-	logger := wf.log.With(logging.HostID("peer", peerID))
+	logger := wf.log.With(
+		logging.HostID("peer", peerID),
+		logging.HexBytes("envelopeHash", env.Hash()),
+		zap.String("pubsubTopic", env.PubsubTopic()),
+		zap.String("contentTopic", env.Message().ContentTopic),
+	)

 	messagePush := &pb.MessagePushV2{
 		PubsubTopic: env.PubsubTopic(),
@@ -298,12 +303,15 @@ func (wf *WakuFilterFullNode) pushMessage(ctx context.Context, peerID peer.ID, e
 		} else {
 			wf.metrics.RecordError(writeResponseFailure)
 		}
-		logger.Error("pushing messages to peer", logging.HexBytes("envelopeHash", env.Hash()), zap.String("pubsubTopic", env.PubsubTopic()), zap.String("contentTopic", env.Message().ContentTopic), zap.Error(err))
+		logger.Error("pushing messages to peer", zap.Error(err))
 		wf.subscriptions.FlagAsFailure(peerID)
 		return nil
 	}

 	wf.subscriptions.FlagAsSuccess(peerID)
+
+	logger.Info("message pushed succesfully") // TODO: remove or change to debug once dogfooding of filter is complete
+
 	return nil
 }
vendor/github.com/waku-org/go-waku/waku/v2/protocol/filter/subscriptions_map.go (2 changes, generated, vendored)

@@ -225,7 +225,7 @@ func iterateSubscriptionSet(logger *zap.Logger, subscriptions SubscriptionSet, e
 		defer subscription.RUnlock()

 		_, ok := subscription.ContentTopics[envelope.Message().ContentTopic]
-		if !ok && len(subscription.ContentTopics) != 0 {
+		if !ok { // only send the msg to subscriptions that have matching contentTopic
 			return
 		}

@@ -94,7 +94,7 @@ type StaticShardingPubsubTopic struct {
 }

 // NewStaticShardingPubsubTopic creates a new pubSub topic
-func NewStaticShardingPubsubTopic(cluster uint16, shard uint16) NamespacedPubsubTopic {
+func NewStaticShardingPubsubTopic(cluster uint16, shard uint16) StaticShardingPubsubTopic {
 	return StaticShardingPubsubTopic{
 		kind: StaticSharding,
 		cluster: cluster,
@@ -3,6 +3,7 @@
 ### Requirements:
 - Node.js
 - Go
+- jq
 - [solcjs 0.8.15](https://github.com/ethereum/solc-js)
 - [abigen](https://github.com/ethereum/go-ethereum/tree/master/cmd/abigen)

File diff suppressed because one or more lines are too long
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/contracts/compile_contracts.sh (new file, 24 lines, generated, vendored)

@@ -0,0 +1,24 @@
+#!/bin/sh
+
+# Generate solc_output.json
+cat import.json | solcjs --bin --standard-json --base-path . > solc_output.json
+tail -n +2 solc_output.json > solc_output.tmp # Removes ">>> Cannot retry compilation with SMT because there are no SMT solvers available."
+mv solc_output.tmp solc_output.json
+
+# Generate abi and binary files for each smart contract
+jq '.contracts."WakuRln.sol".WakuRln.abi' -r -c solc_output.json > WakuRln.abi
+jq '.contracts."WakuRln.sol".WakuRln.evm.bytecode.object' -r -c solc_output.json > WakuRln.bin
+jq '.contracts."WakuRlnRegistry.sol".WakuRlnRegistry.abi' -r -c solc_output.json > WakuRlnRegistry.abi
+jq '.contracts."WakuRlnRegistry.sol".WakuRlnRegistry.evm.bytecode.object' -r -c solc_output.json > WakuRlnRegistry.bin
+jq '.contracts."rln-contract/PoseidonHasher.sol".PoseidonHasher.abi' -r -c solc_output.json > PoseidonHasher.abi
+jq '.contracts."rln-contract/PoseidonHasher.sol".PoseidonHasher.evm.bytecode.object' -r -c solc_output.json > PoseidonHasher.bin
+
+# Generate golang types for each contract
+abigen --abi ./WakuRln.abi --pkg contracts --type RLN --out ./rln.go --bin ./WakuRln.bin
+abigen --abi ./WakuRlnRegistry.abi --pkg contracts --type RLNRegistry --out ./registry.go --bin ./WakuRlnRegistry.bin
+abigen --abi ./PoseidonHasher.abi --pkg contracts --type PoseidonHasher --out ./poseidon.go --bin ./PoseidonHasher.bin
+
+# Cleanup
+rm *.bin
+rm *.abi
+rm solc_output.json
@@ -1,5 +1,3 @@
 package contracts

-//go:generate solcjs --abi --bin ./rln-contract/contracts/Rln.sol -o ./
-//go:generate abigen --abi ./rln-contract_contracts_Rln_sol_RLN.abi --pkg contracts --type RLN --out ./RLN.go --bin ./rln-contract_contracts_Rln_sol_RLN.bin
-//go:generate abigen --abi ./rln-contract_contracts_PoseidonHasher_sol_PoseidonHasher.abi --pkg contracts --type PoseidonHasher --out ./PoseidonHasher.go --bin ./rln-contract_contracts_PoseidonHasher_sol_PoseidonHasher.bin
+//go:generate ./compile_contracts.sh
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/contracts/import.json (new file, 31 lines, generated, vendored)

@@ -0,0 +1,31 @@
+{
+  "language": "Solidity",
+  "sources": {
+    "WakuRln.sol": {
+      "urls": ["./waku-rln-contract/contracts/WakuRln.sol"]
+    },
+    "WakuRlnRegistry.sol": {
+      "urls": ["./waku-rln-contract/contracts/WakuRlnRegistry.sol"]
+    },
+    "rln-contract/PoseidonHasher.sol": {
+      "urls": [
+        "./waku-rln-contract/lib/rln-contract/contracts/PoseidonHasher.sol"
+      ]
+    },
+    "rln-contract/RlnBase.sol": {
+      "urls": ["./waku-rln-contract/lib/rln-contract/contracts/RlnBase.sol"]
+    },
+    "rln-contract/IVerifier.sol": {
+      "urls": ["./waku-rln-contract/lib/rln-contract/contracts/IVerifier.sol"]
+    },
+    "openzeppelin-contracts/contracts/access/Ownable.sol": {
+      "urls": ["./waku-rln-contract/lib/openzeppelin-contracts/contracts/access/Ownable.sol"]
+    },
+    "openzeppelin-contracts/contracts/utils/Context.sol": {
+      "urls": ["./waku-rln-contract/lib/openzeppelin-contracts/contracts/utils/Context.sol"]
+    }
+  },
+  "settings": {
+    "outputSelection": { "*": { "*": ["abi", "evm.bytecode.object"] } }
+  }
+}
File diff suppressed because one or more lines are too long
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/contracts/registry.go (new file, 814 lines, generated, vendored)
File diff suppressed because one or more lines are too long
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/contracts/rln.go (new file, 1202 lines, generated, vendored)
File diff suppressed because one or more lines are too long
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/dynamic/dynamic.go (158 changes, generated, vendored)
@@ -3,7 +3,6 @@ package dynamic
 import (
 	"context"
 	"errors"
-	"fmt"
 	"math/big"
 	"sync"
 	"time"
@@ -11,12 +10,12 @@ import (
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/ethclient"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/waku-org/go-waku/logging"
 	"github.com/waku-org/go-waku/waku/v2/protocol/rln/contracts"
 	"github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager"
 	"github.com/waku-org/go-waku/waku/v2/protocol/rln/keystore"
+	"github.com/waku-org/go-waku/waku/v2/protocol/rln/web3"
 	"github.com/waku-org/go-zerokit-rln/rln"
 	om "github.com/wk8/go-ordered-map"
 	"go.uber.org/zap"
@@ -25,7 +24,7 @@ import (
 var RLNAppInfo = keystore.AppInfo{
 	Application: "waku-rln-relay",
 	AppIdentifier: "01234567890abcdef",
-	Version: "0.1",
+	Version: "0.2",
 }

 type DynamicGroupManager struct {
@@ -37,24 +36,15 @@ type DynamicGroupManager struct {
 	wg sync.WaitGroup

 	identityCredential *rln.IdentityCredential
-	membershipIndex *rln.MembershipIndex
-
-	membershipContractAddress common.Address
-	membershipGroupIndex uint
-	ethClientAddress string
-	ethClient *ethclient.Client
+	membershipIndex rln.MembershipIndex

+	web3Config *web3.Config
 	lastBlockProcessed uint64

 	eventHandler RegistrationEventHandler

-	chainId *big.Int
-	rlnContract *contracts.RLN
-
 	saveKeystore bool
-	keystorePath string
+	appKeystore *keystore.AppKeystore
 	keystorePassword string
-	keystoreIndex uint

 	rootTracker *group_manager.MerkleRootTracker
 }
@@ -103,14 +93,15 @@ func handler(gm *DynamicGroupManager, events []*contracts.RLNMemberRegistered) e
 	gm.lastBlockProcessed = lastBlockProcessed
 	err = gm.SetMetadata(RLNMetadata{
 		LastProcessedBlock: gm.lastBlockProcessed,
-		ChainID: gm.chainId,
-		ContractAddress: gm.membershipContractAddress,
+		ChainID: gm.web3Config.ChainID,
+		ContractAddress: gm.web3Config.RegistryContract.Address,
+		ValidRootsPerBlock: gm.rootTracker.ValidRootsPerBlock(),
 	})
 	if err != nil {
 		// this is not a fatal error, hence we don't raise an exception
 		gm.log.Warn("failed to persist rln metadata", zap.Error(err))
 	} else {
-		gm.log.Debug("rln metadata persisted", zap.Uint64("lastProcessedBlock", gm.lastBlockProcessed), zap.Uint64("chainID", gm.chainId.Uint64()), logging.HexBytes("contractAddress", gm.membershipContractAddress[:]))
+		gm.log.Debug("rln metadata persisted", zap.Uint64("lastProcessedBlock", gm.lastBlockProcessed), zap.Uint64("chainID", gm.web3Config.ChainID.Uint64()), logging.HexBytes("contractAddress", gm.web3Config.RegistryContract.Address.Bytes()))
 	}

 	return nil
@@ -121,44 +112,31 @@ type RegistrationHandler = func(tx *types.Transaction)
 func NewDynamicGroupManager(
 	ethClientAddr string,
 	memContractAddr common.Address,
-	membershipGroupIndex uint,
-	keystorePath string,
+	membershipIndex uint,
+	appKeystore *keystore.AppKeystore,
 	keystorePassword string,
-	keystoreIndex uint,
 	saveKeystore bool,
 	reg prometheus.Registerer,
 	log *zap.Logger,
 ) (*DynamicGroupManager, error) {
 	log = log.Named("rln-dynamic")

-	path := keystorePath
-	if path == "" {
-		log.Warn("keystore: no credentials path set, using default path", zap.String("path", keystore.RLN_CREDENTIALS_FILENAME))
-		path = keystore.RLN_CREDENTIALS_FILENAME
-	}
-
-	password := keystorePassword
-	if password == "" {
-		log.Warn("keystore: no credentials password set, using default password", zap.String("password", keystore.RLN_CREDENTIALS_PASSWORD))
-		password = keystore.RLN_CREDENTIALS_PASSWORD
-	}
-
 	return &DynamicGroupManager{
-		membershipGroupIndex: membershipGroupIndex,
-		membershipContractAddress: memContractAddr,
-		ethClientAddress: ethClientAddr,
-		eventHandler: handler,
 		saveKeystore: saveKeystore,
-		keystorePath: path,
-		keystorePassword: password,
-		keystoreIndex: keystoreIndex,
-		log: log,
-		metrics: newMetrics(reg),
+		membershipIndex: membershipIndex,
+		web3Config: web3.NewConfig(ethClientAddr, memContractAddr),
+		eventHandler: handler,
+		appKeystore: appKeystore,
+		keystorePassword: keystorePassword,
+		log: log,
+		metrics: newMetrics(reg),
 	}, nil
 }

 func (gm *DynamicGroupManager) getMembershipFee(ctx context.Context) (*big.Int, error) {
-	return gm.rlnContract.MEMBERSHIPDEPOSIT(&bind.CallOpts{Context: ctx})
+	return gm.web3Config.RLNContract.MEMBERSHIPDEPOSIT(&bind.CallOpts{Context: ctx})
 }

+func (gm *DynamicGroupManager) memberExists(ctx context.Context, idCommitment rln.IDCommitment) (bool, error) {
+	return gm.web3Config.RLNContract.MemberExists(&bind.CallOpts{Context: ctx}, rln.Bytes32ToBigInt(idCommitment))
+}
+
 func (gm *DynamicGroupManager) Start(ctx context.Context, rlnInstance *rln.RLN, rootTracker *group_manager.MerkleRootTracker) error {
@@ -171,64 +149,23 @@ func (gm *DynamicGroupManager) Start(ctx context.Context, rlnInstance *rln.RLN,

 	gm.log.Info("mounting rln-relay in on-chain/dynamic mode")

-	backend, err := ethclient.Dial(gm.ethClientAddress)
+	err := gm.web3Config.Build(ctx)
 	if err != nil {
 		return err
 	}
-	gm.ethClient = backend

 	gm.rln = rlnInstance
 	gm.rootTracker = rootTracker

-	gm.chainId, err = backend.ChainID(ctx)
-	if err != nil {
-		return err
-	}
-
-	gm.rlnContract, err = contracts.NewRLN(gm.membershipContractAddress, backend)
-	if err != nil {
-		return err
-	}
-
 	// check if the contract exists by calling a static function
 	_, err = gm.getMembershipFee(ctx)
 	if err != nil {
 		return err
 	}

-	if gm.identityCredential == nil && gm.keystorePassword != "" && gm.keystorePath != "" {
-		start := time.Now()
-		credentials, err := keystore.GetMembershipCredentials(gm.log,
-			gm.keystorePath,
-			gm.keystorePassword,
-			RLNAppInfo,
-			nil,
-			[]keystore.MembershipContract{{
-				ChainId: fmt.Sprintf("0x%X", gm.chainId),
-				Address: gm.membershipContractAddress.Hex(),
-			}})
-		if err != nil {
-			return err
-		}
-		gm.metrics.RecordMembershipCredentialsImportDuration(time.Since(start))
-
-		if len(credentials) != 0 {
-			if int(gm.keystoreIndex) <= len(credentials)-1 {
-				credential := credentials[gm.keystoreIndex]
-				gm.identityCredential = credential.IdentityCredential
-				if int(gm.membershipGroupIndex) <= len(credential.MembershipGroups)-1 {
-					gm.membershipIndex = &credential.MembershipGroups[gm.membershipGroupIndex].TreeIndex
-				} else {
-					return errors.New("invalid membership group index")
-				}
-			} else {
-				return errors.New("invalid keystore index")
-			}
-		}
-	}
-
-	if gm.identityCredential == nil || gm.membershipIndex == nil {
-		return errors.New("no credentials available")
+	err = gm.loadCredential(ctx)
+	if err != nil {
+		return err
 	}

 	if err = gm.HandleGroupUpdates(ctx, gm.eventHandler); err != nil {
@@ -238,6 +175,36 @@ func (gm *DynamicGroupManager) Start(ctx context.Context, rlnInstance *rln.RLN,
 	return nil
 }

+func (gm *DynamicGroupManager) loadCredential(ctx context.Context) error {
+	start := time.Now()
+
+	credentials, err := gm.appKeystore.GetMembershipCredentials(
+		gm.keystorePassword,
+		gm.membershipIndex,
+		keystore.NewMembershipContractInfo(gm.web3Config.ChainID, gm.web3Config.RegistryContract.Address))
+	if err != nil {
+		return err
+	}
+	gm.metrics.RecordMembershipCredentialsImportDuration(time.Since(start))
+
+	if credentials == nil {
+		return errors.New("no credentials available")
+	}
+
+	exists, err := gm.memberExists(ctx, credentials.IdentityCredential.IDCommitment)
+	if err != nil {
+		return err
+	}
+
+	if !exists {
+		return errors.New("the provided commitment does not have a membership")
+	}
+
+	gm.identityCredential = credentials.IdentityCredential
+
+	return nil
+}
+
 func (gm *DynamicGroupManager) InsertMembers(toInsert *om.OrderedMap) error {
 	for pair := toInsert.Oldest(); pair != nil; pair = pair.Next() {
 		events := pair.Value.([]*contracts.RLNMemberRegistered) // TODO: should these be sortered by index? we assume all members arrive in order
@@ -294,12 +261,8 @@ func (gm *DynamicGroupManager) IdentityCredentials() (rln.IdentityCredential, er
 	return *gm.identityCredential, nil
 }

-func (gm *DynamicGroupManager) MembershipIndex() (rln.MembershipIndex, error) {
-	if gm.membershipIndex == nil {
-		return 0, errors.New("membership index has not been setup")
-	}
-
-	return *gm.membershipIndex, nil
+func (gm *DynamicGroupManager) MembershipIndex() rln.MembershipIndex {
+	return gm.membershipIndex
 }

 // Stop stops all go-routines, eth client and closes the rln database
@@ -314,7 +277,8 @@ func (gm *DynamicGroupManager) Stop() error {
 	if err != nil {
 		return err
 	}
-	gm.ethClient.Close()
+
+	gm.web3Config.ETHClient.Close()

 	gm.wg.Wait()

@@ -6,6 +6,7 @@ import (
 	"math/big"

 	"github.com/ethereum/go-ethereum/common"
+	"github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager"
 )

 // RLNMetadata persists attributes in the RLN database
@ -13,6 +14,7 @@ type RLNMetadata struct {
|
|||
LastProcessedBlock uint64
|
||||
ChainID *big.Int
|
||||
ContractAddress common.Address
|
||||
ValidRootsPerBlock []group_manager.RootsPerBlock
|
||||
}
|
||||
|
||||
// Serialize converts a RLNMetadata into a binary format expected by zerokit's RLN
|
||||
|
@ -26,24 +28,49 @@ func (r RLNMetadata) Serialize() []byte {
|
|||
result = binary.LittleEndian.AppendUint64(result, r.LastProcessedBlock)
|
||||
result = binary.LittleEndian.AppendUint64(result, chainID.Uint64())
|
||||
result = append(result, r.ContractAddress.Bytes()...)
|
||||
result = binary.LittleEndian.AppendUint64(result, uint64(len(r.ValidRootsPerBlock)))
|
||||
for _, v := range r.ValidRootsPerBlock {
|
||||
result = append(result, v.Root[:]...)
|
||||
result = binary.LittleEndian.AppendUint64(result, v.BlockNumber)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
const lastProcessedBlockOffset = 0
|
||||
const chainIDOffset = lastProcessedBlockOffset + 8
|
||||
const contractAddressOffset = chainIDOffset + 8
|
||||
const metadataByteLen = 8 + 8 + 20 // 2 uint64 fields and a 20bytes address
|
||||
const validRootsLenOffset = contractAddressOffset + 20
|
||||
const validRootsValOffset = validRootsLenOffset + 8
|
||||
const metadataByteLen = 8 + 8 + 20 + 8 // uint64 + uint64 + ethAddress + uint64
|
||||
|
||||
// DeserializeMetadata converts a byte slice into a RLNMetadata instance
|
||||
func DeserializeMetadata(b []byte) (RLNMetadata, error) {
|
||||
if len(b) != metadataByteLen {
|
||||
if len(b) < metadataByteLen {
|
||||
return RLNMetadata{}, errors.New("wrong size")
|
||||
}
|
||||
|
||||
validRootLen := binary.LittleEndian.Uint64(b[validRootsLenOffset:validRootsValOffset])
|
||||
if len(b) < int(metadataByteLen+validRootLen*(32+8)) { // len of a merkle node and len of a uint64 for the block number
|
||||
return RLNMetadata{}, errors.New("wrong size")
|
||||
}
|
||||
|
||||
validRoots := make([]group_manager.RootsPerBlock, 0, validRootLen)
|
||||
for i := 0; i < int(validRootLen); i++ {
|
||||
rootOffset := validRootsValOffset + (i * (32 + 8))
|
||||
blockOffset := rootOffset + 32
|
||||
root := group_manager.RootsPerBlock{
|
||||
BlockNumber: binary.LittleEndian.Uint64(b[blockOffset : blockOffset+8]),
|
||||
}
|
||||
copy(root.Root[:], b[rootOffset:blockOffset])
|
||||
validRoots = append(validRoots, root)
|
||||
}
|
||||
|
||||
return RLNMetadata{
|
||||
LastProcessedBlock: binary.LittleEndian.Uint64(b[lastProcessedBlockOffset:chainIDOffset]),
|
||||
ChainID: new(big.Int).SetUint64(binary.LittleEndian.Uint64(b[chainIDOffset:contractAddressOffset])),
|
||||
ContractAddress: common.BytesToAddress(b[contractAddressOffset:]),
|
||||
ContractAddress: common.BytesToAddress(b[contractAddressOffset:validRootsLenOffset]),
|
||||
ValidRootsPerBlock: validRoots,
|
||||
}, nil
|
||||
}
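To make the offset arithmetic above concrete, here is a minimal standalone sketch (hypothetical values; only the layout comes from the constants in this diff) that builds a buffer the deserializer would accept:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Layout: lastProcessedBlock (8) | chainID (8) | contract address (20) | root count (8),
	// followed by one (32-byte root + 8-byte block number) pair per valid root.
	var buf []byte
	buf = binary.LittleEndian.AppendUint64(buf, 42)       // LastProcessedBlock (hypothetical)
	buf = binary.LittleEndian.AppendUint64(buf, 11155111) // ChainID, truncated to uint64 as in Serialize
	buf = append(buf, make([]byte, 20)...)                // ContractAddress (zero address for the sketch)
	buf = binary.LittleEndian.AppendUint64(buf, 1)        // len(ValidRootsPerBlock)
	buf = append(buf, make([]byte, 32)...)                // one 32-byte merkle root
	buf = binary.LittleEndian.AppendUint64(buf, 42)       // block number for that root

	fmt.Println(len(buf) == 8+8+20+8+32+8)              // true: metadataByteLen plus one (root, block) pair
	fmt.Println(binary.LittleEndian.Uint64(buf[36:44])) // 1: the root count read back at validRootsLenOffset
}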
29 vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/dynamic/web3.go generated vendored
@@ -21,16 +21,16 @@ type RegistrationEventHandler = func(*DynamicGroupManager, []*contracts.RLNMemberRegistered) error
// It connects to the eth client, subscribes to the `MemberRegistered` event emitted from the `MembershipContract`
// and collects all the events; for every received event, it calls the `handler`
func (gm *DynamicGroupManager) HandleGroupUpdates(ctx context.Context, handler RegistrationEventHandler) error {
fromBlock := uint64(0)
fromBlock := gm.web3Config.RLNContract.DeployedBlockNumber
metadata, err := gm.GetMetadata()
if err != nil {
gm.log.Warn("could not load last processed block from metadata. Starting onchain sync from scratch", zap.Error(err))
gm.log.Warn("could not load last processed block from metadata. Starting onchain sync from deployment block", zap.Error(err), zap.Uint64("deploymentBlock", gm.web3Config.RLNContract.DeployedBlockNumber))
} else {
if gm.chainId.Uint64() != metadata.ChainID.Uint64() {
if gm.web3Config.ChainID.Cmp(metadata.ChainID) != 0 {
return errors.New("persisted data: chain id mismatch")
}

if !bytes.Equal(gm.membershipContractAddress[:], metadata.ContractAddress[:]) {
if !bytes.Equal(gm.web3Config.RegistryContract.Address.Bytes(), metadata.ContractAddress.Bytes()) {
return errors.New("persisted data: contract address mismatch")
}
@@ -38,7 +38,9 @@ func (gm *DynamicGroupManager) HandleGroupUpdates(ctx context.Context, handler RegistrationEventHandler) error {
gm.log.Info("resuming onchain sync", zap.Uint64("fromBlock", fromBlock))
}

err = gm.loadOldEvents(ctx, gm.rlnContract, fromBlock, handler)
gm.rootTracker.SetValidRootsPerBlock(metadata.ValidRootsPerBlock)

err = gm.loadOldEvents(ctx, fromBlock, handler)
if err != nil {
return err
}
@@ -46,11 +48,11 @@ func (gm *DynamicGroupManager) HandleGroupUpdates(ctx context.Context, handler RegistrationEventHandler) error {
errCh := make(chan error)

gm.wg.Add(1)
go gm.watchNewEvents(ctx, gm.rlnContract, handler, gm.log, errCh)
go gm.watchNewEvents(ctx, handler, gm.log, errCh)
return <-errCh
}
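Condensed to its decision, the resume logic above behaves like this runnable sketch (stub types; only the checks mirror the vendored code):

package main

import (
	"errors"
	"fmt"
	"math/big"
)

// metadata stands in for the persisted RLNMetadata.
type metadata struct {
	LastProcessedBlock uint64
	ChainID            *big.Int
}

// resumeFrom picks the block to sync from: the deployment block by default,
// or the persisted position when the metadata matches the current chain.
func resumeFrom(deployedBlock uint64, m *metadata, loadErr error, chainID *big.Int) (uint64, error) {
	if loadErr != nil {
		return deployedBlock, nil // no usable metadata: sync from the deployment block
	}
	if chainID.Cmp(m.ChainID) != 0 {
		return 0, errors.New("persisted data: chain id mismatch")
	}
	return m.LastProcessedBlock, nil
}

func main() {
	from, err := resumeFrom(100, &metadata{LastProcessedBlock: 5000, ChainID: big.NewInt(1)}, nil, big.NewInt(1))
	fmt.Println(from, err) // 5000 <nil>
}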

func (gm *DynamicGroupManager) loadOldEvents(ctx context.Context, rlnContract *contracts.RLN, fromBlock uint64, handler RegistrationEventHandler) error {
func (gm *DynamicGroupManager) loadOldEvents(ctx context.Context, fromBlock uint64, handler RegistrationEventHandler) error {
events, err := gm.getEvents(ctx, fromBlock, nil)
if err != nil {
return err
@@ -58,14 +60,14 @@ func (gm *DynamicGroupManager) loadOldEvents(ctx context.Context, rlnContract *contracts.RLN, fromBlock uint64, handler RegistrationEventHandler) error {
return handler(gm, events)
}

func (gm *DynamicGroupManager) watchNewEvents(ctx context.Context, rlnContract *contracts.RLN, handler RegistrationEventHandler, log *zap.Logger, errCh chan<- error) {
func (gm *DynamicGroupManager) watchNewEvents(ctx context.Context, handler RegistrationEventHandler, log *zap.Logger, errCh chan<- error) {
defer gm.wg.Done()

// Watch for new events
firstErr := true
headerCh := make(chan *types.Header)
subs := event.Resubscribe(2*time.Second, func(ctx context.Context) (event.Subscription, error) {
s, err := gm.ethClient.SubscribeNewHead(ctx, headerCh)
s, err := gm.web3Config.ETHClient.SubscribeNewHead(ctx, headerCh)
if err != nil {
if err == rpc.ErrNotificationsUnsupported {
err = errors.New("notifications not supported. The node must support websockets")
@@ -123,7 +125,7 @@ func (gm *DynamicGroupManager) getEvents(ctx context.Context, from uint64, to *uint64) ([]*contracts.RLNMemberRegistered, error) {

toBlock := to
if to == nil {
block, err := gm.ethClient.BlockByNumber(ctx, nil)
block, err := gm.web3Config.ETHClient.BlockByNumber(ctx, nil)
if err != nil {
return nil, err
}
@@ -148,6 +150,8 @@ func (gm *DynamicGroupManager) getEvents(ctx context.Context, from uint64, to *uint64) ([]*contracts.RLNMemberRegistered, error) {
end = *toBlock
}

gm.log.Info("loading events...", zap.Uint64("fromBlock", start), zap.Uint64("toBlock", end))

evts, err := gm.fetchEvents(ctx, start, &end)
if err != nil {
if tooMuchDataRequestedError(err) {
@@ -157,6 +161,9 @@ func (gm *DynamicGroupManager) getEvents(ctx context.Context, from uint64, to *uint64) ([]*contracts.RLNMemberRegistered, error) {

// multiplicative decrease
batchSize = batchSize / multiplicativeDecreaseDivisor

gm.log.Warn("too many logs requested!, retrying with a smaller chunk size", zap.Uint64("batchSize", batchSize))

continue
}
return nil, err
@@ -179,7 +186,7 @@ func (gm *DynamicGroupManager) getEvents(ctx context.Context, from uint64, to *uint64) ([]*contracts.RLNMemberRegistered, error) {
}

func (gm *DynamicGroupManager) fetchEvents(ctx context.Context, from uint64, to *uint64) ([]*contracts.RLNMemberRegistered, error) {
logIterator, err := gm.rlnContract.FilterMemberRegistered(&bind.FilterOpts{Start: from, End: to, Context: ctx})
logIterator, err := gm.web3Config.RLNContract.FilterMemberRegistered(&bind.FilterOpts{Start: from, End: to, Context: ctx})
if err != nil {
return nil, err
}
46 vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/root_tracker.go generated vendored
@@ -7,11 +7,14 @@ import (
"github.com/waku-org/go-zerokit-rln/rln"
)

// RootsPerBlock stores the merkle root generated at block number N
type RootsPerBlock struct {
root rln.MerkleNode
blockNumber uint64
Root rln.MerkleNode
BlockNumber uint64
}

// MerkleRootTracker keeps track of the latest N merkle roots considered
// valid for RLN proofs.
type MerkleRootTracker struct {
sync.RWMutex
@@ -23,6 +26,7 @@ type MerkleRootTracker struct {

const maxBufferSize = 20

// NewMerkleRootTracker creates an instance of MerkleRootTracker
func NewMerkleRootTracker(acceptableRootWindowSize int, rlnInstance *rln.RLN) (*MerkleRootTracker, error) {
result := &MerkleRootTracker{
acceptableRootWindowSize: acceptableRootWindowSize,
@@ -37,13 +41,14 @@ func NewMerkleRootTracker(acceptableRootWindowSize int, rlnInstance *rln.RLN) (*MerkleRootTracker, error) {
return result, nil
}

// Backfill is used to pop merkle roots when there is a chain fork
func (m *MerkleRootTracker) Backfill(fromBlockNumber uint64) {
m.Lock()
defer m.Unlock()

numBlocks := 0
for i := len(m.validMerkleRoots) - 1; i >= 0; i-- {
if m.validMerkleRoots[i].blockNumber >= fromBlockNumber {
if m.validMerkleRoots[i].BlockNumber >= fromBlockNumber {
numBlocks++
}
}
@@ -87,7 +92,7 @@ func (m *MerkleRootTracker) IndexOf(root [32]byte) int {
defer m.RUnlock()

for i := range m.validMerkleRoots {
if bytes.Equal(m.validMerkleRoots[i].root[:], root[:]) {
if bytes.Equal(m.validMerkleRoots[i].Root[:], root[:]) {
return i
}
}
@@ -95,6 +100,8 @@ func (m *MerkleRootTracker) IndexOf(root [32]byte) int {
return -1
}

// UpdateLatestRoot should be called when a block containing a new
// IDCommitment is received, so we can keep track of the merkle root change
func (m *MerkleRootTracker) UpdateLatestRoot(blockNumber uint64) (rln.MerkleNode, error) {
m.Lock()
defer m.Unlock()
@@ -111,8 +118,8 @@ func (m *MerkleRootTracker) UpdateLatestRoot(blockNumber uint64) (rln.MerkleNode, error) {

func (m *MerkleRootTracker) pushRoot(blockNumber uint64, root [32]byte) {
m.validMerkleRoots = append(m.validMerkleRoots, RootsPerBlock{
root: root,
blockNumber: blockNumber,
Root: root,
BlockNumber: blockNumber,
})

// Maintain valid merkle root window
@@ -125,29 +132,50 @@ func (m *MerkleRootTracker) pushRoot(blockNumber uint64, root [32]byte) {
if len(m.merkleRootBuffer) > maxBufferSize {
m.merkleRootBuffer = m.merkleRootBuffer[1:]
}

}

// Roots returns the list of valid merkle roots
func (m *MerkleRootTracker) Roots() []rln.MerkleNode {
m.RLock()
defer m.RUnlock()

result := make([]rln.MerkleNode, len(m.validMerkleRoots))
for i := range m.validMerkleRoots {
result[i] = m.validMerkleRoots[i].root
result[i] = m.validMerkleRoots[i].Root
}

return result
}

// Buffer is a repository of older merkle roots that, although once valid,
// have left the acceptable window of merkle roots. We keep track of them
// in case a chain fork occurs and we need to restore the valid merkle
// roots to a previous point in time
func (m *MerkleRootTracker) Buffer() []rln.MerkleNode {
m.RLock()
defer m.RUnlock()

result := make([]rln.MerkleNode, len(m.merkleRootBuffer))
for i := range m.merkleRootBuffer {
result[i] = m.merkleRootBuffer[i].root
result[i] = m.merkleRootBuffer[i].Root
}

return result
}

// ValidRootsPerBlock returns the current valid merkle roots and block numbers
func (m *MerkleRootTracker) ValidRootsPerBlock() []RootsPerBlock {
m.RLock()
defer m.RUnlock()

return m.validMerkleRoots
}

// SetValidRootsPerBlock is used to overwrite the valid merkle roots
func (m *MerkleRootTracker) SetValidRootsPerBlock(roots []RootsPerBlock) {
m.Lock()
defer m.Unlock()

m.validMerkleRoots = roots
}
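The window/buffer split above is easiest to see in miniature; this toy sketch (uint64 roots instead of rln.MerkleNode) reproduces the sliding-window behaviour of pushRoot:

package main

import "fmt"

// toyTracker keeps the latest `window` roots valid and spills older
// ones into a bounded buffer, mirroring MerkleRootTracker in miniature.
type toyTracker struct {
	window int
	valid  []uint64
	buffer []uint64
}

func (t *toyTracker) pushRoot(root uint64) {
	t.valid = append(t.valid, root)
	if len(t.valid) > t.window {
		t.buffer = append(t.buffer, t.valid[0]) // evicted root is kept for fork recovery
		t.valid = t.valid[1:]
	}
}

func main() {
	t := &toyTracker{window: 3}
	for r := uint64(1); r <= 5; r++ {
		t.pushRoot(r)
	}
	fmt.Println(t.valid, t.buffer) // [3 4 5] [1 2]
}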
12 vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/static/static.go generated vendored
@@ -14,7 +14,7 @@ type StaticGroupManager struct {
log *zap.Logger

identityCredential *rln.IdentityCredential
membershipIndex *rln.MembershipIndex
membershipIndex rln.MembershipIndex

group []rln.IDCommitment
rootTracker *group_manager.MerkleRootTracker
@@ -36,7 +36,7 @@ func NewStaticGroupManager(
log: log.Named("rln-static"),
group: group,
identityCredential: &identityCredential,
membershipIndex: &index,
membershipIndex: index,
}, nil
}

@@ -85,12 +85,8 @@ func (gm *StaticGroupManager) IdentityCredentials() (rln.IdentityCredential, error) {
return *gm.identityCredential, nil
}

func (gm *StaticGroupManager) MembershipIndex() (rln.MembershipIndex, error) {
if gm.membershipIndex == nil {
return 0, errors.New("membership index has not been setup")
}

return *gm.membershipIndex, nil
func (gm *StaticGroupManager) MembershipIndex() rln.MembershipIndex {
return gm.membershipIndex
}

// Stop is a function created just to comply with the GroupManager interface (it does nothing)
@@ -2,93 +2,151 @@ package keystore

import (
"bytes"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"sort"
"strings"

"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/waku-org/go-waku/waku/v2/hash"
"github.com/waku-org/go-zerokit-rln/rln"
"go.uber.org/zap"
)

const RLN_CREDENTIALS_FILENAME = "rlnKeystore.json"
const RLN_CREDENTIALS_PASSWORD = "password"
// DefaultCredentialsFilename is the suggested default filename for the rln credentials keystore
const DefaultCredentialsFilename = "./rlnKeystore.json"

type MembershipContract struct {
ChainId string `json:"chainId"`
Address string `json:"address"`
}
// DefaultCredentialsPassword is the suggested default password for the rln credentials store
const DefaultCredentialsPassword = "password"

type MembershipGroup struct {
MembershipContract MembershipContract `json:"membershipContract"`
TreeIndex rln.MembershipIndex `json:"treeIndex"`
}
// New creates a new instance of a rln credentials keystore
func New(path string, appInfo AppInfo, logger *zap.Logger) (*AppKeystore, error) {
logger = logger.Named("rln-keystore")

type MembershipCredentials struct {
IdentityCredential *rln.IdentityCredential `json:"identityCredential"`
MembershipGroups []MembershipGroup `json:"membershipGroups"`
}

type AppInfo struct {
Application string `json:"application"`
AppIdentifier string `json:"appIdentifier"`
Version string `json:"version"`
}

type AppKeystore struct {
Application string `json:"application"`
AppIdentifier string `json:"appIdentifier"`
Credentials []AppKeystoreCredential `json:"credentials"`
Version string `json:"version"`
}

type AppKeystoreCredential struct {
Crypto keystore.CryptoJSON `json:"crypto"`
}

const DefaultSeparator = "\n"

func (m MembershipCredentials) Equals(other MembershipCredentials) bool {
if !rln.IdentityCredentialEquals(*m.IdentityCredential, *other.IdentityCredential) {
return false
if path == "" {
logger.Warn("keystore: no credentials path set, using default path", zap.String("path", DefaultCredentialsFilename))
path = DefaultCredentialsFilename
}

for _, x := range m.MembershipGroups {
found := false
for _, y := range other.MembershipGroups {
if x.Equals(y) {
found = true
break
_, err := os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
// If no keystore exists at path we create a new empty one with passed keystore parameters
err = createAppKeystore(path, appInfo, defaultSeparator)
if err != nil {
return nil, err
}
}
if !found {
return false
} else {
return nil, err
}
}

return true
src, err := os.ReadFile(path)
if err != nil {
return nil, err
}

for _, keystoreBytes := range bytes.Split(src, []byte(defaultSeparator)) {
if len(keystoreBytes) == 0 {
continue
}

keystore := new(AppKeystore)
err := json.Unmarshal(keystoreBytes, keystore)
if err != nil {
continue
}

keystore.logger = logger
keystore.path = path

if keystore.Credentials == nil {
keystore.Credentials = map[Key]appKeystoreCredential{}
}

if keystore.AppIdentifier == appInfo.AppIdentifier && keystore.Application == appInfo.Application && keystore.Version == appInfo.Version {
return keystore, nil
}
}

return nil, errors.New("no keystore found")
}

func (m MembershipGroup) Equals(other MembershipGroup) bool {
return m.MembershipContract.Equals(other.MembershipContract) && m.TreeIndex == other.TreeIndex
func getKey(treeIndex rln.MembershipIndex, filterMembershipContract MembershipContractInfo) (Key, error) {
keyStr := fmt.Sprintf("%s%s%d", filterMembershipContract.ChainID, filterMembershipContract.Address, treeIndex)
hash := hash.SHA256([]byte(keyStr))
return Key(strings.ToUpper(hex.EncodeToString(hash))), nil
}
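A credential's map key is therefore deterministic; this sketch recomputes it with crypto/sha256 directly (hypothetical chain id and address; note that ChainID's String() includes the surrounding quotes):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

func main() {
	chainID := `"0xaa36a7"`                                 // what %s produces for a ChainID (quoted hex)
	address := "0x0000000000000000000000000000000000000000" // hypothetical contract address
	treeIndex := 0

	sum := sha256.Sum256([]byte(fmt.Sprintf("%s%s%d", chainID, address, treeIndex)))
	fmt.Println(strings.ToUpper(hex.EncodeToString(sum[:]))) // the Key under which the credential is stored
}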

func (m MembershipContract) Equals(other MembershipContract) bool {
return m.Address == other.Address && m.ChainId == other.ChainId
// GetMembershipCredentials decrypts and retrieves membership credentials from the keystore applying filters
func (k *AppKeystore) GetMembershipCredentials(keystorePassword string, treeIndex rln.MembershipIndex, filterMembershipContract MembershipContractInfo) (*MembershipCredentials, error) {
key, err := getKey(treeIndex, filterMembershipContract)
if err != nil {
return nil, err
}

credential, ok := k.Credentials[key]
if !ok {
return nil, nil
}

credentialsBytes, err := keystore.DecryptDataV3(credential.Crypto, keystorePassword)
if err != nil {
return nil, err
}

credentials := new(MembershipCredentials)
err = json.Unmarshal(credentialsBytes, credentials)
if err != nil {
return nil, err
}

return credentials, nil
}
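A caller-side sketch, assuming the vendored packages above and hypothetical application values; note the (nil, nil) return when no credential exists under the derived key:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/waku-org/go-waku/waku/v2/protocol/rln/keystore"
	"go.uber.org/zap"
)

func main() {
	// Hypothetical application metadata and keystore path.
	appInfo := keystore.AppInfo{Application: "waku-rln-relay", AppIdentifier: "01234567890abcdef", Version: "0.2"}
	ks, err := keystore.New("./rlnKeystore.json", appInfo, zap.NewNop())
	if err != nil {
		panic(err)
	}

	contract := keystore.NewMembershipContractInfo(big.NewInt(11155111), common.HexToAddress("0x0000000000000000000000000000000000000000"))
	cred, err := ks.GetMembershipCredentials("password", 0, contract)
	if err != nil {
		panic(err)
	}
	fmt.Println(cred) // nil when nothing is stored for (chain id, address, tree index)
}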

func CreateAppKeystore(path string, appInfo AppInfo, separator string) error {
// AddMembershipCredentials inserts a membership credential to the keystore matching the application, appIdentifier and version filters.
func (k *AppKeystore) AddMembershipCredentials(newCredential MembershipCredentials, password string) error {
credentials, err := k.GetMembershipCredentials(password, newCredential.TreeIndex, newCredential.MembershipContractInfo)
if err != nil {
return err
}

key, err := getKey(newCredential.TreeIndex, newCredential.MembershipContractInfo)
if err != nil {
return err
}

if credentials != nil {
return errors.New("credential already present")
}

b, err := json.Marshal(newCredential)
if err != nil {
return err
}

encryptedCredentials, err := keystore.EncryptDataV3(b, []byte(password), keystore.StandardScryptN, keystore.StandardScryptP)
if err != nil {
return err
}

k.Credentials[key] = appKeystoreCredential{Crypto: encryptedCredentials}

return save(k, k.path)
}
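Compared to the removed flow below, where groups were merged and re-sorted per identity, the keyed map makes a duplicate insert fail fast; a hypothetical helper (not part of go-waku) that tolerates re-provisioning might look like:

package rlnutil // hypothetical package

import "github.com/waku-org/go-waku/waku/v2/protocol/rln/keystore"

// AddOnce swallows the duplicate error, so provisioning the same
// credential twice becomes a no-op instead of a failure.
func AddOnce(ks *keystore.AppKeystore, cred keystore.MembershipCredentials, password string) error {
	err := ks.AddMembershipCredentials(cred, password)
	if err != nil && err.Error() == "credential already present" {
		return nil
	}
	return err
}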

func createAppKeystore(path string, appInfo AppInfo, separator string) error {
if separator == "" {
separator = DefaultSeparator
separator = defaultSeparator
}

keystore := AppKeystore{
Application: appInfo.Application,
AppIdentifier: appInfo.AppIdentifier,
Version: appInfo.Version,
Credentials: make(map[Key]appKeystoreCredential),
}

b, err := json.Marshal(keystore)
@@ -105,221 +163,12 @@ func CreateAppKeystore(path string, appInfo AppInfo, separator string) error {
return err
}

return ioutil.WriteFile(path, buffer.Bytes(), 0600)
}

func LoadAppKeystore(path string, appInfo AppInfo, separator string) (AppKeystore, error) {
if separator == "" {
separator = DefaultSeparator
}

_, err := os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
// If no keystore exists at path we create a new empty one with passed keystore parameters
err = CreateAppKeystore(path, appInfo, separator)
if err != nil {
return AppKeystore{}, err
}
} else {
return AppKeystore{}, err
}
}

src, err := os.ReadFile(path)
if err != nil {
return AppKeystore{}, err
}

for _, keystoreBytes := range bytes.Split(src, []byte(separator)) {
if len(keystoreBytes) == 0 {
continue
}

keystore := AppKeystore{}
err := json.Unmarshal(keystoreBytes, &keystore)
if err != nil {
continue
}

if keystore.AppIdentifier == appInfo.AppIdentifier && keystore.Application == appInfo.Application && keystore.Version == appInfo.Version {
return keystore, nil
}
}

return AppKeystore{}, errors.New("no keystore found")
}

func filterCredential(credential MembershipCredentials, filterIdentityCredentials []MembershipCredentials, filterMembershipContracts []MembershipContract) *MembershipCredentials {
if len(filterIdentityCredentials) != 0 {
found := false
for _, filterCreds := range filterIdentityCredentials {
if filterCreds.Equals(credential) {
found = true
}
}
if !found {
return nil
}
}

if len(filterMembershipContracts) != 0 {
var membershipGroupsIntersection []MembershipGroup
for _, filterContract := range filterMembershipContracts {
for _, credentialGroups := range credential.MembershipGroups {
if filterContract.Equals(credentialGroups.MembershipContract) {
membershipGroupsIntersection = append(membershipGroupsIntersection, credentialGroups)
}
}
}
if len(membershipGroupsIntersection) != 0 {
// If we have a match on some groups, we return the credential with filtered groups
return &MembershipCredentials{
IdentityCredential: credential.IdentityCredential,
MembershipGroups: membershipGroupsIntersection,
}
} else {
return nil
}
}

// We hit this return only if
// - filterIdentityCredentials.len() == 0 and filterMembershipContracts.len() == 0 (no filter)
// - filterIdentityCredentials.len() != 0 and filterMembershipContracts.len() == 0 (filter only on identity credential)
// Indeed, filterMembershipContracts.len() != 0 will have its exclusive return based on all values of membershipGroupsIntersection.len()
return &credential
}

func GetMembershipCredentials(logger *zap.Logger, credentialsPath string, password string, appInfo AppInfo, filterIdentityCredentials []MembershipCredentials, filterMembershipContracts []MembershipContract) ([]MembershipCredentials, error) {
k, err := LoadAppKeystore(credentialsPath, appInfo, DefaultSeparator)
if err != nil {
return nil, err
}

var result []MembershipCredentials

for _, credential := range k.Credentials {
credentialsBytes, err := keystore.DecryptDataV3(credential.Crypto, password)
if err != nil {
return nil, err
}

var credentials MembershipCredentials
err = json.Unmarshal(credentialsBytes, &credentials)
if err != nil {
return nil, err
}

filteredCredential := filterCredential(credentials, filterIdentityCredentials, filterMembershipContracts)
if filteredCredential != nil {
result = append(result, *filteredCredential)
}
}

return result, nil
}

// AddMembershipCredentials inserts a membership credential to the keystore matching the application, appIdentifier and version filters.
func AddMembershipCredentials(path string, newIdentityCredential *rln.IdentityCredential, newMembershipGroup MembershipGroup, password string, appInfo AppInfo, separator string) (membershipGroupIndex uint, err error) {
k, err := LoadAppKeystore(path, appInfo, DefaultSeparator)
if err != nil {
return 0, err
}

// A flag to tell us if the keystore contains a credential associated to the input identity credential, i.e. membershipCredential
found := false
for i, existingCredentials := range k.Credentials {
credentialsBytes, err := keystore.DecryptDataV3(existingCredentials.Crypto, password)
if err != nil {
continue
}

var credentials MembershipCredentials
err = json.Unmarshal(credentialsBytes, &credentials)
if err != nil {
continue
}

if rln.IdentityCredentialEquals(*credentials.IdentityCredential, *newIdentityCredential) {
// idCredential is present in keystore. We add the input credential membership group to the one contained in the decrypted keystore credential (we deduplicate groups using sets)
allMembershipsMap := make(map[MembershipGroup]struct{})
for _, m := range credentials.MembershipGroups {
allMembershipsMap[m] = struct{}{}
}
allMembershipsMap[newMembershipGroup] = struct{}{}

// We sort membership groups, otherwise we will not have deterministic results in tests
var allMemberships []MembershipGroup
for k := range allMembershipsMap {
allMemberships = append(allMemberships, k)
}
sort.Slice(allMemberships, func(i, j int) bool {
return allMemberships[i].MembershipContract.Address < allMemberships[j].MembershipContract.Address
})

// we define the updated credential with the updated membership sets
updatedCredential := MembershipCredentials{
IdentityCredential: newIdentityCredential,
MembershipGroups: allMemberships,
}

// we re-encrypt creating a new keyfile
b, err := json.Marshal(updatedCredential)
if err != nil {
return 0, err
}

encryptedCredentials, err := keystore.EncryptDataV3(b, []byte(password), keystore.StandardScryptN, keystore.StandardScryptP)
if err != nil {
return 0, err
}

// we update the original credential field in keystoreCredentials
k.Credentials[i] = AppKeystoreCredential{Crypto: encryptedCredentials}

found = true

// We setup the return values
membershipGroupIndex = uint(len(allMemberships))
for mIdx, mg := range updatedCredential.MembershipGroups {
if mg.MembershipContract.Equals(newMembershipGroup.MembershipContract) {
membershipGroupIndex = uint(mIdx)
break
}
}

// We stop decrypting other credentials in the keystore
break
}
}

if !found { // Not found
newCredential := MembershipCredentials{
IdentityCredential: newIdentityCredential,
MembershipGroups: []MembershipGroup{newMembershipGroup},
}

b, err := json.Marshal(newCredential)
if err != nil {
return 0, err
}

encryptedCredentials, err := keystore.EncryptDataV3(b, []byte(password), keystore.StandardScryptN, keystore.StandardScryptP)
if err != nil {
return 0, err
}

k.Credentials = append(k.Credentials, AppKeystoreCredential{Crypto: encryptedCredentials})

membershipGroupIndex = uint(len(newCredential.MembershipGroups) - 1)
}

return membershipGroupIndex, save(k, path, separator)
return os.WriteFile(path, buffer.Bytes(), 0600)
}

// Safely saves a Keystore's JSON to disk.
// If the destination file exists, it is renamed with extension .bkp; the file is then written at its destination, and the .bkp file is removed if the write succeeds, otherwise it is restored
func save(keystore AppKeystore, path string, separator string) error {
func save(keystore *AppKeystore, path string) error {
// We first backup the current keystore
_, err := os.Stat(path)
if err == nil {
@@ -329,16 +178,12 @@ func save(keystore AppKeystore, path string, separator string) error {
}
}

if separator == "" {
separator = DefaultSeparator
}

b, err := json.Marshal(keystore)
if err != nil {
return err
}

b = append(b, []byte(separator)...)
b = append(b, []byte(defaultSeparator)...)

buffer := new(bytes.Buffer)

@@ -351,7 +196,7 @@ func save(keystore AppKeystore, path string, separator string) error {
return err
}

err = ioutil.WriteFile(path, buffer.Bytes(), 0600)
err = os.WriteFile(path, buffer.Bytes(), 0600)
if err != nil {
restoreErr := os.Rename(path, path+".bkp")
if restoreErr != nil {
118 vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/keystore/types.go generated vendored Normal file
@@ -0,0 +1,118 @@
package keystore

import (
"fmt"
"math/big"
"strings"

"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/waku-org/go-zerokit-rln/rln"
"go.uber.org/zap"
)

// MembershipContractInfo contains information about a membership smart contract address and the chain in which it is deployed
type MembershipContractInfo struct {
ChainID ChainID `json:"chainId"`
Address ContractAddress `json:"address"`
}

// NewMembershipContractInfo generates a new MembershipContractInfo instance
func NewMembershipContractInfo(chainID *big.Int, address common.Address) MembershipContractInfo {
return MembershipContractInfo{
ChainID: ChainID{
chainID,
},
Address: ContractAddress(address),
}
}

// ContractAddress is a common.Address created to comply with the expected marshalling for the credentials
type ContractAddress common.Address

// MarshalText is used to convert a ContractAddress into a valid value expected by the json encoder
func (c ContractAddress) MarshalText() ([]byte, error) {
return []byte(common.Address(c).Hex()), nil
}

// UnmarshalText converts a byte slice into a ContractAddress
func (c *ContractAddress) UnmarshalText(text []byte) error {
b, err := hexutil.Decode(string(text))
if err != nil {
return err
}
copy(c[:], b[:])
return nil
}

// ChainID is a helper struct created to comply with the expected marshalling for the credentials
type ChainID struct {
*big.Int
}

// String returns a string with the expected chainId format for the credentials
func (c ChainID) String() string {
return fmt.Sprintf(`"%s"`, hexutil.EncodeBig(c.Int))
}

// MarshalJSON is used to convert a ChainID into a valid value expected by the json encoder
func (c ChainID) MarshalJSON() (text []byte, err error) {
return []byte(c.String()), nil
}

// UnmarshalJSON converts a byte slice into a ChainID
func (c *ChainID) UnmarshalJSON(text []byte) error {
hexVal := strings.ReplaceAll(string(text), `"`, "")
b, err := hexutil.DecodeBig(hexVal)
if err != nil {
return err
}

c.Int = b
return nil
}
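Round-tripping the type shows the wire format these methods exist for (hypothetical chain id):

package main

import (
	"encoding/json"
	"fmt"
	"math/big"

	"github.com/waku-org/go-waku/waku/v2/protocol/rln/keystore"
)

func main() {
	out, err := json.Marshal(keystore.ChainID{big.NewInt(11155111)})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // "0xaa36a7" — quoted hex, as the credentials expect

	var back keystore.ChainID
	if err := json.Unmarshal(out, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Int) // 11155111
}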

// Equals is used to compare MembershipContractInfo
func (m MembershipContractInfo) Equals(other MembershipContractInfo) bool {
return m.Address == other.Address && m.ChainID.Int64() == other.ChainID.Int64()
}

// MembershipCredentials contains all the information about an RLN Identity Credential and the membership group it belongs to
type MembershipCredentials struct {
IdentityCredential *rln.IdentityCredential `json:"identityCredential"`
MembershipContractInfo MembershipContractInfo `json:"membershipContract"`
TreeIndex rln.MembershipIndex `json:"treeIndex"`
}

// Equals is used to compare MembershipCredentials
func (m MembershipCredentials) Equals(other MembershipCredentials) bool {
return rln.IdentityCredentialEquals(*m.IdentityCredential, *other.IdentityCredential) && m.MembershipContractInfo.Equals(other.MembershipContractInfo) && m.TreeIndex == other.TreeIndex
}

// AppInfo is a helper structure that contains information about the application that uses these credentials
type AppInfo struct {
Application string `json:"application"`
AppIdentifier string `json:"appIdentifier"`
Version string `json:"version"`
}

// Key is a helper type created to represent the key in a map of credentials
type Key string

// AppKeystore represents the membership credentials to be used in RLN
type AppKeystore struct {
Application string `json:"application"`
AppIdentifier string `json:"appIdentifier"`
Credentials map[Key]appKeystoreCredential `json:"credentials"`
Version string `json:"version"`

path string
logger *zap.Logger
}

type appKeystoreCredential struct {
Crypto keystore.CryptoJSON `json:"crypto"`
}

const defaultSeparator = "\n"

@@ -25,7 +25,7 @@ import (
type GroupManager interface {
Start(ctx context.Context, rln *rln.RLN, rootTracker *group_manager.MerkleRootTracker) error
IdentityCredentials() (rln.IdentityCredential, error)
MembershipIndex() (rln.MembershipIndex, error)
MembershipIndex() rln.MembershipIndex
Stop() error
}
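The signature change turns "not set up" from a runtime error into a guarantee of Start; a stub satisfying just this method illustrates the before/after (a sketch, not go-waku code):

package main

import "fmt"

type membershipIndex = uint // stand-in for rln.MembershipIndex

// stubManager models a group manager after the change: the index is plain
// data, valid once Start has succeeded, so callers drop their error handling.
type stubManager struct{ index membershipIndex }

func (s stubManager) MembershipIndex() membershipIndex { return s.index }

func main() {
	// before: idx, err := gm.MembershipIndex(); if err != nil { ... }
	// after:
	idx := stubManager{index: 3}.MembershipIndex()
	fmt.Println(idx) // 3
}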

@@ -356,10 +356,7 @@ func (rlnRelay *WakuRLNRelay) generateProof(input []byte, epoch rln.Epoch) (*pb.RateLimitProof, error) {
return nil, err
}

membershipIndex, err := rlnRelay.groupManager.MembershipIndex()
if err != nil {
return nil, err
}
membershipIndex := rlnRelay.groupManager.MembershipIndex()

proof, err := rlnRelay.RLN.GenerateProof(input, identityCredentials, membershipIndex, epoch)
if err != nil {
@@ -381,6 +378,6 @@ func (rlnRelay *WakuRLNRelay) IdentityCredential() (rln.IdentityCredential, error) {
return rlnRelay.groupManager.IdentityCredentials()
}

func (rlnRelay *WakuRLNRelay) MembershipIndex() (uint, error) {
func (rlnRelay *WakuRLNRelay) MembershipIndex() uint {
return rlnRelay.groupManager.MembershipIndex()
}
129 vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/web3/web3.go generated vendored Normal file
@@ -0,0 +1,129 @@
package web3

import (
"context"
"errors"
"math/big"

"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/waku-org/go-waku/waku/v2/protocol/rln/contracts"
)

// RegistryContract contains an instance of the RLN Registry contract and its address
type RegistryContract struct {
*contracts.RLNRegistry
Address common.Address
}

// RLNContract contains an instance of the RLN contract, its address and the storage index within the registry
// that represents this contract
type RLNContract struct {
*contracts.RLN
Address common.Address
StorageIndex uint16
DeployedBlockNumber uint64
}

// Config is a helper struct that contains attributes for interaction with RLN smart contracts
type Config struct {
configured bool

ETHClientAddress string
ETHClient *ethclient.Client
ChainID *big.Int
RegistryContract RegistryContract
RLNContract RLNContract
}

// NewConfig creates an instance of web3 Config
func NewConfig(ethClientAddress string, registryAddress common.Address) *Config {
return &Config{
ETHClientAddress: ethClientAddress,
RegistryContract: RegistryContract{
Address: registryAddress,
},
}
}

// BuildConfig returns an instance of Web3Config with all the required elements for interaction with the RLN smart contracts
func BuildConfig(ctx context.Context, ethClientAddress string, registryAddress common.Address) (*Config, error) {
ethClient, err := ethclient.DialContext(ctx, ethClientAddress)
if err != nil {
return nil, err
}

chainID, err := ethClient.ChainID(ctx)
if err != nil {
return nil, err
}

rlnRegistry, err := contracts.NewRLNRegistry(registryAddress, ethClient)
if err != nil {
return nil, err
}

storageIndex, err := rlnRegistry.UsingStorageIndex(&bind.CallOpts{Context: ctx})
if err != nil {
return nil, err
}

rlnContractAddress, err := rlnRegistry.Storages(&bind.CallOpts{Context: ctx}, storageIndex)
if err != nil {
return nil, err
}

rlnContract, err := contracts.NewRLN(rlnContractAddress, ethClient)
if err != nil {
return nil, err
}

deploymentBlockNumber, err := rlnContract.DeployedBlockNumber(&bind.CallOpts{Context: ctx})
if err != nil {
return nil, err
}

return &Config{
configured: true,
ETHClientAddress: ethClientAddress,
ETHClient: ethClient,
ChainID: chainID,
RegistryContract: RegistryContract{
RLNRegistry: rlnRegistry,
Address: registryAddress,
},
RLNContract: RLNContract{
RLN: rlnContract,
Address: rlnContractAddress,
StorageIndex: storageIndex,
DeployedBlockNumber: uint64(deploymentBlockNumber),
},
}, nil
}
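Usage is a single call when the endpoint is known up front (hypothetical endpoint and registry address; a websocket URL is assumed since watchNewEvents needs subscriptions):

package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/waku-org/go-waku/waku/v2/protocol/rln/web3"
)

func main() {
	// BuildConfig dials the node, asks the registry for its active storage
	// index, and resolves the RLN contract plus its deployment block in one call.
	cfg, err := web3.BuildConfig(context.Background(),
		"wss://eth-node.example.org/ws", // hypothetical endpoint
		common.HexToAddress("0x0000000000000000000000000000000000000000")) // hypothetical registry
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.ChainID, cfg.RLNContract.Address, cfg.RLNContract.DeployedBlockNumber)
}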

// Build sets up the Config object by instantiating the eth client and contracts
func (w *Config) Build(ctx context.Context) error {
if w.configured {
return errors.New("already configured")
}

if w.ETHClientAddress == "" {
return errors.New("no eth client address")
}

var zeroAddr common.Address
if w.RegistryContract.Address == zeroAddr {
return errors.New("no registry contract address")
}

newW, err := BuildConfig(ctx, w.ETHClientAddress, w.RegistryContract.Address)
if err != nil {
return err
}

*w = *newW
w.configured = true

return nil
}
@@ -20,8 +20,8 @@ const ClusterIndex = 1
const GenerationZeroShardsCount = 8

type RelayShards struct {
Cluster uint16
Indices []uint16
Cluster uint16 `json:"cluster"`
Indices []uint16 `json:"indices"`
}

func NewRelayShards(cluster uint16, indices ...uint16) (RelayShards, error) {
@@ -217,7 +217,7 @@ func FromBitVector(buf []byte) (RelayShards, error) {

// GetShardFromContentTopic runs the Autosharding logic and returns a pubsub topic
// This is based on the Autosharding algorithm defined in RFC 51
func GetShardFromContentTopic(topic ContentTopic, shardCount int) NamespacedPubsubTopic {
func GetShardFromContentTopic(topic ContentTopic, shardCount int) StaticShardingPubsubTopic {
bytes := []byte(topic.ApplicationName)
bytes = append(bytes, []byte(fmt.Sprintf("%d", topic.ApplicationVersion))...)
@@ -53,6 +53,10 @@ bool atomic_operation(struct RLN *ctx,
const struct Buffer *leaves_buffer,
const struct Buffer *indices_buffer);

bool seq_atomic_operation(struct RLN *ctx,
const struct Buffer *leaves_buffer,
const struct Buffer *indices_buffer);

bool get_root(const struct RLN *ctx, struct Buffer *output_buffer);

bool get_proof(const struct RLN *ctx, uintptr_t index, struct Buffer *output_buffer);

@@ -213,6 +213,12 @@ func (r *RLN) AtomicOperation(index uint, leaves []byte, indices []byte) bool {
return bool(C.atomic_operation(r.ptr, C.uintptr_t(index), leavesBuffer, indicesBuffer))
}

func (r *RLN) SeqAtomicOperation(leaves []byte, indices []byte) bool {
leavesBuffer := toCBufferPtr(leaves)
indicesBuffer := toCBufferPtr(indices)
return bool(C.seq_atomic_operation(r.ptr, leavesBuffer, indicesBuffer))
}
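seq_atomic_operation differs from atomic_operation only in letting zerokit pick the insertion index itself; the same hunk recurs below for each vendored zerokit target (apple, arm, x86_64, per the vendor/modules.txt section at the end). As a rough model of the all-or-nothing semantics (the real buffer encoding is handled by the existing toCBufferPtr helpers, and the exact delete rules are an assumption):

package main

import "fmt"

// toyTree models an atomic batch over a leaf set: validate everything
// first, then apply, so a failed batch leaves the tree untouched.
type toyTree struct{ leaves []string }

func (t *toyTree) seqAtomicOperation(toInsert []string, toDelete []int) bool {
	for _, i := range toDelete {
		if i < 0 || i >= len(t.leaves) {
			return false // reject the whole batch
		}
	}
	for _, i := range toDelete {
		t.leaves[i] = "" // model: deleted leaves become the zero leaf
	}
	t.leaves = append(t.leaves, toInsert...) // appended at the current frontier
	return true
}

func main() {
	t := &toyTree{}
	fmt.Println(t.seqAtomicOperation([]string{"a", "b"}, nil), t.leaves) // true [a b]
}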

func (r *RLN) DeleteLeaf(index uint) bool {
return bool(C.delete_leaf(r.ptr, C.uintptr_t(index)))
}

@@ -53,6 +53,10 @@ bool atomic_operation(struct RLN *ctx,
const struct Buffer *leaves_buffer,
const struct Buffer *indices_buffer);

bool seq_atomic_operation(struct RLN *ctx,
const struct Buffer *leaves_buffer,
const struct Buffer *indices_buffer);

bool get_root(const struct RLN *ctx, struct Buffer *output_buffer);

bool get_proof(const struct RLN *ctx, uintptr_t index, struct Buffer *output_buffer);

@@ -213,6 +213,12 @@ func (r *RLN) AtomicOperation(index uint, leaves []byte, indices []byte) bool {
return bool(C.atomic_operation(r.ptr, C.uintptr_t(index), leavesBuffer, indicesBuffer))
}

func (r *RLN) SeqAtomicOperation(leaves []byte, indices []byte) bool {
leavesBuffer := toCBufferPtr(leaves)
indicesBuffer := toCBufferPtr(indices)
return bool(C.seq_atomic_operation(r.ptr, leavesBuffer, indicesBuffer))
}

func (r *RLN) DeleteLeaf(index uint) bool {
return bool(C.delete_leaf(r.ptr, C.uintptr_t(index)))
}

@@ -53,6 +53,10 @@ bool atomic_operation(struct RLN *ctx,
const struct Buffer *leaves_buffer,
const struct Buffer *indices_buffer);

bool seq_atomic_operation(struct RLN *ctx,
const struct Buffer *leaves_buffer,
const struct Buffer *indices_buffer);

bool get_root(const struct RLN *ctx, struct Buffer *output_buffer);

bool get_proof(const struct RLN *ctx, uintptr_t index, struct Buffer *output_buffer);

@@ -213,6 +213,12 @@ func (r *RLN) AtomicOperation(index uint, leaves []byte, indices []byte) bool {
return bool(C.atomic_operation(r.ptr, C.uintptr_t(index), leavesBuffer, indicesBuffer))
}

func (r *RLN) SeqAtomicOperation(leaves []byte, indices []byte) bool {
leavesBuffer := toCBufferPtr(leaves)
indicesBuffer := toCBufferPtr(indices)
return bool(C.seq_atomic_operation(r.ptr, leavesBuffer, indicesBuffer))
}

func (r *RLN) DeleteLeaf(index uint) bool {
return bool(C.delete_leaf(r.ptr, C.uintptr_t(index)))
}
@@ -95,6 +95,10 @@ func (i RLNWrapper) AtomicOperation(index uint, leaves []byte, indices []byte) bool {
return i.ffi.AtomicOperation(index, leaves, indices)
}

func (i RLNWrapper) SeqAtomicOperation(leaves []byte, indices []byte) bool {
return i.ffi.SeqAtomicOperation(leaves, indices)
}

func (i RLNWrapper) RecoverIDSecret(proof1 []byte, proof2 []byte) ([]byte, error) {
return i.ffi.RecoverIDSecret(proof1, proof2)
}

@@ -94,6 +94,10 @@ func (i RLNWrapper) AtomicOperation(index uint, leaves []byte, indices []byte) bool {
return i.ffi.AtomicOperation(index, leaves, indices)
}

func (i RLNWrapper) SeqAtomicOperation(leaves []byte, indices []byte) bool {
return i.ffi.SeqAtomicOperation(leaves, indices)
}

func (i RLNWrapper) RecoverIDSecret(proof1 []byte, proof2 []byte) ([]byte, error) {
return i.ffi.RecoverIDSecret(proof1, proof2)
}

@@ -95,6 +95,10 @@ func (i RLNWrapper) AtomicOperation(index uint, leaves []byte, indices []byte) bool {
return i.ffi.AtomicOperation(index, leaves, indices)
}

func (i RLNWrapper) SeqAtomicOperation(leaves []byte, indices []byte) bool {
return i.ffi.SeqAtomicOperation(leaves, indices)
}

func (i RLNWrapper) RecoverIDSecret(proof1 []byte, proof2 []byte) ([]byte, error) {
return i.ffi.RecoverIDSecret(proof1, proof2)
}
@@ -1008,7 +1008,7 @@ github.com/waku-org/go-discover/discover/v5wire
github.com/waku-org/go-libp2p-rendezvous
github.com/waku-org/go-libp2p-rendezvous/db
github.com/waku-org/go-libp2p-rendezvous/pb
# github.com/waku-org/go-waku v0.7.1-0.20230829115339-8ad08d6b0481
# github.com/waku-org/go-waku v0.7.1-0.20230907093131-092811658ea3
## explicit; go 1.19
github.com/waku-org/go-waku/logging
github.com/waku-org/go-waku/waku/persistence
@@ -1037,22 +1037,23 @@ github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager
github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/dynamic
github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/static
github.com/waku-org/go-waku/waku/v2/protocol/rln/keystore
github.com/waku-org/go-waku/waku/v2/protocol/rln/web3
github.com/waku-org/go-waku/waku/v2/protocol/store
github.com/waku-org/go-waku/waku/v2/protocol/store/pb
github.com/waku-org/go-waku/waku/v2/rendezvous
github.com/waku-org/go-waku/waku/v2/timesource
github.com/waku-org/go-waku/waku/v2/utils
# github.com/waku-org/go-zerokit-rln v0.1.14-0.20230823150836-a706089284fe
# github.com/waku-org/go-zerokit-rln v0.1.14-0.20230905214645-ca686a02e816
## explicit; go 1.18
github.com/waku-org/go-zerokit-rln/rln
github.com/waku-org/go-zerokit-rln/rln/link
# github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230821155521-70f1ff564bae
# github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230905213302-1d6d18a03e7c
## explicit; go 1.19
github.com/waku-org/go-zerokit-rln-apple/rln
# github.com/waku-org/go-zerokit-rln-arm v0.0.0-20230807124929-ea702b1b4305
# github.com/waku-org/go-zerokit-rln-arm v0.0.0-20230905183322-05f4cda61468
## explicit; go 1.19
github.com/waku-org/go-zerokit-rln-arm/rln
# github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230807124913-ea636e5b4005
# github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230905182930-2b11e72ef866
## explicit; go 1.19
github.com/waku-org/go-zerokit-rln-x86_64/rln
# github.com/wealdtech/go-ens/v3 v3.5.0