chore: bump go-waku

parent 98d3b4198b
commit acad6e4958
go.mod (5 changed lines)

@@ -79,7 +79,7 @@ require (
 	github.com/ladydascalie/currency v1.6.0
 	github.com/meirf/gopart v0.0.0-20180520194036-37e9492a85a8
 	github.com/schollz/peerdiscovery v1.7.0
-	github.com/waku-org/go-waku v0.5.3-0.20230404182041-41691a44e579
+	github.com/waku-org/go-waku v0.5.3-0.20230509204224-d9a12bf079a8
 	github.com/yeqown/go-qrcode/v2 v2.2.1
 	github.com/yeqown/go-qrcode/writer/standard v1.2.1
 	go.uber.org/multierr v1.8.0
@@ -241,10 +241,11 @@ require (
 	github.com/tyler-smith/go-bip39 v1.1.0 // indirect
 	github.com/urfave/cli/v2 v2.24.4 // indirect
 	github.com/waku-org/go-discover v0.0.0-20221209174356-61c833f34d98 // indirect
-	github.com/waku-org/go-zerokit-rln v0.1.11 // indirect
+	github.com/waku-org/go-zerokit-rln v0.1.12 // indirect
 	github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230331231302-258cacb91327 // indirect
 	github.com/waku-org/go-zerokit-rln-arm v0.0.0-20230331223149-f90e66aebb0d // indirect
 	github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230331181847-cba74520bae9 // indirect
+	github.com/wk8/go-ordered-map v1.0.0 // indirect
 	github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
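A bump like this is typically produced by running "go get github.com/waku-org/go-waku@d9a12bf079a8" followed by "go mod tidy" (the exact invocation is an assumption; it is not recorded in the commit). The go-zerokit-rln bump and the new wk8/go-ordered-map requirement above are transitive fallout of the same update, and go.sum below changes in lockstep.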
go.sum (10 changed lines)

@@ -2102,10 +2102,10 @@ github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1
 github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
 github.com/waku-org/go-discover v0.0.0-20221209174356-61c833f34d98 h1:xwY0kW5XZFimdqfZb9cZwT1S3VJP9j3AE6bdNd9boXM=
 github.com/waku-org/go-discover v0.0.0-20221209174356-61c833f34d98/go.mod h1:eBHgM6T4EG0RZzxpxKy+rGz/6Dw2Nd8DWxS0lm9ESDw=
-github.com/waku-org/go-waku v0.5.3-0.20230404182041-41691a44e579 h1:M8Q35R35VZvw05OAzE8x5Gbu7bsDcTX8yBAyJvbTY40=
-github.com/waku-org/go-waku v0.5.3-0.20230404182041-41691a44e579/go.mod h1:42KC3R7HOi16QwvREB+8LLV2EzhTRgZ4qE+2jxZAEy8=
-github.com/waku-org/go-zerokit-rln v0.1.11 h1:e4veZm80uYkW1r43a5f47YBUUhRELpLx3Bge9EyfuI8=
-github.com/waku-org/go-zerokit-rln v0.1.11/go.mod h1:MUW+wB6Yj7UBMdZrhko7oHfUZeY2wchggXYjpUiMoac=
+github.com/waku-org/go-waku v0.5.3-0.20230509204224-d9a12bf079a8 h1:1agRxCtCBoCaMB/72L87bZgyvCAEMUoBL6l0MImpV2Y=
+github.com/waku-org/go-waku v0.5.3-0.20230509204224-d9a12bf079a8/go.mod h1:6AXlCiXueZC7XbvG1LUi0uEOMS2n/30h2kjXzW8zfYY=
+github.com/waku-org/go-zerokit-rln v0.1.12 h1:66+tU6sTlmUpuUlEv7kCFOGZ37MwZYFJBXHcm8QquwU=
+github.com/waku-org/go-zerokit-rln v0.1.12/go.mod h1:MUW+wB6Yj7UBMdZrhko7oHfUZeY2wchggXYjpUiMoac=
 github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230331231302-258cacb91327 h1:Q5XQqo+PEmvrybT8D7BEsKCwIYDi80s+00Q49cfm9Gs=
 github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230331231302-258cacb91327/go.mod h1:KYykqtdApHVYZ3G0spwMnoxc5jH5eI3jyO9SwsSfi48=
 github.com/waku-org/go-zerokit-rln-arm v0.0.0-20230331223149-f90e66aebb0d h1:Kcg85Y2xGU6hqZ/kMfkLQF2jAog8vt+tw1/VNidzNtE=
@@ -2125,6 +2125,8 @@ github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT
 github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
 github.com/willf/bloom v0.0.0-20170505221640-54e3b963ee16/go.mod h1:MmAltL9pDMNTrvUkxdg0k0q5I0suxmuwp3KbyrZLOZ8=
 github.com/willf/bloom v2.0.3+incompatible/go.mod h1:MmAltL9pDMNTrvUkxdg0k0q5I0suxmuwp3KbyrZLOZ8=
+github.com/wk8/go-ordered-map v1.0.0 h1:BV7z+2PaK8LTSd/mWgY12HyMAo5CEgkHqbkVq2thqr8=
+github.com/wk8/go-ordered-map v1.0.0/go.mod h1:9ZIbRunKbuvfPKyBP1SIKLcXNlv74YCOZ3t3VTS6gRk=
 github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs=
 github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
 github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
@@ -9,16 +9,19 @@ import (
 	"sync"
 	"time"

+	"github.com/waku-org/go-waku/waku/v2/metrics"
 	"github.com/waku-org/go-waku/waku/v2/protocol"
 	wpb "github.com/waku-org/go-waku/waku/v2/protocol/pb"
 	"github.com/waku-org/go-waku/waku/v2/protocol/store/pb"
 	"github.com/waku-org/go-waku/waku/v2/timesource"
 	"github.com/waku-org/go-waku/waku/v2/utils"
+	"go.opencensus.io/stats"
 	"go.uber.org/zap"
 )

 type MessageProvider interface {
 	GetAll() ([]StoredMessage, error)
+	Validate(env *protocol.Envelope) error
 	Put(env *protocol.Envelope) error
 	Query(query *pb.HistoryQuery) ([]StoredMessage, error)
 	MostRecentTimestamp() (int64, error)
@@ -27,10 +30,15 @@ type MessageProvider interface {
 }

 var ErrInvalidCursor = errors.New("invalid cursor")
+var ErrFutureMessage = errors.New("message timestamp in the future")
+var ErrMessageTooOld = errors.New("message too old")

 // WALMode for sqlite.
 const WALMode = "wal"

+// MaxTimeVariance is the maximum duration in the future allowed for a message timestamp
+const MaxTimeVariance = time.Duration(20) * time.Second
+
 // DBStore is a MessageProvider that has a *sql.DB connection
 type DBStore struct {
 	MessageProvider
@@ -152,18 +160,39 @@ func (d *DBStore) Start(ctx context.Context, timesource timesource.Timesource) e
 	d.cancel = cancel
 	d.timesource = timesource

-	err := d.cleanOlderRecords()
+	err := d.cleanOlderRecords(ctx)
 	if err != nil {
 		return err
 	}

-	d.wg.Add(1)
+	d.wg.Add(2)
 	go d.checkForOlderRecords(ctx, 60*time.Second)
+	go d.updateMetrics(ctx)

 	return nil
 }

-func (d *DBStore) cleanOlderRecords() error {
+func (store *DBStore) updateMetrics(ctx context.Context) {
+	ticker := time.NewTicker(5 * time.Second)
+	defer ticker.Stop()
+	defer store.wg.Done()
+
+	for {
+		select {
+		case <-ticker.C:
+			msgCount, err := store.Count()
+			if err != nil {
+				store.log.Error("updating store metrics", zap.Error(err))
+			} else {
+				metrics.RecordArchiveMessage(ctx, "stored", msgCount)
+			}
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+func (d *DBStore) cleanOlderRecords(ctx context.Context) error {
 	d.log.Info("Cleaning older records...")

 	// Delete older messages
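The new updateMetrics goroutine above follows a common Go shape: a ticker-driven loop tied into the component's WaitGroup that exits on context cancellation. A minimal standalone sketch of the same pattern (names here are illustrative, not go-waku's):

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// runPeriodic calls fn on every tick until ctx is cancelled, mirroring
// the ticker / ctx.Done / wg.Done structure of updateMetrics.
func runPeriodic(ctx context.Context, wg *sync.WaitGroup, interval time.Duration, fn func()) {
	defer wg.Done()
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			fn()
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	var wg sync.WaitGroup
	wg.Add(1)
	go runPeriodic(ctx, &wg, 100*time.Millisecond, func() { fmt.Println("collect metric") })
	time.Sleep(350 * time.Millisecond)
	cancel()
	wg.Wait() // matches the d.wg.Add(2) bookkeeping in Start above
}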
@@ -172,6 +201,7 @@ func (d *DBStore) cleanOlderRecords() error {
 		sqlStmt := `DELETE FROM message WHERE receiverTimestamp < $1`
 		_, err := d.db.Exec(sqlStmt, utils.GetUnixEpochFrom(d.timesource.Now().Add(-d.maxDuration)))
 		if err != nil {
+			metrics.RecordArchiveError(ctx, "retpolicy_failure")
 			return err
 		}
 		elapsed := time.Since(start)
@@ -184,6 +214,7 @@ func (d *DBStore) cleanOlderRecords() error {
 		sqlStmt := `DELETE FROM message WHERE id IN (SELECT id FROM message ORDER BY receiverTimestamp DESC LIMIT -1 OFFSET $1)`
 		_, err := d.db.Exec(sqlStmt, d.maxMessages)
 		if err != nil {
+			metrics.RecordArchiveError(ctx, "retpolicy_failure")
 			return err
 		}
 		elapsed := time.Since(start)
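A note on the retention statement above: in SQLite a negative LIMIT means "no limit", so LIMIT -1 OFFSET $1 selects every row after the first $1 when ordered by receiverTimestamp descending; the outer DELETE therefore keeps only the maxMessages newest messages and removes the rest.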
@@ -206,7 +237,7 @@ func (d *DBStore) checkForOlderRecords(ctx context.Context, t time.Duration) {
 		case <-ctx.Done():
 			return
 		case <-ticker.C:
-			err := d.cleanOlderRecords()
+			err := d.cleanOlderRecords(ctx)
 			if err != nil {
 				d.log.Error("cleaning older records", zap.Error(err))
 			}
@@ -225,19 +256,41 @@ func (d *DBStore) Stop() {
 	d.db.Close()
 }

+func (d *DBStore) Validate(env *protocol.Envelope) error {
+	n := time.Unix(0, env.Index().ReceiverTime)
+	upperBound := n.Add(MaxTimeVariance)
+	lowerBound := n.Add(-MaxTimeVariance)
+
+	// Ensure that messages don't "jump" to the front of the queue with future timestamps
+	if env.Message().Timestamp > upperBound.UnixNano() {
+		return ErrFutureMessage
+	}
+
+	if env.Message().Timestamp < lowerBound.UnixNano() {
+		return ErrMessageTooOld
+	}
+
+	return nil
+}
+
 // Put inserts a WakuMessage into the DB
 func (d *DBStore) Put(env *protocol.Envelope) error {
 	stmt, err := d.db.Prepare("INSERT INTO message (id, receiverTimestamp, senderTimestamp, contentTopic, pubsubTopic, payload, version) VALUES ($1, $2, $3, $4, $5, $6, $7)")
 	if err != nil {
+		metrics.RecordArchiveError(context.TODO(), "insert_failure")
 		return err
 	}

 	cursor := env.Index()
 	dbKey := NewDBKey(uint64(cursor.SenderTime), uint64(cursor.ReceiverTime), env.PubsubTopic(), env.Index().Digest)

+	start := time.Now()
 	_, err = stmt.Exec(dbKey.Bytes(), cursor.ReceiverTime, env.Message().Timestamp, env.Message().ContentTopic, env.PubsubTopic(), env.Message().Payload, env.Message().Version)
 	if err != nil {
 		return err
 	}
+	ellapsed := time.Since(start)
+	stats.Record(context.Background(), metrics.ArchiveInsertDurationSeconds.M(int64(ellapsed.Seconds())))

 	err = stmt.Close()
 	if err != nil {
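The Validate method added above rejects messages whose timestamp strays more than MaxTimeVariance from the receiver time in either direction. A self-contained sketch of the same bounds check, simplified to plain int64 nanosecond timestamps rather than go-waku's Envelope type:

package main

import (
	"errors"
	"fmt"
	"time"
)

const maxTimeVariance = 20 * time.Second // mirrors MaxTimeVariance above

var (
	errFutureMessage = errors.New("message timestamp in the future")
	errMessageTooOld = errors.New("message too old")
)

// validateTimestamp accepts a message only if its timestamp lies within
// maxTimeVariance of the receiver's clock.
func validateTimestamp(receiverTimeNs, msgTimestampNs int64) error {
	n := time.Unix(0, receiverTimeNs)
	if msgTimestampNs > n.Add(maxTimeVariance).UnixNano() {
		return errFutureMessage
	}
	if msgTimestampNs < n.Add(-maxTimeVariance).UnixNano() {
		return errMessageTooOld
	}
	return nil
}

func main() {
	now := time.Now().UnixNano()
	fmt.Println(validateTimestamp(now, now))                           // <nil>
	fmt.Println(validateTimestamp(now, now+time.Minute.Nanoseconds())) // future
	fmt.Println(validateTimestamp(now, now-time.Minute.Nanoseconds())) // too old
}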
@@ -352,10 +405,13 @@ func (d *DBStore) Query(query *pb.HistoryQuery) (*pb.Index, []StoredMessage, err
 	pageSize := query.PagingInfo.PageSize + 1

 	parameters = append(parameters, pageSize)
+	measurementStart := time.Now()
 	rows, err := stmt.Query(parameters...)
 	if err != nil {
 		return nil, nil, err
 	}
+	ellapsed := time.Since(measurementStart)
+	stats.Record(context.Background(), metrics.ArchiveQueryDurationSeconds.M(int64(ellapsed.Seconds())))

 	var result []StoredMessage
 	for rows.Next() {
@@ -1,175 +0,0 @@
-package v2
-
-import (
-	"github.com/waku-org/go-waku/waku/v2/protocol"
-)
-
-// Adapted from https://github.com/dustin/go-broadcast/commit/f664265f5a662fb4d1df7f3533b1e8d0e0277120
-// by Dustin Sallings (c) 2013, which was released under MIT license
-
-type doneCh chan struct{}
-
-type chOperation struct {
-	ch    chan<- *protocol.Envelope
-	topic *string
-	done  doneCh
-}
-
-type broadcastOutputs map[chan<- *protocol.Envelope]struct{}
-
-type broadcaster struct {
-	input chan *protocol.Envelope
-	reg   chan chOperation
-	unreg chan chOperation
-
-	outputs         broadcastOutputs
-	outputsPerTopic map[string]broadcastOutputs
-}
-
-// The Broadcaster interface describes the main entry points to
-// broadcasters.
-type Broadcaster interface {
-	// Register a new channel to receive broadcasts from a pubsubtopic
-	Register(topic *string, newch chan<- *protocol.Envelope)
-	// Register a new channel to receive broadcasts from a pubsub topic and return a channel to wait until this operation is complete
-	WaitRegister(topic *string, newch chan<- *protocol.Envelope) doneCh
-	// Unregister a channel so that it no longer receives broadcasts from a pubsub topic
-	Unregister(topic *string, newch chan<- *protocol.Envelope)
-	// Unregister a subscriptor channel and return a channel to wait until this operation is done
-	WaitUnregister(topic *string, newch chan<- *protocol.Envelope) doneCh
-	// Shut this broadcaster down.
-	Close()
-	// Submit a new object to all subscribers
-	Submit(*protocol.Envelope)
-}
-
-func (b *broadcaster) broadcast(m *protocol.Envelope) {
-	for ch := range b.outputs {
-		ch <- m
-	}
-
-	outputs, ok := b.outputsPerTopic[m.PubsubTopic()]
-	if !ok {
-		return
-	}
-
-	for ch := range outputs {
-		ch <- m
-	}
-}
-
-func (b *broadcaster) run() {
-	for {
-		select {
-		case m := <-b.input:
-			b.broadcast(m)
-		case broadcastee, ok := <-b.reg:
-			if ok {
-				if broadcastee.topic != nil {
-					topicOutputs, ok := b.outputsPerTopic[*broadcastee.topic]
-					if !ok {
-						b.outputsPerTopic[*broadcastee.topic] = make(broadcastOutputs)
-						topicOutputs = b.outputsPerTopic[*broadcastee.topic]
-					}
-
-					topicOutputs[broadcastee.ch] = struct{}{}
-					b.outputsPerTopic[*broadcastee.topic] = topicOutputs
-				} else {
-					b.outputs[broadcastee.ch] = struct{}{}
-				}
-				if broadcastee.done != nil {
-					broadcastee.done <- struct{}{}
-				}
-			} else {
-				if broadcastee.done != nil {
-					broadcastee.done <- struct{}{}
-				}
-				return
-			}
-		case broadcastee := <-b.unreg:
-			if broadcastee.topic != nil {
-				topicOutputs, ok := b.outputsPerTopic[*broadcastee.topic]
-				if !ok {
-					continue
-				}
-				delete(topicOutputs, broadcastee.ch)
-				b.outputsPerTopic[*broadcastee.topic] = topicOutputs
-			} else {
-				delete(b.outputs, broadcastee.ch)
-			}
-
-			if broadcastee.done != nil {
-				broadcastee.done <- struct{}{}
-			}
-		}
-	}
-}
-
-// NewBroadcaster creates a Broadcaster with an specified length
-// It's used to register subscriptors that will need to receive
-// an Envelope containing a WakuMessage
-func NewBroadcaster(buflen int) Broadcaster {
-	b := &broadcaster{
-		input:           make(chan *protocol.Envelope, buflen),
-		reg:             make(chan chOperation),
-		unreg:           make(chan chOperation),
-		outputs:         make(broadcastOutputs),
-		outputsPerTopic: make(map[string]broadcastOutputs),
-	}
-
-	go b.run()
-
-	return b
-}
-
-// Register a subscriptor channel and return a channel to wait until this operation is done
-func (b *broadcaster) WaitRegister(topic *string, newch chan<- *protocol.Envelope) doneCh {
-	d := make(doneCh)
-	b.reg <- chOperation{
-		ch:    newch,
-		topic: topic,
-		done:  d,
-	}
-	return d
-}
-
-// Register a subscriptor channel
-func (b *broadcaster) Register(topic *string, newch chan<- *protocol.Envelope) {
-	b.reg <- chOperation{
-		ch:    newch,
-		topic: topic,
-		done:  nil,
-	}
-}
-
-// Unregister a subscriptor channel and return a channel to wait until this operation is done
-func (b *broadcaster) WaitUnregister(topic *string, newch chan<- *protocol.Envelope) doneCh {
-	d := make(doneCh)
-	b.unreg <- chOperation{
-		ch:    newch,
-		topic: topic,
-		done:  d,
-	}
-	return d
-}
-
-// Unregister a subscriptor channel
-func (b *broadcaster) Unregister(topic *string, newch chan<- *protocol.Envelope) {
-	b.unreg <- chOperation{
-		ch:    newch,
-		topic: topic,
-		done:  nil,
-	}
-}
-
-// Closes the broadcaster. Used to stop receiving new subscribers
-func (b *broadcaster) Close() {
-	close(b.reg)
-}
-
-// Submits an Envelope to be broadcasted among all registered subscriber channels
-func (b *broadcaster) Submit(m *protocol.Envelope) {
-	if b != nil {
-		b.input <- m
-	}
-}
@@ -46,7 +46,7 @@ type PeerConnectionStrategy struct {
 // dialTimeout is how long we attempt to connect to a peer before giving up
 // minPeers is the minimum number of peers that the node should have
 // backoff describes the strategy used to decide how long to backoff after previously attempting to connect to a peer
-func NewPeerConnectionStrategy(h host.Host, cacheSize int, minPeers int, dialTimeout time.Duration, backoff backoff.BackoffFactory, logger *zap.Logger) (*PeerConnectionStrategy, error) {
+func NewPeerConnectionStrategy(cacheSize int, minPeers int, dialTimeout time.Duration, backoff backoff.BackoffFactory, logger *zap.Logger) (*PeerConnectionStrategy, error) {
 	cache, err := lru.New2Q(cacheSize)
 	if err != nil {
 		return nil, err
@@ -54,7 +54,6 @@ func NewPeerConnectionStrategy(h host.Host, cacheSize int, minPeers int, dialTim

 	return &PeerConnectionStrategy{
 		cache:       cache,
-		host:        h,
 		wg:          sync.WaitGroup{},
 		minPeers:    minPeers,
 		dialTimeout: dialTimeout,
@@ -73,6 +72,11 @@ func (c *PeerConnectionStrategy) PeerChannel() chan<- peer.AddrInfo {
 	return c.peerCh
 }

+// Sets the host to be able to mount or consume a protocol
+func (c *PeerConnectionStrategy) SetHost(h host.Host) {
+	c.host = h
+}
+
 // Start attempts to connect to the peers passed in by peerCh. Will not connect to peers if they are within the backoff period.
 func (c *PeerConnectionStrategy) Start(ctx context.Context) error {
 	if c.cancel != nil {
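The new SetHost on PeerConnectionStrategy (and the matching one on DiscoveryV5 further down) turns construction into two phases: build the component first, inject the libp2p host once it exists. A simplified sketch of the pattern, with stand-in types rather than the real go-waku ones:

package main

import "fmt"

// Host stands in for github.com/libp2p/go-libp2p/core/host.Host.
type Host interface{ ID() string }

type stubHost struct{}

func (stubHost) ID() string { return "12D3KooW-example" }

type connectionStrategy struct{ host Host }

// The constructor no longer needs a host argument...
func newConnectionStrategy() *connectionStrategy { return &connectionStrategy{} }

// ...because the host is injected later, as in the diff above.
func (c *connectionStrategy) SetHost(h Host) { c.host = h }

func main() {
	c := newConnectionStrategy() // built before the host exists
	c.SetHost(stubHost{})        // wired up once the node is running
	fmt.Println(c.host.ID())
}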
@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"net"
 	"sync"
+	"sync/atomic"
 	"time"

 	"github.com/libp2p/go-libp2p/core/host"
@@ -14,6 +15,8 @@ import (
 	"github.com/multiformats/go-multiaddr"
 	"github.com/waku-org/go-discover/discover"
 	"github.com/waku-org/go-waku/logging"
+	"github.com/waku-org/go-waku/waku/v2/metrics"
+	"github.com/waku-org/go-waku/waku/v2/protocol/enr"
 	"github.com/waku-org/go-waku/waku/v2/utils"
 	"go.uber.org/zap"

@@ -24,8 +27,6 @@ import (
 var ErrNoDiscV5Listener = errors.New("no discv5 listener")

 type DiscoveryV5 struct {
-	sync.RWMutex
-
 	params *discV5Parameters
 	host   host.Host
 	config discover.Config
@@ -37,7 +38,7 @@ type DiscoveryV5 struct {

 	log *zap.Logger

-	started bool
+	started atomic.Bool
 	cancel  context.CancelFunc
 	wg      *sync.WaitGroup
 }
@@ -87,7 +88,7 @@ type PeerConnector interface {
 	PeerChannel() chan<- peer.AddrInfo
 }

-func NewDiscoveryV5(host host.Host, priv *ecdsa.PrivateKey, localnode *enode.LocalNode, peerConnector PeerConnector, log *zap.Logger, opts ...DiscoveryV5Option) (*DiscoveryV5, error) {
+func NewDiscoveryV5(priv *ecdsa.PrivateKey, localnode *enode.LocalNode, peerConnector PeerConnector, log *zap.Logger, opts ...DiscoveryV5Option) (*DiscoveryV5, error) {
 	params := new(discV5Parameters)
 	optList := DefaultOptions()
 	optList = append(optList, opts...)
@@ -103,7 +104,6 @@ func NewDiscoveryV5(host host.Host, priv *ecdsa.PrivateKey, localnode *enode.Loc
 	}

 	return &DiscoveryV5{
-		host:          host,
 		peerConnector: peerConnector,
 		params:        params,
 		NAT:           NAT,
@@ -135,6 +135,7 @@ func (d *DiscoveryV5) listen(ctx context.Context) error {
 	}

 	d.udpAddr = conn.LocalAddr().(*net.UDPAddr)
+
 	if d.NAT != nil && !d.udpAddr.IP.IsLoopback() {
 		d.wg.Add(1)
 		go func() {
@@ -161,15 +162,21 @@ func (d *DiscoveryV5) listen(ctx context.Context) error {
 	return nil
 }

+// Sets the host to be able to mount or consume a protocol
+func (d *DiscoveryV5) SetHost(h host.Host) {
+	d.host = h
+}
+
+// only works if the discovery v5 hasn't been started yet.
 func (d *DiscoveryV5) Start(ctx context.Context) error {
-	d.Lock()
-	defer d.Unlock()
+	// compare and swap sets the discovery v5 to `started` state
+	// and prevents multiple calls to the start method by being atomic.
+	if !d.started.CompareAndSwap(false, true) {
+		return nil
+	}
+
+	d.wg.Wait() // Waiting for any go routines to stop

 	ctx, cancel := context.WithCancel(ctx)
 	d.cancel = cancel
-	d.started = true

 	err := d.listen(ctx)
 	if err != nil {
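The rewritten Start replaces the mutex-guarded started flag with sync/atomic's atomic.Bool (Go 1.19+): CompareAndSwap(false, true) succeeds for exactly one caller, making Start idempotent without locking, and Stop below applies the symmetric guard. A minimal sketch of that start/stop discipline:

package main

import (
	"fmt"
	"sync/atomic"
)

type service struct{ started atomic.Bool }

func (s *service) Start() {
	// Only the first caller flips false -> true; later calls bail out.
	if !s.started.CompareAndSwap(false, true) {
		return
	}
	fmt.Println("started once")
}

func (s *service) Stop() {
	// Symmetric guard: only flips true -> false if currently running.
	if !s.started.CompareAndSwap(true, false) {
		return
	}
	fmt.Println("stopped once")
}

func main() {
	var s service
	s.Start()
	s.Start() // no-op
	s.Stop()
	s.Stop() // no-op
	fmt.Println("running:", s.started.Load())
}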
@@ -177,7 +184,10 @@ func (d *DiscoveryV5) Start(ctx context.Context) error {
 	}

 	d.wg.Add(1)
-	go d.runDiscoveryV5Loop(ctx)
+	go func() {
+		defer d.wg.Done()
+		d.runDiscoveryV5Loop(ctx)
+	}()

 	return nil
 }
@@ -190,16 +200,13 @@ func (d *DiscoveryV5) SetBootnodes(nodes []*enode.Node) error {
 	return d.listener.SetFallbackNodes(nodes)
 }

+// only works if the discovery v5 is in running state
+// so we can assume that cancel method is set
 func (d *DiscoveryV5) Stop() {
-	d.Lock()
-	defer d.Unlock()
-
-	if d.cancel == nil {
+	if !d.started.CompareAndSwap(true, false) { // if Discoveryv5 is running, set started to false
 		return
 	}

 	d.cancel()
-	d.started = false

 	if d.listener != nil {
 		d.listener.Close()
@@ -238,9 +245,10 @@ func evaluateNode(node *enode.Node) bool {
 		return false
 	}*/

-	_, err := utils.EnodeToPeerInfo(node)
+	_, err := enr.EnodeToPeerInfo(node)

 	if err != nil {
+		metrics.RecordDiscV5Error(context.Background(), "peer_info_failure")
 		utils.Logger().Named("discv5").Error("obtaining peer info from enode", logging.ENode("enr", node), zap.Error(err))
 		return false
 	}
@@ -248,6 +256,9 @@ func evaluateNode(node *enode.Node) bool {
 	return true
 }

+// get random nodes from DHT via discv5 listender
+// used for caching enr address in peerExchange
+// used for connecting to peers in discovery_connector
 func (d *DiscoveryV5) Iterator() (enode.Iterator, error) {
 	if d.listener == nil {
 		return nil, ErrNoDiscV5Listener
@@ -257,55 +268,38 @@ func (d *DiscoveryV5) Iterator() (enode.Iterator, error) {
 	return enode.Filter(iterator, evaluateNode), nil
 }

+// iterate over all fecthed peer addresses and send them to peerConnector
 func (d *DiscoveryV5) iterate(ctx context.Context) error {
 	iterator, err := d.Iterator()
 	if err != nil {
+		metrics.RecordDiscV5Error(context.Background(), "iterator_failure")
 		return fmt.Errorf("obtaining iterator: %w", err)
 	}

-	closeCh := make(chan struct{}, 1)
-	defer close(closeCh)
-
-	// Closing iterator when context is cancelled or function is returning
-	d.wg.Add(1)
-	go func() {
-		defer d.wg.Done()
-		select {
-		case <-ctx.Done():
-			iterator.Close()
-		case <-closeCh:
-			iterator.Close()
-		}
-	}()
-
-	for {
-		if ctx.Err() != nil {
-			break
-		}
-
-		exists := iterator.Next()
-		if !exists {
-			break
-		}
-
-		_, addresses, err := utils.Multiaddress(iterator.Node())
+	defer iterator.Close()
+
+	for iterator.Next() { // while next exists, run for loop
+		_, addresses, err := enr.Multiaddress(iterator.Node())
 		if err != nil {
+			metrics.RecordDiscV5Error(context.Background(), "peer_info_failure")
 			d.log.Error("extracting multiaddrs from enr", zap.Error(err))
 			continue
 		}

 		peerAddrs, err := peer.AddrInfosFromP2pAddrs(addresses...)
 		if err != nil {
+			metrics.RecordDiscV5Error(context.Background(), "peer_info_failure")
 			d.log.Error("converting multiaddrs to addrinfos", zap.Error(err))
 			continue
 		}

 		if len(peerAddrs) != 0 {
-			select {
-			case <-ctx.Done():
-				return nil
-			case d.peerConnector.PeerChannel() <- peerAddrs[0]:
-			}
+			d.peerConnector.PeerChannel() <- peerAddrs[0]
+		}
+
+		select {
+		case <-ctx.Done():
+			return nil
+		default:
 		}
 	}
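The iterate rewrite drops the helper goroutine that closed the iterator and instead relies on defer iterator.Close() plus a non-blocking context check at the bottom of each pass. A condensed sketch of that control flow, with a placeholder channel and payloads instead of go-waku's peer types:

package main

import (
	"context"
	"fmt"
	"time"
)

func produce(ctx context.Context, out chan<- int) {
	for i := 0; i < 10; i++ { // stands in for "for iterator.Next()"
		out <- i // blocking send, like PeerChannel() <- peerAddrs[0]

		// Non-blocking cancellation check between iterations.
		select {
		case <-ctx.Done():
			return
		default:
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	ch := make(chan int)
	go produce(ctx, ch)
	for i := 0; i < 3; i++ {
		fmt.Println(<-ch)
	}
	cancel()
	time.Sleep(10 * time.Millisecond)
	// Caveat of this shape: the blocking send can stall past cancellation
	// if no receiver is draining the channel.
}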
@@ -313,23 +307,20 @@ func (d *DiscoveryV5) iterate(ctx context.Context) error {
 }

 func (d *DiscoveryV5) runDiscoveryV5Loop(ctx context.Context) {
-	defer d.wg.Done()
-
-	ch := make(chan struct{}, 1)
-	ch <- struct{}{} // Initial execution
-
 restartLoop:
 	for {
+		err := d.iterate(ctx)
+		if err != nil {
+			d.log.Debug("iterating discv5", zap.Error(err))
+		}
+
+		t := time.NewTimer(5 * time.Second)
 		select {
-		case <-ch:
-			err := d.iterate(ctx)
-			if err != nil {
-				d.log.Debug("iterating discv5", zap.Error(err))
-				time.Sleep(2 * time.Second)
-			}
-			ch <- struct{}{}
+		case <-t.C:
+			t.Stop()
 		case <-ctx.Done():
-			close(ch)
+			t.Stop()
 			break restartLoop
 		}
 	}
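runDiscoveryV5Loop now runs one iteration and then waits on a fresh timer or on cancellation, instead of re-signalling itself through a channel. The same loop in isolation (interval shortened so the example terminates quickly):

package main

import (
	"context"
	"fmt"
	"time"
)

func runLoop(ctx context.Context, iterate func() error) {
restartLoop:
	for {
		if err := iterate(); err != nil {
			fmt.Println("iterate:", err)
		}

		t := time.NewTimer(50 * time.Millisecond) // 5s in the real code
		select {
		case <-t.C:
			t.Stop()
		case <-ctx.Done():
			t.Stop()
			break restartLoop
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 180*time.Millisecond)
	defer cancel()
	runLoop(ctx, func() error { fmt.Println("discovery pass"); return nil })
}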
@@ -337,8 +328,5 @@ restartLoop:
 }

 func (d *DiscoveryV5) IsStarted() bool {
-	d.RLock()
-	defer d.RUnlock()
-
-	return d.started
+	return d.started.Load()
 }
@@ -7,7 +7,8 @@ import (
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/p2p/enr"
 	"github.com/libp2p/go-libp2p/core/peer"
-	"github.com/waku-org/go-waku/waku/v2/utils"
+	"github.com/waku-org/go-waku/waku/v2/metrics"
+	wenr "github.com/waku-org/go-waku/waku/v2/protocol/enr"

 	ma "github.com/multiformats/go-multiaddr"
 )
@@ -46,12 +47,14 @@ func RetrieveNodes(ctx context.Context, url string, opts ...DnsDiscoveryOption)

 	tree, err := client.SyncTree(url)
 	if err != nil {
+		metrics.RecordDnsDiscoveryError(ctx, "tree_sync_failure")
 		return nil, err
 	}

 	for _, node := range tree.Nodes() {
-		peerID, m, err := utils.Multiaddress(node)
+		peerID, m, err := wenr.Multiaddress(node)
 		if err != nil {
+			metrics.RecordDnsDiscoveryError(ctx, "peer_info_failure")
 			return nil, err
 		}
@@ -3,6 +3,7 @@ package metrics
 import (
 	"context"
 	"fmt"
+	"time"

 	"github.com/waku-org/go-waku/waku/v2/utils"
 	"go.opencensus.io/stats"
@@ -12,16 +13,42 @@ import (
 )

 var (
 	WakuVersion = stats.Int64("waku_version", "", stats.UnitDimensionless)
 	Messages    = stats.Int64("node_messages", "Number of messages received", stats.UnitDimensionless)
-	Peers               = stats.Int64("peers", "Number of connected peers", stats.UnitDimensionless)
-	Dials               = stats.Int64("dials", "Number of peer dials", stats.UnitDimensionless)
-	StoreMessages       = stats.Int64("store_messages", "Number of historical messages", stats.UnitDimensionless)
-	FilterSubscriptions = stats.Int64("filter_subscriptions", "Number of filter subscriptions", stats.UnitDimensionless)
-	StoreErrors         = stats.Int64("errors", "Number of errors in store protocol", stats.UnitDimensionless)
-	StoreQueries        = stats.Int64("store_queries", "Number of store queries", stats.UnitDimensionless)
-	LightpushErrors     = stats.Int64("errors", "Number of errors in lightpush protocol", stats.UnitDimensionless)
-	PeerExchangeError   = stats.Int64("errors", "Number of errors in peer exchange protocol", stats.UnitDimensionless)
+	MessageSize = stats.Int64("waku_histogram_message_size", "message size histogram in kB", stats.UnitDimensionless)
+
+	Peers = stats.Int64("peers", "Number of connected peers", stats.UnitDimensionless)
+	Dials = stats.Int64("dials", "Number of peer dials", stats.UnitDimensionless)
+
+	LegacyFilterMessages      = stats.Int64("legacy_filter_messages", "Number of legacy filter messages", stats.UnitDimensionless)
+	LegacyFilterSubscribers   = stats.Int64("legacy_filter_subscribers", "Number of legacy filter subscribers", stats.UnitDimensionless)
+	LegacyFilterSubscriptions = stats.Int64("legacy_filter_subscriptions", "Number of legacy filter subscriptions", stats.UnitDimensionless)
+	LegacyFilterErrors        = stats.Int64("legacy_filter_errors", "Number of errors in legacy filter protocol", stats.UnitDimensionless)
+
+	FilterMessages                     = stats.Int64("filter_messages", "Number of filter messages", stats.UnitDimensionless)
+	FilterRequests                     = stats.Int64("filter_requests", "Number of filter requests", stats.UnitDimensionless)
+	FilterSubscriptions                = stats.Int64("filter_subscriptions", "Number of filter subscriptions", stats.UnitDimensionless)
+	FilterErrors                       = stats.Int64("filter_errors", "Number of errors in filter protocol", stats.UnitDimensionless)
+	FilterRequestDurationSeconds       = stats.Int64("filter_request_duration_seconds", "Duration of Filter Subscribe Requests", stats.UnitSeconds)
+	FilterHandleMessageDurationSeconds = stats.Int64("filter_handle_msessageduration_seconds", "Duration to Push Message to Filter Subscribers", stats.UnitSeconds)
+
+	StoreErrors  = stats.Int64("errors", "Number of errors in store protocol", stats.UnitDimensionless)
+	StoreQueries = stats.Int64("store_queries", "Number of store queries", stats.UnitDimensionless)
+
+	ArchiveMessages              = stats.Int64("waku_archive_messages", "Number of historical messages", stats.UnitDimensionless)
+	ArchiveErrors                = stats.Int64("waku_archive_errors", "Number of errors in archive protocol", stats.UnitDimensionless)
+	ArchiveInsertDurationSeconds = stats.Int64("waku_archive_insert_duration_seconds", "Message insertion duration", stats.UnitSeconds)
+	ArchiveQueryDurationSeconds  = stats.Int64("waku_archive_query_duration_seconds", "History query duration", stats.UnitSeconds)
+
+	LightpushMessages = stats.Int64("lightpush_messages", "Number of messages sent via lightpush protocol", stats.UnitDimensionless)
+	LightpushErrors   = stats.Int64("errors", "Number of errors in lightpush protocol", stats.UnitDimensionless)
+
+	PeerExchangeError = stats.Int64("errors", "Number of errors in peer exchange protocol", stats.UnitDimensionless)
+
+	DnsDiscoveryNodes  = stats.Int64("dnsdisc_nodes", "Number of discovered nodes in dns discovert", stats.UnitDimensionless)
+	DnsDiscoveryErrors = stats.Int64("dnsdisc_errors", "Number of errors in dns discovery", stats.UnitDimensionless)
+
+	DiscV5Errors = stats.Int64("discv5_errors", "Number of errors in discv5", stats.UnitDimensionless)
 )

 var (
@@ -32,63 +59,179 @@ var (

 var (
 	PeersView = &view.View{
-		Name:        "gowaku_connected_peers",
+		Name:        "waku_connected_peers",
 		Measure:     Peers,
 		Description: "Number of connected peers",
 		Aggregation: view.Sum(),
 	}
 	DialsView = &view.View{
-		Name:        "gowaku_peers_dials",
+		Name:        "waku_peers_dials",
 		Measure:     Dials,
 		Description: "Number of peer dials",
 		Aggregation: view.Count(),
 	}
 	MessageView = &view.View{
-		Name:        "gowaku_node_messages",
+		Name:        "waku_node_messages",
 		Measure:     Messages,
 		Description: "The number of the messages received",
 		Aggregation: view.Count(),
 	}
+	MessageSizeView = &view.View{
+		Name:        "waku_histogram_message_size",
+		Measure:     MessageSize,
+		Description: "message size histogram in kB",
+		Aggregation: view.Distribution(0.0, 5.0, 15.0, 50.0, 100.0, 300.0, 700.0, 1000.0),
+	}
+
 	StoreQueriesView = &view.View{
-		Name:        "gowaku_store_queries",
+		Name:        "waku_store_queries",
 		Measure:     StoreQueries,
 		Description: "The number of the store queries received",
 		Aggregation: view.Count(),
 	}
-	StoreMessagesView = &view.View{
-		Name:        "gowaku_store_messages",
-		Measure:     StoreMessages,
-		Description: "The distribution of the store protocol messages",
-		Aggregation: view.LastValue(),
-		TagKeys:     []tag.Key{KeyType},
-	}
-	FilterSubscriptionsView = &view.View{
-		Name:        "gowaku_filter_subscriptions",
-		Measure:     FilterSubscriptions,
-		Description: "The number of content filter subscriptions",
-		Aggregation: view.LastValue(),
-	}
 	StoreErrorTypesView = &view.View{
-		Name:        "gowaku_store_errors",
+		Name:        "waku_store_errors",
 		Measure:     StoreErrors,
 		Description: "The distribution of the store protocol errors",
 		Aggregation: view.Count(),
 		TagKeys:     []tag.Key{ErrorType},
 	}
+
+	ArchiveMessagesView = &view.View{
+		Name:        "waku_archive_messages",
+		Measure:     ArchiveMessages,
+		Description: "The distribution of the archive protocol messages",
+		Aggregation: view.LastValue(),
+		TagKeys:     []tag.Key{KeyType},
+	}
+	ArchiveErrorTypesView = &view.View{
+		Name:        "waku_archive_errors",
+		Measure:     StoreErrors,
+		Description: "Number of errors in archive protocol",
+		Aggregation: view.Count(),
+		TagKeys:     []tag.Key{ErrorType},
+	}
+	ArchiveInsertDurationView = &view.View{
+		Name:        "waku_archive_insert_duration_seconds",
+		Measure:     ArchiveInsertDurationSeconds,
+		Description: "Message insertion duration",
+		Aggregation: view.Count(),
+	}
+	ArchiveQueryDurationView = &view.View{
+		Name:        "waku_archive_query_duration_seconds",
+		Measure:     ArchiveQueryDurationSeconds,
+		Description: "History query duration",
+		Aggregation: view.Count(),
+	}
+
+	LegacyFilterSubscriptionsView = &view.View{
+		Name:        "waku_legacy_filter_subscriptions",
+		Measure:     LegacyFilterSubscriptions,
+		Description: "The number of legacy filter subscriptions",
+		Aggregation: view.Count(),
+	}
+	LegacyFilterSubscribersView = &view.View{
+		Name:        "waku_legacy_filter_subscribers",
+		Measure:     LegacyFilterSubscribers,
+		Description: "The number of legacy filter subscribers",
+		Aggregation: view.LastValue(),
+	}
+	LegacyFilterMessagesView = &view.View{
+		Name:        "waku_legacy_filter_messages",
+		Measure:     LegacyFilterMessages,
+		Description: "The distribution of the legacy filter protocol messages received",
+		Aggregation: view.Count(),
+		TagKeys:     []tag.Key{KeyType},
+	}
+	LegacyFilterErrorTypesView = &view.View{
+		Name:        "waku_legacy_filter_errors",
+		Measure:     LegacyFilterErrors,
+		Description: "The distribution of the legacy filter protocol errors",
+		Aggregation: view.Count(),
+		TagKeys:     []tag.Key{ErrorType},
+	}
+
+	FilterSubscriptionsView = &view.View{
+		Name:        "waku_filter_subscriptions",
+		Measure:     FilterSubscriptions,
+		Description: "The number of filter subscriptions",
+		Aggregation: view.Count(),
+	}
+	FilterRequestsView = &view.View{
+		Name:        "waku_filter_requests",
+		Measure:     FilterRequests,
+		Description: "The number of filter requests",
+		Aggregation: view.Count(),
+	}
+	FilterMessagesView = &view.View{
+		Name:        "waku_filter_messages",
+		Measure:     FilterMessages,
+		Description: "The distribution of the filter protocol messages received",
+		Aggregation: view.Count(),
+		TagKeys:     []tag.Key{KeyType},
+	}
+	FilterErrorTypesView = &view.View{
+		Name:        "waku_filter_errors",
+		Measure:     FilterErrors,
+		Description: "The distribution of the filter protocol errors",
+		Aggregation: view.Count(),
+		TagKeys:     []tag.Key{ErrorType},
+	}
+	FilterRequestDurationView = &view.View{
+		Name:        "waku_filter_request_duration_seconds",
+		Measure:     FilterRequestDurationSeconds,
+		Description: "Duration of Filter Subscribe Requests",
+		Aggregation: view.Count(),
+	}
+	FilterHandleMessageDurationView = &view.View{
+		Name:        "waku_filter_handle_msessageduration_seconds",
+		Measure:     FilterHandleMessageDurationSeconds,
+		Description: "Duration to Push Message to Filter Subscribers",
+		Aggregation: view.Count(),
+	}
+
+	LightpushMessagesView = &view.View{
+		Name:        "waku_lightpush_messages",
+		Measure:     LightpushMessages,
+		Description: "The distribution of the lightpush protocol messages",
+		Aggregation: view.LastValue(),
+		TagKeys:     []tag.Key{KeyType},
+	}
 	LightpushErrorTypesView = &view.View{
-		Name:        "gowaku_lightpush_errors",
+		Name:        "waku_lightpush_errors",
 		Measure:     LightpushErrors,
 		Description: "The distribution of the lightpush protocol errors",
 		Aggregation: view.Count(),
 		TagKeys:     []tag.Key{ErrorType},
 	}
 	VersionView = &view.View{
-		Name:        "gowaku_version",
+		Name:        "waku_version",
 		Measure:     WakuVersion,
 		Description: "The gowaku version",
 		Aggregation: view.LastValue(),
 		TagKeys:     []tag.Key{GitVersion},
 	}
+	DnsDiscoveryNodesView = &view.View{
+		Name:        "waku_dnsdisc_discovered",
+		Measure:     DnsDiscoveryNodes,
+		Description: "The number of nodes discovered via DNS discovery",
+		Aggregation: view.Count(),
+	}
+	DnsDiscoveryErrorTypesView = &view.View{
+		Name:        "waku_dnsdisc_errors",
+		Measure:     DnsDiscoveryErrors,
+		Description: "The distribution of the dns discovery protocol errors",
+		Aggregation: view.Count(),
+		TagKeys:     []tag.Key{ErrorType},
+	}
+	DiscV5ErrorTypesView = &view.View{
+		Name:        "waku_discv5_errors",
+		Measure:     DiscV5Errors,
+		Description: "The distribution of the discv5 protocol errors",
+		Aggregation: view.Count(),
+		TagKeys:     []tag.Key{ErrorType},
+	}
 )

 func recordWithTags(ctx context.Context, tagKey tag.Key, tagType string, ms stats.Measurement) {
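Note that views only export data once registered; the registration call is not part of this diff, and dashboards keyed to the old gowaku_* names need updating. A minimal, assumed usage sketch of standard OpenCensus registration:

package main

import (
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

// A measure/view pair matching the naming scheme above.
var archiveMessages = stats.Int64("waku_archive_messages", "Number of historical messages", stats.UnitDimensionless)

var archiveMessagesView = &view.View{
	Name:        "waku_archive_messages",
	Measure:     archiveMessages,
	Description: "The distribution of the archive protocol messages",
	Aggregation: view.LastValue(),
}

func main() {
	if err := view.Register(archiveMessagesView); err != nil {
		log.Fatalf("registering views: %v", err)
	}
}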
@@ -97,16 +240,61 @@ func recordWithTags(ctx context.Context, tagKey tag.Key, tagType string, ms stat
 	}
 }

+func RecordLightpushMessage(ctx context.Context, tagType string) {
+	if err := stats.RecordWithTags(ctx, []tag.Mutator{tag.Insert(KeyType, tagType)}, LightpushMessages.M(1)); err != nil {
+		utils.Logger().Error("failed to record with tags", zap.Error(err))
+	}
+}
+
 func RecordLightpushError(ctx context.Context, tagType string) {
 	recordWithTags(ctx, ErrorType, tagType, LightpushErrors.M(1))
 }

+func RecordLegacyFilterError(ctx context.Context, tagType string) {
+	recordWithTags(ctx, ErrorType, tagType, LegacyFilterErrors.M(1))
+}
+
+func RecordArchiveError(ctx context.Context, tagType string) {
+	recordWithTags(ctx, ErrorType, tagType, ArchiveErrors.M(1))
+}
+
+func RecordFilterError(ctx context.Context, tagType string) {
+	recordWithTags(ctx, ErrorType, tagType, FilterErrors.M(1))
+}
+
+func RecordFilterRequest(ctx context.Context, tagType string, duration time.Duration) {
+	if err := stats.RecordWithTags(ctx, []tag.Mutator{tag.Insert(KeyType, tagType)}, FilterRequests.M(1)); err != nil {
+		utils.Logger().Error("failed to record with tags", zap.Error(err))
+	}
+	FilterRequestDurationSeconds.M(int64(duration.Seconds()))
+}
+
+func RecordFilterMessage(ctx context.Context, tagType string, len int) {
+	if err := stats.RecordWithTags(ctx, []tag.Mutator{tag.Insert(KeyType, tagType)}, FilterMessages.M(int64(len))); err != nil {
+		utils.Logger().Error("failed to record with tags", zap.Error(err))
+	}
+}
+
+func RecordLegacyFilterMessage(ctx context.Context, tagType string, len int) {
+	if err := stats.RecordWithTags(ctx, []tag.Mutator{tag.Insert(KeyType, tagType)}, LegacyFilterMessages.M(int64(len))); err != nil {
+		utils.Logger().Error("failed to record with tags", zap.Error(err))
+	}
+}
+
 func RecordPeerExchangeError(ctx context.Context, tagType string) {
 	recordWithTags(ctx, ErrorType, tagType, PeerExchangeError.M(1))
 }

-func RecordMessage(ctx context.Context, tagType string, len int) {
-	if err := stats.RecordWithTags(ctx, []tag.Mutator{tag.Insert(KeyType, tagType)}, StoreMessages.M(int64(len))); err != nil {
+func RecordDnsDiscoveryError(ctx context.Context, tagType string) {
+	recordWithTags(ctx, ErrorType, tagType, DnsDiscoveryErrors.M(1))
+}
+
+func RecordDiscV5Error(ctx context.Context, tagType string) {
+	recordWithTags(ctx, ErrorType, tagType, DiscV5Errors.M(1))
+}
+
+func RecordArchiveMessage(ctx context.Context, tagType string, len int) {
+	if err := stats.RecordWithTags(ctx, []tag.Mutator{tag.Insert(KeyType, tagType)}, ArchiveMessages.M(int64(len))); err != nil {
 		utils.Logger().Error("failed to record with tags", zap.Error(err))
 	}
 }
@@ -10,7 +10,7 @@ import (
 	"github.com/multiformats/go-multiaddr"
 	"github.com/waku-org/go-waku/logging"
 	"github.com/waku-org/go-waku/waku/v2/metrics"
-	"github.com/waku-org/go-waku/waku/v2/protocol/filter"
+	"github.com/waku-org/go-waku/waku/v2/protocol/legacy_filter"
 	"github.com/waku-org/go-waku/waku/v2/protocol/lightpush"
 	"github.com/waku-org/go-waku/waku/v2/protocol/relay"
 	"github.com/waku-org/go-waku/waku/v2/protocol/store"
@@ -131,7 +131,7 @@ func (w *WakuNode) Status() (isOnline bool, hasHistory bool) {
 			if !hasStore && protocol == store.StoreID_v20beta4 {
 				hasStore = true
 			}
-			if !hasFilter && protocol == filter.FilterID_v20beta1 {
+			if !hasFilter && protocol == legacy_filter.FilterID_v20beta1 {
 				hasFilter = true
 			}
 		}
@@ -14,7 +14,7 @@ import (
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/p2p/enr"
 	ma "github.com/multiformats/go-multiaddr"
-	"github.com/waku-org/go-waku/waku/v2/utils"
+	wenr "github.com/waku-org/go-waku/waku/v2/protocol/enr"
 	"go.uber.org/zap"
 )
@@ -30,7 +30,7 @@ func writeMultiaddressField(localnode *enode.LocalNode, addrAggr []ma.Multiaddr)
 	defer func() {
 		if e := recover(); e != nil {
 			// Deleting the multiaddr entry, as we could not write it succesfully
-			localnode.Delete(enr.WithEntry(utils.MultiaddrENRField, struct{}{}))
+			localnode.Delete(enr.WithEntry(wenr.MultiaddrENRField, struct{}{}))
 			err = errors.New("could not write enr record")
 		}
 	}()
@@ -46,7 +46,7 @@ func writeMultiaddressField(localnode *enode.LocalNode, addrAggr []ma.Multiaddr)
 	}

 	if len(fieldRaw) != 0 && len(fieldRaw) <= 100 { // Max length for multiaddr field before triggering the 300 bytes limit
-		localnode.Set(enr.WithEntry(utils.MultiaddrENRField, fieldRaw))
+		localnode.Set(enr.WithEntry(wenr.MultiaddrENRField, fieldRaw))
 	}

 	// This is to trigger the signing record err due to exceeding 300bytes limit
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *WakuNode) updateLocalNode(localnode *enode.LocalNode, multiaddrs []ma.Multiaddr, ipAddr *net.TCPAddr, udpPort uint, wakuFlags utils.WakuEnrBitfield, advertiseAddr []ma.Multiaddr, shouldAutoUpdate bool, log *zap.Logger) error {
|
func (w *WakuNode) updateLocalNode(localnode *enode.LocalNode, multiaddrs []ma.Multiaddr, ipAddr *net.TCPAddr, udpPort uint, wakuFlags wenr.WakuEnrBitfield, advertiseAddr []ma.Multiaddr, shouldAutoUpdate bool, log *zap.Logger) error {
|
||||||
localnode.SetFallbackUDP(int(udpPort))
|
localnode.SetFallbackUDP(int(udpPort))
|
||||||
localnode.Set(enr.WithEntry(utils.WakuENRField, wakuFlags))
|
localnode.Set(enr.WithEntry(wenr.WakuENRField, wakuFlags))
|
||||||
localnode.SetFallbackIP(net.IP{127, 0, 0, 1})
|
localnode.SetFallbackIP(net.IP{127, 0, 0, 1})
|
||||||
|
|
||||||
if udpPort > math.MaxUint16 {
|
if udpPort > math.MaxUint16 {
|
||||||
|
|
|
@@ -1,113 +0,0 @@
-//go:build gowaku_rln
-// +build gowaku_rln
-
-package node
-
-import (
-	"bytes"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-
-	"github.com/ethereum/go-ethereum/accounts/keystore"
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/waku-org/go-zerokit-rln/rln"
-	"go.uber.org/zap"
-)
-
-const RLN_CREDENTIALS_FILENAME = "rlnCredentials.txt"
-
-func WriteRLNMembershipCredentialsToFile(keyPair *rln.MembershipKeyPair, idx rln.MembershipIndex, contractAddress common.Address, path string, passwd []byte) error {
-	if path == "" {
-		return nil // we dont want to use a credentials file
-	}
-
-	if keyPair == nil {
-		return nil // no credentials to store
-	}
-
-	credentialsJSON, err := json.Marshal(MembershipCredentials{
-		Keypair:  keyPair,
-		Index:    idx,
-		Contract: contractAddress,
-	})
-
-	if err != nil {
-		return err
-	}
-
-	encryptedCredentials, err := keystore.EncryptDataV3(credentialsJSON, passwd, keystore.StandardScryptN, keystore.StandardScryptP)
-	if err != nil {
-		return err
-	}
-
-	output, err := json.Marshal(encryptedCredentials)
-	if err != nil {
-		return err
-	}
-
-	path = filepath.Join(path, RLN_CREDENTIALS_FILENAME)
-
-	return ioutil.WriteFile(path, output, 0600)
-}
-
-func loadMembershipCredentialsFromFile(credentialsFilePath string, passwd string) (MembershipCredentials, error) {
-	src, err := ioutil.ReadFile(credentialsFilePath)
-	if err != nil {
-		return MembershipCredentials{}, err
-	}
-
-	var encryptedK keystore.CryptoJSON
-	err = json.Unmarshal(src, &encryptedK)
-	if err != nil {
-		return MembershipCredentials{}, err
-	}
-
-	credentialsBytes, err := keystore.DecryptDataV3(encryptedK, passwd)
-	if err != nil {
-		return MembershipCredentials{}, err
-	}
-
-	var credentials MembershipCredentials
-	err = json.Unmarshal(credentialsBytes, &credentials)
-
-	return credentials, err
-}
-
-func GetMembershipCredentials(logger *zap.Logger, credentialsPath string, password string, membershipContract common.Address, membershipIndex uint) (credentials MembershipCredentials, err error) {
-	if credentialsPath == "" { // Not using a file
-		return MembershipCredentials{
-			Contract: membershipContract,
-		}, nil
-	}
-
-	credentialsFilePath := filepath.Join(credentialsPath, RLN_CREDENTIALS_FILENAME)
-	if _, err = os.Stat(credentialsFilePath); err == nil {
-		if credentials, err := loadMembershipCredentialsFromFile(credentialsFilePath, password); err != nil {
-			return MembershipCredentials{}, fmt.Errorf("could not read membership credentials file: %w", err)
-		} else {
-			logger.Info("loaded rln credentials", zap.String("filepath", credentialsFilePath))
-			if (bytes.Equal(credentials.Contract.Bytes(), common.Address{}.Bytes())) {
-				credentials.Contract = membershipContract
-			}
-			if (bytes.Equal(membershipContract.Bytes(), common.Address{}.Bytes())) {
-				return MembershipCredentials{}, errors.New("no contract address specified")
-			}
-			return credentials, nil
-		}
-	}
-
-	if os.IsNotExist(err) {
-		return MembershipCredentials{
-			Keypair:  nil,
-			Index:    membershipIndex,
-			Contract: membershipContract,
-		}, nil
-
-	}
-
-	return MembershipCredentials{}, fmt.Errorf("could not read membership credentials file: %w", err)
-}
@@ -3,18 +3,21 @@ package node
 import (
 	"context"

+	"github.com/libp2p/go-libp2p/core/host"
 	"github.com/libp2p/go-libp2p/core/peer"
-	"github.com/waku-org/go-waku/waku/v2/protocol"
+	"github.com/waku-org/go-waku/waku/v2/protocol/relay"
 )

 type Service interface {
-	Start(ctx context.Context) error
+	SetHost(h host.Host)
+	Start(context.Context) error
 	Stop()
 }

 type ReceptorService interface {
-	Service
-	MessageChannel() chan *protocol.Envelope
+	SetHost(h host.Host)
+	Stop()
+	Start(context.Context, relay.Subscription) error
 }

 type PeerConnectorService interface {
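The interface change above is the heart of this bump: every service now gets its libp2p host injected through SetHost after construction, and receptor services take a relay.Subscription through Start instead of exposing a message channel. For illustration, a minimal type satisfying the new Service contract could look like this (a sketch; noopService and its body are hypothetical, not part of this commit):

	// noopService is a hypothetical Service implementation: the host
	// arrives via SetHost before the node invokes Start.
	type noopService struct {
		h host.Host
	}

	func (s *noopService) SetHost(h host.Host) { s.h = h }

	func (s *noopService) Start(ctx context.Context) error { return nil }

	func (s *noopService) Stop() {}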
@@ -13,7 +13,6 @@ import (
 	"github.com/libp2p/go-libp2p"
 	"go.uber.org/zap"

-	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/p2p/enode"

@@ -33,8 +32,9 @@ import (
 	v2 "github.com/waku-org/go-waku/waku/v2"
 	"github.com/waku-org/go-waku/waku/v2/discv5"
 	"github.com/waku-org/go-waku/waku/v2/metrics"
+	"github.com/waku-org/go-waku/waku/v2/protocol/enr"
 	"github.com/waku-org/go-waku/waku/v2/protocol/filter"
-	"github.com/waku-org/go-waku/waku/v2/protocol/filterv2"
+	"github.com/waku-org/go-waku/waku/v2/protocol/legacy_filter"
 	"github.com/waku-org/go-waku/waku/v2/protocol/lightpush"
 	"github.com/waku-org/go-waku/waku/v2/protocol/pb"
 	"github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange"

@@ -55,15 +55,18 @@ type Peer struct {

 type storeFactory func(w *WakuNode) store.Store

-type MembershipKeyPair = struct {
-	IDKey        [32]byte `json:"idKey"`
-	IDCommitment [32]byte `json:"idCommitment"`
+type byte32 = [32]byte
+
+type IdentityCredential = struct {
+	IDTrapdoor   byte32 `json:"idTrapdoor"`
+	IDNullifier  byte32 `json:"idNullifier"`
+	IDSecretHash byte32 `json:"idSecretHash"`
+	IDCommitment byte32 `json:"idCommitment"`
 }

 type RLNRelay interface {
-	MembershipKeyPair() *MembershipKeyPair
-	MembershipIndex() uint
-	MembershipContractAddress() common.Address
+	IdentityCredential() (IdentityCredential, error)
+	MembershipIndex() (uint, error)
 	AppendRLNProof(msg *pb.WakuMessage, senderEpochTime time.Time) error
 	Stop()
 }
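Both RLNRelay accessors are now fallible, so callers must thread the errors through. A hedged sketch of the new call pattern (logRLNState is a hypothetical helper, not part of the commit):

	func logRLNState(r RLNRelay, log *zap.Logger) error {
		// IdentityCredential and MembershipIndex both return errors now.
		cred, err := r.IdentityCredential()
		if err != nil {
			return err
		}
		idx, err := r.MembershipIndex()
		if err != nil {
			return err
		}
		log.Info("rln membership", zap.Uint("index", idx), zap.Binary("idCommitment", cred.IDCommitment[:]))
		return nil
	}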
@@ -74,23 +77,23 @@ type WakuNode struct {
 	log        *zap.Logger
 	timesource timesource.Timesource

 	relay         Service
 	lightPush     Service
 	peerConnector PeerConnectorService
 	discoveryV5   Service
 	peerExchange  Service
 	rendezvous    Service
-	filter        ReceptorService
-	filterV2Full  ReceptorService
-	filterV2Light Service
+	legacyFilter    ReceptorService
+	filterFullnode  ReceptorService
+	filterLightnode Service
 	store         ReceptorService
 	rlnRelay      RLNRelay

-	wakuFlag utils.WakuEnrBitfield
+	wakuFlag enr.WakuEnrBitfield

 	localNode *enode.LocalNode

-	bcaster v2.Broadcaster
+	bcaster relay.Broadcaster

 	connectionNotif  ConnectionNotifier
 	protocolEventSub event.Subscription

@@ -112,7 +115,7 @@ type WakuNode struct {
 }

 func defaultStoreFactory(w *WakuNode) store.Store {
-	return store.NewWakuStore(w.host, w.opts.messageProvider, w.timesource, w.log)
+	return store.NewWakuStore(w.opts.messageProvider, w.timesource, w.log)
 }

 // New is used to instantiate a WakuNode using a set of WakuNodeOptions

@@ -165,19 +168,15 @@ func New(opts ...WakuNodeOption) (*WakuNode, error) {
 		params.libP2POpts = append(params.libP2POpts, libp2p.AddrsFactory(params.addressFactory))
 	}

-	host, err := libp2p.New(params.libP2POpts...)
-	if err != nil {
-		return nil, err
-	}
-
+	var err error
 	w := new(WakuNode)
-	w.bcaster = v2.NewBroadcaster(1024)
-	w.host = host
+	w.bcaster = relay.NewBroadcaster(1024)
 	w.opts = params
 	w.log = params.logger.Named("node2")
 	w.wg = &sync.WaitGroup{}
 	w.keepAliveFails = make(map[peer.ID]int)
-	w.wakuFlag = utils.NewWakuEnrBitfield(w.opts.enableLightPush, w.opts.enableFilter, w.opts.enableStore, w.opts.enableRelay)
+	w.wakuFlag = enr.NewWakuEnrBitfield(w.opts.enableLightPush, w.opts.enableLegacyFilter, w.opts.enableStore, w.opts.enableRelay)

 	if params.enableNTP {
 		w.timesource = timesource.NewNTPTimesource(w.opts.ntpURLs, w.log)

@@ -195,7 +194,7 @@ func New(opts ...WakuNodeOption) (*WakuNode, error) {
 	rngSrc := rand.NewSource(rand.Int63())
 	minBackoff, maxBackoff := time.Second*30, time.Hour
 	bkf := backoff.NewExponentialBackoff(minBackoff, maxBackoff, backoff.FullJitter, time.Second, 5.0, 0, rand.New(rngSrc))
-	w.peerConnector, err = v2.NewPeerConnectionStrategy(host, cacheSize, w.opts.discoveryMinPeers, network.DialPeerTimeout, bkf, w.log)
+	w.peerConnector, err = v2.NewPeerConnectionStrategy(cacheSize, w.opts.discoveryMinPeers, network.DialPeerTimeout, bkf, w.log)
 	if err != nil {
 		w.log.Error("creating peer connection strategy", zap.Error(err))
 	}

@@ -207,7 +206,7 @@ func New(opts ...WakuNodeOption) (*WakuNode, error) {
 		}
 	}

-	w.peerExchange, err = peer_exchange.NewWakuPeerExchange(w.host, w.DiscV5(), w.peerConnector, w.log)
+	w.peerExchange, err = peer_exchange.NewWakuPeerExchange(w.DiscV5(), w.peerConnector, w.log)
 	if err != nil {
 		return nil, err
 	}

@@ -221,12 +220,12 @@ func New(opts ...WakuNodeOption) (*WakuNode, error) {
 		rendezvousPoints = append(rendezvousPoints, peerID)
 	}

-	w.rendezvous = rendezvous.NewRendezvous(w.host, w.opts.enableRendezvousServer, w.opts.rendezvousDB, w.opts.enableRendezvous, rendezvousPoints, w.peerConnector, w.log)
-	w.relay = relay.NewWakuRelay(w.host, w.bcaster, w.opts.minRelayPeersToPublish, w.timesource, w.log, w.opts.wOpts...)
-	w.filter = filter.NewWakuFilter(w.host, w.bcaster, w.opts.isFilterFullNode, w.timesource, w.log, w.opts.filterOpts...)
-	w.filterV2Full = filterv2.NewWakuFilterFullnode(w.host, w.bcaster, w.timesource, w.log, w.opts.filterV2Opts...)
-	w.filterV2Light = filterv2.NewWakuFilterLightnode(w.host, w.bcaster, w.timesource, w.log)
-	w.lightPush = lightpush.NewWakuLightPush(w.host, w.Relay(), w.log)
+	w.rendezvous = rendezvous.NewRendezvous(w.opts.enableRendezvousServer, w.opts.rendezvousDB, w.opts.enableRendezvous, rendezvousPoints, w.peerConnector, w.log)
+	w.relay = relay.NewWakuRelay(w.bcaster, w.opts.minRelayPeersToPublish, w.timesource, w.log, w.opts.wOpts...)
+	w.legacyFilter = legacy_filter.NewWakuFilter(w.bcaster, w.opts.isLegacyFilterFullnode, w.timesource, w.log, w.opts.legacyFilterOpts...)
+	w.filterFullnode = filter.NewWakuFilterFullnode(w.timesource, w.log, w.opts.filterOpts...)
+	w.filterLightnode = filter.NewWakuFilterLightnode(w.bcaster, w.timesource, w.log)
+	w.lightPush = lightpush.NewWakuLightPush(w.Relay(), w.log)

 	if params.storeFactory != nil {
 		w.storeFactory = params.storeFactory

@@ -234,18 +233,6 @@ func New(opts ...WakuNodeOption) (*WakuNode, error) {
 		w.storeFactory = defaultStoreFactory
 	}

-	if w.protocolEventSub, err = host.EventBus().Subscribe(new(event.EvtPeerProtocolsUpdated)); err != nil {
-		return nil, err
-	}
-
-	if w.identificationEventSub, err = host.EventBus().Subscribe(new(event.EvtPeerIdentificationCompleted)); err != nil {
-		return nil, err
-	}
-
-	if w.addressChangesSub, err = host.EventBus().Subscribe(new(event.EvtLocalAddressesUpdated)); err != nil {
-		return nil, err
-	}
-
 	if params.connStatusC != nil {
 		w.connStatusChan = params.connStatusC
 	}
@@ -294,6 +281,25 @@ func (w *WakuNode) Start(ctx context.Context) error {
 	ctx, cancel := context.WithCancel(ctx)
 	w.cancel = cancel

+	host, err := libp2p.New(w.opts.libP2POpts...)
+	if err != nil {
+		return err
+	}
+
+	w.host = host
+
+	if w.protocolEventSub, err = host.EventBus().Subscribe(new(event.EvtPeerProtocolsUpdated)); err != nil {
+		return err
+	}
+
+	if w.identificationEventSub, err = host.EventBus().Subscribe(new(event.EvtPeerIdentificationCompleted)); err != nil {
+		return err
+	}
+
+	if w.addressChangesSub, err = host.EventBus().Subscribe(new(event.EvtLocalAddressesUpdated)); err != nil {
+		return err
+	}
+
 	w.connectionNotif = NewConnectionNotifier(ctx, w.host, w.log)
 	w.host.Network().Notify(w.connectionNotif)

@@ -304,12 +310,18 @@ func (w *WakuNode) Start(ctx context.Context) error {
 	go w.watchMultiaddressChanges(ctx)
 	go w.watchENRChanges(ctx)

+	err = w.bcaster.Start(ctx)
+	if err != nil {
+		return err
+	}
+
 	if w.opts.keepAliveInterval > time.Duration(0) {
 		w.wg.Add(1)
 		go w.startKeepAlive(ctx, w.opts.keepAliveInterval)
 	}

-	err := w.peerConnector.Start(ctx)
+	w.peerConnector.SetHost(host)
+	err = w.peerConnector.Start(ctx)
 	if err != nil {
 		return err
 	}
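With host creation moved out of New and into Start, constructing a WakuNode no longer opens sockets; nothing dials until Start runs, and Start hands the freshly created host to every service via SetHost. A minimal lifecycle sketch (options omitted for brevity):

	ctx := context.Background()

	wakuNode, err := New() // New no longer creates the libp2p host
	if err != nil {
		panic(err)
	}

	// The libp2p host is created here and injected into each service.
	if err := wakuNode.Start(ctx); err != nil {
		panic(err)
	}
	defer wakuNode.Stop()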
@@ -321,6 +333,7 @@ func (w *WakuNode) Start(ctx context.Context) error {
 		}
 	}

+	w.relay.SetHost(host)
 	if w.opts.enableRelay {
 		err := w.relay.Start(ctx)
 		if err != nil {

@@ -332,50 +345,52 @@ func (w *WakuNode) Start(ctx context.Context) error {
 			if err != nil {
 				return err
 			}
-			w.Broadcaster().Unregister(&relay.DefaultWakuTopic, sub.C)
+			sub.Unsubscribe()
 		}
 	}

 	w.store = w.storeFactory(w)
+	w.store.SetHost(host)
 	if w.opts.enableStore {
-		err := w.startStore(ctx)
+		sub := w.bcaster.RegisterForAll()
+		err := w.startStore(ctx, sub)
 		if err != nil {
 			return err
 		}

 		w.log.Info("Subscribing store to broadcaster")
-		w.bcaster.Register(nil, w.store.MessageChannel())
 	}

+	w.lightPush.SetHost(host)
 	if w.opts.enableLightPush {
 		if err := w.lightPush.Start(ctx); err != nil {
 			return err
 		}
 	}

-	if w.opts.enableFilter {
-		err := w.filter.Start(ctx)
+	w.legacyFilter.SetHost(host)
+	if w.opts.enableLegacyFilter {
+		sub := w.bcaster.RegisterForAll()
+		err := w.legacyFilter.Start(ctx, sub)
 		if err != nil {
 			return err
 		}

 		w.log.Info("Subscribing filter to broadcaster")
-		w.bcaster.Register(nil, w.filter.MessageChannel())
 	}

-	if w.opts.enableFilterV2FullNode {
-		err := w.filterV2Full.Start(ctx)
+	w.filterFullnode.SetHost(host)
+	if w.opts.enableFilterFullNode {
+		sub := w.bcaster.RegisterForAll()
+		err := w.filterFullnode.Start(ctx, sub)
 		if err != nil {
 			return err
 		}

 		w.log.Info("Subscribing filterV2 to broadcaster")
-		w.bcaster.Register(nil, w.filterV2Full.MessageChannel())
 	}

-	if w.opts.enableFilterV2LightNode {
-		err := w.filterV2Light.Start(ctx)
+	w.filterLightnode.SetHost(host)
+	if w.opts.enableFilterLightNode {
+		err := w.filterLightnode.Start(ctx)
 		if err != nil {
 			return err
 		}
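Store and filter no longer pull from a MessageChannel; the node registers a broadcaster subscription and passes it to Start. Application code can tap the same broadcaster. A sketch (the Ch field name is assumed from this go-waku version's relay.Subscription type; verify against the vendored source):

	sub := wakuNode.Broadcaster().RegisterForAll()
	defer sub.Unsubscribe()

	for env := range sub.Ch {
		// env is a *protocol.Envelope pushed for every received message
		_ = env
	}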
@@ -386,6 +401,7 @@ func (w *WakuNode) Start(ctx context.Context) error {
 		return err
 	}

+	w.peerExchange.SetHost(host)
 	if w.opts.enablePeerExchange {
 		err := w.peerExchange.Start(ctx)
 		if err != nil {

@@ -393,6 +409,7 @@ func (w *WakuNode) Start(ctx context.Context) error {
 		}
 	}

+	w.rendezvous.SetHost(host)
 	if w.opts.enableRendezvousServer || w.opts.enableRendezvous {
 		err := w.rendezvous.Start(ctx)
 		if err != nil {

@@ -416,9 +433,7 @@ func (w *WakuNode) Stop() {
 		return
 	}

-	w.cancel()
-
-	w.bcaster.Close()
+	w.bcaster.Stop()

 	defer w.connectionNotif.Close()
 	defer w.protocolEventSub.Close()

@@ -432,8 +447,8 @@ func (w *WakuNode) Stop() {
 	w.relay.Stop()
 	w.lightPush.Stop()
 	w.store.Stop()
-	w.filter.Stop()
-	w.filterV2Full.Stop()
+	w.legacyFilter.Stop()
+	w.filterFullnode.Stop()
 	w.peerExchange.Stop()

 	if w.opts.enableDiscV5 {

@@ -448,9 +463,13 @@ func (w *WakuNode) Stop() {
 	w.host.Close()

+	w.cancel()
+
 	w.wg.Wait()

 	close(w.enrChangeCh)
+
+	w.cancel = nil
 }

 // Host returns the libp2p Host used by the WakuNode

@@ -521,17 +540,25 @@ func (w *WakuNode) Store() store.Store {
 	return w.store.(store.Store)
 }

-// Filter is used to access any operation related to Waku Filter protocol
-func (w *WakuNode) Filter() *filter.WakuFilter {
-	if result, ok := w.filter.(*filter.WakuFilter); ok {
+// LegacyFilter is used to access any operation related to Waku LegacyFilter protocol
+func (w *WakuNode) LegacyFilter() *legacy_filter.WakuFilter {
+	if result, ok := w.legacyFilter.(*legacy_filter.WakuFilter); ok {
 		return result
 	}
 	return nil
 }

-// FilterV2 is used to access any operation related to Waku Filter protocol
-func (w *WakuNode) FilterV2() *filterv2.WakuFilterLightnode {
-	if result, ok := w.filterV2Light.(*filterv2.WakuFilterLightnode); ok {
+// FilterFullnode is used to access any operation related to Waku Filter protocol full node feature
+func (w *WakuNode) FilterFullnode() *filter.WakuFilterFullNode {
+	if result, ok := w.filterFullnode.(*filter.WakuFilterFullNode); ok {
+		return result
+	}
+	return nil
+}
+
+// FilterLightnode is used to access any operation related to Waku Filter protocol light node feature
+func (w *WakuNode) FilterLightnode() *filter.WakuFilterLightnode {
+	if result, ok := w.filterLightnode.(*filter.WakuFilterLightnode); ok {
 		return result
 	}
 	return nil
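Call sites pick the filter role explicitly through the renamed accessors. A hedged sketch of subscribing via the light node (topic values are placeholders, and WithAutomaticPeerSelection is assumed from the filter package's options):

	contentFilter := filter.ContentFilter{
		Topic:         relay.DefaultWakuTopic,
		ContentTopics: []string{"/example/1/demo/proto"},
	}

	subDetails, err := wakuNode.FilterLightnode().Subscribe(ctx, contentFilter, filter.WithAutomaticPeerSelection())
	if err != nil {
		return err
	}
	_ = subDetails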
@@ -563,7 +590,7 @@ func (w *WakuNode) PeerExchange() *peer_exchange.WakuPeerExchange {

 // Broadcaster is used to access the message broadcaster that is used to push
 // messages to different protocols
-func (w *WakuNode) Broadcaster() v2.Broadcaster {
+func (w *WakuNode) Broadcaster() relay.Broadcaster {
 	return w.bcaster
 }

@@ -607,13 +634,13 @@ func (w *WakuNode) mountDiscV5() error {
 	}

 	var err error
-	w.discoveryV5, err = discv5.NewDiscoveryV5(w.Host(), w.opts.privKey, w.localNode, w.peerConnector, w.log, discV5Options...)
+	w.discoveryV5, err = discv5.NewDiscoveryV5(w.opts.privKey, w.localNode, w.peerConnector, w.log, discV5Options...)

 	return err
 }

-func (w *WakuNode) startStore(ctx context.Context) error {
-	err := w.store.Start(ctx)
+func (w *WakuNode) startStore(ctx context.Context, sub relay.Subscription) error {
+	err := w.store.Start(ctx, sub)
 	if err != nil {
 		w.log.Error("starting store", zap.Error(err))
 		return err
@@ -6,9 +6,9 @@ package node
 import (
 	"bytes"
 	"context"
-	"encoding/hex"
 	"errors"
 	"github.com/waku-org/go-waku/waku/v2/protocol/rln"
+	"github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/dynamic"
 	"github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/static"
 	r "github.com/waku-org/go-zerokit-rln/rln"
 	"go.uber.org/zap"

@@ -39,33 +39,53 @@ func (w *WakuNode) mountRlnRelay(ctx context.Context) error {
 		return errors.New("relay protocol does not support the configured pubsub topic")
 	}

+	var err error
+	var groupManager rln.GroupManager
+
 	if !w.opts.rlnRelayDynamic {
 		w.log.Info("setting up waku-rln-relay in off-chain mode")

 		// set up rln relay inputs
 		groupKeys, idCredential, err := static.Setup(w.opts.rlnRelayMemIndex)
 		if err != nil {
 			return err
 		}

-		// rlnrelay in off-chain mode with a static group of user
-
-		groupManager, err := static.NewStaticGroupManager(groupKeys, idCredential, w.opts.rlnRelayMemIndex, w.log)
+		groupManager, err = static.NewStaticGroupManager(groupKeys, idCredential, w.opts.rlnRelayMemIndex, w.log)
 		if err != nil {
 			return err
 		}
-	}
-
-	rlnRelay, err := rln.New(w.Relay(), groupManager, w.opts.rlnRelayPubsubTopic, w.opts.rlnRelayContentTopic, w.opts.rlnSpamHandler, w.timesource, w.log)
-	if err != nil {
-		return err
-	}
+	} else {
+		w.log.Info("setting up waku-rln-relay in on-chain mode")
+
+		groupManager, err = dynamic.NewDynamicGroupManager(
+			w.opts.rlnETHClientAddress,
+			w.opts.rlnETHPrivateKey,
+			w.opts.rlnMembershipContractAddress,
+			w.opts.keystorePath,
+			w.opts.keystorePassword,
+			true,
+			w.opts.rlnRegistrationHandler,
+			w.log,
+		)
+		if err != nil {
+			return err
+		}
+	}

-	err = rlnRelay.Start(ctx)
+	rlnRelay, err := rln.New(w.Relay(), groupManager, w.opts.rlnRelayPubsubTopic, w.opts.rlnRelayContentTopic, w.opts.rlnSpamHandler, w.timesource, w.log)
 	if err != nil {
 		return err
 	}

-	w.rlnRelay = rlnRelay
+	err = rlnRelay.Start(ctx)
+	if err != nil {
+		return err
+	}
+
+	w.rlnRelay = rlnRelay
+
+	if !w.opts.rlnRelayDynamic {
 		// check the correct construction of the tree by comparing the calculated root against the expected root
 		// no error should happen as it is already captured in the unit tests
 		root, err := rlnRelay.RLN.GetMerkleRoot()

@@ -81,26 +101,6 @@ func (w *WakuNode) mountRlnRelay(ctx context.Context) error {
 		if !bytes.Equal(expectedRoot[:], root[:]) {
 			return errors.New("root mismatch: something went wrong not in Merkle tree construction")
 		}
-
-		w.log.Debug("the calculated root", zap.String("root", hex.EncodeToString(root[:])))
-	} else {
-		w.log.Info("setting up waku-rln-relay in on-chain mode")
-
-		/*// check if the peer has provided its rln credentials
-		var memKeyPair *r.IdentityCredential
-		if w.opts.rlnRelayIDCommitment != nil && w.opts.rlnRelayIDKey != nil {
-			memKeyPair = &r.IdentityCredential{
-				IDCommitment: *w.opts.rlnRelayIDCommitment,
-				IDSecretHash: *w.opts.rlnRelayIDKey,
-			}
-		}
-
-		// mount the rln relay protocol in the on-chain/dynamic mode
-		var err error
-		w.rlnRelay, err = rln.RlnRelayDynamic(ctx, w.Relay(), w.opts.rlnETHClientAddress, w.opts.rlnETHPrivateKey, w.opts.rlnMembershipContractAddress, memKeyPair, w.opts.rlnRelayMemIndex, w.opts.rlnRelayPubsubTopic, w.opts.rlnRelayContentTopic, w.opts.rlnSpamHandler, w.opts.rlnRegistrationHandler, w.timesource, w.log)
-		if err != nil {
-			return err
-		}*/
 	}

 	w.log.Info("mounted waku RLN relay", zap.String("pubsubTopic", w.opts.rlnRelayPubsubTopic), zap.String("contentTopic", w.opts.rlnRelayContentTopic))
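Whichever group manager is chosen, the mounted relay is used the same way afterwards: a proof is appended to each outgoing message before it is published. A minimal sketch inside the node (the payload is a placeholder):

	msg := &pb.WakuMessage{
		Payload:      []byte("hello"),
		ContentTopic: w.opts.rlnRelayContentTopic,
	}

	// senderEpochTime comes from the node's timesource, matching AppendRLNProof's signature.
	if err := w.rlnRelay.AppendRLNProof(msg, w.timesource.Now()); err != nil {
		return err
	}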
@@ -22,11 +22,12 @@ import (
 	"github.com/libp2p/go-libp2p/p2p/net/connmgr"
 	quic "github.com/libp2p/go-libp2p/p2p/transport/quic"
 	"github.com/libp2p/go-libp2p/p2p/transport/tcp"
+	libp2pwebtransport "github.com/libp2p/go-libp2p/p2p/transport/webtransport"
 	"github.com/multiformats/go-multiaddr"
 	ma "github.com/multiformats/go-multiaddr"
 	manet "github.com/multiformats/go-multiaddr/net"
 	"github.com/waku-org/go-waku/waku/v2/protocol/filter"
-	"github.com/waku-org/go-waku/waku/v2/protocol/filterv2"
+	"github.com/waku-org/go-waku/waku/v2/protocol/legacy_filter"
 	"github.com/waku-org/go-waku/waku/v2/protocol/pb"
 	"github.com/waku-org/go-waku/waku/v2/protocol/store"
 	"github.com/waku-org/go-waku/waku/v2/rendezvous"

@@ -63,15 +64,15 @@ type WakuNodeParameters struct {
 	logger   *zap.Logger
 	logLevel logging.LogLevel

 	noDefaultWakuTopic bool
 	enableRelay        bool
-	enableFilter            bool
-	isFilterFullNode        bool
-	enableFilterV2LightNode bool
-	enableFilterV2FullNode  bool
-	filterOpts   []filter.Option
-	filterV2Opts []filterv2.Option
+	enableLegacyFilter     bool
+	isLegacyFilterFullnode bool
+	enableFilterLightNode  bool
+	enableFilterFullNode   bool
+	legacyFilterOpts []legacy_filter.Option
+	filterOpts       []filter.Option
 	wOpts []pubsub.Option

 	minRelayPeersToPublish int

@@ -100,10 +101,10 @@ type WakuNodeParameters struct {
 	rlnRelayContentTopic string
 	rlnRelayDynamic      bool
 	rlnSpamHandler       func(message *pb.WakuMessage) error
-	rlnRelayIDKey        *[32]byte
-	rlnRelayIDCommitment *[32]byte
 	rlnETHPrivateKey    *ecdsa.PrivateKey
 	rlnETHClientAddress string
+	keystorePath     string
+	keystorePassword string
 	rlnMembershipContractAddress common.Address
 	rlnRegistrationHandler       func(tx *types.Transaction)

@@ -219,6 +220,31 @@ func WithAdvertiseAddresses(advertiseAddrs ...ma.Multiaddr) WakuNodeOption {
 	}
 }

+// WithExternalIP is a WakuNodeOption that allows overriding the advertised external IP used in the waku node with custom value
+func WithExternalIP(ip net.IP) WakuNodeOption {
+	return func(params *WakuNodeParameters) error {
+		params.addressFactory = func(inputAddr []multiaddr.Multiaddr) (addresses []multiaddr.Multiaddr) {
+			component := "/ip4/"
+			if ip.To4() == nil && ip.To16() != nil {
+				component = "/ip6/"
+			}
+
+			hostAddrMA, err := multiaddr.NewMultiaddr(component + ip.String())
+			if err != nil {
+				panic("Could not build external IP")
+			}
+
+			for _, addr := range inputAddr {
+				_, rest := multiaddr.SplitFirst(addr)
+				addresses = append(addresses, hostAddrMA.Encapsulate(rest))
+			}
+
+			return addresses
+		}
+		return nil
+	}
+}
+
 // WithMultiaddress is a WakuNodeOption that configures libp2p to listen on a list of multiaddresses
 func WithMultiaddress(addresses []multiaddr.Multiaddr) WakuNodeOption {
 	return func(params *WakuNodeParameters) error {
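A usage sketch for the new option: each listen address keeps its transport tail (SplitFirst drops the original IP component and Encapsulate prepends the override). The IP value is a placeholder, and WithHostAddress is assumed to exist alongside the other options in this file:

	wakuNode, err := New(
		WithExternalIP(net.ParseIP("203.0.113.7")),
		WithHostAddress(&net.TCPAddr{IP: net.IPv4zero, Port: 60000}),
	)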
@@ -318,31 +344,31 @@ func WithPeerExchange() WakuNodeOption {
 	}
 }

-// WithWakuFilter enables the Waku Filter protocol. This WakuNodeOption
+// WithLegacyWakuFilter enables the legacy Waku Filter protocol. This WakuNodeOption
 // accepts a list of WakuFilter gossipsub options to setup the protocol
-func WithWakuFilter(fullNode bool, filterOpts ...filter.Option) WakuNodeOption {
+func WithLegacyWakuFilter(fullnode bool, filterOpts ...legacy_filter.Option) WakuNodeOption {
 	return func(params *WakuNodeParameters) error {
-		params.enableFilter = true
-		params.isFilterFullNode = fullNode
-		params.filterOpts = filterOpts
+		params.enableLegacyFilter = true
+		params.isLegacyFilterFullnode = fullnode
+		params.legacyFilterOpts = filterOpts
 		return nil
 	}
 }

-// WithWakuFilterV2 enables the Waku Filter V2 protocol for lightnode functionality
-func WithWakuFilterV2LightNode() WakuNodeOption {
+// WithWakuFilterLightNode enables the Waku Filter V2 protocol for lightnode functionality
+func WithWakuFilterLightNode() WakuNodeOption {
 	return func(params *WakuNodeParameters) error {
-		params.enableFilterV2LightNode = true
+		params.enableFilterLightNode = true
 		return nil
 	}
 }

-// WithWakuFilterV2FullNode enables the Waku Filter V2 protocol full node functionality.
+// WithWakuFilterFullNode enables the Waku Filter V2 protocol full node functionality.
 // This WakuNodeOption accepts a list of WakuFilter options to setup the protocol
-func WithWakuFilterV2FullNode(filterOpts ...filterv2.Option) WakuNodeOption {
+func WithWakuFilterFullNode(filterOpts ...filter.Option) WakuNodeOption {
 	return func(params *WakuNodeParameters) error {
-		params.enableFilterV2FullNode = true
-		params.filterV2Opts = filterOpts
+		params.enableFilterFullNode = true
+		params.filterOpts = filterOpts
 		return nil
 	}
 }
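Taken together, the renamed options choose the filter flavor at construction time. A sketch of a node that serves filter V2 as a full node while still answering legacy filter clients (illustrative only):

	wakuNode, err := New(
		WithWakuFilterFullNode(),   // filter V2, full-node role
		WithLegacyWakuFilter(true), // legacy filter, as full node
	)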
@@ -473,6 +499,7 @@ var DefaultLibP2POptions = []libp2p.Option{
 	libp2p.ChainOptions(
 		libp2p.Transport(tcp.NewTCPTransport),
 		libp2p.Transport(quic.NewTransport),
+		libp2p.Transport(libp2pwebtransport.New),
 	),
 	libp2p.UserAgent(userAgent),
 	libp2p.ChainOptions(
@@ -25,29 +25,20 @@ func WithStaticRLNRelay(pubsubTopic string, contentTopic string, memberIndex r.M
 	}
 }

-type MembershipCredentials struct {
-	Contract common.Address       `json:"contract"`
-	Keypair  *r.MembershipKeyPair `json:"membershipKeyPair"`
-	Index    r.MembershipIndex    `json:"rlnIndex"`
-}
-
-// WithStaticRLNRelay enables the Waku V2 RLN protocol in onchain mode.
+// WithDynamicRLNRelay enables the Waku V2 RLN protocol in onchain mode.
 // Requires the `gowaku_rln` build constraint (or the env variable RLN=true if building go-waku)
-func WithDynamicRLNRelay(pubsubTopic string, contentTopic string, membershipCredentials MembershipCredentials, spamHandler rln.SpamHandler, ethClientAddress string, ethPrivateKey *ecdsa.PrivateKey, registrationHandler rln.RegistrationHandler) WakuNodeOption {
+func WithDynamicRLNRelay(pubsubTopic string, contentTopic string, keystorePath string, keystorePassword string, membershipContract common.Address, spamHandler rln.SpamHandler, ethClientAddress string, ethPrivateKey *ecdsa.PrivateKey, registrationHandler rln.RegistrationHandler) WakuNodeOption {
 	return func(params *WakuNodeParameters) error {
 		params.enableRLN = true
 		params.rlnRelayDynamic = true
-		params.rlnRelayMemIndex = membershipCredentials.Index
-		if membershipCredentials.Keypair != nil {
-			params.rlnRelayIDKey = &membershipCredentials.Keypair.IDKey
-			params.rlnRelayIDCommitment = &membershipCredentials.Keypair.IDCommitment
-		}
+		params.keystorePassword = keystorePassword
+		params.keystorePath = keystorePath
 		params.rlnRelayPubsubTopic = pubsubTopic
 		params.rlnRelayContentTopic = contentTopic
 		params.rlnSpamHandler = spamHandler
 		params.rlnETHClientAddress = ethClientAddress
 		params.rlnETHPrivateKey = ethPrivateKey
-		params.rlnMembershipContractAddress = membershipCredentials.Contract
+		params.rlnMembershipContractAddress = membershipContract
 		params.rlnRegistrationHandler = registrationHandler
 		return nil
 	}
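Dynamic RLN credentials now come from an encrypted keystore on disk instead of an in-memory MembershipCredentials value. A hedged call sketch (every argument value here is a placeholder):

	opt := WithDynamicRLNRelay(
		"/waku/2/default-waku/proto", // pubsubTopic
		"/toy-chat/2/example/proto",  // contentTopic
		"/path/to/keystore.json",     // keystorePath
		"keystore-password",          // keystorePassword
		common.HexToAddress("0x0000000000000000000000000000000000000000"), // membershipContract
		nil, // spamHandler
		"wss://eth-node.example.org", // ethClientAddress
		nil, // ethPrivateKey
		nil, // registrationHandler
	)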
@@ -1,4 +1,4 @@
-package utils
+package enr

 import (
 	"encoding/binary"

@@ -9,6 +9,7 @@ import (
 	"github.com/ethereum/go-ethereum/p2p/enr"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/multiformats/go-multiaddr"
+	"github.com/waku-org/go-waku/waku/v2/utils"
 )

 // WakuENRField is the name of the ENR field that contains information about which protocols are supported by the node

@@ -18,6 +19,10 @@ const WakuENRField = "waku2"
 // already available ENR fields (i.e. in the case of websocket connections)
 const MultiaddrENRField = "multiaddrs"

+const ShardingIndicesListEnrField = "rs"
+
+const ShardingBitVectorEnrField = "rsv"
+
 // WakuEnrBitfield is an 8-bit flag field to indicate Waku capabilities. Only the 4 LSBs are currently defined according to RFC31 (https://rfc.vac.dev/spec/31/).
 type WakuEnrBitfield = uint8

@@ -46,7 +51,7 @@ func NewWakuEnrBitfield(lightpush, filter, store, relay bool) WakuEnrBitfield {

 // EnodeToMultiaddress converts an enode into a multiaddress
 func enodeToMultiAddr(node *enode.Node) (multiaddr.Multiaddr, error) {
-	pubKey := EcdsaPubKeyToSecp256k1PublicKey(node.Pubkey())
+	pubKey := utils.EcdsaPubKeyToSecp256k1PublicKey(node.Pubkey())
 	peerID, err := peer.IDFromPublicKey(pubKey)
 	if err != nil {
 		return nil, err

@@ -57,7 +62,7 @@ func enodeToMultiAddr(node *enode.Node) (multiaddr.Multiaddr, error) {

 // Multiaddress is used to extract all the multiaddresses that are part of a ENR record
 func Multiaddress(node *enode.Node) (peer.ID, []multiaddr.Multiaddr, error) {
-	pubKey := EcdsaPubKeyToSecp256k1PublicKey(node.Pubkey())
+	pubKey := utils.EcdsaPubKeyToSecp256k1PublicKey(node.Pubkey())
 	peerID, err := peer.IDFromPublicKey(pubKey)
 	if err != nil {
 		return "", nil, err

vendor/github.com/waku-org/go-waku/waku/v2/protocol/enr/shards.go (generated, vendored; new file, 104 lines)
@@ -0,0 +1,104 @@
+package enr
+
+import (
+	"github.com/ethereum/go-ethereum/p2p/enode"
+	"github.com/ethereum/go-ethereum/p2p/enr"
+	"github.com/waku-org/go-waku/waku/v2/protocol"
+)
+
+func SetWakuRelayShardingIndicesList(localnode *enode.LocalNode, rs protocol.RelayShards) error {
+	value, err := rs.IndicesList()
+	if err != nil {
+		return err
+	}
+
+	localnode.Set(enr.WithEntry(ShardingIndicesListEnrField, value))
+
+	return nil
+}
+
+func SetWakuRelayShardingBitVector(localnode *enode.LocalNode, rs protocol.RelayShards) error {
+	localnode.Set(enr.WithEntry(ShardingBitVectorEnrField, rs.BitVector()))
+
+	return nil
+}
+
+func SetWakuRelaySharding(localnode *enode.LocalNode, rs protocol.RelayShards) error {
+	if len(rs.Indices) >= 64 {
+		return SetWakuRelayShardingBitVector(localnode, rs)
+	} else {
+		return SetWakuRelayShardingIndicesList(localnode, rs)
+	}
+}
+
+// ENR record accessors
+
+func RelayShardingIndicesList(localnode *enode.LocalNode) (*protocol.RelayShards, error) {
+	var field []byte
+	if err := localnode.Node().Record().Load(enr.WithEntry(ShardingIndicesListEnrField, field)); err != nil {
+		return nil, nil
+	}
+
+	res, err := protocol.FromIndicesList(field)
+	if err != nil {
+		return nil, err
+	}
+
+	return &res, nil
+}
+
+func RelayShardingBitVector(localnode *enode.LocalNode) (*protocol.RelayShards, error) {
+	var field []byte
+	if err := localnode.Node().Record().Load(enr.WithEntry(ShardingBitVectorEnrField, field)); err != nil {
+		return nil, nil
+	}
+
+	res, err := protocol.FromBitVector(field)
+	if err != nil {
+		return nil, err
+	}
+
+	return &res, nil
+}
+
+func RelaySharding(localnode *enode.LocalNode) (*protocol.RelayShards, error) {
+	res, err := RelayShardingIndicesList(localnode)
+	if err != nil {
+		return nil, err
+	}
+
+	if res != nil {
+		return res, nil
+	}
+
+	return RelayShardingBitVector(localnode)
+}
+
+// Utils
+
+func ContainsShard(localnode *enode.LocalNode, cluster uint16, index uint16) bool {
+	if index > protocol.MaxShardIndex {
+		return false
+	}
+
+	rs, err := RelaySharding(localnode)
+	if err != nil {
+		return false
+	}
+
+	return rs.Contains(cluster, index)
+}
+
+func ContainsShardWithNsTopic(localnode *enode.LocalNode, topic protocol.NamespacedPubsubTopic) bool {
+	if topic.Kind() != protocol.StaticSharding {
+		return false
+	}
+	shardTopic := topic.(protocol.StaticShardingPubsubTopic)
+	return ContainsShard(localnode, shardTopic.Cluster(), shardTopic.Shard())
+}
+
+func ContainsShardTopic(localnode *enode.LocalNode, topic string) bool {
+	shardTopic, err := protocol.ToShardedPubsubTopic(topic)
+	if err != nil {
+		return false
+	}
+	return ContainsShardWithNsTopic(localnode, shardTopic)
+}
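A short sketch of round-tripping shard information through a local ENR record with the new helpers (NewRelayShards and the cluster/shard values are assumptions about the accompanying protocol package, which is not shown in this diff):

	rs, err := protocol.NewRelayShards(16, 0, 1) // cluster 16, shards 0 and 1
	if err != nil {
		return err
	}

	// Stores an indices list for fewer than 64 shards, a bit vector otherwise.
	if err := enr.SetWakuRelaySharding(localNode, rs); err != nil {
		return err
	}

	ok := enr.ContainsShard(localNode, 16, 1) // membership check reads it back
	_ = ok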
@@ -1,4 +1,4 @@
-package filterv2
+package filter

 import (
 	"context"

@@ -15,11 +15,11 @@ import (
 	libp2pProtocol "github.com/libp2p/go-libp2p/core/protocol"
 	"github.com/libp2p/go-msgio/pbio"
 	"github.com/waku-org/go-waku/logging"
-	v2 "github.com/waku-org/go-waku/waku/v2"
 	"github.com/waku-org/go-waku/waku/v2/metrics"
 	"github.com/waku-org/go-waku/waku/v2/protocol"
-	"github.com/waku-org/go-waku/waku/v2/protocol/filterv2/pb"
+	"github.com/waku-org/go-waku/waku/v2/protocol/filter/pb"
 	wpb "github.com/waku-org/go-waku/waku/v2/protocol/pb"
+	"github.com/waku-org/go-waku/waku/v2/protocol/relay"
 	"github.com/waku-org/go-waku/waku/v2/timesource"
 	"go.opencensus.io/tag"
 	"go.uber.org/zap"

@@ -37,7 +37,7 @@ type WakuFilterLightnode struct {
 	cancel context.CancelFunc
 	ctx    context.Context
 	h      host.Host
-	broadcaster v2.Broadcaster
+	broadcaster relay.Broadcaster
 	timesource  timesource.Timesource
 	wg          *sync.WaitGroup
 	log         *zap.Logger

@@ -55,17 +55,21 @@ type WakuFilterPushResult struct {
 }

-// NewWakuFilterLightnode returns a new instance of Waku Filter struct setup according to the chosen parameter and options
-func NewWakuFilterLightnode(host host.Host, broadcaster v2.Broadcaster, timesource timesource.Timesource, log *zap.Logger) *WakuFilterLightnode {
+// NewWakuFilterLightnode returns a new instance of Waku Filter struct setup according to the chosen parameter and options
+func NewWakuFilterLightnode(broadcaster relay.Broadcaster, timesource timesource.Timesource, log *zap.Logger) *WakuFilterLightnode {
 	wf := new(WakuFilterLightnode)
 	wf.log = log.Named("filterv2-lightnode")
 	wf.broadcaster = broadcaster
 	wf.timesource = timesource
 	wf.wg = &sync.WaitGroup{}
-	wf.h = host

 	return wf
 }

+// Sets the host to be able to mount or consume a protocol
+func (wf *WakuFilterLightnode) SetHost(h host.Host) {
+	wf.h = h
+}
+
 func (wf *WakuFilterLightnode) Start(ctx context.Context) error {
 	wf.wg.Wait() // Wait for any goroutines to stop

@@ -78,11 +82,11 @@ func (wf *WakuFilterLightnode) Start(ctx context.Context) error {
 	ctx, cancel := context.WithCancel(ctx)
 	wf.cancel = cancel
 	wf.ctx = ctx
-	wf.subscriptions = NewSubscriptionMap()
+	wf.subscriptions = NewSubscriptionMap(wf.log)

 	wf.h.SetStreamHandlerMatch(FilterPushID_v20beta1, protocol.PrefixTextMatch(string(FilterPushID_v20beta1)), wf.onRequest(ctx))

-	wf.log.Info("filter protocol (light) started")
+	wf.log.Info("filter-push protocol started")

 	return nil
 }

@@ -109,15 +113,30 @@ func (wf *WakuFilterLightnode) onRequest(ctx context.Context) func(s network.Str
 		defer s.Close()
 		logger := wf.log.With(logging.HostID("peer", s.Conn().RemotePeer()))

+		if !wf.subscriptions.IsSubscribedTo(s.Conn().RemotePeer()) {
+			logger.Warn("received message push from unknown peer", logging.HostID("peerID", s.Conn().RemotePeer()))
+			metrics.RecordFilterError(ctx, "unknown_peer_messagepush")
+			return
+		}
+
 		reader := pbio.NewDelimitedReader(s, math.MaxInt32)

 		messagePush := &pb.MessagePushV2{}
 		err := reader.ReadMsg(messagePush)
 		if err != nil {
 			logger.Error("reading message push", zap.Error(err))
+			metrics.RecordFilterError(ctx, "decode_rpc_failure")
 			return
 		}

+		if !wf.subscriptions.Has(s.Conn().RemotePeer(), messagePush.PubsubTopic, messagePush.WakuMessage.ContentTopic) {
+			logger.Warn("received messagepush with invalid subscription parameters", logging.HostID("peerID", s.Conn().RemotePeer()), zap.String("topic", messagePush.PubsubTopic), zap.String("contentTopic", messagePush.WakuMessage.ContentTopic))
+			metrics.RecordFilterError(ctx, "invalid_subscription_message")
+			return
+		}
+
+		metrics.RecordFilterMessage(ctx, "PushMessage", 1)
+
 		wf.notify(s.Conn().RemotePeer(), messagePush.PubsubTopic, messagePush.WakuMessage)

 		logger.Info("received message push")

@@ -137,12 +156,14 @@ func (wf *WakuFilterLightnode) notify(remotePeerID peer.ID, pubsubTopic string,
 func (wf *WakuFilterLightnode) request(ctx context.Context, params *FilterSubscribeParameters, reqType pb.FilterSubscribeRequest_FilterSubscribeType, contentFilter ContentFilter) error {
 	err := wf.h.Connect(ctx, wf.h.Peerstore().PeerInfo(params.selectedPeer))
 	if err != nil {
+		metrics.RecordFilterError(ctx, "dial_failure")
 		return err
 	}

 	var conn network.Stream
 	conn, err = wf.h.NewStream(ctx, params.selectedPeer, FilterSubscribeID_v20beta1)
 	if err != nil {
+		metrics.RecordFilterError(ctx, "dial_failure")
 		return err
 	}
 	defer conn.Close()

@@ -160,6 +181,7 @@ func (wf *WakuFilterLightnode) request(ctx context.Context, params *FilterSubscr
 	wf.log.Debug("sending FilterSubscribeRequest", zap.Stringer("request", request))
 	err = writer.WriteMsg(request)
 	if err != nil {
+		metrics.RecordFilterError(ctx, "write_request_failure")
 		wf.log.Error("sending FilterSubscribeRequest", zap.Error(err))
 		return err
 	}

@@ -168,10 +190,19 @@ func (wf *WakuFilterLightnode) request(ctx context.Context, params *FilterSubscr
 	err = reader.ReadMsg(filterSubscribeResponse)
 	if err != nil {
 		wf.log.Error("receiving FilterSubscribeResponse", zap.Error(err))
+		metrics.RecordFilterError(ctx, "decode_rpc_failure")
 		return err
 	}

+	if filterSubscribeResponse.RequestId != request.RequestId {
+		wf.log.Error("requestId mismatch", zap.String("expected", request.RequestId), zap.String("received", filterSubscribeResponse.RequestId))
+		metrics.RecordFilterError(ctx, "request_id_mismatch")
+		err := NewFilterError(300, "request_id_mismatch")
+		return &err
+	}
+
 	if filterSubscribeResponse.StatusCode != http.StatusOK {
+		metrics.RecordFilterError(ctx, "error_response")
 		err := NewFilterError(int(filterSubscribeResponse.StatusCode), filterSubscribeResponse.StatusDesc)
 		return &err
 	}

@@ -204,6 +235,7 @@ func (wf *WakuFilterLightnode) Subscribe(ctx context.Context, contentFilter Cont
 	}

 	if params.selectedPeer == "" {
+		metrics.RecordFilterError(ctx, "peer_not_found_failure")
 		return nil, ErrNoPeersAvailable
 	}

@@ -217,7 +249,7 @@ func (wf *WakuFilterLightnode) Subscribe(ctx context.Context, contentFilter Cont

 // FilterSubscription is used to obtain an object from which you could receive messages received via filter protocol
 func (wf *WakuFilterLightnode) FilterSubscription(peerID peer.ID, contentFilter ContentFilter) (*SubscriptionDetails, error) {
-	if !wf.subscriptions.Has(peerID, contentFilter.Topic, contentFilter.ContentTopics) {
+	if !wf.subscriptions.Has(peerID, contentFilter.Topic, contentFilter.ContentTopics...) {
 		return nil, errors.New("subscription does not exist")
 	}

@@ -247,7 +279,24 @@ func (wf *WakuFilterLightnode) Ping(ctx context.Context, peerID peer.ID) error {
 }

 func (wf *WakuFilterLightnode) IsSubscriptionAlive(ctx context.Context, subscription *SubscriptionDetails) error {
-	return wf.Ping(ctx, subscription.peerID)
+	return wf.Ping(ctx, subscription.PeerID)
+}
+
+func (wf *WakuFilterLightnode) Subscriptions() []*SubscriptionDetails {
+	wf.subscriptions.RLock()
+	defer wf.subscriptions.RUnlock()
+
+	var output []*SubscriptionDetails
+
+	for _, peerSubscription := range wf.subscriptions.items {
+		for _, subscriptionPerTopic := range peerSubscription.subscriptionsPerTopic {
+			for _, subscriptionDetail := range subscriptionPerTopic {
+				output = append(output, subscriptionDetail)
+			}
+		}
+	}
+
+	return output
 }

 // Unsubscribe is used to stop receiving messages from a peer that match a content filter

@@ -305,13 +354,13 @@ func (wf *WakuFilterLightnode) Unsubscribe(ctx context.Context, contentFilter Co
|
||||||
// Unsubscribe is used to stop receiving messages from a peer that match a content filter
|
// Unsubscribe is used to stop receiving messages from a peer that match a content filter
|
||||||
func (wf *WakuFilterLightnode) UnsubscribeWithSubscription(ctx context.Context, sub *SubscriptionDetails, opts ...FilterUnsubscribeOption) (<-chan WakuFilterPushResult, error) {
|
func (wf *WakuFilterLightnode) UnsubscribeWithSubscription(ctx context.Context, sub *SubscriptionDetails, opts ...FilterUnsubscribeOption) (<-chan WakuFilterPushResult, error) {
|
||||||
var contentTopics []string
|
var contentTopics []string
|
||||||
for k := range sub.contentTopics {
|
for k := range sub.ContentTopics {
|
||||||
contentTopics = append(contentTopics, k)
|
contentTopics = append(contentTopics, k)
|
||||||
}
|
}
|
||||||
|
|
||||||
opts = append(opts, Peer(sub.peerID))
|
opts = append(opts, Peer(sub.PeerID))
|
||||||
|
|
||||||
return wf.Unsubscribe(ctx, ContentFilter{Topic: sub.pubsubTopic, ContentTopics: contentTopics}, opts...)
|
return wf.Unsubscribe(ctx, ContentFilter{Topic: sub.PubsubTopic, ContentTopics: contentTopics}, opts...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnsubscribeAll is used to stop receiving messages from peer(s). It does not close subscriptions
|
// UnsubscribeAll is used to stop receiving messages from peer(s). It does not close subscriptions
|
|
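Note on the hunk above: the lightnode now refuses to trust a FilterSubscribeResponse whose requestId does not echo the request, before it even looks at the status code. A minimal standalone sketch of that validation order; the Response type and helper names here are illustrative stand-ins, not the library's API:

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// Response mirrors the two fields the lightnode checks after the
// round-trip (field names assumed for illustration).
type Response struct {
	RequestId  string
	StatusCode uint32
	StatusDesc string
}

// validate applies the same two checks in the same order: the response
// must echo the request ID, then the status must be 200 OK.
func validate(reqID string, resp Response) error {
	if resp.RequestId != reqID {
		return errors.New("request_id_mismatch")
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("error response %d: %s", resp.StatusCode, resp.StatusDesc)
	}
	return nil
}

func main() {
	resp := Response{RequestId: "abc", StatusCode: http.StatusOK}
	fmt.Println(validate("abc", resp)) // <nil>
	fmt.Println(validate("xyz", resp)) // request_id_mismatch
}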
@@ -1,4 +1,4 @@
-package filterv2
+package filter
 
 import (
 	"fmt"
@@ -1,4 +1,4 @@
-package filterv2
+package filter
 
 import (
 	"context"
@@ -1,3 +1,3 @@
 package pb
 
-//go:generate protoc -I./../../pb/. -I. --go_opt=paths=source_relative --go_opt=Mwaku_filter.proto=github.com/waku-org/go-waku/waku/v2/protocol/filter/pb --go_opt=Mwaku_message.proto=github.com/waku-org/go-waku/waku/v2/protocol/pb --go_out=. ./waku_filter.proto
+//go:generate protoc -I./../../pb/. -I. --go_opt=paths=source_relative --go_opt=Mwaku_filter_v2.proto=github.com/waku-org/go-waku/waku/v2/protocol/filter/pb --go_opt=Mwaku_message.proto=github.com/waku-org/go-waku/waku/v2/protocol/pb --go_out=. ./waku_filter_v2.proto
@@ -1,4 +1,4 @@
-package filterv2
+package filter
 
 import (
 	"context"
@@ -7,6 +7,7 @@ import (
 	"math"
 	"net/http"
 	"sync"
+	"time"
 
 	"github.com/libp2p/go-libp2p/core/host"
 	"github.com/libp2p/go-libp2p/core/network"
@@ -14,11 +15,12 @@ import (
 	libp2pProtocol "github.com/libp2p/go-libp2p/core/protocol"
 	"github.com/libp2p/go-msgio/pbio"
 	"github.com/waku-org/go-waku/logging"
-	v2 "github.com/waku-org/go-waku/waku/v2"
 	"github.com/waku-org/go-waku/waku/v2/metrics"
 	"github.com/waku-org/go-waku/waku/v2/protocol"
-	"github.com/waku-org/go-waku/waku/v2/protocol/filterv2/pb"
+	"github.com/waku-org/go-waku/waku/v2/protocol/filter/pb"
+	"github.com/waku-org/go-waku/waku/v2/protocol/relay"
 	"github.com/waku-org/go-waku/waku/v2/timesource"
+	"go.opencensus.io/stats"
 	"go.opencensus.io/tag"
 	"go.uber.org/zap"
 )
@@ -30,10 +32,10 @@ const FilterSubscribeID_v20beta1 = libp2pProtocol.ID("/vac/waku/filter-subscribe
 const peerHasNoSubscription = "peer has no subscriptions"
 
 type (
-	WakuFilterFull struct {
+	WakuFilterFullNode struct {
 		cancel context.CancelFunc
 		h      host.Host
-		msgC   chan *protocol.Envelope
+		msgSub relay.Subscription
 		wg     *sync.WaitGroup
 		log    *zap.Logger
 
@@ -44,8 +46,8 @@ type (
 )
 
 // NewWakuFilterFullnode returns a new instance of Waku Filter struct setup according to the chosen parameter and options
-func NewWakuFilterFullnode(host host.Host, broadcaster v2.Broadcaster, timesource timesource.Timesource, log *zap.Logger, opts ...Option) *WakuFilterFull {
-	wf := new(WakuFilterFull)
+func NewWakuFilterFullnode(timesource timesource.Timesource, log *zap.Logger, opts ...Option) *WakuFilterFullNode {
+	wf := new(WakuFilterFullNode)
 	wf.log = log.Named("filterv2-fullnode")
 
 	params := new(FilterParameters)
@@ -56,14 +58,18 @@ func NewWakuFilterFullnode(host host.Host, broadcaster v2.Broadcaster, timesourc
 	}
 
 	wf.wg = &sync.WaitGroup{}
-	wf.h = host
 	wf.subscriptions = NewSubscribersMap(params.Timeout)
 	wf.maxSubscriptions = params.MaxSubscribers
 
 	return wf
 }
 
-func (wf *WakuFilterFull) Start(ctx context.Context) error {
+// Sets the host to be able to mount or consume a protocol
+func (wf *WakuFilterFullNode) SetHost(h host.Host) {
+	wf.h = h
+}
+
+func (wf *WakuFilterFullNode) Start(ctx context.Context, sub relay.Subscription) error {
 	wf.wg.Wait() // Wait for any goroutines to stop
 
 	ctx, err := tag.New(ctx, tag.Insert(metrics.KeyType, "filter"))
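A pattern worth noting in the hunk above: construction no longer takes a host or a broadcaster. The host is injected later via SetHost, and Start consumes a relay subscription created by the caller instead of allocating an internal channel. A rough sketch of the same two-phase wiring, with every type reduced to a stand-in (none of these names are go-waku's API):

package main

import (
	"fmt"
	"sync"
)

// Host and Subscription are stand-ins for libp2p's host.Host and
// go-waku's relay.Subscription; assumptions for this sketch only.
type Host struct{ ID string }
type Subscription struct{ Ch chan string }

// Service mirrors the refactored shape: nothing host-related happens
// at construction time.
type Service struct {
	h  *Host
	wg sync.WaitGroup
}

func NewService() *Service { return &Service{} }

// SetHost injects the host after construction, so a node can build all
// of its protocols first and wire them to one host afterwards.
func (s *Service) SetHost(h *Host) { s.h = h }

// Start consumes a subscription handed in by the caller.
func (s *Service) Start(sub Subscription) error {
	if s.h == nil {
		return fmt.Errorf("host not set")
	}
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		for m := range sub.Ch {
			fmt.Println(s.h.ID, "handling", m)
		}
	}()
	return nil
}

func main() {
	svc := NewService()
	svc.SetHost(&Host{ID: "peer-1"})

	ch := make(chan string, 1)
	ch <- "envelope"
	close(ch)

	if err := svc.Start(Subscription{Ch: ch}); err != nil {
		fmt.Println(err)
		return
	}
	svc.wg.Wait()
}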
@@ -77,17 +83,16 @@ func (wf *WakuFilterFull) Start(ctx context.Context) error {
 	wf.h.SetStreamHandlerMatch(FilterSubscribeID_v20beta1, protocol.PrefixTextMatch(string(FilterSubscribeID_v20beta1)), wf.onRequest(ctx))
 
 	wf.cancel = cancel
-	wf.msgC = make(chan *protocol.Envelope, 1024)
+	wf.msgSub = sub
 
 	wf.wg.Add(1)
 	go wf.filterListener(ctx)
 
-	wf.log.Info("filter protocol (full) started")
+	wf.log.Info("filter-subscriber protocol started")
 
 	return nil
 }
 
-func (wf *WakuFilterFull) onRequest(ctx context.Context) func(s network.Stream) {
+func (wf *WakuFilterFullNode) onRequest(ctx context.Context) func(s network.Stream) {
 	return func(s network.Stream) {
 		defer s.Close()
 		logger := wf.log.With(logging.HostID("peer", s.Conn().RemotePeer()))
@@ -97,28 +102,33 @@ func (wf *WakuFilterFull) onRequest(ctx context.Context) func(s network.Stream)
 		subscribeRequest := &pb.FilterSubscribeRequest{}
 		err := reader.ReadMsg(subscribeRequest)
 		if err != nil {
+			metrics.RecordFilterError(ctx, "decode_rpc_failure")
 			logger.Error("reading request", zap.Error(err))
 			return
 		}
 
 		logger = logger.With(zap.String("requestID", subscribeRequest.RequestId))
 
+		start := time.Now()
+
 		switch subscribeRequest.FilterSubscribeType {
 		case pb.FilterSubscribeRequest_SUBSCRIBE:
-			wf.subscribe(s, logger, subscribeRequest)
+			wf.subscribe(ctx, s, logger, subscribeRequest)
 		case pb.FilterSubscribeRequest_SUBSCRIBER_PING:
-			wf.ping(s, logger, subscribeRequest)
+			wf.ping(ctx, s, logger, subscribeRequest)
 		case pb.FilterSubscribeRequest_UNSUBSCRIBE:
-			wf.unsubscribe(s, logger, subscribeRequest)
+			wf.unsubscribe(ctx, s, logger, subscribeRequest)
 		case pb.FilterSubscribeRequest_UNSUBSCRIBE_ALL:
-			wf.unsubscribeAll(s, logger, subscribeRequest)
+			wf.unsubscribeAll(ctx, s, logger, subscribeRequest)
 		}
 
+		metrics.RecordFilterRequest(ctx, subscribeRequest.FilterSubscribeType.String(), time.Since(start))
+
 		logger.Info("received request")
 	}
 }
 
-func reply(s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequest, statusCode int, description ...string) {
+func reply(ctx context.Context, s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequest, statusCode int, description ...string) {
 	response := &pb.FilterSubscribeResponse{
 		RequestId:  request.RequestId,
 		StatusCode: uint32(statusCode),
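The dispatcher above now brackets each request in a start := time.Now() / time.Since(start) pair and records the elapsed time per request type. The same measurement pattern in isolation; the handler body and metric sink are stand-ins:

package main

import (
	"fmt"
	"time"
)

// recordDuration stands in for the metrics call; it just prints here.
func recordDuration(requestType string, d time.Duration) {
	fmt.Printf("%s took %v\n", requestType, d)
}

func handle(requestType string) {
	start := time.Now()

	// ... dispatch on the request type and do the actual work ...
	time.Sleep(10 * time.Millisecond) // simulated work

	// Record elapsed wall-clock time once the handler finishes.
	recordDuration(requestType, time.Since(start))
}

func main() {
	handle("SUBSCRIBE")
}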
@@ -133,37 +143,38 @@ func reply(s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequ
 	writer := pbio.NewDelimitedWriter(s)
 	err := writer.WriteMsg(response)
 	if err != nil {
+		metrics.RecordFilterError(ctx, "write_response_failure")
 		logger.Error("sending response", zap.Error(err))
 	}
 }
 
-func (wf *WakuFilterFull) ping(s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequest) {
+func (wf *WakuFilterFullNode) ping(ctx context.Context, s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequest) {
 	exists := wf.subscriptions.Has(s.Conn().RemotePeer())
 
 	if exists {
-		reply(s, logger, request, http.StatusOK)
+		reply(ctx, s, logger, request, http.StatusOK)
 	} else {
-		reply(s, logger, request, http.StatusNotFound, peerHasNoSubscription)
+		reply(ctx, s, logger, request, http.StatusNotFound, peerHasNoSubscription)
 	}
 }
 
-func (wf *WakuFilterFull) subscribe(s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequest) {
+func (wf *WakuFilterFullNode) subscribe(ctx context.Context, s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequest) {
 	if request.PubsubTopic == "" {
-		reply(s, logger, request, http.StatusBadRequest, "pubsubtopic can't be empty")
+		reply(ctx, s, logger, request, http.StatusBadRequest, "pubsubtopic can't be empty")
 		return
 	}
 
 	if len(request.ContentTopics) == 0 {
-		reply(s, logger, request, http.StatusBadRequest, "at least one contenttopic should be specified")
+		reply(ctx, s, logger, request, http.StatusBadRequest, "at least one contenttopic should be specified")
 		return
 	}
 
 	if len(request.ContentTopics) > MaxContentTopicsPerRequest {
-		reply(s, logger, request, http.StatusBadRequest, fmt.Sprintf("exceeds maximum content topics: %d", MaxContentTopicsPerRequest))
+		reply(ctx, s, logger, request, http.StatusBadRequest, fmt.Sprintf("exceeds maximum content topics: %d", MaxContentTopicsPerRequest))
 	}
 
 	if wf.subscriptions.Count() >= wf.maxSubscriptions {
-		reply(s, logger, request, http.StatusServiceUnavailable, "node has reached maximum number of subscriptions")
+		reply(ctx, s, logger, request, http.StatusServiceUnavailable, "node has reached maximum number of subscriptions")
 		return
 	}
 
@@ -176,49 +187,53 @@ func (wf *WakuFilterFull) subscribe(s network.Stream, logger *zap.Logger, reques
 		}
 
 		if ctTotal+len(request.ContentTopics) > MaxCriteriaPerSubscription {
-			reply(s, logger, request, http.StatusServiceUnavailable, "peer has reached maximum number of filter criteria")
+			reply(ctx, s, logger, request, http.StatusServiceUnavailable, "peer has reached maximum number of filter criteria")
 			return
 		}
 	}
 
 	wf.subscriptions.Set(peerID, request.PubsubTopic, request.ContentTopics)
 
-	reply(s, logger, request, http.StatusOK)
+	stats.Record(ctx, metrics.FilterSubscriptions.M(int64(wf.subscriptions.Count())))
+
+	reply(ctx, s, logger, request, http.StatusOK)
 }
 
-func (wf *WakuFilterFull) unsubscribe(s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequest) {
+func (wf *WakuFilterFullNode) unsubscribe(ctx context.Context, s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequest) {
 	if request.PubsubTopic == "" {
-		reply(s, logger, request, http.StatusBadRequest, "pubsubtopic can't be empty")
+		reply(ctx, s, logger, request, http.StatusBadRequest, "pubsubtopic can't be empty")
 		return
 	}
 
 	if len(request.ContentTopics) == 0 {
-		reply(s, logger, request, http.StatusBadRequest, "at least one contenttopic should be specified")
+		reply(ctx, s, logger, request, http.StatusBadRequest, "at least one contenttopic should be specified")
 		return
 	}
 
 	if len(request.ContentTopics) > MaxContentTopicsPerRequest {
-		reply(s, logger, request, http.StatusBadRequest, fmt.Sprintf("exceeds maximum content topics: %d", MaxContentTopicsPerRequest))
+		reply(ctx, s, logger, request, http.StatusBadRequest, fmt.Sprintf("exceeds maximum content topics: %d", MaxContentTopicsPerRequest))
 	}
 
 	err := wf.subscriptions.Delete(s.Conn().RemotePeer(), request.PubsubTopic, request.ContentTopics)
 	if err != nil {
-		reply(s, logger, request, http.StatusNotFound, peerHasNoSubscription)
+		reply(ctx, s, logger, request, http.StatusNotFound, peerHasNoSubscription)
 	} else {
-		reply(s, logger, request, http.StatusOK)
+		stats.Record(ctx, metrics.FilterSubscriptions.M(int64(wf.subscriptions.Count())))
+		reply(ctx, s, logger, request, http.StatusOK)
 	}
 }
 
-func (wf *WakuFilterFull) unsubscribeAll(s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequest) {
+func (wf *WakuFilterFullNode) unsubscribeAll(ctx context.Context, s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequest) {
 	err := wf.subscriptions.DeleteAll(s.Conn().RemotePeer())
 	if err != nil {
-		reply(s, logger, request, http.StatusNotFound, peerHasNoSubscription)
+		reply(ctx, s, logger, request, http.StatusNotFound, peerHasNoSubscription)
 	} else {
-		reply(s, logger, request, http.StatusOK)
+		stats.Record(ctx, metrics.FilterSubscriptions.M(int64(wf.subscriptions.Count())))
+		reply(ctx, s, logger, request, http.StatusOK)
 	}
 }
 
-func (wf *WakuFilterFull) filterListener(ctx context.Context) {
+func (wf *WakuFilterFullNode) filterListener(ctx context.Context) {
 	defer wf.wg.Done()
 
 	// This function is invoked for each message received
@@ -238,24 +253,28 @@ func (wf *WakuFilterFull) filterListener(ctx context.Context) {
 			wf.wg.Add(1)
 			go func(subscriber peer.ID) {
 				defer wf.wg.Done()
+				start := time.Now()
 				err := wf.pushMessage(ctx, subscriber, envelope)
 				if err != nil {
 					logger.Error("pushing message", zap.Error(err))
+					return
 				}
+				ellapsed := time.Since(start)
+				metrics.FilterHandleMessageDurationSeconds.M(int64(ellapsed.Seconds()))
 			}(subscriber)
 		}
 
 		return nil
 	}
 
-	for m := range wf.msgC {
+	for m := range wf.msgSub.Ch {
 		if err := handle(m); err != nil {
 			wf.log.Error("handling message", zap.Error(err))
 		}
 	}
 }
 
-func (wf *WakuFilterFull) pushMessage(ctx context.Context, peerID peer.ID, env *protocol.Envelope) error {
+func (wf *WakuFilterFullNode) pushMessage(ctx context.Context, peerID peer.ID, env *protocol.Envelope) error {
 	logger := wf.log.With(logging.HostID("peer", peerID))
 
 	messagePush := &pb.MessagePushV2{
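filterListener above fans each envelope out to every matching subscriber in its own goroutine, tracked by the shared WaitGroup so Stop can drain in-flight pushes. A compact sketch of that fan-out-and-wait shape, with peer IDs reduced to plain strings:

package main

import (
	"fmt"
	"sync"
)

// push stands in for pushMessage; it can fail for one subscriber
// without affecting the others.
func push(subscriber, msg string) error {
	fmt.Printf("pushed %q to %s\n", msg, subscriber)
	return nil
}

func main() {
	var wg sync.WaitGroup
	subscribers := []string{"peerA", "peerB", "peerC"}

	// One goroutine per subscriber; the loop variable is passed as an
	// argument so each goroutine captures its own copy.
	for _, subscriber := range subscribers {
		wg.Add(1)
		go func(subscriber string) {
			defer wg.Done()
			if err := push(subscriber, "envelope-1"); err != nil {
				fmt.Println("push failed:", err)
			}
		}(subscriber)
	}

	// Stop-style drain: wait for all in-flight pushes to finish.
	wg.Wait()
}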
@@ -270,6 +289,11 @@ func (wf *WakuFilterFull) pushMessage(ctx context.Context, peerID peer.ID, env *
 	err := wf.h.Connect(ctx, wf.h.Peerstore().PeerInfo(peerID))
 	if err != nil {
 		wf.subscriptions.FlagAsFailure(peerID)
+		if errors.Is(context.DeadlineExceeded, err) {
+			metrics.RecordFilterError(ctx, "push_timeout_failure")
+		} else {
+			metrics.RecordFilterError(ctx, "dial_failure")
+		}
 		logger.Error("connecting to peer", zap.Error(err))
 		return err
 	}
@@ -277,9 +301,12 @@ func (wf *WakuFilterFull) pushMessage(ctx context.Context, peerID peer.ID, env *
 	conn, err := wf.h.NewStream(ctx, peerID, FilterPushID_v20beta1)
 	if err != nil {
 		wf.subscriptions.FlagAsFailure(peerID)
+		if errors.Is(context.DeadlineExceeded, err) {
+			metrics.RecordFilterError(ctx, "push_timeout_failure")
+		} else {
+			metrics.RecordFilterError(ctx, "dial_failure")
+		}
 		logger.Error("opening peer stream", zap.Error(err))
-		//waku_filter_errors.inc(labelValues = [dialFailure])
 		return err
 	}
 
@@ -287,6 +314,11 @@ func (wf *WakuFilterFull) pushMessage(ctx context.Context, peerID peer.ID, env *
 	writer := pbio.NewDelimitedWriter(conn)
 	err = writer.WriteMsg(messagePush)
 	if err != nil {
+		if errors.Is(context.DeadlineExceeded, err) {
+			metrics.RecordFilterError(ctx, "push_timeout_failure")
+		} else {
+			metrics.RecordFilterError(ctx, "response_write_failure")
+		}
 		logger.Error("pushing messages to peer", zap.Error(err))
 		wf.subscriptions.FlagAsFailure(peerID)
 		return nil
@@ -297,7 +329,7 @@ func (wf *WakuFilterFull) pushMessage(ctx context.Context, peerID peer.ID, env *
 }
 
 // Stop unmounts the filter protocol
-func (wf *WakuFilterFull) Stop() {
+func (wf *WakuFilterFullNode) Stop() {
 	if wf.cancel == nil {
 		return
 	}
@@ -306,11 +338,7 @@ func (wf *WakuFilterFull) Stop() {
 
 	wf.cancel()
 
-	close(wf.msgC)
+	wf.msgSub.Unsubscribe()
 
 	wf.wg.Wait()
 }
-
-func (wf *WakuFilterFull) MessageChannel() chan *protocol.Envelope {
-	return wf.msgC
-}
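Push failures above are now split into timeout and dial/write buckets. Note that errors.Is conventionally takes the error first and the target second; a standalone sketch of the classification using that conventional argument order (the bucket names are only labels here):

package main

import (
	"context"
	"errors"
	"fmt"
)

// classify buckets an error the way the push path does: context
// deadline expiry counts as a timeout, anything else as a dial failure.
func classify(err error) string {
	// Conventional argument order: errors.Is(err, target).
	if errors.Is(err, context.DeadlineExceeded) {
		return "push_timeout_failure"
	}
	return "dial_failure"
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 0)
	defer cancel()
	<-ctx.Done()

	fmt.Println(classify(ctx.Err()))                  // push_timeout_failure
	fmt.Println(classify(errors.New("conn refused"))) // dial_failure
}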
@@ -1,4 +1,4 @@
-package filterv2
+package filter
 
 import (
 	"encoding/hex"
@@ -1,4 +1,4 @@
-package filterv2
+package filter
 
 import (
 	"sync"
@@ -6,19 +6,20 @@ import (
 	"github.com/google/uuid"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/waku-org/go-waku/waku/v2/protocol"
+	"go.uber.org/zap"
 )
 
 type SubscriptionDetails struct {
 	sync.RWMutex
 
-	id     string
+	ID     string
 	mapRef *SubscriptionsMap
-	closed bool
+	Closed bool
 	once   sync.Once
 
-	peerID        peer.ID
-	pubsubTopic   string
-	contentTopics map[string]struct{}
+	PeerID        peer.ID
+	PubsubTopic   string
+	ContentTopics map[string]struct{}
 	C             chan *protocol.Envelope
 }
 
@@ -31,12 +32,14 @@ type PeerSubscription struct {
 
 type SubscriptionsMap struct {
 	sync.RWMutex
-	items map[peer.ID]*PeerSubscription
+	logger *zap.Logger
+	items  map[peer.ID]*PeerSubscription
 }
 
-func NewSubscriptionMap() *SubscriptionsMap {
+func NewSubscriptionMap(logger *zap.Logger) *SubscriptionsMap {
 	return &SubscriptionsMap{
-		items: make(map[peer.ID]*PeerSubscription),
+		logger: logger.Named("subscription-map"),
+		items:  make(map[peer.ID]*PeerSubscription),
 	}
 }
 
@@ -59,24 +62,35 @@ func (sub *SubscriptionsMap) NewSubscription(peerID peer.ID, topic string, conte
 	}
 
 	details := &SubscriptionDetails{
-		id:            uuid.NewString(),
+		ID:            uuid.NewString(),
 		mapRef:        sub,
-		peerID:        peerID,
-		pubsubTopic:   topic,
-		C:             make(chan *protocol.Envelope),
-		contentTopics: make(map[string]struct{}),
+		PeerID:        peerID,
+		PubsubTopic:   topic,
+		C:             make(chan *protocol.Envelope, 1024),
+		ContentTopics: make(map[string]struct{}),
 	}
 
 	for _, ct := range contentTopics {
-		details.contentTopics[ct] = struct{}{}
+		details.ContentTopics[ct] = struct{}{}
 	}
 
-	sub.items[peerID].subscriptionsPerTopic[topic][details.id] = details
+	sub.items[peerID].subscriptionsPerTopic[topic][details.ID] = details
 
 	return details
 }
 
-func (sub *SubscriptionsMap) Has(peerID peer.ID, topic string, contentTopics []string) bool {
+func (sub *SubscriptionsMap) IsSubscribedTo(peerID peer.ID) bool {
+	sub.RLock()
+	defer sub.RUnlock()
+
+	_, ok := sub.items[peerID]
+	return ok
+}
+
+func (sub *SubscriptionsMap) Has(peerID peer.ID, topic string, contentTopics ...string) bool {
 	sub.RLock()
 	defer sub.RUnlock()
 
 	// Check if peer exits
 	peerSubscription, ok := sub.items[peerID]
 	if !ok {
@@ -93,7 +107,7 @@ func (sub *SubscriptionsMap) Has(peerID peer.ID, topic string, contentTopics []s
 	for _, ct := range contentTopics {
 		found := false
 		for _, subscription := range subscriptions {
-			_, exists := subscription.contentTopics[ct]
+			_, exists := subscription.ContentTopics[ct]
 			if exists {
 				found = true
 				break
@@ -111,12 +125,12 @@ func (sub *SubscriptionsMap) Delete(subscription *SubscriptionDetails) error {
 	sub.Lock()
 	defer sub.Unlock()
 
-	peerSubscription, ok := sub.items[subscription.peerID]
+	peerSubscription, ok := sub.items[subscription.PeerID]
 	if !ok {
 		return ErrNotFound
 	}
 
-	delete(peerSubscription.subscriptionsPerTopic[subscription.pubsubTopic], subscription.id)
+	delete(peerSubscription.subscriptionsPerTopic[subscription.PubsubTopic], subscription.ID)
 
 	return nil
 }
@@ -126,7 +140,7 @@ func (s *SubscriptionDetails) Add(contentTopics ...string) {
 	defer s.Unlock()
 
 	for _, ct := range contentTopics {
-		s.contentTopics[ct] = struct{}{}
+		s.ContentTopics[ct] = struct{}{}
 	}
 }
 
@@ -135,7 +149,7 @@ func (s *SubscriptionDetails) Remove(contentTopics ...string) {
 	defer s.Unlock()
 
 	for _, ct := range contentTopics {
-		delete(s.contentTopics, ct)
+		delete(s.ContentTopics, ct)
 	}
 }
 
@@ -144,7 +158,7 @@ func (s *SubscriptionDetails) closeC() {
 		s.Lock()
 		defer s.Unlock()
 
-		s.closed = true
+		s.Closed = true
 		close(s.C)
 	})
 }
@@ -159,17 +173,17 @@ func (s *SubscriptionDetails) Clone() *SubscriptionDetails {
 	defer s.RUnlock()
 
 	result := &SubscriptionDetails{
-		id:            uuid.NewString(),
+		ID:            uuid.NewString(),
 		mapRef:        s.mapRef,
-		closed:        false,
-		peerID:        s.peerID,
-		pubsubTopic:   s.pubsubTopic,
-		contentTopics: make(map[string]struct{}),
+		Closed:        false,
+		PeerID:        s.PeerID,
+		PubsubTopic:   s.PubsubTopic,
+		ContentTopics: make(map[string]struct{}),
 		C:             make(chan *protocol.Envelope),
 	}
 
-	for k := range s.contentTopics {
-		result.contentTopics[k] = struct{}{}
+	for k := range s.ContentTopics {
+		result.ContentTopics[k] = struct{}{}
 	}
 
 	return result
@@ -199,24 +213,27 @@ func (sub *SubscriptionsMap) Notify(peerID peer.ID, envelope *protocol.Envelope)
 
 	subscriptions, ok := sub.items[peerID].subscriptionsPerTopic[envelope.PubsubTopic()]
 	if ok {
-		iterateSubscriptionSet(subscriptions, envelope)
+		iterateSubscriptionSet(sub.logger, subscriptions, envelope)
 	}
 }
 
-func iterateSubscriptionSet(subscriptions SubscriptionSet, envelope *protocol.Envelope) {
+func iterateSubscriptionSet(logger *zap.Logger, subscriptions SubscriptionSet, envelope *protocol.Envelope) {
 	for _, subscription := range subscriptions {
 		func(subscription *SubscriptionDetails) {
 			subscription.RLock()
 			defer subscription.RUnlock()
 
-			_, ok := subscription.contentTopics[envelope.Message().ContentTopic]
-			if !ok && len(subscription.contentTopics) != 0 { // TODO: confirm if no content topics are allowed
+			_, ok := subscription.ContentTopics[envelope.Message().ContentTopic]
+			if !ok && len(subscription.ContentTopics) != 0 {
 				return
 			}
 
-			if !subscription.closed {
-				// TODO: consider pushing or dropping if subscription is not available
-				subscription.C <- envelope
+			if !subscription.Closed {
+				select {
+				case subscription.C <- envelope:
+				default:
+					logger.Warn("can't deliver message to subscription. subscriber too slow")
+				}
 			}
 		}(subscription)
 	}
 }
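Delivery to subscribers above changed from a blocking send into a buffered channel (capacity 1024) plus a select with a default branch, so one slow consumer drops its own messages instead of stalling Notify for everyone. The pattern in isolation:

package main

import "fmt"

func main() {
	// Buffered channel absorbs bursts; the default branch drops a
	// message once the subscriber has fallen a full buffer behind.
	ch := make(chan string, 2)

	deliver := func(msg string) {
		select {
		case ch <- msg:
			// Delivered (or buffered) without blocking the producer.
		default:
			fmt.Println("dropped:", msg, "(subscriber too slow)")
		}
	}

	deliver("m1")
	deliver("m2")
	deliver("m3") // buffer full, dropped

	close(ch)
	for m := range ch {
		fmt.Println("received:", m)
	}
}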
@@ -1,3 +0,0 @@
-package pb
-
-//go:generate protoc -I./../../pb/. -I. --go_opt=paths=source_relative --go_opt=Mwaku_filter_v2.proto=github.com/waku-org/go-waku/waku/v2/protocol/filterv2/pb --go_opt=Mwaku_message.proto=github.com/waku-org/go-waku/waku/v2/protocol/pb --go_out=. ./waku_filter_v2.proto
@@ -1,11 +1,11 @@
-package filter
+package legacy_filter
 
 import (
 	"sync"
 
-	v2 "github.com/waku-org/go-waku/waku/v2"
 	"github.com/waku-org/go-waku/waku/v2/protocol"
 	"github.com/waku-org/go-waku/waku/v2/protocol/pb"
+	"github.com/waku-org/go-waku/waku/v2/protocol/relay"
 	"github.com/waku-org/go-waku/waku/v2/timesource"
 )
 
@@ -13,7 +13,7 @@ type FilterMap struct {
 	sync.RWMutex
 	timesource  timesource.Timesource
 	items       map[string]Filter
-	broadcaster v2.Broadcaster
+	broadcaster relay.Broadcaster
 }
 
 type FilterMapItem struct {
@@ -21,7 +21,7 @@ type FilterMapItem struct {
 	Value Filter
 }
 
-func NewFilterMap(broadcaster v2.Broadcaster, timesource timesource.Timesource) *FilterMap {
+func NewFilterMap(broadcaster relay.Broadcaster, timesource timesource.Timesource) *FilterMap {
 	return &FilterMap{
 		timesource: timesource,
 		items:      make(map[string]Filter),
@@ -49,6 +49,11 @@ func (fm *FilterMap) Delete(key string) {
 	fm.Lock()
 	defer fm.Unlock()
 
+	_, ok := fm.items[key]
+	if !ok {
+		return
+	}
+
 	close(fm.items[key].Chan)
 	delete(fm.items, key)
 }
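The FilterMap.Delete guard above makes deletion idempotent: looking up a missing key yields a zero-value Filter whose Chan is nil, and close on a nil channel panics. A reduced sketch of the guard, with the mutex omitted for brevity:

package main

import "fmt"

type Filter struct {
	Chan chan int
}

type FilterMap struct {
	items map[string]Filter
}

// Delete returns early for an unknown key instead of reaching close()
// on a nil channel, which would panic.
func (fm *FilterMap) Delete(key string) {
	f, ok := fm.items[key]
	if !ok {
		return
	}
	close(f.Chan)
	delete(fm.items, key)
}

func main() {
	fm := &FilterMap{items: map[string]Filter{"a": {Chan: make(chan int)}}}
	fm.Delete("a")
	fm.Delete("a") // second delete is now a no-op, not a panic
	fmt.Println(len(fm.items))
}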
@@ -1,11 +1,11 @@
-package filter
+package legacy_filter
 
 import (
 	"sync"
 	"time"
 
 	"github.com/libp2p/go-libp2p/core/peer"
-	"github.com/waku-org/go-waku/waku/v2/protocol/filter/pb"
+	"github.com/waku-org/go-waku/waku/v2/protocol/legacy_filter/pb"
 )
 
 type Subscriber struct {
vendor/github.com/waku-org/go-waku/waku/v2/protocol/legacy_filter/pb/generate.go (generated, vendored, new file)
@@ -0,0 +1,3 @@
+package pb
+
+//go:generate protoc -I./../../pb/. -I. --go_opt=paths=source_relative --go_opt=Mwaku_filter.proto=github.com/waku-org/go-waku/waku/v2/protocol/filter/pb --go_opt=Mwaku_message.proto=github.com/waku-org/go-waku/waku/v2/protocol/pb --go_out=. ./waku_filter.proto
@@ -1,4 +1,4 @@
-package filter
+package legacy_filter
 
 import (
 	"context"
@@ -13,10 +13,9 @@ import (
 	libp2pProtocol "github.com/libp2p/go-libp2p/core/protocol"
 	"github.com/libp2p/go-msgio/pbio"
 	"github.com/waku-org/go-waku/logging"
-	v2 "github.com/waku-org/go-waku/waku/v2"
 	"github.com/waku-org/go-waku/waku/v2/metrics"
 	"github.com/waku-org/go-waku/waku/v2/protocol"
-	"github.com/waku-org/go-waku/waku/v2/protocol/filter/pb"
+	"github.com/waku-org/go-waku/waku/v2/protocol/legacy_filter/pb"
 	wpb "github.com/waku-org/go-waku/waku/v2/protocol/pb"
 	"github.com/waku-org/go-waku/waku/v2/protocol/relay"
 	"github.com/waku-org/go-waku/waku/v2/timesource"
@@ -53,7 +52,7 @@ type (
 		cancel     context.CancelFunc
 		h          host.Host
 		isFullNode bool
-		msgC       chan *protocol.Envelope
+		msgSub     relay.Subscription
 		wg         *sync.WaitGroup
 		log        *zap.Logger
 
@@ -66,7 +65,7 @@ type (
 const FilterID_v20beta1 = libp2pProtocol.ID("/vac/waku/filter/2.0.0-beta1")
 
 // NewWakuRelay returns a new instance of Waku Filter struct setup according to the chosen parameter and options
-func NewWakuFilter(host host.Host, broadcaster v2.Broadcaster, isFullNode bool, timesource timesource.Timesource, log *zap.Logger, opts ...Option) *WakuFilter {
+func NewWakuFilter(broadcaster relay.Broadcaster, isFullNode bool, timesource timesource.Timesource, log *zap.Logger, opts ...Option) *WakuFilter {
 	wf := new(WakuFilter)
 	wf.log = log.Named("filter").With(zap.Bool("fullNode", isFullNode))
 
@@ -78,7 +77,6 @@ func NewWakuFilter(host host.Host, broadcaster v2.Broadcaster, isFullNode bool,
 	}
 
 	wf.wg = &sync.WaitGroup{}
-	wf.h = host
 	wf.isFullNode = isFullNode
 	wf.filters = NewFilterMap(broadcaster, timesource)
 	wf.subscribers = NewSubscribers(params.Timeout)
@@ -86,7 +84,12 @@ func NewWakuFilter(host host.Host, broadcaster v2.Broadcaster, isFullNode bool,
 	return wf
 }
 
-func (wf *WakuFilter) Start(ctx context.Context) error {
+// Sets the host to be able to mount or consume a protocol
+func (wf *WakuFilter) SetHost(h host.Host) {
+	wf.h = h
+}
+
+func (wf *WakuFilter) Start(ctx context.Context, sub relay.Subscription) error {
 	wf.wg.Wait() // Wait for any goroutines to stop
 
 	ctx, err := tag.New(ctx, tag.Insert(metrics.KeyType, "filter"))
@@ -100,7 +103,7 @@ func (wf *WakuFilter) Start(ctx context.Context) error {
 	wf.h.SetStreamHandlerMatch(FilterID_v20beta1, protocol.PrefixTextMatch(string(FilterID_v20beta1)), wf.onRequest(ctx))
 
 	wf.cancel = cancel
-	wf.msgC = make(chan *protocol.Envelope, 1024)
+	wf.msgSub = sub
 
 	wf.wg.Add(1)
 	go wf.filterListener(ctx)
@@ -121,6 +124,7 @@ func (wf *WakuFilter) onRequest(ctx context.Context) func(s network.Stream) {
 
 		err := reader.ReadMsg(filterRPCRequest)
 		if err != nil {
+			metrics.RecordLegacyFilterError(ctx, "decode_rpc_failure")
 			logger.Error("reading request", zap.Error(err))
 			return
 		}
@@ -135,7 +139,7 @@ func (wf *WakuFilter) onRequest(ctx context.Context) func(s network.Stream) {
 			}
 
 			logger.Info("received a message push", zap.Int("messages", len(filterRPCRequest.Push.Messages)))
-			stats.Record(ctx, metrics.Messages.M(int64(len(filterRPCRequest.Push.Messages))))
+			metrics.RecordLegacyFilterMessage(ctx, "FilterRequest", len(filterRPCRequest.Push.Messages))
 		} else if filterRPCRequest.Request != nil && wf.isFullNode {
 			// We're on a full node.
 			// This is a filter request coming from a light node.
@@ -148,13 +152,13 @@ func (wf *WakuFilter) onRequest(ctx context.Context) func(s network.Stream) {
 				len := wf.subscribers.Append(subscriber)
 
 				logger.Info("adding subscriber")
-				stats.Record(ctx, metrics.FilterSubscriptions.M(int64(len)))
+				stats.Record(ctx, metrics.LegacyFilterSubscribers.M(int64(len)))
 			} else {
 				peerId := s.Conn().RemotePeer()
 				wf.subscribers.RemoveContentFilters(peerId, filterRPCRequest.RequestId, filterRPCRequest.Request.ContentFilters)
 
 				logger.Info("removing subscriber")
-				stats.Record(ctx, metrics.FilterSubscriptions.M(int64(wf.subscribers.Length())))
+				stats.Record(ctx, metrics.LegacyFilterSubscribers.M(int64(wf.subscribers.Length())))
 			}
 		} else {
 			logger.Error("can't serve request")
@@ -172,15 +176,15 @@ func (wf *WakuFilter) pushMessage(ctx context.Context, subscriber Subscriber, ms
 	if err != nil {
 		wf.subscribers.FlagAsFailure(subscriber.peer)
 		logger.Error("connecting to peer", zap.Error(err))
+		metrics.RecordLegacyFilterError(ctx, "dial_failure")
 		return err
 	}
 
 	conn, err := wf.h.NewStream(ctx, subscriber.peer, FilterID_v20beta1)
 	if err != nil {
 		wf.subscribers.FlagAsFailure(subscriber.peer)
 
 		logger.Error("opening peer stream", zap.Error(err))
-		//waku_filter_errors.inc(labelValues = [dialFailure])
+		metrics.RecordLegacyFilterError(ctx, "dial_failure")
 		return err
 	}
 
@@ -190,6 +194,7 @@ func (wf *WakuFilter) pushMessage(ctx context.Context, subscriber Subscriber, ms
 	if err != nil {
 		logger.Error("pushing messages to peer", zap.Error(err))
 		wf.subscribers.FlagAsFailure(subscriber.peer)
+		metrics.RecordLegacyFilterError(ctx, "push_write_error")
 		return nil
 	}
 
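Both filter implementations in this bump stop owning a message channel: they range over the Ch field of a relay subscription handed to Start, and Stop calls Unsubscribe instead of close. A reduced sketch of consuming such a subscription; the Subscription type here is a stand-in, not go-waku's:

package main

import "fmt"

// Subscription stands in for go-waku's relay.Subscription: a channel
// owned by the relay, plus an Unsubscribe that makes the relay close it.
type Subscription struct {
	Ch          chan string
	Unsubscribe func()
}

func listen(sub Subscription, done chan struct{}) {
	defer close(done)
	// The loop ends when the relay closes Ch after Unsubscribe.
	for m := range sub.Ch {
		fmt.Println("handling", m)
	}
}

func main() {
	ch := make(chan string, 4)
	sub := Subscription{Ch: ch, Unsubscribe: func() { close(ch) }}

	done := make(chan struct{})
	go listen(sub, done)

	ch <- "envelope-1"
	sub.Unsubscribe() // Stop path: the relay closes the channel...
	<-done            // ...and the listener drains and exits.
}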
@@ -233,7 +238,7 @@ func (wf *WakuFilter) filterListener(ctx context.Context) {
 		return g.Wait()
 	}
 
-	for m := range wf.msgC {
+	for m := range wf.msgSub.Ch {
 		if err := handle(m); err != nil {
 			wf.log.Error("handling message", zap.Error(err))
 		}
@@ -255,6 +260,7 @@ func (wf *WakuFilter) requestSubscription(ctx context.Context, filter ContentFil
 	}
 
 	if params.selectedPeer == "" {
+		metrics.RecordLegacyFilterError(ctx, "peer_not_found_failure")
 		return nil, ErrNoPeersAvailable
 	}
 
@@ -266,6 +272,7 @@ func (wf *WakuFilter) requestSubscription(ctx context.Context, filter ContentFil
 	// We connect first so dns4 addresses are resolved (NewStream does not do it)
 	err = wf.h.Connect(ctx, wf.h.Peerstore().PeerInfo(params.selectedPeer))
 	if err != nil {
+		metrics.RecordLegacyFilterError(ctx, "dial_failure")
 		return
 	}
 
@@ -278,6 +285,7 @@ func (wf *WakuFilter) requestSubscription(ctx context.Context, filter ContentFil
 	var conn network.Stream
 	conn, err = wf.h.NewStream(ctx, params.selectedPeer, FilterID_v20beta1)
 	if err != nil {
+		metrics.RecordLegacyFilterError(ctx, "dial_failure")
 		return
 	}
 
@@ -291,6 +299,7 @@ func (wf *WakuFilter) requestSubscription(ctx context.Context, filter ContentFil
 	wf.log.Debug("sending filterRPC", zap.Stringer("rpc", filterRPC))
 	err = writer.WriteMsg(filterRPC)
 	if err != nil {
+		metrics.RecordLegacyFilterError(ctx, "request_write_error")
 		wf.log.Error("sending filterRPC", zap.Error(err))
 		return
 	}
@@ -307,11 +316,13 @@ func (wf *WakuFilter) Unsubscribe(ctx context.Context, contentFilter ContentFilt
 	// We connect first so dns4 addresses are resolved (NewStream does not do it)
 	err := wf.h.Connect(ctx, wf.h.Peerstore().PeerInfo(peer))
 	if err != nil {
+		metrics.RecordLegacyFilterError(ctx, "dial_failure")
 		return err
 	}
 
 	conn, err := wf.h.NewStream(ctx, peer, FilterID_v20beta1)
 	if err != nil {
+		metrics.RecordLegacyFilterError(ctx, "dial_failure")
 		return err
 	}
 
@@ -335,6 +346,7 @@ func (wf *WakuFilter) Unsubscribe(ctx context.Context, contentFilter ContentFilt
 	filterRPC := &pb.FilterRPC{RequestId: hex.EncodeToString(id), Request: request}
 	err = writer.WriteMsg(filterRPC)
 	if err != nil {
+		metrics.RecordLegacyFilterError(ctx, "request_write_error")
 		return err
 	}
 
@@ -349,7 +361,7 @@ func (wf *WakuFilter) Stop() {
 
 	wf.cancel()
 
-	close(wf.msgC)
+	wf.msgSub.Unsubscribe()
 
 	wf.h.RemoveStreamHandler(FilterID_v20beta1)
 	wf.filters.RemoveAll()
@@ -381,7 +393,6 @@ func (wf *WakuFilter) Subscribe(ctx context.Context, f ContentFilter, opts ...Fi
 		ContentFilters: f.ContentTopics,
 		Chan:           make(chan *protocol.Envelope, 1024), // To avoid blocking
 	}
 
 	wf.filters.Set(filterID, theFilter)
 
 	return
@@ -427,7 +438,7 @@ func (wf *WakuFilter) UnsubscribeFilterByID(ctx context.Context, filterID string
 // the contentTopics are removed the subscription is dropped completely
 func (wf *WakuFilter) UnsubscribeFilter(ctx context.Context, cf ContentFilter) error {
 	// Remove local filter
-	var idsToRemove []string
+	idsToRemove := make(map[string]struct{})
 	for filterMapItem := range wf.filters.Items() {
 		f := filterMapItem.Value
 		id := filterMapItem.Key
@@ -456,18 +467,14 @@ func (wf *WakuFilter) UnsubscribeFilter(ctx context.Context, cf ContentFilter) e
 
 			}
 			if len(f.ContentFilters) == 0 {
-				idsToRemove = append(idsToRemove, id)
+				idsToRemove[id] = struct{}{}
 			}
 		}
 	}
 
-	for _, rId := range idsToRemove {
+	for rId := range idsToRemove {
 		wf.filters.Delete(rId)
 	}
 
 	return nil
 }
-
-func (wf *WakuFilter) MessageChannel() chan *protocol.Envelope {
-	return wf.msgC
-}
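UnsubscribeFilter above switches idsToRemove from a slice to a map[string]struct{}, which gives set semantics: a filter ID that qualifies more than once is still scheduled for deletion exactly once. The idiom on its own:

package main

import "fmt"

func main() {
	candidates := []string{"id-1", "id-2", "id-1"} // duplicates possible

	// A map used as a set: inserting the same key twice is a no-op,
	// so each ID is removed at most once.
	idsToRemove := make(map[string]struct{})
	for _, id := range candidates {
		idsToRemove[id] = struct{}{}
	}

	for id := range idsToRemove {
		fmt.Println("removing", id) // id-1 appears only once
	}
}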
@@ -1,4 +1,4 @@
-package filter
+package legacy_filter
 
 import (
 	"context"
vendor/github.com/waku-org/go-waku/waku/v2/protocol/lightpush/waku_lightpush.go (generated, vendored)
@@ -36,15 +36,19 @@ type WakuLightPush struct {
 }
 
 // NewWakuRelay returns a new instance of Waku Lightpush struct
-func NewWakuLightPush(h host.Host, relay *relay.WakuRelay, log *zap.Logger) *WakuLightPush {
+func NewWakuLightPush(relay *relay.WakuRelay, log *zap.Logger) *WakuLightPush {
 	wakuLP := new(WakuLightPush)
 	wakuLP.relay = relay
-	wakuLP.h = h
 	wakuLP.log = log.Named("lightpush")
 
 	return wakuLP
 }
 
+// Sets the host to be able to mount or consume a protocol
+func (wakuLP *WakuLightPush) SetHost(h host.Host) {
+	wakuLP.h = h
+}
+
 // Start inits the lighpush protocol
 func (wakuLP *WakuLightPush) Start(ctx context.Context) error {
 	if wakuLP.relayIsNotAvailable() {
@@ -77,7 +81,7 @@ func (wakuLP *WakuLightPush) onRequest(ctx context.Context) func(s network.Strea
 		err := reader.ReadMsg(requestPushRPC)
 		if err != nil {
 			logger.Error("reading request", zap.Error(err))
-			metrics.RecordLightpushError(ctx, "decodeRpcFailure")
+			metrics.RecordLightpushError(ctx, "decode_rpc_failure")
 			return
 		}
 
@@ -89,6 +93,8 @@ func (wakuLP *WakuLightPush) onRequest(ctx context.Context) func(s network.Strea
 			pubSubTopic := requestPushRPC.Query.PubsubTopic
 			message := requestPushRPC.Query.Message
 
+			metrics.RecordLightpushMessage(ctx, "PushRequest")
+
 			// TODO: Assumes success, should probably be extended to check for network, peers, etc
 			// It might make sense to use WithReadiness option here?
 
@@ -96,6 +102,7 @@ func (wakuLP *WakuLightPush) onRequest(ctx context.Context) func(s network.Strea
 
 			if err != nil {
 				logger.Error("publishing message", zap.Error(err))
+				metrics.RecordLightpushError(ctx, "message_push_failure")
 				response.Info = "Could not publish message"
 			} else {
 				response.IsSuccess = true
@@ -108,11 +115,14 @@ func (wakuLP *WakuLightPush) onRequest(ctx context.Context) func(s network.Strea
 
 			err = writer.WriteMsg(responsePushRPC)
 			if err != nil {
+				metrics.RecordLightpushError(ctx, "response_write_failure")
 				logger.Error("writing response", zap.Error(err))
 				_ = s.Reset()
 			} else {
 				logger.Info("response sent")
 			}
+		} else {
+			metrics.RecordLightpushError(ctx, "empty_request_body_failure")
 		}
 
 		if requestPushRPC.Response != nil {
@@ -121,6 +131,8 @@ func (wakuLP *WakuLightPush) onRequest(ctx context.Context) func(s network.Strea
 			} else {
 				logger.Info("request failure", zap.String("info=", requestPushRPC.Response.Info))
 			}
+		} else {
+			metrics.RecordLightpushError(ctx, "empty_response_body_failure")
 		}
 	}
 }
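The bump also normalizes lightpush error labels from mixed camelCase (decodeRpcFailure, dialError) to snake_case (decode_rpc_failure, dial_failure), so dashboards aggregate on a single spelling. A sketch of how such a tagged error counter is typically recorded with OpenCensus, which this codebase uses; the measure and tag names below are illustrative, not go-waku's exact definitions:

package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/tag"
)

// Illustrative measure and tag key; go-waku defines its own in its
// metrics package.
var (
	errorCount  = stats.Int64("example_lightpush_errors", "errors", stats.UnitDimensionless)
	keyErrorTyp = tag.MustNewKey("error_type")
)

// recordError mirrors the RecordLightpushError shape: bump a counter
// tagged with a snake_case error label.
func recordError(ctx context.Context, errType string) {
	if err := stats.RecordWithTags(ctx,
		[]tag.Mutator{tag.Upsert(keyErrorTyp, errType)},
		errorCount.M(1),
	); err != nil {
		log.Println("recording metric:", err)
	}
}

func main() {
	recordError(context.Background(), "decode_rpc_failure")
}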
@ -136,7 +148,7 @@ func (wakuLP *WakuLightPush) request(ctx context.Context, req *pb.PushRequest, o
|
||||||
}
|
}
|
||||||
|
|
||||||
if params.selectedPeer == "" {
|
if params.selectedPeer == "" {
|
||||||
metrics.RecordLightpushError(ctx, "dialError")
|
metrics.RecordLightpushError(ctx, "peer_not_found_failure")
|
||||||
return nil, ErrNoPeersAvailable
|
return nil, ErrNoPeersAvailable
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -148,6 +160,7 @@ func (wakuLP *WakuLightPush) request(ctx context.Context, req *pb.PushRequest, o
|
||||||
// We connect first so dns4 addresses are resolved (NewStream does not do it)
|
// We connect first so dns4 addresses are resolved (NewStream does not do it)
|
||||||
err := wakuLP.h.Connect(ctx, wakuLP.h.Peerstore().PeerInfo(params.selectedPeer))
|
err := wakuLP.h.Connect(ctx, wakuLP.h.Peerstore().PeerInfo(params.selectedPeer))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
metrics.RecordLightpushError(ctx, "dial_failure")
|
||||||
logger.Error("connecting peer", zap.Error(err))
|
logger.Error("connecting peer", zap.Error(err))
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -155,7 +168,7 @@ func (wakuLP *WakuLightPush) request(ctx context.Context, req *pb.PushRequest, o
|
||||||
connOpt, err := wakuLP.h.NewStream(ctx, params.selectedPeer, LightPushID_v20beta1)
|
connOpt, err := wakuLP.h.NewStream(ctx, params.selectedPeer, LightPushID_v20beta1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Error("creating stream to peer", zap.Error(err))
|
logger.Error("creating stream to peer", zap.Error(err))
|
||||||
metrics.RecordLightpushError(ctx, "dialError")
|
metrics.RecordLightpushError(ctx, "dial_failure")
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -163,7 +176,7 @@ func (wakuLP *WakuLightPush) request(ctx context.Context, req *pb.PushRequest, o
|
||||||
defer func() {
|
defer func() {
|
||||||
err := connOpt.Reset()
|
err := connOpt.Reset()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
metrics.RecordLightpushError(ctx, "dialError")
|
metrics.RecordLightpushError(ctx, "dial_failure")
|
||||||
logger.Error("resetting connection", zap.Error(err))
|
logger.Error("resetting connection", zap.Error(err))
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
@ -175,6 +188,7 @@ func (wakuLP *WakuLightPush) request(ctx context.Context, req *pb.PushRequest, o
|
||||||
|
|
||||||
err = writer.WriteMsg(pushRequestRPC)
|
err = writer.WriteMsg(pushRequestRPC)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
metrics.RecordLightpushError(ctx, "request_write_failure")
|
||||||
logger.Error("writing request", zap.Error(err))
|
logger.Error("writing request", zap.Error(err))
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -183,7 +197,7 @@ func (wakuLP *WakuLightPush) request(ctx context.Context, req *pb.PushRequest, o
|
||||||
err = reader.ReadMsg(pushResponseRPC)
|
err = reader.ReadMsg(pushResponseRPC)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Error("reading response", zap.Error(err))
|
logger.Error("reading response", zap.Error(err))
|
||||||
metrics.RecordLightpushError(ctx, "decodeRPCFailure")
|
metrics.RecordLightpushError(ctx, "decode_rpc_failure")
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
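The request path above is go-waku's usual length-delimited protobuf exchange over a libp2p stream. A minimal sketch of that round trip, assuming the lightpush pb package path and an already-negotiated stream (everything except the pbio and pb types is a placeholder, not part of this diff):

package main

import (
	"math"

	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-msgio/pbio"
	"github.com/waku-org/go-waku/waku/v2/protocol/lightpush/pb"
)

// pushOverStream sketches the framing used by WakuLightPush.request:
// one length-delimited PushRPC out, one length-delimited PushRPC back.
// `s` is assumed to be a stream negotiated for LightPushID_v20beta1.
func pushOverStream(s network.Stream, req *pb.PushRequest) (*pb.PushResponse, error) {
	writer := pbio.NewDelimitedWriter(s)
	reader := pbio.NewDelimitedReader(s, math.MaxInt32)

	if err := writer.WriteMsg(&pb.PushRPC{Query: req}); err != nil {
		return nil, err // the diff above records this as "request_write_failure"
	}

	rpc := &pb.PushRPC{}
	if err := reader.ReadMsg(rpc); err != nil {
		return nil, err // recorded above as "decode_rpc_failure"
	}
	return rpc.Response, nil
}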
@ -11,8 +11,8 @@ import (
|
||||||
"github.com/libp2p/go-libp2p/core/peer"
|
"github.com/libp2p/go-libp2p/core/peer"
|
||||||
"github.com/libp2p/go-msgio/pbio"
|
"github.com/libp2p/go-msgio/pbio"
|
||||||
"github.com/waku-org/go-waku/waku/v2/metrics"
|
"github.com/waku-org/go-waku/waku/v2/metrics"
|
||||||
|
wenr "github.com/waku-org/go-waku/waku/v2/protocol/enr"
|
||||||
"github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange/pb"
|
"github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange/pb"
|
||||||
"github.com/waku-org/go-waku/waku/v2/utils"
|
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -85,7 +85,7 @@ func (wakuPX *WakuPeerExchange) handleResponse(ctx context.Context, response *pb
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
peerInfo, err := utils.EnodeToPeerInfo(enodeRecord)
|
peerInfo, err := wenr.EnodeToPeerInfo(enodeRecord)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
71 vendor/github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange/enr_cache.go generated vendored Normal file
@@ -0,0 +1,71 @@
+package peer_exchange
+
+import (
+	"bufio"
+	"bytes"
+	"math/rand"
+	"sync"
+
+	"github.com/ethereum/go-ethereum/p2p/enode"
+	"github.com/hashicorp/golang-lru/simplelru"
+	"github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange/pb"
+)
+
+// simplelru internally uses container/list, a doubly linked list used as a ring buffer
+type enrCache struct {
+	// using an LRU saves us from periodically cleaning the cache to maintain a certain size
+	data *simplelru.LRU
+	rng  *rand.Rand
+	mu   sync.RWMutex
+}
+
+// newEnrCache returns an error on a negative size
+func newEnrCache(size int) (*enrCache, error) {
+	inner, err := simplelru.NewLRU(size, nil)
+	return &enrCache{
+		data: inner,
+		rng:  rand.New(rand.NewSource(rand.Int63())),
+	}, err
+}
+
+// updateCache adds or refreshes a node, keyed by its ID
+func (c *enrCache) updateCache(node *enode.Node) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.data.Add(node.ID(), node)
+}
+
+// getENRs returns up to neededPeers randomly chosen, RLP-encoded ENR records
+func (c *enrCache) getENRs(neededPeers int) ([]*pb.PeerInfo, error) {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+
+	availablePeers := c.data.Len()
+	if availablePeers == 0 {
+		return nil, nil
+	}
+	if availablePeers < neededPeers {
+		neededPeers = availablePeers
+	}
+
+	perm := c.rng.Perm(availablePeers)[0:neededPeers]
+	keys := c.data.Keys()
+	result := []*pb.PeerInfo{}
+	for _, ind := range perm {
+		node, ok := c.data.Get(keys[ind])
+		if !ok {
+			continue
+		}
+		var b bytes.Buffer
+		writer := bufio.NewWriter(&b)
+		err := node.(*enode.Node).Record().EncodeRLP(writer)
+		if err != nil {
+			return nil, err
+		}
+		writer.Flush()
+		result = append(result, &pb.PeerInfo{
+			ENR: b.Bytes(),
+		})
+	}
+	return result, nil
+}
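A short in-package sketch of how the cache above is driven: discv5 results flow in through updateCache, peer-exchange responses flow out through getENRs. The node slice and the count of 10 are placeholders; protocol.go below wires this up for real.

// Sketch only, same package as enr_cache.go, so enode/pb imports are already present.
func exampleCacheFlow(nodes []*enode.Node) ([]*pb.PeerInfo, error) {
	cache, err := newEnrCache(MaxCacheSize) // LRU evicts the oldest entry once full
	if err != nil {
		return nil, err
	}
	for _, n := range nodes {
		cache.updateCache(n) // keyed by node ID, so rediscovered nodes simply overwrite
	}
	return cache.getENRs(10) // up to 10 randomly chosen, RLP-encoded ENRs
}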
162 vendor/github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange/protocol.go generated vendored
@@ -1,17 +1,13 @@
 package peer_exchange

 import (
-	"bufio"
-	"bytes"
 	"context"
 	"errors"
 	"fmt"
 	"math"
-	"math/rand"
 	"sync"
 	"time"

-	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/libp2p/go-libp2p/core/host"
 	"github.com/libp2p/go-libp2p/core/network"
 	"github.com/libp2p/go-libp2p/core/peer"
@@ -21,26 +17,20 @@ import (
 	"github.com/waku-org/go-waku/waku/v2/discv5"
 	"github.com/waku-org/go-waku/waku/v2/metrics"
 	"github.com/waku-org/go-waku/waku/v2/protocol"
+	"github.com/waku-org/go-waku/waku/v2/protocol/enr"
 	"github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange/pb"
-	"github.com/waku-org/go-waku/waku/v2/utils"
 	"go.uber.org/zap"
 )

 // PeerExchangeID_v20alpha1 is the current Waku Peer Exchange protocol identifier
 const PeerExchangeID_v20alpha1 = libp2pProtocol.ID("/vac/waku/peer-exchange/2.0.0-alpha1")
 const MaxCacheSize = 1000
-const CacheCleanWindow = 200

 var (
 	ErrNoPeersAvailable = errors.New("no suitable remote peers")
 	ErrInvalidId        = errors.New("invalid request id")
 )

-type peerRecord struct {
-	node *enode.Node
-	idx  int
-}
-
 type WakuPeerExchange struct {
 	h    host.Host
 	disc *discv5.DiscoveryV5
@@ -51,10 +41,7 @@ type WakuPeerExchange struct {

 	wg            sync.WaitGroup
 	peerConnector PeerConnector
-	peerCh        chan peer.AddrInfo
-	enrCache      map[enode.ID]peerRecord // todo: next step: ring buffer; future: implement cache satisfying https://rfc.vac.dev/spec/34/
-	enrCacheMutex sync.RWMutex
-	rng           *rand.Rand
+	enrCache      *enrCache
 }

 type PeerConnector interface {
@@ -62,17 +49,24 @@ type PeerConnector interface {
 }

 // NewWakuPeerExchange returns a new instance of WakuPeerExchange struct
-func NewWakuPeerExchange(h host.Host, disc *discv5.DiscoveryV5, peerConnector PeerConnector, log *zap.Logger) (*WakuPeerExchange, error) {
+func NewWakuPeerExchange(disc *discv5.DiscoveryV5, peerConnector PeerConnector, log *zap.Logger) (*WakuPeerExchange, error) {
+	newEnrCache, err := newEnrCache(MaxCacheSize)
+	if err != nil {
+		return nil, err
+	}
 	wakuPX := new(WakuPeerExchange)
-	wakuPX.h = h
 	wakuPX.disc = disc
 	wakuPX.log = log.Named("wakupx")
-	wakuPX.enrCache = make(map[enode.ID]peerRecord)
-	wakuPX.rng = rand.New(rand.NewSource(rand.Int63()))
+	wakuPX.enrCache = newEnrCache
 	wakuPX.peerConnector = peerConnector
 	return wakuPX, nil
 }

+// Sets the host to be able to mount or consume a protocol
+func (wakuPX *WakuPeerExchange) SetHost(h host.Host) {
+	wakuPX.h = h
+}
+
 // Start inits the peer exchange protocol
 func (wakuPX *WakuPeerExchange) Start(ctx context.Context) error {
 	if wakuPX.cancel != nil {
@@ -83,7 +77,6 @@ func (wakuPX *WakuPeerExchange) Start(ctx context.Context) error {

 	ctx, cancel := context.WithCancel(ctx)
 	wakuPX.cancel = cancel
-	wakuPX.peerCh = make(chan peer.AddrInfo)

 	wakuPX.h.SetStreamHandlerMatch(PeerExchangeID_v20alpha1, protocol.PrefixTextMatch(string(PeerExchangeID_v20alpha1)), wakuPX.onRequest(ctx))
 	wakuPX.log.Info("Peer exchange protocol started")
@@ -109,7 +102,7 @@ func (wakuPX *WakuPeerExchange) onRequest(ctx context.Context) func(s network.St
 		if requestRPC.Query != nil {
 			logger.Info("request received")

-			records, err := wakuPX.getENRsFromCache(requestRPC.Query.NumPeers)
+			records, err := wakuPX.enrCache.getENRs(int(requestRPC.Query.NumPeers))
 			if err != nil {
 				logger.Error("obtaining enrs from cache", zap.Error(err))
 				metrics.RecordPeerExchangeError(ctx, "pxFailure")
@@ -138,102 +131,19 @@ func (wakuPX *WakuPeerExchange) Stop() {
 	}
 	wakuPX.h.RemoveStreamHandler(PeerExchangeID_v20alpha1)
 	wakuPX.cancel()
-	close(wakuPX.peerCh)
 	wakuPX.wg.Wait()
 }

-func (wakuPX *WakuPeerExchange) getENRsFromCache(numPeers uint64) ([]*pb.PeerInfo, error) {
-	wakuPX.enrCacheMutex.Lock()
-	defer wakuPX.enrCacheMutex.Unlock()
-
-	if len(wakuPX.enrCache) == 0 {
-		return nil, nil
-	}
-
-	numItems := int(numPeers)
-	if len(wakuPX.enrCache) < int(numPeers) {
-		numItems = len(wakuPX.enrCache)
-	}
-
-	perm := wakuPX.rng.Perm(len(wakuPX.enrCache))[0:numItems]
-	permSet := make(map[int]int)
-	for i, v := range perm {
-		permSet[v] = i
-	}
-
-	var result []*pb.PeerInfo
-	iter := 0
-	for k := range wakuPX.enrCache {
-		if _, ok := permSet[iter]; ok {
-			var b bytes.Buffer
-			writer := bufio.NewWriter(&b)
-			enode := wakuPX.enrCache[k]
-
-			err := enode.node.Record().EncodeRLP(writer)
-			if err != nil {
-				return nil, err
-			}
-
-			writer.Flush()
-
-			result = append(result, &pb.PeerInfo{
-				ENR: b.Bytes(),
-			})
-		}
-		iter++
-	}
-
-	return result, nil
-}
-
-func (wakuPX *WakuPeerExchange) cleanCache() {
-	if len(wakuPX.enrCache) < MaxCacheSize {
-		return
-	}
-
-	r := make(map[enode.ID]peerRecord)
-	for k, v := range wakuPX.enrCache {
-		if v.idx > CacheCleanWindow {
-			v.idx -= CacheCleanWindow
-			r[k] = v
-		}
-	}
-
-	wakuPX.enrCache = r
-}
-
 func (wakuPX *WakuPeerExchange) iterate(ctx context.Context) error {
 	iterator, err := wakuPX.disc.Iterator()
 	if err != nil {
 		return fmt.Errorf("obtaining iterator: %w", err)
 	}
+	// Closing iterator
+	defer iterator.Close()

-	closeCh := make(chan struct{}, 1)
-	defer close(closeCh)
-
-	// Closing iterator when context is cancelled or function is returning
-	wakuPX.wg.Add(1)
-	go func() {
-		defer wakuPX.wg.Done()
-		select {
-		case <-ctx.Done():
-			iterator.Close()
-		case <-closeCh:
-			iterator.Close()
-		}
-	}()
-
-	for {
-		if ctx.Err() != nil {
-			break
-		}
-
-		exists := iterator.Next()
-		if !exists {
-			break
-		}
-
-		_, addresses, err := utils.Multiaddress(iterator.Node())
+	for iterator.Next() {
+		_, addresses, err := enr.Multiaddress(iterator.Node())
 		if err != nil {
 			wakuPX.log.Error("extracting multiaddrs from enr", zap.Error(err))
 			continue
@@ -244,15 +154,14 @@ func (wakuPX *WakuPeerExchange) iterate(ctx context.Context) error {
 		}

 		wakuPX.log.Debug("Discovered px peers via discv5")
+		wakuPX.enrCache.updateCache(iterator.Node())

-		wakuPX.enrCacheMutex.Lock()
-		wakuPX.enrCache[iterator.Node().ID()] = peerRecord{
-			idx:  len(wakuPX.enrCache),
-			node: iterator.Node(),
-		}
-		wakuPX.enrCacheMutex.Unlock()
+		select {
+		case <-ctx.Done():
+			return nil
+		default:
+		}
 	}

 	return nil
 }

@@ -265,27 +174,16 @@ func (wakuPX *WakuPeerExchange) runPeerExchangeDiscv5Loop(ctx context.Context) {
 		return
 	}

-	ch := make(chan struct{}, 1)
-	ch <- struct{}{} // Initial execution
-
-	ticker := time.NewTicker(5 * time.Second)
-	defer ticker.Stop()
-
-restartLoop:
 	for {
+		err := wakuPX.iterate(ctx)
+		if err != nil {
+			wakuPX.log.Debug("iterating peer exchange", zap.Error(err))
+			time.Sleep(2 * time.Second)
+		}
 		select {
-		case <-ch:
-			err := wakuPX.iterate(ctx)
-			if err != nil {
-				wakuPX.log.Debug("iterating peer exchange", zap.Error(err))
-				time.Sleep(2 * time.Second)
-			}
-			ch <- struct{}{}
-		case <-ticker.C:
-			wakuPX.cleanCache()
 		case <-ctx.Done():
-			close(ch)
-			break restartLoop
+			return
+		default:
 		}
 	}
 }
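Note the constructor/SetHost split: the libp2p host is no longer a constructor argument, so embedders create the protocol first and attach the host before Start (which registers the stream handler on it). A wiring sketch under that reading; disc, connector, h, ctx and logger are placeholders:

px, err := peer_exchange.NewWakuPeerExchange(disc, connector, logger)
if err != nil {
	return err
}
px.SetHost(h) // must happen before Start, which calls h.SetStreamHandlerMatch
if err := px.Start(ctx); err != nil {
	return err
}
defer px.Stop()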
177 vendor/github.com/waku-org/go-waku/waku/v2/protocol/relay/broadcast.go generated vendored Normal file
@@ -0,0 +1,177 @@
+package relay
+
+import (
+	"context"
+	"errors"
+	"sync"
+	"sync/atomic"
+
+	"github.com/waku-org/go-waku/waku/v2/protocol"
+)
+
+type chStore struct {
+	mu           sync.RWMutex
+	topicToChans map[string]map[int]chan *protocol.Envelope
+	id           int
+}
+
+func newChStore() chStore {
+	return chStore{
+		topicToChans: make(map[string]map[int]chan *protocol.Envelope),
+	}
+}
+
+func (s *chStore) getNewCh(topic string, chLen int) Subscription {
+	ch := make(chan *protocol.Envelope, chLen)
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.id++
+	if s.topicToChans[topic] == nil {
+		s.topicToChans[topic] = make(map[int]chan *protocol.Envelope)
+	}
+	id := s.id
+	s.topicToChans[topic][id] = ch
+	return Subscription{
+		// receive-only channel; it will not block forever and returns once closed
+		Ch: ch,
+		// Unsubscribe is safe to call multiple times,
+		// even after the broadcaster has stopped running
+		Unsubscribe: func() {
+			s.mu.Lock()
+			defer s.mu.Unlock()
+			if s.topicToChans[topic] == nil {
+				return
+			}
+			if ch := s.topicToChans[topic][id]; ch != nil {
+				close(ch)
+				delete(s.topicToChans[topic], id)
+			}
+		},
+	}
+}
+
+func (s *chStore) broadcast(ctx context.Context, m *protocol.Envelope) {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	for _, ch := range s.topicToChans[m.PubsubTopic()] {
+		select {
+		// returning on ctx.Done is needed: if no one is listening on a channel,
+		// broadcast would hold the lock while waiting for a receiver, which would
+		// also block chStore.close, since it uses the same mutex
+		case <-ctx.Done():
+			return
+		case ch <- m:
+		}
+	}
+	// send to subscribers registered for all topics
+	for _, ch := range s.topicToChans[""] {
+		select {
+		case <-ctx.Done():
+			return
+		case ch <- m:
+		}
+	}
+}
+
+func (b *chStore) close() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	for _, chans := range b.topicToChans {
+		for _, ch := range chans {
+			close(ch)
+		}
+	}
+	b.topicToChans = nil
+}
+
+type Broadcaster interface {
+	Start(ctx context.Context) error
+	Stop()
+	Register(topic string, chLen ...int) Subscription
+	RegisterForAll(chLen ...int) Subscription
+	Submit(*protocol.Envelope)
+}
+
+// broadcaster is:
+// - thread safe
+// - panic safe: nothing can be submitted to the `input` channel after stop
+// - lock safe: only receive channels are handed out and later closed, so calling
+//   code is guaranteed a Register channel will not block forever
+// - leak free: every receive channel it creates is closed on stop,
+//   even if no one is listening on it
+type broadcaster struct {
+	bufLen  int
+	cancel  context.CancelFunc
+	input   chan *protocol.Envelope
+	chStore chStore
+	running atomic.Bool
+}
+
+func NewBroadcaster(bufLen int) *broadcaster {
+	return &broadcaster{
+		bufLen: bufLen,
+	}
+}
+
+func (b *broadcaster) Start(ctx context.Context) error {
+	if !b.running.CompareAndSwap(false, true) { // if not running, start
+		return errors.New("already started")
+	}
+	ctx, cancel := context.WithCancel(ctx)
+	b.cancel = cancel
+	b.chStore = newChStore()
+	b.input = make(chan *protocol.Envelope, b.bufLen)
+	go b.run(ctx)
+	return nil
+}
+
+func (b *broadcaster) run(ctx context.Context) {
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case msg, ok := <-b.input:
+			if ok {
+				b.chStore.broadcast(ctx, msg)
+			}
+		}
+	}
+}
+
+func (b *broadcaster) Stop() {
+	if !b.running.CompareAndSwap(true, false) { // if running, stop
+		return
+	}
+	// cancel must come before chStore.close(), so that broadcast releases
+	// its lock before chStore.close() acquires it
+	b.cancel()        // exit the run loop
+	b.chStore.close() // close all channels we send to
+	close(b.input)    // close the input channel
+}
+
+// Register returns a subscription for the specified topic
+func (b *broadcaster) Register(topic string, chLen ...int) Subscription {
+	return b.chStore.getNewCh(topic, getChLen(chLen))
+}
+
+// RegisterForAll returns a subscription for all topics
+func (b *broadcaster) RegisterForAll(chLen ...int) Subscription {
+	return b.chStore.getNewCh("", getChLen(chLen))
+}
+
+func getChLen(chLen []int) int {
+	l := 0
+	if len(chLen) > 0 {
+		l = chLen[0]
+	}
+	return l
+}
+
+// Submit only accepts values while the broadcaster is running
+func (b *broadcaster) Submit(m *protocol.Envelope) {
+	if b.running.Load() {
+		b.input <- m
+	}
+}
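Taken together, the lifecycle the new broadcaster expects looks roughly like this; ctx, the topic string, the envelope and the handler are placeholders:

b := relay.NewBroadcaster(1024) // capacity of the shared input channel
if err := b.Start(ctx); err != nil {
	return err
}

sub := b.Register("/waku/2/default-waku/proto", 64) // per-topic, buffered channel
go func() {
	for env := range sub.Ch { // Ch is closed by Unsubscribe or Stop, so range ends
		handleEnvelope(env) // placeholder consumer
	}
}()

b.Submit(envelope) // fans out to topic subscribers and RegisterForAll subscribers
sub.Unsubscribe()  // idempotent; safe even after Stop
b.Stop()           // closes every remaining subscriber channel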
@@ -1,34 +1,29 @@
 package relay

-import (
-	"sync"
-
-	"github.com/waku-org/go-waku/waku/v2/protocol"
-)
+import "github.com/waku-org/go-waku/waku/v2/protocol"

-// Subscription handles the subscrition to a particular pubsub topic
 type Subscription struct {
-	sync.RWMutex
-
-	// C is channel used for receiving envelopes
-	C chan *protocol.Envelope
-
-	closed bool
-	once   sync.Once
-	quit   chan struct{}
+	Unsubscribe func()
+	Ch          <-chan *protocol.Envelope
 }

-// Unsubscribe will close a subscription from a pubsub topic. Will close the message channel
-func (subs *Subscription) Unsubscribe() {
-	subs.once.Do(func() {
-		close(subs.quit)
-	})
-}
+func NoopSubscription() Subscription {
+	ch := make(chan *protocol.Envelope)
+	close(ch)
+	return Subscription{
+		Unsubscribe: func() {},
+		Ch:          ch,
+	}
+}

-// IsClosed determine whether a Subscription is still open for receiving messages
-func (subs *Subscription) IsClosed() bool {
-	subs.RLock()
-	defer subs.RUnlock()
-	return subs.closed
-}
+func ArraySubscription(msgs []*protocol.Envelope) Subscription {
+	ch := make(chan *protocol.Envelope, len(msgs))
+	for _, msg := range msgs {
+		ch <- msg
+	}
+	close(ch)
+	return Subscription{
+		Unsubscribe: func() {},
+		Ch:          ch,
+	}
+}
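The two constructors exist mainly so callers and tests can hand out inert subscriptions. For instance, a test double that replays a fixed set of envelopes (env1, env2 and the consumer are placeholders):

sub := relay.ArraySubscription([]*protocol.Envelope{env1, env2})
for env := range sub.Ch { // buffered with both envelopes, then already closed
	process(env) // placeholder
}
sub.Unsubscribe() // a no-op for array and noop subscriptions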
100 vendor/github.com/waku-org/go-waku/waku/v2/protocol/relay/validators.go generated vendored Normal file
@@ -0,0 +1,100 @@
+package relay
+
+import (
+	"context"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"encoding/binary"
+	"encoding/hex"
+	"time"
+
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/crypto/secp256k1"
+
+	pubsub "github.com/libp2p/go-libp2p-pubsub"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/waku-org/go-waku/waku/v2/hash"
+	"github.com/waku-org/go-waku/waku/v2/protocol/pb"
+	"github.com/waku-org/go-waku/waku/v2/timesource"
+	"go.uber.org/zap"
+	proto "google.golang.org/protobuf/proto"
+)
+
+// Application level message hash
+func MsgHash(pubSubTopic string, msg *pb.WakuMessage) []byte {
+	timestampBytes := make([]byte, 8)
+	binary.LittleEndian.PutUint64(timestampBytes, uint64(msg.Timestamp))
+
+	var ephemeralByte byte
+	if msg.Ephemeral {
+		ephemeralByte = 1
+	}
+
+	return hash.SHA256(
+		[]byte(pubSubTopic),
+		msg.Payload,
+		[]byte(msg.ContentTopic),
+		timestampBytes,
+		[]byte{ephemeralByte},
+	)
+}
+
+const MessageWindowDuration = time.Minute * 5
+
+func withinTimeWindow(t timesource.Timesource, msg *pb.WakuMessage) bool {
+	if msg.Timestamp == 0 {
+		return false
+	}
+
+	now := t.Now()
+	msgTime := time.Unix(0, msg.Timestamp)
+
+	return now.Sub(msgTime).Abs() <= MessageWindowDuration
+}
+
+type validatorFn = func(ctx context.Context, peerID peer.ID, message *pubsub.Message) bool
+
+func validatorFnBuilder(t timesource.Timesource, topic string, publicKey *ecdsa.PublicKey) validatorFn {
+	pubkBytes := crypto.FromECDSAPub(publicKey)
+	return func(ctx context.Context, peerID peer.ID, message *pubsub.Message) bool {
+		msg := new(pb.WakuMessage)
+		err := proto.Unmarshal(message.Data, msg)
+		if err != nil {
+			return false
+		}
+
+		if !withinTimeWindow(t, msg) {
+			return false
+		}
+
+		msgHash := MsgHash(topic, msg)
+		signature := msg.Meta
+
+		return secp256k1.VerifySignature(pubkBytes, msgHash, signature)
+	}
+}
+
+func (w *WakuRelay) AddSignedTopicValidator(topic string, publicKey *ecdsa.PublicKey) error {
+	w.log.Info("adding validator to signed topic", zap.String("topic", topic), zap.String("publicKey", hex.EncodeToString(elliptic.Marshal(publicKey.Curve, publicKey.X, publicKey.Y))))
+	err := w.pubsub.RegisterTopicValidator(topic, validatorFnBuilder(w.timesource, topic, publicKey))
+	if err != nil {
+		return err
+	}
+
+	if !w.IsSubscribed(topic) {
+		w.log.Warn("relay is not subscribed to signed topic", zap.String("topic", topic))
+	}
+
+	return nil
+}
+
+func SignMessage(privKey *ecdsa.PrivateKey, topic string, msg *pb.WakuMessage) error {
+	msgHash := MsgHash(topic, msg)
+	sign, err := secp256k1.Sign(msgHash, crypto.FromECDSA(privKey))
+	if err != nil {
+		return err
+	}
+
+	msg.Meta = sign[0:64] // Drop the V in R||S||V
+	return nil
+}
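Publisher and relay sides of the new signed-topic scheme fit together as follows; privKey, topic, msg and the relay instance w are placeholders:

// Publisher: sign before publishing; the 64-byte R||S signature lands in msg.Meta.
if err := relay.SignMessage(privKey, topic, msg); err != nil {
	return err
}

// Receiver: register the validator once; gossipsub then drops messages whose
// signature or timestamp (the 5-minute MessageWindowDuration) does not check out.
if err := w.AddSignedTopicValidator(topic, &privKey.PublicKey); err != nil {
	return err
}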
@@ -2,13 +2,11 @@ package relay

 import (
 	"context"
-	"encoding/hex"
 	"errors"
 	"fmt"
 	"sync"

 	"github.com/libp2p/go-libp2p/core/host"
-	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/libp2p/go-libp2p/core/protocol"
 	"go.opencensus.io/stats"
 	"go.opencensus.io/tag"
@@ -18,7 +16,6 @@ import (
 	pubsub "github.com/libp2p/go-libp2p-pubsub"
 	pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb"
 	"github.com/waku-org/go-waku/logging"
-	v2 "github.com/waku-org/go-waku/waku/v2"
 	"github.com/waku-org/go-waku/waku/v2/hash"
 	"github.com/waku-org/go-waku/waku/v2/metrics"
 	waku_proto "github.com/waku-org/go-waku/waku/v2/protocol"
@@ -38,7 +35,7 @@ type WakuRelay struct {

 	log *zap.Logger

-	bcaster v2.Broadcaster
+	bcaster Broadcaster

 	minPeersToPublish int

@@ -47,10 +44,6 @@ type WakuRelay struct {
 	wakuRelayTopics map[string]*pubsub.Topic
 	relaySubs       map[string]*pubsub.Subscription

-	// TODO: convert to concurrent maps
-	subscriptions      map[string][]*Subscription
-	subscriptionsMutex sync.Mutex
-
 	ctx    context.Context
 	cancel context.CancelFunc
 	wg     sync.WaitGroup
@@ -61,13 +54,11 @@ func msgIdFn(pmsg *pubsub_pb.Message) string {
 }

 // NewWakuRelay returns a new instance of a WakuRelay struct
-func NewWakuRelay(h host.Host, bcaster v2.Broadcaster, minPeersToPublish int, timesource timesource.Timesource, log *zap.Logger, opts ...pubsub.Option) *WakuRelay {
+func NewWakuRelay(bcaster Broadcaster, minPeersToPublish int, timesource timesource.Timesource, log *zap.Logger, opts ...pubsub.Option) *WakuRelay {
 	w := new(WakuRelay)
-	w.host = h
 	w.timesource = timesource
 	w.wakuRelayTopics = make(map[string]*pubsub.Topic)
 	w.relaySubs = make(map[string]*pubsub.Subscription)
-	w.subscriptions = make(map[string][]*Subscription)
 	w.bcaster = bcaster
 	w.minPeersToPublish = minPeersToPublish
 	w.wg = sync.WaitGroup{}
@@ -96,6 +87,11 @@ func NewWakuRelay(h host.Host, bcaster v2.Broadcaster, minPeersToPublish int, ti
 	return w
 }

+// Sets the host to be able to mount or consume a protocol
+func (w *WakuRelay) SetHost(h host.Host) {
+	w.host = h
+}
+
 func (w *WakuRelay) Start(ctx context.Context) error {
 	w.wg.Wait()
 	ctx, cancel := context.WithCancel(ctx)
@@ -129,6 +125,13 @@ func (w *WakuRelay) Topics() []string {
 	return result
 }

+func (w *WakuRelay) IsSubscribed(topic string) bool {
+	defer w.topicsMutex.Unlock()
+	w.topicsMutex.Lock()
+	_, ok := w.relaySubs[topic]
+	return ok
+}
+
 // SetPubSub is used to set an implementation of the pubsub system
 func (w *WakuRelay) SetPubSub(pubSub *pubsub.PubSub) {
 	w.pubsub = pubSub
@@ -150,17 +153,15 @@ func (w *WakuRelay) upsertTopic(topic string) (*pubsub.Topic, error) {
 	return pubSubTopic, nil
 }

+/*
 func (w *WakuRelay) validatorFactory(pubsubTopic string) func(ctx context.Context, peerID peer.ID, message *pubsub.Message) bool {
 	return func(ctx context.Context, peerID peer.ID, message *pubsub.Message) bool {
 		msg := new(pb.WakuMessage)
 		err := proto.Unmarshal(message.Data, msg)
-		if err != nil {
-			return false
-		}
-
-		return true
+		return err == nil
 	}
 }
+*/

@@ -170,17 +171,24 @@ func (w *WakuRelay) subscribe(topic string) (subs *pubsub.Subscription, err erro
 		return nil, err
 	}

-	err = w.pubsub.RegisterTopicValidator(topic, w.validatorFactory(topic))
-	if err != nil {
-		return nil, err
-	}
+	/*
+		// TODO: Add a function to validate the WakuMessage integrity
+		// Rejects messages that are not WakuMessage
+		err = w.pubsub.RegisterTopicValidator(topic, w.validatorFactory(topic))
+		if err != nil {
+			return nil, err
+		}
+	*/

 	sub, err = pubSubTopic.Subscribe()
 	if err != nil {
 		return nil, err
 	}
 	w.relaySubs[topic] = sub
+	if w.bcaster != nil {
+		w.wg.Add(1)
+		go w.subscribeToTopic(topic, sub)
+	}
 	w.log.Info("subscribing to topic", zap.String("topic", sub.Topic()))
 }
@@ -220,7 +228,7 @@ func (w *WakuRelay) PublishToTopic(ctx context.Context, message *pb.WakuMessage,

 	hash := message.Hash(topic)

-	w.log.Debug("waku.relay published", zap.String("hash", hex.EncodeToString(hash)))
+	w.log.Debug("waku.relay published", zap.String("pubsubTopic", topic), logging.HexString("hash", hash), zap.Int64("publishTime", w.timesource.Now().UnixNano()), zap.Int("payloadSizeBytes", len(message.Payload)))

 	return hash, nil
 }
@@ -240,16 +248,6 @@ func (w *WakuRelay) Stop() {

 	w.cancel()
 	w.wg.Wait()
-
-	w.subscriptionsMutex.Lock()
-	defer w.subscriptionsMutex.Unlock()
-
-	for _, topic := range w.Topics() {
-		for _, sub := range w.subscriptions[topic] {
-			sub.Unsubscribe()
-		}
-	}
-	w.subscriptions = nil
 }

 // EnoughPeersToPublish returns whether there are enough peers connected in the default waku pubsub topic
@@ -264,30 +262,21 @@ func (w *WakuRelay) EnoughPeersToPublishToTopic(topic string) bool {

 // SubscribeToTopic returns a Subscription to receive messages from a pubsub topic
 func (w *WakuRelay) SubscribeToTopic(ctx context.Context, topic string) (*Subscription, error) {
-	sub, err := w.subscribe(topic)
+	_, err := w.subscribe(topic)
 	if err != nil {
 		return nil, err
 	}

 	// Create client subscription
-	subscription := new(Subscription)
-	subscription.closed = false
-	subscription.C = make(chan *waku_proto.Envelope, 1024) // To avoid blocking
-	subscription.quit = make(chan struct{})
-
-	w.subscriptionsMutex.Lock()
-	defer w.subscriptionsMutex.Unlock()
-
-	w.subscriptions[topic] = append(w.subscriptions[topic], subscription)
-
+	subscription := NoopSubscription()
 	if w.bcaster != nil {
-		w.bcaster.Register(&topic, subscription.C)
+		subscription = w.bcaster.Register(topic, 1024)
 	}
-
-	w.wg.Add(1)
-	go w.subscribeToTopic(ctx, topic, subscription, sub)
-
-	return subscription, nil
+	go func() {
+		<-ctx.Done()
+		subscription.Unsubscribe()
+	}()
+	return &subscription, nil
 }

 // SubscribeToTopic returns a Subscription to receive messages from the default waku pubsub topic
@@ -303,10 +292,6 @@ func (w *WakuRelay) Unsubscribe(ctx context.Context, topic string) error {
 	}
 	w.log.Info("unsubscribing from topic", zap.String("topic", sub.Topic()))

-	for _, sub := range w.subscriptions[topic] {
-		sub.Unsubscribe()
-	}
-
 	w.relaySubs[topic].Cancel()
 	delete(w.relaySubs, topic)

@@ -321,34 +306,24 @@

 func (w *WakuRelay) nextMessage(ctx context.Context, sub *pubsub.Subscription) <-chan *pubsub.Message {
 	msgChannel := make(chan *pubsub.Message, 1024)
-	go func(msgChannel chan *pubsub.Message) {
-		defer func() {
-			if r := recover(); r != nil {
-				w.log.Debug("recovered msgChannel")
-			}
-		}()
-
+	go func() {
+		defer close(msgChannel)
 		for {
 			msg, err := sub.Next(ctx)
 			if err != nil {
 				if !errors.Is(err, context.Canceled) {
 					w.log.Error("getting message from subscription", zap.Error(err))
 				}
 				sub.Cancel()
-				close(msgChannel)
-				for _, subscription := range w.subscriptions[sub.Topic()] {
-					subscription.Unsubscribe()
-				}
+				return
 			}

 			msgChannel <- msg
 		}
-	}(msgChannel)
+	}()
 	return msgChannel
 }

-func (w *WakuRelay) subscribeToTopic(userCtx context.Context, pubsubTopic string, subscription *Subscription, sub *pubsub.Subscription) {
+func (w *WakuRelay) subscribeToTopic(pubsubTopic string, sub *pubsub.Subscription) {
 	defer w.wg.Done()

 	ctx, err := tag.New(w.ctx, tag.Insert(metrics.KeyType, "relay"))
@@ -360,39 +335,25 @@ func (w *WakuRelay) subscribeToTopic(userCtx context.Context, pubsubTopic string
 	subChannel := w.nextMessage(w.ctx, sub)
 	for {
 		select {
-		case <-userCtx.Done():
-			return
 		case <-ctx.Done():
 			return
-		case <-subscription.quit:
-			func(topic string) {
-				subscription.Lock()
-				defer subscription.Unlock()
-
-				if subscription.closed {
-					return
-				}
-				subscription.closed = true
-				if w.bcaster != nil {
-					<-w.bcaster.WaitUnregister(&topic, subscription.C) // Remove from broadcast list
-				}
-
-				close(subscription.C)
-			}(pubsubTopic)
+		// TODO: if there are no more relay subscriptions, close the pubsub subscription
-		case msg := <-subChannel:
-			if msg == nil {
+		case msg, ok := <-subChannel:
+			if !ok {
 				return
 			}
-			stats.Record(ctx, metrics.Messages.M(1))
 			wakuMessage := &pb.WakuMessage{}
 			if err := proto.Unmarshal(msg.Data, wakuMessage); err != nil {
 				w.log.Error("decoding message", zap.Error(err))
 				return
 			}

+			payloadSizeInBytes := len(wakuMessage.Payload)
+			payloadSizeInKb := payloadSizeInBytes / 1000
+			stats.Record(ctx, metrics.Messages.M(1), metrics.MessageSize.M(int64(payloadSizeInKb)))
+
 			envelope := waku_proto.NewEnvelope(wakuMessage, w.timesource.Now().UnixNano(), pubsubTopic)
-			w.log.Debug("waku.relay received", logging.HexString("hash", envelope.Hash()))
+			w.log.Debug("waku.relay received", zap.String("pubsubTopic", pubsubTopic), logging.HexString("hash", envelope.Hash()), zap.Int64("receivedTime", envelope.Index().ReceiverTime), zap.Int("payloadSizeBytes", payloadSizeInBytes))

 			if w.bcaster != nil {
 				w.bcaster.Submit(envelope)
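With the broadcaster owning the channels, consuming a relay subscription reduces to ranging over Ch; cancelling the passed context unsubscribes automatically. Sketch (ctx and the handler are placeholders):

sub, err := w.SubscribeToTopic(ctx, "/waku/2/default-waku/proto")
if err != nil {
	return err
}
for env := range sub.Ch { // closed on ctx cancellation, Unsubscribe, or relay stop
	handleEnvelope(env) // placeholder
}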
@@ -24,12 +24,6 @@ const MAX_EPOCH_GAP = int64(MAX_CLOCK_GAP_SECONDS / rln.EPOCH_UNIT_SECONDS)
 // Acceptable roots for merkle root validation of incoming messages
 const AcceptableRootWindowSize = 5

-type AppInfo struct {
-	Application   string
-	AppIdentifier string
-	Version       string
-}
-
 type RegistrationHandler = func(tx *types.Transaction)

 type SpamHandler = func(message *pb.WakuMessage) error
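The struct is not dropped outright; it appears to have moved into the rln keystore package, which the new dynamic group manager below instantiates as RLNAppInfo:

info := keystore.AppInfo{ // same three fields as the removed rln.AppInfo
	Application:   "go-waku-rln-relay",
	AppIdentifier: "01234567890abcdef",
	Version:       "0.1",
}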
335
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/dynamic/dynamic.go
generated
vendored
Normal file
335
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/dynamic/dynamic.go
generated
vendored
Normal file
|
@ -0,0 +1,335 @@
|
||||||
|
package dynamic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/ecdsa"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"math/big"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
"github.com/ethereum/go-ethereum/ethclient"
|
||||||
|
"github.com/waku-org/go-waku/waku/v2/protocol/rln/contracts"
|
||||||
|
"github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager"
|
||||||
|
"github.com/waku-org/go-waku/waku/v2/protocol/rln/keystore"
|
||||||
|
"github.com/waku-org/go-zerokit-rln/rln"
|
||||||
|
om "github.com/wk8/go-ordered-map"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
var RLNAppInfo = keystore.AppInfo{
|
||||||
|
Application: "go-waku-rln-relay",
|
||||||
|
AppIdentifier: "01234567890abcdef",
|
||||||
|
Version: "0.1",
|
||||||
|
}
|
||||||
|
|
||||||
|
type DynamicGroupManager struct {
|
||||||
|
rln *rln.RLN
|
||||||
|
log *zap.Logger
|
||||||
|
|
||||||
|
cancel context.CancelFunc
|
||||||
|
wg sync.WaitGroup
|
||||||
|
|
||||||
|
identityCredential *rln.IdentityCredential
|
||||||
|
membershipIndex *rln.MembershipIndex
|
||||||
|
|
||||||
|
membershipContractAddress common.Address
|
||||||
|
ethClientAddress string
|
||||||
|
ethClient *ethclient.Client
|
||||||
|
|
||||||
|
// ethAccountPrivateKey is required for signing transactions
|
||||||
|
// TODO may need to erase this ethAccountPrivateKey when is not used
|
||||||
|
// TODO may need to make ethAccountPrivateKey mandatory
|
||||||
|
ethAccountPrivateKey *ecdsa.PrivateKey
|
||||||
|
|
||||||
|
eventHandler RegistrationEventHandler
|
||||||
|
|
||||||
|
registrationHandler RegistrationHandler
|
||||||
|
chainId *big.Int
|
||||||
|
rlnContract *contracts.RLN
|
||||||
|
membershipFee *big.Int
|
||||||
|
|
||||||
|
saveKeystore bool
|
||||||
|
keystorePath string
|
||||||
|
keystorePassword string
|
||||||
|
|
||||||
|
rootTracker *group_manager.MerkleRootTracker
|
||||||
|
}
|
||||||
|
|
||||||
|
func handler(gm *DynamicGroupManager, events []*contracts.RLNMemberRegistered) error {
|
||||||
|
toRemoveTable := om.New()
|
||||||
|
toInsertTable := om.New()
|
||||||
|
for _, event := range events {
|
||||||
|
if event.Raw.Removed {
|
||||||
|
var indexes []uint64
|
||||||
|
i_idx, ok := toRemoveTable.Get(event.Raw.BlockNumber)
|
||||||
|
if ok {
|
||||||
|
indexes = i_idx.([]uint64)
|
||||||
|
}
|
||||||
|
indexes = append(indexes, event.Index.Uint64())
|
||||||
|
toRemoveTable.Set(event.Raw.BlockNumber, indexes)
|
||||||
|
} else {
|
||||||
|
var eventsPerBlock []*contracts.RLNMemberRegistered
|
||||||
|
i_evt, ok := toInsertTable.Get(event.Raw.BlockNumber)
|
||||||
|
if ok {
|
||||||
|
eventsPerBlock = i_evt.([]*contracts.RLNMemberRegistered)
|
||||||
|
}
|
||||||
|
eventsPerBlock = append(eventsPerBlock, event)
|
||||||
|
toInsertTable.Set(event.Raw.BlockNumber, eventsPerBlock)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
err := gm.RemoveMembers(toRemoveTable)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = gm.InsertMembers(toInsertTable)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type RegistrationHandler = func(tx *types.Transaction)
|
||||||
|
|
||||||
|
func NewDynamicGroupManager(
|
||||||
|
ethClientAddr string,
|
||||||
|
ethAccountPrivateKey *ecdsa.PrivateKey,
|
||||||
|
memContractAddr common.Address,
|
||||||
|
keystorePath string,
|
||||||
|
keystorePassword string,
|
||||||
|
saveKeystore bool,
|
||||||
|
registrationHandler RegistrationHandler,
|
||||||
|
log *zap.Logger,
|
||||||
|
) (*DynamicGroupManager, error) {
|
||||||
|
log = log.Named("rln-dynamic")
|
||||||
|
|
||||||
|
path := keystorePath
|
||||||
|
if path == "" {
|
||||||
|
log.Warn("keystore: no credentials path set, using default path", zap.String("path", keystore.RLN_CREDENTIALS_FILENAME))
|
||||||
|
path = keystore.RLN_CREDENTIALS_FILENAME
|
||||||
|
}
|
||||||
|
|
||||||
|
password := keystorePassword
|
||||||
|
if password == "" {
|
||||||
|
log.Warn("keystore: no credentials password set, using default password", zap.String("password", keystore.RLN_CREDENTIALS_PASSWORD))
|
||||||
|
password = keystore.RLN_CREDENTIALS_PASSWORD
|
||||||
|
}
|
||||||
|
|
||||||
|
return &DynamicGroupManager{
|
||||||
|
membershipContractAddress: memContractAddr,
|
||||||
|
ethClientAddress: ethClientAddr,
|
||||||
|
ethAccountPrivateKey: ethAccountPrivateKey,
|
||||||
|
registrationHandler: registrationHandler,
|
||||||
|
eventHandler: handler,
|
||||||
|
saveKeystore: saveKeystore,
|
||||||
|
keystorePath: path,
|
||||||
|
keystorePassword: password,
|
||||||
|
log: log,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (gm *DynamicGroupManager) getMembershipFee(ctx context.Context) (*big.Int, error) {
|
||||||
|
auth, err := bind.NewKeyedTransactorWithChainID(gm.ethAccountPrivateKey, gm.chainId)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
auth.Context = ctx
|
||||||
|
|
||||||
|
return gm.rlnContract.MEMBERSHIPDEPOSIT(&bind.CallOpts{Context: ctx})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (gm *DynamicGroupManager) Start(ctx context.Context, rlnInstance *rln.RLN, rootTracker *group_manager.MerkleRootTracker) error {
|
||||||
|
if gm.cancel != nil {
|
||||||
|
return errors.New("already started")
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(ctx)
|
||||||
|
gm.cancel = cancel
|
||||||
|
|
||||||
|
gm.log.Info("mounting rln-relay in on-chain/dynamic mode")
|
||||||
|
|
||||||
|
backend, err := ethclient.Dial(gm.ethClientAddress)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
gm.ethClient = backend
|
||||||
|
|
||||||
|
gm.rln = rlnInstance
|
||||||
|
gm.rootTracker = rootTracker
|
||||||
|
|
||||||
|
gm.chainId, err = backend.ChainID(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
gm.rlnContract, err = contracts.NewRLN(gm.membershipContractAddress, backend)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// check if the contract exists by calling a static function
|
||||||
|
gm.membershipFee, err = gm.getMembershipFee(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if gm.identityCredential == nil && gm.keystorePassword != "" && gm.keystorePath != "" {
|
||||||
|
credentials, err := keystore.GetMembershipCredentials(gm.log,
|
||||||
|
gm.keystorePath,
|
||||||
|
gm.keystorePassword,
|
||||||
|
RLNAppInfo,
|
||||||
|
nil,
|
||||||
|
[]keystore.MembershipContract{{
|
||||||
|
ChainId: gm.chainId.String(),
|
||||||
|
Address: gm.membershipContractAddress.Hex(),
|
||||||
|
}})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: accept an index from the config
|
||||||
|
if len(credentials) != 0 {
|
||||||
|
gm.identityCredential = &credentials[0].IdentityCredential
|
||||||
|
gm.membershipIndex = &credentials[0].MembershipGroups[0].TreeIndex
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if gm.identityCredential == nil && gm.ethAccountPrivateKey == nil {
|
||||||
|
return errors.New("either a credentials path or a private key must be specified")
|
||||||
|
}
|
||||||
|
|
||||||
|
// prepare rln membership key pair
|
||||||
|
if gm.identityCredential == nil && gm.ethAccountPrivateKey != nil {
|
||||||
|
gm.log.Info("no rln-relay key is provided, generating one")
|
||||||
|
identityCredential, err := rlnInstance.MembershipKeyGen()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
		gm.identityCredential = identityCredential

		// register the rln-relay peer to the membership contract
		gm.membershipIndex, err = gm.Register(ctx)
		if err != nil {
			return err
		}

		err = gm.persistCredentials()
		if err != nil {
			return err
		}

		gm.log.Info("registered peer into the membership contract")
	}

	if gm.identityCredential == nil || gm.membershipIndex == nil {
		return errors.New("no credentials available")
	}

	if err = gm.HandleGroupUpdates(ctx, gm.eventHandler); err != nil {
		return err
	}

	return nil
}

func (gm *DynamicGroupManager) persistCredentials() error {
	if !gm.saveKeystore {
		return nil
	}

	if gm.identityCredential == nil || gm.membershipIndex == nil {
		return errors.New("no credentials to persist")
	}

	keystoreCred := keystore.MembershipCredentials{
		IdentityCredential: *gm.identityCredential,
		MembershipGroups: []keystore.MembershipGroup{{
			TreeIndex: *gm.membershipIndex,
			MembershipContract: keystore.MembershipContract{
				ChainId: gm.chainId.String(),
				Address: gm.membershipContractAddress.String(),
			},
		}},
	}

	err := keystore.AddMembershipCredentials(gm.keystorePath, []keystore.MembershipCredentials{keystoreCred}, gm.keystorePassword, RLNAppInfo, keystore.DefaultSeparator)
	if err != nil {
		return fmt.Errorf("failed to persist credentials: %w", err)
	}

	return nil
}

func (gm *DynamicGroupManager) InsertMembers(toInsert *om.OrderedMap) error {
	for pair := toInsert.Oldest(); pair != nil; pair = pair.Next() {
		events := pair.Value.([]*contracts.RLNMemberRegistered) // TODO: should these be sorted by index? we assume all members arrive in order
		for _, evt := range events {
			pubkey := rln.Bytes32(evt.Pubkey.Bytes())
			// TODO: should we track indexes to identify missing?
			err := gm.rln.InsertMember(pubkey)
			if err != nil {
				gm.log.Error("inserting member into merkletree", zap.Error(err))
				return err
			}
		}

		_, err := gm.rootTracker.UpdateLatestRoot(pair.Key.(uint64))
		if err != nil {
			return err
		}
	}
	return nil
}

func (gm *DynamicGroupManager) RemoveMembers(toRemove *om.OrderedMap) error {
	for pair := toRemove.Newest(); pair != nil; pair = pair.Prev() {
		memberIndexes := pair.Value.([]uint64)
		for _, index := range memberIndexes {
			err := gm.rln.DeleteMember(uint(index))
			if err != nil {
				gm.log.Error("deleting member", zap.Error(err))
				return err
			}
		}
		gm.rootTracker.Backfill(pair.Key.(uint64))
	}

	return nil
}

func (gm *DynamicGroupManager) IdentityCredentials() (rln.IdentityCredential, error) {
	if gm.identityCredential == nil {
		return rln.IdentityCredential{}, errors.New("identity credential has not been setup")
	}

	return *gm.identityCredential, nil
}

func (gm *DynamicGroupManager) SetCredentials(identityCredential *rln.IdentityCredential, index *rln.MembershipIndex) {
	gm.identityCredential = identityCredential
	gm.membershipIndex = index
}

func (gm *DynamicGroupManager) MembershipIndex() (rln.MembershipIndex, error) {
	if gm.membershipIndex == nil {
		return 0, errors.New("membership index has not been setup")
	}

	return *gm.membershipIndex, nil
}

func (gm *DynamicGroupManager) Stop() {
	if gm.cancel == nil {
		return
	}

	gm.cancel()
	gm.wg.Wait()
}
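Taken together, Start either restores an existing credential or registers a new one, persists it, and then follows the contract. A minimal sketch of the restore path, using only the exported API shown above (the helper function and its name are ours, not part of go-waku):

package example

import (
	"github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/dynamic"
	"github.com/waku-org/go-zerokit-rln/rln"
)

// restoreCredentials illustrates the SetCredentials path: once both the
// identity credential and the membership index are non-nil, Start skips the
// on-chain Register call and goes straight to HandleGroupUpdates.
func restoreCredentials(gm *dynamic.DynamicGroupManager, cred rln.IdentityCredential, index rln.MembershipIndex) {
	gm.SetCredentials(&cred, &index)
}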
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/dynamic/web3.go (generated, vendored, new file, 259 lines)
@@ -0,0 +1,259 @@
package dynamic

import (
	"context"
	"crypto/ecdsa"
	"errors"
	"math/big"
	"time"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/waku-org/go-waku/waku/v2/protocol/rln/contracts"
	r "github.com/waku-org/go-zerokit-rln/rln"
	"go.uber.org/zap"
)

func ToBigInt(i []byte) *big.Int {
	result := new(big.Int)
	result.SetBytes(i[:])
	return result
}

func register(ctx context.Context, backend *ethclient.Client, membershipFee *big.Int, idComm r.IDCommitment, ethAccountPrivateKey *ecdsa.PrivateKey, rlnContract *contracts.RLN, chainID *big.Int, registrationHandler RegistrationHandler, log *zap.Logger) (*r.MembershipIndex, error) {
	auth, err := bind.NewKeyedTransactorWithChainID(ethAccountPrivateKey, chainID)
	if err != nil {
		return nil, err
	}
	auth.Value = membershipFee
	auth.Context = ctx

	log.Debug("registering an id commitment", zap.Binary("idComm", idComm[:]))

	// registers the idComm into the membership contract whose address is in rlnPeer.membershipContractAddress
	tx, err := rlnContract.Register(auth, ToBigInt(idComm[:]))
	if err != nil {
		return nil, err
	}

	log.Info("transaction broadcasted", zap.String("transactionHash", tx.Hash().Hex()))

	if registrationHandler != nil {
		registrationHandler(tx)
	}

	txReceipt, err := bind.WaitMined(ctx, backend, tx)
	if err != nil {
		return nil, err
	}

	if txReceipt.Status != types.ReceiptStatusSuccessful {
		return nil, errors.New("transaction reverted")
	}

	// the receipt topic holds the hash of signature of the raised events
	evt, err := rlnContract.ParseMemberRegistered(*txReceipt.Logs[0])
	if err != nil {
		return nil, err
	}

	var eventIdComm r.IDCommitment = r.Bytes32(evt.Pubkey.Bytes())

	log.Debug("the identity commitment key extracted from tx log", zap.Binary("eventIdComm", eventIdComm[:]))

	if eventIdComm != idComm {
		return nil, errors.New("invalid id commitment key")
	}

	result := new(r.MembershipIndex)
	*result = r.MembershipIndex(uint(evt.Index.Int64()))

	// debug "the index of registered identity commitment key", eventIndex=eventIndex

	log.Debug("the index of registered identity commitment key", zap.Uint("eventIndex", uint(*result)))

	return result, nil
}

// Register registers the public key of the rlnPeer, which is rlnPeer.membershipKeyPair.publicKey,
// into the membership contract whose address is in rlnPeer.membershipContractAddress
func (gm *DynamicGroupManager) Register(ctx context.Context) (*r.MembershipIndex, error) {
	return register(ctx,
		gm.ethClient,
		gm.membershipFee,
		gm.identityCredential.IDCommitment,
		gm.ethAccountPrivateKey,
		gm.rlnContract,
		gm.chainId,
		gm.registrationHandler,
		gm.log)
}

// the types of inputs to this handler match the MemberRegistered event/proc defined in the MembershipContract interface
type RegistrationEventHandler = func(*DynamicGroupManager, []*contracts.RLNMemberRegistered) error

// HandleGroupUpdates mounts the supplied handler for the registration events emitted by the membership contract.
// It connects to the eth client, subscribes to the `MemberRegistered` event emitted from the `MembershipContract`,
// and collects all the events; for every received event, it calls the `handler`.
func (gm *DynamicGroupManager) HandleGroupUpdates(ctx context.Context, handler RegistrationEventHandler) error {
	err := gm.loadOldEvents(ctx, gm.rlnContract, handler)
	if err != nil {
		return err
	}

	errCh := make(chan error)

	gm.wg.Add(1)
	go gm.watchNewEvents(ctx, gm.rlnContract, handler, gm.log, errCh)
	return <-errCh
}

func (gm *DynamicGroupManager) loadOldEvents(ctx context.Context, rlnContract *contracts.RLN, handler RegistrationEventHandler) error {
	events, err := gm.getEvents(ctx, 0, nil)
	if err != nil {
		return err
	}
	return handler(gm, events)
}

func (gm *DynamicGroupManager) watchNewEvents(ctx context.Context, rlnContract *contracts.RLN, handler RegistrationEventHandler, log *zap.Logger, errCh chan<- error) {
	defer gm.wg.Done()

	// Watch for new events
	firstErr := true
	headerCh := make(chan *types.Header)
	subs := event.Resubscribe(2*time.Second, func(ctx context.Context) (event.Subscription, error) {
		s, err := gm.ethClient.SubscribeNewHead(ctx, headerCh)
		if err != nil {
			if err == rpc.ErrNotificationsUnsupported {
				err = errors.New("notifications not supported. The node must support websockets")
			}
			if firstErr {
				errCh <- err
			}
			gm.log.Error("subscribing to rln events", zap.Error(err))
		}
		firstErr = false
		close(errCh)
		return s, err
	})

	defer subs.Unsubscribe()
	defer close(headerCh)

	for {
		select {
		case h := <-headerCh:
			blk := h.Number.Uint64()
			events, err := gm.getEvents(ctx, blk, &blk)
			if err != nil {
				gm.log.Error("obtaining rln events", zap.Error(err))
			}

			err = handler(gm, events)
			if err != nil {
				gm.log.Error("processing rln log", zap.Error(err))
			}
		case <-ctx.Done():
			return
		case err := <-subs.Err():
			if err != nil {
				gm.log.Error("watching new events", zap.Error(err))
			}
			return
		}
	}
}

const maxBatchSize = uint64(5000000) // TODO: tune this
const additiveFactorMultiplier = 0.10
const multiplicativeDecreaseDivisor = 2

func tooMuchDataRequestedError(err error) bool {
	// this error is only infura specific (other providers might have different error messages)
	return err.Error() == "query returned more than 10000 results"
}

func (gm *DynamicGroupManager) getEvents(ctx context.Context, from uint64, to *uint64) ([]*contracts.RLNMemberRegistered, error) {
	var results []*contracts.RLNMemberRegistered

	// Adapted from prysm logic for fetching historical logs

	toBlock := to
	if to == nil {
		block, err := gm.ethClient.BlockByNumber(ctx, nil)
		if err != nil {
			return nil, err
		}

		blockNumber := block.Number().Uint64()
		toBlock = &blockNumber
	}

	batchSize := maxBatchSize
	additiveFactor := uint64(float64(batchSize) * additiveFactorMultiplier)

	currentBlockNum := from
	for currentBlockNum < *toBlock {
		start := currentBlockNum
		end := currentBlockNum + batchSize
		if end > *toBlock {
			end = *toBlock
		}

		evts, err := gm.fetchEvents(ctx, start, &end)
		if err != nil {
			if tooMuchDataRequestedError(err) {
				if batchSize == 0 {
					return nil, errors.New("batch size is zero")
				}

				// multiplicative decrease
				batchSize = batchSize / multiplicativeDecreaseDivisor
				continue
			}
			return nil, err
		}

		results = append(results, evts...)

		currentBlockNum = end

		if batchSize < maxBatchSize {
			// update the batchSize with additive increase
			batchSize = batchSize + additiveFactor
			if batchSize > maxBatchSize {
				batchSize = maxBatchSize
			}
		}
	}

	return results, nil
}

func (gm *DynamicGroupManager) fetchEvents(ctx context.Context, from uint64, to *uint64) ([]*contracts.RLNMemberRegistered, error) {
	logIterator, err := gm.rlnContract.FilterMemberRegistered(&bind.FilterOpts{Start: from, End: to, Context: ctx})
	if err != nil {
		return nil, err
	}

	var results []*contracts.RLNMemberRegistered

	for {
		if !logIterator.Next() {
			break
		}

		if logIterator.Error() != nil {
			return nil, logIterator.Error()
		}

		results = append(results, logIterator.Event)
	}

	return results, nil
}
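The batch-size control in getEvents is a small AIMD loop: halve the window when the provider rejects a query as too large, then grow it back additively. A self-contained illustration of the same policy (constants mirror the ones above; the function name is ours):

package main

import "fmt"

const (
	maxBatch       = uint64(5000000)
	additiveFactor = maxBatch / 10 // additiveFactorMultiplier = 0.10
)

func nextBatchSize(current uint64, tooMuchData bool) uint64 {
	if tooMuchData {
		return current / 2 // multiplicative decrease
	}
	if current < maxBatch {
		current += additiveFactor // additive increase, capped at the max
		if current > maxBatch {
			current = maxBatch
		}
	}
	return current
}

func main() {
	size := maxBatch
	for _, rejected := range []bool{true, true, false, false, false} {
		size = nextBatchSize(size, rejected)
		fmt.Println(size)
	}
}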
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/root_tracker.go (generated, vendored, 127 changed lines)
@@ -1,34 +1,133 @@
 package group_manager
 
-import "github.com/waku-org/go-zerokit-rln/rln"
+import (
+	"sync"
+
+	"github.com/waku-org/go-zerokit-rln/rln"
+)
+
+type RootsPerBlock struct {
+	root        rln.MerkleNode
+	blockNumber uint64
+}
 
 type MerkleRootTracker struct {
+	sync.RWMutex
+
 	rln                      *rln.RLN
 	acceptableRootWindowSize int
-	validMerkleRoots         []rln.MerkleNode
+	validMerkleRoots         []RootsPerBlock
+	merkleRootBuffer         []RootsPerBlock
 }
 
-func NewMerkleRootTracker(acceptableRootWindowSize int, rlnInstance *rln.RLN) *MerkleRootTracker {
-	return &MerkleRootTracker{
+const maxBufferSize = 20
+
+func NewMerkleRootTracker(acceptableRootWindowSize int, rlnInstance *rln.RLN) (*MerkleRootTracker, error) {
+	result := &MerkleRootTracker{
 		acceptableRootWindowSize: acceptableRootWindowSize,
 		rln:                      rlnInstance,
 	}
-}
 
-func (m *MerkleRootTracker) Sync() error {
-	root, err := m.rln.GetMerkleRoot()
+	_, err := result.UpdateLatestRoot(0)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
-	m.validMerkleRoots = append(m.validMerkleRoots, root)
+	return result, nil
+}
+
+func (m *MerkleRootTracker) Backfill(fromBlockNumber uint64) {
+	m.Lock()
+	defer m.Unlock()
+
+	numBlocks := 0
+	for i := len(m.validMerkleRoots) - 1; i >= 0; i-- {
+		if m.validMerkleRoots[i].blockNumber >= fromBlockNumber {
+			numBlocks++
+		}
+	}
+
+	if numBlocks == 0 {
+		return
+	}
+
+	// Remove last roots
+	rootsToPop := numBlocks
+	if len(m.validMerkleRoots) < rootsToPop {
+		rootsToPop = len(m.validMerkleRoots)
+	}
+	m.validMerkleRoots = m.validMerkleRoots[0 : len(m.validMerkleRoots)-rootsToPop]
+
+	if len(m.merkleRootBuffer) == 0 {
+		return
+	}
+
+	// Backfill the tree's acceptable roots
+	rootsToRestore := numBlocks
+	bufferLen := len(m.merkleRootBuffer)
+	if bufferLen < rootsToRestore {
+		rootsToRestore = bufferLen
+	}
+	for i := 0; i < rootsToRestore; i++ {
+		x, newRootBuffer := m.merkleRootBuffer[len(m.merkleRootBuffer)-1], m.merkleRootBuffer[:len(m.merkleRootBuffer)-1] // Pop
+		m.validMerkleRoots = append([]RootsPerBlock{x}, m.validMerkleRoots...)
+		m.merkleRootBuffer = newRootBuffer
+	}
+}
+
+func (m *MerkleRootTracker) UpdateLatestRoot(blockNumber uint64) (rln.MerkleNode, error) {
+	m.Lock()
+	defer m.Unlock()
+
+	root, err := m.rln.GetMerkleRoot()
+	if err != nil {
+		return [32]byte{}, err
+	}
+
+	m.pushRoot(blockNumber, root)
+
+	return root, nil
+}
+
+func (m *MerkleRootTracker) pushRoot(blockNumber uint64, root [32]byte) {
+	m.validMerkleRoots = append(m.validMerkleRoots, RootsPerBlock{
+		root:        root,
+		blockNumber: blockNumber,
+	})
 
 	// Maintain valid merkle root window
 	if len(m.validMerkleRoots) > m.acceptableRootWindowSize {
+		m.merkleRootBuffer = append(m.merkleRootBuffer, m.validMerkleRoots[0])
 		m.validMerkleRoots = m.validMerkleRoots[1:]
 	}
 
-	return nil
+	// Maintain merkle root buffer
+	if len(m.merkleRootBuffer) > maxBufferSize {
+		m.merkleRootBuffer = m.merkleRootBuffer[1:]
+	}
 }
 
 func (m *MerkleRootTracker) Roots() []rln.MerkleNode {
-	return m.validMerkleRoots
+	m.RLock()
+	defer m.RUnlock()
+
+	result := make([]rln.MerkleNode, len(m.validMerkleRoots))
+	for i := range m.validMerkleRoots {
+		result[i] = m.validMerkleRoots[i].root
+	}
+
+	return result
+}
+
+func (m *MerkleRootTracker) Buffer() []rln.MerkleNode {
+	m.RLock()
+	defer m.RUnlock()
+
+	result := make([]rln.MerkleNode, len(m.merkleRootBuffer))
+	for i := range m.merkleRootBuffer {
+		result[i] = m.merkleRootBuffer[i].root
+	}
+
+	return result
 }
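The reworked tracker now records the block number alongside each root, which is what makes reorg handling possible. A minimal usage sketch, assuming an RLN instance from go-zerokit-rln (error handling abbreviated):

package main

import (
	"fmt"

	"github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager"
	"github.com/waku-org/go-zerokit-rln/rln"
)

func main() {
	rlnInstance, err := rln.NewRLN()
	if err != nil {
		panic(err)
	}

	// Keep the 5 most recent roots as "acceptable" for proof validation.
	tracker, err := group_manager.NewMerkleRootTracker(5, rlnInstance)
	if err != nil {
		panic(err)
	}

	// After inserting members seen at block 100, snapshot the new root.
	if _, err := tracker.UpdateLatestRoot(100); err != nil {
		panic(err)
	}

	// On a chain reorg from block 100 onward, drop those roots and restore
	// older ones from the buffer.
	tracker.Backfill(100)

	fmt.Println(len(tracker.Roots()), "acceptable roots")
}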
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/static/static.go (generated, vendored, 20 changed lines)
@@ -45,18 +45,9 @@ func (gm *StaticGroupManager) Start(ctx context.Context, rlnInstance *rln.RLN, r
 	gm.rln = rlnInstance
 	gm.rootTracker = rootTracker
 
-	err := rootTracker.Sync()
-	if err != nil {
-		return err
-	}
-
 	// add members to the Merkle tree
-	for _, member := range gm.group {
-		if err := rlnInstance.InsertMember(member); err != nil {
-			return err
-		}
-	}
-
-	err = rootTracker.Sync()
+	for i, member := range gm.group {
+		err := gm.insertMember(member, uint64(i+1))
 		if err != nil {
 			return err
 		}
@@ -67,8 +58,9 @@ func (gm *StaticGroupManager) Start(ctx context.Context, rlnInstance *rln.RLN, r
 	return nil
 }
 
-func (gm *StaticGroupManager) InsertMember(pubkey rln.IDCommitment) error {
-	gm.log.Debug("a new key is added", zap.Binary("pubkey", pubkey[:]))
+func (gm *StaticGroupManager) insertMember(pubkey rln.IDCommitment, index uint64) error {
+	gm.log.Debug("a new key is added", zap.Binary("pubkey", pubkey[:]), zap.Uint64("index", index))
 
 	// assuming all the members arrive in order
 	err := gm.rln.InsertMember(pubkey)
 	if err != nil {
@@ -76,7 +68,7 @@ func (gm *StaticGroupManager) insertMember(pubkey rln.IDCommitment, index uint64
 		return err
 	}
 
-	err = gm.rootTracker.Sync()
+	_, err = gm.rootTracker.UpdateLatestRoot(index)
 	if err != nil {
 		return err
 	}
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/keystore/keystore.go (generated, vendored, new file, 346 lines)
@@ -0,0 +1,346 @@
package keystore

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"os"

	"github.com/ethereum/go-ethereum/accounts/keystore"
	"github.com/waku-org/go-zerokit-rln/rln"
	"go.uber.org/zap"
)

const RLN_CREDENTIALS_FILENAME = "rlnCredentials.json"
const RLN_CREDENTIALS_PASSWORD = "password"

type MembershipContract struct {
	ChainId string `json:"chainId"`
	Address string `json:"address"`
}

type MembershipGroup struct {
	MembershipContract MembershipContract  `json:"membershipContract"`
	TreeIndex          rln.MembershipIndex `json:"treeIndex"`
}

type MembershipCredentials struct {
	IdentityCredential rln.IdentityCredential `json:"identityCredential"`
	MembershipGroups   []MembershipGroup      `json:"membershipGroups"`
}

type AppInfo struct {
	Application   string `json:"application"`
	AppIdentifier string `json:"appIdentifier"`
	Version       string `json:"version"`
}

type AppKeystore struct {
	Application   string                `json:"application"`
	AppIdentifier string                `json:"appIdentifier"`
	Credentials   []keystore.CryptoJSON `json:"credentials"`
	Version       string                `json:"version"`
}

const DefaultSeparator = "\n"

func (m MembershipCredentials) Equals(other MembershipCredentials) bool {
	if !rln.IdentityCredentialEquals(m.IdentityCredential, other.IdentityCredential) {
		return false
	}

	for _, x := range m.MembershipGroups {
		found := false
		for _, y := range other.MembershipGroups {
			if x.Equals(y) {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}

	return true
}

func (m MembershipGroup) Equals(other MembershipGroup) bool {
	return m.MembershipContract.Equals(other.MembershipContract) && m.TreeIndex == other.TreeIndex
}

func (m MembershipContract) Equals(other MembershipContract) bool {
	return m.Address == other.Address && m.ChainId == other.ChainId
}

func CreateAppKeystore(path string, appInfo AppInfo, separator string) error {
	if separator == "" {
		separator = DefaultSeparator
	}

	keystore := AppKeystore{
		Application:   appInfo.Application,
		AppIdentifier: appInfo.AppIdentifier,
		Version:       appInfo.Version,
	}

	b, err := json.Marshal(keystore)
	if err != nil {
		return err
	}

	b = append(b, []byte(separator)...)

	buffer := new(bytes.Buffer)

	err = json.Compact(buffer, b)
	if err != nil {
		return err
	}

	return ioutil.WriteFile(path, buffer.Bytes(), 0600)
}

func LoadAppKeystore(path string, appInfo AppInfo, separator string) (AppKeystore, error) {
	if separator == "" {
		separator = DefaultSeparator
	}

	_, err := os.Stat(path)
	if err != nil {
		if os.IsNotExist(err) {
			// If no keystore exists at path, we create a new empty one with the passed keystore parameters
			err = CreateAppKeystore(path, appInfo, separator)
			if err != nil {
				return AppKeystore{}, err
			}
		} else {
			return AppKeystore{}, err
		}
	}

	src, err := os.ReadFile(path)
	if err != nil {
		return AppKeystore{}, err
	}

	for _, keystoreBytes := range bytes.Split(src, []byte(separator)) {
		if len(keystoreBytes) == 0 {
			continue
		}

		keystore := AppKeystore{}
		err := json.Unmarshal(keystoreBytes, &keystore)
		if err != nil {
			continue
		}

		if keystore.AppIdentifier == appInfo.AppIdentifier && keystore.Application == appInfo.Application && keystore.Version == appInfo.Version {
			return keystore, nil
		}
	}

	return AppKeystore{}, errors.New("no keystore found")
}

func filterCredential(credential MembershipCredentials, filterIdentityCredentials []MembershipCredentials, filterMembershipContracts []MembershipContract) *MembershipCredentials {
	if len(filterIdentityCredentials) != 0 {
		found := false
		for _, filterCreds := range filterIdentityCredentials {
			if filterCreds.Equals(credential) {
				found = true
			}
		}
		if !found {
			return nil
		}
	}

	if len(filterMembershipContracts) != 0 {
		var membershipGroupsIntersection []MembershipGroup
		for _, filterContract := range filterMembershipContracts {
			for _, credentialGroups := range credential.MembershipGroups {
				if filterContract.Equals(credentialGroups.MembershipContract) {
					membershipGroupsIntersection = append(membershipGroupsIntersection, credentialGroups)
				}
			}
		}
		if len(membershipGroupsIntersection) != 0 {
			// If we have a match on some groups, we return the credential with filtered groups
			return &MembershipCredentials{
				IdentityCredential: credential.IdentityCredential,
				MembershipGroups:   membershipGroupsIntersection,
			}
		} else {
			return nil
		}
	}

	// We hit this return only if
	// - filterIdentityCredentials.len() == 0 and filterMembershipContracts.len() == 0 (no filter)
	// - filterIdentityCredentials.len() != 0 and filterMembershipContracts.len() == 0 (filter only on identity credential)
	// Indeed, filterMembershipContracts.len() != 0 will have its exclusive return based on all values of membershipGroupsIntersection.len()
	return &credential
}

func GetMembershipCredentials(logger *zap.Logger, credentialsPath string, password string, appInfo AppInfo, filterIdentityCredentials []MembershipCredentials, filterMembershipContracts []MembershipContract) ([]MembershipCredentials, error) {
	k, err := LoadAppKeystore(credentialsPath, appInfo, DefaultSeparator)
	if err != nil {
		return nil, err
	}

	var result []MembershipCredentials

	for _, credential := range k.Credentials {
		credentialsBytes, err := keystore.DecryptDataV3(credential, password)
		if err != nil {
			return nil, err
		}

		var credentials MembershipCredentials
		err = json.Unmarshal(credentialsBytes, &credentials)
		if err != nil {
			return nil, err
		}

		filteredCredential := filterCredential(credentials, filterIdentityCredentials, filterMembershipContracts)
		if filteredCredential != nil {
			result = append(result, *filteredCredential)
		}
	}

	return result, nil
}

// AddMembershipCredentials adds a sequence of membership credentials to the keystore matching the application, appIdentifier and version filters.
func AddMembershipCredentials(path string, credentials []MembershipCredentials, password string, appInfo AppInfo, separator string) error {
	k, err := LoadAppKeystore(path, appInfo, DefaultSeparator)
	if err != nil {
		return err
	}

	var credentialsToAdd []MembershipCredentials
	for _, newCredential := range credentials {
		// A flag to tell us if the keystore contains a credential associated to the input identity credential, i.e. membershipCredential
		found := -1
		for i, existingCredentials := range k.Credentials {
			credentialsBytes, err := keystore.DecryptDataV3(existingCredentials, password)
			if err != nil {
				continue
			}

			var credentials MembershipCredentials
			err = json.Unmarshal(credentialsBytes, &credentials)
			if err != nil {
				continue
			}

			if rln.IdentityCredentialEquals(credentials.IdentityCredential, newCredential.IdentityCredential) {
				// idCredential is present in the keystore. We add the input credential membership group to the one contained in the decrypted keystore credential (we deduplicate groups using sets)
				allMemberships := append(credentials.MembershipGroups, newCredential.MembershipGroups...)

				// we define the updated credential with the updated membership sets
				updatedCredential := MembershipCredentials{
					IdentityCredential: newCredential.IdentityCredential,
					MembershipGroups:   allMemberships,
				}

				// we re-encrypt creating a new keyfile
				b, err := json.Marshal(updatedCredential)
				if err != nil {
					return err
				}

				encryptedCredentials, err := keystore.EncryptDataV3(b, []byte(password), keystore.StandardScryptN, keystore.StandardScryptP)
				if err != nil {
					return err
				}

				// we update the original credential field in keystoreCredentials
				k.Credentials[i] = encryptedCredentials

				found = i

				// We stop decrypting other credentials in the keystore
				break
			}
		}

		if found == -1 {
			credentialsToAdd = append(credentialsToAdd, newCredential)
		}
	}

	for _, c := range credentialsToAdd {
		b, err := json.Marshal(c)
		if err != nil {
			return err
		}

		encryptedCredentials, err := keystore.EncryptDataV3(b, []byte(password), keystore.StandardScryptN, keystore.StandardScryptP)
		if err != nil {
			return err
		}

		k.Credentials = append(k.Credentials, encryptedCredentials)
	}

	return save(k, path, separator)
}

// save safely writes a keystore to disk.
// If the destination file exists, it is first renamed with a .bkp extension; the file is then written at its
// destination, and the .bkp file is removed if the write succeeds, otherwise it is restored.
func save(keystore AppKeystore, path string, separator string) error {
	// We first backup the current keystore
	_, err := os.Stat(path)
	if err == nil {
		err := os.Rename(path, path+".bkp")
		if err != nil {
			return err
		}
	}

	if separator == "" {
		separator = DefaultSeparator
	}

	b, err := json.Marshal(keystore)
	if err != nil {
		return err
	}

	b = append(b, []byte(separator)...)

	buffer := new(bytes.Buffer)

	err = json.Compact(buffer, b)
	if err != nil {
		restoreErr := os.Rename(path, path+".bkp")
		if restoreErr != nil {
			return fmt.Errorf("could not restore backup file: %w", restoreErr)
		}
		return err
	}

	err = ioutil.WriteFile(path, buffer.Bytes(), 0600)
	if err != nil {
		restoreErr := os.Rename(path, path+".bkp")
		if restoreErr != nil {
			return fmt.Errorf("could not restore backup file: %w", restoreErr)
		}
		return err
	}

	// The write went fine, so we can remove the backup keystore
	_, err = os.Stat(path + ".bkp")
	if err == nil {
		err := os.Remove(path + ".bkp")
		if err != nil {
			return err
		}
	}

	return nil
}
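A sketch of persisting and reloading a membership credential with this keystore. The AppInfo values, file name, and password are illustrative only (go-waku registers its own RLNAppInfo, whose literal is removed from rln.go further down in this diff):

package main

import (
	"fmt"

	"github.com/waku-org/go-waku/waku/v2/protocol/rln/keystore"
	"github.com/waku-org/go-zerokit-rln/rln"
	"go.uber.org/zap"
)

func main() {
	appInfo := keystore.AppInfo{Application: "example-app", AppIdentifier: "example", Version: "0.1"}

	cred := keystore.MembershipCredentials{
		IdentityCredential: rln.IdentityCredential{}, // fill with real keys in practice
		MembershipGroups: []keystore.MembershipGroup{{
			TreeIndex:          1,
			MembershipContract: keystore.MembershipContract{ChainId: "5", Address: "0x0000000000000000000000000000000000000000"},
		}},
	}

	// Encrypts the credential and appends it to the keystore file, creating
	// the file on first use and deduplicating by identity credential.
	err := keystore.AddMembershipCredentials("keystore.json", []keystore.MembershipCredentials{cred}, "password", appInfo, keystore.DefaultSeparator)
	if err != nil {
		panic(err)
	}

	// Decrypt everything back; nil filters return all stored credentials.
	loaded, err := keystore.GetMembershipCredentials(zap.NewNop(), "keystore.json", "password", appInfo, nil, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("credentials on disk:", len(loaded))
}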
@@ -1,175 +0,0 @@
package rln

import (
	"context"
	"crypto/ecdsa"
	"errors"

	"github.com/ethereum/go-ethereum/common"
	"github.com/waku-org/go-waku/waku/v2/protocol/relay"
	"github.com/waku-org/go-waku/waku/v2/timesource"
	r "github.com/waku-org/go-zerokit-rln/rln"
	"go.uber.org/zap"
)

func RlnRelayStatic(
	ctx context.Context,
	relay *relay.WakuRelay,
	group []r.IDCommitment,
	memKeyPair r.IdentityCredential,
	memIndex r.MembershipIndex,
	pubsubTopic string,
	contentTopic string,
	spamHandler SpamHandler,
	timesource timesource.Timesource,
	log *zap.Logger,
) (*WakuRLNRelay, error) {
	log = log.Named("rln-static")

	log.Info("mounting rln-relay in off-chain/static mode")

	// check the peer's index and the inclusion of user's identity commitment in the group
	if memKeyPair.IDCommitment != group[int(memIndex)] {
		return nil, errors.New("peer's IDCommitment does not match commitment in group")
	}

	rlnInstance, err := r.NewRLN()
	if err != nil {
		return nil, err
	}

	// create the WakuRLNRelay
	rlnPeer := &WakuRLNRelay{
		ctx:               ctx,
		membershipKeyPair: &memKeyPair,
		membershipIndex:   memIndex,
		RLN:               rlnInstance,
		pubsubTopic:       pubsubTopic,
		contentTopic:      contentTopic,
		log:               log,
		timesource:        timesource,
		nullifierLog:      make(map[r.Nullifier][]r.ProofMetadata),
	}

	root, err := rlnPeer.RLN.GetMerkleRoot()
	if err != nil {
		return nil, err
	}

	rlnPeer.validMerkleRoots = append(rlnPeer.validMerkleRoots, root)

	// add members to the Merkle tree
	for _, member := range group {
		if err := rlnPeer.insertMember(member); err != nil {
			return nil, err
		}
	}

	// adds a topic validator for the supplied pubsub topic at the relay protocol
	// messages published on this pubsub topic will be relayed upon a successful validation, otherwise they will be dropped
	// the topic validator checks for the correct non-spamming proof of the message
	err = rlnPeer.addValidator(relay, pubsubTopic, contentTopic, spamHandler)
	if err != nil {
		return nil, err
	}

	log.Info("rln relay topic validator mounted", zap.String("pubsubTopic", pubsubTopic), zap.String("contentTopic", contentTopic))

	return rlnPeer, nil
}

func RlnRelayDynamic(
	ctx context.Context,
	relay *relay.WakuRelay,
	ethClientAddr string,
	ethAccountPrivateKey *ecdsa.PrivateKey,
	memContractAddr common.Address,
	memKeyPair *r.IdentityCredential,
	memIndex r.MembershipIndex,
	pubsubTopic string,
	contentTopic string,
	spamHandler SpamHandler,
	registrationHandler RegistrationHandler,
	timesource timesource.Timesource,
	log *zap.Logger,
) (*WakuRLNRelay, error) {
	log = log.Named("rln-dynamic")

	log.Info("mounting rln-relay in onchain/dynamic mode")

	rlnInstance, err := r.NewRLN()
	if err != nil {
		return nil, err
	}

	// create the WakuRLNRelay
	rlnPeer := &WakuRLNRelay{
		ctx:                       ctx,
		membershipIndex:           memIndex,
		membershipContractAddress: memContractAddr,
		ethClientAddress:          ethClientAddr,
		ethAccountPrivateKey:      ethAccountPrivateKey,
		RLN:                       rlnInstance,
		pubsubTopic:               pubsubTopic,
		contentTopic:              contentTopic,
		log:                       log,
		timesource:                timesource,
		nullifierLog:              make(map[r.Nullifier][]r.ProofMetadata),
		registrationHandler:       registrationHandler,
		lastIndexLoaded:           -1,
	}

	root, err := rlnPeer.RLN.GetMerkleRoot()
	if err != nil {
		return nil, err
	}

	rlnPeer.validMerkleRoots = append(rlnPeer.validMerkleRoots, root)

	// prepare rln membership key pair
	if memKeyPair == nil && ethAccountPrivateKey != nil {
		log.Debug("no rln-relay key is provided, generating one")
		memKeyPair, err = rlnInstance.MembershipKeyGen()
		if err != nil {
			return nil, err
		}

		rlnPeer.membershipKeyPair = memKeyPair

		// register the rln-relay peer to the membership contract
		membershipIndex, err := rlnPeer.Register(ctx)
		if err != nil {
			return nil, err
		}

		rlnPeer.membershipIndex = *membershipIndex

		log.Info("registered peer into the membership contract")
	} else if memKeyPair != nil {
		rlnPeer.membershipKeyPair = memKeyPair
	}

	handler := func(pubkey r.IDCommitment, index r.MembershipIndex) error {
		return rlnPeer.insertMember(pubkey)
	}

	errChan := make(chan error)
	go rlnPeer.HandleGroupUpdates(handler, errChan)
	err = <-errChan
	if err != nil {
		return nil, err
	}

	// adds a topic validator for the supplied pubsub topic at the relay protocol
	// messages published on this pubsub topic will be relayed upon a successful validation, otherwise they will be dropped
	// the topic validator checks for the correct non-spamming proof of the message
	err = rlnPeer.addValidator(relay, pubsubTopic, contentTopic, spamHandler)
	if err != nil {
		return nil, err
	}

	log.Info("rln relay topic validator mounted", zap.String("pubsubTopic", pubsubTopic), zap.String("contentTopic", contentTopic))

	return rlnPeer, nil
}
@@ -21,12 +21,6 @@ import (
 	proto "google.golang.org/protobuf/proto"
 )
 
-var RLNAppInfo = AppInfo{
-	Application:   "go-waku-rln-relay",
-	AppIdentifier: "01234567890abcdef",
-	Version:       "0.1",
-}
-
 type GroupManager interface {
 	Start(ctx context.Context, rln *rln.RLN, rootTracker *group_manager.MerkleRootTracker) error
 	IdentityCredentials() (rln.IdentityCredential, error)
@@ -68,11 +62,16 @@ func New(
 		return nil, err
 	}
 
+	rootTracker, err := group_manager.NewMerkleRootTracker(AcceptableRootWindowSize, rlnInstance)
+	if err != nil {
+		return nil, err
+	}
+
 	// create the WakuRLNRelay
 	rlnPeer := &WakuRLNRelay{
 		RLN:          rlnInstance,
 		groupManager: groupManager,
-		rootTracker:  group_manager.NewMerkleRootTracker(AcceptableRootWindowSize, rlnInstance),
+		rootTracker:  rootTracker,
 		pubsubTopic:  pubsubTopic,
 		contentTopic: contentTopic,
 		relay:        relay,
@@ -367,3 +366,11 @@ func (rlnRelay *WakuRLNRelay) generateProof(input []byte, epoch rln.Epoch) (*pb.
 		RlnIdentifier: proof.RLNIdentifier[:],
 	}, nil
 }
+
+func (rlnRelay *WakuRLNRelay) IdentityCredential() (rln.IdentityCredential, error) {
+	return rlnRelay.groupManager.IdentityCredentials()
+}
+
+func (rlnRelay *WakuRLNRelay) MembershipIndex() (uint, error) {
+	return rlnRelay.groupManager.MembershipIndex()
+}

@@ -1,227 +0,0 @@
package rln

import (
	"context"
	"crypto/ecdsa"
	"errors"
	"math/big"
	"time"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/waku-org/go-waku/waku/v2/protocol/rln/contracts"
	r "github.com/waku-org/go-zerokit-rln/rln"
	"go.uber.org/zap"
)

var MEMBERSHIP_FEE = big.NewInt(1000000000000000) // wei - 0.001 eth

func toBigInt(i []byte) *big.Int {
	result := new(big.Int)
	result.SetBytes(i[:])
	return result
}

func register(ctx context.Context, idComm r.IDCommitment, ethAccountPrivateKey *ecdsa.PrivateKey, ethClientAddress string, membershipContractAddress common.Address, registrationHandler RegistrationHandler, log *zap.Logger) (*r.MembershipIndex, error) {
	backend, err := ethclient.Dial(ethClientAddress)
	if err != nil {
		return nil, err
	}
	defer backend.Close()

	chainID, err := backend.ChainID(ctx)
	if err != nil {
		return nil, err
	}

	auth, err := bind.NewKeyedTransactorWithChainID(ethAccountPrivateKey, chainID)
	if err != nil {
		return nil, err
	}
	auth.Value = MEMBERSHIP_FEE
	auth.Context = ctx

	rlnContract, err := contracts.NewRLN(membershipContractAddress, backend)
	if err != nil {
		return nil, err
	}

	log.Debug("registering an id commitment", zap.Binary("idComm", idComm[:]))

	// registers the idComm into the membership contract whose address is in rlnPeer.membershipContractAddress
	tx, err := rlnContract.Register(auth, toBigInt(idComm[:]))
	if err != nil {
		return nil, err
	}

	log.Info("transaction broadcasted", zap.String("transactionHash", tx.Hash().Hex()))

	if registrationHandler != nil {
		registrationHandler(tx)
	}

	txReceipt, err := bind.WaitMined(ctx, backend, tx)
	if err != nil {
		return nil, err
	}

	if txReceipt.Status != types.ReceiptStatusSuccessful {
		return nil, errors.New("transaction reverted")
	}

	// the receipt topic holds the hash of signature of the raised events
	evt, err := rlnContract.ParseMemberRegistered(*txReceipt.Logs[0])
	if err != nil {
		return nil, err
	}

	var eventIdComm r.IDCommitment = r.Bytes32(evt.Pubkey.Bytes())

	log.Debug("the identity commitment key extracted from tx log", zap.Binary("eventIdComm", eventIdComm[:]))

	if eventIdComm != idComm {
		return nil, errors.New("invalid id commitment key")
	}

	result := new(r.MembershipIndex)
	*result = r.MembershipIndex(uint(evt.Index.Int64()))

	// debug "the index of registered identity commitment key", eventIndex=eventIndex

	log.Debug("the index of registered identity commitment key", zap.Uint("eventIndex", uint(*result)))

	return result, nil
}

// Register registers the public key of the rlnPeer, which is rlnPeer.membershipKeyPair.publicKey,
// into the membership contract whose address is in rlnPeer.membershipContractAddress
func (rln *WakuRLNRelay) Register(ctx context.Context) (*r.MembershipIndex, error) {
	pk := rln.membershipKeyPair.IDCommitment
	return register(ctx, pk, rln.ethAccountPrivateKey, rln.ethClientAddress, rln.membershipContractAddress, rln.registrationHandler, rln.log)
}

// the types of inputs to this handler match the MemberRegistered event/proc defined in the MembershipContract interface
type RegistrationEventHandler = func(pubkey r.IDCommitment, index r.MembershipIndex) error

func (rln *WakuRLNRelay) processLogs(evt *contracts.RLNMemberRegistered, handler RegistrationEventHandler) error {
	if evt == nil {
		return nil
	}

	var pubkey r.IDCommitment = r.Bytes32(evt.Pubkey.Bytes())

	index := evt.Index.Int64()
	if index <= rln.lastIndexLoaded {
		return nil
	}

	rln.lastIndexLoaded = index
	return handler(pubkey, r.MembershipIndex(uint(evt.Index.Int64())))
}

// HandleGroupUpdates mounts the supplied handler for the registration events emitted by the membership contract.
// It connects to the eth client, subscribes to the `MemberRegistered` event emitted from the `MembershipContract`,
// and collects all the events; for every received event, it calls the `handler`.
func (rln *WakuRLNRelay) HandleGroupUpdates(handler RegistrationEventHandler, errChan chan<- error) {
	defer close(errChan)

	backend, err := ethclient.Dial(rln.ethClientAddress)
	if err != nil {
		errChan <- err
		return
	}
	rln.ethClient = backend

	rlnContract, err := contracts.NewRLN(rln.membershipContractAddress, backend)
	if err != nil {
		errChan <- err
		return
	}

	err = rln.loadOldEvents(rlnContract, handler)
	if err != nil {
		errChan <- err
		return
	}

	doneCh := make(chan struct{})
	errCh := make(chan error)
	go rln.watchNewEvents(rlnContract, handler, rln.log, doneCh, errCh)

	select {
	case <-doneCh:
		return
	case err := <-errCh:
		errChan <- err
		return
	}
}

func (rln *WakuRLNRelay) loadOldEvents(rlnContract *contracts.RLN, handler RegistrationEventHandler) error {
	logIterator, err := rlnContract.FilterMemberRegistered(&bind.FilterOpts{Start: 0, End: nil, Context: rln.ctx})
	if err != nil {
		return err
	}
	for {
		if !logIterator.Next() {
			break
		}

		if logIterator.Error() != nil {
			return logIterator.Error()
		}

		err = rln.processLogs(logIterator.Event, handler)
		if err != nil {
			return err
		}
	}
	return nil
}

func (rln *WakuRLNRelay) watchNewEvents(rlnContract *contracts.RLN, handler RegistrationEventHandler, log *zap.Logger, doneCh chan struct{}, errCh chan error) {
	// Watch for new events
	logSink := make(chan *contracts.RLNMemberRegistered)

	subs := event.Resubscribe(2*time.Second, func(ctx context.Context) (event.Subscription, error) {
		subs, err := rlnContract.WatchMemberRegistered(&bind.WatchOpts{Context: rln.ctx, Start: nil}, logSink)
		if err != nil {
			if err == rpc.ErrNotificationsUnsupported {
				err = errors.New("notifications not supported. The node must support websockets")
			}
			errCh <- err
			subs.Unsubscribe()
			rln.log.Error("subscribing to rln events", zap.Error(err))
		}

		return subs, err
	})
	defer subs.Unsubscribe()

	close(doneCh)

	for {
		select {
		case evt := <-logSink:
			err := rln.processLogs(evt, handler)
			if err != nil {
				rln.log.Error("processing rln log", zap.Error(err))
			}
		case <-rln.ctx.Done():
			subs.Unsubscribe()
			close(logSink)
			return
		case err := <-subs.Err():
			close(logSink)
			if err != nil {
				rln.log.Error("watching new events", zap.Error(err))
				errCh <- err
			}
			return
		}
	}
}
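With the old in-package event-watching code deleted above (it now lives in the group_manager packages), applications reach RLN membership data through the two accessors added to rln.go earlier in this diff. A minimal sketch; the helper function and its name are ours, and rlnRelay is assumed to be an already-constructed relay:

package example

import (
	"fmt"

	wakurln "github.com/waku-org/go-waku/waku/v2/protocol/rln"
)

// printMembership reads the mounted group manager's credential and tree
// index through the new WakuRLNRelay accessors.
func printMembership(rlnRelay *wakurln.WakuRLNRelay) error {
	cred, err := rlnRelay.IdentityCredential()
	if err != nil {
		return err
	}

	index, err := rlnRelay.MembershipIndex()
	if err != nil {
		return err
	}

	fmt.Printf("commitment: %x, tree index: %d\n", cred.IDCommitment, index)
	return nil
}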
@@ -0,0 +1,166 @@
package protocol

import (
	"encoding/binary"
	"errors"
	"fmt"
	"math"
)

const MaxShardIndex = uint16(1023)

type RelayShards struct {
	Cluster uint16
	Indices []uint16
}

func NewRelayShards(cluster uint16, indices ...uint16) (RelayShards, error) {
	if len(indices) > math.MaxUint8 {
		return RelayShards{}, errors.New("too many indices")
	}

	indiceSet := make(map[uint16]struct{})
	for _, index := range indices {
		if index > MaxShardIndex {
			return RelayShards{}, errors.New("invalid index")
		}
		indiceSet[index] = struct{}{} // dedup
	}

	if len(indiceSet) == 0 {
		return RelayShards{}, errors.New("invalid index count")
	}

	indices = []uint16{}
	for index := range indiceSet {
		indices = append(indices, index)
	}

	return RelayShards{Cluster: cluster, Indices: indices}, nil
}

func (rs RelayShards) Topics() []NamespacedPubsubTopic {
	var result []NamespacedPubsubTopic
	for _, i := range rs.Indices {
		result = append(result, NewStaticShardingPubsubTopic(rs.Cluster, i))
	}
	return result
}

func (rs RelayShards) Contains(cluster uint16, index uint16) bool {
	if rs.Cluster != cluster {
		return false
	}

	found := false
	for _, i := range rs.Indices {
		if i == index {
			found = true
		}
	}

	return found
}

func (rs RelayShards) ContainsNamespacedTopic(topic NamespacedPubsubTopic) bool {
	if topic.Kind() != StaticSharding {
		return false
	}

	shardedTopic := topic.(StaticShardingPubsubTopic)

	return rs.Contains(shardedTopic.Cluster(), shardedTopic.Shard())
}

func (rs RelayShards) ContainsTopic(topic string) bool {
	nsTopic, err := ToShardedPubsubTopic(topic)
	if err != nil {
		return false
	}
	return rs.ContainsNamespacedTopic(nsTopic)
}

func (rs RelayShards) IndicesList() ([]byte, error) {
	if len(rs.Indices) > math.MaxUint8 {
		return nil, errors.New("indices list too long")
	}

	var result []byte

	result = binary.BigEndian.AppendUint16(result, rs.Cluster)
	result = append(result, uint8(len(rs.Indices)))
	for _, index := range rs.Indices {
		result = binary.BigEndian.AppendUint16(result, index)
	}

	return result, nil
}

func FromIndicesList(buf []byte) (RelayShards, error) {
	if len(buf) < 3 {
		return RelayShards{}, fmt.Errorf("insufficient data: expected at least 3 bytes, got %d bytes", len(buf))
	}

	cluster := binary.BigEndian.Uint16(buf[0:2])
	length := int(buf[2])

	if len(buf) != 3+2*length {
		return RelayShards{}, fmt.Errorf("invalid data: `length` field is %d but %d bytes were provided", length, len(buf))
	}

	var indices []uint16
	for i := 0; i < length; i++ {
		indices = append(indices, binary.BigEndian.Uint16(buf[3+2*i:5+2*i]))
	}

	return NewRelayShards(cluster, indices...)
}

func setBit(n byte, pos uint) byte {
	n |= (1 << pos)
	return n
}

func hasBit(n byte, pos uint) bool {
	val := n & (1 << pos)
	return (val > 0)
}

func (rs RelayShards) BitVector() []byte {
	// The value is comprised of a two-byte shard cluster index in network byte
	// order concatenated with a 128-byte wide bit vector. The bit vector
	// indicates which shards of the respective shard cluster the node is part
	// of. The right-most bit in the bit vector represents shard 0, the left-most
	// bit represents shard 1023.
	var result []byte
	result = binary.BigEndian.AppendUint16(result, rs.Cluster)

	vec := make([]byte, 128)
	for _, index := range rs.Indices {
		n := vec[index/8]
		vec[index/8] = byte(setBit(n, uint(index%8)))
	}

	return append(result, vec...)
}

func FromBitVector(buf []byte) (RelayShards, error) {
	if len(buf) != 130 {
		return RelayShards{}, errors.New("invalid data: expected 130 bytes")
	}

	cluster := binary.BigEndian.Uint16(buf[0:2])
	var indices []uint16

	for i := uint16(0); i < 128; i++ {
		for j := uint(0); j < 8; j++ {
			if !hasBit(buf[2+i], j) {
				continue
			}

			indices = append(indices, uint16(j)+8*i)
		}
	}

	return RelayShards{Cluster: cluster, Indices: indices}, nil
}
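A round-trip sketch for the two shard encodings defined above: the compact indices list (2-byte cluster, 1-byte count, 2 bytes per index) and the fixed-size 130-byte bit vector. Values are illustrative:

package main

import (
	"fmt"

	"github.com/waku-org/go-waku/waku/v2/protocol"
)

func main() {
	rs, err := protocol.NewRelayShards(1, 0, 3, 42)
	if err != nil {
		panic(err)
	}

	// Variable-length encoding: efficient for sparse shard memberships.
	buf, err := rs.IndicesList()
	if err != nil {
		panic(err)
	}
	back, err := protocol.FromIndicesList(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.Contains(1, 42)) // true

	// Fixed-size alternative: 2-byte cluster + 128-byte bit vector.
	vec := rs.BitVector()
	back2, err := protocol.FromBitVector(vec)
	if err != nil {
		panic(err)
	}
	fmt.Println(back2.Contains(1, 3)) // true
}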
@@ -180,12 +180,14 @@ func (store *WakuStore) queryFrom(ctx context.Context, q *pb.HistoryQuery, selec
 	err := store.h.Connect(ctx, store.h.Peerstore().PeerInfo(selectedPeer))
 	if err != nil {
 		logger.Error("connecting to peer", zap.Error(err))
+		metrics.RecordStoreError(store.ctx, "dial_failure")
 		return nil, err
 	}

 	connOpt, err := store.h.NewStream(ctx, selectedPeer, StoreID_v20beta4)
 	if err != nil {
 		logger.Error("creating stream to peer", zap.Error(err))
+		metrics.RecordStoreError(store.ctx, "dial_failure")
 		return nil, err
 	}

@@ -202,6 +204,7 @@ func (store *WakuStore) queryFrom(ctx context.Context, q *pb.HistoryQuery, selec
 	err = writer.WriteMsg(historyRequest)
 	if err != nil {
 		logger.Error("writing request", zap.Error(err))
+		metrics.RecordStoreError(store.ctx, "write_request_failure")
 		return nil, err
 	}

@@ -209,7 +212,7 @@ func (store *WakuStore) queryFrom(ctx context.Context, q *pb.HistoryQuery, selec
 	err = reader.ReadMsg(historyResponseRPC)
 	if err != nil {
 		logger.Error("reading response", zap.Error(err))
-		metrics.RecordStoreError(store.ctx, "decodeRPCFailure")
+		metrics.RecordStoreError(store.ctx, "decode_rpc_failure")
 		return nil, err
 	}

@@ -220,8 +223,6 @@ func (store *WakuStore) queryFrom(ctx context.Context, q *pb.HistoryQuery, selec
 		}, nil
 	}

-	metrics.RecordMessage(ctx, "retrieved", len(historyResponseRPC.Response.Messages))
-
 	return historyResponseRPC.Response, nil
 }

@@ -275,6 +276,7 @@ func (store *WakuStore) Query(ctx context.Context, query Query, opts ...HistoryR
 	}

 	if !params.localQuery && params.selectedPeer == "" {
+		metrics.RecordStoreError(ctx, "peer_not_found_failure")
 		return nil, ErrNoPeersAvailable
 	}
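These hunks converge on one instrumentation pattern: every failure path in the history-query client records a snake_case error tag (`dial_failure`, `write_request_failure`, `decode_rpc_failure`, `peer_not_found_failure`) before returning. A minimal, self-contained sketch of that shape; `recordStoreError` is a stand-in for go-waku's `metrics.RecordStoreError`, and the two function parameters are illustrative:

```go
package main

import "fmt"

func recordStoreError(tag string) { fmt.Println("store_error:", tag) }

// query mirrors the error-path shape of queryFrom: tag, then return.
func query(connect func() error, write func() error) error {
	if err := connect(); err != nil {
		recordStoreError("dial_failure")
		return err
	}
	if err := write(); err != nil {
		recordStoreError("write_request_failure")
		return err
	}
	return nil
}

func main() {
	_ = query(func() error { return nil },
		func() error { return fmt.Errorf("boom") })
	// prints: store_error: write_request_failure
}
```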
@@ -7,7 +7,7 @@ import (

 	"github.com/libp2p/go-libp2p/core/host"
 	libp2pProtocol "github.com/libp2p/go-libp2p/core/protocol"
-	"github.com/waku-org/go-waku/waku/v2/protocol"
+	"github.com/waku-org/go-waku/waku/v2/protocol/relay"
 	"github.com/waku-org/go-waku/waku/v2/timesource"
 	"go.uber.org/zap"
 )

@@ -39,8 +39,6 @@ var (

 	// ErrFailedQuery is emitted when the query fails to return results
 	ErrFailedQuery = errors.New("failed to resolve the query")
-
-	ErrFutureMessage = errors.New("message timestamp in the future")
 )

 type WakuSwap interface {

@@ -51,7 +49,7 @@ type WakuStore struct {
 	ctx        context.Context
 	cancel     context.CancelFunc
 	timesource timesource.Timesource
-	MsgC       chan *protocol.Envelope
+	MsgC       relay.Subscription
 	wg         *sync.WaitGroup

 	log *zap.Logger

@@ -63,10 +61,9 @@ type WakuStore struct {
 }

 // NewWakuStore creates a WakuStore using an specific MessageProvider for storing the messages
-func NewWakuStore(host host.Host, p MessageProvider, timesource timesource.Timesource, log *zap.Logger) *WakuStore {
+func NewWakuStore(p MessageProvider, timesource timesource.Timesource, log *zap.Logger) *WakuStore {
 	wakuStore := new(WakuStore)
 	wakuStore.msgProvider = p
-	wakuStore.h = host
 	wakuStore.wg = &sync.WaitGroup{}
 	wakuStore.log = log.Named("store")
 	wakuStore.timesource = timesource
vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/waku_store_protocol.go (generated, vendored; 70 changes)
@@ -7,6 +7,7 @@ import (
 	"sync"
 	"time"

+	"github.com/libp2p/go-libp2p/core/host"
 	"github.com/libp2p/go-libp2p/core/network"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/libp2p/go-msgio/pbio"

@@ -17,13 +18,11 @@ import (
 	"github.com/waku-org/go-waku/waku/v2/metrics"
 	"github.com/waku-org/go-waku/waku/v2/protocol"
 	wpb "github.com/waku-org/go-waku/waku/v2/protocol/pb"
+	"github.com/waku-org/go-waku/waku/v2/protocol/relay"
 	"github.com/waku-org/go-waku/waku/v2/protocol/store/pb"
 	"github.com/waku-org/go-waku/waku/v2/timesource"
 )

-// MaxTimeVariance is the maximum duration in the future allowed for a message timestamp
-const MaxTimeVariance = time.Duration(20) * time.Second
-
 func findMessages(query *pb.HistoryQuery, msgProvider MessageProvider) ([]*wpb.WakuMessage, *pb.PagingInfo, error) {
 	if query.PagingInfo == nil {
 		query.PagingInfo = &pb.PagingInfo{

@@ -77,6 +76,7 @@ func (store *WakuStore) FindMessages(query *pb.HistoryQuery) *pb.HistoryResponse
 type MessageProvider interface {
 	GetAll() ([]persistence.StoredMessage, error)
 	Query(query *pb.HistoryQuery) (*pb.Index, []persistence.StoredMessage, error)
+	Validate(env *protocol.Envelope) error
 	Put(env *protocol.Envelope) error
 	MostRecentTimestamp() (int64, error)
 	Start(ctx context.Context, timesource timesource.Timesource) error
@@ -85,12 +85,12 @@ type MessageProvider interface {
 }

 type Store interface {
-	Start(ctx context.Context) error
+	SetHost(h host.Host)
+	Start(context.Context, relay.Subscription) error
 	Query(ctx context.Context, query Query, opts ...HistoryRequestOption) (*Result, error)
 	Find(ctx context.Context, query Query, cb criteriaFN, opts ...HistoryRequestOption) (*wpb.WakuMessage, error)
 	Next(ctx context.Context, r *Result) (*Result, error)
 	Resume(ctx context.Context, pubsubTopic string, peerList []peer.ID) (int, error)
-	MessageChannel() chan *protocol.Envelope
 	Stop()
 }

@@ -99,8 +99,13 @@ func (store *WakuStore) SetMessageProvider(p MessageProvider) {
 	store.msgProvider = p
 }

+// Sets the host to be able to mount or consume a protocol
+func (store *WakuStore) SetHost(h host.Host) {
+	store.h = h
+}
+
 // Start initializes the WakuStore by enabling the protocol and fetching records from a message provider
-func (store *WakuStore) Start(ctx context.Context) error {
+func (store *WakuStore) Start(ctx context.Context, sub relay.Subscription) error {
 	if store.started {
 		return nil
 	}
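The SetHost/Start split is the notable API change here: the libp2p host is no longer a constructor argument but is injected before Start, so a node can build all of its protocols first and wire the host in afterwards. A self-contained sketch of the pattern, using generic stand-in types rather than the go-waku ones:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

type Host interface{ ID() string }

type fakeHost struct{}

func (fakeHost) ID() string { return "peer-1" }

type Store struct{ h Host }

// SetHost injects the host after construction, mirroring WakuStore.SetHost.
func (s *Store) SetHost(h Host) { s.h = h }

// Start refuses to run until the host has been injected.
func (s *Store) Start(ctx context.Context) error {
	if s.h == nil {
		return errors.New("host not set")
	}
	fmt.Println("mounting stream handler on", s.h.ID())
	return nil
}

func main() {
	s := &Store{}
	s.SetHost(fakeHost{})
	fmt.Println(s.Start(context.Background())) // <nil>
}
```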
@@ -113,18 +118,17 @@ func (store *WakuStore) Start(ctx context.Context) error {
 	err := store.msgProvider.Start(ctx, store.timesource) // TODO: store protocol should not start a message provider
 	if err != nil {
 		store.log.Error("Error starting message provider", zap.Error(err))
-		return nil
+		return err
 	}

 	store.started = true
 	store.ctx, store.cancel = context.WithCancel(ctx)
-	store.MsgC = make(chan *protocol.Envelope, 1024)
+	store.MsgC = sub

 	store.h.SetStreamHandlerMatch(StoreID_v20beta4, protocol.PrefixTextMatch(string(StoreID_v20beta4)), store.onRequest)

-	store.wg.Add(2)
+	store.wg.Add(1)
 	go store.storeIncomingMessages(store.ctx)
-	go store.updateMetrics(store.ctx)

 	store.log.Info("Store protocol started")

@@ -132,16 +136,17 @@ func (store *WakuStore) Start(ctx context.Context) error {
 }

 func (store *WakuStore) storeMessage(env *protocol.Envelope) error {
-	// Ensure that messages don't "jump" to the front of the queue with future timestamps
-	if env.Index().SenderTime-env.Index().ReceiverTime > int64(MaxTimeVariance) {
-		return ErrFutureMessage
-	}
-
 	if env.Message().Ephemeral {
 		return nil
 	}

-	err := store.msgProvider.Put(env)
+	err := store.msgProvider.Validate(env)
+	if err != nil {
+		return err
+	}
+
+	err = store.msgProvider.Put(env)
 	if err != nil {
 		store.log.Error("storing message", zap.Error(err))
 		metrics.RecordStoreError(store.ctx, "store_failure")

@@ -153,33 +158,13 @@ func (store *WakuStore) storeMessage(env *protocol.Envelope) error {

 func (store *WakuStore) storeIncomingMessages(ctx context.Context) {
 	defer store.wg.Done()
-	for envelope := range store.MsgC {
+	for envelope := range store.MsgC.Ch {
 		go func(env *protocol.Envelope) {
 			_ = store.storeMessage(env)
 		}(envelope)
 	}
 }

-func (store *WakuStore) updateMetrics(ctx context.Context) {
-	ticker := time.NewTicker(3 * time.Second)
-	defer ticker.Stop()
-	defer store.wg.Done()
-
-	for {
-		select {
-		case <-ticker.C:
-			msgCount, err := store.msgProvider.Count()
-			if err != nil {
-				store.log.Error("updating store metrics", zap.Error(err))
-			} else {
-				metrics.RecordMessage(store.ctx, "stored", msgCount)
-			}
-		case <-ctx.Done():
-			return
-		}
-	}
-}
-
 func (store *WakuStore) onRequest(s network.Stream) {
 	defer s.Close()
 	logger := store.log.With(logging.HostID("peer", s.Conn().RemotePeer()))
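The store no longer owns its envelope channel: it consumes a relay.Subscription whose channel (`Ch`) is closed when the subscription is torn down, which is what lets `storeIncomingMessages` exit its range loop and `Stop` call `Unsubscribe` instead of `close`. A self-contained sketch with a simplified stand-in for the subscription type (only the `Ch`/`Unsubscribe` surface used by the call sites above):

```go
package main

import (
	"fmt"
	"sync"
)

type Envelope struct{ ID int }

type Subscription struct {
	Ch   chan *Envelope
	once sync.Once
}

// Unsubscribe closes the channel, which ends any range loop over Ch.
func (s *Subscription) Unsubscribe() { s.once.Do(func() { close(s.Ch) }) }

func main() {
	sub := &Subscription{Ch: make(chan *Envelope, 4)}
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for env := range sub.Ch { // mirrors `for envelope := range store.MsgC.Ch`
			fmt.Println("stored", env.ID)
		}
	}()
	sub.Ch <- &Envelope{ID: 1}
	sub.Ch <- &Envelope{ID: 2}
	sub.Unsubscribe()
	wg.Wait()
}
```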
@@ -191,7 +176,7 @@ func (store *WakuStore) onRequest(s network.Stream) {
 	err := reader.ReadMsg(historyRPCRequest)
 	if err != nil {
 		logger.Error("reading request", zap.Error(err))
-		metrics.RecordStoreError(store.ctx, "decodeRPCFailure")
+		metrics.RecordStoreError(store.ctx, "decode_rpc_failure")
 		return
 	}

@@ -200,7 +185,7 @@ func (store *WakuStore) onRequest(s network.Stream) {
 		logger = logger.With(logging.Filters(query.GetContentFilters()))
 	} else {
 		logger.Error("reading request", zap.Error(err))
-		metrics.RecordStoreError(store.ctx, "emptyRpcQueryFailure")
+		metrics.RecordStoreError(store.ctx, "empty_rpc_query_failure")
 		return
 	}

@@ -215,16 +200,13 @@ func (store *WakuStore) onRequest(s network.Stream) {
 	err = writer.WriteMsg(historyResponseRPC)
 	if err != nil {
 		logger.Error("writing response", zap.Error(err), logging.PagingInfo(historyResponseRPC.Response.PagingInfo))
+		metrics.RecordStoreError(store.ctx, "response_write_failure")
 		_ = s.Reset()
 	} else {
 		logger.Info("response sent")
 	}
 }

-func (store *WakuStore) MessageChannel() chan *protocol.Envelope {
-	return store.MsgC
-}
-
 // TODO: queryWithAccounting

 // Stop closes the store message channel and removes the protocol stream handler

@@ -237,9 +219,7 @@ func (store *WakuStore) Stop() {
 	store.started = false

-	if store.MsgC != nil {
-		close(store.MsgC)
-	}
+	store.MsgC.Unsubscribe()

 	if store.msgProvider != nil {
 		store.msgProvider.Stop() // TODO: StoreProtocol should not stop a message provider
@@ -3,7 +3,6 @@ package protocol
 import (
 	"errors"
 	"fmt"
-	"runtime/debug"
 	"strconv"
 	"strings"
 )

@@ -72,19 +71,19 @@ const (
 	NamedSharding
 )

-type ShardedPubsubTopic interface {
+type NamespacedPubsubTopic interface {
 	String() string
 	Kind() NamespacedPubsubTopicKind
-	Equal(ShardedPubsubTopic) bool
+	Equal(NamespacedPubsubTopic) bool
 }

 type NamedShardingPubsubTopic struct {
-	ShardedPubsubTopic
+	NamespacedPubsubTopic
 	kind NamespacedPubsubTopicKind
 	name string
 }

-func NewNamedShardingPubsubTopic(name string) ShardedPubsubTopic {
+func NewNamedShardingPubsubTopic(name string) NamespacedPubsubTopic {
 	return NamedShardingPubsubTopic{
 		kind: NamedSharding,
 		name: name,

@@ -99,7 +98,7 @@ func (n NamedShardingPubsubTopic) Name() string {
 	return n.name
 }

-func (s NamedShardingPubsubTopic) Equal(t2 ShardedPubsubTopic) bool {
+func (s NamedShardingPubsubTopic) Equal(t2 NamespacedPubsubTopic) bool {
 	return s.String() == t2.String()
 }

@@ -124,13 +123,13 @@ func (s *NamedShardingPubsubTopic) Parse(topic string) error {
 }

 type StaticShardingPubsubTopic struct {
-	ShardedPubsubTopic
+	NamespacedPubsubTopic
 	kind    NamespacedPubsubTopicKind
 	cluster uint16
 	shard   uint16
 }

-func NewStaticShardingPubsubTopic(cluster uint16, shard uint16) ShardedPubsubTopic {
+func NewStaticShardingPubsubTopic(cluster uint16, shard uint16) NamespacedPubsubTopic {
 	return StaticShardingPubsubTopic{
 		kind:    StaticSharding,
 		cluster: cluster,

@@ -150,7 +149,7 @@ func (n StaticShardingPubsubTopic) Kind() NamespacedPubsubTopicKind {
 	return n.kind
 }

-func (s StaticShardingPubsubTopic) Equal(t2 ShardedPubsubTopic) bool {
+func (s StaticShardingPubsubTopic) Equal(t2 NamespacedPubsubTopic) bool {
 	return s.String() == t2.String()
 }

@@ -196,7 +195,7 @@ func (s *StaticShardingPubsubTopic) Parse(topic string) error {
 	return nil
 }

-func ToShardedPubsubTopic(topic string) (ShardedPubsubTopic, error) {
+func ToShardedPubsubTopic(topic string) (NamespacedPubsubTopic, error) {
 	if strings.HasPrefix(topic, StaticShardingPubsubTopicPrefix) {
 		s := StaticShardingPubsubTopic{}
 		err := s.Parse(topic)

@@ -205,7 +204,6 @@ func ToShardedPubsubTopic(topic string) (ShardedPubsubTopic, error) {
 		}
 		return s, nil
 	} else {
-		debug.PrintStack()
 		s := NamedShardingPubsubTopic{}
 		err := s.Parse(topic)
 		if err != nil {

@@ -215,6 +213,6 @@ func ToShardedPubsubTopic(topic string) (ShardedPubsubTopic, error) {
 	}
 }

-func DefaultPubsubTopic() ShardedPubsubTopic {
+func DefaultPubsubTopic() NamespacedPubsubTopic {
 	return NewNamedShardingPubsubTopic("default-waku/proto")
 }
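One detail worth noting alongside the rename: both topic structs embed the `NamespacedPubsubTopic` interface itself. Embedding an interface gives the struct the interface's full method set at compile time even when only some methods are overridden. A self-contained illustration of that Go technique, with generic names rather than go-waku's:

```go
package main

import "fmt"

type Topic interface {
	String() string
	Kind() string
}

type namedTopic struct {
	Topic // embedded interface; satisfies Topic even before overriding
	name  string
}

// Overriding both methods means the (nil) embedded value is never called.
func (n namedTopic) String() string { return "/named/" + n.name }
func (n namedTopic) Kind() string   { return "named" }

func main() {
	var t Topic = namedTopic{name: "default-waku/proto"}
	fmt.Println(t.Kind(), t.String())
}
```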
@@ -43,7 +43,7 @@ type PeerConnector interface {
 	PeerChannel() chan<- peer.AddrInfo
 }

-func NewRendezvous(host host.Host, enableServer bool, db *DB, discoverPeers bool, rendezvousPoints []peer.ID, peerConnector PeerConnector, log *zap.Logger) *Rendezvous {
+func NewRendezvous(enableServer bool, db *DB, discoverPeers bool, rendezvousPoints []peer.ID, peerConnector PeerConnector, log *zap.Logger) *Rendezvous {
 	logger := log.Named("rendezvous")

 	var rendevousPoints []*rendezvousPoint

@@ -54,7 +54,6 @@ func NewRendezvous(host host.Host, enableServer bool, db *DB, discoverPeers bool
 	}

 	return &Rendezvous{
-		host:          host,
 		enableServer:  enableServer,
 		db:            db,
 		discoverPeers: discoverPeers,

@@ -64,6 +63,11 @@ func NewRendezvous(host host.Host, enableServer bool, db *DB, discoverPeers bool
 	}
 }

+// Sets the host to be able to mount or consume a protocol
+func (r *Rendezvous) SetHost(h host.Host) {
+	r.host = h
+}
+
 func (r *Rendezvous) Start(ctx context.Context) error {
 	ctx, cancel := context.WithCancel(ctx)
 	r.cancel = cancel
@@ -37,6 +37,10 @@ type IdentityCredential = struct {
 	IDCommitment IDCommitment `json:"idCommitment"`
 }

+func IdentityCredentialEquals(i IdentityCredential, i2 IdentityCredential) bool {
+	return bytes.Equal(i.IDTrapdoor[:], i2.IDTrapdoor[:]) && bytes.Equal(i.IDNullifier[:], i2.IDNullifier[:]) && bytes.Equal(i.IDSecretHash[:], i2.IDSecretHash[:]) && bytes.Equal(i.IDCommitment[:], i2.IDCommitment[:])
+}
+
 type RateLimitProof struct {
 	// RateLimitProof holds the public inputs to rln circuit as
 	// defined in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Public-Inputs
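The new equality helper compares the four fixed-size credential fields by slicing each array with `[:]` so `bytes.Equal` can be used. A self-contained sketch of the technique (the 32-byte width is an assumption for illustration; the real field types are defined elsewhere in the package):

```go
package main

import (
	"bytes"
	"fmt"
)

type Commitment [32]byte

func equal(a, b Commitment) bool {
	return bytes.Equal(a[:], b[:]) // slice the arrays, compare contents
}

func main() {
	var a, b Commitment
	a[0], b[0] = 1, 1
	fmt.Println(equal(a, b)) // true
}
```

Fixed-size arrays are also directly comparable with `==` in Go; slicing keeps the call sites uniform when some fields may instead be slices.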
@@ -0,0 +1 @@
/vendor/
@@ -0,0 +1,85 @@
run:
  tests: false

linters:
  disable-all: true
  enable:
    - asciicheck
    - bidichk
    - bodyclose
    - containedctx
    - contextcheck
    - cyclop
    - deadcode
    - decorder
    - depguard
    - dogsled
    - dupl
    - durationcheck
    - errcheck
    - errchkjson
    - errname
    - errorlint
    - exhaustive
    - exportloopref
    - forbidigo
    - funlen
    - gci
    - gochecknoglobals
    - gochecknoinits
    - gocognit
    - goconst
    - gocritic
    - gocyclo
    - godox
    - goerr113
    - gofmt
    - gofumpt
    - goheader
    - goimports
    - gomnd
    - gomoddirectives
    - gomodguard
    - goprintffuncname
    - gosec
    - gosimple
    - govet
    - grouper
    - ifshort
    - importas
    - ineffassign
    - ireturn
    - lll
    - maintidx
    - makezero
    - misspell
    - nakedret
    - nestif
    - nilerr
    - nilnil
    - noctx
    - nolintlint
    - paralleltest
    - prealloc
    - predeclared
    - promlinter
    - revive
    - rowserrcheck
    - sqlclosecheck
    - staticcheck
    - structcheck
    - stylecheck
    - tagliatelle
    - tenv
    - testpackage
    - thelper
    - tparallel
    - typecheck
    - unconvert
    - unparam
    - unused
    - varcheck
    - varnamelen
    - wastedassign
    - whitespace
    - wrapcheck
@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@@ -0,0 +1,13 @@
.DEFAULT_GOAL := all

.PHONY: all
all: test lint

# the TEST_FLAGS env var can be set to eg run only specific tests
.PHONY: test
test:
	go test -v -count=1 -race -cover "$$TEST_FLAGS"

.PHONY: lint
lint:
	golangci-lint run
@@ -0,0 +1,104 @@
[![Build Status](https://circleci.com/gh/wk8/go-ordered-map.svg?style=svg)](https://app.circleci.com/pipelines/github/wk8/go-ordered-map)

# Goland Ordered Maps

Same as regular maps, but also remembers the order in which keys were inserted, akin to [Python's `collections.OrderedDict`s](https://docs.python.org/3.7/library/collections.html#ordereddict-objects).

It offers the following features:
* optimal runtime performance (all operations are constant time)
* optimal memory usage (only one copy of values, no unnecessary memory allocation)
* allows iterating from newest or oldest keys indifferently, without memory copy, allowing to `break` the iteration, and in time linear to the number of keys iterated over rather than the total length of the ordered map
* takes and returns generic `interface{}`s
* idiomatic API, akin to that of [`container/list`](https://golang.org/pkg/container/list)

## Installation
```bash
go get -u github.com/wk8/go-ordered-map
```

Or use your favorite golang vendoring tool!

## Supported go versions

All go versions >= 1.13 are supported. There's no reason for older versions to not also work, but they're not part of the build matrix.

## Documentation

[The full documentation is available on godoc.org](https://godoc.org/github.com/wk8/go-ordered-map).

## Example / usage

```go
package main

import (
	"fmt"

	"github.com/wk8/go-ordered-map"
)

func main() {
	om := orderedmap.New()

	om.Set("foo", "bar")
	om.Set("bar", "baz")
	om.Set("coucou", "toi")

	fmt.Println(om.Get("foo"))          // => bar, true
	fmt.Println(om.Get("i dont exist")) // => <nil>, false

	// iterating pairs from oldest to newest:
	for pair := om.Oldest(); pair != nil; pair = pair.Next() {
		fmt.Printf("%s => %s\n", pair.Key, pair.Value)
	} // prints:
	// foo => bar
	// bar => baz
	// coucou => toi

	// iterating over the 2 newest pairs:
	i := 0
	for pair := om.Newest(); pair != nil; pair = pair.Prev() {
		fmt.Printf("%s => %s\n", pair.Key, pair.Value)
		i++
		if i >= 2 {
			break
		}
	} // prints:
	// coucou => toi
	// bar => baz
}
```

All of `OrderedMap`'s methods accept and return `interface{}`s, so you can use any type of keys that regular `map`s accept, as well pack/unpack arbitrary values, e.g.:
```go
type myStruct struct {
	payload string
}

func main() {
	om := orderedmap.New()

	om.Set(12, &myStruct{"foo"})
	om.Set(1, &myStruct{"bar"})

	value, present := om.Get(12)
	if !present {
		panic("should be there!")
	}
	fmt.Println(value.(*myStruct).payload) // => foo

	for pair := om.Oldest(); pair != nil; pair = pair.Next() {
		fmt.Printf("%d => %s\n", pair.Key, pair.Value.(*myStruct).payload)
	} // prints:
	// 12 => foo
	// 1 => bar
}
```

## Alternatives

There are several other ordered map golang implementations out there, but I believe that at the time of writing none of them offer the same functionality as this library; more specifically:
* [iancoleman/orderedmap](https://github.com/iancoleman/orderedmap) only accepts `string` keys, its `Delete` operations are linear
* [cevaris/ordered_map](https://github.com/cevaris/ordered_map) uses a channel for iterations, and leaks goroutines if the iteration is interrupted before fully traversing the map
* [mantyr/iterator](https://github.com/mantyr/iterator) also uses a channel for iterations, and its `Delete` operations are linear
* [samdolan/go-ordered-map](https://github.com/samdolan/go-ordered-map) adds unnecessary locking (users should add their own locking instead if they need it), its `Delete` and `Get` operations are linear, iterations trigger a linear memory allocation
@@ -0,0 +1,193 @@
// Package orderedmap implements an ordered map, i.e. a map that also keeps track of
// the order in which keys were inserted.
//
// All operations are constant-time.
//
// Github repo: https://github.com/wk8/go-ordered-map
//
package orderedmap

import (
	"container/list"
	"fmt"
)

type Pair struct {
	Key   interface{}
	Value interface{}

	element *list.Element
}

type OrderedMap struct {
	pairs map[interface{}]*Pair
	list  *list.List
}

// New creates a new OrderedMap.
func New() *OrderedMap {
	return &OrderedMap{
		pairs: make(map[interface{}]*Pair),
		list:  list.New(),
	}
}

// Get looks for the given key, and returns the value associated with it,
// or nil if not found. The boolean it returns says whether the key is present in the map.
func (om *OrderedMap) Get(key interface{}) (interface{}, bool) {
	if pair, present := om.pairs[key]; present {
		return pair.Value, present
	}
	return nil, false
}

// Load is an alias for Get, mostly to present an API similar to `sync.Map`'s.
func (om *OrderedMap) Load(key interface{}) (interface{}, bool) {
	return om.Get(key)
}

// GetPair looks for the given key, and returns the pair associated with it,
// or nil if not found. The Pair struct can then be used to iterate over the ordered map
// from that point, either forward or backward.
func (om *OrderedMap) GetPair(key interface{}) *Pair {
	return om.pairs[key]
}

// Set sets the key-value pair, and returns what `Get` would have returned
// on that key prior to the call to `Set`.
func (om *OrderedMap) Set(key interface{}, value interface{}) (interface{}, bool) {
	if pair, present := om.pairs[key]; present {
		oldValue := pair.Value
		pair.Value = value
		return oldValue, true
	}

	pair := &Pair{
		Key:   key,
		Value: value,
	}
	pair.element = om.list.PushBack(pair)
	om.pairs[key] = pair

	return nil, false
}

// Store is an alias for Set, mostly to present an API similar to `sync.Map`'s.
func (om *OrderedMap) Store(key interface{}, value interface{}) (interface{}, bool) {
	return om.Set(key, value)
}

// Delete removes the key-value pair, and returns what `Get` would have returned
// on that key prior to the call to `Delete`.
func (om *OrderedMap) Delete(key interface{}) (interface{}, bool) {
	if pair, present := om.pairs[key]; present {
		om.list.Remove(pair.element)
		delete(om.pairs, key)
		return pair.Value, true
	}
	return nil, false
}

// Len returns the length of the ordered map.
func (om *OrderedMap) Len() int {
	return len(om.pairs)
}

// Oldest returns a pointer to the oldest pair. It's meant to be used to iterate on the ordered map's
// pairs from the oldest to the newest, e.g.:
// for pair := orderedMap.Oldest(); pair != nil; pair = pair.Next() { fmt.Printf("%v => %v\n", pair.Key, pair.Value) }
func (om *OrderedMap) Oldest() *Pair {
	return listElementToPair(om.list.Front())
}

// Newest returns a pointer to the newest pair. It's meant to be used to iterate on the ordered map's
// pairs from the newest to the oldest, e.g.:
// for pair := orderedMap.Oldest(); pair != nil; pair = pair.Next() { fmt.Printf("%v => %v\n", pair.Key, pair.Value) }
func (om *OrderedMap) Newest() *Pair {
	return listElementToPair(om.list.Back())
}

// Next returns a pointer to the next pair.
func (p *Pair) Next() *Pair {
	return listElementToPair(p.element.Next())
}

// Previous returns a pointer to the previous pair.
func (p *Pair) Prev() *Pair {
	return listElementToPair(p.element.Prev())
}

func listElementToPair(element *list.Element) *Pair {
	if element == nil {
		return nil
	}
	return element.Value.(*Pair)
}

// KeyNotFoundError may be returned by functions in this package when they're called with keys that are not present
// in the map.
type KeyNotFoundError struct {
	MissingKey interface{}
}

var _ error = &KeyNotFoundError{}

func (e *KeyNotFoundError) Error() string {
	return fmt.Sprintf("missing key: %v", e.MissingKey)
}

// MoveAfter moves the value associated with key to its new position after the one associated with markKey.
// Returns an error iff key or markKey are not present in the map.
func (om *OrderedMap) MoveAfter(key, markKey interface{}) error {
	elements, err := om.getElements(key, markKey)
	if err != nil {
		return err
	}
	om.list.MoveAfter(elements[0], elements[1])
	return nil
}

// MoveBefore moves the value associated with key to its new position before the one associated with markKey.
// Returns an error iff key or markKey are not present in the map.
func (om *OrderedMap) MoveBefore(key, markKey interface{}) error {
	elements, err := om.getElements(key, markKey)
	if err != nil {
		return err
	}
	om.list.MoveBefore(elements[0], elements[1])
	return nil
}

func (om *OrderedMap) getElements(keys ...interface{}) ([]*list.Element, error) {
	elements := make([]*list.Element, len(keys))
	for i, k := range keys {
		pair, present := om.pairs[k]
		if !present {
			return nil, &KeyNotFoundError{k}
		}
		elements[i] = pair.element
	}
	return elements, nil
}

// MoveToBack moves the value associated with key to the back of the ordered map.
// Returns an error iff key is not present in the map.
func (om *OrderedMap) MoveToBack(key interface{}) error {
	pair, present := om.pairs[key]
	if !present {
		return &KeyNotFoundError{key}
	}
	om.list.MoveToBack(pair.element)
	return nil
}

// MoveToFront moves the value associated with key to the front of the ordered map.
// Returns an error iff key is not present in the map.
func (om *OrderedMap) MoveToFront(key interface{}) error {
	pair, present := om.pairs[key]
	if !present {
		return &KeyNotFoundError{key}
	}
	om.list.MoveToFront(pair.element)
	return nil
}
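Since the full vendored source is above, here is a runnable example against it, exercising the reordering helpers (`MoveToFront`, `MoveAfter`) that the README examples do not cover:

```go
package main

import (
	"fmt"

	"github.com/wk8/go-ordered-map"
)

func main() {
	om := orderedmap.New()
	om.Set("a", 1)
	om.Set("b", 2)
	om.Set("c", 3)

	_ = om.MoveToFront("c")    // order: c, a, b
	_ = om.MoveAfter("a", "b") // order: c, b, a

	for pair := om.Oldest(); pair != nil; pair = pair.Next() {
		fmt.Printf("%v => %v\n", pair.Key, pair.Value)
	}
	// c => 3
	// b => 2
	// a => 1
}
```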
@@ -975,7 +975,7 @@ github.com/vacp2p/mvds/transport
 github.com/waku-org/go-discover/discover
 github.com/waku-org/go-discover/discover/v4wire
 github.com/waku-org/go-discover/discover/v5wire
-# github.com/waku-org/go-waku v0.5.3-0.20230404182041-41691a44e579
+# github.com/waku-org/go-waku v0.5.3-0.20230509204224-d9a12bf079a8
 ## explicit; go 1.19
 github.com/waku-org/go-waku/logging
 github.com/waku-org/go-waku/waku/persistence

@@ -988,10 +988,11 @@ github.com/waku-org/go-waku/waku/v2/metrics
 github.com/waku-org/go-waku/waku/v2/node
 github.com/waku-org/go-waku/waku/v2/payload
 github.com/waku-org/go-waku/waku/v2/protocol
+github.com/waku-org/go-waku/waku/v2/protocol/enr
 github.com/waku-org/go-waku/waku/v2/protocol/filter
 github.com/waku-org/go-waku/waku/v2/protocol/filter/pb
-github.com/waku-org/go-waku/waku/v2/protocol/filterv2
-github.com/waku-org/go-waku/waku/v2/protocol/filterv2/pb
+github.com/waku-org/go-waku/waku/v2/protocol/legacy_filter
+github.com/waku-org/go-waku/waku/v2/protocol/legacy_filter/pb
 github.com/waku-org/go-waku/waku/v2/protocol/lightpush
 github.com/waku-org/go-waku/waku/v2/protocol/lightpush/pb
 github.com/waku-org/go-waku/waku/v2/protocol/pb

@@ -1001,13 +1002,15 @@ github.com/waku-org/go-waku/waku/v2/protocol/relay
 github.com/waku-org/go-waku/waku/v2/protocol/rln
 github.com/waku-org/go-waku/waku/v2/protocol/rln/contracts
 github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager
+github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/dynamic
 github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/static
+github.com/waku-org/go-waku/waku/v2/protocol/rln/keystore
 github.com/waku-org/go-waku/waku/v2/protocol/store
 github.com/waku-org/go-waku/waku/v2/protocol/store/pb
 github.com/waku-org/go-waku/waku/v2/rendezvous
 github.com/waku-org/go-waku/waku/v2/timesource
 github.com/waku-org/go-waku/waku/v2/utils
-# github.com/waku-org/go-zerokit-rln v0.1.11
+# github.com/waku-org/go-zerokit-rln v0.1.12
 ## explicit; go 1.18
 github.com/waku-org/go-zerokit-rln/rln
 github.com/waku-org/go-zerokit-rln/rln/link

@@ -1039,6 +1042,9 @@ github.com/wealdtech/go-ens/v3/util
 # github.com/wealdtech/go-multicodec v1.4.0
 ## explicit; go 1.12
 github.com/wealdtech/go-multicodec
+# github.com/wk8/go-ordered-map v1.0.0
+## explicit; go 1.14
+github.com/wk8/go-ordered-map
 # github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f
 ## explicit
 github.com/xeipuuv/gojsonpointer
@@ -21,6 +21,12 @@ import (
 var ErrInvalidCursor = errors.New("invalid cursor")

+var ErrFutureMessage = errors.New("message timestamp in the future")
+var ErrMessageTooOld = errors.New("message too old")
+
+// MaxTimeVariance is the maximum duration in the future allowed for a message timestamp
+const MaxTimeVariance = time.Duration(20) * time.Second
+
 // DBStore is a MessageProvider that has a *sql.DB connection
 type DBStore struct {
 	db *sql.DB

@@ -87,6 +93,23 @@ func (d *DBStore) Start(ctx context.Context, timesource timesource.Timesource) e
 	return nil
 }

+func (d *DBStore) Validate(env *protocol.Envelope) error {
+	n := time.Unix(0, env.Index().ReceiverTime)
+	upperBound := n.Add(MaxTimeVariance)
+	lowerBound := n.Add(-MaxTimeVariance)
+
+	// Ensure that messages don't "jump" to the front of the queue with future timestamps
+	if env.Message().Timestamp > upperBound.UnixNano() {
+		return ErrFutureMessage
+	}
+
+	if env.Message().Timestamp < lowerBound.UnixNano() {
+		return ErrMessageTooOld
+	}
+
+	return nil
+}
+
 func (d *DBStore) cleanOlderRecords() error {
 	d.log.Debug("Cleaning older records...")
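The validation that used to live in the store protocol (and only rejected future timestamps) now lives in the message provider and is symmetric: a message is rejected if its own timestamp strays more than MaxTimeVariance to either side of the local receive time. A self-contained sketch of that window check, with times as UNIX nanoseconds as in the envelope index:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

const MaxTimeVariance = 20 * time.Second

var (
	ErrFutureMessage = errors.New("message timestamp in the future")
	ErrMessageTooOld = errors.New("message too old")
)

// validate accepts msgTimestamp only within ±MaxTimeVariance of receiverTime.
func validate(receiverTime, msgTimestamp int64) error {
	n := time.Unix(0, receiverTime)
	if msgTimestamp > n.Add(MaxTimeVariance).UnixNano() {
		return ErrFutureMessage
	}
	if msgTimestamp < n.Add(-MaxTimeVariance).UnixNano() {
		return ErrMessageTooOld
	}
	return nil
}

func main() {
	now := time.Now().UnixNano()
	fmt.Println(validate(now, now))                                // <nil>
	fmt.Println(validate(now, now+(30*time.Second).Nanoseconds())) // future
	fmt.Println(validate(now, now-(30*time.Second).Nanoseconds())) // too old
}
```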
@ -58,7 +58,7 @@ import (
|
||||||
|
|
||||||
"github.com/waku-org/go-waku/waku/v2/dnsdisc"
|
"github.com/waku-org/go-waku/waku/v2/dnsdisc"
|
||||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||||
"github.com/waku-org/go-waku/waku/v2/protocol/filter"
|
"github.com/waku-org/go-waku/waku/v2/protocol/legacy_filter"
|
||||||
"github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange"
|
"github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange"
|
||||||
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
|
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
|
||||||
|
|
||||||
|
@ -286,7 +286,7 @@ func New(nodeKey string, fleet string, cfg *Config, logger *zap.Logger, appDB *s
|
||||||
}
|
}
|
||||||
|
|
||||||
if cfg.LightClient {
|
if cfg.LightClient {
|
||||||
opts = append(opts, node.WithWakuFilter(false))
|
opts = append(opts, node.WithLegacyWakuFilter(false))
|
||||||
} else {
|
} else {
|
||||||
relayOpts := []pubsub.Option{
|
relayOpts := []pubsub.Option{
|
||||||
pubsub.WithMaxMessageSize(int(waku.settings.MaxMsgSize)),
|
pubsub.WithMaxMessageSize(int(waku.settings.MaxMsgSize)),
|
||||||
|
@ -525,8 +525,7 @@ func (w *Waku) GetStats() types.StatsSummary {
|
||||||
|
|
||||||
func (w *Waku) runPeerExchangeLoop() {
|
func (w *Waku) runPeerExchangeLoop() {
|
||||||
defer w.wg.Done()
|
defer w.wg.Done()
|
||||||
|
if !w.settings.PeerExchange || !w.settings.LightClient {
|
||||||
if w.settings.PeerExchange && !w.settings.LightClient {
|
|
||||||
// Currently peer exchange is only used for full nodes
|
// Currently peer exchange is only used for full nodes
|
||||||
// TODO: should it be used for lightpush? or lightpush nodes
|
// TODO: should it be used for lightpush? or lightpush nodes
|
||||||
// are only going to be selected from a specific set of peers?
|
// are only going to be selected from a specific set of peers?
|
||||||
|
@ -620,7 +619,7 @@ func (w *Waku) runRelayMsgLoop() {
|
||||||
case <-w.quit:
|
case <-w.quit:
|
||||||
sub.Unsubscribe()
|
sub.Unsubscribe()
|
||||||
return
|
return
|
||||||
case env := <-sub.C:
|
case env := <-sub.Ch:
|
||||||
envelopeErrors, err := w.OnNewEnvelopes(env, common.RelayedMessageType)
|
envelopeErrors, err := w.OnNewEnvelopes(env, common.RelayedMessageType)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
w.logger.Error("onNewEnvelope error", zap.Error(err))
|
w.logger.Error("onNewEnvelope error", zap.Error(err))
|
||||||
|
@@ -661,13 +660,13 @@ func (w *Waku) subscribeWakuFilterTopic(topics [][]byte) {
	}

	var err error
-	contentFilter := filter.ContentFilter{
+	contentFilter := legacy_filter.ContentFilter{
		Topic:         relay.DefaultWakuTopic,
		ContentTopics: contentTopics,
	}

-	var wakuFilter filter.Filter
-	_, wakuFilter, err = w.node.Filter().Subscribe(context.Background(), contentFilter)
+	var wakuFilter legacy_filter.Filter
+	_, wakuFilter, err = w.node.LegacyFilter().Subscribe(context.Background(), contentFilter)
	if err != nil {
		w.logger.Warn("could not add wakuv2 filter for topics", zap.Any("topics", topics))
		return
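Note: together with the Unsubscribe change below, the light-client round-trip through the renamed API looks roughly like this (a sketch assuming the types kept their pre-rename shapes; `process` and the content topic string are placeholders):

	cf := legacy_filter.ContentFilter{
		Topic:         relay.DefaultWakuTopic,
		ContentTopics: []string{"/my-app/1/chat/proto"}, // illustrative topic
	}

	// Subscribe returns a filter ID and a Filter whose Chan yields matches.
	_, f, err := w.node.LegacyFilter().Subscribe(context.Background(), cf)
	if err != nil {
		return err
	}
	go func() {
		for env := range f.Chan {
			process(env)
		}
	}()

	// ...later, tear the subscription down with the same content filter:
	err = w.node.LegacyFilter().UnsubscribeFilter(context.Background(), cf)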
@@ -982,14 +981,14 @@ func (w *Waku) GetFilter(id string) *common.Filter {
func (w *Waku) Unsubscribe(id string) error {
	f := w.filters.Get(id)
	if f != nil && w.settings.LightClient {
-		contentFilter := filter.ContentFilter{
+		contentFilter := legacy_filter.ContentFilter{
			Topic: relay.DefaultWakuTopic,
		}
		for _, topic := range f.Topics {
			contentFilter.ContentTopics = append(contentFilter.ContentTopics, common.BytesToTopic(topic).ContentTopic())
		}

-		if err := w.node.Filter().UnsubscribeFilter(context.Background(), contentFilter); err != nil {
+		if err := w.node.LegacyFilter().UnsubscribeFilter(context.Background(), contentFilter); err != nil {
			return fmt.Errorf("failed to unsubscribe: %w", err)
		}
	}
@@ -1125,13 +1124,6 @@ func (w *Waku) Start() error {
		return fmt.Errorf("failed to create a go-waku node: %v", err)
	}

-	idService, err := identify.NewIDService(w.node.Host())
-	if err != nil {
-		return err
-	}
-
-	w.identifyService = idService
-
	w.quit = make(chan struct{})
	w.filterMsgChannel = make(chan *protocol.Envelope, 1024)
	w.connectionChanged = make(chan struct{})
@@ -1141,6 +1133,13 @@ func (w *Waku) Start() error {
		return fmt.Errorf("failed to start go-waku node: %v", err)
	}

+	idService, err := identify.NewIDService(w.node.Host())
+	if err != nil {
+		return err
+	}
+
+	w.identifyService = idService
+
	if err = w.addWakuV2Peers(ctx, w.cfg); err != nil {
		return fmt.Errorf("failed to add wakuv2 peers: %v", err)
	}
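Note: the identify-service block removed from the earlier hunk reappears here, after the node is started. The likely reason for the move (an inference, not documented in the commit) is that the node's libp2p host only exists once the node has started, so w.node.Host() is not usable earlier. The ordering constraint in miniature:

	// Order matters: attach libp2p identify only to a live host.
	if err = w.node.Start(ctx); err != nil {
		return fmt.Errorf("failed to start go-waku node: %v", err)
	}
	idService, err := identify.NewIDService(w.node.Host()) // host exists from here on
	if err != nil {
		return err
	}
	w.identifyService = idService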
@@ -1215,9 +1214,9 @@ func (w *Waku) Start() error {
// Stop implements node.Service, stopping the background data propagation thread
// of the Waku protocol.
func (w *Waku) Stop() error {
+	close(w.quit)
	w.identifyService.Close()
	w.node.Stop()
-	close(w.quit)
	close(w.filterMsgChannel)
	close(w.connectionChanged)
	w.wg.Wait()
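Note: moving close(w.quit) to the top of Stop signals every goroutine that selects on w.quit before the node and identify service are torn down; closing a channel is Go's broadcast primitive for shutdown. The pattern in miniature:

	package main

	import "sync"

	func main() {
		quit := make(chan struct{})
		jobs := make(chan int)

		var wg sync.WaitGroup
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case <-quit:
					return // a closed channel unblocks all receivers at once
				case j := <-jobs:
					_ = j // handle work
				}
			}
		}()

		close(quit) // broadcast shutdown first...
		wg.Wait()   // ...then wait for workers, mirroring w.wg.Wait()
	}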
@@ -1226,7 +1225,7 @@ func (w *Waku) Stop() error {

func (w *Waku) OnNewEnvelopes(envelope *protocol.Envelope, msgType common.MessageType) ([]common.EnvelopeError, error) {
	if envelope == nil {
-		return nil, errors.New("nil envelope error")
+		return nil, nil
	}

	recvMessage := common.NewReceivedMessage(envelope, msgType)
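Note: OnNewEnvelopes now treats a nil envelope as a no-op rather than an error, so callers such as runRelayMsgLoop stop logging "onNewEnvelope error" for what is presumably a benign case (an inference from the call sites above, not stated in the commit). The new contract in caller terms:

	// Sketch: under the new behaviour a nil envelope yields (nil, nil),
	// so no error branch fires and no envelope errors are reported.
	envelopeErrors, err := w.OnNewEnvelopes(nil, common.RelayedMessageType)
	// envelopeErrors == nil && err == nil
	_ = envelopeErrors
	_ = err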