Bump waku version (#4407)
* chore: make vendor * chore: fix data types * Update wakuv2/common/message.go Co-authored-by: richΛrd <info@richardramos.me> * Update wakuv2/common/message.go Co-authored-by: richΛrd <info@richardramos.me> * Update wakuv2/persistence/dbstore.go Co-authored-by: richΛrd <info@richardramos.me> * chore: use safe method to get timestamp. * chore: use proto.Uint64 to convert reference * chore: manual fix lint issue when import dependency --------- Co-authored-by: richΛrd <info@richardramos.me>
This commit is contained in:
parent
d7e7792b51
commit
e28eca1c54
2
go.mod
2
go.mod
|
@ -86,7 +86,7 @@ require (
|
|||
github.com/mutecomm/go-sqlcipher/v4 v4.4.2
|
||||
github.com/schollz/peerdiscovery v1.7.0
|
||||
github.com/siphiuel/lc-proxy-wrapper v0.0.0-20230516150924-246507cee8c7
|
||||
github.com/waku-org/go-waku v0.8.1-0.20231103161423-351dd55a1498
|
||||
github.com/waku-org/go-waku v0.8.1-0.20231201063231-bdd5d02a91a3
|
||||
github.com/wk8/go-ordered-map/v2 v2.1.7
|
||||
github.com/yeqown/go-qrcode/v2 v2.2.1
|
||||
github.com/yeqown/go-qrcode/writer/standard v1.2.1
|
||||
|
|
4
go.sum
4
go.sum
|
@ -2097,8 +2097,8 @@ github.com/waku-org/go-discover v0.0.0-20221209174356-61c833f34d98 h1:xwY0kW5XZF
|
|||
github.com/waku-org/go-discover v0.0.0-20221209174356-61c833f34d98/go.mod h1:eBHgM6T4EG0RZzxpxKy+rGz/6Dw2Nd8DWxS0lm9ESDw=
|
||||
github.com/waku-org/go-libp2p-rendezvous v0.0.0-20230628220917-7b4e5ae4c0e7 h1:0e1h+p84yBp0IN7AqgbZlV7lgFBjm214lgSOE7CeJmE=
|
||||
github.com/waku-org/go-libp2p-rendezvous v0.0.0-20230628220917-7b4e5ae4c0e7/go.mod h1:pFvOZ9YTFsW0o5zJW7a0B5tr1owAijRWJctXJ2toL04=
|
||||
github.com/waku-org/go-waku v0.8.1-0.20231103161423-351dd55a1498 h1:2Y06Ni3tBj2LQA0ys1o1PspZxZPM9GOKwNEGolbueQ4=
|
||||
github.com/waku-org/go-waku v0.8.1-0.20231103161423-351dd55a1498/go.mod h1:hem2hnXK5BdabxwJULszM0Rh1Yj+gD9IxjwLCGPPaxs=
|
||||
github.com/waku-org/go-waku v0.8.1-0.20231201063231-bdd5d02a91a3 h1:TKP/f+K4xPQIFlPihuX7K/Jtm1EltPVhCSHmRn220EI=
|
||||
github.com/waku-org/go-waku v0.8.1-0.20231201063231-bdd5d02a91a3/go.mod h1:hem2hnXK5BdabxwJULszM0Rh1Yj+gD9IxjwLCGPPaxs=
|
||||
github.com/waku-org/go-zerokit-rln v0.1.14-0.20230916173259-d284a3d8f2fd h1:cu7CsUo7BK6ac/v193RIaqAzUcmpa6MNY4xYW9AenQI=
|
||||
github.com/waku-org/go-zerokit-rln v0.1.14-0.20230916173259-d284a3d8f2fd/go.mod h1:1PdBdPzyTaKt3VnpAHk3zj+r9dXPFOr3IHZP9nFle6E=
|
||||
github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230916172309-ee0ee61dde2b h1:KgZVhsLkxsj5gb/FfndSCQu6VYwALrCOgYI3poR95yE=
|
||||
|
|
|
@ -63,7 +63,7 @@ func (c *Client) PushReceivedEnvelope(envelope *v2protocol.Envelope) {
|
|||
url := fmt.Sprintf("%s/received-envelope", c.serverURL)
|
||||
postBody := map[string]interface{}{
|
||||
"messageHash": types.EncodeHex(envelope.Hash()),
|
||||
"sentAt": uint32(envelope.Message().Timestamp / int64(time.Second)),
|
||||
"sentAt": uint32(envelope.Message().GetTimestamp() / int64(time.Second)),
|
||||
"pubsubTopic": envelope.PubsubTopic(),
|
||||
"topic": envelope.Message().ContentTopic,
|
||||
"receiverKeyUID": c.keyUID,
|
||||
|
|
|
@ -35,16 +35,6 @@ func (bArr byteArr) MarshalLogArray(encoder zapcore.ArrayEncoder) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
type hexByte []byte
|
||||
|
||||
func HexString(key string, byteVal hexByte) zapcore.Field {
|
||||
return zap.Stringer(key, hexByte(byteVal))
|
||||
}
|
||||
|
||||
func (h hexByte) String() string {
|
||||
return "0x" + hex.EncodeToString(h)
|
||||
}
|
||||
|
||||
// List of multiaddrs
|
||||
type multiaddrs []multiaddr.Multiaddr
|
||||
|
||||
|
|
|
@ -14,8 +14,8 @@ import (
|
|||
wpb "github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/store/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/timesource"
|
||||
"github.com/waku-org/go-waku/waku/v2/utils"
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// MessageProvider is an interface that provides access to store/retrieve messages from a persistence store.
|
||||
|
@ -212,7 +212,7 @@ func (d *DBStore) cleanOlderRecords(ctx context.Context) error {
|
|||
if d.maxDuration > 0 {
|
||||
start := time.Now()
|
||||
sqlStmt := `DELETE FROM message WHERE receiverTimestamp < $1`
|
||||
_, err := d.db.Exec(sqlStmt, utils.GetUnixEpochFrom(d.timesource.Now().Add(-d.maxDuration)))
|
||||
_, err := d.db.Exec(sqlStmt, d.timesource.Now().Add(-d.maxDuration).UnixNano())
|
||||
if err != nil {
|
||||
d.metrics.RecordError(retPolicyFailure)
|
||||
return err
|
||||
|
@ -287,11 +287,11 @@ func (d *DBStore) Validate(env *protocol.Envelope) error {
|
|||
lowerBound := n.Add(-MaxTimeVariance)
|
||||
|
||||
// Ensure that messages don't "jump" to the front of the queue with future timestamps
|
||||
if env.Message().Timestamp > upperBound.UnixNano() {
|
||||
if env.Message().GetTimestamp() > upperBound.UnixNano() {
|
||||
return ErrFutureMessage
|
||||
}
|
||||
|
||||
if env.Message().Timestamp < lowerBound.UnixNano() {
|
||||
if env.Message().GetTimestamp() < lowerBound.UnixNano() {
|
||||
return ErrMessageTooOld
|
||||
}
|
||||
|
||||
|
@ -310,7 +310,7 @@ func (d *DBStore) Put(env *protocol.Envelope) error {
|
|||
dbKey := NewDBKey(uint64(cursor.SenderTime), uint64(cursor.ReceiverTime), env.PubsubTopic(), env.Index().Digest)
|
||||
|
||||
start := time.Now()
|
||||
_, err = stmt.Exec(dbKey.Bytes(), cursor.ReceiverTime, env.Message().Timestamp, env.Message().ContentTopic, env.PubsubTopic(), env.Message().Payload, env.Message().Version)
|
||||
_, err = stmt.Exec(dbKey.Bytes(), cursor.ReceiverTime, env.Message().GetTimestamp(), env.Message().ContentTopic, env.PubsubTopic(), env.Message().Payload, env.Message().GetVersion())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -361,15 +361,17 @@ func (d *DBStore) handleQueryCursor(query *pb.HistoryQuery, paramCnt *int, condi
|
|||
parameters = append(parameters, timeDBKey.Bytes())
|
||||
}
|
||||
|
||||
if query.StartTime != 0 {
|
||||
startTime := query.GetStartTime()
|
||||
if startTime != 0 {
|
||||
if !usesCursor || query.PagingInfo.Direction == pb.PagingInfo_BACKWARD {
|
||||
handleTimeParam(query.StartTime, ">=")
|
||||
handleTimeParam(startTime, ">=")
|
||||
}
|
||||
}
|
||||
|
||||
if query.EndTime != 0 {
|
||||
endTime := query.GetEndTime()
|
||||
if endTime != 0 {
|
||||
if !usesCursor || query.PagingInfo.Direction == pb.PagingInfo_FORWARD {
|
||||
handleTimeParam(query.EndTime+1, "<")
|
||||
handleTimeParam(endTime+1, "<")
|
||||
}
|
||||
}
|
||||
return conditions, parameters, nil
|
||||
|
@ -564,8 +566,14 @@ func (d *DBStore) GetStoredMessage(row *sql.Rows) (StoredMessage, error) {
|
|||
msg := new(wpb.WakuMessage)
|
||||
msg.ContentTopic = contentTopic
|
||||
msg.Payload = payload
|
||||
msg.Timestamp = senderTimestamp
|
||||
msg.Version = version
|
||||
|
||||
if senderTimestamp != 0 {
|
||||
msg.Timestamp = proto.Int64(senderTimestamp)
|
||||
}
|
||||
|
||||
if version > 0 {
|
||||
msg.Version = proto.Uint32(version)
|
||||
}
|
||||
|
||||
record := StoredMessage{
|
||||
ID: id,
|
||||
|
|
|
@ -14,13 +14,14 @@ import (
|
|||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/waku-org/go-discover/discover"
|
||||
"github.com/waku-org/go-waku/logging"
|
||||
"github.com/waku-org/go-waku/waku/v2/peermanager"
|
||||
"github.com/waku-org/go-waku/waku/v2/peerstore"
|
||||
wenr "github.com/waku-org/go-waku/waku/v2/protocol/enr"
|
||||
"github.com/waku-org/go-waku/waku/v2/service"
|
||||
"github.com/waku-org/go-waku/waku/v2/utils"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/ethereum/go-ethereum/p2p/nat"
|
||||
)
|
||||
|
||||
|
@ -28,7 +29,7 @@ var ErrNoDiscV5Listener = errors.New("no discv5 listener")
|
|||
|
||||
// PeerConnector will subscribe to a channel containing the information for all peers found by this discovery protocol
|
||||
type PeerConnector interface {
|
||||
Subscribe(context.Context, <-chan peermanager.PeerData)
|
||||
Subscribe(context.Context, <-chan service.PeerData)
|
||||
}
|
||||
|
||||
type DiscoveryV5 struct {
|
||||
|
@ -45,7 +46,7 @@ type DiscoveryV5 struct {
|
|||
|
||||
log *zap.Logger
|
||||
|
||||
*peermanager.CommonDiscoveryService
|
||||
*service.CommonDiscoveryService
|
||||
}
|
||||
|
||||
type discV5Parameters struct {
|
||||
|
@ -138,7 +139,7 @@ func NewDiscoveryV5(priv *ecdsa.PrivateKey, localnode *enode.LocalNode, peerConn
|
|||
params: params,
|
||||
peerConnector: peerConnector,
|
||||
NAT: NAT,
|
||||
CommonDiscoveryService: peermanager.NewCommonDiscoveryService(),
|
||||
CommonDiscoveryService: service.NewCommonDiscoveryService(),
|
||||
localnode: localnode,
|
||||
metrics: newMetrics(reg),
|
||||
config: discover.Config{
|
||||
|
@ -250,40 +251,40 @@ func (d *DiscoveryV5) Stop() {
|
|||
})
|
||||
}
|
||||
|
||||
/*
|
||||
func isWakuNode(node *enode.Node) bool {
|
||||
enrField := new(utils.WakuEnrBitfield)
|
||||
if err := node.Record().Load(enr.WithEntry(utils.WakuENRField, &enrField)); err != nil {
|
||||
enrField := new(wenr.WakuEnrBitfield)
|
||||
if err := node.Record().Load(enr.WithEntry(wenr.WakuENRField, &enrField)); err != nil {
|
||||
if !enr.IsNotFound(err) {
|
||||
utils.Logger().Named("discv5").Error("could not retrieve port for enr ", zap.Any("node", node))
|
||||
utils.Logger().Named("discv5").Error("could not retrieve waku2 ENR field for enr ", zap.Any("node", node))
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if enrField != nil {
|
||||
return *enrField != uint8(0)
|
||||
return *enrField != uint8(0) // #RFC 31 requirement
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
*/
|
||||
|
||||
func (d *DiscoveryV5) evaluateNode() func(node *enode.Node) bool {
|
||||
return func(node *enode.Node) bool {
|
||||
if node == nil {
|
||||
return false
|
||||
}
|
||||
d.log.Debug("found a peer", logging.ENode("enr", node))
|
||||
|
||||
// TODO: consider node filtering based on ENR; we do not filter based on ENR in the first waku discv5 beta stage
|
||||
/*if !isWakuNode(node) {
|
||||
// node filtering based on ENR; we do not filter based on ENR in the first waku discv5 beta stage
|
||||
if !isWakuNode(node) {
|
||||
d.log.Debug("peer is not waku node", logging.ENode("enr", node))
|
||||
return false
|
||||
}*/
|
||||
|
||||
}
|
||||
d.log.Debug("peer is a waku node", logging.ENode("enr", node))
|
||||
_, err := wenr.EnodeToPeerInfo(node)
|
||||
|
||||
if err != nil {
|
||||
d.metrics.RecordError(peerInfoFailure)
|
||||
utils.Logger().Named("discv5").Error("obtaining peer info from enode", logging.ENode("enr", node), zap.Error(err))
|
||||
d.log.Error("obtaining peer info from enode", logging.ENode("enr", node), zap.Error(err))
|
||||
return false
|
||||
}
|
||||
|
||||
|
@ -405,21 +406,25 @@ func (d *DiscoveryV5) DefaultPredicate() Predicate {
|
|||
|
||||
nodeRS, err := wenr.RelaySharding(n.Record())
|
||||
if err != nil {
|
||||
d.log.Debug("failed to get relay shards from node record", logging.ENode("node", n), zap.Error(err))
|
||||
return false
|
||||
}
|
||||
|
||||
if nodeRS == nil {
|
||||
d.log.Debug("node has no shards registered", logging.ENode("node", n))
|
||||
// Node has no shards registered.
|
||||
return false
|
||||
}
|
||||
|
||||
if nodeRS.ClusterID != localRS.ClusterID {
|
||||
d.log.Debug("cluster id mismatch from local clusterid", logging.ENode("node", n), zap.Error(err))
|
||||
return false
|
||||
}
|
||||
|
||||
// Contains any
|
||||
for _, idx := range localRS.ShardIDs {
|
||||
if nodeRS.Contains(localRS.ClusterID, idx) {
|
||||
d.log.Debug("shards match for discovered node", logging.ENode("node", n))
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
@ -439,7 +444,7 @@ func (d *DiscoveryV5) peerLoop(ctx context.Context) error {
|
|||
defer iterator.Close()
|
||||
|
||||
d.Iterate(ctx, iterator, func(n *enode.Node, p peer.AddrInfo) error {
|
||||
peer := peermanager.PeerData{
|
||||
peer := service.PeerData{
|
||||
Origin: peerstore.Discv5,
|
||||
AddrInfo: p,
|
||||
ENR: n,
|
||||
|
|
|
@ -1,10 +1,11 @@
|
|||
package peermanager
|
||||
package discv5
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/waku-org/go-waku/waku/v2/service"
|
||||
)
|
||||
|
||||
// TestPeerDiscoverer is mock peer discoverer for testing
|
||||
|
@ -23,7 +24,7 @@ func NewTestPeerDiscoverer() *TestPeerDiscoverer {
|
|||
}
|
||||
|
||||
// Subscribe is for subscribing to peer discoverer
|
||||
func (t *TestPeerDiscoverer) Subscribe(ctx context.Context, ch <-chan PeerData) {
|
||||
func (t *TestPeerDiscoverer) Subscribe(ctx context.Context, ch <-chan service.PeerData) {
|
||||
go func() {
|
||||
for {
|
||||
select {
|
|
@ -2,6 +2,7 @@ package dnsdisc
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/dnsdisc"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
|
@ -15,14 +16,31 @@ import (
|
|||
|
||||
type dnsDiscoveryParameters struct {
|
||||
nameserver string
|
||||
resolver dnsdisc.Resolver
|
||||
}
|
||||
|
||||
type DNSDiscoveryOption func(*dnsDiscoveryParameters)
|
||||
type DNSDiscoveryOption func(*dnsDiscoveryParameters) error
|
||||
|
||||
var ErrExclusiveOpts = errors.New("cannot set both nameserver and resolver")
|
||||
|
||||
// WithNameserver is a DnsDiscoveryOption that configures the nameserver to use
|
||||
func WithNameserver(nameserver string) DNSDiscoveryOption {
|
||||
return func(params *dnsDiscoveryParameters) {
|
||||
return func(params *dnsDiscoveryParameters) error {
|
||||
if params.resolver != nil {
|
||||
return ErrExclusiveOpts
|
||||
}
|
||||
params.nameserver = nameserver
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithResolver(resolver dnsdisc.Resolver) DNSDiscoveryOption {
|
||||
return func(params *dnsDiscoveryParameters) error {
|
||||
if params.nameserver != "" {
|
||||
return ErrExclusiveOpts
|
||||
}
|
||||
params.resolver = resolver
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -49,11 +67,18 @@ func RetrieveNodes(ctx context.Context, url string, opts ...DNSDiscoveryOption)
|
|||
|
||||
params := new(dnsDiscoveryParameters)
|
||||
for _, opt := range opts {
|
||||
opt(params)
|
||||
err := opt(params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if params.resolver == nil {
|
||||
params.resolver = GetResolver(ctx, params.nameserver)
|
||||
}
|
||||
|
||||
client := dnsdisc.NewClient(dnsdisc.Config{
|
||||
Resolver: GetResolver(ctx, params.nameserver),
|
||||
Resolver: params.resolver,
|
||||
})
|
||||
|
||||
tree, err := client.SyncTree(url)
|
||||
|
|
|
@ -29,6 +29,7 @@ import (
|
|||
|
||||
"github.com/waku-org/go-waku/logging"
|
||||
"github.com/waku-org/go-waku/waku/v2/discv5"
|
||||
"github.com/waku-org/go-waku/waku/v2/dnsdisc"
|
||||
"github.com/waku-org/go-waku/waku/v2/peermanager"
|
||||
wps "github.com/waku-org/go-waku/waku/v2/peerstore"
|
||||
wakuprotocol "github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
|
@ -42,6 +43,7 @@ import (
|
|||
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/store"
|
||||
"github.com/waku-org/go-waku/waku/v2/rendezvous"
|
||||
"github.com/waku-org/go-waku/waku/v2/service"
|
||||
"github.com/waku-org/go-waku/waku/v2/timesource"
|
||||
|
||||
"github.com/waku-org/go-waku/waku/v2/utils"
|
||||
|
@ -288,6 +290,7 @@ func New(opts ...WakuNodeOption) (*WakuNode, error) {
|
|||
}
|
||||
|
||||
w.opts.legacyFilterOpts = append(w.opts.legacyFilterOpts, legacy_filter.WithPeerManager(w.peermanager))
|
||||
w.opts.filterOpts = append(w.opts.filterOpts, filter.WithPeerManager(w.peermanager))
|
||||
|
||||
w.legacyFilter = legacy_filter.NewWakuFilter(w.bcaster, w.opts.isLegacyFilterFullNode, w.timesource, w.opts.prometheusReg, w.log, w.opts.legacyFilterOpts...)
|
||||
w.filterFullNode = filter.NewWakuFilterFullNode(w.timesource, w.opts.prometheusReg, w.log, w.opts.filterOpts...)
|
||||
|
@ -690,7 +693,9 @@ func (w *WakuNode) mountDiscV5() error {
|
|||
}
|
||||
|
||||
var err error
|
||||
w.discoveryV5, err = discv5.NewDiscoveryV5(w.opts.privKey, w.localNode, w.peerConnector, w.opts.prometheusReg, w.log, discV5Options...)
|
||||
discv5Inst, err := discv5.NewDiscoveryV5(w.opts.privKey, w.localNode, w.peerConnector, w.opts.prometheusReg, w.log, discV5Options...)
|
||||
w.discoveryV5 = discv5Inst
|
||||
w.peermanager.SetDiscv5(discv5Inst)
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -708,18 +713,22 @@ func (w *WakuNode) startStore(ctx context.Context, sub *relay.Subscription) erro
|
|||
// AddPeer is used to add a peer and the protocols it support to the node peerstore
|
||||
// TODO: Need to update this for autosharding, to only take contentTopics and optional pubSubTopics or provide an alternate API only for contentTopics.
|
||||
func (w *WakuNode) AddPeer(address ma.Multiaddr, origin wps.Origin, pubSubTopics []string, protocols ...protocol.ID) (peer.ID, error) {
|
||||
return w.peermanager.AddPeer(address, origin, pubSubTopics, protocols...)
|
||||
pData, err := w.peermanager.AddPeer(address, origin, pubSubTopics, protocols...)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return pData.AddrInfo.ID, nil
|
||||
}
|
||||
|
||||
// AddDiscoveredPeer to add a discovered peer to the node peerStore
|
||||
func (w *WakuNode) AddDiscoveredPeer(ID peer.ID, addrs []ma.Multiaddr, origin wps.Origin, pubsubTopics []string, connectNow bool) {
|
||||
p := peermanager.PeerData{
|
||||
p := service.PeerData{
|
||||
Origin: origin,
|
||||
AddrInfo: peer.AddrInfo{
|
||||
ID: ID,
|
||||
Addrs: addrs,
|
||||
},
|
||||
PubSubTopics: pubsubTopics,
|
||||
PubsubTopics: pubsubTopics,
|
||||
}
|
||||
w.peermanager.AddDiscoveredPeer(p, connectNow)
|
||||
}
|
||||
|
@ -923,3 +932,41 @@ func (w *WakuNode) findRelayNodes(ctx context.Context) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func GetNodesFromDNSDiscovery(logger *zap.Logger, ctx context.Context, nameServer string, discoveryURLs []string) []dnsdisc.DiscoveredNode {
|
||||
var discoveredNodes []dnsdisc.DiscoveredNode
|
||||
for _, url := range discoveryURLs {
|
||||
logger.Info("attempting DNS discovery with ", zap.String("URL", url))
|
||||
nodes, err := dnsdisc.RetrieveNodes(ctx, url, dnsdisc.WithNameserver(nameServer))
|
||||
if err != nil {
|
||||
logger.Warn("dns discovery error ", zap.Error(err))
|
||||
} else {
|
||||
var discPeerInfo []peer.AddrInfo
|
||||
for _, n := range nodes {
|
||||
discPeerInfo = append(discPeerInfo, n.PeerInfo)
|
||||
}
|
||||
logger.Info("found dns entries ", zap.Any("nodes", discPeerInfo))
|
||||
discoveredNodes = append(discoveredNodes, nodes...)
|
||||
}
|
||||
}
|
||||
return discoveredNodes
|
||||
}
|
||||
|
||||
func GetDiscv5Option(dnsDiscoveredNodes []dnsdisc.DiscoveredNode, discv5Nodes []string, port uint, autoUpdate bool) (WakuNodeOption, error) {
|
||||
var bootnodes []*enode.Node
|
||||
for _, addr := range discv5Nodes {
|
||||
bootnode, err := enode.Parse(enode.ValidSchemes, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bootnodes = append(bootnodes, bootnode)
|
||||
}
|
||||
|
||||
for _, n := range dnsDiscoveredNodes {
|
||||
if n.ENR != nil {
|
||||
bootnodes = append(bootnodes, n.ENR)
|
||||
}
|
||||
}
|
||||
|
||||
return WithDiscoveryV5(port, bootnodes, autoUpdate), nil
|
||||
}
|
||||
|
|
|
@ -25,6 +25,9 @@ const (
|
|||
None KeyKind = "None"
|
||||
)
|
||||
|
||||
const Unencrypted = 0
|
||||
const V1Encryption = 1
|
||||
|
||||
// Payload contains the data of the message to encode
|
||||
type Payload struct {
|
||||
Data []byte // Raw message payload
|
||||
|
@ -94,7 +97,7 @@ func EncodeWakuMessage(message *pb.WakuMessage, keyInfo *KeyInfo) error {
|
|||
Key: keyInfo,
|
||||
}
|
||||
|
||||
encodedBytes, err := payload.Encode(message.Version)
|
||||
encodedBytes, err := payload.Encode(message.GetVersion())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -106,7 +109,7 @@ func EncodeWakuMessage(message *pb.WakuMessage, keyInfo *KeyInfo) error {
|
|||
// DecodePayload decodes a WakuMessage depending on the version parameter.
|
||||
// 0 for raw unencrypted data, and 1 for using WakuV1 decoding
|
||||
func DecodePayload(message *pb.WakuMessage, keyInfo *KeyInfo) (*DecodedPayload, error) {
|
||||
switch message.Version {
|
||||
switch message.GetVersion() {
|
||||
case uint32(0):
|
||||
return &DecodedPayload{Data: message.Payload}, nil
|
||||
case uint32(1):
|
||||
|
|
|
@ -18,6 +18,7 @@ import (
|
|||
"github.com/waku-org/go-waku/logging"
|
||||
wps "github.com/waku-org/go-waku/waku/v2/peerstore"
|
||||
waku_proto "github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/service"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
|
@ -34,7 +35,7 @@ type PeerConnectionStrategy struct {
|
|||
|
||||
paused atomic.Bool
|
||||
dialTimeout time.Duration
|
||||
*CommonDiscoveryService
|
||||
*service.CommonDiscoveryService
|
||||
subscriptions []subscription
|
||||
|
||||
backoff backoff.BackoffFactory
|
||||
|
@ -43,7 +44,7 @@ type PeerConnectionStrategy struct {
|
|||
|
||||
type subscription struct {
|
||||
ctx context.Context
|
||||
ch <-chan PeerData
|
||||
ch <-chan service.PeerData
|
||||
}
|
||||
|
||||
// backoff describes the strategy used to decide how long to backoff after previously attempting to connect to a peer
|
||||
|
@ -71,7 +72,7 @@ func NewPeerConnectionStrategy(pm *PeerManager,
|
|||
pc := &PeerConnectionStrategy{
|
||||
cache: cache,
|
||||
dialTimeout: dialTimeout,
|
||||
CommonDiscoveryService: NewCommonDiscoveryService(),
|
||||
CommonDiscoveryService: service.NewCommonDiscoveryService(),
|
||||
pm: pm,
|
||||
backoff: getBackOff(),
|
||||
logger: logger.Named("discovery-connector"),
|
||||
|
@ -86,7 +87,7 @@ type connCacheData struct {
|
|||
}
|
||||
|
||||
// Subscribe receives channels on which discovered peers should be pushed
|
||||
func (c *PeerConnectionStrategy) Subscribe(ctx context.Context, ch <-chan PeerData) {
|
||||
func (c *PeerConnectionStrategy) Subscribe(ctx context.Context, ch <-chan service.PeerData) {
|
||||
// if not running yet, store the subscription and return
|
||||
if err := c.ErrOnNotRunning(); err != nil {
|
||||
c.mux.Lock()
|
||||
|
@ -129,6 +130,7 @@ func (c *PeerConnectionStrategy) consumeSubscription(s subscription) {
|
|||
if len(c.host.Network().Peers()) < waku_proto.GossipSubOptimalFullMeshSize {
|
||||
triggerImmediateConnection = true
|
||||
}
|
||||
c.logger.Debug("adding discovered peer", logging.HostID("peer", p.AddrInfo.ID))
|
||||
c.pm.AddDiscoveredPeer(p, triggerImmediateConnection)
|
||||
|
||||
case <-time.After(1 * time.Second):
|
||||
|
|
121
vendor/github.com/waku-org/go-waku/waku/v2/peermanager/peer_discovery.go
generated
vendored
Normal file
121
vendor/github.com/waku-org/go-waku/waku/v2/peermanager/peer_discovery.go
generated
vendored
Normal file
|
@ -0,0 +1,121 @@
|
|||
package peermanager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/discv5"
|
||||
wps "github.com/waku-org/go-waku/waku/v2/peerstore"
|
||||
waku_proto "github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
wenr "github.com/waku-org/go-waku/waku/v2/protocol/enr"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
|
||||
"github.com/waku-org/go-waku/waku/v2/service"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// DiscoverAndConnectToPeers discovers peers using discoveryv5 and connects to the peers.
|
||||
// It discovers peers till maxCount peers are found for the cluster,shard and protocol or the context passed expires.
|
||||
func (pm *PeerManager) DiscoverAndConnectToPeers(ctx context.Context, cluster uint16,
|
||||
shard uint16, serviceProtocol protocol.ID, maxCount int) error {
|
||||
if pm.discoveryService == nil {
|
||||
return nil
|
||||
}
|
||||
peers, err := pm.discoverOnDemand(cluster, shard, serviceProtocol, ctx, maxCount)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pm.logger.Debug("discovered peers on demand ", zap.Int("noOfPeers", len(peers)))
|
||||
connectNow := false
|
||||
//Add discovered peers to peerStore and connect to them
|
||||
for idx, p := range peers {
|
||||
if serviceProtocol != relay.WakuRelayID_v200 && idx <= maxCount {
|
||||
//how many connections to initiate? Maybe this could be a config exposed to client API.
|
||||
//For now just going ahead with initiating connections with 2 nodes in case of non-relay service peers
|
||||
//In case of relay let it go through connectivityLoop
|
||||
connectNow = true
|
||||
}
|
||||
pm.AddDiscoveredPeer(p, connectNow)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RegisterWakuProtocol to be used by Waku protocols that could be used for peer discovery
|
||||
// Which means protoocl should be as defined in waku2 ENR key in https://rfc.vac.dev/spec/31/.
|
||||
func (pm *PeerManager) RegisterWakuProtocol(proto protocol.ID, bitField uint8) {
|
||||
pm.wakuprotoToENRFieldMap[proto] = WakuProtoInfo{waku2ENRBitField: bitField}
|
||||
}
|
||||
|
||||
// OnDemandPeerDiscovery initiates an on demand peer discovery and
|
||||
// filters peers based on cluster,shard and any wakuservice protocols specified
|
||||
func (pm *PeerManager) discoverOnDemand(cluster uint16,
|
||||
shard uint16, wakuProtocol protocol.ID, ctx context.Context, maxCount int) ([]service.PeerData, error) {
|
||||
var peers []service.PeerData
|
||||
|
||||
wakuProtoInfo, ok := pm.wakuprotoToENRFieldMap[wakuProtocol]
|
||||
if !ok {
|
||||
pm.logger.Error("cannot do on demand discovery for non-waku protocol", zap.String("protocol", string(wakuProtocol)))
|
||||
return nil, errors.New("cannot do on demand discovery for non-waku protocol")
|
||||
}
|
||||
iterator, err := pm.discoveryService.PeerIterator(
|
||||
discv5.FilterShard(cluster, shard),
|
||||
discv5.FilterCapabilities(wakuProtoInfo.waku2ENRBitField),
|
||||
)
|
||||
if err != nil {
|
||||
pm.logger.Error("failed to find peers for shard and services", zap.Uint16("cluster", cluster),
|
||||
zap.Uint16("shard", shard), zap.String("service", string(wakuProtocol)), zap.Error(err))
|
||||
return peers, err
|
||||
}
|
||||
|
||||
//Iterate and fill peers.
|
||||
defer iterator.Close()
|
||||
|
||||
for iterator.Next() {
|
||||
|
||||
pInfo, err := wenr.EnodeToPeerInfo(iterator.Node())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
pData := service.PeerData{
|
||||
Origin: wps.Discv5,
|
||||
ENR: iterator.Node(),
|
||||
AddrInfo: *pInfo,
|
||||
}
|
||||
peers = append(peers, pData)
|
||||
|
||||
if len(peers) >= maxCount {
|
||||
pm.logger.Debug("found required number of nodes, stopping on demand discovery", zap.Uint16("cluster", cluster),
|
||||
zap.Uint16("shard", shard), zap.Int("required-nodes", maxCount))
|
||||
break
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
pm.logger.Error("failed to find peers for shard and services", zap.Uint16("cluster", cluster),
|
||||
zap.Uint16("shard", shard), zap.String("service", string(wakuProtocol)), zap.Error(ctx.Err()))
|
||||
return nil, ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
}
|
||||
return peers, nil
|
||||
}
|
||||
|
||||
func (pm *PeerManager) discoverPeersByPubsubTopics(pubsubTopics []string, proto protocol.ID, ctx context.Context, maxCount int) {
|
||||
shardsInfo, err := waku_proto.TopicsToRelayShards(pubsubTopics...)
|
||||
if err != nil {
|
||||
pm.logger.Error("failed to convert pubsub topic to shard", zap.Strings("topics", pubsubTopics), zap.Error(err))
|
||||
return
|
||||
}
|
||||
if len(shardsInfo) > 0 {
|
||||
for _, shardInfo := range shardsInfo {
|
||||
err = pm.DiscoverAndConnectToPeers(ctx, shardInfo.ClusterID, shardInfo.ShardIDs[0], proto, maxCount)
|
||||
if err != nil {
|
||||
pm.logger.Error("failed to discover and conenct to peers", zap.Error(err))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
pm.logger.Debug("failed to convert pubsub topics to shards as one of the topics is named pubsubTopic", zap.Strings("topics", pubsubTopics))
|
||||
}
|
||||
}
|
|
@ -3,10 +3,10 @@ package peermanager
|
|||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/event"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
|
@ -14,13 +14,14 @@ import (
|
|||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/peerstore"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/libp2p/go-libp2p/p2p/protocol/ping"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
"github.com/waku-org/go-waku/logging"
|
||||
"github.com/waku-org/go-waku/waku/v2/discv5"
|
||||
wps "github.com/waku-org/go-waku/waku/v2/peerstore"
|
||||
waku_proto "github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
wenr "github.com/waku-org/go-waku/waku/v2/protocol/enr"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
|
||||
"github.com/waku-org/go-waku/waku/v2/service"
|
||||
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
@ -30,20 +31,29 @@ type NodeTopicDetails struct {
|
|||
topic *pubsub.Topic
|
||||
}
|
||||
|
||||
// WakuProtoInfo holds protocol specific info
|
||||
// To be used at a later stage to set various config such as criteria for peer management specific to each Waku protocols
|
||||
// This should make peer-manager agnostic to protocol
|
||||
type WakuProtoInfo struct {
|
||||
waku2ENRBitField uint8
|
||||
}
|
||||
|
||||
// PeerManager applies various controls and manage connections towards peers.
|
||||
type PeerManager struct {
|
||||
peerConnector *PeerConnectionStrategy
|
||||
maxPeers int
|
||||
maxRelayPeers int
|
||||
logger *zap.Logger
|
||||
InRelayPeersTarget int
|
||||
OutRelayPeersTarget int
|
||||
host host.Host
|
||||
serviceSlots *ServiceSlots
|
||||
ctx context.Context
|
||||
sub event.Subscription
|
||||
topicMutex sync.RWMutex
|
||||
subRelayTopics map[string]*NodeTopicDetails
|
||||
peerConnector *PeerConnectionStrategy
|
||||
maxPeers int
|
||||
maxRelayPeers int
|
||||
logger *zap.Logger
|
||||
InRelayPeersTarget int
|
||||
OutRelayPeersTarget int
|
||||
host host.Host
|
||||
serviceSlots *ServiceSlots
|
||||
ctx context.Context
|
||||
sub event.Subscription
|
||||
topicMutex sync.RWMutex
|
||||
subRelayTopics map[string]*NodeTopicDetails
|
||||
discoveryService *discv5.DiscoveryV5
|
||||
wakuprotoToENRFieldMap map[protocol.ID]WakuProtoInfo
|
||||
}
|
||||
|
||||
// PeerSelection provides various options based on which Peer is selected from a list of peers.
|
||||
|
@ -88,13 +98,14 @@ func NewPeerManager(maxConnections int, maxPeers int, logger *zap.Logger) *PeerM
|
|||
}
|
||||
|
||||
pm := &PeerManager{
|
||||
logger: logger.Named("peer-manager"),
|
||||
maxRelayPeers: maxRelayPeers,
|
||||
InRelayPeersTarget: inRelayPeersTarget,
|
||||
OutRelayPeersTarget: outRelayPeersTarget,
|
||||
serviceSlots: NewServiceSlot(),
|
||||
subRelayTopics: make(map[string]*NodeTopicDetails),
|
||||
maxPeers: maxPeers,
|
||||
logger: logger.Named("peer-manager"),
|
||||
maxRelayPeers: maxRelayPeers,
|
||||
InRelayPeersTarget: inRelayPeersTarget,
|
||||
OutRelayPeersTarget: outRelayPeersTarget,
|
||||
serviceSlots: NewServiceSlot(),
|
||||
subRelayTopics: make(map[string]*NodeTopicDetails),
|
||||
maxPeers: maxPeers,
|
||||
wakuprotoToENRFieldMap: map[protocol.ID]WakuProtoInfo{},
|
||||
}
|
||||
logger.Info("PeerManager init values", zap.Int("maxConnections", maxConnections),
|
||||
zap.Int("maxRelayPeers", maxRelayPeers),
|
||||
|
@ -105,6 +116,11 @@ func NewPeerManager(maxConnections int, maxPeers int, logger *zap.Logger) *PeerM
|
|||
return pm
|
||||
}
|
||||
|
||||
// SetDiscv5 sets the discoveryv5 service to be used for peer discovery.
|
||||
func (pm *PeerManager) SetDiscv5(discv5 *discv5.DiscoveryV5) {
|
||||
pm.discoveryService = discv5
|
||||
}
|
||||
|
||||
// SetHost sets the host to be used in order to access the peerStore.
|
||||
func (pm *PeerManager) SetHost(host host.Host) {
|
||||
pm.host = host
|
||||
|
@ -117,6 +133,9 @@ func (pm *PeerManager) SetPeerConnector(pc *PeerConnectionStrategy) {
|
|||
|
||||
// Start starts the processing to be done by peer manager.
|
||||
func (pm *PeerManager) Start(ctx context.Context) {
|
||||
|
||||
pm.RegisterWakuProtocol(relay.WakuRelayID_v200, relay.WakuRelayENRField)
|
||||
|
||||
pm.ctx = ctx
|
||||
if pm.sub != nil {
|
||||
go pm.peerEventLoop(ctx)
|
||||
|
@ -154,7 +173,7 @@ func (pm *PeerManager) GroupPeersByDirection(specificPeers ...peer.ID) (inPeers
|
|||
outPeers = append(outPeers, p)
|
||||
}
|
||||
} else {
|
||||
pm.logger.Error("Failed to retrieve peer direction",
|
||||
pm.logger.Error("failed to retrieve peer direction",
|
||||
logging.HostID("peerID", p), zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
@ -169,7 +188,7 @@ func (pm *PeerManager) getRelayPeers(specificPeers ...peer.ID) (inRelayPeers pee
|
|||
if err != nil {
|
||||
return
|
||||
}
|
||||
pm.logger.Debug("Number of peers connected", zap.Int("inPeers", inPeers.Len()),
|
||||
pm.logger.Debug("number of peers connected", zap.Int("inPeers", inPeers.Len()),
|
||||
zap.Int("outPeers", outPeers.Len()))
|
||||
|
||||
//Need to filter peers to check if they support relay
|
||||
|
@ -192,15 +211,17 @@ func (pm *PeerManager) ensureMinRelayConnsPerTopic() {
|
|||
curPeers := topicInst.topic.ListPeers()
|
||||
curPeerLen := len(curPeers)
|
||||
if curPeerLen < waku_proto.GossipSubOptimalFullMeshSize {
|
||||
pm.logger.Info("Subscribed topic is unhealthy, initiating more connections to maintain health",
|
||||
pm.logger.Debug("subscribed topic is unhealthy, initiating more connections to maintain health",
|
||||
zap.String("pubSubTopic", topicStr), zap.Int("connectedPeerCount", curPeerLen),
|
||||
zap.Int("optimumPeers", waku_proto.GossipSubOptimalFullMeshSize))
|
||||
//Find not connected peers.
|
||||
notConnectedPeers := pm.getNotConnectedPers(topicStr)
|
||||
if notConnectedPeers.Len() == 0 {
|
||||
//TODO: Trigger on-demand discovery for this topic.
|
||||
pm.logger.Debug("could not find any peers in peerstore to connect to, discovering more", zap.String("pubSubTopic", topicStr))
|
||||
pm.discoverPeersByPubsubTopics([]string{topicStr}, relay.WakuRelayID_v200, pm.ctx, 2)
|
||||
continue
|
||||
}
|
||||
pm.logger.Debug("connecting to eligible peers in peerstore", zap.String("pubSubTopic", topicStr))
|
||||
//Connect to eligible peers.
|
||||
numPeersToConnect := waku_proto.GossipSubOptimalFullMeshSize - curPeerLen
|
||||
|
||||
|
@ -220,7 +241,7 @@ func (pm *PeerManager) connectToRelayPeers() {
|
|||
pm.ensureMinRelayConnsPerTopic()
|
||||
|
||||
inRelayPeers, outRelayPeers := pm.getRelayPeers()
|
||||
pm.logger.Info("number of relay peers connected",
|
||||
pm.logger.Debug("number of relay peers connected",
|
||||
zap.Int("in", inRelayPeers.Len()),
|
||||
zap.Int("out", outRelayPeers.Len()))
|
||||
if inRelayPeers.Len() > 0 &&
|
||||
|
@ -229,28 +250,10 @@ func (pm *PeerManager) connectToRelayPeers() {
|
|||
}
|
||||
}
|
||||
|
||||
// addrInfoToPeerData returns addressinfo for a peer
|
||||
// If addresses are expired, it removes the peer from host peerStore and returns nil.
|
||||
func addrInfoToPeerData(origin wps.Origin, peerID peer.ID, host host.Host) *PeerData {
|
||||
addrs := host.Peerstore().Addrs(peerID)
|
||||
if len(addrs) == 0 {
|
||||
//Addresses expired, remove peer from peerStore
|
||||
host.Peerstore().RemovePeer(peerID)
|
||||
return nil
|
||||
}
|
||||
return &PeerData{
|
||||
Origin: origin,
|
||||
AddrInfo: peer.AddrInfo{
|
||||
ID: peerID,
|
||||
Addrs: addrs,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// connectToPeers connects to peers provided in the list if the addresses have not expired.
|
||||
func (pm *PeerManager) connectToPeers(peers peer.IDSlice) {
|
||||
for _, peerID := range peers {
|
||||
peerData := addrInfoToPeerData(wps.PeerManager, peerID, pm.host)
|
||||
peerData := AddrInfoToPeerData(wps.PeerManager, peerID, pm.host)
|
||||
if peerData == nil {
|
||||
continue
|
||||
}
|
||||
|
@ -287,18 +290,50 @@ func (pm *PeerManager) pruneInRelayConns(inRelayPeers peer.IDSlice) {
|
|||
p := inRelayPeers[pruningStartIndex]
|
||||
err := pm.host.Network().ClosePeer(p)
|
||||
if err != nil {
|
||||
pm.logger.Warn("Failed to disconnect connection towards peer",
|
||||
pm.logger.Warn("failed to disconnect connection towards peer",
|
||||
logging.HostID("peerID", p))
|
||||
}
|
||||
pm.logger.Debug("Successfully disconnected connection towards peer",
|
||||
pm.logger.Debug("successfully disconnected connection towards peer",
|
||||
logging.HostID("peerID", p))
|
||||
}
|
||||
}
|
||||
|
||||
func (pm *PeerManager) processPeerENR(p *service.PeerData) []protocol.ID {
|
||||
shards, err := wenr.RelaySharding(p.ENR.Record())
|
||||
if err != nil {
|
||||
pm.logger.Error("could not derive relayShards from ENR", zap.Error(err),
|
||||
logging.HostID("peer", p.AddrInfo.ID), zap.String("enr", p.ENR.String()))
|
||||
} else {
|
||||
if shards != nil {
|
||||
p.PubsubTopics = make([]string, 0)
|
||||
topics := shards.Topics()
|
||||
for _, topic := range topics {
|
||||
topicStr := topic.String()
|
||||
p.PubsubTopics = append(p.PubsubTopics, topicStr)
|
||||
}
|
||||
} else {
|
||||
pm.logger.Debug("ENR doesn't have relay shards", logging.HostID("peer", p.AddrInfo.ID))
|
||||
}
|
||||
}
|
||||
supportedProtos := []protocol.ID{}
|
||||
//Identify and specify protocols supported by the peer based on the discovered peer's ENR
|
||||
var enrField wenr.WakuEnrBitfield
|
||||
if err := p.ENR.Record().Load(enr.WithEntry(wenr.WakuENRField, &enrField)); err == nil {
|
||||
for proto, protoENR := range pm.wakuprotoToENRFieldMap {
|
||||
protoENRField := protoENR.waku2ENRBitField
|
||||
if protoENRField&enrField != 0 {
|
||||
supportedProtos = append(supportedProtos, proto)
|
||||
//Add Service peers to serviceSlots.
|
||||
pm.addPeerToServiceSlot(proto, p.AddrInfo.ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
return supportedProtos
|
||||
}
|
||||
|
||||
// AddDiscoveredPeer to add dynamically discovered peers.
|
||||
// Note that these peers will not be set in service-slots.
|
||||
// TODO: It maybe good to set in service-slots based on services supported in the ENR
|
||||
func (pm *PeerManager) AddDiscoveredPeer(p PeerData, connectNow bool) {
|
||||
func (pm *PeerManager) AddDiscoveredPeer(p service.PeerData, connectNow bool) {
|
||||
//Doing this check again inside addPeer, in order to avoid additional complexity of rollingBack other changes.
|
||||
if pm.maxPeers <= pm.host.Peerstore().Peers().Len() {
|
||||
return
|
||||
|
@ -306,30 +341,16 @@ func (pm *PeerManager) AddDiscoveredPeer(p PeerData, connectNow bool) {
|
|||
//Check if the peer is already present, if so skip adding
|
||||
_, err := pm.host.Peerstore().(wps.WakuPeerstore).Origin(p.AddrInfo.ID)
|
||||
if err == nil {
|
||||
pm.logger.Debug("Found discovered peer already in peerStore", logging.HostID("peer", p.AddrInfo.ID))
|
||||
pm.logger.Debug("peer already in peerStore", logging.HostID("peer", p.AddrInfo.ID))
|
||||
return
|
||||
}
|
||||
// Try to fetch shard info from ENR to arrive at pubSub topics.
|
||||
if len(p.PubSubTopics) == 0 && p.ENR != nil {
|
||||
shards, err := wenr.RelaySharding(p.ENR.Record())
|
||||
if err != nil {
|
||||
pm.logger.Error("Could not derive relayShards from ENR", zap.Error(err),
|
||||
logging.HostID("peer", p.AddrInfo.ID), zap.String("enr", p.ENR.String()))
|
||||
} else {
|
||||
if shards != nil {
|
||||
p.PubSubTopics = make([]string, 0)
|
||||
topics := shards.Topics()
|
||||
for _, topic := range topics {
|
||||
topicStr := topic.String()
|
||||
p.PubSubTopics = append(p.PubSubTopics, topicStr)
|
||||
}
|
||||
} else {
|
||||
pm.logger.Debug("ENR doesn't have relay shards", logging.HostID("peer", p.AddrInfo.ID))
|
||||
}
|
||||
}
|
||||
supportedProtos := []protocol.ID{}
|
||||
if len(p.PubsubTopics) == 0 && p.ENR != nil {
|
||||
// Try to fetch shard info and supported protocols from ENR to arrive at pubSub topics.
|
||||
supportedProtos = pm.processPeerENR(&p)
|
||||
}
|
||||
|
||||
_ = pm.addPeer(p.AddrInfo.ID, p.AddrInfo.Addrs, p.Origin, p.PubSubTopics)
|
||||
_ = pm.addPeer(p.AddrInfo.ID, p.AddrInfo.Addrs, p.Origin, p.PubsubTopics, supportedProtos...)
|
||||
|
||||
if p.ENR != nil {
|
||||
err := pm.host.Peerstore().(wps.WakuPeerstore).SetENR(p.AddrInfo.ID, p.ENR)
|
||||
|
@ -339,6 +360,7 @@ func (pm *PeerManager) AddDiscoveredPeer(p PeerData, connectNow bool) {
|
|||
}
|
||||
}
|
||||
if connectNow {
|
||||
pm.logger.Debug("connecting now to discovered peer", logging.HostID("peer", p.AddrInfo.ID))
|
||||
go pm.peerConnector.PushToChan(p)
|
||||
}
|
||||
}
|
||||
|
@ -347,6 +369,7 @@ func (pm *PeerManager) AddDiscoveredPeer(p PeerData, connectNow bool) {
|
|||
// It also sets additional metadata such as origin, ENR and supported protocols
|
||||
func (pm *PeerManager) addPeer(ID peer.ID, addrs []ma.Multiaddr, origin wps.Origin, pubSubTopics []string, protocols ...protocol.ID) error {
|
||||
if pm.maxPeers <= pm.host.Peerstore().Peers().Len() {
|
||||
pm.logger.Error("could not add peer as peer store capacity is reached", logging.HostID("peer", ID), zap.Int("capacity", pm.maxPeers))
|
||||
return errors.New("peer store capacity reached")
|
||||
}
|
||||
pm.logger.Info("adding peer to peerstore", logging.HostID("peer", ID))
|
||||
|
@ -366,6 +389,7 @@ func (pm *PeerManager) addPeer(ID peer.ID, addrs []ma.Multiaddr, origin wps.Orig
|
|||
if len(protocols) > 0 {
|
||||
err = pm.host.Peerstore().AddProtocols(ID, protocols...)
|
||||
if err != nil {
|
||||
pm.logger.Error("could not set protocols", zap.Error(err), logging.HostID("peer", ID))
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -382,12 +406,29 @@ func (pm *PeerManager) addPeer(ID peer.ID, addrs []ma.Multiaddr, origin wps.Orig
|
|||
return nil
|
||||
}
|
||||
|
||||
func AddrInfoToPeerData(origin wps.Origin, peerID peer.ID, host host.Host, pubsubTopics ...string) *service.PeerData {
|
||||
addrs := host.Peerstore().Addrs(peerID)
|
||||
if len(addrs) == 0 {
|
||||
//Addresses expired, remove peer from peerStore
|
||||
host.Peerstore().RemovePeer(peerID)
|
||||
return nil
|
||||
}
|
||||
return &service.PeerData{
|
||||
Origin: origin,
|
||||
AddrInfo: peer.AddrInfo{
|
||||
ID: peerID,
|
||||
Addrs: addrs,
|
||||
},
|
||||
PubsubTopics: pubsubTopics,
|
||||
}
|
||||
}
|
||||
|
||||
// AddPeer adds peer to the peerStore and also to service slots
|
||||
func (pm *PeerManager) AddPeer(address ma.Multiaddr, origin wps.Origin, pubSubTopics []string, protocols ...protocol.ID) (peer.ID, error) {
|
||||
func (pm *PeerManager) AddPeer(address ma.Multiaddr, origin wps.Origin, pubsubTopics []string, protocols ...protocol.ID) (*service.PeerData, error) {
|
||||
//Assuming all addresses have peerId
|
||||
info, err := peer.AddrInfoFromP2pAddr(address)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
//Add Service peers to serviceSlots.
|
||||
|
@ -396,12 +437,26 @@ func (pm *PeerManager) AddPeer(address ma.Multiaddr, origin wps.Origin, pubSubTo
|
|||
}
|
||||
|
||||
//Add to the peer-store
|
||||
err = pm.addPeer(info.ID, info.Addrs, origin, pubSubTopics, protocols...)
|
||||
err = pm.addPeer(info.ID, info.Addrs, origin, pubsubTopics, protocols...)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return info.ID, nil
|
||||
pData := &service.PeerData{
|
||||
Origin: origin,
|
||||
AddrInfo: peer.AddrInfo{
|
||||
ID: info.ID,
|
||||
Addrs: info.Addrs,
|
||||
},
|
||||
PubsubTopics: pubsubTopics,
|
||||
}
|
||||
|
||||
return pData, nil
|
||||
}
|
||||
|
||||
// Connect establishes a connection to a
|
||||
func (pm *PeerManager) Connect(pData *service.PeerData) {
|
||||
go pm.peerConnector.PushToChan(*pData)
|
||||
}
|
||||
|
||||
// RemovePeer deletes peer from the peerStore after disconnecting it.
|
||||
|
@ -418,210 +473,14 @@ func (pm *PeerManager) RemovePeer(peerID peer.ID) {
|
|||
// If relay proto is passed, it is not added to serviceSlot.
|
||||
func (pm *PeerManager) addPeerToServiceSlot(proto protocol.ID, peerID peer.ID) {
|
||||
if proto == relay.WakuRelayID_v200 {
|
||||
pm.logger.Warn("Cannot add Relay peer to service peer slots")
|
||||
pm.logger.Debug("cannot add Relay peer to service peer slots")
|
||||
return
|
||||
}
|
||||
|
||||
//For now adding the peer to serviceSlot which means the latest added peer would be given priority.
|
||||
//TODO: Ideally we should sort the peers per service and return best peer based on peer score or RTT etc.
|
||||
pm.logger.Info("Adding peer to service slots", logging.HostID("peer", peerID),
|
||||
pm.logger.Info("adding peer to service slots", logging.HostID("peer", peerID),
|
||||
zap.String("service", string(proto)))
|
||||
// getPeers returns nil for WakuRelayIDv200 protocol, but we don't run this ServiceSlot code for WakuRelayIDv200 protocol
|
||||
pm.serviceSlots.getPeers(proto).add(peerID)
|
||||
}
|
||||
|
||||
// SelectPeerByContentTopic is used to return a random peer that supports a given protocol for given contentTopic.
|
||||
// If a list of specific peers is passed, the peer will be chosen from that list assuming
|
||||
// it supports the chosen protocol and contentTopic, otherwise it will chose a peer from the service slot.
|
||||
// If a peer cannot be found in the service slot, a peer will be selected from node peerstore
|
||||
func (pm *PeerManager) SelectPeerByContentTopic(proto protocol.ID, contentTopic string, specificPeers ...peer.ID) (peer.ID, error) {
|
||||
pubsubTopic, err := waku_proto.GetPubSubTopicFromContentTopic(contentTopic)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return pm.SelectPeer(PeerSelectionCriteria{PubsubTopic: pubsubTopic, Proto: proto, SpecificPeers: specificPeers})
|
||||
}
|
||||
|
||||
// SelectRandomPeer is used to return a random peer that supports a given protocol.
|
||||
// If a list of specific peers is passed, the peer will be chosen from that list assuming
|
||||
// it supports the chosen protocol, otherwise it will chose a peer from the service slot.
|
||||
// If a peer cannot be found in the service slot, a peer will be selected from node peerstore
|
||||
// if pubSubTopic is specified, peer is selected from list that support the pubSubTopic
|
||||
func (pm *PeerManager) SelectRandomPeer(criteria PeerSelectionCriteria) (peer.ID, error) {
|
||||
// @TODO We need to be more strategic about which peers we dial. Right now we just set one on the service.
|
||||
// Ideally depending on the query and our set of peers we take a subset of ideal peers.
|
||||
// This will require us to check for various factors such as:
|
||||
// - which topics they track
|
||||
// - latency?
|
||||
|
||||
peerID, err := pm.selectServicePeer(criteria.Proto, criteria.PubsubTopic, criteria.SpecificPeers...)
|
||||
if err == nil {
|
||||
return peerID, nil
|
||||
} else if !errors.Is(err, ErrNoPeersAvailable) {
|
||||
pm.logger.Debug("could not retrieve random peer from slot", zap.String("protocol", string(criteria.Proto)), zap.String("pubsubTopic", criteria.PubsubTopic), zap.Error(err))
|
||||
return "", err
|
||||
}
|
||||
|
||||
// if not found in serviceSlots or proto == WakuRelayIDv200
|
||||
filteredPeers, err := pm.FilterPeersByProto(criteria.SpecificPeers, criteria.Proto)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if criteria.PubsubTopic != "" {
|
||||
filteredPeers = pm.host.Peerstore().(wps.WakuPeerstore).PeersByPubSubTopic(criteria.PubsubTopic, filteredPeers...)
|
||||
}
|
||||
return selectRandomPeer(filteredPeers, pm.logger)
|
||||
}
|
||||
|
||||
func (pm *PeerManager) selectServicePeer(proto protocol.ID, pubSubTopic string, specificPeers ...peer.ID) (peer.ID, error) {
|
||||
//Try to fetch from serviceSlot
|
||||
if slot := pm.serviceSlots.getPeers(proto); slot != nil {
|
||||
if pubSubTopic == "" {
|
||||
return slot.getRandom()
|
||||
} else { //PubsubTopic based selection
|
||||
keys := make([]peer.ID, 0, len(slot.m))
|
||||
for i := range slot.m {
|
||||
keys = append(keys, i)
|
||||
}
|
||||
selectedPeers := pm.host.Peerstore().(wps.WakuPeerstore).PeersByPubSubTopic(pubSubTopic, keys...)
|
||||
return selectRandomPeer(selectedPeers, pm.logger)
|
||||
}
|
||||
}
|
||||
|
||||
return "", ErrNoPeersAvailable
|
||||
}
|
||||
|
||||
// PeerSelectionCriteria is the selection Criteria that is used by PeerManager to select peers.
|
||||
type PeerSelectionCriteria struct {
|
||||
SelectionType PeerSelection
|
||||
Proto protocol.ID
|
||||
PubsubTopic string
|
||||
SpecificPeers peer.IDSlice
|
||||
Ctx context.Context
|
||||
}
|
||||
|
||||
// SelectPeer selects a peer based on selectionType specified.
|
||||
// Context is required only in case of selectionType set to LowestRTT
|
||||
func (pm *PeerManager) SelectPeer(criteria PeerSelectionCriteria) (peer.ID, error) {
|
||||
|
||||
switch criteria.SelectionType {
|
||||
case Automatic:
|
||||
return pm.SelectRandomPeer(criteria)
|
||||
case LowestRTT:
|
||||
if criteria.Ctx == nil {
|
||||
criteria.Ctx = context.Background()
|
||||
pm.logger.Warn("context is not passed for peerSelectionwithRTT, using background context")
|
||||
}
|
||||
return pm.SelectPeerWithLowestRTT(criteria)
|
||||
default:
|
||||
return "", errors.New("unknown peer selection type specified")
|
||||
}
|
||||
}
|
||||
|
||||
type pingResult struct {
|
||||
p peer.ID
|
||||
rtt time.Duration
|
||||
}
|
||||
|
||||
// SelectPeerWithLowestRTT will select a peer that supports a specific protocol with the lowest reply time
|
||||
// If a list of specific peers is passed, the peer will be chosen from that list assuming
|
||||
// it supports the chosen protocol, otherwise it will chose a peer from the node peerstore
|
||||
// TO OPTIMIZE: As of now the peer with lowest RTT is identified when select is called, this should be optimized
|
||||
// to maintain the RTT as part of peer-scoring and just select based on that.
|
||||
func (pm *PeerManager) SelectPeerWithLowestRTT(criteria PeerSelectionCriteria) (peer.ID, error) {
|
||||
var peers peer.IDSlice
|
||||
var err error
|
||||
if criteria.Ctx == nil {
|
||||
criteria.Ctx = context.Background()
|
||||
}
|
||||
|
||||
if criteria.PubsubTopic != "" {
|
||||
peers = pm.host.Peerstore().(wps.WakuPeerstore).PeersByPubSubTopic(criteria.PubsubTopic, criteria.SpecificPeers...)
|
||||
}
|
||||
|
||||
peers, err = pm.FilterPeersByProto(peers, criteria.Proto)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
wg := sync.WaitGroup{}
|
||||
waitCh := make(chan struct{})
|
||||
pingCh := make(chan pingResult, 1000)
|
||||
|
||||
wg.Add(len(peers))
|
||||
|
||||
go func() {
|
||||
for _, p := range peers {
|
||||
go func(p peer.ID) {
|
||||
defer wg.Done()
|
||||
ctx, cancel := context.WithTimeout(criteria.Ctx, 3*time.Second)
|
||||
defer cancel()
|
||||
result := <-ping.Ping(ctx, pm.host, p)
|
||||
if result.Error == nil {
|
||||
pingCh <- pingResult{
|
||||
p: p,
|
||||
rtt: result.RTT,
|
||||
}
|
||||
} else {
|
||||
pm.logger.Debug("could not ping", logging.HostID("peer", p), zap.Error(result.Error))
|
||||
}
|
||||
}(p)
|
||||
}
|
||||
wg.Wait()
|
||||
close(waitCh)
|
||||
close(pingCh)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-waitCh:
|
||||
var min *pingResult
|
||||
for p := range pingCh {
|
||||
if min == nil {
|
||||
min = &p
|
||||
} else {
|
||||
if p.rtt < min.rtt {
|
||||
min = &p
|
||||
}
|
||||
}
|
||||
}
|
||||
if min == nil {
|
||||
return "", ErrNoPeersAvailable
|
||||
}
|
||||
|
||||
return min.p, nil
|
||||
case <-criteria.Ctx.Done():
|
||||
return "", ErrNoPeersAvailable
|
||||
}
|
||||
}
|
||||
|
||||
// selectRandomPeer selects randomly a peer from the list of peers passed.
|
||||
func selectRandomPeer(peers peer.IDSlice, log *zap.Logger) (peer.ID, error) {
|
||||
if len(peers) >= 1 {
|
||||
peerID := peers[rand.Intn(len(peers))]
|
||||
// TODO: proper heuristic here that compares peer scores and selects "best" one. For now a random peer for the given protocol is returned
|
||||
return peerID, nil // nolint: gosec
|
||||
}
|
||||
|
||||
return "", ErrNoPeersAvailable
|
||||
}
|
||||
|
||||
// FilterPeersByProto filters list of peers that support specified protocols.
|
||||
// If specificPeers is nil, all peers in the host's peerStore are considered for filtering.
|
||||
func (pm *PeerManager) FilterPeersByProto(specificPeers peer.IDSlice, proto ...protocol.ID) (peer.IDSlice, error) {
|
||||
peerSet := specificPeers
|
||||
if len(peerSet) == 0 {
|
||||
peerSet = pm.host.Peerstore().Peers()
|
||||
}
|
||||
|
||||
var peers peer.IDSlice
|
||||
for _, peer := range peerSet {
|
||||
protocols, err := pm.host.Peerstore().SupportsProtocols(peer, proto...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(protocols) > 0 {
|
||||
peers = append(peers, peer)
|
||||
}
|
||||
}
|
||||
return peers, nil
|
||||
}
|
||||
|
|
232
vendor/github.com/waku-org/go-waku/waku/v2/peermanager/peer_selection.go
generated
vendored
Normal file
232
vendor/github.com/waku-org/go-waku/waku/v2/peermanager/peer_selection.go
generated
vendored
Normal file
|
@ -0,0 +1,232 @@
|
|||
package peermanager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/libp2p/go-libp2p/p2p/protocol/ping"
|
||||
"github.com/waku-org/go-waku/logging"
|
||||
wps "github.com/waku-org/go-waku/waku/v2/peerstore"
|
||||
waku_proto "github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// SelectPeerByContentTopic is used to return a random peer that supports a given protocol for given contentTopic.
|
||||
// If a list of specific peers is passed, the peer will be chosen from that list assuming
|
||||
// it supports the chosen protocol and contentTopic, otherwise it will chose a peer from the service slot.
|
||||
// If a peer cannot be found in the service slot, a peer will be selected from node peerstore
|
||||
func (pm *PeerManager) SelectPeerByContentTopics(proto protocol.ID, contentTopics []string, specificPeers ...peer.ID) (peer.ID, error) {
|
||||
pubsubTopics := []string{}
|
||||
for _, cTopic := range contentTopics {
|
||||
pubsubTopic, err := waku_proto.GetPubSubTopicFromContentTopic(cTopic)
|
||||
if err != nil {
|
||||
pm.logger.Debug("selectPeer: failed to get contentTopic from pubsubTopic", zap.String("contentTopic", cTopic))
|
||||
return "", err
|
||||
}
|
||||
pubsubTopics = append(pubsubTopics, pubsubTopic)
|
||||
}
|
||||
return pm.SelectPeer(PeerSelectionCriteria{PubsubTopics: pubsubTopics, Proto: proto, SpecificPeers: specificPeers})
|
||||
}
|
||||
|
||||
// SelectRandomPeer is used to return a random peer that supports a given protocol.
|
||||
// If a list of specific peers is passed, the peer will be chosen from that list assuming
|
||||
// it supports the chosen protocol, otherwise it will chose a peer from the service slot.
|
||||
// If a peer cannot be found in the service slot, a peer will be selected from node peerstore
|
||||
// if pubSubTopic is specified, peer is selected from list that support the pubSubTopic
|
||||
func (pm *PeerManager) SelectRandomPeer(criteria PeerSelectionCriteria) (peer.ID, error) {
|
||||
// @TODO We need to be more strategic about which peers we dial. Right now we just set one on the service.
|
||||
// Ideally depending on the query and our set of peers we take a subset of ideal peers.
|
||||
// This will require us to check for various factors such as:
|
||||
// - which topics they track
|
||||
// - latency?
|
||||
|
||||
peerID, err := pm.selectServicePeer(criteria.Proto, criteria.PubsubTopics, criteria.Ctx, criteria.SpecificPeers...)
|
||||
if err == nil {
|
||||
return peerID, nil
|
||||
} else if !errors.Is(err, ErrNoPeersAvailable) {
|
||||
pm.logger.Debug("could not retrieve random peer from slot", zap.String("protocol", string(criteria.Proto)),
|
||||
zap.Strings("pubsubTopics", criteria.PubsubTopics), zap.Error(err))
|
||||
return "", err
|
||||
}
|
||||
|
||||
// if not found in serviceSlots or proto == WakuRelayIDv200
|
||||
filteredPeers, err := pm.FilterPeersByProto(criteria.SpecificPeers, criteria.Proto)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(criteria.PubsubTopics) > 0 {
|
||||
filteredPeers = pm.host.Peerstore().(wps.WakuPeerstore).PeersByPubSubTopics(criteria.PubsubTopics, filteredPeers...)
|
||||
}
|
||||
return selectRandomPeer(filteredPeers, pm.logger)
|
||||
}
|
||||
|
||||
func (pm *PeerManager) selectServicePeer(proto protocol.ID, pubsubTopics []string, ctx context.Context, specificPeers ...peer.ID) (peer.ID, error) {
|
||||
var peerID peer.ID
|
||||
var err error
|
||||
for retryCnt := 0; retryCnt < 1; retryCnt++ {
|
||||
//Try to fetch from serviceSlot
|
||||
if slot := pm.serviceSlots.getPeers(proto); slot != nil {
|
||||
if len(pubsubTopics) == 0 || (len(pubsubTopics) == 1 && pubsubTopics[0] == "") {
|
||||
return slot.getRandom()
|
||||
} else { //PubsubTopic based selection
|
||||
keys := make([]peer.ID, 0, len(slot.m))
|
||||
for i := range slot.m {
|
||||
keys = append(keys, i)
|
||||
}
|
||||
selectedPeers := pm.host.Peerstore().(wps.WakuPeerstore).PeersByPubSubTopics(pubsubTopics, keys...)
|
||||
peerID, err = selectRandomPeer(selectedPeers, pm.logger)
|
||||
if err == nil {
|
||||
return peerID, nil
|
||||
} else {
|
||||
pm.logger.Debug("discovering peers by pubsubTopic", zap.Strings("pubsubTopics", pubsubTopics))
|
||||
//Trigger on-demand discovery for this topic and connect to peer immediately.
|
||||
//For now discover atleast 1 peer for the criteria
|
||||
pm.discoverPeersByPubsubTopics(pubsubTopics, proto, ctx, 1)
|
||||
//Try to fetch peers again.
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if peerID == "" {
|
||||
pm.logger.Debug("could not retrieve random peer from slot", zap.Error(err))
|
||||
}
|
||||
return "", ErrNoPeersAvailable
|
||||
}
|
||||
|
||||
// PeerSelectionCriteria is the selection Criteria that is used by PeerManager to select peers.
|
||||
type PeerSelectionCriteria struct {
|
||||
SelectionType PeerSelection
|
||||
Proto protocol.ID
|
||||
PubsubTopics []string
|
||||
SpecificPeers peer.IDSlice
|
||||
Ctx context.Context
|
||||
}
|
||||
|
||||
// SelectPeer selects a peer based on selectionType specified.
|
||||
// Context is required only in case of selectionType set to LowestRTT
|
||||
func (pm *PeerManager) SelectPeer(criteria PeerSelectionCriteria) (peer.ID, error) {
|
||||
|
||||
switch criteria.SelectionType {
|
||||
case Automatic:
|
||||
return pm.SelectRandomPeer(criteria)
|
||||
case LowestRTT:
|
||||
return pm.SelectPeerWithLowestRTT(criteria)
|
||||
default:
|
||||
return "", errors.New("unknown peer selection type specified")
|
||||
}
|
||||
}
|
||||
|
||||
type pingResult struct {
|
||||
p peer.ID
|
||||
rtt time.Duration
|
||||
}
|
||||
|
||||
// SelectPeerWithLowestRTT will select a peer that supports a specific protocol with the lowest reply time
|
||||
// If a list of specific peers is passed, the peer will be chosen from that list assuming
|
||||
// it supports the chosen protocol, otherwise it will chose a peer from the node peerstore
|
||||
// TO OPTIMIZE: As of now the peer with lowest RTT is identified when select is called, this should be optimized
|
||||
// to maintain the RTT as part of peer-scoring and just select based on that.
|
||||
func (pm *PeerManager) SelectPeerWithLowestRTT(criteria PeerSelectionCriteria) (peer.ID, error) {
|
||||
var peers peer.IDSlice
|
||||
var err error
|
||||
if criteria.Ctx == nil {
|
||||
pm.logger.Warn("context is not passed for peerSelectionwithRTT, using background context")
|
||||
criteria.Ctx = context.Background()
|
||||
}
|
||||
|
||||
if len(criteria.PubsubTopics) == 0 || (len(criteria.PubsubTopics) == 1 && criteria.PubsubTopics[0] == "") {
|
||||
peers = pm.host.Peerstore().(wps.WakuPeerstore).PeersByPubSubTopics(criteria.PubsubTopics, criteria.SpecificPeers...)
|
||||
}
|
||||
|
||||
peers, err = pm.FilterPeersByProto(peers, criteria.Proto)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
wg := sync.WaitGroup{}
|
||||
waitCh := make(chan struct{})
|
||||
pingCh := make(chan pingResult, 1000)
|
||||
|
||||
wg.Add(len(peers))
|
||||
|
||||
go func() {
|
||||
for _, p := range peers {
|
||||
go func(p peer.ID) {
|
||||
defer wg.Done()
|
||||
ctx, cancel := context.WithTimeout(criteria.Ctx, 3*time.Second)
|
||||
defer cancel()
|
||||
result := <-ping.Ping(ctx, pm.host, p)
|
||||
if result.Error == nil {
|
||||
pingCh <- pingResult{
|
||||
p: p,
|
||||
rtt: result.RTT,
|
||||
}
|
||||
} else {
|
||||
pm.logger.Debug("could not ping", logging.HostID("peer", p), zap.Error(result.Error))
|
||||
}
|
||||
}(p)
|
||||
}
|
||||
wg.Wait()
|
||||
close(waitCh)
|
||||
close(pingCh)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-waitCh:
|
||||
var min *pingResult
|
||||
for p := range pingCh {
|
||||
if min == nil {
|
||||
min = &p
|
||||
} else {
|
||||
if p.rtt < min.rtt {
|
||||
min = &p
|
||||
}
|
||||
}
|
||||
}
|
||||
if min == nil {
|
||||
return "", ErrNoPeersAvailable
|
||||
}
|
||||
|
||||
return min.p, nil
|
||||
case <-criteria.Ctx.Done():
|
||||
return "", ErrNoPeersAvailable
|
||||
}
|
||||
}
|
||||
|
||||
// selectRandomPeer selects randomly a peer from the list of peers passed.
|
||||
func selectRandomPeer(peers peer.IDSlice, log *zap.Logger) (peer.ID, error) {
|
||||
if len(peers) >= 1 {
|
||||
peerID := peers[rand.Intn(len(peers))]
|
||||
// TODO: proper heuristic here that compares peer scores and selects "best" one. For now a random peer for the given protocol is returned
|
||||
return peerID, nil // nolint: gosec
|
||||
}
|
||||
|
||||
return "", ErrNoPeersAvailable
|
||||
}
|
||||
|
||||
// FilterPeersByProto filters list of peers that support specified protocols.
|
||||
// If specificPeers is nil, all peers in the host's peerStore are considered for filtering.
|
||||
func (pm *PeerManager) FilterPeersByProto(specificPeers peer.IDSlice, proto ...protocol.ID) (peer.IDSlice, error) {
|
||||
peerSet := specificPeers
|
||||
if len(peerSet) == 0 {
|
||||
peerSet = pm.host.Peerstore().Peers()
|
||||
}
|
||||
|
||||
var peers peer.IDSlice
|
||||
for _, peer := range peerSet {
|
||||
protocols, err := pm.host.Peerstore().SupportsProtocols(peer, proto...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(protocols) > 0 {
|
||||
peers = append(peers, peer)
|
||||
}
|
||||
}
|
||||
return peers, nil
|
||||
}
|
|
@ -59,6 +59,7 @@ type WakuPeerstore interface {
|
|||
RemovePubSubTopic(p peer.ID, topic string) error
|
||||
PubSubTopics(p peer.ID) ([]string, error)
|
||||
SetPubSubTopics(p peer.ID, topics []string) error
|
||||
PeersByPubSubTopics(pubSubTopics []string, specificPeers ...peer.ID) peer.IDSlice
|
||||
PeersByPubSubTopic(pubSubTopic string, specificPeers ...peer.ID) peer.IDSlice
|
||||
}
|
||||
|
||||
|
@ -207,7 +208,38 @@ func (ps *WakuPeerstoreImpl) PubSubTopics(p peer.ID) ([]string, error) {
|
|||
return result.([]string), nil
|
||||
}
|
||||
|
||||
// PeersByPubSubTopic Returns list of peers by pubSubTopic
|
||||
// PeersByPubSubTopic Returns list of peers that support list of pubSubTopics
|
||||
// If specifiPeers are listed, filtering is done from them otherwise from all peers in peerstore
|
||||
func (ps *WakuPeerstoreImpl) PeersByPubSubTopics(pubSubTopics []string, specificPeers ...peer.ID) peer.IDSlice {
|
||||
if specificPeers == nil {
|
||||
specificPeers = ps.Peers()
|
||||
}
|
||||
var result peer.IDSlice
|
||||
for _, p := range specificPeers {
|
||||
topics, err := ps.PubSubTopics(p)
|
||||
if err == nil {
|
||||
//Convoluted and crazy logic to find subset of topics
|
||||
// Could not find a better way to do it?
|
||||
peerTopicMap := make(map[string]struct{})
|
||||
match := true
|
||||
for _, topic := range topics {
|
||||
peerTopicMap[topic] = struct{}{}
|
||||
}
|
||||
for _, topic := range pubSubTopics {
|
||||
if _, ok := peerTopicMap[topic]; !ok {
|
||||
match = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if match {
|
||||
result = append(result, p)
|
||||
}
|
||||
} //Note: skipping a peer in case of an error as there would be others available.
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// PeersByPubSubTopic Returns list of peers that support a single pubSubTopic
|
||||
// If specifiPeers are listed, filtering is done from them otherwise from all peers in peerstore
|
||||
func (ps *WakuPeerstoreImpl) PeersByPubSubTopic(pubSubTopic string, specificPeers ...peer.ID) peer.IDSlice {
|
||||
if specificPeers == nil {
|
||||
|
|
|
@ -15,6 +15,10 @@ func NewContentTopicSet(contentTopics ...string) ContentTopicSet {
|
|||
return s
|
||||
}
|
||||
|
||||
func (cf ContentTopicSet) ToList() []string {
|
||||
return maps.Keys(cf)
|
||||
}
|
||||
|
||||
// ContentFilter is used to specify the filter to be applied for a FilterNode.
|
||||
// Topic means pubSubTopic - optional in case of using contentTopics that following Auto sharding, mandatory in case of named or static sharding.
|
||||
// ContentTopics - Specify list of content topics to be filtered under a pubSubTopic (for named and static sharding), or a list of contentTopics (in case ofAuto sharding)
|
||||
|
@ -25,7 +29,7 @@ type ContentFilter struct {
|
|||
}
|
||||
|
||||
func (cf ContentFilter) ContentTopicsList() []string {
|
||||
return maps.Keys(cf.ContentTopics)
|
||||
return cf.ContentTopics.ToList()
|
||||
}
|
||||
|
||||
func NewContentFilter(pubsubTopic string, contentTopics ...string) ContentFilter {
|
||||
|
@ -48,23 +52,5 @@ func (cf ContentFilter) Equals(cf1 ContentFilter) bool {
|
|||
|
||||
// This function converts a contentFilter into a map of pubSubTopics and corresponding contentTopics
|
||||
func ContentFilterToPubSubTopicMap(contentFilter ContentFilter) (map[PubsubTopicStr][]ContentTopicStr, error) {
|
||||
pubSubTopicMap := make(map[string][]string)
|
||||
|
||||
if contentFilter.PubsubTopic != "" {
|
||||
pubSubTopicMap[contentFilter.PubsubTopic] = contentFilter.ContentTopicsList()
|
||||
} else {
|
||||
//Parse the content-Topics to figure out shards.
|
||||
for _, cTopicString := range contentFilter.ContentTopicsList() {
|
||||
pTopicStr, err := GetPubSubTopicFromContentTopic(cTopicString)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, ok := pubSubTopicMap[pTopicStr]
|
||||
if !ok {
|
||||
pubSubTopicMap[pTopicStr] = []string{}
|
||||
}
|
||||
pubSubTopicMap[pTopicStr] = append(pubSubTopicMap[pTopicStr], cTopicString)
|
||||
}
|
||||
}
|
||||
return pubSubTopicMap, nil
|
||||
return GeneratePubsubToContentTopicMap(contentFilter.PubsubTopic, contentFilter.ContentTopicsList())
|
||||
}
|
||||
|
|
|
@ -15,7 +15,7 @@ var ErrInvalidGeneration = errors.New("generation should be a number")
|
|||
type ContentTopic struct {
|
||||
ContentTopicParams
|
||||
ApplicationName string
|
||||
ApplicationVersion uint32
|
||||
ApplicationVersion string
|
||||
ContentTopicName string
|
||||
Encoding string
|
||||
}
|
||||
|
@ -35,12 +35,13 @@ type ContentTopicOption func(*ContentTopicParams)
|
|||
|
||||
// String formats a content topic in string format as per RFC 23.
|
||||
func (ct ContentTopic) String() string {
|
||||
return fmt.Sprintf("/%s/%d/%s/%s", ct.ApplicationName, ct.ApplicationVersion, ct.ContentTopicName, ct.Encoding)
|
||||
return fmt.Sprintf("/%s/%s/%s/%s", ct.ApplicationName, ct.ApplicationVersion, ct.ContentTopicName, ct.Encoding)
|
||||
}
|
||||
|
||||
// NewContentTopic creates a new content topic based on params specified.
|
||||
// Returns ErrInvalidGeneration if an unsupported generation is specified.
|
||||
func NewContentTopic(applicationName string, applicationVersion uint32,
|
||||
// Note that this is recommended to be used for autosharding where contentTopic format is enforced as per https://rfc.vac.dev/spec/51/#content-topics-format-for-autosharding
|
||||
func NewContentTopic(applicationName string, applicationVersion string,
|
||||
contentTopicName string, encoding string, opts ...ContentTopicOption) (ContentTopic, error) {
|
||||
|
||||
params := new(ContentTopicParams)
|
||||
|
@ -83,18 +84,19 @@ func (ct ContentTopic) Equal(ct2 ContentTopic) bool {
|
|||
}
|
||||
|
||||
// StringToContentTopic can be used to create a ContentTopic object from a string
|
||||
// Note that this has to be used only when following the rfc format of contentTopic, which is currently validated only for Autosharding.
|
||||
// For static and named-sharding, contentTopic can be of any format and hence it is not recommended to use this function.
|
||||
// This can be updated if required to handle such a case.
|
||||
func StringToContentTopic(s string) (ContentTopic, error) {
|
||||
p := strings.Split(s, "/")
|
||||
switch len(p) {
|
||||
case 5:
|
||||
vNum, err := strconv.ParseUint(p[2], 10, 32)
|
||||
if err != nil {
|
||||
if len(p[1]) == 0 || len(p[2]) == 0 || len(p[3]) == 0 || len(p[4]) == 0 {
|
||||
return ContentTopic{}, ErrInvalidFormat
|
||||
}
|
||||
|
||||
return ContentTopic{
|
||||
ApplicationName: p[1],
|
||||
ApplicationVersion: uint32(vNum),
|
||||
ApplicationVersion: p[2],
|
||||
ContentTopicName: p[3],
|
||||
Encoding: p[4],
|
||||
}, nil
|
||||
|
@ -106,15 +108,13 @@ func StringToContentTopic(s string) (ContentTopic, error) {
|
|||
if err != nil || generation > 0 {
|
||||
return ContentTopic{}, ErrInvalidGeneration
|
||||
}
|
||||
vNum, err := strconv.ParseUint(p[3], 10, 32)
|
||||
if err != nil {
|
||||
if len(p[2]) == 0 || len(p[3]) == 0 || len(p[4]) == 0 || len(p[5]) == 0 {
|
||||
return ContentTopic{}, ErrInvalidFormat
|
||||
}
|
||||
|
||||
return ContentTopic{
|
||||
ContentTopicParams: ContentTopicParams{Generation: generation},
|
||||
ApplicationName: p[2],
|
||||
ApplicationVersion: uint32(vNum),
|
||||
ApplicationVersion: p[3],
|
||||
ContentTopicName: p[4],
|
||||
Encoding: p[5],
|
||||
}, nil
|
||||
|
|
|
@ -146,6 +146,8 @@ func EnodeToPeerInfo(node *enode.Node) (*peer.AddrInfo, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(res) == 0 {
|
||||
return nil, errors.New("could not retrieve peer addresses from enr")
|
||||
}
|
||||
return &res[0], nil
|
||||
}
|
||||
|
|
|
@ -27,7 +27,7 @@ func NewEnvelope(msg *wpb.WakuMessage, receiverTime int64, pubSubTopic string) *
|
|||
index: &pb.Index{
|
||||
Digest: digest[:],
|
||||
ReceiverTime: receiverTime,
|
||||
SenderTime: msg.Timestamp,
|
||||
SenderTime: msg.GetTimestamp(),
|
||||
PubsubTopic: pubSubTopic,
|
||||
},
|
||||
}
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"math"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
|
@ -17,13 +18,16 @@ import (
|
|||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/waku-org/go-waku/logging"
|
||||
"github.com/waku-org/go-waku/waku/v2/peermanager"
|
||||
"github.com/waku-org/go-waku/waku/v2/peerstore"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/filter/pb"
|
||||
wpb "github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/subscription"
|
||||
"github.com/waku-org/go-waku/waku/v2/service"
|
||||
"github.com/waku-org/go-waku/waku/v2/timesource"
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
|
@ -32,11 +36,12 @@ import (
|
|||
const FilterPushID_v20beta1 = libp2pProtocol.ID("/vac/waku/filter-push/2.0.0-beta1")
|
||||
|
||||
var (
|
||||
ErrNoPeersAvailable = errors.New("no suitable remote peers")
|
||||
ErrNoPeersAvailable = errors.New("no suitable remote peers")
|
||||
ErrSubscriptionNotFound = errors.New("subscription not found")
|
||||
)
|
||||
|
||||
type WakuFilterLightNode struct {
|
||||
*protocol.CommonService
|
||||
*service.CommonService
|
||||
h host.Host
|
||||
broadcaster relay.Broadcaster //TODO: Move the broadcast functionality outside of relay client to a higher SDK layer.s
|
||||
timesource timesource.Timesource
|
||||
|
@ -46,11 +51,27 @@ type WakuFilterLightNode struct {
|
|||
pm *peermanager.PeerManager
|
||||
}
|
||||
|
||||
type WakuFilterPushResult struct {
|
||||
type WakuFilterPushError struct {
|
||||
Err error
|
||||
PeerID peer.ID
|
||||
}
|
||||
|
||||
type WakuFilterPushResult struct {
|
||||
errs []WakuFilterPushError
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
func (arr *WakuFilterPushResult) Add(err WakuFilterPushError) {
|
||||
arr.Lock()
|
||||
defer arr.Unlock()
|
||||
arr.errs = append(arr.errs, err)
|
||||
}
|
||||
func (arr *WakuFilterPushResult) Errors() []WakuFilterPushError {
|
||||
arr.RLock()
|
||||
defer arr.RUnlock()
|
||||
return arr.errs
|
||||
}
|
||||
|
||||
// NewWakuFilterLightnode returns a new instance of Waku Filter struct setup according to the chosen parameter and options
|
||||
// Note that broadcaster is optional.
|
||||
// Takes an optional peermanager if WakuFilterLightnode is being created along with WakuNode.
|
||||
|
@ -62,7 +83,7 @@ func NewWakuFilterLightNode(broadcaster relay.Broadcaster, pm *peermanager.PeerM
|
|||
wf.broadcaster = broadcaster
|
||||
wf.timesource = timesource
|
||||
wf.pm = pm
|
||||
wf.CommonService = protocol.NewCommonService()
|
||||
wf.CommonService = service.NewCommonService()
|
||||
wf.metrics = newMetrics(reg)
|
||||
|
||||
return wf
|
||||
|
@ -90,19 +111,21 @@ func (wf *WakuFilterLightNode) start() error {
|
|||
func (wf *WakuFilterLightNode) Stop() {
|
||||
wf.CommonService.Stop(func() {
|
||||
wf.h.RemoveStreamHandler(FilterPushID_v20beta1)
|
||||
res, err := wf.unsubscribeAll(wf.Context())
|
||||
if err != nil {
|
||||
wf.log.Warn("unsubscribing from full nodes", zap.Error(err))
|
||||
}
|
||||
|
||||
for r := range res {
|
||||
if r.Err != nil {
|
||||
wf.log.Warn("unsubscribing from full nodes", zap.Error(r.Err), logging.HostID("peerID", r.PeerID))
|
||||
if wf.subscriptions.Count() > 0 {
|
||||
res, err := wf.unsubscribeAll(wf.Context())
|
||||
if err != nil {
|
||||
wf.log.Warn("unsubscribing from full nodes", zap.Error(err))
|
||||
}
|
||||
|
||||
for _, r := range res.Errors() {
|
||||
if r.Err != nil {
|
||||
wf.log.Warn("unsubscribing from full nodes", zap.Error(r.Err), logging.HostID("peerID", r.PeerID))
|
||||
}
|
||||
|
||||
}
|
||||
//
|
||||
wf.subscriptions.Clear()
|
||||
}
|
||||
//
|
||||
wf.subscriptions.Clear()
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -121,7 +144,7 @@ func (wf *WakuFilterLightNode) onRequest(ctx context.Context) func(network.Strea
|
|||
|
||||
reader := pbio.NewDelimitedReader(stream, math.MaxInt32)
|
||||
|
||||
messagePush := &pb.MessagePushV2{}
|
||||
messagePush := &pb.MessagePush{}
|
||||
err := reader.ReadMsg(messagePush)
|
||||
if err != nil {
|
||||
logger.Error("reading message push", zap.Error(err))
|
||||
|
@ -243,13 +266,63 @@ func (wf *WakuFilterLightNode) request(ctx context.Context, params *FilterSubscr
|
|||
|
||||
if filterSubscribeResponse.StatusCode != http.StatusOK {
|
||||
wf.metrics.RecordError(errorResponse)
|
||||
err := NewFilterError(int(filterSubscribeResponse.StatusCode), filterSubscribeResponse.StatusDesc)
|
||||
errMessage := ""
|
||||
if filterSubscribeResponse.StatusDesc != nil {
|
||||
errMessage = *filterSubscribeResponse.StatusDesc
|
||||
}
|
||||
err := NewFilterError(int(filterSubscribeResponse.StatusCode), errMessage)
|
||||
return &err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (wf *WakuFilterLightNode) handleFilterSubscribeOptions(ctx context.Context, contentFilter protocol.ContentFilter, opts []FilterSubscribeOption) (*FilterSubscribeParameters, map[string][]string, error) {
|
||||
params := new(FilterSubscribeParameters)
|
||||
params.log = wf.log
|
||||
params.host = wf.h
|
||||
params.pm = wf.pm
|
||||
|
||||
optList := DefaultSubscriptionOptions()
|
||||
optList = append(optList, opts...)
|
||||
for _, opt := range optList {
|
||||
err := opt(params)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
pubSubTopicMap, err := protocol.ContentFilterToPubSubTopicMap(contentFilter)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
//Add Peer to peerstore.
|
||||
if params.pm != nil && params.peerAddr != nil {
|
||||
pData, err := wf.pm.AddPeer(params.peerAddr, peerstore.Static, maps.Keys(pubSubTopicMap), FilterSubscribeID_v20beta1)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
wf.pm.Connect(pData)
|
||||
params.selectedPeer = pData.AddrInfo.ID
|
||||
}
|
||||
if params.pm != nil && params.selectedPeer == "" {
|
||||
params.selectedPeer, err = wf.pm.SelectPeer(
|
||||
peermanager.PeerSelectionCriteria{
|
||||
SelectionType: params.peerSelectionType,
|
||||
Proto: FilterSubscribeID_v20beta1,
|
||||
PubsubTopics: maps.Keys(pubSubTopicMap),
|
||||
SpecificPeers: params.preferredPeers,
|
||||
Ctx: ctx,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
return params, pubSubTopicMap, nil
|
||||
}
|
||||
|
||||
// Subscribe setups a subscription to receive messages that match a specific content filter
|
||||
// If contentTopics passed result in different pubSub topics (due to Auto/Static sharding), then multiple subscription requests are sent to the peer.
|
||||
// This may change if Filterv2 protocol is updated to handle such a scenario in a single request.
|
||||
|
@ -261,36 +334,21 @@ func (wf *WakuFilterLightNode) Subscribe(ctx context.Context, contentFilter prot
|
|||
return nil, err
|
||||
}
|
||||
|
||||
params := new(FilterSubscribeParameters)
|
||||
params.log = wf.log
|
||||
params.host = wf.h
|
||||
params.pm = wf.pm
|
||||
|
||||
optList := DefaultSubscriptionOptions()
|
||||
optList = append(optList, opts...)
|
||||
for _, opt := range optList {
|
||||
err := opt(params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
pubSubTopicMap, err := protocol.ContentFilterToPubSubTopicMap(contentFilter)
|
||||
|
||||
params, pubSubTopicMap, err := wf.handleFilterSubscribeOptions(ctx, contentFilter, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
failedContentTopics := []string{}
|
||||
subscriptions := make([]*subscription.SubscriptionDetails, 0)
|
||||
for pubSubTopic, cTopics := range pubSubTopicMap {
|
||||
var selectedPeer peer.ID
|
||||
//TO Optimize: find a peer with all pubSubTopics in the list if possible, if not only then look for single pubSubTopic
|
||||
if params.pm != nil && params.selectedPeer == "" {
|
||||
selectedPeer, err = wf.pm.SelectPeer(
|
||||
peermanager.PeerSelectionCriteria{
|
||||
SelectionType: params.peerSelectionType,
|
||||
Proto: FilterSubscribeID_v20beta1,
|
||||
PubsubTopic: pubSubTopic,
|
||||
PubsubTopics: []string{pubSubTopic},
|
||||
SpecificPeers: params.preferredPeers,
|
||||
Ctx: ctx,
|
||||
},
|
||||
|
@ -395,59 +453,8 @@ func (wf *WakuFilterLightNode) IsSubscriptionAlive(ctx context.Context, subscrip
|
|||
return wf.Ping(ctx, subscription.PeerID)
|
||||
}
|
||||
|
||||
func (wf *WakuFilterLightNode) Subscriptions() []*subscription.SubscriptionDetails {
|
||||
wf.RLock()
|
||||
defer wf.RUnlock()
|
||||
if err := wf.ErrOnNotRunning(); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
wf.subscriptions.RLock()
|
||||
defer wf.subscriptions.RUnlock()
|
||||
|
||||
var output []*subscription.SubscriptionDetails
|
||||
|
||||
for _, peerSubscription := range wf.subscriptions.Items {
|
||||
for _, subscriptions := range peerSubscription.SubsPerPubsubTopic {
|
||||
for _, subscriptionDetail := range subscriptions {
|
||||
output = append(output, subscriptionDetail)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return output
|
||||
}
|
||||
|
||||
func (wf *WakuFilterLightNode) cleanupSubscriptions(peerID peer.ID, contentFilter protocol.ContentFilter) {
|
||||
wf.subscriptions.Lock()
|
||||
defer wf.subscriptions.Unlock()
|
||||
|
||||
peerSubscription, ok := wf.subscriptions.Items[peerID]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
subscriptionDetailList, ok := peerSubscription.SubsPerPubsubTopic[contentFilter.PubsubTopic]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
for subscriptionDetailID, subscriptionDetail := range subscriptionDetailList {
|
||||
subscriptionDetail.Remove(contentFilter.ContentTopicsList()...)
|
||||
if len(subscriptionDetail.ContentFilter.ContentTopics) == 0 {
|
||||
delete(subscriptionDetailList, subscriptionDetailID)
|
||||
subscriptionDetail.CloseC()
|
||||
}
|
||||
}
|
||||
|
||||
if len(subscriptionDetailList) == 0 {
|
||||
delete(wf.subscriptions.Items[peerID].SubsPerPubsubTopic, contentFilter.PubsubTopic)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Unsubscribe is used to stop receiving messages from a peer that match a content filter
|
||||
func (wf *WakuFilterLightNode) Unsubscribe(ctx context.Context, contentFilter protocol.ContentFilter, opts ...FilterSubscribeOption) (<-chan WakuFilterPushResult, error) {
|
||||
func (wf *WakuFilterLightNode) Unsubscribe(ctx context.Context, contentFilter protocol.ContentFilter, opts ...FilterSubscribeOption) (*WakuFilterPushResult, error) {
|
||||
wf.RLock()
|
||||
defer wf.RUnlock()
|
||||
if err := wf.ErrOnNotRunning(); err != nil {
|
||||
|
@ -475,28 +482,28 @@ func (wf *WakuFilterLightNode) Unsubscribe(ctx context.Context, contentFilter pr
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resultChan := make(chan WakuFilterPushResult, len(wf.subscriptions.Items))
|
||||
result := &WakuFilterPushResult{}
|
||||
for pTopic, cTopics := range pubSubTopicMap {
|
||||
cFilter := protocol.NewContentFilter(pTopic, cTopics...)
|
||||
for peerID := range wf.subscriptions.Items {
|
||||
if params.selectedPeer != "" && peerID != params.selectedPeer {
|
||||
continue
|
||||
}
|
||||
|
||||
subscriptions, ok := wf.subscriptions.Items[peerID]
|
||||
if !ok || subscriptions == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
wf.cleanupSubscriptions(peerID, cFilter)
|
||||
if len(subscriptions.SubsPerPubsubTopic) == 0 {
|
||||
delete(wf.subscriptions.Items, peerID)
|
||||
}
|
||||
|
||||
if params.wg != nil {
|
||||
params.wg.Add(1)
|
||||
}
|
||||
|
||||
peers := make(map[peer.ID]struct{})
|
||||
subs := wf.subscriptions.GetSubscription(params.selectedPeer, cFilter)
|
||||
if len(subs) == 0 {
|
||||
result.Add(WakuFilterPushError{
|
||||
Err: ErrSubscriptionNotFound,
|
||||
PeerID: params.selectedPeer,
|
||||
})
|
||||
continue
|
||||
}
|
||||
for _, sub := range subs {
|
||||
sub.Remove(cTopics...)
|
||||
peers[sub.PeerID] = struct{}{}
|
||||
}
|
||||
if params.wg != nil {
|
||||
params.wg.Add(len(peers))
|
||||
}
|
||||
// send unsubscribe request to all the peers
|
||||
for peerID := range peers {
|
||||
go func(peerID peer.ID) {
|
||||
defer func() {
|
||||
if params.wg != nil {
|
||||
|
@ -506,10 +513,10 @@ func (wf *WakuFilterLightNode) Unsubscribe(ctx context.Context, contentFilter pr
|
|||
err := wf.unsubscribeFromServer(ctx, &FilterSubscribeParameters{selectedPeer: peerID, requestID: params.requestID}, cFilter)
|
||||
|
||||
if params.wg != nil {
|
||||
resultChan <- WakuFilterPushResult{
|
||||
result.Add(WakuFilterPushError{
|
||||
Err: err,
|
||||
PeerID: peerID,
|
||||
}
|
||||
})
|
||||
}
|
||||
}(peerID)
|
||||
}
|
||||
|
@ -518,16 +525,24 @@ func (wf *WakuFilterLightNode) Unsubscribe(ctx context.Context, contentFilter pr
|
|||
params.wg.Wait()
|
||||
}
|
||||
|
||||
close(resultChan)
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (wf *WakuFilterLightNode) Subscriptions() []*subscription.SubscriptionDetails {
|
||||
subs := wf.subscriptions.GetSubscription("", protocol.ContentFilter{})
|
||||
return subs
|
||||
}
|
||||
|
||||
func (wf *WakuFilterLightNode) IsListening(pubsubTopic, contentTopic string) bool {
|
||||
return wf.subscriptions.IsListening(pubsubTopic, contentTopic)
|
||||
|
||||
return resultChan, nil
|
||||
}
|
||||
|
||||
// UnsubscribeWithSubscription is used to close a particular subscription
|
||||
// If there are no more subscriptions matching the passed [peer, contentFilter] pair,
|
||||
// server unsubscribe is also performed
|
||||
func (wf *WakuFilterLightNode) UnsubscribeWithSubscription(ctx context.Context, sub *subscription.SubscriptionDetails,
|
||||
opts ...FilterSubscribeOption) (<-chan WakuFilterPushResult, error) {
|
||||
opts ...FilterSubscribeOption) (*WakuFilterPushResult, error) {
|
||||
wf.RLock()
|
||||
defer wf.RUnlock()
|
||||
if err := wf.ErrOnNotRunning(); err != nil {
|
||||
|
@ -542,20 +557,18 @@ func (wf *WakuFilterLightNode) UnsubscribeWithSubscription(ctx context.Context,
|
|||
// Close this sub
|
||||
sub.Close()
|
||||
|
||||
resultChan := make(chan WakuFilterPushResult, 1)
|
||||
result := &WakuFilterPushResult{}
|
||||
|
||||
if !wf.subscriptions.Has(sub.PeerID, sub.ContentFilter) {
|
||||
// Last sub for this [peer, contentFilter] pair
|
||||
paramsCopy := params.Copy()
|
||||
paramsCopy.selectedPeer = sub.PeerID
|
||||
err = wf.unsubscribeFromServer(ctx, paramsCopy, sub.ContentFilter)
|
||||
resultChan <- WakuFilterPushResult{
|
||||
params.selectedPeer = sub.PeerID
|
||||
err = wf.unsubscribeFromServer(ctx, params, sub.ContentFilter)
|
||||
result.Add(WakuFilterPushError{
|
||||
Err: err,
|
||||
PeerID: sub.PeerID,
|
||||
}
|
||||
})
|
||||
}
|
||||
close(resultChan)
|
||||
return resultChan, err
|
||||
return result, err
|
||||
|
||||
}
|
||||
|
||||
|
@ -573,28 +586,32 @@ func (wf *WakuFilterLightNode) unsubscribeFromServer(ctx context.Context, params
|
|||
return err
|
||||
}
|
||||
|
||||
func (wf *WakuFilterLightNode) unsubscribeAll(ctx context.Context, opts ...FilterSubscribeOption) (<-chan WakuFilterPushResult, error) {
|
||||
// close all subscribe for selectedPeer or if selectedPeer == "", then all peers
|
||||
// send the unsubscribeAll request to the peers
|
||||
func (wf *WakuFilterLightNode) unsubscribeAll(ctx context.Context, opts ...FilterSubscribeOption) (*WakuFilterPushResult, error) {
|
||||
params, err := wf.getUnsubscribeParameters(opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result := &WakuFilterPushResult{}
|
||||
|
||||
wf.subscriptions.Lock()
|
||||
defer wf.subscriptions.Unlock()
|
||||
|
||||
resultChan := make(chan WakuFilterPushResult, len(wf.subscriptions.Items))
|
||||
|
||||
for peerID := range wf.subscriptions.Items {
|
||||
if params.selectedPeer != "" && peerID != params.selectedPeer {
|
||||
continue
|
||||
}
|
||||
|
||||
delete(wf.subscriptions.Items, peerID)
|
||||
|
||||
if params.wg != nil {
|
||||
params.wg.Add(1)
|
||||
}
|
||||
|
||||
peers := make(map[peer.ID]struct{})
|
||||
subs := wf.subscriptions.GetSubscription(params.selectedPeer, protocol.ContentFilter{})
|
||||
if len(subs) == 0 && params.selectedPeer != "" {
|
||||
result.Add(WakuFilterPushError{
|
||||
Err: err,
|
||||
PeerID: params.selectedPeer,
|
||||
})
|
||||
return result, ErrSubscriptionNotFound
|
||||
}
|
||||
for _, sub := range subs {
|
||||
sub.Close()
|
||||
peers[sub.PeerID] = struct{}{}
|
||||
}
|
||||
if params.wg != nil {
|
||||
params.wg.Add(len(peers))
|
||||
}
|
||||
for peerId := range peers {
|
||||
go func(peerID peer.ID) {
|
||||
defer func() {
|
||||
if params.wg != nil {
|
||||
|
@ -613,25 +630,23 @@ func (wf *WakuFilterLightNode) unsubscribeAll(ctx context.Context, opts ...Filte
|
|||
wf.log.Error("could not unsubscribe from peer", logging.HostID("peerID", peerID), zap.Error(err))
|
||||
}
|
||||
if params.wg != nil {
|
||||
resultChan <- WakuFilterPushResult{
|
||||
result.Add(WakuFilterPushError{
|
||||
Err: err,
|
||||
PeerID: peerID,
|
||||
}
|
||||
})
|
||||
}
|
||||
}(peerID)
|
||||
}(peerId)
|
||||
}
|
||||
|
||||
if params.wg != nil {
|
||||
params.wg.Wait()
|
||||
}
|
||||
|
||||
close(resultChan)
|
||||
|
||||
return resultChan, nil
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// UnsubscribeAll is used to stop receiving messages from peer(s). It does not close subscriptions
|
||||
func (wf *WakuFilterLightNode) UnsubscribeAll(ctx context.Context, opts ...FilterSubscribeOption) (<-chan WakuFilterPushResult, error) {
|
||||
func (wf *WakuFilterLightNode) UnsubscribeAll(ctx context.Context, opts ...FilterSubscribeOption) (*WakuFilterPushResult, error) {
|
||||
wf.RLock()
|
||||
defer wf.RUnlock()
|
||||
if err := wf.ErrOnNotRunning(); err != nil {
|
||||
|
|
|
@ -1,11 +1,13 @@
|
|||
package filter
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"github.com/waku-org/go-waku/waku/v2/peermanager"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"go.uber.org/zap"
|
||||
|
@ -34,6 +36,7 @@ func WithPingRequestId(requestId []byte) FilterPingOption {
|
|||
type (
|
||||
FilterSubscribeParameters struct {
|
||||
selectedPeer peer.ID
|
||||
peerAddr multiaddr.Multiaddr
|
||||
peerSelectionType peermanager.PeerSelection
|
||||
preferredPeers peer.IDSlice
|
||||
requestID []byte
|
||||
|
@ -51,6 +54,7 @@ type (
|
|||
FilterParameters struct {
|
||||
Timeout time.Duration
|
||||
MaxSubscribers int
|
||||
pm *peermanager.PeerManager
|
||||
}
|
||||
|
||||
Option func(*FilterParameters)
|
||||
|
@ -64,9 +68,27 @@ func WithTimeout(timeout time.Duration) Option {
|
|||
}
|
||||
}
|
||||
|
||||
// WithPeer is an option used to specify the peerID to request the message history.
|
||||
// Note that this option is mutually exclusive to WithPeerAddr, only one of them can be used.
|
||||
func WithPeer(p peer.ID) FilterSubscribeOption {
|
||||
return func(params *FilterSubscribeParameters) error {
|
||||
params.selectedPeer = p
|
||||
if params.peerAddr != nil {
|
||||
return errors.New("peerAddr and peerId options are mutually exclusive")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithPeerAddr is an option used to specify a peerAddress.
|
||||
// This new peer will be added to peerStore.
|
||||
// Note that this option is mutually exclusive to WithPeerAddr, only one of them can be used.
|
||||
func WithPeerAddr(pAddr multiaddr.Multiaddr) FilterSubscribeOption {
|
||||
return func(params *FilterSubscribeParameters) error {
|
||||
params.peerAddr = pAddr
|
||||
if params.selectedPeer != "" {
|
||||
return errors.New("peerAddr and peerId options are mutually exclusive")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
@ -156,6 +178,12 @@ func WithMaxSubscribers(maxSubscribers int) Option {
|
|||
}
|
||||
}
|
||||
|
||||
func WithPeerManager(pm *peermanager.PeerManager) Option {
|
||||
return func(params *FilterParameters) {
|
||||
params.pm = pm
|
||||
}
|
||||
}
|
||||
|
||||
func DefaultOptions() []Option {
|
||||
return []Option{
|
||||
WithTimeout(24 * time.Hour),
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.31.0
|
||||
// protoc v4.23.4
|
||||
// source: waku_filter_v2.proto
|
||||
// protoc v4.24.4
|
||||
// source: filter.proto
|
||||
|
||||
// 12/WAKU2-FILTER rfc: https://rfc.vac.dev/spec/12/
|
||||
|
||||
|
@ -59,11 +59,11 @@ func (x FilterSubscribeRequest_FilterSubscribeType) String() string {
|
|||
}
|
||||
|
||||
func (FilterSubscribeRequest_FilterSubscribeType) Descriptor() protoreflect.EnumDescriptor {
|
||||
return file_waku_filter_v2_proto_enumTypes[0].Descriptor()
|
||||
return file_filter_proto_enumTypes[0].Descriptor()
|
||||
}
|
||||
|
||||
func (FilterSubscribeRequest_FilterSubscribeType) Type() protoreflect.EnumType {
|
||||
return &file_waku_filter_v2_proto_enumTypes[0]
|
||||
return &file_filter_proto_enumTypes[0]
|
||||
}
|
||||
|
||||
func (x FilterSubscribeRequest_FilterSubscribeType) Number() protoreflect.EnumNumber {
|
||||
|
@ -72,7 +72,7 @@ func (x FilterSubscribeRequest_FilterSubscribeType) Number() protoreflect.EnumNu
|
|||
|
||||
// Deprecated: Use FilterSubscribeRequest_FilterSubscribeType.Descriptor instead.
|
||||
func (FilterSubscribeRequest_FilterSubscribeType) EnumDescriptor() ([]byte, []int) {
|
||||
return file_waku_filter_v2_proto_rawDescGZIP(), []int{0, 0}
|
||||
return file_filter_proto_rawDescGZIP(), []int{0, 0}
|
||||
}
|
||||
|
||||
// Protocol identifier: /vac/waku/filter-subscribe/2.0.0-beta1
|
||||
|
@ -82,7 +82,7 @@ type FilterSubscribeRequest struct {
|
|||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
|
||||
FilterSubscribeType FilterSubscribeRequest_FilterSubscribeType `protobuf:"varint,2,opt,name=filter_subscribe_type,json=filterSubscribeType,proto3,enum=pb.FilterSubscribeRequest_FilterSubscribeType" json:"filter_subscribe_type,omitempty"`
|
||||
FilterSubscribeType FilterSubscribeRequest_FilterSubscribeType `protobuf:"varint,2,opt,name=filter_subscribe_type,json=filterSubscribeType,proto3,enum=waku.filter.v2.FilterSubscribeRequest_FilterSubscribeType" json:"filter_subscribe_type,omitempty"`
|
||||
// Filter criteria
|
||||
PubsubTopic *string `protobuf:"bytes,10,opt,name=pubsub_topic,json=pubsubTopic,proto3,oneof" json:"pubsub_topic,omitempty"`
|
||||
ContentTopics []string `protobuf:"bytes,11,rep,name=content_topics,json=contentTopics,proto3" json:"content_topics,omitempty"`
|
||||
|
@ -91,7 +91,7 @@ type FilterSubscribeRequest struct {
|
|||
func (x *FilterSubscribeRequest) Reset() {
|
||||
*x = FilterSubscribeRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_waku_filter_v2_proto_msgTypes[0]
|
||||
mi := &file_filter_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
@ -104,7 +104,7 @@ func (x *FilterSubscribeRequest) String() string {
|
|||
func (*FilterSubscribeRequest) ProtoMessage() {}
|
||||
|
||||
func (x *FilterSubscribeRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_waku_filter_v2_proto_msgTypes[0]
|
||||
mi := &file_filter_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
@ -117,7 +117,7 @@ func (x *FilterSubscribeRequest) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use FilterSubscribeRequest.ProtoReflect.Descriptor instead.
|
||||
func (*FilterSubscribeRequest) Descriptor() ([]byte, []int) {
|
||||
return file_waku_filter_v2_proto_rawDescGZIP(), []int{0}
|
||||
return file_filter_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *FilterSubscribeRequest) GetRequestId() string {
|
||||
|
@ -153,15 +153,15 @@ type FilterSubscribeResponse struct {
|
|||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
|
||||
StatusCode uint32 `protobuf:"varint,10,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"`
|
||||
StatusDesc string `protobuf:"bytes,11,opt,name=status_desc,json=statusDesc,proto3" json:"status_desc,omitempty"`
|
||||
RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
|
||||
StatusCode uint32 `protobuf:"varint,10,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"`
|
||||
StatusDesc *string `protobuf:"bytes,11,opt,name=status_desc,json=statusDesc,proto3,oneof" json:"status_desc,omitempty"`
|
||||
}
|
||||
|
||||
func (x *FilterSubscribeResponse) Reset() {
|
||||
*x = FilterSubscribeResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_waku_filter_v2_proto_msgTypes[1]
|
||||
mi := &file_filter_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
@ -174,7 +174,7 @@ func (x *FilterSubscribeResponse) String() string {
|
|||
func (*FilterSubscribeResponse) ProtoMessage() {}
|
||||
|
||||
func (x *FilterSubscribeResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_waku_filter_v2_proto_msgTypes[1]
|
||||
mi := &file_filter_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
@ -187,7 +187,7 @@ func (x *FilterSubscribeResponse) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use FilterSubscribeResponse.ProtoReflect.Descriptor instead.
|
||||
func (*FilterSubscribeResponse) Descriptor() ([]byte, []int) {
|
||||
return file_waku_filter_v2_proto_rawDescGZIP(), []int{1}
|
||||
return file_filter_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *FilterSubscribeResponse) GetRequestId() string {
|
||||
|
@ -205,14 +205,14 @@ func (x *FilterSubscribeResponse) GetStatusCode() uint32 {
|
|||
}
|
||||
|
||||
func (x *FilterSubscribeResponse) GetStatusDesc() string {
|
||||
if x != nil {
|
||||
return x.StatusDesc
|
||||
if x != nil && x.StatusDesc != nil {
|
||||
return *x.StatusDesc
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Protocol identifier: /vac/waku/filter-push/2.0.0-beta1
|
||||
type MessagePushV2 struct {
|
||||
type MessagePush struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
@ -221,23 +221,23 @@ type MessagePushV2 struct {
|
|||
PubsubTopic *string `protobuf:"bytes,2,opt,name=pubsub_topic,json=pubsubTopic,proto3,oneof" json:"pubsub_topic,omitempty"`
|
||||
}
|
||||
|
||||
func (x *MessagePushV2) Reset() {
|
||||
*x = MessagePushV2{}
|
||||
func (x *MessagePush) Reset() {
|
||||
*x = MessagePush{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_waku_filter_v2_proto_msgTypes[2]
|
||||
mi := &file_filter_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *MessagePushV2) String() string {
|
||||
func (x *MessagePush) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*MessagePushV2) ProtoMessage() {}
|
||||
func (*MessagePush) ProtoMessage() {}
|
||||
|
||||
func (x *MessagePushV2) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_waku_filter_v2_proto_msgTypes[2]
|
||||
func (x *MessagePush) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_filter_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
@ -248,95 +248,99 @@ func (x *MessagePushV2) ProtoReflect() protoreflect.Message {
|
|||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use MessagePushV2.ProtoReflect.Descriptor instead.
|
||||
func (*MessagePushV2) Descriptor() ([]byte, []int) {
|
||||
return file_waku_filter_v2_proto_rawDescGZIP(), []int{2}
|
||||
// Deprecated: Use MessagePush.ProtoReflect.Descriptor instead.
|
||||
func (*MessagePush) Descriptor() ([]byte, []int) {
|
||||
return file_filter_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *MessagePushV2) GetWakuMessage() *pb.WakuMessage {
|
||||
func (x *MessagePush) GetWakuMessage() *pb.WakuMessage {
|
||||
if x != nil {
|
||||
return x.WakuMessage
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *MessagePushV2) GetPubsubTopic() string {
|
||||
func (x *MessagePush) GetPubsubTopic() string {
|
||||
if x != nil && x.PubsubTopic != nil {
|
||||
return *x.PubsubTopic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
var File_waku_filter_v2_proto protoreflect.FileDescriptor
|
||||
var File_filter_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_waku_filter_v2_proto_rawDesc = []byte{
|
||||
0x0a, 0x14, 0x77, 0x61, 0x6b, 0x75, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x32,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x12, 0x77, 0x61, 0x6b, 0x75,
|
||||
0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xdc,
|
||||
0x02, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69,
|
||||
0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72,
|
||||
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x62, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x74,
|
||||
0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x5f, 0x74, 0x79, 0x70,
|
||||
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c,
|
||||
0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72,
|
||||
0x69, 0x62, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53,
|
||||
0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x26, 0x0a, 0x0c,
|
||||
0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x0a, 0x20, 0x01,
|
||||
0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x54, 0x6f, 0x70, 0x69,
|
||||
0x63, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f,
|
||||
0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x6f,
|
||||
0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x22, 0x5f, 0x0a, 0x13, 0x46,
|
||||
0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x79,
|
||||
0x70, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x55, 0x42, 0x53, 0x43, 0x52, 0x49, 0x42, 0x45, 0x52,
|
||||
0x5f, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x55, 0x42, 0x53, 0x43,
|
||||
0x52, 0x49, 0x42, 0x45, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x55, 0x42, 0x53,
|
||||
0x43, 0x52, 0x49, 0x42, 0x45, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x53, 0x55, 0x42,
|
||||
0x53, 0x43, 0x52, 0x49, 0x42, 0x45, 0x5f, 0x41, 0x4c, 0x4c, 0x10, 0x03, 0x42, 0x0f, 0x0a, 0x0d,
|
||||
0x5f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x7a, 0x0a,
|
||||
0x17, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65,
|
||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75,
|
||||
var file_filter_proto_rawDesc = []byte{
|
||||
0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e,
|
||||
0x77, 0x61, 0x6b, 0x75, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x1d,
|
||||
0x77, 0x61, 0x6b, 0x75, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2f, 0x76, 0x31, 0x2f,
|
||||
0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe8, 0x02,
|
||||
0x0a, 0x16, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62,
|
||||
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75,
|
||||
0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74,
|
||||
0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74,
|
||||
0x75, 0x73, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73,
|
||||
0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x73, 0x63, 0x22, 0x7c, 0x0a, 0x0d, 0x4d, 0x65, 0x73,
|
||||
0x73, 0x61, 0x67, 0x65, 0x50, 0x75, 0x73, 0x68, 0x56, 0x32, 0x12, 0x32, 0x0a, 0x0c, 0x77, 0x61,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x6e, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x74, 0x65,
|
||||
0x72, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65,
|
||||
0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3a, 0x2e, 0x77, 0x61, 0x6b, 0x75, 0x2e, 0x66, 0x69,
|
||||
0x6c, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x75,
|
||||
0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46,
|
||||
0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x79,
|
||||
0x70, 0x65, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72,
|
||||
0x69, 0x62, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x26, 0x0a, 0x0c, 0x70, 0x75, 0x62, 0x73, 0x75,
|
||||
0x62, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52,
|
||||
0x0b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x88, 0x01, 0x01, 0x12,
|
||||
0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63,
|
||||
0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
|
||||
0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x22, 0x5f, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72,
|
||||
0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x13, 0x0a,
|
||||
0x0f, 0x53, 0x55, 0x42, 0x53, 0x43, 0x52, 0x49, 0x42, 0x45, 0x52, 0x5f, 0x50, 0x49, 0x4e, 0x47,
|
||||
0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x55, 0x42, 0x53, 0x43, 0x52, 0x49, 0x42, 0x45, 0x10,
|
||||
0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x55, 0x42, 0x53, 0x43, 0x52, 0x49, 0x42, 0x45,
|
||||
0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x53, 0x55, 0x42, 0x53, 0x43, 0x52, 0x49, 0x42,
|
||||
0x45, 0x5f, 0x41, 0x4c, 0x4c, 0x10, 0x03, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x70, 0x75, 0x62, 0x73,
|
||||
0x75, 0x62, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x8f, 0x01, 0x0a, 0x17, 0x46, 0x69, 0x6c,
|
||||
0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f,
|
||||
0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f,
|
||||
0x64, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
|
||||
0x43, 0x6f, 0x64, 0x65, 0x12, 0x24, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x64,
|
||||
0x65, 0x73, 0x63, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x61,
|
||||
0x74, 0x75, 0x73, 0x44, 0x65, 0x73, 0x63, 0x88, 0x01, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x73,
|
||||
0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x22, 0x87, 0x01, 0x0a, 0x0b, 0x4d,
|
||||
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x50, 0x75, 0x73, 0x68, 0x12, 0x3f, 0x0a, 0x0c, 0x77, 0x61,
|
||||
0x6b, 0x75, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
|
||||
0x32, 0x0f, 0x2e, 0x70, 0x62, 0x2e, 0x57, 0x61, 0x6b, 0x75, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
|
||||
0x65, 0x52, 0x0b, 0x77, 0x61, 0x6b, 0x75, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x26,
|
||||
0x0a, 0x0c, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02,
|
||||
0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x54, 0x6f,
|
||||
0x70, 0x69, 0x63, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x70, 0x75, 0x62, 0x73, 0x75,
|
||||
0x62, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
0x32, 0x1c, 0x2e, 0x77, 0x61, 0x6b, 0x75, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x57, 0x61, 0x6b, 0x75, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0b,
|
||||
0x77, 0x61, 0x6b, 0x75, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x26, 0x0a, 0x0c, 0x70,
|
||||
0x75, 0x62, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28,
|
||||
0x09, 0x48, 0x00, 0x52, 0x0b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x54, 0x6f, 0x70, 0x69, 0x63,
|
||||
0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x5f, 0x74,
|
||||
0x6f, 0x70, 0x69, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_waku_filter_v2_proto_rawDescOnce sync.Once
|
||||
file_waku_filter_v2_proto_rawDescData = file_waku_filter_v2_proto_rawDesc
|
||||
file_filter_proto_rawDescOnce sync.Once
|
||||
file_filter_proto_rawDescData = file_filter_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_waku_filter_v2_proto_rawDescGZIP() []byte {
|
||||
file_waku_filter_v2_proto_rawDescOnce.Do(func() {
|
||||
file_waku_filter_v2_proto_rawDescData = protoimpl.X.CompressGZIP(file_waku_filter_v2_proto_rawDescData)
|
||||
func file_filter_proto_rawDescGZIP() []byte {
|
||||
file_filter_proto_rawDescOnce.Do(func() {
|
||||
file_filter_proto_rawDescData = protoimpl.X.CompressGZIP(file_filter_proto_rawDescData)
|
||||
})
|
||||
return file_waku_filter_v2_proto_rawDescData
|
||||
return file_filter_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_waku_filter_v2_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
|
||||
var file_waku_filter_v2_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
|
||||
var file_waku_filter_v2_proto_goTypes = []interface{}{
|
||||
(FilterSubscribeRequest_FilterSubscribeType)(0), // 0: pb.FilterSubscribeRequest.FilterSubscribeType
|
||||
(*FilterSubscribeRequest)(nil), // 1: pb.FilterSubscribeRequest
|
||||
(*FilterSubscribeResponse)(nil), // 2: pb.FilterSubscribeResponse
|
||||
(*MessagePushV2)(nil), // 3: pb.MessagePushV2
|
||||
(*pb.WakuMessage)(nil), // 4: pb.WakuMessage
|
||||
var file_filter_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
|
||||
var file_filter_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
|
||||
var file_filter_proto_goTypes = []interface{}{
|
||||
(FilterSubscribeRequest_FilterSubscribeType)(0), // 0: waku.filter.v2.FilterSubscribeRequest.FilterSubscribeType
|
||||
(*FilterSubscribeRequest)(nil), // 1: waku.filter.v2.FilterSubscribeRequest
|
||||
(*FilterSubscribeResponse)(nil), // 2: waku.filter.v2.FilterSubscribeResponse
|
||||
(*MessagePush)(nil), // 3: waku.filter.v2.MessagePush
|
||||
(*pb.WakuMessage)(nil), // 4: waku.message.v1.WakuMessage
|
||||
}
|
||||
var file_waku_filter_v2_proto_depIdxs = []int32{
|
||||
0, // 0: pb.FilterSubscribeRequest.filter_subscribe_type:type_name -> pb.FilterSubscribeRequest.FilterSubscribeType
|
||||
4, // 1: pb.MessagePushV2.waku_message:type_name -> pb.WakuMessage
|
||||
var file_filter_proto_depIdxs = []int32{
|
||||
0, // 0: waku.filter.v2.FilterSubscribeRequest.filter_subscribe_type:type_name -> waku.filter.v2.FilterSubscribeRequest.FilterSubscribeType
|
||||
4, // 1: waku.filter.v2.MessagePush.waku_message:type_name -> waku.message.v1.WakuMessage
|
||||
2, // [2:2] is the sub-list for method output_type
|
||||
2, // [2:2] is the sub-list for method input_type
|
||||
2, // [2:2] is the sub-list for extension type_name
|
||||
|
@ -344,13 +348,13 @@ var file_waku_filter_v2_proto_depIdxs = []int32{
|
|||
0, // [0:2] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_waku_filter_v2_proto_init() }
|
||||
func file_waku_filter_v2_proto_init() {
|
||||
if File_waku_filter_v2_proto != nil {
|
||||
func init() { file_filter_proto_init() }
|
||||
func file_filter_proto_init() {
|
||||
if File_filter_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_waku_filter_v2_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
file_filter_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*FilterSubscribeRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
|
@ -362,7 +366,7 @@ func file_waku_filter_v2_proto_init() {
|
|||
return nil
|
||||
}
|
||||
}
|
||||
file_waku_filter_v2_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
file_filter_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*FilterSubscribeResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
|
@ -374,8 +378,8 @@ func file_waku_filter_v2_proto_init() {
|
|||
return nil
|
||||
}
|
||||
}
|
||||
file_waku_filter_v2_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*MessagePushV2); i {
|
||||
file_filter_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*MessagePush); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
|
@ -387,25 +391,26 @@ func file_waku_filter_v2_proto_init() {
|
|||
}
|
||||
}
|
||||
}
|
||||
file_waku_filter_v2_proto_msgTypes[0].OneofWrappers = []interface{}{}
|
||||
file_waku_filter_v2_proto_msgTypes[2].OneofWrappers = []interface{}{}
|
||||
file_filter_proto_msgTypes[0].OneofWrappers = []interface{}{}
|
||||
file_filter_proto_msgTypes[1].OneofWrappers = []interface{}{}
|
||||
file_filter_proto_msgTypes[2].OneofWrappers = []interface{}{}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_waku_filter_v2_proto_rawDesc,
|
||||
RawDescriptor: file_filter_proto_rawDesc,
|
||||
NumEnums: 1,
|
||||
NumMessages: 3,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_waku_filter_v2_proto_goTypes,
|
||||
DependencyIndexes: file_waku_filter_v2_proto_depIdxs,
|
||||
EnumInfos: file_waku_filter_v2_proto_enumTypes,
|
||||
MessageInfos: file_waku_filter_v2_proto_msgTypes,
|
||||
GoTypes: file_filter_proto_goTypes,
|
||||
DependencyIndexes: file_filter_proto_depIdxs,
|
||||
EnumInfos: file_filter_proto_enumTypes,
|
||||
MessageInfos: file_filter_proto_msgTypes,
|
||||
}.Build()
|
||||
File_waku_filter_v2_proto = out.File
|
||||
file_waku_filter_v2_proto_rawDesc = nil
|
||||
file_waku_filter_v2_proto_goTypes = nil
|
||||
file_waku_filter_v2_proto_depIdxs = nil
|
||||
File_filter_proto = out.File
|
||||
file_filter_proto_rawDesc = nil
|
||||
file_filter_proto_goTypes = nil
|
||||
file_filter_proto_depIdxs = nil
|
||||
}
|
|
@ -1,3 +1,3 @@
|
|||
package pb
|
||||
|
||||
//go:generate protoc -I./../../pb/. -I. --go_opt=paths=source_relative --go_opt=Mwaku_filter_v2.proto=github.com/waku-org/go-waku/waku/v2/protocol/filter/pb --go_opt=Mwaku_message.proto=github.com/waku-org/go-waku/waku/v2/protocol/pb --go_out=. ./waku_filter_v2.proto
|
||||
//go:generate protoc -I./../../waku-proto/waku/filter/v2/. -I./../../waku-proto/ --go_opt=paths=source_relative --go_opt=Mfilter.proto=github.com/waku-org/go-waku/waku/v2/protocol/filter/pb --go_opt=Mwaku/message/v1/message.proto=github.com/waku-org/go-waku/waku/v2/protocol/pb --go_out=. ./../../waku-proto/waku/filter/v2/filter.proto
|
||||
|
|
|
@ -52,7 +52,7 @@ func (x *FilterSubscribeResponse) Validate() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (x *MessagePushV2) Validate() error {
|
||||
func (x *MessagePush) Validate() error {
|
||||
if x.WakuMessage == nil {
|
||||
return errMissingMessage
|
||||
}
|
||||
|
|
36
vendor/github.com/waku-org/go-waku/waku/v2/protocol/filter/pb/waku_filter_v2.proto
generated
vendored
36
vendor/github.com/waku-org/go-waku/waku/v2/protocol/filter/pb/waku_filter_v2.proto
generated
vendored
|
@ -1,36 +0,0 @@
|
|||
syntax = "proto3";
|
||||
|
||||
// 12/WAKU2-FILTER rfc: https://rfc.vac.dev/spec/12/
|
||||
package pb;
|
||||
|
||||
import "waku_message.proto";
|
||||
|
||||
|
||||
// Protocol identifier: /vac/waku/filter-subscribe/2.0.0-beta1
|
||||
message FilterSubscribeRequest {
|
||||
enum FilterSubscribeType {
|
||||
SUBSCRIBER_PING = 0;
|
||||
SUBSCRIBE = 1;
|
||||
UNSUBSCRIBE = 2;
|
||||
UNSUBSCRIBE_ALL = 3;
|
||||
}
|
||||
|
||||
string request_id = 1;
|
||||
FilterSubscribeType filter_subscribe_type = 2;
|
||||
|
||||
// Filter criteria
|
||||
optional string pubsub_topic = 10;
|
||||
repeated string content_topics = 11;
|
||||
}
|
||||
|
||||
message FilterSubscribeResponse {
|
||||
string request_id = 1;
|
||||
uint32 status_code = 10;
|
||||
string status_desc = 11;
|
||||
}
|
||||
|
||||
// Protocol identifier: /vac/waku/filter-push/2.0.0-beta1
|
||||
message MessagePushV2 {
|
||||
WakuMessage waku_message = 1;
|
||||
optional string pubsub_topic = 2;
|
||||
}
|
|
@ -17,14 +17,16 @@ import (
|
|||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/filter/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
|
||||
"github.com/waku-org/go-waku/waku/v2/service"
|
||||
"github.com/waku-org/go-waku/waku/v2/timesource"
|
||||
"github.com/waku-org/go-waku/waku/v2/utils"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// FilterSubscribeID_v20beta1 is the current Waku Filter protocol identifier for servers to
|
||||
// allow filter clients to subscribe, modify, refresh and unsubscribe a desired set of filter criteria
|
||||
const FilterSubscribeID_v20beta1 = libp2pProtocol.ID("/vac/waku/filter-subscribe/2.0.0-beta1")
|
||||
|
||||
const FilterSubscribeENRField = uint8(1 << 2)
|
||||
const peerHasNoSubscription = "peer has no subscriptions"
|
||||
|
||||
type (
|
||||
|
@ -33,7 +35,7 @@ type (
|
|||
msgSub *relay.Subscription
|
||||
metrics Metrics
|
||||
log *zap.Logger
|
||||
*protocol.CommonService
|
||||
*service.CommonService
|
||||
subscriptions *SubscribersMap
|
||||
|
||||
maxSubscriptions int
|
||||
|
@ -52,11 +54,13 @@ func NewWakuFilterFullNode(timesource timesource.Timesource, reg prometheus.Regi
|
|||
opt(params)
|
||||
}
|
||||
|
||||
wf.CommonService = protocol.NewCommonService()
|
||||
wf.CommonService = service.NewCommonService()
|
||||
wf.metrics = newMetrics(reg)
|
||||
wf.subscriptions = NewSubscribersMap(params.Timeout)
|
||||
wf.maxSubscriptions = params.MaxSubscribers
|
||||
|
||||
if params.pm != nil {
|
||||
params.pm.RegisterWakuProtocol(FilterSubscribeID_v20beta1, FilterSubscribeENRField)
|
||||
}
|
||||
return wf
|
||||
}
|
||||
|
||||
|
@ -133,9 +137,10 @@ func (wf *WakuFilterFullNode) reply(ctx context.Context, stream network.Stream,
|
|||
}
|
||||
|
||||
if len(description) != 0 {
|
||||
response.StatusDesc = description[0]
|
||||
response.StatusDesc = &description[0]
|
||||
} else {
|
||||
response.StatusDesc = http.StatusText(statusCode)
|
||||
desc := http.StatusText(statusCode)
|
||||
response.StatusDesc = &desc
|
||||
}
|
||||
|
||||
writer := pbio.NewDelimitedWriter(stream)
|
||||
|
@ -213,20 +218,23 @@ func (wf *WakuFilterFullNode) filterListener(ctx context.Context) {
|
|||
handle := func(envelope *protocol.Envelope) error {
|
||||
msg := envelope.Message()
|
||||
pubsubTopic := envelope.PubsubTopic()
|
||||
logger := wf.log.With(logging.HexBytes("envelopeHash", envelope.Hash()))
|
||||
logger := utils.MessagesLogger("filter").With(logging.HexBytes("hash", envelope.Hash()),
|
||||
zap.String("pubsubTopic", envelope.PubsubTopic()),
|
||||
zap.String("contentTopic", envelope.Message().ContentTopic),
|
||||
)
|
||||
logger.Debug("push message to filter subscribers")
|
||||
|
||||
// Each subscriber is a light node that earlier on invoked
|
||||
// a FilterRequest on this node
|
||||
for subscriber := range wf.subscriptions.Items(pubsubTopic, msg.ContentTopic) {
|
||||
logger := logger.With(logging.HostID("subscriber", subscriber))
|
||||
subscriber := subscriber // https://golang.org/doc/faq#closures_and_goroutines
|
||||
logger := logger.With(logging.HostID("peer", subscriber))
|
||||
// Do a message push to light node
|
||||
logger.Info("pushing message to light node")
|
||||
logger.Debug("pushing message to light node")
|
||||
wf.WaitGroup().Add(1)
|
||||
go func(subscriber peer.ID) {
|
||||
defer wf.WaitGroup().Done()
|
||||
start := time.Now()
|
||||
err := wf.pushMessage(ctx, subscriber, envelope)
|
||||
err := wf.pushMessage(ctx, logger, subscriber, envelope)
|
||||
if err != nil {
|
||||
logger.Error("pushing message", zap.Error(err))
|
||||
return
|
||||
|
@ -245,15 +253,9 @@ func (wf *WakuFilterFullNode) filterListener(ctx context.Context) {
|
|||
}
|
||||
}
|
||||
|
||||
func (wf *WakuFilterFullNode) pushMessage(ctx context.Context, peerID peer.ID, env *protocol.Envelope) error {
|
||||
logger := wf.log.With(
|
||||
logging.HostID("peer", peerID),
|
||||
logging.HexBytes("envelopeHash", env.Hash()),
|
||||
zap.String("pubsubTopic", env.PubsubTopic()),
|
||||
zap.String("contentTopic", env.Message().ContentTopic),
|
||||
)
|
||||
func (wf *WakuFilterFullNode) pushMessage(ctx context.Context, logger *zap.Logger, peerID peer.ID, env *protocol.Envelope) error {
|
||||
pubSubTopic := env.PubsubTopic()
|
||||
messagePush := &pb.MessagePushV2{
|
||||
messagePush := &pb.MessagePush{
|
||||
PubsubTopic: &pubSubTopic,
|
||||
WakuMessage: env.Message(),
|
||||
}
|
||||
|
|
4
vendor/github.com/waku-org/go-waku/waku/v2/protocol/legacy_filter/pb/generate.go
generated
vendored
4
vendor/github.com/waku-org/go-waku/waku/v2/protocol/legacy_filter/pb/generate.go
generated
vendored
|
@ -1,3 +1,5 @@
|
|||
package pb
|
||||
|
||||
//go:generate protoc -I./../../pb/. -I. --go_opt=paths=source_relative --go_opt=Mwaku_filter.proto=github.com/waku-org/go-waku/waku/v2/protocol/filter/pb --go_opt=Mwaku_message.proto=github.com/waku-org/go-waku/waku/v2/protocol/pb --go_out=. ./waku_filter.proto
|
||||
//go:generate mv ./../../waku-proto/waku/filter/v2beta1/filter.proto ./../../waku-proto/waku/filter/v2beta1/legacy_filter.proto
|
||||
//go:generate protoc -I./../../waku-proto/waku/filter/v2beta1/. -I./../../waku-proto/ --go_opt=paths=source_relative --go_opt=Mlegacy_filter.proto=github.com/waku-org/go-waku/waku/v2/protocol/legacy_filter/pb --go_opt=Mwaku/message/v1/message.proto=github.com/waku-org/go-waku/waku/v2/protocol/pb --go_out=. ./../../waku-proto/waku/filter/v2beta1/legacy_filter.proto
|
||||
//go:generate mv ./../../waku-proto/waku/filter/v2beta1/legacy_filter.proto ./../../waku-proto/waku/filter/v2beta1/filter.proto
|
||||
|
|
394
vendor/github.com/waku-org/go-waku/waku/v2/protocol/legacy_filter/pb/legacy_filter.pb.go
generated
vendored
Normal file
394
vendor/github.com/waku-org/go-waku/waku/v2/protocol/legacy_filter/pb/legacy_filter.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,394 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.31.0
|
||||
// protoc v4.24.4
|
||||
// source: legacy_filter.proto
|
||||
|
||||
// 12/WAKU2-FILTER rfc: https://rfc.vac.dev/spec/12/
|
||||
// Protocol identifier: /vac/waku/filter/2.0.0-beta1
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
pb "github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type FilterRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Subscribe bool `protobuf:"varint,1,opt,name=subscribe,proto3" json:"subscribe,omitempty"`
|
||||
Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"`
|
||||
ContentFilters []*FilterRequest_ContentFilter `protobuf:"bytes,3,rep,name=content_filters,json=contentFilters,proto3" json:"content_filters,omitempty"`
|
||||
}
|
||||
|
||||
func (x *FilterRequest) Reset() {
|
||||
*x = FilterRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_legacy_filter_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *FilterRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*FilterRequest) ProtoMessage() {}
|
||||
|
||||
func (x *FilterRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_legacy_filter_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use FilterRequest.ProtoReflect.Descriptor instead.
|
||||
func (*FilterRequest) Descriptor() ([]byte, []int) {
|
||||
return file_legacy_filter_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *FilterRequest) GetSubscribe() bool {
|
||||
if x != nil {
|
||||
return x.Subscribe
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *FilterRequest) GetTopic() string {
|
||||
if x != nil {
|
||||
return x.Topic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *FilterRequest) GetContentFilters() []*FilterRequest_ContentFilter {
|
||||
if x != nil {
|
||||
return x.ContentFilters
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type MessagePush struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Messages []*pb.WakuMessage `protobuf:"bytes,1,rep,name=messages,proto3" json:"messages,omitempty"`
|
||||
}
|
||||
|
||||
func (x *MessagePush) Reset() {
|
||||
*x = MessagePush{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_legacy_filter_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *MessagePush) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*MessagePush) ProtoMessage() {}
|
||||
|
||||
func (x *MessagePush) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_legacy_filter_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use MessagePush.ProtoReflect.Descriptor instead.
|
||||
func (*MessagePush) Descriptor() ([]byte, []int) {
|
||||
return file_legacy_filter_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *MessagePush) GetMessages() []*pb.WakuMessage {
|
||||
if x != nil {
|
||||
return x.Messages
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type FilterRpc struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
|
||||
Request *FilterRequest `protobuf:"bytes,2,opt,name=request,proto3,oneof" json:"request,omitempty"`
|
||||
Push *MessagePush `protobuf:"bytes,3,opt,name=push,proto3,oneof" json:"push,omitempty"`
|
||||
}
|
||||
|
||||
func (x *FilterRpc) Reset() {
|
||||
*x = FilterRpc{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_legacy_filter_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *FilterRpc) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*FilterRpc) ProtoMessage() {}
|
||||
|
||||
func (x *FilterRpc) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_legacy_filter_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use FilterRpc.ProtoReflect.Descriptor instead.
|
||||
func (*FilterRpc) Descriptor() ([]byte, []int) {
|
||||
return file_legacy_filter_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *FilterRpc) GetRequestId() string {
|
||||
if x != nil {
|
||||
return x.RequestId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *FilterRpc) GetRequest() *FilterRequest {
|
||||
if x != nil {
|
||||
return x.Request
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *FilterRpc) GetPush() *MessagePush {
|
||||
if x != nil {
|
||||
return x.Push
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type FilterRequest_ContentFilter struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
ContentTopic string `protobuf:"bytes,1,opt,name=content_topic,json=contentTopic,proto3" json:"content_topic,omitempty"`
|
||||
}
|
||||
|
||||
func (x *FilterRequest_ContentFilter) Reset() {
|
||||
*x = FilterRequest_ContentFilter{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_legacy_filter_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *FilterRequest_ContentFilter) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*FilterRequest_ContentFilter) ProtoMessage() {}
|
||||
|
||||
func (x *FilterRequest_ContentFilter) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_legacy_filter_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use FilterRequest_ContentFilter.ProtoReflect.Descriptor instead.
|
||||
func (*FilterRequest_ContentFilter) Descriptor() ([]byte, []int) {
|
||||
return file_legacy_filter_proto_rawDescGZIP(), []int{0, 0}
|
||||
}
|
||||
|
||||
func (x *FilterRequest_ContentFilter) GetContentTopic() string {
|
||||
if x != nil {
|
||||
return x.ContentTopic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
var File_legacy_filter_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_legacy_filter_proto_rawDesc = []byte{
|
||||
0x0a, 0x13, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x77, 0x61, 0x6b, 0x75, 0x2e, 0x66, 0x69, 0x6c, 0x74,
|
||||
0x65, 0x72, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x1a, 0x1d, 0x77, 0x61, 0x6b, 0x75,
|
||||
0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73,
|
||||
0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd4, 0x01, 0x0a, 0x0d, 0x46, 0x69,
|
||||
0x6c, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x73,
|
||||
0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09,
|
||||
0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70,
|
||||
0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12,
|
||||
0x59, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65,
|
||||
0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x77, 0x61, 0x6b, 0x75, 0x2e,
|
||||
0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x46,
|
||||
0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x43, 0x6f, 0x6e,
|
||||
0x74, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x74,
|
||||
0x65, 0x6e, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x34, 0x0a, 0x0d, 0x43, 0x6f,
|
||||
0x6e, 0x74, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x63,
|
||||
0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63,
|
||||
0x22, 0x47, 0x0a, 0x0b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x50, 0x75, 0x73, 0x68, 0x12,
|
||||
0x38, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
|
||||
0x0b, 0x32, 0x1c, 0x2e, 0x77, 0x61, 0x6b, 0x75, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x57, 0x61, 0x6b, 0x75, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52,
|
||||
0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0xbd, 0x01, 0x0a, 0x09, 0x46, 0x69,
|
||||
0x6c, 0x74, 0x65, 0x72, 0x52, 0x70, 0x63, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x41, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x77, 0x61, 0x6b, 0x75, 0x2e, 0x66,
|
||||
0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x46, 0x69,
|
||||
0x6c, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x07, 0x72,
|
||||
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x88, 0x01, 0x01, 0x12, 0x39, 0x0a, 0x04, 0x70, 0x75, 0x73,
|
||||
0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x77, 0x61, 0x6b, 0x75, 0x2e, 0x66,
|
||||
0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4d, 0x65,
|
||||
0x73, 0x73, 0x61, 0x67, 0x65, 0x50, 0x75, 0x73, 0x68, 0x48, 0x01, 0x52, 0x04, 0x70, 0x75, 0x73,
|
||||
0x68, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x42, 0x07, 0x0a, 0x05, 0x5f, 0x70, 0x75, 0x73, 0x68, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_legacy_filter_proto_rawDescOnce sync.Once
|
||||
file_legacy_filter_proto_rawDescData = file_legacy_filter_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_legacy_filter_proto_rawDescGZIP() []byte {
|
||||
file_legacy_filter_proto_rawDescOnce.Do(func() {
|
||||
file_legacy_filter_proto_rawDescData = protoimpl.X.CompressGZIP(file_legacy_filter_proto_rawDescData)
|
||||
})
|
||||
return file_legacy_filter_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_legacy_filter_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
|
||||
var file_legacy_filter_proto_goTypes = []interface{}{
|
||||
(*FilterRequest)(nil), // 0: waku.filter.v2beta1.FilterRequest
|
||||
(*MessagePush)(nil), // 1: waku.filter.v2beta1.MessagePush
|
||||
(*FilterRpc)(nil), // 2: waku.filter.v2beta1.FilterRpc
|
||||
(*FilterRequest_ContentFilter)(nil), // 3: waku.filter.v2beta1.FilterRequest.ContentFilter
|
||||
(*pb.WakuMessage)(nil), // 4: waku.message.v1.WakuMessage
|
||||
}
|
||||
var file_legacy_filter_proto_depIdxs = []int32{
|
||||
3, // 0: waku.filter.v2beta1.FilterRequest.content_filters:type_name -> waku.filter.v2beta1.FilterRequest.ContentFilter
|
||||
4, // 1: waku.filter.v2beta1.MessagePush.messages:type_name -> waku.message.v1.WakuMessage
|
||||
0, // 2: waku.filter.v2beta1.FilterRpc.request:type_name -> waku.filter.v2beta1.FilterRequest
|
||||
1, // 3: waku.filter.v2beta1.FilterRpc.push:type_name -> waku.filter.v2beta1.MessagePush
|
||||
4, // [4:4] is the sub-list for method output_type
|
||||
4, // [4:4] is the sub-list for method input_type
|
||||
4, // [4:4] is the sub-list for extension type_name
|
||||
4, // [4:4] is the sub-list for extension extendee
|
||||
0, // [0:4] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_legacy_filter_proto_init() }
|
||||
func file_legacy_filter_proto_init() {
|
||||
if File_legacy_filter_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_legacy_filter_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*FilterRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_legacy_filter_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*MessagePush); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_legacy_filter_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*FilterRpc); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_legacy_filter_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*FilterRequest_ContentFilter); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
file_legacy_filter_proto_msgTypes[2].OneofWrappers = []interface{}{}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_legacy_filter_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 4,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_legacy_filter_proto_goTypes,
|
||||
DependencyIndexes: file_legacy_filter_proto_depIdxs,
|
||||
MessageInfos: file_legacy_filter_proto_msgTypes,
|
||||
}.Build()
|
||||
File_legacy_filter_proto = out.File
|
||||
file_legacy_filter_proto_rawDesc = nil
|
||||
file_legacy_filter_proto_goTypes = nil
|
||||
file_legacy_filter_proto_depIdxs = nil
|
||||
}
|
381
vendor/github.com/waku-org/go-waku/waku/v2/protocol/legacy_filter/pb/waku_filter.pb.go
generated
vendored
381
vendor/github.com/waku-org/go-waku/waku/v2/protocol/legacy_filter/pb/waku_filter.pb.go
generated
vendored
|
@ -1,381 +0,0 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.26.0
|
||||
// protoc v3.21.12
|
||||
// source: waku_filter.proto
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
pb "github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type FilterRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Subscribe bool `protobuf:"varint,1,opt,name=subscribe,proto3" json:"subscribe,omitempty"`
|
||||
Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"`
|
||||
ContentFilters []*FilterRequest_ContentFilter `protobuf:"bytes,3,rep,name=contentFilters,proto3" json:"contentFilters,omitempty"`
|
||||
}
|
||||
|
||||
func (x *FilterRequest) Reset() {
|
||||
*x = FilterRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_waku_filter_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *FilterRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*FilterRequest) ProtoMessage() {}
|
||||
|
||||
func (x *FilterRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_waku_filter_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use FilterRequest.ProtoReflect.Descriptor instead.
|
||||
func (*FilterRequest) Descriptor() ([]byte, []int) {
|
||||
return file_waku_filter_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *FilterRequest) GetSubscribe() bool {
|
||||
if x != nil {
|
||||
return x.Subscribe
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *FilterRequest) GetTopic() string {
|
||||
if x != nil {
|
||||
return x.Topic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *FilterRequest) GetContentFilters() []*FilterRequest_ContentFilter {
|
||||
if x != nil {
|
||||
return x.ContentFilters
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type MessagePush struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Messages []*pb.WakuMessage `protobuf:"bytes,1,rep,name=messages,proto3" json:"messages,omitempty"`
|
||||
}
|
||||
|
||||
func (x *MessagePush) Reset() {
|
||||
*x = MessagePush{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_waku_filter_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *MessagePush) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*MessagePush) ProtoMessage() {}
|
||||
|
||||
func (x *MessagePush) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_waku_filter_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use MessagePush.ProtoReflect.Descriptor instead.
|
||||
func (*MessagePush) Descriptor() ([]byte, []int) {
|
||||
return file_waku_filter_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *MessagePush) GetMessages() []*pb.WakuMessage {
|
||||
if x != nil {
|
||||
return x.Messages
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type FilterRPC struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
RequestId string `protobuf:"bytes,1,opt,name=requestId,proto3" json:"requestId,omitempty"`
|
||||
Request *FilterRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"`
|
||||
Push *MessagePush `protobuf:"bytes,3,opt,name=push,proto3" json:"push,omitempty"`
|
||||
}
|
||||
|
||||
func (x *FilterRPC) Reset() {
|
||||
*x = FilterRPC{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_waku_filter_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *FilterRPC) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*FilterRPC) ProtoMessage() {}
|
||||
|
||||
func (x *FilterRPC) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_waku_filter_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use FilterRPC.ProtoReflect.Descriptor instead.
|
||||
func (*FilterRPC) Descriptor() ([]byte, []int) {
|
||||
return file_waku_filter_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *FilterRPC) GetRequestId() string {
|
||||
if x != nil {
|
||||
return x.RequestId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *FilterRPC) GetRequest() *FilterRequest {
|
||||
if x != nil {
|
||||
return x.Request
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *FilterRPC) GetPush() *MessagePush {
|
||||
if x != nil {
|
||||
return x.Push
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type FilterRequest_ContentFilter struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
ContentTopic string `protobuf:"bytes,1,opt,name=contentTopic,proto3" json:"contentTopic,omitempty"`
|
||||
}
|
||||
|
||||
func (x *FilterRequest_ContentFilter) Reset() {
|
||||
*x = FilterRequest_ContentFilter{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_waku_filter_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *FilterRequest_ContentFilter) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*FilterRequest_ContentFilter) ProtoMessage() {}
|
||||
|
||||
func (x *FilterRequest_ContentFilter) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_waku_filter_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use FilterRequest_ContentFilter.ProtoReflect.Descriptor instead.
|
||||
func (*FilterRequest_ContentFilter) Descriptor() ([]byte, []int) {
|
||||
return file_waku_filter_proto_rawDescGZIP(), []int{0, 0}
|
||||
}
|
||||
|
||||
func (x *FilterRequest_ContentFilter) GetContentTopic() string {
|
||||
if x != nil {
|
||||
return x.ContentTopic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
var File_waku_filter_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_waku_filter_proto_rawDesc = []byte{
|
||||
0x0a, 0x11, 0x77, 0x61, 0x6b, 0x75, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x12, 0x77, 0x61, 0x6b, 0x75, 0x5f, 0x6d, 0x65,
|
||||
0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc1, 0x01, 0x0a, 0x0d,
|
||||
0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a,
|
||||
0x09, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
|
||||
0x52, 0x09, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74,
|
||||
0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69,
|
||||
0x63, 0x12, 0x47, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x6c, 0x74,
|
||||
0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x70, 0x62, 0x2e, 0x46,
|
||||
0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x43, 0x6f, 0x6e,
|
||||
0x74, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x74,
|
||||
0x65, 0x6e, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x33, 0x0a, 0x0d, 0x43, 0x6f,
|
||||
0x6e, 0x74, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x22, 0x0a, 0x0c, 0x63,
|
||||
0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x22,
|
||||
0x3a, 0x0a, 0x0b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x50, 0x75, 0x73, 0x68, 0x12, 0x2b,
|
||||
0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
|
||||
0x32, 0x0f, 0x2e, 0x70, 0x62, 0x2e, 0x57, 0x61, 0x6b, 0x75, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
|
||||
0x65, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0x7b, 0x0a, 0x09, 0x46,
|
||||
0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x50, 0x43, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x2b, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c,
|
||||
0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x04, 0x70, 0x75, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28,
|
||||
0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x50, 0x75,
|
||||
0x73, 0x68, 0x52, 0x04, 0x70, 0x75, 0x73, 0x68, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_waku_filter_proto_rawDescOnce sync.Once
|
||||
file_waku_filter_proto_rawDescData = file_waku_filter_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_waku_filter_proto_rawDescGZIP() []byte {
|
||||
file_waku_filter_proto_rawDescOnce.Do(func() {
|
||||
file_waku_filter_proto_rawDescData = protoimpl.X.CompressGZIP(file_waku_filter_proto_rawDescData)
|
||||
})
|
||||
return file_waku_filter_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_waku_filter_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
|
||||
var file_waku_filter_proto_goTypes = []interface{}{
|
||||
(*FilterRequest)(nil), // 0: pb.FilterRequest
|
||||
(*MessagePush)(nil), // 1: pb.MessagePush
|
||||
(*FilterRPC)(nil), // 2: pb.FilterRPC
|
||||
(*FilterRequest_ContentFilter)(nil), // 3: pb.FilterRequest.ContentFilter
|
||||
(*pb.WakuMessage)(nil), // 4: pb.WakuMessage
|
||||
}
|
||||
var file_waku_filter_proto_depIdxs = []int32{
|
||||
3, // 0: pb.FilterRequest.contentFilters:type_name -> pb.FilterRequest.ContentFilter
|
||||
4, // 1: pb.MessagePush.messages:type_name -> pb.WakuMessage
|
||||
0, // 2: pb.FilterRPC.request:type_name -> pb.FilterRequest
|
||||
1, // 3: pb.FilterRPC.push:type_name -> pb.MessagePush
|
||||
4, // [4:4] is the sub-list for method output_type
|
||||
4, // [4:4] is the sub-list for method input_type
|
||||
4, // [4:4] is the sub-list for extension type_name
|
||||
4, // [4:4] is the sub-list for extension extendee
|
||||
0, // [0:4] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_waku_filter_proto_init() }
|
||||
func file_waku_filter_proto_init() {
|
||||
if File_waku_filter_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_waku_filter_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*FilterRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_waku_filter_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*MessagePush); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_waku_filter_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*FilterRPC); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_waku_filter_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*FilterRequest_ContentFilter); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_waku_filter_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 4,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_waku_filter_proto_goTypes,
|
||||
DependencyIndexes: file_waku_filter_proto_depIdxs,
|
||||
MessageInfos: file_waku_filter_proto_msgTypes,
|
||||
}.Build()
|
||||
File_waku_filter_proto = out.File
|
||||
file_waku_filter_proto_rawDesc = nil
|
||||
file_waku_filter_proto_goTypes = nil
|
||||
file_waku_filter_proto_depIdxs = nil
|
||||
}
|
25
vendor/github.com/waku-org/go-waku/waku/v2/protocol/legacy_filter/pb/waku_filter.proto
generated
vendored
25
vendor/github.com/waku-org/go-waku/waku/v2/protocol/legacy_filter/pb/waku_filter.proto
generated
vendored
|
@ -1,25 +0,0 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package pb;
|
||||
|
||||
import "waku_message.proto";
|
||||
|
||||
message FilterRequest {
|
||||
bool subscribe = 1;
|
||||
string topic = 2;
|
||||
repeated ContentFilter contentFilters = 3;
|
||||
|
||||
message ContentFilter {
|
||||
string contentTopic = 1;
|
||||
}
|
||||
}
|
||||
|
||||
message MessagePush {
|
||||
repeated WakuMessage messages = 1;
|
||||
}
|
||||
|
||||
message FilterRPC {
|
||||
string requestId = 1;
|
||||
FilterRequest request = 2;
|
||||
MessagePush push = 3;
|
||||
}
|
15
vendor/github.com/waku-org/go-waku/waku/v2/protocol/legacy_filter/waku_filter.go
generated
vendored
15
vendor/github.com/waku-org/go-waku/waku/v2/protocol/legacy_filter/waku_filter.go
generated
vendored
|
@ -18,6 +18,7 @@ import (
|
|||
"github.com/waku-org/go-waku/waku/v2/protocol/legacy_filter/pb"
|
||||
wpb "github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
|
||||
"github.com/waku-org/go-waku/waku/v2/service"
|
||||
"github.com/waku-org/go-waku/waku/v2/timesource"
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
@ -47,7 +48,7 @@ type (
|
|||
}
|
||||
|
||||
WakuFilter struct {
|
||||
*protocol.CommonService
|
||||
*service.CommonService
|
||||
h host.Host
|
||||
pm *peermanager.PeerManager
|
||||
isFullNode bool
|
||||
|
@ -76,7 +77,7 @@ func NewWakuFilter(broadcaster relay.Broadcaster, isFullNode bool, timesource ti
|
|||
}
|
||||
|
||||
wf.isFullNode = isFullNode
|
||||
wf.CommonService = protocol.NewCommonService()
|
||||
wf.CommonService = service.NewCommonService()
|
||||
wf.filters = NewFilterMap(broadcaster, timesource)
|
||||
wf.subscribers = NewSubscribers(params.Timeout)
|
||||
wf.metrics = newMetrics(reg)
|
||||
|
@ -108,7 +109,7 @@ func (wf *WakuFilter) onRequest(ctx context.Context) func(network.Stream) {
|
|||
peerID := stream.Conn().RemotePeer()
|
||||
logger := wf.log.With(logging.HostID("peer", peerID))
|
||||
|
||||
filterRPCRequest := &pb.FilterRPC{}
|
||||
filterRPCRequest := &pb.FilterRpc{}
|
||||
|
||||
reader := pbio.NewDelimitedReader(stream, math.MaxInt32)
|
||||
|
||||
|
@ -165,7 +166,7 @@ func (wf *WakuFilter) onRequest(ctx context.Context) func(network.Stream) {
|
|||
}
|
||||
|
||||
func (wf *WakuFilter) pushMessage(ctx context.Context, subscriber Subscriber, msg *wpb.WakuMessage) error {
|
||||
pushRPC := &pb.FilterRPC{RequestId: subscriber.requestID, Push: &pb.MessagePush{Messages: []*wpb.WakuMessage{msg}}}
|
||||
pushRPC := &pb.FilterRpc{RequestId: subscriber.requestID, Push: &pb.MessagePush{Messages: []*wpb.WakuMessage{msg}}}
|
||||
logger := wf.log.With(logging.HostID("peer", subscriber.peer))
|
||||
|
||||
stream, err := wf.h.NewStream(ctx, subscriber.peer, FilterID_v20beta1)
|
||||
|
@ -255,7 +256,7 @@ func (wf *WakuFilter) requestSubscription(ctx context.Context, filter ContentFil
|
|||
peermanager.PeerSelectionCriteria{
|
||||
SelectionType: params.peerSelectionType,
|
||||
Proto: FilterID_v20beta1,
|
||||
PubsubTopic: filter.Topic,
|
||||
PubsubTopics: []string{filter.Topic},
|
||||
SpecificPeers: params.preferredPeers,
|
||||
Ctx: ctx,
|
||||
},
|
||||
|
@ -287,7 +288,7 @@ func (wf *WakuFilter) requestSubscription(ctx context.Context, filter ContentFil
|
|||
requestID := hex.EncodeToString(protocol.GenerateRequestID())
|
||||
|
||||
writer := pbio.NewDelimitedWriter(stream)
|
||||
filterRPC := &pb.FilterRPC{RequestId: requestID, Request: request}
|
||||
filterRPC := &pb.FilterRpc{RequestId: requestID, Request: request}
|
||||
wf.log.Debug("sending filterRPC", zap.Stringer("rpc", filterRPC))
|
||||
err = writer.WriteMsg(filterRPC)
|
||||
if err != nil {
|
||||
|
@ -331,7 +332,7 @@ func (wf *WakuFilter) Unsubscribe(ctx context.Context, contentFilter ContentFilt
|
|||
}
|
||||
|
||||
writer := pbio.NewDelimitedWriter(stream)
|
||||
filterRPC := &pb.FilterRPC{RequestId: hex.EncodeToString(id), Request: request}
|
||||
filterRPC := &pb.FilterRpc{RequestId: hex.EncodeToString(id), Request: request}
|
||||
err = writer.WriteMsg(filterRPC)
|
||||
if err != nil {
|
||||
wf.metrics.RecordError(writeRequestFailure)
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
package pb
|
||||
|
||||
//go:generate protoc -I./../../pb/. -I. --go_opt=paths=source_relative --go_opt=Mwaku_lightpush.proto=github.com/waku-org/go-waku/waku/v2/protocol/lightpush/pb --go_opt=Mwaku_message.proto=github.com/waku-org/go-waku/waku/v2/protocol/pb --go_out=. ./waku_lightpush.proto
|
||||
//go:generate protoc -I./../../waku-proto/waku/lightpush/v2beta1/. -I./../../waku-proto/ --go_opt=paths=source_relative --go_opt=Mlightpush.proto=github.com/waku-org/go-waku/waku/v2/protocol/lightpush/pb --go_opt=Mwaku/message/v1/message.proto=github.com/waku-org/go-waku/waku/v2/protocol/pb --go_out=. ./../../waku-proto/waku/lightpush/v2beta1/lightpush.proto
|
||||
|
|
328
vendor/github.com/waku-org/go-waku/waku/v2/protocol/lightpush/pb/lightpush.pb.go
generated
vendored
Normal file
328
vendor/github.com/waku-org/go-waku/waku/v2/protocol/lightpush/pb/lightpush.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,328 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.31.0
|
||||
// protoc v4.24.4
|
||||
// source: lightpush.proto
|
||||
|
||||
// 19/WAKU2-LIGHTPUSH rfc: https://rfc.vac.dev/spec/19/
|
||||
// Protocol identifier: /vac/waku/lightpush/2.0.0-beta1
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
pb "github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type PushRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
PubsubTopic string `protobuf:"bytes,1,opt,name=pubsub_topic,json=pubsubTopic,proto3" json:"pubsub_topic,omitempty"`
|
||||
Message *pb.WakuMessage `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
|
||||
}
|
||||
|
||||
func (x *PushRequest) Reset() {
|
||||
*x = PushRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_lightpush_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *PushRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*PushRequest) ProtoMessage() {}
|
||||
|
||||
func (x *PushRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_lightpush_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use PushRequest.ProtoReflect.Descriptor instead.
|
||||
func (*PushRequest) Descriptor() ([]byte, []int) {
|
||||
return file_lightpush_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *PushRequest) GetPubsubTopic() string {
|
||||
if x != nil {
|
||||
return x.PubsubTopic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *PushRequest) GetMessage() *pb.WakuMessage {
|
||||
if x != nil {
|
||||
return x.Message
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type PushResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
IsSuccess bool `protobuf:"varint,1,opt,name=is_success,json=isSuccess,proto3" json:"is_success,omitempty"`
|
||||
Info *string `protobuf:"bytes,2,opt,name=info,proto3,oneof" json:"info,omitempty"`
|
||||
}
|
||||
|
||||
func (x *PushResponse) Reset() {
|
||||
*x = PushResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_lightpush_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *PushResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*PushResponse) ProtoMessage() {}
|
||||
|
||||
func (x *PushResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_lightpush_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use PushResponse.ProtoReflect.Descriptor instead.
|
||||
func (*PushResponse) Descriptor() ([]byte, []int) {
|
||||
return file_lightpush_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *PushResponse) GetIsSuccess() bool {
|
||||
if x != nil {
|
||||
return x.IsSuccess
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *PushResponse) GetInfo() string {
|
||||
if x != nil && x.Info != nil {
|
||||
return *x.Info
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type PushRpc struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
|
||||
Request *PushRequest `protobuf:"bytes,2,opt,name=request,proto3,oneof" json:"request,omitempty"`
|
||||
Response *PushResponse `protobuf:"bytes,3,opt,name=response,proto3,oneof" json:"response,omitempty"`
|
||||
}
|
||||
|
||||
func (x *PushRpc) Reset() {
|
||||
*x = PushRpc{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_lightpush_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *PushRpc) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*PushRpc) ProtoMessage() {}
|
||||
|
||||
func (x *PushRpc) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_lightpush_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use PushRpc.ProtoReflect.Descriptor instead.
|
||||
func (*PushRpc) Descriptor() ([]byte, []int) {
|
||||
return file_lightpush_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *PushRpc) GetRequestId() string {
|
||||
if x != nil {
|
||||
return x.RequestId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *PushRpc) GetRequest() *PushRequest {
|
||||
if x != nil {
|
||||
return x.Request
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *PushRpc) GetResponse() *PushResponse {
|
||||
if x != nil {
|
||||
return x.Response
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_lightpush_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_lightpush_proto_rawDesc = []byte{
|
||||
0x0a, 0x0f, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x70, 0x75, 0x73, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x12, 0x16, 0x77, 0x61, 0x6b, 0x75, 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x70, 0x75, 0x73,
|
||||
0x68, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x1a, 0x1d, 0x77, 0x61, 0x6b, 0x75, 0x2f,
|
||||
0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61,
|
||||
0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x68, 0x0a, 0x0b, 0x50, 0x75, 0x73, 0x68,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x75, 0x62, 0x73, 0x75,
|
||||
0x62, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70,
|
||||
0x75, 0x62, 0x73, 0x75, 0x62, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x36, 0x0a, 0x07, 0x6d, 0x65,
|
||||
0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x77, 0x61,
|
||||
0x6b, 0x75, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x61,
|
||||
0x6b, 0x75, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61,
|
||||
0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0c, 0x50, 0x75, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73,
|
||||
0x73, 0x12, 0x17, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48,
|
||||
0x00, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x88, 0x01, 0x01, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x69,
|
||||
0x6e, 0x66, 0x6f, 0x22, 0xcc, 0x01, 0x0a, 0x07, 0x50, 0x75, 0x73, 0x68, 0x52, 0x70, 0x63, 0x12,
|
||||
0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x42,
|
||||
0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
||||
0x23, 0x2e, 0x77, 0x61, 0x6b, 0x75, 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x70, 0x75, 0x73, 0x68,
|
||||
0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x88,
|
||||
0x01, 0x01, 0x12, 0x45, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x03,
|
||||
0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x77, 0x61, 0x6b, 0x75, 0x2e, 0x6c, 0x69, 0x67, 0x68,
|
||||
0x74, 0x70, 0x75, 0x73, 0x68, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x50, 0x75,
|
||||
0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x01, 0x52, 0x08, 0x72, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x72, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_lightpush_proto_rawDescOnce sync.Once
|
||||
file_lightpush_proto_rawDescData = file_lightpush_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_lightpush_proto_rawDescGZIP() []byte {
|
||||
file_lightpush_proto_rawDescOnce.Do(func() {
|
||||
file_lightpush_proto_rawDescData = protoimpl.X.CompressGZIP(file_lightpush_proto_rawDescData)
|
||||
})
|
||||
return file_lightpush_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_lightpush_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
|
||||
var file_lightpush_proto_goTypes = []interface{}{
|
||||
(*PushRequest)(nil), // 0: waku.lightpush.v2beta1.PushRequest
|
||||
(*PushResponse)(nil), // 1: waku.lightpush.v2beta1.PushResponse
|
||||
(*PushRpc)(nil), // 2: waku.lightpush.v2beta1.PushRpc
|
||||
(*pb.WakuMessage)(nil), // 3: waku.message.v1.WakuMessage
|
||||
}
|
||||
var file_lightpush_proto_depIdxs = []int32{
|
||||
3, // 0: waku.lightpush.v2beta1.PushRequest.message:type_name -> waku.message.v1.WakuMessage
|
||||
0, // 1: waku.lightpush.v2beta1.PushRpc.request:type_name -> waku.lightpush.v2beta1.PushRequest
|
||||
1, // 2: waku.lightpush.v2beta1.PushRpc.response:type_name -> waku.lightpush.v2beta1.PushResponse
|
||||
3, // [3:3] is the sub-list for method output_type
|
||||
3, // [3:3] is the sub-list for method input_type
|
||||
3, // [3:3] is the sub-list for extension type_name
|
||||
3, // [3:3] is the sub-list for extension extendee
|
||||
0, // [0:3] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_lightpush_proto_init() }
|
||||
func file_lightpush_proto_init() {
|
||||
if File_lightpush_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_lightpush_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*PushRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_lightpush_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*PushResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_lightpush_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*PushRpc); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
file_lightpush_proto_msgTypes[1].OneofWrappers = []interface{}{}
|
||||
file_lightpush_proto_msgTypes[2].OneofWrappers = []interface{}{}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_lightpush_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 3,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_lightpush_proto_goTypes,
|
||||
DependencyIndexes: file_lightpush_proto_depIdxs,
|
||||
MessageInfos: file_lightpush_proto_msgTypes,
|
||||
}.Build()
|
||||
File_lightpush_proto = out.File
|
||||
file_lightpush_proto_rawDesc = nil
|
||||
file_lightpush_proto_goTypes = nil
|
||||
file_lightpush_proto_depIdxs = nil
|
||||
}
|
12
vendor/github.com/waku-org/go-waku/waku/v2/protocol/lightpush/pb/validation.go
generated
vendored
12
vendor/github.com/waku-org/go-waku/waku/v2/protocol/lightpush/pb/validation.go
generated
vendored
|
@ -11,27 +11,27 @@ var (
|
|||
errMissingResponse = errors.New("missing Response field")
|
||||
)
|
||||
|
||||
func (x *PushRPC) ValidateRequest() error {
|
||||
func (x *PushRpc) ValidateRequest() error {
|
||||
if x.RequestId == "" {
|
||||
return errMissingRequestID
|
||||
}
|
||||
|
||||
if x.Query == nil {
|
||||
if x.Request == nil {
|
||||
return errMissingQuery
|
||||
}
|
||||
|
||||
if x.Query.PubsubTopic == "" {
|
||||
if x.Request.PubsubTopic == "" {
|
||||
return errMissingPubsubTopic
|
||||
}
|
||||
|
||||
if x.Query.Message == nil {
|
||||
if x.Request.Message == nil {
|
||||
return errMissingMessage
|
||||
}
|
||||
|
||||
return x.Query.Message.Validate()
|
||||
return x.Request.Message.Validate()
|
||||
}
|
||||
|
||||
func (x *PushRPC) ValidateResponse(requestID string) error {
|
||||
func (x *PushRpc) ValidateResponse(requestID string) error {
|
||||
if x.RequestId == "" {
|
||||
return errMissingRequestID
|
||||
}
|
||||
|
|
316
vendor/github.com/waku-org/go-waku/waku/v2/protocol/lightpush/pb/waku_lightpush.pb.go
generated
vendored
316
vendor/github.com/waku-org/go-waku/waku/v2/protocol/lightpush/pb/waku_lightpush.pb.go
generated
vendored
|
@ -1,316 +0,0 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.26.0
|
||||
// protoc v3.21.12
|
||||
// source: waku_lightpush.proto
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
pb "github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type PushRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
PubsubTopic string `protobuf:"bytes,1,opt,name=pubsub_topic,json=pubsubTopic,proto3" json:"pubsub_topic,omitempty"`
|
||||
Message *pb.WakuMessage `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
|
||||
}
|
||||
|
||||
func (x *PushRequest) Reset() {
|
||||
*x = PushRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_waku_lightpush_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *PushRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*PushRequest) ProtoMessage() {}
|
||||
|
||||
func (x *PushRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_waku_lightpush_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use PushRequest.ProtoReflect.Descriptor instead.
|
||||
func (*PushRequest) Descriptor() ([]byte, []int) {
|
||||
return file_waku_lightpush_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *PushRequest) GetPubsubTopic() string {
|
||||
if x != nil {
|
||||
return x.PubsubTopic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *PushRequest) GetMessage() *pb.WakuMessage {
|
||||
if x != nil {
|
||||
return x.Message
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type PushResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
IsSuccess bool `protobuf:"varint,1,opt,name=is_success,json=isSuccess,proto3" json:"is_success,omitempty"`
|
||||
// Error messages, etc
|
||||
Info string `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"`
|
||||
}
|
||||
|
||||
func (x *PushResponse) Reset() {
|
||||
*x = PushResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_waku_lightpush_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *PushResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*PushResponse) ProtoMessage() {}
|
||||
|
||||
func (x *PushResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_waku_lightpush_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use PushResponse.ProtoReflect.Descriptor instead.
|
||||
func (*PushResponse) Descriptor() ([]byte, []int) {
|
||||
return file_waku_lightpush_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *PushResponse) GetIsSuccess() bool {
|
||||
if x != nil {
|
||||
return x.IsSuccess
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *PushResponse) GetInfo() string {
|
||||
if x != nil {
|
||||
return x.Info
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type PushRPC struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
|
||||
Query *PushRequest `protobuf:"bytes,2,opt,name=query,proto3" json:"query,omitempty"`
|
||||
Response *PushResponse `protobuf:"bytes,3,opt,name=response,proto3" json:"response,omitempty"`
|
||||
}
|
||||
|
||||
func (x *PushRPC) Reset() {
|
||||
*x = PushRPC{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_waku_lightpush_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *PushRPC) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*PushRPC) ProtoMessage() {}
|
||||
|
||||
func (x *PushRPC) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_waku_lightpush_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use PushRPC.ProtoReflect.Descriptor instead.
|
||||
func (*PushRPC) Descriptor() ([]byte, []int) {
|
||||
return file_waku_lightpush_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *PushRPC) GetRequestId() string {
|
||||
if x != nil {
|
||||
return x.RequestId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *PushRPC) GetQuery() *PushRequest {
|
||||
if x != nil {
|
||||
return x.Query
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *PushRPC) GetResponse() *PushResponse {
|
||||
if x != nil {
|
||||
return x.Response
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_waku_lightpush_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_waku_lightpush_proto_rawDesc = []byte{
|
||||
0x0a, 0x14, 0x77, 0x61, 0x6b, 0x75, 0x5f, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x70, 0x75, 0x73, 0x68,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x12, 0x77, 0x61, 0x6b, 0x75,
|
||||
0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x5b,
|
||||
0x0a, 0x0b, 0x50, 0x75, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a,
|
||||
0x0c, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x54, 0x6f, 0x70, 0x69, 0x63,
|
||||
0x12, 0x29, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
|
||||
0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x62, 0x2e, 0x57, 0x61, 0x6b, 0x75, 0x4d, 0x65, 0x73, 0x73, 0x61,
|
||||
0x67, 0x65, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x41, 0x0a, 0x0c, 0x50,
|
||||
0x75, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69,
|
||||
0x73, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52,
|
||||
0x09, 0x69, 0x73, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x69, 0x6e,
|
||||
0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x7d,
|
||||
0x0a, 0x07, 0x50, 0x75, 0x73, 0x68, 0x52, 0x50, 0x43, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72,
|
||||
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72,
|
||||
0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x73,
|
||||
0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12,
|
||||
0x2c, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
|
||||
0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x62, 0x06, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_waku_lightpush_proto_rawDescOnce sync.Once
|
||||
file_waku_lightpush_proto_rawDescData = file_waku_lightpush_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_waku_lightpush_proto_rawDescGZIP() []byte {
|
||||
file_waku_lightpush_proto_rawDescOnce.Do(func() {
|
||||
file_waku_lightpush_proto_rawDescData = protoimpl.X.CompressGZIP(file_waku_lightpush_proto_rawDescData)
|
||||
})
|
||||
return file_waku_lightpush_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_waku_lightpush_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
|
||||
var file_waku_lightpush_proto_goTypes = []interface{}{
|
||||
(*PushRequest)(nil), // 0: pb.PushRequest
|
||||
(*PushResponse)(nil), // 1: pb.PushResponse
|
||||
(*PushRPC)(nil), // 2: pb.PushRPC
|
||||
(*pb.WakuMessage)(nil), // 3: pb.WakuMessage
|
||||
}
|
||||
var file_waku_lightpush_proto_depIdxs = []int32{
|
||||
3, // 0: pb.PushRequest.message:type_name -> pb.WakuMessage
|
||||
0, // 1: pb.PushRPC.query:type_name -> pb.PushRequest
|
||||
1, // 2: pb.PushRPC.response:type_name -> pb.PushResponse
|
||||
3, // [3:3] is the sub-list for method output_type
|
||||
3, // [3:3] is the sub-list for method input_type
|
||||
3, // [3:3] is the sub-list for extension type_name
|
||||
3, // [3:3] is the sub-list for extension extendee
|
||||
0, // [0:3] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_waku_lightpush_proto_init() }
|
||||
func file_waku_lightpush_proto_init() {
|
||||
if File_waku_lightpush_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_waku_lightpush_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*PushRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_waku_lightpush_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*PushResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_waku_lightpush_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*PushRPC); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_waku_lightpush_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 3,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_waku_lightpush_proto_goTypes,
|
||||
DependencyIndexes: file_waku_lightpush_proto_depIdxs,
|
||||
MessageInfos: file_waku_lightpush_proto_msgTypes,
|
||||
}.Build()
|
||||
File_waku_lightpush_proto = out.File
|
||||
file_waku_lightpush_proto_rawDesc = nil
|
||||
file_waku_lightpush_proto_goTypes = nil
|
||||
file_waku_lightpush_proto_depIdxs = nil
|
||||
}
|
22
vendor/github.com/waku-org/go-waku/waku/v2/protocol/lightpush/pb/waku_lightpush.proto
generated
vendored
22
vendor/github.com/waku-org/go-waku/waku/v2/protocol/lightpush/pb/waku_lightpush.proto
generated
vendored
|
@ -1,22 +0,0 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package pb;
|
||||
|
||||
import "waku_message.proto";
|
||||
|
||||
message PushRequest {
|
||||
string pubsub_topic = 1;
|
||||
WakuMessage message = 2;
|
||||
}
|
||||
|
||||
message PushResponse {
|
||||
bool is_success = 1;
|
||||
// Error messages, etc
|
||||
string info = 2;
|
||||
}
|
||||
|
||||
message PushRPC {
|
||||
string request_id = 1;
|
||||
PushRequest query = 2;
|
||||
PushResponse response = 3;
|
||||
}
|
57
vendor/github.com/waku-org/go-waku/waku/v2/protocol/lightpush/waku_lightpush.go
generated
vendored
57
vendor/github.com/waku-org/go-waku/waku/v2/protocol/lightpush/waku_lightpush.go
generated
vendored
|
@ -14,15 +14,18 @@ import (
|
|||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/waku-org/go-waku/logging"
|
||||
"github.com/waku-org/go-waku/waku/v2/peermanager"
|
||||
"github.com/waku-org/go-waku/waku/v2/peerstore"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/lightpush/pb"
|
||||
wpb "github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
|
||||
"github.com/waku-org/go-waku/waku/v2/utils"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// LightPushID_v20beta1 is the current Waku LightPush protocol identifier
|
||||
const LightPushID_v20beta1 = libp2pProtocol.ID("/vac/waku/lightpush/2.0.0-beta1")
|
||||
const LightPushENRField = uint8(1 << 3)
|
||||
|
||||
var (
|
||||
ErrNoPeersAvailable = errors.New("no suitable remote peers")
|
||||
|
@ -49,6 +52,7 @@ func NewWakuLightPush(relay *relay.WakuRelay, pm *peermanager.PeerManager, reg p
|
|||
wakuLP.log = log.Named("lightpush")
|
||||
wakuLP.pm = pm
|
||||
wakuLP.metrics = newMetrics(reg)
|
||||
|
||||
return wakuLP
|
||||
}
|
||||
|
||||
|
@ -69,6 +73,9 @@ func (wakuLP *WakuLightPush) Start(ctx context.Context) error {
|
|||
wakuLP.h.SetStreamHandlerMatch(LightPushID_v20beta1, protocol.PrefixTextMatch(string(LightPushID_v20beta1)), wakuLP.onRequest(ctx))
|
||||
wakuLP.log.Info("Light Push protocol started")
|
||||
|
||||
if wakuLP.pm != nil {
|
||||
wakuLP.pm.RegisterWakuProtocol(LightPushID_v20beta1, LightPushENRField)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -80,7 +87,7 @@ func (wakuLP *WakuLightPush) relayIsNotAvailable() bool {
|
|||
func (wakuLP *WakuLightPush) onRequest(ctx context.Context) func(network.Stream) {
|
||||
return func(stream network.Stream) {
|
||||
logger := wakuLP.log.With(logging.HostID("peer", stream.Conn().RemotePeer()))
|
||||
requestPushRPC := &pb.PushRPC{}
|
||||
requestPushRPC := &pb.PushRpc{}
|
||||
|
||||
reader := pbio.NewDelimitedReader(stream, math.MaxInt32)
|
||||
|
||||
|
@ -94,13 +101,14 @@ func (wakuLP *WakuLightPush) onRequest(ctx context.Context) func(network.Stream)
|
|||
return
|
||||
}
|
||||
|
||||
responsePushRPC := &pb.PushRPC{
|
||||
responsePushRPC := &pb.PushRpc{
|
||||
RequestId: requestPushRPC.RequestId,
|
||||
Response: &pb.PushResponse{},
|
||||
}
|
||||
|
||||
if err := requestPushRPC.ValidateRequest(); err != nil {
|
||||
responsePushRPC.Response.Info = err.Error()
|
||||
responseMsg := err.Error()
|
||||
responsePushRPC.Response.Info = &responseMsg
|
||||
wakuLP.metrics.RecordError(requestBodyFailure)
|
||||
wakuLP.reply(stream, responsePushRPC, logger)
|
||||
return
|
||||
|
@ -110,8 +118,8 @@ func (wakuLP *WakuLightPush) onRequest(ctx context.Context) func(network.Stream)
|
|||
|
||||
logger.Info("push request")
|
||||
|
||||
pubSubTopic := requestPushRPC.Query.PubsubTopic
|
||||
message := requestPushRPC.Query.Message
|
||||
pubSubTopic := requestPushRPC.Request.PubsubTopic
|
||||
message := requestPushRPC.Request.Message
|
||||
|
||||
wakuLP.metrics.RecordMessage()
|
||||
|
||||
|
@ -122,11 +130,13 @@ func (wakuLP *WakuLightPush) onRequest(ctx context.Context) func(network.Stream)
|
|||
if err != nil {
|
||||
logger.Error("publishing message", zap.Error(err))
|
||||
wakuLP.metrics.RecordError(messagePushFailure)
|
||||
responsePushRPC.Response.Info = fmt.Sprintf("Could not publish message: %s", err.Error())
|
||||
responseMsg := fmt.Sprintf("Could not publish message: %s", err.Error())
|
||||
responsePushRPC.Response.Info = &responseMsg
|
||||
return
|
||||
} else {
|
||||
responsePushRPC.Response.IsSuccess = true
|
||||
responsePushRPC.Response.Info = "OK"
|
||||
responseMsg := "OK"
|
||||
responsePushRPC.Response.Info = &responseMsg
|
||||
}
|
||||
|
||||
wakuLP.reply(stream, responsePushRPC, logger)
|
||||
|
@ -138,12 +148,12 @@ func (wakuLP *WakuLightPush) onRequest(ctx context.Context) func(network.Stream)
|
|||
if responsePushRPC.Response.IsSuccess {
|
||||
logger.Info("request success")
|
||||
} else {
|
||||
logger.Info("request failure", zap.String("info", responsePushRPC.Response.Info))
|
||||
logger.Info("request failure", zap.String("info", responsePushRPC.GetResponse().GetInfo()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (wakuLP *WakuLightPush) reply(stream network.Stream, responsePushRPC *pb.PushRPC, logger *zap.Logger) {
|
||||
func (wakuLP *WakuLightPush) reply(stream network.Stream, responsePushRPC *pb.PushRpc, logger *zap.Logger) {
|
||||
writer := pbio.NewDelimitedWriter(stream)
|
||||
err := writer.WriteMsg(responsePushRPC)
|
||||
if err != nil {
|
||||
|
@ -175,7 +185,7 @@ func (wakuLP *WakuLightPush) request(ctx context.Context, req *pb.PushRequest, p
|
|||
wakuLP.metrics.RecordError(dialFailure)
|
||||
return nil, err
|
||||
}
|
||||
pushRequestRPC := &pb.PushRPC{RequestId: hex.EncodeToString(params.requestID), Query: req}
|
||||
pushRequestRPC := &pb.PushRpc{RequestId: hex.EncodeToString(params.requestID), Request: req}
|
||||
|
||||
writer := pbio.NewDelimitedWriter(stream)
|
||||
reader := pbio.NewDelimitedReader(stream, math.MaxInt32)
|
||||
|
@ -190,7 +200,7 @@ func (wakuLP *WakuLightPush) request(ctx context.Context, req *pb.PushRequest, p
|
|||
return nil, err
|
||||
}
|
||||
|
||||
pushResponseRPC := &pb.PushRPC{}
|
||||
pushResponseRPC := &pb.PushRpc{}
|
||||
err = reader.ReadMsg(pushResponseRPC)
|
||||
if err != nil {
|
||||
logger.Error("reading response", zap.Error(err))
|
||||
|
@ -230,7 +240,10 @@ func (wakuLP *WakuLightPush) handleOpts(ctx context.Context, message *wpb.WakuMe
|
|||
|
||||
optList := append(DefaultOptions(wakuLP.h), opts...)
|
||||
for _, opt := range optList {
|
||||
opt(params)
|
||||
err := opt(params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if params.pubsubTopic == "" {
|
||||
|
@ -240,12 +253,21 @@ func (wakuLP *WakuLightPush) handleOpts(ctx context.Context, message *wpb.WakuMe
|
|||
}
|
||||
}
|
||||
|
||||
if params.pm != nil && params.peerAddr != nil {
|
||||
pData, err := wakuLP.pm.AddPeer(params.peerAddr, peerstore.Static, []string{params.pubsubTopic}, LightPushID_v20beta1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
wakuLP.pm.Connect(pData)
|
||||
params.selectedPeer = pData.AddrInfo.ID
|
||||
}
|
||||
|
||||
if params.pm != nil && params.selectedPeer == "" {
|
||||
params.selectedPeer, err = wakuLP.pm.SelectPeer(
|
||||
peermanager.PeerSelectionCriteria{
|
||||
SelectionType: params.peerSelectionType,
|
||||
Proto: LightPushID_v20beta1,
|
||||
PubsubTopic: params.pubsubTopic,
|
||||
PubsubTopics: []string{params.pubsubTopic},
|
||||
SpecificPeers: params.preferredPeers,
|
||||
Ctx: ctx,
|
||||
},
|
||||
|
@ -284,9 +306,14 @@ func (wakuLP *WakuLightPush) Publish(ctx context.Context, message *wpb.WakuMessa
|
|||
|
||||
if response.IsSuccess {
|
||||
hash := message.Hash(params.pubsubTopic)
|
||||
wakuLP.log.Info("waku.lightpush published", logging.HexString("hash", hash))
|
||||
utils.MessagesLogger("lightpush").Debug("waku.lightpush published", logging.HexBytes("hash", hash))
|
||||
return hash, nil
|
||||
}
|
||||
|
||||
return nil, errors.New(response.Info)
|
||||
errMsg := "lightpush error"
|
||||
if response.Info != nil {
|
||||
errMsg = *response.Info
|
||||
}
|
||||
|
||||
return nil, errors.New(errMsg)
|
||||
}
|
||||
|
|
43
vendor/github.com/waku-org/go-waku/waku/v2/protocol/lightpush/waku_lightpush_option.go
generated
vendored
43
vendor/github.com/waku-org/go-waku/waku/v2/protocol/lightpush/waku_lightpush_option.go
generated
vendored
|
@ -1,8 +1,11 @@
|
|||
package lightpush
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"github.com/waku-org/go-waku/waku/v2/peermanager"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
|
||||
|
@ -11,6 +14,7 @@ import (
|
|||
|
||||
type lightPushParameters struct {
|
||||
host host.Host
|
||||
peerAddr multiaddr.Multiaddr
|
||||
selectedPeer peer.ID
|
||||
peerSelectionType peermanager.PeerSelection
|
||||
preferredPeers peer.IDSlice
|
||||
|
@ -21,12 +25,29 @@ type lightPushParameters struct {
|
|||
}
|
||||
|
||||
// Option is the type of options accepted when performing LightPush protocol requests
|
||||
type Option func(*lightPushParameters)
|
||||
type Option func(*lightPushParameters) error
|
||||
|
||||
// WithPeer is an option used to specify the peerID to push a waku message to
|
||||
func WithPeer(p peer.ID) Option {
|
||||
return func(params *lightPushParameters) {
|
||||
return func(params *lightPushParameters) error {
|
||||
params.selectedPeer = p
|
||||
if params.peerAddr != nil {
|
||||
return errors.New("peerAddr and peerId options are mutually exclusive")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithPeerAddr is an option used to specify a peerAddress
|
||||
// This new peer will be added to peerStore.
|
||||
// Note that this option is mutually exclusive to WithPeerAddr, only one of them can be used.
|
||||
func WithPeerAddr(pAddr multiaddr.Multiaddr) Option {
|
||||
return func(params *lightPushParameters) error {
|
||||
params.peerAddr = pAddr
|
||||
if params.selectedPeer != "" {
|
||||
return errors.New("peerAddr and peerId options are mutually exclusive")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -35,9 +56,10 @@ func WithPeer(p peer.ID) Option {
|
|||
// from that list assuming it supports the chosen protocol, otherwise it will chose a peer
|
||||
// from the node peerstore
|
||||
func WithAutomaticPeerSelection(fromThesePeers ...peer.ID) Option {
|
||||
return func(params *lightPushParameters) {
|
||||
return func(params *lightPushParameters) error {
|
||||
params.peerSelectionType = peermanager.Automatic
|
||||
params.preferredPeers = fromThesePeers
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -46,38 +68,43 @@ func WithAutomaticPeerSelection(fromThesePeers ...peer.ID) Option {
|
|||
// from that list assuming it supports the chosen protocol, otherwise it will chose a peer
|
||||
// from the node peerstore
|
||||
func WithFastestPeerSelection(fromThesePeers ...peer.ID) Option {
|
||||
return func(params *lightPushParameters) {
|
||||
return func(params *lightPushParameters) error {
|
||||
params.peerSelectionType = peermanager.LowestRTT
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithPubSubTopic is used to specify the pubsub topic on which a WakuMessage will be broadcasted
|
||||
func WithPubSubTopic(pubsubTopic string) Option {
|
||||
return func(params *lightPushParameters) {
|
||||
return func(params *lightPushParameters) error {
|
||||
params.pubsubTopic = pubsubTopic
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithDefaultPubsubTopic is used to indicate that the message should be broadcasted in the default pubsub topic
|
||||
func WithDefaultPubsubTopic() Option {
|
||||
return func(params *lightPushParameters) {
|
||||
return func(params *lightPushParameters) error {
|
||||
params.pubsubTopic = relay.DefaultWakuTopic
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithRequestID is an option to set a specific request ID to be used when
|
||||
// publishing a message
|
||||
func WithRequestID(requestID []byte) Option {
|
||||
return func(params *lightPushParameters) {
|
||||
return func(params *lightPushParameters) error {
|
||||
params.requestID = requestID
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithAutomaticRequestID is an option to automatically generate a request ID
|
||||
// when publishing a message
|
||||
func WithAutomaticRequestID() Option {
|
||||
return func(params *lightPushParameters) {
|
||||
return func(params *lightPushParameters) error {
|
||||
params.requestID = protocol.GenerateRequestID()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
package pb
|
||||
|
||||
//go:generate protoc -I. --go_opt=paths=source_relative --go_opt=Mwaku_metadata.proto=github.com/waku-org/go-waku/waku/v2/protocol/metadata/pb --go_out=. ./waku_metadata.proto
|
||||
//go:generate protoc -I./../../waku-proto/waku/metadata/v1/. -I./../../waku-proto/ --go_opt=paths=source_relative --go_opt=Mwaku_metadata.proto=github.com/waku-org/go-waku/waku/v2/protocol/metadata/pb --go_out=. ./../../waku-proto/waku/metadata/v1/waku_metadata.proto
|
||||
|
|
37
vendor/github.com/waku-org/go-waku/waku/v2/protocol/metadata/pb/waku_metadata.pb.go
generated
vendored
37
vendor/github.com/waku-org/go-waku/waku/v2/protocol/metadata/pb/waku_metadata.pb.go
generated
vendored
|
@ -1,9 +1,11 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.31.0
|
||||
// protoc v3.21.12
|
||||
// protoc v4.24.4
|
||||
// source: waku_metadata.proto
|
||||
|
||||
// rfc: https://rfc.vac.dev/spec/66/
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
|
@ -134,20 +136,21 @@ var File_waku_metadata_proto protoreflect.FileDescriptor
|
|||
|
||||
var file_waku_metadata_proto_rawDesc = []byte{
|
||||
0x0a, 0x13, 0x77, 0x61, 0x6b, 0x75, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x22, 0x60, 0x0a, 0x13, 0x57, 0x61, 0x6b,
|
||||
0x75, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x12, 0x22, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49,
|
||||
0x64, 0x88, 0x01, 0x01, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02,
|
||||
0x20, 0x03, 0x28, 0x0d, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x42, 0x0d, 0x0a, 0x0b,
|
||||
0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x22, 0x61, 0x0a, 0x14, 0x57,
|
||||
0x61, 0x6b, 0x75, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69,
|
||||
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74,
|
||||
0x65, 0x72, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64,
|
||||
0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x42,
|
||||
0x0d, 0x0a, 0x0b, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x62, 0x06,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x77, 0x61, 0x6b, 0x75, 0x2e, 0x6d, 0x65, 0x74, 0x61,
|
||||
0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x22, 0x60, 0x0a, 0x13, 0x57, 0x61, 0x6b, 0x75, 0x4d,
|
||||
0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x22,
|
||||
0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
|
||||
0x28, 0x0d, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x88,
|
||||
0x01, 0x01, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03,
|
||||
0x28, 0x0d, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x63,
|
||||
0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x22, 0x61, 0x0a, 0x14, 0x57, 0x61, 0x6b,
|
||||
0x75, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x12, 0x22, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
|
||||
0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18,
|
||||
0x02, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x42, 0x0d, 0x0a,
|
||||
0x0b, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x62, 0x06, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
|
@ -164,8 +167,8 @@ func file_waku_metadata_proto_rawDescGZIP() []byte {
|
|||
|
||||
var file_waku_metadata_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||
var file_waku_metadata_proto_goTypes = []interface{}{
|
||||
(*WakuMetadataRequest)(nil), // 0: pb.WakuMetadataRequest
|
||||
(*WakuMetadataResponse)(nil), // 1: pb.WakuMetadataResponse
|
||||
(*WakuMetadataRequest)(nil), // 0: waku.metadata.v1.WakuMetadataRequest
|
||||
(*WakuMetadataResponse)(nil), // 1: waku.metadata.v1.WakuMetadataResponse
|
||||
}
|
||||
var file_waku_metadata_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
|
|
13
vendor/github.com/waku-org/go-waku/waku/v2/protocol/metadata/pb/waku_metadata.proto
generated
vendored
13
vendor/github.com/waku-org/go-waku/waku/v2/protocol/metadata/pb/waku_metadata.proto
generated
vendored
|
@ -1,13 +0,0 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package pb;
|
||||
|
||||
message WakuMetadataRequest {
|
||||
optional uint32 cluster_id = 1;
|
||||
repeated uint32 shards = 2;
|
||||
}
|
||||
|
||||
message WakuMetadataResponse {
|
||||
optional uint32 cluster_id = 1;
|
||||
repeated uint32 shards = 2;
|
||||
}
|
|
@ -4,7 +4,6 @@ import (
|
|||
"context"
|
||||
"errors"
|
||||
"math"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
|
@ -57,6 +56,7 @@ func (wakuM *WakuMetadata) SetHost(h host.Host) {
|
|||
func (wakuM *WakuMetadata) Start(ctx context.Context) error {
|
||||
if wakuM.clusterID == 0 {
|
||||
wakuM.log.Warn("no clusterID is specified. Protocol will not be initialized")
|
||||
return nil
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
|
@ -135,16 +135,21 @@ func (wakuM *WakuMetadata) Request(ctx context.Context, peerID peer.ID) (*protoc
|
|||
stream.Close()
|
||||
|
||||
if response.ClusterId == nil {
|
||||
return nil, nil // Node is not using sharding
|
||||
return nil, errors.New("node did not provide a waku clusterid")
|
||||
}
|
||||
|
||||
result := &protocol.RelayShards{}
|
||||
result.ClusterID = uint16(*response.ClusterId)
|
||||
rClusterID := uint16(*response.ClusterId)
|
||||
var rShardIDs []uint16
|
||||
for _, i := range response.Shards {
|
||||
result.ShardIDs = append(result.ShardIDs, uint16(i))
|
||||
rShardIDs = append(rShardIDs, uint16(i))
|
||||
}
|
||||
|
||||
return result, nil
|
||||
rs, err := protocol.NewRelayShards(rClusterID, rShardIDs...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &rs, nil
|
||||
}
|
||||
|
||||
func (wakuM *WakuMetadata) onRequest(ctx context.Context) func(network.Stream) {
|
||||
|
@ -209,6 +214,15 @@ func (wakuM *WakuMetadata) ListenClose(n network.Network, m multiaddr.Multiaddr)
|
|||
// Do nothing
|
||||
}
|
||||
|
||||
func (wakuM *WakuMetadata) disconnectPeer(peerID peer.ID, reason error) {
|
||||
logger := wakuM.log.With(logging.HostID("peerID", peerID))
|
||||
logger.Error("disconnecting from peer", zap.Error(reason))
|
||||
wakuM.h.Peerstore().RemovePeer(peerID)
|
||||
if err := wakuM.h.Network().ClosePeer(peerID); err != nil {
|
||||
logger.Error("could not disconnect from peer", zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
// Connected is called when a connection is opened
|
||||
func (wakuM *WakuMetadata) Connected(n network.Network, cc network.Conn) {
|
||||
go func() {
|
||||
|
@ -219,30 +233,14 @@ func (wakuM *WakuMetadata) Connected(n network.Network, cc network.Conn) {
|
|||
|
||||
peerID := cc.RemotePeer()
|
||||
|
||||
logger := wakuM.log.With(logging.HostID("peerID", peerID))
|
||||
|
||||
shouldDisconnect := true
|
||||
shard, err := wakuM.Request(wakuM.ctx, peerID)
|
||||
if err == nil {
|
||||
if shard == nil {
|
||||
err = errors.New("no shard reported")
|
||||
} else if shard.ClusterID != wakuM.clusterID {
|
||||
err = errors.New("different clusterID reported")
|
||||
}
|
||||
} else {
|
||||
// Only disconnect from peers if they support the protocol
|
||||
// TODO: open a PR in go-libp2p to create a var with this error to not have to compare strings but use errors.Is instead
|
||||
if strings.Contains(err.Error(), "protocols not supported") {
|
||||
shouldDisconnect = false
|
||||
}
|
||||
if err != nil {
|
||||
wakuM.disconnectPeer(peerID, err)
|
||||
return
|
||||
}
|
||||
|
||||
if shouldDisconnect && err != nil {
|
||||
logger.Error("disconnecting from peer", zap.Error(err))
|
||||
wakuM.h.Peerstore().RemovePeer(peerID)
|
||||
if err := wakuM.h.Network().ClosePeer(peerID); err != nil {
|
||||
logger.Error("could not disconnect from peer", zap.Error(err))
|
||||
}
|
||||
if shard.ClusterID != wakuM.clusterID {
|
||||
wakuM.disconnectPeer(peerID, errors.New("different clusterID reported"))
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
|
|
@ -0,0 +1,29 @@
|
|||
package pb
|
||||
|
||||
import (
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func (m *WakuMessage) MarshalJSON() ([]byte, error) {
|
||||
return (protojson.MarshalOptions{}).Marshal(m)
|
||||
}
|
||||
|
||||
func Unmarshal(data []byte) (*WakuMessage, error) {
|
||||
msg := &WakuMessage{}
|
||||
err := proto.Unmarshal(data, msg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = msg.Validate()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return msg, nil
|
||||
}
|
||||
|
||||
func (m *WakuMessage) UnmarshalJSON(data []byte) error {
|
||||
return (protojson.UnmarshalOptions{}).Unmarshal(data, m)
|
||||
}
|
|
@ -1,3 +1,3 @@
|
|||
package pb
|
||||
|
||||
//go:generate protoc -I. --go_opt=paths=source_relative --go_opt=Mwaku_message.proto=github.com/waku-org/go-waku/waku/v2/protocol/pb --go_out=. ./waku_message.proto
|
||||
//go:generate protoc -I./../waku-proto/waku/message/v1/. -I./../waku-proto/ --go_opt=paths=source_relative --go_opt=Mmessage.proto=github.com/waku-org/go-waku/waku/v2/pb --go_out=. ./../waku-proto/waku/message/v1/message.proto
|
||||
|
|
210
vendor/github.com/waku-org/go-waku/waku/v2/protocol/pb/message.pb.go
generated
vendored
Normal file
210
vendor/github.com/waku-org/go-waku/waku/v2/protocol/pb/message.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,210 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.31.0
|
||||
// protoc v4.24.4
|
||||
// source: message.proto
|
||||
|
||||
// 14/WAKU2-MESSAGE rfc: https://rfc.vac.dev/spec/14/
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type WakuMessage struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"`
|
||||
ContentTopic string `protobuf:"bytes,2,opt,name=content_topic,json=contentTopic,proto3" json:"content_topic,omitempty"`
|
||||
Version *uint32 `protobuf:"varint,3,opt,name=version,proto3,oneof" json:"version,omitempty"`
|
||||
Timestamp *int64 `protobuf:"zigzag64,10,opt,name=timestamp,proto3,oneof" json:"timestamp,omitempty"`
|
||||
Meta []byte `protobuf:"bytes,11,opt,name=meta,proto3,oneof" json:"meta,omitempty"`
|
||||
Ephemeral *bool `protobuf:"varint,31,opt,name=ephemeral,proto3,oneof" json:"ephemeral,omitempty"`
|
||||
RateLimitProof []byte `protobuf:"bytes,21,opt,name=rate_limit_proof,json=rateLimitProof,proto3,oneof" json:"rate_limit_proof,omitempty"`
|
||||
}
|
||||
|
||||
func (x *WakuMessage) Reset() {
|
||||
*x = WakuMessage{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_message_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *WakuMessage) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*WakuMessage) ProtoMessage() {}
|
||||
|
||||
func (x *WakuMessage) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_message_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use WakuMessage.ProtoReflect.Descriptor instead.
|
||||
func (*WakuMessage) Descriptor() ([]byte, []int) {
|
||||
return file_message_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *WakuMessage) GetPayload() []byte {
|
||||
if x != nil {
|
||||
return x.Payload
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *WakuMessage) GetContentTopic() string {
|
||||
if x != nil {
|
||||
return x.ContentTopic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *WakuMessage) GetVersion() uint32 {
|
||||
if x != nil && x.Version != nil {
|
||||
return *x.Version
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *WakuMessage) GetTimestamp() int64 {
|
||||
if x != nil && x.Timestamp != nil {
|
||||
return *x.Timestamp
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *WakuMessage) GetMeta() []byte {
|
||||
if x != nil {
|
||||
return x.Meta
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *WakuMessage) GetEphemeral() bool {
|
||||
if x != nil && x.Ephemeral != nil {
|
||||
return *x.Ephemeral
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *WakuMessage) GetRateLimitProof() []byte {
|
||||
if x != nil {
|
||||
return x.RateLimitProof
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_message_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_message_proto_rawDesc = []byte{
|
||||
0x0a, 0x0d, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
|
||||
0x0f, 0x77, 0x61, 0x6b, 0x75, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x22, 0xbf, 0x02, 0x0a, 0x0b, 0x57, 0x61, 0x6b, 0x75, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
|
||||
0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f,
|
||||
0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12,
|
||||
0x1d, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d,
|
||||
0x48, 0x00, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x21,
|
||||
0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28,
|
||||
0x12, 0x48, 0x01, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x88, 0x01,
|
||||
0x01, 0x12, 0x17, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x48,
|
||||
0x02, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x88, 0x01, 0x01, 0x12, 0x21, 0x0a, 0x09, 0x65, 0x70,
|
||||
0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x48, 0x03, 0x52,
|
||||
0x09, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x2d, 0x0a,
|
||||
0x10, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x6f,
|
||||
0x66, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x04, 0x52, 0x0e, 0x72, 0x61, 0x74, 0x65, 0x4c,
|
||||
0x69, 0x6d, 0x69, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08,
|
||||
0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x74, 0x69, 0x6d,
|
||||
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x42,
|
||||
0x0c, 0x0a, 0x0a, 0x5f, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x42, 0x13, 0x0a,
|
||||
0x11, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x70, 0x72, 0x6f,
|
||||
0x6f, 0x66, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_message_proto_rawDescOnce sync.Once
|
||||
file_message_proto_rawDescData = file_message_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_message_proto_rawDescGZIP() []byte {
|
||||
file_message_proto_rawDescOnce.Do(func() {
|
||||
file_message_proto_rawDescData = protoimpl.X.CompressGZIP(file_message_proto_rawDescData)
|
||||
})
|
||||
return file_message_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_message_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
|
||||
var file_message_proto_goTypes = []interface{}{
|
||||
(*WakuMessage)(nil), // 0: waku.message.v1.WakuMessage
|
||||
}
|
||||
var file_message_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_message_proto_init() }
|
||||
func file_message_proto_init() {
|
||||
if File_message_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_message_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*WakuMessage); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
file_message_proto_msgTypes[0].OneofWrappers = []interface{}{}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_message_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 1,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_message_proto_goTypes,
|
||||
DependencyIndexes: file_message_proto_depIdxs,
|
||||
MessageInfos: file_message_proto_msgTypes,
|
||||
}.Build()
|
||||
File_message_proto = out.File
|
||||
file_message_proto_rawDesc = nil
|
||||
file_message_proto_goTypes = nil
|
||||
file_message_proto_depIdxs = nil
|
||||
}
|
|
@ -2,46 +2,28 @@ package pb
|
|||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
const MaxMetaAttrLength = 64
|
||||
|
||||
var (
|
||||
errMissingPayload = errors.New("missing Payload field")
|
||||
errMissingContentTopic = errors.New("missing ContentTopic field")
|
||||
errInvalidMetaLength = errors.New("invalid length for Meta field")
|
||||
ErrMissingPayload = errors.New("missing Payload field")
|
||||
ErrMissingContentTopic = errors.New("missing ContentTopic field")
|
||||
ErrInvalidMetaLength = errors.New("invalid length for Meta field")
|
||||
)
|
||||
|
||||
func (msg *WakuMessage) Validate() error {
|
||||
if len(msg.Payload) == 0 {
|
||||
return errMissingPayload
|
||||
return ErrMissingPayload
|
||||
}
|
||||
|
||||
if msg.ContentTopic == "" {
|
||||
return errMissingContentTopic
|
||||
return ErrMissingContentTopic
|
||||
}
|
||||
|
||||
if len(msg.Meta) > MaxMetaAttrLength {
|
||||
return errInvalidMetaLength
|
||||
return ErrInvalidMetaLength
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func Unmarshal(data []byte) (*WakuMessage, error) {
|
||||
msg := &WakuMessage{}
|
||||
err := proto.Unmarshal(data, msg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = msg.Validate()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return msg, nil
|
||||
|
||||
}
|
||||
|
|
|
@ -1,324 +0,0 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.26.0
|
||||
// protoc v3.21.12
|
||||
// source: waku_message.proto
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type RateLimitProof struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Proof []byte `protobuf:"bytes,1,opt,name=proof,proto3" json:"proof,omitempty"`
|
||||
MerkleRoot []byte `protobuf:"bytes,2,opt,name=merkle_root,json=merkleRoot,proto3" json:"merkle_root,omitempty"`
|
||||
Epoch []byte `protobuf:"bytes,3,opt,name=epoch,proto3" json:"epoch,omitempty"`
|
||||
ShareX []byte `protobuf:"bytes,4,opt,name=share_x,json=shareX,proto3" json:"share_x,omitempty"`
|
||||
ShareY []byte `protobuf:"bytes,5,opt,name=share_y,json=shareY,proto3" json:"share_y,omitempty"`
|
||||
Nullifier []byte `protobuf:"bytes,6,opt,name=nullifier,proto3" json:"nullifier,omitempty"`
|
||||
RlnIdentifier []byte `protobuf:"bytes,7,opt,name=rln_identifier,json=rlnIdentifier,proto3" json:"rln_identifier,omitempty"`
|
||||
}
|
||||
|
||||
func (x *RateLimitProof) Reset() {
|
||||
*x = RateLimitProof{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_waku_message_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *RateLimitProof) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*RateLimitProof) ProtoMessage() {}
|
||||
|
||||
func (x *RateLimitProof) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_waku_message_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use RateLimitProof.ProtoReflect.Descriptor instead.
|
||||
func (*RateLimitProof) Descriptor() ([]byte, []int) {
|
||||
return file_waku_message_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *RateLimitProof) GetProof() []byte {
|
||||
if x != nil {
|
||||
return x.Proof
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *RateLimitProof) GetMerkleRoot() []byte {
|
||||
if x != nil {
|
||||
return x.MerkleRoot
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *RateLimitProof) GetEpoch() []byte {
|
||||
if x != nil {
|
||||
return x.Epoch
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *RateLimitProof) GetShareX() []byte {
|
||||
if x != nil {
|
||||
return x.ShareX
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *RateLimitProof) GetShareY() []byte {
|
||||
if x != nil {
|
||||
return x.ShareY
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *RateLimitProof) GetNullifier() []byte {
|
||||
if x != nil {
|
||||
return x.Nullifier
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *RateLimitProof) GetRlnIdentifier() []byte {
|
||||
if x != nil {
|
||||
return x.RlnIdentifier
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type WakuMessage struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"`
|
||||
ContentTopic string `protobuf:"bytes,2,opt,name=contentTopic,proto3" json:"contentTopic,omitempty"`
|
||||
Version uint32 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"`
|
||||
Timestamp int64 `protobuf:"zigzag64,10,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
|
||||
Meta []byte `protobuf:"bytes,11,opt,name=meta,proto3" json:"meta,omitempty"`
|
||||
RateLimitProof *RateLimitProof `protobuf:"bytes,21,opt,name=rate_limit_proof,json=rateLimitProof,proto3" json:"rate_limit_proof,omitempty"`
|
||||
Ephemeral bool `protobuf:"varint,31,opt,name=ephemeral,proto3" json:"ephemeral,omitempty"`
|
||||
}
|
||||
|
||||
func (x *WakuMessage) Reset() {
|
||||
*x = WakuMessage{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_waku_message_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *WakuMessage) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*WakuMessage) ProtoMessage() {}
|
||||
|
||||
func (x *WakuMessage) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_waku_message_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use WakuMessage.ProtoReflect.Descriptor instead.
|
||||
func (*WakuMessage) Descriptor() ([]byte, []int) {
|
||||
return file_waku_message_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *WakuMessage) GetPayload() []byte {
|
||||
if x != nil {
|
||||
return x.Payload
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *WakuMessage) GetContentTopic() string {
|
||||
if x != nil {
|
||||
return x.ContentTopic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *WakuMessage) GetVersion() uint32 {
|
||||
if x != nil {
|
||||
return x.Version
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *WakuMessage) GetTimestamp() int64 {
|
||||
if x != nil {
|
||||
return x.Timestamp
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *WakuMessage) GetMeta() []byte {
|
||||
if x != nil {
|
||||
return x.Meta
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *WakuMessage) GetRateLimitProof() *RateLimitProof {
|
||||
if x != nil {
|
||||
return x.RateLimitProof
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *WakuMessage) GetEphemeral() bool {
|
||||
if x != nil {
|
||||
return x.Ephemeral
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var File_waku_message_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_waku_message_proto_rawDesc = []byte{
|
||||
0x0a, 0x12, 0x77, 0x61, 0x6b, 0x75, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x22, 0xd4, 0x01, 0x0a, 0x0e, 0x52, 0x61, 0x74,
|
||||
0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x14, 0x0a, 0x05, 0x70,
|
||||
0x72, 0x6f, 0x6f, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x6f,
|
||||
0x66, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74,
|
||||
0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x52, 0x6f,
|
||||
0x6f, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28,
|
||||
0x0c, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x68, 0x61, 0x72,
|
||||
0x65, 0x5f, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x65,
|
||||
0x58, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x68, 0x61, 0x72, 0x65, 0x5f, 0x79, 0x18, 0x05, 0x20, 0x01,
|
||||
0x28, 0x0c, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x65, 0x59, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x75,
|
||||
0x6c, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6e,
|
||||
0x75, 0x6c, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x6c, 0x6e, 0x5f,
|
||||
0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c,
|
||||
0x52, 0x0d, 0x72, 0x6c, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x22,
|
||||
0xf3, 0x01, 0x0a, 0x0b, 0x57, 0x61, 0x6b, 0x75, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12,
|
||||
0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c,
|
||||
0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x22, 0x0a, 0x0c, 0x63, 0x6f, 0x6e,
|
||||
0x74, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x18, 0x0a,
|
||||
0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07,
|
||||
0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73,
|
||||
0x74, 0x61, 0x6d, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x12, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65,
|
||||
0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x0b, 0x20,
|
||||
0x01, 0x28, 0x0c, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x3c, 0x0a, 0x10, 0x72, 0x61, 0x74,
|
||||
0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x15, 0x20,
|
||||
0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d,
|
||||
0x69, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x0e, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d,
|
||||
0x69, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x70, 0x68, 0x65, 0x6d,
|
||||
0x65, 0x72, 0x61, 0x6c, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, 0x70, 0x68, 0x65,
|
||||
0x6d, 0x65, 0x72, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_waku_message_proto_rawDescOnce sync.Once
|
||||
file_waku_message_proto_rawDescData = file_waku_message_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_waku_message_proto_rawDescGZIP() []byte {
|
||||
file_waku_message_proto_rawDescOnce.Do(func() {
|
||||
file_waku_message_proto_rawDescData = protoimpl.X.CompressGZIP(file_waku_message_proto_rawDescData)
|
||||
})
|
||||
return file_waku_message_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_waku_message_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||
var file_waku_message_proto_goTypes = []interface{}{
|
||||
(*RateLimitProof)(nil), // 0: pb.RateLimitProof
|
||||
(*WakuMessage)(nil), // 1: pb.WakuMessage
|
||||
}
|
||||
var file_waku_message_proto_depIdxs = []int32{
|
||||
0, // 0: pb.WakuMessage.rate_limit_proof:type_name -> pb.RateLimitProof
|
||||
1, // [1:1] is the sub-list for method output_type
|
||||
1, // [1:1] is the sub-list for method input_type
|
||||
1, // [1:1] is the sub-list for extension type_name
|
||||
1, // [1:1] is the sub-list for extension extendee
|
||||
0, // [0:1] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_waku_message_proto_init() }
|
||||
func file_waku_message_proto_init() {
|
||||
if File_waku_message_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_waku_message_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*RateLimitProof); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_waku_message_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*WakuMessage); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_waku_message_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 2,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_waku_message_proto_goTypes,
|
||||
DependencyIndexes: file_waku_message_proto_depIdxs,
|
||||
MessageInfos: file_waku_message_proto_msgTypes,
|
||||
}.Build()
|
||||
File_waku_message_proto = out.File
|
||||
file_waku_message_proto_rawDesc = nil
|
||||
file_waku_message_proto_goTypes = nil
|
||||
file_waku_message_proto_depIdxs = nil
|
||||
}
|
|
@ -1,23 +0,0 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package pb;
|
||||
|
||||
message RateLimitProof {
|
||||
bytes proof = 1;
|
||||
bytes merkle_root = 2;
|
||||
bytes epoch = 3;
|
||||
bytes share_x = 4;
|
||||
bytes share_y = 5;
|
||||
bytes nullifier = 6;
|
||||
bytes rln_identifier = 7;
|
||||
}
|
||||
|
||||
message WakuMessage {
|
||||
bytes payload = 1;
|
||||
string contentTopic = 2;
|
||||
uint32 version = 3;
|
||||
sint64 timestamp = 10;
|
||||
bytes meta = 11;
|
||||
RateLimitProof rate_limit_proof = 21;
|
||||
bool ephemeral = 31;
|
||||
}
|
|
@ -14,6 +14,7 @@ import (
|
|||
"github.com/waku-org/go-waku/waku/v2/peerstore"
|
||||
wenr "github.com/waku-org/go-waku/waku/v2/protocol/enr"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/service"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
|
@ -26,8 +27,21 @@ func (wakuPX *WakuPeerExchange) Request(ctx context.Context, numPeers int, opts
|
|||
optList := DefaultOptions(wakuPX.h)
|
||||
optList = append(optList, opts...)
|
||||
for _, opt := range optList {
|
||||
opt(params)
|
||||
err := opt(params)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if params.pm != nil && params.peerAddr != nil {
|
||||
pData, err := wakuPX.pm.AddPeer(params.peerAddr, peerstore.Static, []string{}, PeerExchangeID_v20alpha1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
wakuPX.pm.Connect(pData)
|
||||
params.selectedPeer = pData.AddrInfo.ID
|
||||
}
|
||||
|
||||
if params.pm != nil && params.selectedPeer == "" {
|
||||
var err error
|
||||
params.selectedPeer, err = wakuPX.pm.SelectPeer(
|
||||
|
@ -90,9 +104,9 @@ func (wakuPX *WakuPeerExchange) handleResponse(ctx context.Context, response *pb
|
|||
|
||||
for _, p := range response.PeerInfos {
|
||||
enrRecord := &enr.Record{}
|
||||
buf := bytes.NewBuffer(p.ENR)
|
||||
buf := bytes.NewBuffer(p.Enr)
|
||||
|
||||
err := enrRecord.DecodeRLP(rlp.NewStream(buf, uint64(len(p.ENR))))
|
||||
err := enrRecord.DecodeRLP(rlp.NewStream(buf, uint64(len(p.Enr))))
|
||||
if err != nil {
|
||||
wakuPX.log.Error("converting bytes to enr", zap.Error(err))
|
||||
return err
|
||||
|
@ -124,11 +138,11 @@ func (wakuPX *WakuPeerExchange) handleResponse(ctx context.Context, response *pb
|
|||
go func() {
|
||||
defer wakuPX.WaitGroup().Done()
|
||||
|
||||
peerCh := make(chan peermanager.PeerData)
|
||||
peerCh := make(chan service.PeerData)
|
||||
defer close(peerCh)
|
||||
wakuPX.peerConnector.Subscribe(ctx, peerCh)
|
||||
for _, p := range discoveredPeers {
|
||||
peer := peermanager.PeerData{
|
||||
peer := service.PeerData{
|
||||
Origin: peerstore.PeerExchange,
|
||||
AddrInfo: p.addrInfo,
|
||||
ENR: p.enr,
|
||||
|
|
|
@ -9,6 +9,7 @@ import (
|
|||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/hashicorp/golang-lru/simplelru"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange/pb"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// simpleLRU internal uses container/list, which is ring buffer(double linked list)
|
||||
|
@ -17,14 +18,16 @@ type enrCache struct {
|
|||
data *simplelru.LRU
|
||||
rng *rand.Rand
|
||||
mu sync.RWMutex
|
||||
log *zap.Logger
|
||||
}
|
||||
|
||||
// err on negative size
|
||||
func newEnrCache(size int) (*enrCache, error) {
|
||||
func newEnrCache(size int, log *zap.Logger) (*enrCache, error) {
|
||||
inner, err := simplelru.NewLRU(size, nil)
|
||||
return &enrCache{
|
||||
data: inner,
|
||||
rng: rand.New(rand.NewSource(rand.Int63())),
|
||||
log: log.Named("enr-cache"),
|
||||
}, err
|
||||
}
|
||||
|
||||
|
@ -35,6 +38,7 @@ func (c *enrCache) updateCache(node *enode.Node) {
|
|||
currNode, ok := c.data.Get(node.ID())
|
||||
if !ok || node.Seq() > currNode.(*enode.Node).Seq() {
|
||||
c.data.Add(node.ID(), node)
|
||||
c.log.Debug("discovered px peer via discv5", zap.Stringer("enr", node))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -67,7 +71,7 @@ func (c *enrCache) getENRs(neededPeers int) ([]*pb.PeerInfo, error) {
|
|||
}
|
||||
writer.Flush()
|
||||
result = append(result, &pb.PeerInfo{
|
||||
ENR: b.Bytes(),
|
||||
Enr: b.Bytes(),
|
||||
})
|
||||
}
|
||||
return result, nil
|
||||
|
|
2
vendor/github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange/pb/generate.go
generated
vendored
2
vendor/github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange/pb/generate.go
generated
vendored
|
@ -1,3 +1,3 @@
|
|||
package pb
|
||||
|
||||
//go:generate protoc -I. --go_opt=paths=source_relative --go_opt=Mwaku_peer_exchange.proto=github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange/pb --go_out=. ./waku_peer_exchange.proto
|
||||
//go:generate protoc -I./../../waku-proto/waku/peer_exchange/v2alpha1/. -I./../../waku-proto/ --go_opt=paths=source_relative --go_opt=Mpeer_exchange.proto=github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange/pb --go_out=. ./../../waku-proto/waku/peer_exchange/v2alpha1/peer_exchange.proto
|
||||
|
|
|
@ -1,8 +1,11 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.26.0
|
||||
// protoc v3.21.12
|
||||
// source: waku_peer_exchange.proto
|
||||
// protoc-gen-go v1.31.0
|
||||
// protoc v4.24.4
|
||||
// source: peer_exchange.proto
|
||||
|
||||
// 34/WAKU2-PEER-EXCHANGE rfc: https://rfc.vac.dev/spec/34/
|
||||
// Protocol identifier: /vac/waku/peer-exchange/2.0.0-alpha1
|
||||
|
||||
package pb
|
||||
|
||||
|
@ -25,13 +28,13 @@ type PeerInfo struct {
|
|||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
ENR []byte `protobuf:"bytes,1,opt,name=ENR,proto3" json:"ENR,omitempty"`
|
||||
Enr []byte `protobuf:"bytes,1,opt,name=enr,proto3" json:"enr,omitempty"`
|
||||
}
|
||||
|
||||
func (x *PeerInfo) Reset() {
|
||||
*x = PeerInfo{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_waku_peer_exchange_proto_msgTypes[0]
|
||||
mi := &file_peer_exchange_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
@ -44,7 +47,7 @@ func (x *PeerInfo) String() string {
|
|||
func (*PeerInfo) ProtoMessage() {}
|
||||
|
||||
func (x *PeerInfo) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_waku_peer_exchange_proto_msgTypes[0]
|
||||
mi := &file_peer_exchange_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
@ -57,12 +60,12 @@ func (x *PeerInfo) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use PeerInfo.ProtoReflect.Descriptor instead.
|
||||
func (*PeerInfo) Descriptor() ([]byte, []int) {
|
||||
return file_waku_peer_exchange_proto_rawDescGZIP(), []int{0}
|
||||
return file_peer_exchange_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *PeerInfo) GetENR() []byte {
|
||||
func (x *PeerInfo) GetEnr() []byte {
|
||||
if x != nil {
|
||||
return x.ENR
|
||||
return x.Enr
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -72,13 +75,13 @@ type PeerExchangeQuery struct {
|
|||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
NumPeers uint64 `protobuf:"varint,1,opt,name=numPeers,proto3" json:"numPeers,omitempty"` // number of peers requested
|
||||
NumPeers uint64 `protobuf:"varint,1,opt,name=num_peers,json=numPeers,proto3" json:"num_peers,omitempty"`
|
||||
}
|
||||
|
||||
func (x *PeerExchangeQuery) Reset() {
|
||||
*x = PeerExchangeQuery{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_waku_peer_exchange_proto_msgTypes[1]
|
||||
mi := &file_peer_exchange_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
@ -91,7 +94,7 @@ func (x *PeerExchangeQuery) String() string {
|
|||
func (*PeerExchangeQuery) ProtoMessage() {}
|
||||
|
||||
func (x *PeerExchangeQuery) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_waku_peer_exchange_proto_msgTypes[1]
|
||||
mi := &file_peer_exchange_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
@ -104,7 +107,7 @@ func (x *PeerExchangeQuery) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use PeerExchangeQuery.ProtoReflect.Descriptor instead.
|
||||
func (*PeerExchangeQuery) Descriptor() ([]byte, []int) {
|
||||
return file_waku_peer_exchange_proto_rawDescGZIP(), []int{1}
|
||||
return file_peer_exchange_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *PeerExchangeQuery) GetNumPeers() uint64 {
|
||||
|
@ -119,13 +122,13 @@ type PeerExchangeResponse struct {
|
|||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
PeerInfos []*PeerInfo `protobuf:"bytes,1,rep,name=peerInfos,proto3" json:"peerInfos,omitempty"`
|
||||
PeerInfos []*PeerInfo `protobuf:"bytes,1,rep,name=peer_infos,json=peerInfos,proto3" json:"peer_infos,omitempty"`
|
||||
}
|
||||
|
||||
func (x *PeerExchangeResponse) Reset() {
|
||||
*x = PeerExchangeResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_waku_peer_exchange_proto_msgTypes[2]
|
||||
mi := &file_peer_exchange_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
@ -138,7 +141,7 @@ func (x *PeerExchangeResponse) String() string {
|
|||
func (*PeerExchangeResponse) ProtoMessage() {}
|
||||
|
||||
func (x *PeerExchangeResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_waku_peer_exchange_proto_msgTypes[2]
|
||||
mi := &file_peer_exchange_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
@ -151,7 +154,7 @@ func (x *PeerExchangeResponse) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use PeerExchangeResponse.ProtoReflect.Descriptor instead.
|
||||
func (*PeerExchangeResponse) Descriptor() ([]byte, []int) {
|
||||
return file_waku_peer_exchange_proto_rawDescGZIP(), []int{2}
|
||||
return file_peer_exchange_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *PeerExchangeResponse) GetPeerInfos() []*PeerInfo {
|
||||
|
@ -173,7 +176,7 @@ type PeerExchangeRPC struct {
|
|||
func (x *PeerExchangeRPC) Reset() {
|
||||
*x = PeerExchangeRPC{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_waku_peer_exchange_proto_msgTypes[3]
|
||||
mi := &file_peer_exchange_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
@ -186,7 +189,7 @@ func (x *PeerExchangeRPC) String() string {
|
|||
func (*PeerExchangeRPC) ProtoMessage() {}
|
||||
|
||||
func (x *PeerExchangeRPC) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_waku_peer_exchange_proto_msgTypes[3]
|
||||
mi := &file_peer_exchange_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
@ -199,7 +202,7 @@ func (x *PeerExchangeRPC) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use PeerExchangeRPC.ProtoReflect.Descriptor instead.
|
||||
func (*PeerExchangeRPC) Descriptor() ([]byte, []int) {
|
||||
return file_waku_peer_exchange_proto_rawDescGZIP(), []int{3}
|
||||
return file_peer_exchange_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *PeerExchangeRPC) GetQuery() *PeerExchangeQuery {
|
||||
|
@ -216,53 +219,60 @@ func (x *PeerExchangeRPC) GetResponse() *PeerExchangeResponse {
|
|||
return nil
|
||||
}
|
||||
|
||||
var File_waku_peer_exchange_proto protoreflect.FileDescriptor
|
||||
var File_peer_exchange_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_waku_peer_exchange_proto_rawDesc = []byte{
|
||||
0x0a, 0x18, 0x77, 0x61, 0x6b, 0x75, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x63, 0x68,
|
||||
0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x22, 0x1c,
|
||||
0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, 0x0a, 0x03, 0x45, 0x4e,
|
||||
0x52, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x45, 0x4e, 0x52, 0x22, 0x2f, 0x0a, 0x11,
|
||||
0x50, 0x65, 0x65, 0x72, 0x45, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x51, 0x75, 0x65, 0x72,
|
||||
0x79, 0x12, 0x1a, 0x0a, 0x08, 0x6e, 0x75, 0x6d, 0x50, 0x65, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x75, 0x6d, 0x50, 0x65, 0x65, 0x72, 0x73, 0x22, 0x42, 0x0a,
|
||||
0x14, 0x50, 0x65, 0x65, 0x72, 0x45, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x09, 0x70, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66,
|
||||
0x6f, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x65,
|
||||
0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x70, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f,
|
||||
0x73, 0x22, 0x74, 0x0a, 0x0f, 0x50, 0x65, 0x65, 0x72, 0x45, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67,
|
||||
0x65, 0x52, 0x50, 0x43, 0x12, 0x2b, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x45, 0x78, 0x63,
|
||||
0x68, 0x61, 0x6e, 0x67, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72,
|
||||
0x79, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x02, 0x20,
|
||||
0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x45, 0x78, 0x63,
|
||||
0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
var file_peer_exchange_proto_rawDesc = []byte{
|
||||
0x0a, 0x13, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1b, 0x77, 0x61, 0x6b, 0x75, 0x2e, 0x70, 0x65, 0x65, 0x72,
|
||||
0x5f, 0x65, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68,
|
||||
0x61, 0x31, 0x22, 0x1c, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10,
|
||||
0x0a, 0x03, 0x65, 0x6e, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x65, 0x6e, 0x72,
|
||||
0x22, 0x30, 0x0a, 0x11, 0x50, 0x65, 0x65, 0x72, 0x45, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65,
|
||||
0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x75, 0x6d, 0x5f, 0x70, 0x65, 0x65,
|
||||
0x72, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x75, 0x6d, 0x50, 0x65, 0x65,
|
||||
0x72, 0x73, 0x22, 0x5c, 0x0a, 0x14, 0x50, 0x65, 0x65, 0x72, 0x45, 0x78, 0x63, 0x68, 0x61, 0x6e,
|
||||
0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x70, 0x65,
|
||||
0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25,
|
||||
0x2e, 0x77, 0x61, 0x6b, 0x75, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x63, 0x68, 0x61,
|
||||
0x6e, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x65, 0x65,
|
||||
0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x70, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x73,
|
||||
0x22, 0xa6, 0x01, 0x0a, 0x0f, 0x50, 0x65, 0x65, 0x72, 0x45, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67,
|
||||
0x65, 0x52, 0x50, 0x43, 0x12, 0x44, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x77, 0x61, 0x6b, 0x75, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x5f,
|
||||
0x65, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61,
|
||||
0x31, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x45, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x51, 0x75,
|
||||
0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x4d, 0x0a, 0x08, 0x72, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x77,
|
||||
0x61, 0x6b, 0x75, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67,
|
||||
0x65, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x45,
|
||||
0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52,
|
||||
0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_waku_peer_exchange_proto_rawDescOnce sync.Once
|
||||
file_waku_peer_exchange_proto_rawDescData = file_waku_peer_exchange_proto_rawDesc
|
||||
file_peer_exchange_proto_rawDescOnce sync.Once
|
||||
file_peer_exchange_proto_rawDescData = file_peer_exchange_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_waku_peer_exchange_proto_rawDescGZIP() []byte {
|
||||
file_waku_peer_exchange_proto_rawDescOnce.Do(func() {
|
||||
file_waku_peer_exchange_proto_rawDescData = protoimpl.X.CompressGZIP(file_waku_peer_exchange_proto_rawDescData)
|
||||
func file_peer_exchange_proto_rawDescGZIP() []byte {
|
||||
file_peer_exchange_proto_rawDescOnce.Do(func() {
|
||||
file_peer_exchange_proto_rawDescData = protoimpl.X.CompressGZIP(file_peer_exchange_proto_rawDescData)
|
||||
})
|
||||
return file_waku_peer_exchange_proto_rawDescData
|
||||
return file_peer_exchange_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_waku_peer_exchange_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
|
||||
var file_waku_peer_exchange_proto_goTypes = []interface{}{
|
||||
(*PeerInfo)(nil), // 0: pb.PeerInfo
|
||||
(*PeerExchangeQuery)(nil), // 1: pb.PeerExchangeQuery
|
||||
(*PeerExchangeResponse)(nil), // 2: pb.PeerExchangeResponse
|
||||
(*PeerExchangeRPC)(nil), // 3: pb.PeerExchangeRPC
|
||||
var file_peer_exchange_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
|
||||
var file_peer_exchange_proto_goTypes = []interface{}{
|
||||
(*PeerInfo)(nil), // 0: waku.peer_exchange.v2alpha1.PeerInfo
|
||||
(*PeerExchangeQuery)(nil), // 1: waku.peer_exchange.v2alpha1.PeerExchangeQuery
|
||||
(*PeerExchangeResponse)(nil), // 2: waku.peer_exchange.v2alpha1.PeerExchangeResponse
|
||||
(*PeerExchangeRPC)(nil), // 3: waku.peer_exchange.v2alpha1.PeerExchangeRPC
|
||||
}
|
||||
var file_waku_peer_exchange_proto_depIdxs = []int32{
|
||||
0, // 0: pb.PeerExchangeResponse.peerInfos:type_name -> pb.PeerInfo
|
||||
1, // 1: pb.PeerExchangeRPC.query:type_name -> pb.PeerExchangeQuery
|
||||
2, // 2: pb.PeerExchangeRPC.response:type_name -> pb.PeerExchangeResponse
|
||||
var file_peer_exchange_proto_depIdxs = []int32{
|
||||
0, // 0: waku.peer_exchange.v2alpha1.PeerExchangeResponse.peer_infos:type_name -> waku.peer_exchange.v2alpha1.PeerInfo
|
||||
1, // 1: waku.peer_exchange.v2alpha1.PeerExchangeRPC.query:type_name -> waku.peer_exchange.v2alpha1.PeerExchangeQuery
|
||||
2, // 2: waku.peer_exchange.v2alpha1.PeerExchangeRPC.response:type_name -> waku.peer_exchange.v2alpha1.PeerExchangeResponse
|
||||
3, // [3:3] is the sub-list for method output_type
|
||||
3, // [3:3] is the sub-list for method input_type
|
||||
3, // [3:3] is the sub-list for extension type_name
|
||||
|
@ -270,13 +280,13 @@ var file_waku_peer_exchange_proto_depIdxs = []int32{
|
|||
0, // [0:3] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_waku_peer_exchange_proto_init() }
|
||||
func file_waku_peer_exchange_proto_init() {
|
||||
if File_waku_peer_exchange_proto != nil {
|
||||
func init() { file_peer_exchange_proto_init() }
|
||||
func file_peer_exchange_proto_init() {
|
||||
if File_peer_exchange_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_waku_peer_exchange_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
file_peer_exchange_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*PeerInfo); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
|
@ -288,7 +298,7 @@ func file_waku_peer_exchange_proto_init() {
|
|||
return nil
|
||||
}
|
||||
}
|
||||
file_waku_peer_exchange_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
file_peer_exchange_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*PeerExchangeQuery); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
|
@ -300,7 +310,7 @@ func file_waku_peer_exchange_proto_init() {
|
|||
return nil
|
||||
}
|
||||
}
|
||||
file_waku_peer_exchange_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
file_peer_exchange_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*PeerExchangeResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
|
@ -312,7 +322,7 @@ func file_waku_peer_exchange_proto_init() {
|
|||
return nil
|
||||
}
|
||||
}
|
||||
file_waku_peer_exchange_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
file_peer_exchange_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*PeerExchangeRPC); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
|
@ -329,18 +339,18 @@ func file_waku_peer_exchange_proto_init() {
|
|||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_waku_peer_exchange_proto_rawDesc,
|
||||
RawDescriptor: file_peer_exchange_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 4,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_waku_peer_exchange_proto_goTypes,
|
||||
DependencyIndexes: file_waku_peer_exchange_proto_depIdxs,
|
||||
MessageInfos: file_waku_peer_exchange_proto_msgTypes,
|
||||
GoTypes: file_peer_exchange_proto_goTypes,
|
||||
DependencyIndexes: file_peer_exchange_proto_depIdxs,
|
||||
MessageInfos: file_peer_exchange_proto_msgTypes,
|
||||
}.Build()
|
||||
File_waku_peer_exchange_proto = out.File
|
||||
file_waku_peer_exchange_proto_rawDesc = nil
|
||||
file_waku_peer_exchange_proto_goTypes = nil
|
||||
file_waku_peer_exchange_proto_depIdxs = nil
|
||||
File_peer_exchange_proto = out.File
|
||||
file_peer_exchange_proto_rawDesc = nil
|
||||
file_peer_exchange_proto_goTypes = nil
|
||||
file_peer_exchange_proto_depIdxs = nil
|
||||
}
|
|
@ -1,20 +0,0 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package pb;
|
||||
|
||||
message PeerInfo {
|
||||
bytes ENR = 1;
|
||||
}
|
||||
|
||||
message PeerExchangeQuery {
|
||||
uint64 numPeers = 1; // number of peers requested
|
||||
}
|
||||
|
||||
message PeerExchangeResponse {
|
||||
repeated PeerInfo peerInfos = 1;
|
||||
}
|
||||
|
||||
message PeerExchangeRPC {
|
||||
PeerExchangeQuery query = 1;
|
||||
PeerExchangeResponse response = 2;
|
||||
}
|
|
@ -18,6 +18,7 @@ import (
|
|||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/enr"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/service"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
|
@ -32,7 +33,7 @@ var (
|
|||
|
||||
// PeerConnector will subscribe to a channel containing the information for all peers found by this discovery protocol
|
||||
type PeerConnector interface {
|
||||
Subscribe(context.Context, <-chan peermanager.PeerData)
|
||||
Subscribe(context.Context, <-chan service.PeerData)
|
||||
}
|
||||
|
||||
type WakuPeerExchange struct {
|
||||
|
@ -42,7 +43,7 @@ type WakuPeerExchange struct {
|
|||
metrics Metrics
|
||||
log *zap.Logger
|
||||
|
||||
*protocol.CommonService
|
||||
*service.CommonService
|
||||
|
||||
peerConnector PeerConnector
|
||||
enrCache *enrCache
|
||||
|
@ -52,18 +53,20 @@ type WakuPeerExchange struct {
|
|||
// Takes an optional peermanager if WakuPeerExchange is being created along with WakuNode.
|
||||
// If using libp2p host, then pass peermanager as nil
|
||||
func NewWakuPeerExchange(disc *discv5.DiscoveryV5, peerConnector PeerConnector, pm *peermanager.PeerManager, reg prometheus.Registerer, log *zap.Logger) (*WakuPeerExchange, error) {
|
||||
newEnrCache, err := newEnrCache(MaxCacheSize)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
wakuPX := new(WakuPeerExchange)
|
||||
wakuPX.disc = disc
|
||||
wakuPX.metrics = newMetrics(reg)
|
||||
wakuPX.log = log.Named("wakupx")
|
||||
wakuPX.enrCache = newEnrCache
|
||||
wakuPX.peerConnector = peerConnector
|
||||
wakuPX.pm = pm
|
||||
wakuPX.CommonService = protocol.NewCommonService()
|
||||
wakuPX.CommonService = service.NewCommonService()
|
||||
|
||||
newEnrCache, err := newEnrCache(MaxCacheSize, wakuPX.log)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
wakuPX.enrCache = newEnrCache
|
||||
|
||||
return wakuPX, nil
|
||||
}
|
||||
|
@ -158,7 +161,6 @@ func (wakuPX *WakuPeerExchange) iterate(ctx context.Context) error {
|
|||
continue
|
||||
}
|
||||
|
||||
wakuPX.log.Debug("Discovered px peers via discv5")
|
||||
wakuPX.enrCache.updateCache(iterator.Node())
|
||||
|
||||
select {
|
||||
|
|
|
@ -1,8 +1,11 @@
|
|||
package peer_exchange
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"github.com/waku-org/go-waku/waku/v2/peermanager"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
@ -10,18 +13,36 @@ import (
|
|||
type PeerExchangeParameters struct {
|
||||
host host.Host
|
||||
selectedPeer peer.ID
|
||||
peerAddr multiaddr.Multiaddr
|
||||
peerSelectionType peermanager.PeerSelection
|
||||
preferredPeers peer.IDSlice
|
||||
pm *peermanager.PeerManager
|
||||
log *zap.Logger
|
||||
}
|
||||
|
||||
type PeerExchangeOption func(*PeerExchangeParameters)
|
||||
type PeerExchangeOption func(*PeerExchangeParameters) error
|
||||
|
||||
// WithPeer is an option used to specify the peerID to push a waku message to
|
||||
// WithPeer is an option used to specify the peerID to fetch peers from
|
||||
func WithPeer(p peer.ID) PeerExchangeOption {
|
||||
return func(params *PeerExchangeParameters) {
|
||||
return func(params *PeerExchangeParameters) error {
|
||||
params.selectedPeer = p
|
||||
if params.peerAddr != nil {
|
||||
return errors.New("peerAddr and peerId options are mutually exclusive")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithPeerAddr is an option used to specify a peerAddress to fetch peers from
|
||||
// This new peer will be added to peerStore.
|
||||
// Note that this option is mutually exclusive to WithPeerAddr, only one of them can be used.
|
||||
func WithPeerAddr(pAddr multiaddr.Multiaddr) PeerExchangeOption {
|
||||
return func(params *PeerExchangeParameters) error {
|
||||
params.peerAddr = pAddr
|
||||
if params.selectedPeer != "" {
|
||||
return errors.New("peerAddr and peerId options are mutually exclusive")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -31,9 +52,10 @@ func WithPeer(p peer.ID) PeerExchangeOption {
|
|||
// from the node peerstore
|
||||
// Note: this option can only be used if WakuNode is initialized which internally intializes the peerManager
|
||||
func WithAutomaticPeerSelection(fromThesePeers ...peer.ID) PeerExchangeOption {
|
||||
return func(params *PeerExchangeParameters) {
|
||||
return func(params *PeerExchangeParameters) error {
|
||||
params.peerSelectionType = peermanager.Automatic
|
||||
params.preferredPeers = fromThesePeers
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -42,9 +64,10 @@ func WithAutomaticPeerSelection(fromThesePeers ...peer.ID) PeerExchangeOption {
|
|||
// from that list assuming it supports the chosen protocol, otherwise it will chose a peer
|
||||
// from the node peerstore
|
||||
func WithFastestPeerSelection(fromThesePeers ...peer.ID) PeerExchangeOption {
|
||||
return func(params *PeerExchangeParameters) {
|
||||
return func(params *PeerExchangeParameters) error {
|
||||
params.peerSelectionType = peermanager.LowestRTT
|
||||
params.preferredPeers = fromThesePeers
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -15,6 +15,7 @@ var DefaultRelaySubscriptionBufferSize int = 1024
|
|||
|
||||
type RelaySubscribeParameters struct {
|
||||
dontConsume bool
|
||||
cacheSize uint
|
||||
}
|
||||
|
||||
type RelaySubscribeOption func(*RelaySubscribeParameters) error
|
||||
|
@ -28,6 +29,13 @@ func WithoutConsumer() RelaySubscribeOption {
|
|||
}
|
||||
}
|
||||
|
||||
func WithCacheSize(size uint) RelaySubscribeOption {
|
||||
return func(params *RelaySubscribeParameters) error {
|
||||
params.cacheSize = size
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func msgIDFn(pmsg *pubsub_pb.Message) string {
|
||||
return string(hash.SHA256(pmsg.Data))
|
||||
}
|
||||
|
|
|
@ -53,6 +53,6 @@ func (m *metricsImpl) RecordMessage(envelope *waku_proto.Envelope) {
|
|||
messageSize.Observe(payloadSizeInKb)
|
||||
pubsubTopic := envelope.PubsubTopic()
|
||||
messages.WithLabelValues(pubsubTopic).Inc()
|
||||
m.log.Debug("waku.relay received", zap.String("pubsubTopic", pubsubTopic), logging.HexString("hash", envelope.Hash()), zap.Int64("receivedTime", envelope.Index().ReceiverTime), zap.Int("payloadSizeBytes", payloadSizeInBytes))
|
||||
m.log.Debug("waku.relay received", zap.String("pubsubTopic", pubsubTopic), logging.HexBytes("hash", envelope.Hash()), zap.Int64("receivedTime", envelope.Index().ReceiverTime), zap.Int("payloadSizeBytes", payloadSizeInBytes))
|
||||
}()
|
||||
}
|
||||
|
|
|
@ -14,7 +14,7 @@ func WithPubSubTopic(pubsubTopic string) PublishOption {
|
|||
}
|
||||
}
|
||||
|
||||
// WithPubSubTopic is used to indicate that the message should be broadcasted in the default pubsub topic
|
||||
// WithDefaultPubsubTopic is used to indicate that the message should be broadcasted in the default pubsub topic
|
||||
func WithDefaultPubsubTopic() PublishOption {
|
||||
return func(params *publishParameters) {
|
||||
params.pubsubTopic = DefaultWakuTopic
|
||||
|
|
|
@ -21,10 +21,10 @@ import (
|
|||
|
||||
func msgHash(pubSubTopic string, msg *pb.WakuMessage) []byte {
|
||||
timestampBytes := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(timestampBytes, uint64(msg.Timestamp))
|
||||
binary.LittleEndian.PutUint64(timestampBytes, uint64(msg.GetTimestamp()))
|
||||
|
||||
var ephemeralByte byte
|
||||
if msg.Ephemeral {
|
||||
if msg.GetEphemeral() {
|
||||
ephemeralByte = 1
|
||||
}
|
||||
|
||||
|
@ -67,9 +67,10 @@ func (w *WakuRelay) topicValidator(topic string) func(ctx context.Context, peerI
|
|||
}
|
||||
|
||||
w.topicValidatorMutex.RLock()
|
||||
validators, exists := w.topicValidators[topic]
|
||||
validators := w.topicValidators[topic]
|
||||
validators = append(validators, w.defaultTopicValidators...)
|
||||
w.topicValidatorMutex.RUnlock()
|
||||
exists := len(validators) > 0
|
||||
|
||||
if exists {
|
||||
for _, v := range validators {
|
||||
|
@ -101,12 +102,12 @@ func (w *WakuRelay) AddSignedTopicValidator(topic string, publicKey *ecdsa.Publi
|
|||
const messageWindowDuration = time.Minute * 5
|
||||
|
||||
func withinTimeWindow(t timesource.Timesource, msg *pb.WakuMessage) bool {
|
||||
if msg.Timestamp == 0 {
|
||||
if msg.GetTimestamp() == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
now := t.Now()
|
||||
msgTime := time.Unix(0, msg.Timestamp)
|
||||
msgTime := time.Unix(0, msg.GetTimestamp())
|
||||
|
||||
return now.Sub(msgTime).Abs() <= messageWindowDuration
|
||||
}
|
||||
|
|
|
@ -18,11 +18,14 @@ import (
|
|||
"github.com/waku-org/go-waku/logging"
|
||||
waku_proto "github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/service"
|
||||
"github.com/waku-org/go-waku/waku/v2/timesource"
|
||||
"github.com/waku-org/go-waku/waku/v2/utils"
|
||||
)
|
||||
|
||||
// WakuRelayID_v200 is the current protocol ID used for WakuRelay
|
||||
const WakuRelayID_v200 = protocol.ID("/vac/waku/relay/2.0.0")
|
||||
const WakuRelayENRField = uint8(1 << 0)
|
||||
|
||||
// DefaultWakuTopic is the default pubsub topic used across all Waku protocols
|
||||
var DefaultWakuTopic string = waku_proto.DefaultPubsubTopic{}.String()
|
||||
|
@ -39,7 +42,8 @@ type WakuRelay struct {
|
|||
timesource timesource.Timesource
|
||||
metrics Metrics
|
||||
|
||||
log *zap.Logger
|
||||
log *zap.Logger
|
||||
logMessages *zap.Logger
|
||||
|
||||
bcaster Broadcaster
|
||||
|
||||
|
@ -49,11 +53,8 @@ type WakuRelay struct {
|
|||
topicValidators map[string][]validatorFn
|
||||
defaultTopicValidators []validatorFn
|
||||
|
||||
// TODO: convert to concurrent maps
|
||||
topicsMutex sync.RWMutex
|
||||
wakuRelayTopics map[string]*pubsub.Topic
|
||||
relaySubs map[string]*pubsub.Subscription
|
||||
topicEvtHanders map[string]*pubsub.TopicEventHandler
|
||||
topicsMutex sync.RWMutex
|
||||
topics map[string]*pubsubTopicSubscriptionDetails
|
||||
|
||||
events event.Bus
|
||||
emitters struct {
|
||||
|
@ -61,8 +62,15 @@ type WakuRelay struct {
|
|||
EvtRelayUnsubscribed event.Emitter
|
||||
EvtPeerTopic event.Emitter
|
||||
}
|
||||
contentSubs map[string]map[int]*Subscription
|
||||
*waku_proto.CommonService
|
||||
|
||||
*service.CommonService
|
||||
}
|
||||
|
||||
type pubsubTopicSubscriptionDetails struct {
|
||||
topic *pubsub.Topic
|
||||
subscription *pubsub.Subscription
|
||||
topicEventHandler *pubsub.TopicEventHandler
|
||||
contentSubs map[int]*Subscription
|
||||
}
|
||||
|
||||
// NewWakuRelay returns a new instance of a WakuRelay struct
|
||||
|
@ -70,20 +78,18 @@ func NewWakuRelay(bcaster Broadcaster, minPeersToPublish int, timesource timesou
|
|||
reg prometheus.Registerer, log *zap.Logger, opts ...pubsub.Option) *WakuRelay {
|
||||
w := new(WakuRelay)
|
||||
w.timesource = timesource
|
||||
w.wakuRelayTopics = make(map[string]*pubsub.Topic)
|
||||
w.relaySubs = make(map[string]*pubsub.Subscription)
|
||||
w.topicEvtHanders = make(map[string]*pubsub.TopicEventHandler)
|
||||
w.topics = make(map[string]*pubsubTopicSubscriptionDetails)
|
||||
w.topicValidators = make(map[string][]validatorFn)
|
||||
w.bcaster = bcaster
|
||||
w.minPeersToPublish = minPeersToPublish
|
||||
w.CommonService = waku_proto.NewCommonService()
|
||||
w.CommonService = service.NewCommonService()
|
||||
w.log = log.Named("relay")
|
||||
w.logMessages = utils.MessagesLogger("relay")
|
||||
w.events = eventbus.NewBus()
|
||||
w.metrics = newMetrics(reg, w.log)
|
||||
w.metrics = newMetrics(reg, w.logMessages)
|
||||
|
||||
// default options required by WakuRelay
|
||||
w.opts = append(w.defaultPubsubOptions(), opts...)
|
||||
w.contentSubs = make(map[string]map[int]*Subscription)
|
||||
return w
|
||||
}
|
||||
|
||||
|
@ -143,7 +149,7 @@ func (w *WakuRelay) Topics() []string {
|
|||
w.topicsMutex.RLock()
|
||||
|
||||
var result []string
|
||||
for topic := range w.relaySubs {
|
||||
for topic := range w.topics {
|
||||
result = append(result, topic)
|
||||
}
|
||||
return result
|
||||
|
@ -153,7 +159,7 @@ func (w *WakuRelay) Topics() []string {
|
|||
func (w *WakuRelay) IsSubscribed(topic string) bool {
|
||||
w.topicsMutex.RLock()
|
||||
defer w.topicsMutex.RUnlock()
|
||||
_, ok := w.relaySubs[topic]
|
||||
_, ok := w.topics[topic]
|
||||
return ok
|
||||
}
|
||||
|
||||
|
@ -163,64 +169,77 @@ func (w *WakuRelay) SetPubSub(pubSub *pubsub.PubSub) {
|
|||
}
|
||||
|
||||
func (w *WakuRelay) upsertTopic(topic string) (*pubsub.Topic, error) {
|
||||
w.topicsMutex.Lock()
|
||||
defer w.topicsMutex.Unlock()
|
||||
|
||||
pubSubTopic, ok := w.wakuRelayTopics[topic]
|
||||
topicData, ok := w.topics[topic]
|
||||
if !ok { // Joins topic if node hasn't joined yet
|
||||
err := w.pubsub.RegisterTopicValidator(topic, w.topicValidator(topic))
|
||||
if err != nil {
|
||||
w.log.Error("failed to register topic validator", zap.String("pubsubTopic", topic), zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newTopic, err := w.pubsub.Join(string(topic))
|
||||
if err != nil {
|
||||
w.log.Error("failed to join pubsubTopic", zap.String("pubsubTopic", topic), zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = newTopic.SetScoreParams(w.topicParams)
|
||||
if err != nil {
|
||||
w.log.Error("failed to set score params", zap.String("pubsubTopic", topic), zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
w.wakuRelayTopics[topic] = newTopic
|
||||
pubSubTopic = newTopic
|
||||
w.topics[topic] = &pubsubTopicSubscriptionDetails{
|
||||
topic: newTopic,
|
||||
}
|
||||
|
||||
return newTopic, nil
|
||||
}
|
||||
return pubSubTopic, nil
|
||||
|
||||
return topicData.topic, nil
|
||||
}
|
||||
|
||||
func (w *WakuRelay) subscribeToPubsubTopic(topic string) (subs *pubsub.Subscription, err error) {
|
||||
sub, ok := w.relaySubs[topic]
|
||||
func (w *WakuRelay) subscribeToPubsubTopic(topic string) (*pubsubTopicSubscriptionDetails, error) {
|
||||
w.topicsMutex.Lock()
|
||||
defer w.topicsMutex.Unlock()
|
||||
w.log.Info("subscribing to underlying pubsubTopic", zap.String("pubsubTopic", topic))
|
||||
|
||||
result, ok := w.topics[topic]
|
||||
if !ok {
|
||||
pubSubTopic, err := w.upsertTopic(topic)
|
||||
if err != nil {
|
||||
w.log.Error("failed to upsert topic", zap.String("pubsubTopic", topic), zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sub, err = pubSubTopic.Subscribe(pubsub.WithBufferSize(1024))
|
||||
subscription, err := pubSubTopic.Subscribe(pubsub.WithBufferSize(1024))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
w.WaitGroup().Add(1)
|
||||
go w.pubsubTopicMsgHandler(topic, sub)
|
||||
go w.pubsubTopicMsgHandler(subscription)
|
||||
|
||||
evtHandler, err := w.addPeerTopicEventListener(pubSubTopic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
w.topicEvtHanders[topic] = evtHandler
|
||||
w.relaySubs[topic] = sub
|
||||
|
||||
w.topics[topic].contentSubs = make(map[int]*Subscription)
|
||||
w.topics[topic].subscription = subscription
|
||||
w.topics[topic].topicEventHandler = evtHandler
|
||||
|
||||
err = w.emitters.EvtRelaySubscribed.Emit(EvtRelaySubscribed{topic, pubSubTopic})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
w.log.Info("subscribing to topic", zap.String("topic", sub.Topic()))
|
||||
w.log.Info("gossipsub subscription", zap.String("pubsubTopic", subscription.Topic()))
|
||||
|
||||
result = w.topics[topic]
|
||||
}
|
||||
|
||||
return sub, nil
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// PublishToTopic is used to broadcast a WakuMessage to a pubsub topic. The pubsubTopic is derived from contentTopic
|
||||
|
@ -257,6 +276,9 @@ func (w *WakuRelay) Publish(ctx context.Context, message *pb.WakuMessage, opts .
|
|||
return nil, errors.New("not enough peers to publish")
|
||||
}
|
||||
|
||||
w.topicsMutex.RLock()
|
||||
defer w.topicsMutex.RUnlock()
|
||||
|
||||
pubSubTopic, err := w.upsertTopic(params.pubsubTopic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -267,6 +289,10 @@ func (w *WakuRelay) Publish(ctx context.Context, message *pb.WakuMessage, opts .
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if len(out) > pubsub.DefaultMaxMessageSize {
|
||||
return nil, errors.New("message size exceeds gossipsub max message size")
|
||||
}
|
||||
|
||||
err = pubSubTopic.Publish(ctx, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -274,26 +300,56 @@ func (w *WakuRelay) Publish(ctx context.Context, message *pb.WakuMessage, opts .
|
|||
|
||||
hash := message.Hash(params.pubsubTopic)
|
||||
|
||||
w.log.Debug("waku.relay published", zap.String("pubsubTopic", params.pubsubTopic), logging.HexString("hash", hash), zap.Int64("publishTime", w.timesource.Now().UnixNano()), zap.Int("payloadSizeBytes", len(message.Payload)))
|
||||
w.logMessages.Debug("waku.relay published", zap.String("pubsubTopic", params.pubsubTopic), logging.HexBytes("hash", hash), zap.Int64("publishTime", w.timesource.Now().UnixNano()), zap.Int("payloadSizeBytes", len(message.Payload)))
|
||||
|
||||
return hash, nil
|
||||
}
|
||||
|
||||
func (w *WakuRelay) GetSubscription(contentTopic string) (*Subscription, error) {
|
||||
pubSubTopic, err := waku_proto.GetPubSubTopicFromContentTopic(contentTopic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
contentFilter := waku_proto.NewContentFilter(pubSubTopic, contentTopic)
|
||||
cSubs := w.contentSubs[pubSubTopic]
|
||||
for _, sub := range cSubs {
|
||||
if sub.contentFilter.Equals(contentFilter) {
|
||||
return sub, nil
|
||||
func (w *WakuRelay) getSubscription(contentFilter waku_proto.ContentFilter) (*Subscription, error) {
|
||||
w.topicsMutex.RLock()
|
||||
defer w.topicsMutex.RUnlock()
|
||||
topicData, ok := w.topics[contentFilter.PubsubTopic]
|
||||
if ok {
|
||||
for _, sub := range topicData.contentSubs {
|
||||
if sub.contentFilter.Equals(contentFilter) {
|
||||
if sub.noConsume { //This check is to ensure that default no-consumer subscription is not returned
|
||||
continue
|
||||
}
|
||||
return sub, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("no subscription found for content topic")
|
||||
}
|
||||
|
||||
// GetSubscriptionWithPubsubTopic fetches subscription matching pubsub and contentTopic
|
||||
func (w *WakuRelay) GetSubscriptionWithPubsubTopic(pubsubTopic string, contentTopic string) (*Subscription, error) {
|
||||
var contentFilter waku_proto.ContentFilter
|
||||
if contentTopic != "" {
|
||||
contentFilter = waku_proto.NewContentFilter(pubsubTopic, contentTopic)
|
||||
} else {
|
||||
contentFilter = waku_proto.NewContentFilter(pubsubTopic)
|
||||
}
|
||||
sub, err := w.getSubscription(contentFilter)
|
||||
if err != nil {
|
||||
err = errors.New("no subscription found for pubsubTopic")
|
||||
}
|
||||
return sub, err
|
||||
}
|
||||
|
||||
// GetSubscription fetches subscription matching a contentTopic(via autosharding)
|
||||
func (w *WakuRelay) GetSubscription(contentTopic string) (*Subscription, error) {
|
||||
pubsubTopic, err := waku_proto.GetPubSubTopicFromContentTopic(contentTopic)
|
||||
if err != nil {
|
||||
w.log.Error("failed to derive pubsubTopic", zap.Error(err), zap.String("contentTopic", contentTopic))
|
||||
return nil, err
|
||||
}
|
||||
contentFilter := waku_proto.NewContentFilter(pubsubTopic, contentTopic)
|
||||
|
||||
return w.getSubscription(contentFilter)
|
||||
}
|
||||
|
||||
// Stop unmounts the relay protocol and stops all subscriptions
|
||||
func (w *WakuRelay) Stop() {
|
||||
w.CommonService.Stop(func() {
|
||||
|
@ -328,12 +384,16 @@ func (w *WakuRelay) subscribe(ctx context.Context, contentFilter waku_proto.Cont
|
|||
for _, opt := range optList {
|
||||
err := opt(params)
|
||||
if err != nil {
|
||||
w.log.Error("failed to apply option", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if params.cacheSize <= 0 {
|
||||
params.cacheSize = uint(DefaultRelaySubscriptionBufferSize)
|
||||
}
|
||||
|
||||
for pubSubTopic, cTopics := range pubSubTopicMap {
|
||||
w.log.Info("subscribing to", zap.String("pubsubTopic", pubSubTopic), zap.Strings("contenTopics", cTopics))
|
||||
w.log.Info("subscribing to", zap.String("pubsubTopic", pubSubTopic), zap.Strings("contentTopics", cTopics))
|
||||
var cFilter waku_proto.ContentFilter
|
||||
cFilter.PubsubTopic = pubSubTopic
|
||||
cFilter.ContentTopics = waku_proto.NewContentTopicSet(cTopics...)
|
||||
|
@ -343,21 +403,22 @@ func (w *WakuRelay) subscribe(ctx context.Context, contentFilter waku_proto.Cont
|
|||
_, err := w.subscribeToPubsubTopic(cFilter.PubsubTopic)
|
||||
if err != nil {
|
||||
//TODO: Handle partial errors.
|
||||
w.log.Error("failed to subscribe to pubsubTopic", zap.Error(err), zap.String("pubsubTopic", cFilter.PubsubTopic))
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
subscription := w.bcaster.Register(cFilter, WithBufferSize(DefaultRelaySubscriptionBufferSize),
|
||||
subscription := w.bcaster.Register(cFilter, WithBufferSize(int(params.cacheSize)),
|
||||
WithConsumerOption(params.dontConsume))
|
||||
|
||||
// Create Content subscription
|
||||
w.topicsMutex.RLock()
|
||||
if _, ok := w.contentSubs[pubSubTopic]; !ok {
|
||||
w.contentSubs[pubSubTopic] = map[int]*Subscription{}
|
||||
w.topicsMutex.Lock()
|
||||
topicData, ok := w.topics[pubSubTopic]
|
||||
if ok {
|
||||
topicData.contentSubs[subscription.ID] = subscription
|
||||
}
|
||||
w.contentSubs[pubSubTopic][subscription.ID] = subscription
|
||||
w.topicsMutex.Unlock()
|
||||
|
||||
w.topicsMutex.RUnlock()
|
||||
subscriptions = append(subscriptions, subscription)
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
|
@ -379,6 +440,8 @@ func (w *WakuRelay) Unsubscribe(ctx context.Context, contentFilter waku_proto.Co
|
|||
|
||||
pubSubTopicMap, err := waku_proto.ContentFilterToPubSubTopicMap(contentFilter)
|
||||
if err != nil {
|
||||
w.log.Error("failed to derive pubsubTopic from contentFilter", zap.String("pubsubTopic", contentFilter.PubsubTopic),
|
||||
zap.Strings("contentTopics", contentFilter.ContentTopicsList()))
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -388,20 +451,23 @@ func (w *WakuRelay) Unsubscribe(ctx context.Context, contentFilter waku_proto.Co
|
|||
for pubSubTopic, cTopics := range pubSubTopicMap {
|
||||
cfTemp := waku_proto.NewContentFilter(pubSubTopic, cTopics...)
|
||||
pubsubUnsubscribe := false
|
||||
sub, ok := w.relaySubs[pubSubTopic]
|
||||
sub, ok := w.topics[pubSubTopic]
|
||||
if !ok {
|
||||
w.log.Error("not subscribed to topic", zap.String("topic", pubSubTopic))
|
||||
return errors.New("not subscribed to topic")
|
||||
}
|
||||
cSubs := w.contentSubs[pubSubTopic]
|
||||
if cSubs != nil {
|
||||
|
||||
topicData, ok := w.topics[pubSubTopic]
|
||||
if ok {
|
||||
//Remove relevant subscription
|
||||
for subID, sub := range cSubs {
|
||||
for subID, sub := range topicData.contentSubs {
|
||||
if sub.contentFilter.Equals(cfTemp) {
|
||||
sub.Unsubscribe()
|
||||
delete(cSubs, subID)
|
||||
delete(topicData.contentSubs, subID)
|
||||
}
|
||||
}
|
||||
if len(cSubs) == 0 {
|
||||
|
||||
if len(topicData.contentSubs) == 0 {
|
||||
pubsubUnsubscribe = true
|
||||
}
|
||||
} else {
|
||||
|
@ -424,40 +490,36 @@ func (w *WakuRelay) Unsubscribe(ctx context.Context, contentFilter waku_proto.Co
|
|||
|
||||
// unsubscribeFromPubsubTopic unsubscribes subscription from underlying pubsub.
|
||||
// Note: caller has to acquire topicsMutex in order to avoid race conditions
|
||||
func (w *WakuRelay) unsubscribeFromPubsubTopic(sub *pubsub.Subscription) error {
|
||||
func (w *WakuRelay) unsubscribeFromPubsubTopic(topicData *pubsubTopicSubscriptionDetails) error {
|
||||
|
||||
pubSubTopic := sub.Topic()
|
||||
w.log.Info("unsubscribing from topic", zap.String("topic", pubSubTopic))
|
||||
pubSubTopic := topicData.subscription.Topic()
|
||||
w.log.Info("unsubscribing from pubsubTopic", zap.String("topic", pubSubTopic))
|
||||
|
||||
sub.Cancel()
|
||||
delete(w.relaySubs, pubSubTopic)
|
||||
topicData.subscription.Cancel()
|
||||
topicData.topicEventHandler.Cancel()
|
||||
|
||||
w.bcaster.UnRegister(pubSubTopic)
|
||||
|
||||
delete(w.contentSubs, pubSubTopic)
|
||||
|
||||
evtHandler, ok := w.topicEvtHanders[pubSubTopic]
|
||||
if ok {
|
||||
evtHandler.Cancel()
|
||||
delete(w.topicEvtHanders, pubSubTopic)
|
||||
}
|
||||
|
||||
err := w.wakuRelayTopics[pubSubTopic].Close()
|
||||
err := topicData.topic.Close()
|
||||
if err != nil {
|
||||
w.log.Error("failed to close the pubsubTopic", zap.String("topic", pubSubTopic))
|
||||
return err
|
||||
}
|
||||
delete(w.wakuRelayTopics, pubSubTopic)
|
||||
|
||||
w.RemoveTopicValidator(pubSubTopic)
|
||||
|
||||
err = w.emitters.EvtRelayUnsubscribed.Emit(EvtRelayUnsubscribed{pubSubTopic})
|
||||
err = w.pubsub.UnregisterTopicValidator(pubSubTopic)
|
||||
if err != nil {
|
||||
w.log.Error("failed to unregister topic validator", zap.String("topic", pubSubTopic))
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
delete(w.topics, pubSubTopic)
|
||||
|
||||
return w.emitters.EvtRelayUnsubscribed.Emit(EvtRelayUnsubscribed{pubSubTopic})
|
||||
}
|
||||
|
||||
func (w *WakuRelay) pubsubTopicMsgHandler(pubsubTopic string, sub *pubsub.Subscription) {
|
||||
func (w *WakuRelay) pubsubTopicMsgHandler(sub *pubsub.Subscription) {
|
||||
defer w.WaitGroup().Done()
|
||||
|
||||
for {
|
||||
|
@ -476,7 +538,7 @@ func (w *WakuRelay) pubsubTopicMsgHandler(pubsubTopic string, sub *pubsub.Subscr
|
|||
return
|
||||
}
|
||||
|
||||
envelope := waku_proto.NewEnvelope(wakuMessage, w.timesource.Now().UnixNano(), pubsubTopic)
|
||||
envelope := waku_proto.NewEnvelope(wakuMessage, w.timesource.Now().UnixNano(), sub.Topic())
|
||||
w.metrics.RecordMessage(envelope)
|
||||
|
||||
w.bcaster.Submit(envelope)
|
||||
|
|
|
@ -3,7 +3,9 @@ package rln
|
|||
import (
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
rlnpb "github.com/waku-org/go-waku/waku/v2/protocol/rln/pb"
|
||||
"github.com/waku-org/go-zerokit-rln/rln"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
type messageValidationResult int
|
||||
|
@ -37,20 +39,27 @@ func toRLNSignal(wakuMessage *pb.WakuMessage) []byte {
|
|||
return append(wakuMessage.Payload, contentTopicBytes...)
|
||||
}
|
||||
|
||||
func toRateLimitProof(msg *pb.WakuMessage) *rln.RateLimitProof {
|
||||
if msg == nil || msg.RateLimitProof == nil {
|
||||
return nil
|
||||
// Bytres2RateLimitProof converts a slice of bytes into a RateLimitProof instance
|
||||
func BytesToRateLimitProof(data []byte) (*rln.RateLimitProof, error) {
|
||||
if data == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
rateLimitProof := &rlnpb.RateLimitProof{}
|
||||
err := proto.Unmarshal(data, rateLimitProof)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := &rln.RateLimitProof{
|
||||
Proof: rln.ZKSNARK(rln.Bytes128(msg.RateLimitProof.Proof)),
|
||||
MerkleRoot: rln.MerkleNode(rln.Bytes32(msg.RateLimitProof.MerkleRoot)),
|
||||
Epoch: rln.Epoch(rln.Bytes32(msg.RateLimitProof.Epoch)),
|
||||
ShareX: rln.MerkleNode(rln.Bytes32(msg.RateLimitProof.ShareX)),
|
||||
ShareY: rln.MerkleNode(rln.Bytes32(msg.RateLimitProof.ShareY)),
|
||||
Nullifier: rln.Nullifier(rln.Bytes32(msg.RateLimitProof.Nullifier)),
|
||||
RLNIdentifier: rln.RLNIdentifier(rln.Bytes32(msg.RateLimitProof.RlnIdentifier)),
|
||||
Proof: rln.ZKSNARK(rln.Bytes128(rateLimitProof.Proof)),
|
||||
MerkleRoot: rln.MerkleNode(rln.Bytes32(rateLimitProof.MerkleRoot)),
|
||||
Epoch: rln.Epoch(rln.Bytes32(rateLimitProof.Epoch)),
|
||||
ShareX: rln.MerkleNode(rln.Bytes32(rateLimitProof.ShareX)),
|
||||
ShareY: rln.MerkleNode(rln.Bytes32(rateLimitProof.ShareY)),
|
||||
Nullifier: rln.Nullifier(rln.Bytes32(rateLimitProof.Nullifier)),
|
||||
RLNIdentifier: rln.RLNIdentifier(rln.Bytes32(rateLimitProof.RlnIdentifier)),
|
||||
}
|
||||
|
||||
return result
|
||||
return result, nil
|
||||
}
|
||||
|
|
|
@ -244,10 +244,7 @@ func (gm *DynamicGroupManager) InsertMembers(toInsert *om.OrderedMap) error {
|
|||
|
||||
gm.metrics.RecordRegisteredMembership(gm.rln.LeavesSet())
|
||||
|
||||
_, err = gm.rootTracker.UpdateLatestRoot(pair.Key.(uint64))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
gm.rootTracker.UpdateLatestRoot(pair.Key.(uint64))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
17
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/root_tracker.go
generated
vendored
17
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/root_tracker.go
generated
vendored
|
@ -4,7 +4,9 @@ import (
|
|||
"bytes"
|
||||
"sync"
|
||||
|
||||
"github.com/waku-org/go-waku/waku/v2/utils"
|
||||
"github.com/waku-org/go-zerokit-rln/rln"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// RootsPerBlock stores the merkle root generated at N block number
|
||||
|
@ -27,18 +29,15 @@ type MerkleRootTracker struct {
|
|||
const maxBufferSize = 20
|
||||
|
||||
// NewMerkleRootTracker creates an instance of MerkleRootTracker
|
||||
func NewMerkleRootTracker(acceptableRootWindowSize int, rlnInstance *rln.RLN) (*MerkleRootTracker, error) {
|
||||
func NewMerkleRootTracker(acceptableRootWindowSize int, rlnInstance *rln.RLN) *MerkleRootTracker {
|
||||
result := &MerkleRootTracker{
|
||||
acceptableRootWindowSize: acceptableRootWindowSize,
|
||||
rln: rlnInstance,
|
||||
}
|
||||
|
||||
_, err := result.UpdateLatestRoot(0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result.UpdateLatestRoot(0)
|
||||
|
||||
return result, nil
|
||||
return result
|
||||
}
|
||||
|
||||
// Backfill is used to pop merkle roots when there is a chain fork
|
||||
|
@ -102,18 +101,18 @@ func (m *MerkleRootTracker) IndexOf(root [32]byte) int {
|
|||
|
||||
// UpdateLatestRoot should be called when a block containing a new
|
||||
// IDCommitment is received so we can keep track of the merkle root change
|
||||
func (m *MerkleRootTracker) UpdateLatestRoot(blockNumber uint64) (rln.MerkleNode, error) {
|
||||
func (m *MerkleRootTracker) UpdateLatestRoot(blockNumber uint64) rln.MerkleNode {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
root, err := m.rln.GetMerkleRoot()
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
utils.Logger().Named("root-tracker").Panic("could not retrieve merkle root", zap.Error(err))
|
||||
}
|
||||
|
||||
m.pushRoot(blockNumber, root)
|
||||
|
||||
return root, nil
|
||||
return root
|
||||
}
|
||||
|
||||
func (m *MerkleRootTracker) pushRoot(blockNumber uint64, root [32]byte) {
|
||||
|
|
|
@ -68,10 +68,7 @@ func (gm *StaticGroupManager) insertMembers(idCommitments []rln.IDCommitment) er
|
|||
|
||||
latestIndex := gm.nextIndex + uint64(len(idCommitments))
|
||||
|
||||
_, err = gm.rootTracker.UpdateLatestRoot(latestIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
gm.rootTracker.UpdateLatestRoot(latestIndex)
|
||||
|
||||
gm.nextIndex = latestIndex + 1
|
||||
|
||||
|
|
|
@ -101,10 +101,11 @@ var (
|
|||
type invalidCategory string
|
||||
|
||||
var (
|
||||
invalidNoProof invalidCategory = "no_proof"
|
||||
invalidEpoch invalidCategory = "invalid_epoch"
|
||||
invalidRoot invalidCategory = "invalid_root"
|
||||
invalidProof invalidCategory = "invalid_proof"
|
||||
invalidNoProof invalidCategory = "no_proof"
|
||||
invalidEpoch invalidCategory = "invalid_epoch"
|
||||
invalidRoot invalidCategory = "invalid_root"
|
||||
invalidProof invalidCategory = "invalid_proof"
|
||||
proofExtractionErr invalidCategory = "invalid_proof_extract_err"
|
||||
)
|
||||
|
||||
// Metrics exposes the functions required to update prometheus metrics for lightpush protocol
|
||||
|
|
3
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/pb/generate.go
generated
vendored
Normal file
3
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/pb/generate.go
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
|||
package pb
|
||||
|
||||
//go:generate protoc -I./../../waku-proto/waku/rln/v1/. -I./../../waku-proto/ --go_opt=paths=source_relative --go_opt=Mrln.proto=github.com/waku-org/go-waku/waku/v2/protocol/rln/pb --go_out=. ./../../waku-proto/waku/rln/v1/rln.proto
|
202
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/pb/rln.pb.go
generated
vendored
Normal file
202
vendor/github.com/waku-org/go-waku/waku/v2/protocol/rln/pb/rln.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,202 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.31.0
|
||||
// protoc v4.24.4
|
||||
// source: rln.proto
|
||||
|
||||
// rfc: https://rfc.vac.dev/spec/17/
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type RateLimitProof struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Proof []byte `protobuf:"bytes,1,opt,name=proof,proto3" json:"proof,omitempty"`
|
||||
MerkleRoot []byte `protobuf:"bytes,2,opt,name=merkle_root,json=merkleRoot,proto3" json:"merkle_root,omitempty"`
|
||||
Epoch []byte `protobuf:"bytes,3,opt,name=epoch,proto3" json:"epoch,omitempty"`
|
||||
ShareX []byte `protobuf:"bytes,4,opt,name=share_x,json=shareX,proto3" json:"share_x,omitempty"`
|
||||
ShareY []byte `protobuf:"bytes,5,opt,name=share_y,json=shareY,proto3" json:"share_y,omitempty"`
|
||||
Nullifier []byte `protobuf:"bytes,6,opt,name=nullifier,proto3" json:"nullifier,omitempty"`
|
||||
RlnIdentifier []byte `protobuf:"bytes,7,opt,name=rln_identifier,json=rlnIdentifier,proto3" json:"rln_identifier,omitempty"`
|
||||
}
|
||||
|
||||
func (x *RateLimitProof) Reset() {
|
||||
*x = RateLimitProof{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_rln_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *RateLimitProof) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*RateLimitProof) ProtoMessage() {}
|
||||
|
||||
func (x *RateLimitProof) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_rln_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use RateLimitProof.ProtoReflect.Descriptor instead.
|
||||
func (*RateLimitProof) Descriptor() ([]byte, []int) {
|
||||
return file_rln_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *RateLimitProof) GetProof() []byte {
|
||||
if x != nil {
|
||||
return x.Proof
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *RateLimitProof) GetMerkleRoot() []byte {
|
||||
if x != nil {
|
||||
return x.MerkleRoot
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *RateLimitProof) GetEpoch() []byte {
|
||||
if x != nil {
|
||||
return x.Epoch
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *RateLimitProof) GetShareX() []byte {
|
||||
if x != nil {
|
||||
return x.ShareX
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *RateLimitProof) GetShareY() []byte {
|
||||
if x != nil {
|
||||
return x.ShareY
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *RateLimitProof) GetNullifier() []byte {
|
||||
if x != nil {
|
||||
return x.Nullifier
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *RateLimitProof) GetRlnIdentifier() []byte {
|
||||
if x != nil {
|
||||
return x.RlnIdentifier
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_rln_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_rln_proto_rawDesc = []byte{
|
||||
0x0a, 0x09, 0x72, 0x6c, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x77, 0x61, 0x6b,
|
||||
0x75, 0x2e, 0x72, 0x6c, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xd4, 0x01, 0x0a, 0x0e, 0x52, 0x61, 0x74,
|
||||
0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x14, 0x0a, 0x05, 0x70,
|
||||
0x72, 0x6f, 0x6f, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x6f,
|
||||
0x66, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74,
|
||||
0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x52, 0x6f,
|
||||
0x6f, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28,
|
||||
0x0c, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x68, 0x61, 0x72,
|
||||
0x65, 0x5f, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x65,
|
||||
0x58, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x68, 0x61, 0x72, 0x65, 0x5f, 0x79, 0x18, 0x05, 0x20, 0x01,
|
||||
0x28, 0x0c, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x65, 0x59, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x75,
|
||||
0x6c, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6e,
|
||||
0x75, 0x6c, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x6c, 0x6e, 0x5f,
|
||||
0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c,
|
||||
0x52, 0x0d, 0x72, 0x6c, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x62,
|
||||
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_rln_proto_rawDescOnce sync.Once
|
||||
file_rln_proto_rawDescData = file_rln_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_rln_proto_rawDescGZIP() []byte {
|
||||
file_rln_proto_rawDescOnce.Do(func() {
|
||||
file_rln_proto_rawDescData = protoimpl.X.CompressGZIP(file_rln_proto_rawDescData)
|
||||
})
|
||||
return file_rln_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_rln_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
|
||||
var file_rln_proto_goTypes = []interface{}{
|
||||
(*RateLimitProof)(nil), // 0: waku.rln.v1.RateLimitProof
|
||||
}
|
||||
var file_rln_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_rln_proto_init() }
|
||||
func file_rln_proto_init() {
|
||||
if File_rln_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_rln_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*RateLimitProof); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_rln_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 1,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_rln_proto_goTypes,
|
||||
DependencyIndexes: file_rln_proto_depIdxs,
|
||||
MessageInfos: file_rln_proto_msgTypes,
|
||||
}.Build()
|
||||
File_rln_proto = out.File
|
||||
file_rln_proto_rawDesc = nil
|
||||
file_rln_proto_goTypes = nil
|
||||
file_rln_proto_depIdxs = nil
|
||||
}
|
|
@ -11,9 +11,11 @@ import (
|
|||
"github.com/waku-org/go-waku/logging"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager"
|
||||
rlnpb "github.com/waku-org/go-waku/waku/v2/protocol/rln/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/timesource"
|
||||
"github.com/waku-org/go-zerokit-rln/rln"
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
type WakuRLNRelay struct {
|
||||
|
@ -45,10 +47,8 @@ func GetRLNInstanceAndRootTracker(treePath string) (*rln.RLN, *group_manager.Mer
|
|||
return nil, nil, err
|
||||
}
|
||||
|
||||
rootTracker, err := group_manager.NewMerkleRootTracker(acceptableRootWindowSize, rlnInstance)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
rootTracker := group_manager.NewMerkleRootTracker(acceptableRootWindowSize, rlnInstance)
|
||||
|
||||
return rlnInstance, rootTracker, nil
|
||||
}
|
||||
func New(
|
||||
|
@ -106,7 +106,12 @@ func (rlnRelay *WakuRLNRelay) ValidateMessage(msg *pb.WakuMessage, optionalTime
|
|||
epoch = rln.CalcEpoch(rlnRelay.timesource.Now())
|
||||
}
|
||||
|
||||
msgProof := toRateLimitProof(msg)
|
||||
msgProof, err := BytesToRateLimitProof(msg.RateLimitProof)
|
||||
if err != nil {
|
||||
rlnRelay.log.Debug("invalid message: could not extract proof", zap.Error(err))
|
||||
rlnRelay.metrics.RecordInvalidMessage(proofExtractionErr)
|
||||
}
|
||||
|
||||
if msgProof == nil {
|
||||
// message does not contain a proof
|
||||
rlnRelay.log.Debug("invalid message: message does not contain a proof")
|
||||
|
@ -133,7 +138,7 @@ func (rlnRelay *WakuRLNRelay) ValidateMessage(msg *pb.WakuMessage, optionalTime
|
|||
}
|
||||
|
||||
if !(rlnRelay.RootTracker.ContainsRoot(msgProof.MerkleRoot)) {
|
||||
rlnRelay.log.Debug("invalid message: unexpected root", logging.HexBytes("msgRoot", msg.RateLimitProof.MerkleRoot))
|
||||
rlnRelay.log.Debug("invalid message: unexpected root", logging.HexBytes("msgRoot", msgProof.MerkleRoot[:]))
|
||||
rlnRelay.metrics.RecordInvalidMessage(invalidRoot)
|
||||
return invalidMessage, nil
|
||||
}
|
||||
|
@ -206,7 +211,12 @@ func (rlnRelay *WakuRLNRelay) AppendRLNProof(msg *pb.WakuMessage, senderEpochTim
|
|||
}
|
||||
rlnRelay.metrics.RecordProofGeneration(time.Since(start))
|
||||
|
||||
msg.RateLimitProof = proof
|
||||
b, err := proto.Marshal(proof)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msg.RateLimitProof = b
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -262,7 +272,7 @@ func (rlnRelay *WakuRLNRelay) Validator(
|
|||
}
|
||||
}
|
||||
|
||||
func (rlnRelay *WakuRLNRelay) generateProof(input []byte, epoch rln.Epoch) (*pb.RateLimitProof, error) {
|
||||
func (rlnRelay *WakuRLNRelay) generateProof(input []byte, epoch rln.Epoch) (*rlnpb.RateLimitProof, error) {
|
||||
identityCredentials, err := rlnRelay.GroupManager.IdentityCredentials()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -275,7 +285,7 @@ func (rlnRelay *WakuRLNRelay) generateProof(input []byte, epoch rln.Epoch) (*pb.
|
|||
return nil, err
|
||||
}
|
||||
|
||||
return &pb.RateLimitProof{
|
||||
return &rlnpb.RateLimitProof{
|
||||
Proof: proof.Proof[:],
|
||||
MerkleRoot: proof.MerkleRoot[:],
|
||||
Epoch: proof.Epoch[:],
|
||||
|
|
|
@ -225,7 +225,7 @@ func FromBitVector(buf []byte) (RelayShards, error) {
|
|||
// This is based on Autosharding algorithm defined in RFC 51
|
||||
func GetShardFromContentTopic(topic ContentTopic, shardCount int) StaticShardingPubsubTopic {
|
||||
bytes := []byte(topic.ApplicationName)
|
||||
bytes = append(bytes, []byte(fmt.Sprintf("%d", topic.ApplicationVersion))...)
|
||||
bytes = append(bytes, []byte(topic.ApplicationVersion)...)
|
||||
|
||||
hash := hash.SHA256(bytes)
|
||||
//We only use the last 64 bits of the hash as having more shards is unlikely.
|
||||
|
@ -245,3 +245,26 @@ func GetPubSubTopicFromContentTopic(cTopicString string) (string, error) {
|
|||
|
||||
return pTopic.String(), nil
|
||||
}
|
||||
|
||||
func GeneratePubsubToContentTopicMap(pubsubTopic string, contentTopics []string) (map[string][]string, error) {
|
||||
|
||||
pubSubTopicMap := make(map[string][]string, 0)
|
||||
|
||||
if pubsubTopic == "" {
|
||||
//Should we derive pubsub topic from contentTopic so that peer selection and discovery can be done accordingly?
|
||||
for _, cTopic := range contentTopics {
|
||||
pTopic, err := GetPubSubTopicFromContentTopic(cTopic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, ok := pubSubTopicMap[pTopic]
|
||||
if !ok {
|
||||
pubSubTopicMap[pTopic] = []string{}
|
||||
}
|
||||
pubSubTopicMap[pTopic] = append(pubSubTopicMap[pTopic], cTopic)
|
||||
}
|
||||
} else {
|
||||
pubSubTopicMap[pubsubTopic] = append(pubSubTopicMap[pubsubTopic], contentTopics...)
|
||||
}
|
||||
return pubSubTopicMap, nil
|
||||
}
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
package pb
|
||||
|
||||
//go:generate protoc -I./../../pb/. -I. --go_opt=paths=source_relative --go_opt=Mwaku_store.proto=github.com/waku-org/go-waku/waku/v2/protocol/store/pb --go_opt=Mwaku_message.proto=github.com/waku-org/go-waku/waku/v2/protocol/pb --go_out=. ./waku_store.proto
|
||||
//go:generate protoc -I./../../waku-proto/waku/store/v2beta4//. -I./../../waku-proto/ --go_opt=paths=source_relative --go_opt=Mstore.proto=github.com/waku-org/go-waku/waku/v2/protocol/store/pb --go_opt=Mwaku/message/v1/message.proto=github.com/waku-org/go-waku/waku/v2/protocol/pb --go_out=. ./../../waku-proto/waku/store/v2beta4/store.proto
|
||||
|
|
727
vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/pb/store.pb.go
generated
vendored
Normal file
727
vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/pb/store.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,727 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.31.0
|
||||
// protoc v4.24.4
|
||||
// source: store.proto
|
||||
|
||||
// 13/WAKU2-STORE rfc: https://rfc.vac.dev/spec/13/
|
||||
// Protocol identifier: /vac/waku/store/2.0.0-beta4
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
pb "github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type PagingInfo_Direction int32
|
||||
|
||||
const (
|
||||
PagingInfo_BACKWARD PagingInfo_Direction = 0
|
||||
PagingInfo_FORWARD PagingInfo_Direction = 1
|
||||
)
|
||||
|
||||
// Enum value maps for PagingInfo_Direction.
|
||||
var (
|
||||
PagingInfo_Direction_name = map[int32]string{
|
||||
0: "BACKWARD",
|
||||
1: "FORWARD",
|
||||
}
|
||||
PagingInfo_Direction_value = map[string]int32{
|
||||
"BACKWARD": 0,
|
||||
"FORWARD": 1,
|
||||
}
|
||||
)
|
||||
|
||||
func (x PagingInfo_Direction) Enum() *PagingInfo_Direction {
|
||||
p := new(PagingInfo_Direction)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
|
||||
func (x PagingInfo_Direction) String() string {
|
||||
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
|
||||
}
|
||||
|
||||
func (PagingInfo_Direction) Descriptor() protoreflect.EnumDescriptor {
|
||||
return file_store_proto_enumTypes[0].Descriptor()
|
||||
}
|
||||
|
||||
func (PagingInfo_Direction) Type() protoreflect.EnumType {
|
||||
return &file_store_proto_enumTypes[0]
|
||||
}
|
||||
|
||||
func (x PagingInfo_Direction) Number() protoreflect.EnumNumber {
|
||||
return protoreflect.EnumNumber(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use PagingInfo_Direction.Descriptor instead.
|
||||
func (PagingInfo_Direction) EnumDescriptor() ([]byte, []int) {
|
||||
return file_store_proto_rawDescGZIP(), []int{1, 0}
|
||||
}
|
||||
|
||||
type HistoryResponse_Error int32
|
||||
|
||||
const (
|
||||
HistoryResponse_NONE HistoryResponse_Error = 0
|
||||
HistoryResponse_INVALID_CURSOR HistoryResponse_Error = 1
|
||||
)
|
||||
|
||||
// Enum value maps for HistoryResponse_Error.
|
||||
var (
|
||||
HistoryResponse_Error_name = map[int32]string{
|
||||
0: "NONE",
|
||||
1: "INVALID_CURSOR",
|
||||
}
|
||||
HistoryResponse_Error_value = map[string]int32{
|
||||
"NONE": 0,
|
||||
"INVALID_CURSOR": 1,
|
||||
}
|
||||
)
|
||||
|
||||
func (x HistoryResponse_Error) Enum() *HistoryResponse_Error {
|
||||
p := new(HistoryResponse_Error)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
|
||||
func (x HistoryResponse_Error) String() string {
|
||||
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
|
||||
}
|
||||
|
||||
func (HistoryResponse_Error) Descriptor() protoreflect.EnumDescriptor {
|
||||
return file_store_proto_enumTypes[1].Descriptor()
|
||||
}
|
||||
|
||||
func (HistoryResponse_Error) Type() protoreflect.EnumType {
|
||||
return &file_store_proto_enumTypes[1]
|
||||
}
|
||||
|
||||
func (x HistoryResponse_Error) Number() protoreflect.EnumNumber {
|
||||
return protoreflect.EnumNumber(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use HistoryResponse_Error.Descriptor instead.
|
||||
func (HistoryResponse_Error) EnumDescriptor() ([]byte, []int) {
|
||||
return file_store_proto_rawDescGZIP(), []int{4, 0}
|
||||
}
|
||||
|
||||
type Index struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Digest []byte `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"`
|
||||
ReceiverTime int64 `protobuf:"zigzag64,2,opt,name=receiver_time,json=receiverTime,proto3" json:"receiver_time,omitempty"`
|
||||
SenderTime int64 `protobuf:"zigzag64,3,opt,name=sender_time,json=senderTime,proto3" json:"sender_time,omitempty"`
|
||||
PubsubTopic string `protobuf:"bytes,4,opt,name=pubsub_topic,json=pubsubTopic,proto3" json:"pubsub_topic,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Index) Reset() {
|
||||
*x = Index{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_store_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Index) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Index) ProtoMessage() {}
|
||||
|
||||
func (x *Index) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_store_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Index.ProtoReflect.Descriptor instead.
|
||||
func (*Index) Descriptor() ([]byte, []int) {
|
||||
return file_store_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Index) GetDigest() []byte {
|
||||
if x != nil {
|
||||
return x.Digest
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Index) GetReceiverTime() int64 {
|
||||
if x != nil {
|
||||
return x.ReceiverTime
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Index) GetSenderTime() int64 {
|
||||
if x != nil {
|
||||
return x.SenderTime
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Index) GetPubsubTopic() string {
|
||||
if x != nil {
|
||||
return x.PubsubTopic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type PagingInfo struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
PageSize uint64 `protobuf:"varint,1,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
|
||||
Cursor *Index `protobuf:"bytes,2,opt,name=cursor,proto3" json:"cursor,omitempty"`
|
||||
Direction PagingInfo_Direction `protobuf:"varint,3,opt,name=direction,proto3,enum=waku.store.v2beta4.PagingInfo_Direction" json:"direction,omitempty"`
|
||||
}
|
||||
|
||||
func (x *PagingInfo) Reset() {
|
||||
*x = PagingInfo{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_store_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *PagingInfo) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*PagingInfo) ProtoMessage() {}
|
||||
|
||||
func (x *PagingInfo) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_store_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use PagingInfo.ProtoReflect.Descriptor instead.
|
||||
func (*PagingInfo) Descriptor() ([]byte, []int) {
|
||||
return file_store_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *PagingInfo) GetPageSize() uint64 {
|
||||
if x != nil {
|
||||
return x.PageSize
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *PagingInfo) GetCursor() *Index {
|
||||
if x != nil {
|
||||
return x.Cursor
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *PagingInfo) GetDirection() PagingInfo_Direction {
|
||||
if x != nil {
|
||||
return x.Direction
|
||||
}
|
||||
return PagingInfo_BACKWARD
|
||||
}
|
||||
|
||||
type ContentFilter struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
ContentTopic string `protobuf:"bytes,1,opt,name=content_topic,json=contentTopic,proto3" json:"content_topic,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ContentFilter) Reset() {
|
||||
*x = ContentFilter{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_store_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ContentFilter) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ContentFilter) ProtoMessage() {}
|
||||
|
||||
func (x *ContentFilter) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_store_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ContentFilter.ProtoReflect.Descriptor instead.
|
||||
func (*ContentFilter) Descriptor() ([]byte, []int) {
|
||||
return file_store_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *ContentFilter) GetContentTopic() string {
|
||||
if x != nil {
|
||||
return x.ContentTopic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type HistoryQuery struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The first field is reserved for future use
|
||||
PubsubTopic string `protobuf:"bytes,2,opt,name=pubsub_topic,json=pubsubTopic,proto3" json:"pubsub_topic,omitempty"`
|
||||
ContentFilters []*ContentFilter `protobuf:"bytes,3,rep,name=content_filters,json=contentFilters,proto3" json:"content_filters,omitempty"`
|
||||
PagingInfo *PagingInfo `protobuf:"bytes,4,opt,name=paging_info,json=pagingInfo,proto3" json:"paging_info,omitempty"`
|
||||
StartTime *int64 `protobuf:"zigzag64,5,opt,name=start_time,json=startTime,proto3,oneof" json:"start_time,omitempty"`
|
||||
EndTime *int64 `protobuf:"zigzag64,6,opt,name=end_time,json=endTime,proto3,oneof" json:"end_time,omitempty"`
|
||||
}
|
||||
|
||||
func (x *HistoryQuery) Reset() {
|
||||
*x = HistoryQuery{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_store_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *HistoryQuery) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*HistoryQuery) ProtoMessage() {}
|
||||
|
||||
func (x *HistoryQuery) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_store_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use HistoryQuery.ProtoReflect.Descriptor instead.
|
||||
func (*HistoryQuery) Descriptor() ([]byte, []int) {
|
||||
return file_store_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *HistoryQuery) GetPubsubTopic() string {
|
||||
if x != nil {
|
||||
return x.PubsubTopic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *HistoryQuery) GetContentFilters() []*ContentFilter {
|
||||
if x != nil {
|
||||
return x.ContentFilters
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *HistoryQuery) GetPagingInfo() *PagingInfo {
|
||||
if x != nil {
|
||||
return x.PagingInfo
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *HistoryQuery) GetStartTime() int64 {
|
||||
if x != nil && x.StartTime != nil {
|
||||
return *x.StartTime
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *HistoryQuery) GetEndTime() int64 {
|
||||
if x != nil && x.EndTime != nil {
|
||||
return *x.EndTime
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type HistoryResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The first field is reserved for future use
|
||||
Messages []*pb.WakuMessage `protobuf:"bytes,2,rep,name=messages,proto3" json:"messages,omitempty"`
|
||||
PagingInfo *PagingInfo `protobuf:"bytes,3,opt,name=paging_info,json=pagingInfo,proto3" json:"paging_info,omitempty"`
|
||||
Error HistoryResponse_Error `protobuf:"varint,4,opt,name=error,proto3,enum=waku.store.v2beta4.HistoryResponse_Error" json:"error,omitempty"`
|
||||
}
|
||||
|
||||
func (x *HistoryResponse) Reset() {
|
||||
*x = HistoryResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_store_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *HistoryResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*HistoryResponse) ProtoMessage() {}
|
||||
|
||||
func (x *HistoryResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_store_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use HistoryResponse.ProtoReflect.Descriptor instead.
|
||||
func (*HistoryResponse) Descriptor() ([]byte, []int) {
|
||||
return file_store_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *HistoryResponse) GetMessages() []*pb.WakuMessage {
|
||||
if x != nil {
|
||||
return x.Messages
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *HistoryResponse) GetPagingInfo() *PagingInfo {
|
||||
if x != nil {
|
||||
return x.PagingInfo
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *HistoryResponse) GetError() HistoryResponse_Error {
|
||||
if x != nil {
|
||||
return x.Error
|
||||
}
|
||||
return HistoryResponse_NONE
|
||||
}
|
||||
|
||||
type HistoryRPC struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
|
||||
Query *HistoryQuery `protobuf:"bytes,2,opt,name=query,proto3" json:"query,omitempty"`
|
||||
Response *HistoryResponse `protobuf:"bytes,3,opt,name=response,proto3" json:"response,omitempty"`
|
||||
}
|
||||
|
||||
func (x *HistoryRPC) Reset() {
|
||||
*x = HistoryRPC{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_store_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *HistoryRPC) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*HistoryRPC) ProtoMessage() {}
|
||||
|
||||
func (x *HistoryRPC) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_store_proto_msgTypes[5]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use HistoryRPC.ProtoReflect.Descriptor instead.
|
||||
func (*HistoryRPC) Descriptor() ([]byte, []int) {
|
||||
return file_store_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *HistoryRPC) GetRequestId() string {
|
||||
if x != nil {
|
||||
return x.RequestId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *HistoryRPC) GetQuery() *HistoryQuery {
|
||||
if x != nil {
|
||||
return x.Query
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *HistoryRPC) GetResponse() *HistoryResponse {
|
||||
if x != nil {
|
||||
return x.Response
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_store_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_store_proto_rawDesc = []byte{
|
||||
0x0a, 0x0b, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x77,
|
||||
0x61, 0x6b, 0x75, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61,
|
||||
0x34, 0x1a, 0x1d, 0x77, 0x61, 0x6b, 0x75, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2f,
|
||||
0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x22, 0x88, 0x01, 0x0a, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69,
|
||||
0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65,
|
||||
0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x5f, 0x74,
|
||||
0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x12, 0x52, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69,
|
||||
0x76, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x6e, 0x64, 0x65,
|
||||
0x72, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x12, 0x52, 0x0a, 0x73, 0x65,
|
||||
0x6e, 0x64, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x75, 0x62, 0x73,
|
||||
0x75, 0x62, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
|
||||
0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x22, 0xcc, 0x01, 0x0a, 0x0a,
|
||||
0x50, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61,
|
||||
0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70,
|
||||
0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x31, 0x0a, 0x06, 0x63, 0x75, 0x72, 0x73, 0x6f,
|
||||
0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x77, 0x61, 0x6b, 0x75, 0x2e, 0x73,
|
||||
0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x34, 0x2e, 0x49, 0x6e, 0x64,
|
||||
0x65, 0x78, 0x52, 0x06, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x12, 0x46, 0x0a, 0x09, 0x64, 0x69,
|
||||
0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e,
|
||||
0x77, 0x61, 0x6b, 0x75, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74,
|
||||
0x61, 0x34, 0x2e, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x44, 0x69,
|
||||
0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x22, 0x26, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12,
|
||||
0x0c, 0x0a, 0x08, 0x42, 0x41, 0x43, 0x4b, 0x57, 0x41, 0x52, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a,
|
||||
0x07, 0x46, 0x4f, 0x52, 0x57, 0x41, 0x52, 0x44, 0x10, 0x01, 0x22, 0x34, 0x0a, 0x0d, 0x43, 0x6f,
|
||||
0x6e, 0x74, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x63,
|
||||
0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63,
|
||||
0x22, 0x9e, 0x02, 0x0a, 0x0c, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x51, 0x75, 0x65, 0x72,
|
||||
0x79, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x6f, 0x70, 0x69,
|
||||
0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x54,
|
||||
0x6f, 0x70, 0x69, 0x63, 0x12, 0x4a, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f,
|
||||
0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e,
|
||||
0x77, 0x61, 0x6b, 0x75, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74,
|
||||
0x61, 0x34, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72,
|
||||
0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73,
|
||||
0x12, 0x3f, 0x0a, 0x0b, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18,
|
||||
0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x77, 0x61, 0x6b, 0x75, 0x2e, 0x73, 0x74, 0x6f,
|
||||
0x72, 0x65, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x34, 0x2e, 0x50, 0x61, 0x67, 0x69, 0x6e,
|
||||
0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66,
|
||||
0x6f, 0x12, 0x22, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
|
||||
0x05, 0x20, 0x01, 0x28, 0x12, 0x48, 0x00, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69,
|
||||
0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x1e, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d,
|
||||
0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x12, 0x48, 0x01, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69,
|
||||
0x6d, 0x65, 0x88, 0x01, 0x01, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f,
|
||||
0x74, 0x69, 0x6d, 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d,
|
||||
0x65, 0x22, 0xf4, 0x01, 0x0a, 0x0f, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
|
||||
0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x77, 0x61, 0x6b, 0x75, 0x2e, 0x6d,
|
||||
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x61, 0x6b, 0x75, 0x4d, 0x65,
|
||||
0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12,
|
||||
0x3f, 0x0a, 0x0b, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03,
|
||||
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x77, 0x61, 0x6b, 0x75, 0x2e, 0x73, 0x74, 0x6f, 0x72,
|
||||
0x65, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x34, 0x2e, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x67,
|
||||
0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f,
|
||||
0x12, 0x3f, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32,
|
||||
0x29, 0x2e, 0x77, 0x61, 0x6b, 0x75, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x32, 0x62,
|
||||
0x65, 0x74, 0x61, 0x34, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f,
|
||||
0x72, 0x22, 0x25, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f,
|
||||
0x4e, 0x45, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f,
|
||||
0x43, 0x55, 0x52, 0x53, 0x4f, 0x52, 0x10, 0x01, 0x22, 0xa4, 0x01, 0x0a, 0x0a, 0x48, 0x69, 0x73,
|
||||
0x74, 0x6f, 0x72, 0x79, 0x52, 0x50, 0x43, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18,
|
||||
0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x77, 0x61, 0x6b, 0x75, 0x2e, 0x73, 0x74, 0x6f,
|
||||
0x72, 0x65, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x34, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f,
|
||||
0x72, 0x79, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x3f,
|
||||
0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
|
||||
0x32, 0x23, 0x2e, 0x77, 0x61, 0x6b, 0x75, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x32,
|
||||
0x62, 0x65, 0x74, 0x61, 0x34, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x62,
|
||||
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_store_proto_rawDescOnce sync.Once
|
||||
file_store_proto_rawDescData = file_store_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_store_proto_rawDescGZIP() []byte {
|
||||
file_store_proto_rawDescOnce.Do(func() {
|
||||
file_store_proto_rawDescData = protoimpl.X.CompressGZIP(file_store_proto_rawDescData)
|
||||
})
|
||||
return file_store_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_store_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
|
||||
var file_store_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
|
||||
var file_store_proto_goTypes = []interface{}{
|
||||
(PagingInfo_Direction)(0), // 0: waku.store.v2beta4.PagingInfo.Direction
|
||||
(HistoryResponse_Error)(0), // 1: waku.store.v2beta4.HistoryResponse.Error
|
||||
(*Index)(nil), // 2: waku.store.v2beta4.Index
|
||||
(*PagingInfo)(nil), // 3: waku.store.v2beta4.PagingInfo
|
||||
(*ContentFilter)(nil), // 4: waku.store.v2beta4.ContentFilter
|
||||
(*HistoryQuery)(nil), // 5: waku.store.v2beta4.HistoryQuery
|
||||
(*HistoryResponse)(nil), // 6: waku.store.v2beta4.HistoryResponse
|
||||
(*HistoryRPC)(nil), // 7: waku.store.v2beta4.HistoryRPC
|
||||
(*pb.WakuMessage)(nil), // 8: waku.message.v1.WakuMessage
|
||||
}
|
||||
var file_store_proto_depIdxs = []int32{
|
||||
2, // 0: waku.store.v2beta4.PagingInfo.cursor:type_name -> waku.store.v2beta4.Index
|
||||
0, // 1: waku.store.v2beta4.PagingInfo.direction:type_name -> waku.store.v2beta4.PagingInfo.Direction
|
||||
4, // 2: waku.store.v2beta4.HistoryQuery.content_filters:type_name -> waku.store.v2beta4.ContentFilter
|
||||
3, // 3: waku.store.v2beta4.HistoryQuery.paging_info:type_name -> waku.store.v2beta4.PagingInfo
|
||||
8, // 4: waku.store.v2beta4.HistoryResponse.messages:type_name -> waku.message.v1.WakuMessage
|
||||
3, // 5: waku.store.v2beta4.HistoryResponse.paging_info:type_name -> waku.store.v2beta4.PagingInfo
|
||||
1, // 6: waku.store.v2beta4.HistoryResponse.error:type_name -> waku.store.v2beta4.HistoryResponse.Error
|
||||
5, // 7: waku.store.v2beta4.HistoryRPC.query:type_name -> waku.store.v2beta4.HistoryQuery
|
||||
6, // 8: waku.store.v2beta4.HistoryRPC.response:type_name -> waku.store.v2beta4.HistoryResponse
|
||||
9, // [9:9] is the sub-list for method output_type
|
||||
9, // [9:9] is the sub-list for method input_type
|
||||
9, // [9:9] is the sub-list for extension type_name
|
||||
9, // [9:9] is the sub-list for extension extendee
|
||||
0, // [0:9] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_store_proto_init() }
|
||||
func file_store_proto_init() {
|
||||
if File_store_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_store_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Index); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_store_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*PagingInfo); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_store_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ContentFilter); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_store_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*HistoryQuery); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_store_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*HistoryResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_store_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*HistoryRPC); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
file_store_proto_msgTypes[3].OneofWrappers = []interface{}{}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_store_proto_rawDesc,
|
||||
NumEnums: 2,
|
||||
NumMessages: 6,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_store_proto_goTypes,
|
||||
DependencyIndexes: file_store_proto_depIdxs,
|
||||
EnumInfos: file_store_proto_enumTypes,
|
||||
MessageInfos: file_store_proto_msgTypes,
|
||||
}.Build()
|
||||
File_store_proto = out.File
|
||||
file_store_proto_rawDesc = nil
|
||||
file_store_proto_goTypes = nil
|
||||
file_store_proto_depIdxs = nil
|
||||
}
|
709
vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/pb/waku_store.pb.go
generated
vendored
709
vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/pb/waku_store.pb.go
generated
vendored
|
@ -1,709 +0,0 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.26.0
|
||||
// protoc v3.21.12
|
||||
// source: waku_store.proto
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
pb "github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type PagingInfo_Direction int32
|
||||
|
||||
const (
|
||||
PagingInfo_BACKWARD PagingInfo_Direction = 0
|
||||
PagingInfo_FORWARD PagingInfo_Direction = 1
|
||||
)
|
||||
|
||||
// Enum value maps for PagingInfo_Direction.
|
||||
var (
|
||||
PagingInfo_Direction_name = map[int32]string{
|
||||
0: "BACKWARD",
|
||||
1: "FORWARD",
|
||||
}
|
||||
PagingInfo_Direction_value = map[string]int32{
|
||||
"BACKWARD": 0,
|
||||
"FORWARD": 1,
|
||||
}
|
||||
)
|
||||
|
||||
func (x PagingInfo_Direction) Enum() *PagingInfo_Direction {
|
||||
p := new(PagingInfo_Direction)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
|
||||
func (x PagingInfo_Direction) String() string {
|
||||
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
|
||||
}
|
||||
|
||||
func (PagingInfo_Direction) Descriptor() protoreflect.EnumDescriptor {
|
||||
return file_waku_store_proto_enumTypes[0].Descriptor()
|
||||
}
|
||||
|
||||
func (PagingInfo_Direction) Type() protoreflect.EnumType {
|
||||
return &file_waku_store_proto_enumTypes[0]
|
||||
}
|
||||
|
||||
func (x PagingInfo_Direction) Number() protoreflect.EnumNumber {
|
||||
return protoreflect.EnumNumber(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use PagingInfo_Direction.Descriptor instead.
|
||||
func (PagingInfo_Direction) EnumDescriptor() ([]byte, []int) {
|
||||
return file_waku_store_proto_rawDescGZIP(), []int{1, 0}
|
||||
}
|
||||
|
||||
type HistoryResponse_Error int32
|
||||
|
||||
const (
|
||||
HistoryResponse_NONE HistoryResponse_Error = 0
|
||||
HistoryResponse_INVALID_CURSOR HistoryResponse_Error = 1
|
||||
)
|
||||
|
||||
// Enum value maps for HistoryResponse_Error.
|
||||
var (
|
||||
HistoryResponse_Error_name = map[int32]string{
|
||||
0: "NONE",
|
||||
1: "INVALID_CURSOR",
|
||||
}
|
||||
HistoryResponse_Error_value = map[string]int32{
|
||||
"NONE": 0,
|
||||
"INVALID_CURSOR": 1,
|
||||
}
|
||||
)
|
||||
|
||||
func (x HistoryResponse_Error) Enum() *HistoryResponse_Error {
|
||||
p := new(HistoryResponse_Error)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
|
||||
func (x HistoryResponse_Error) String() string {
|
||||
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
|
||||
}
|
||||
|
||||
func (HistoryResponse_Error) Descriptor() protoreflect.EnumDescriptor {
|
||||
return file_waku_store_proto_enumTypes[1].Descriptor()
|
||||
}
|
||||
|
||||
func (HistoryResponse_Error) Type() protoreflect.EnumType {
|
||||
return &file_waku_store_proto_enumTypes[1]
|
||||
}
|
||||
|
||||
func (x HistoryResponse_Error) Number() protoreflect.EnumNumber {
|
||||
return protoreflect.EnumNumber(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use HistoryResponse_Error.Descriptor instead.
|
||||
func (HistoryResponse_Error) EnumDescriptor() ([]byte, []int) {
|
||||
return file_waku_store_proto_rawDescGZIP(), []int{4, 0}
|
||||
}
|
||||
|
||||
type Index struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Digest []byte `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"`
|
||||
ReceiverTime int64 `protobuf:"zigzag64,2,opt,name=receiverTime,proto3" json:"receiverTime,omitempty"`
|
||||
SenderTime int64 `protobuf:"zigzag64,3,opt,name=senderTime,proto3" json:"senderTime,omitempty"`
|
||||
PubsubTopic string `protobuf:"bytes,4,opt,name=pubsubTopic,proto3" json:"pubsubTopic,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Index) Reset() {
|
||||
*x = Index{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_waku_store_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Index) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Index) ProtoMessage() {}
|
||||
|
||||
func (x *Index) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_waku_store_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Index.ProtoReflect.Descriptor instead.
|
||||
func (*Index) Descriptor() ([]byte, []int) {
|
||||
return file_waku_store_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Index) GetDigest() []byte {
|
||||
if x != nil {
|
||||
return x.Digest
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Index) GetReceiverTime() int64 {
|
||||
if x != nil {
|
||||
return x.ReceiverTime
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Index) GetSenderTime() int64 {
|
||||
if x != nil {
|
||||
return x.SenderTime
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Index) GetPubsubTopic() string {
|
||||
if x != nil {
|
||||
return x.PubsubTopic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type PagingInfo struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
PageSize uint64 `protobuf:"varint,1,opt,name=pageSize,proto3" json:"pageSize,omitempty"`
|
||||
Cursor *Index `protobuf:"bytes,2,opt,name=cursor,proto3" json:"cursor,omitempty"`
|
||||
Direction PagingInfo_Direction `protobuf:"varint,3,opt,name=direction,proto3,enum=pb.PagingInfo_Direction" json:"direction,omitempty"`
|
||||
}
|
||||
|
||||
func (x *PagingInfo) Reset() {
|
||||
*x = PagingInfo{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_waku_store_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *PagingInfo) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*PagingInfo) ProtoMessage() {}
|
||||
|
||||
func (x *PagingInfo) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_waku_store_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use PagingInfo.ProtoReflect.Descriptor instead.
|
||||
func (*PagingInfo) Descriptor() ([]byte, []int) {
|
||||
return file_waku_store_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *PagingInfo) GetPageSize() uint64 {
|
||||
if x != nil {
|
||||
return x.PageSize
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *PagingInfo) GetCursor() *Index {
|
||||
if x != nil {
|
||||
return x.Cursor
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *PagingInfo) GetDirection() PagingInfo_Direction {
|
||||
if x != nil {
|
||||
return x.Direction
|
||||
}
|
||||
return PagingInfo_BACKWARD
|
||||
}
|
||||
|
||||
type ContentFilter struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
ContentTopic string `protobuf:"bytes,1,opt,name=contentTopic,proto3" json:"contentTopic,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ContentFilter) Reset() {
|
||||
*x = ContentFilter{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_waku_store_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ContentFilter) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ContentFilter) ProtoMessage() {}
|
||||
|
||||
func (x *ContentFilter) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_waku_store_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ContentFilter.ProtoReflect.Descriptor instead.
|
||||
func (*ContentFilter) Descriptor() ([]byte, []int) {
|
||||
return file_waku_store_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *ContentFilter) GetContentTopic() string {
|
||||
if x != nil {
|
||||
return x.ContentTopic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type HistoryQuery struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
PubsubTopic string `protobuf:"bytes,2,opt,name=pubsubTopic,proto3" json:"pubsubTopic,omitempty"`
|
||||
ContentFilters []*ContentFilter `protobuf:"bytes,3,rep,name=contentFilters,proto3" json:"contentFilters,omitempty"`
|
||||
PagingInfo *PagingInfo `protobuf:"bytes,4,opt,name=pagingInfo,proto3" json:"pagingInfo,omitempty"` // used for pagination
|
||||
StartTime int64 `protobuf:"zigzag64,5,opt,name=startTime,proto3" json:"startTime,omitempty"`
|
||||
EndTime int64 `protobuf:"zigzag64,6,opt,name=endTime,proto3" json:"endTime,omitempty"`
|
||||
}
|
||||
|
||||
func (x *HistoryQuery) Reset() {
|
||||
*x = HistoryQuery{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_waku_store_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *HistoryQuery) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*HistoryQuery) ProtoMessage() {}
|
||||
|
||||
func (x *HistoryQuery) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_waku_store_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use HistoryQuery.ProtoReflect.Descriptor instead.
|
||||
func (*HistoryQuery) Descriptor() ([]byte, []int) {
|
||||
return file_waku_store_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *HistoryQuery) GetPubsubTopic() string {
|
||||
if x != nil {
|
||||
return x.PubsubTopic
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *HistoryQuery) GetContentFilters() []*ContentFilter {
|
||||
if x != nil {
|
||||
return x.ContentFilters
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *HistoryQuery) GetPagingInfo() *PagingInfo {
|
||||
if x != nil {
|
||||
return x.PagingInfo
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *HistoryQuery) GetStartTime() int64 {
|
||||
if x != nil {
|
||||
return x.StartTime
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *HistoryQuery) GetEndTime() int64 {
|
||||
if x != nil {
|
||||
return x.EndTime
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type HistoryResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// the first field is reserved for future use
|
||||
Messages []*pb.WakuMessage `protobuf:"bytes,2,rep,name=messages,proto3" json:"messages,omitempty"`
|
||||
PagingInfo *PagingInfo `protobuf:"bytes,3,opt,name=pagingInfo,proto3" json:"pagingInfo,omitempty"`
|
||||
Error HistoryResponse_Error `protobuf:"varint,4,opt,name=error,proto3,enum=pb.HistoryResponse_Error" json:"error,omitempty"`
|
||||
}
|
||||
|
||||
func (x *HistoryResponse) Reset() {
|
||||
*x = HistoryResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_waku_store_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *HistoryResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*HistoryResponse) ProtoMessage() {}
|
||||
|
||||
func (x *HistoryResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_waku_store_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use HistoryResponse.ProtoReflect.Descriptor instead.
|
||||
func (*HistoryResponse) Descriptor() ([]byte, []int) {
|
||||
return file_waku_store_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *HistoryResponse) GetMessages() []*pb.WakuMessage {
|
||||
if x != nil {
|
||||
return x.Messages
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *HistoryResponse) GetPagingInfo() *PagingInfo {
|
||||
if x != nil {
|
||||
return x.PagingInfo
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *HistoryResponse) GetError() HistoryResponse_Error {
|
||||
if x != nil {
|
||||
return x.Error
|
||||
}
|
||||
return HistoryResponse_NONE
|
||||
}
|
||||
|
||||
type HistoryRPC struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
|
||||
Query *HistoryQuery `protobuf:"bytes,2,opt,name=query,proto3" json:"query,omitempty"`
|
||||
Response *HistoryResponse `protobuf:"bytes,3,opt,name=response,proto3" json:"response,omitempty"`
|
||||
}
|
||||
|
||||
func (x *HistoryRPC) Reset() {
|
||||
*x = HistoryRPC{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_waku_store_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *HistoryRPC) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*HistoryRPC) ProtoMessage() {}
|
||||
|
||||
func (x *HistoryRPC) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_waku_store_proto_msgTypes[5]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use HistoryRPC.ProtoReflect.Descriptor instead.
|
||||
func (*HistoryRPC) Descriptor() ([]byte, []int) {
|
||||
return file_waku_store_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *HistoryRPC) GetRequestId() string {
|
||||
if x != nil {
|
||||
return x.RequestId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *HistoryRPC) GetQuery() *HistoryQuery {
|
||||
if x != nil {
|
||||
return x.Query
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *HistoryRPC) GetResponse() *HistoryResponse {
|
||||
if x != nil {
|
||||
return x.Response
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_waku_store_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_waku_store_proto_rawDesc = []byte{
|
||||
0x0a, 0x10, 0x77, 0x61, 0x6b, 0x75, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x12, 0x77, 0x61, 0x6b, 0x75, 0x5f, 0x6d, 0x65, 0x73,
|
||||
0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x85, 0x01, 0x0a, 0x05, 0x49,
|
||||
0x6e, 0x64, 0x65, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x22, 0x0a, 0x0c,
|
||||
0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01,
|
||||
0x28, 0x12, 0x52, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65,
|
||||
0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x03,
|
||||
0x20, 0x01, 0x28, 0x12, 0x52, 0x0a, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65,
|
||||
0x12, 0x20, 0x0a, 0x0b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x18,
|
||||
0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x54, 0x6f, 0x70,
|
||||
0x69, 0x63, 0x22, 0xab, 0x01, 0x0a, 0x0a, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66,
|
||||
0x6f, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x21, 0x0a,
|
||||
0x06, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e,
|
||||
0x70, 0x62, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x06, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72,
|
||||
0x12, 0x36, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20,
|
||||
0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x49,
|
||||
0x6e, 0x66, 0x6f, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x64,
|
||||
0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x26, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65,
|
||||
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0c, 0x0a, 0x08, 0x42, 0x41, 0x43, 0x4b, 0x57, 0x41, 0x52,
|
||||
0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x4f, 0x52, 0x57, 0x41, 0x52, 0x44, 0x10, 0x01,
|
||||
0x22, 0x33, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65,
|
||||
0x72, 0x12, 0x22, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x70, 0x69,
|
||||
0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
|
||||
0x54, 0x6f, 0x70, 0x69, 0x63, 0x22, 0xd3, 0x01, 0x0a, 0x0c, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72,
|
||||
0x79, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62,
|
||||
0x54, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x75, 0x62,
|
||||
0x73, 0x75, 0x62, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x39, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74,
|
||||
0x65, 0x6e, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b,
|
||||
0x32, 0x11, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x6c,
|
||||
0x74, 0x65, 0x72, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x6c, 0x74,
|
||||
0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66,
|
||||
0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x61, 0x67,
|
||||
0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x49,
|
||||
0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65,
|
||||
0x18, 0x05, 0x20, 0x01, 0x28, 0x12, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d,
|
||||
0x65, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01,
|
||||
0x28, 0x12, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xc6, 0x01, 0x0a, 0x0f,
|
||||
0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
|
||||
0x2b, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
|
||||
0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x62, 0x2e, 0x57, 0x61, 0x6b, 0x75, 0x4d, 0x65, 0x73, 0x73, 0x61,
|
||||
0x67, 0x65, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x0a,
|
||||
0x70, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
|
||||
0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f,
|
||||
0x52, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2f, 0x0a, 0x05,
|
||||
0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x70, 0x62,
|
||||
0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x25, 0x0a,
|
||||
0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00,
|
||||
0x12, 0x12, 0x0a, 0x0e, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x43, 0x55, 0x52, 0x53,
|
||||
0x4f, 0x52, 0x10, 0x01, 0x22, 0x84, 0x01, 0x0a, 0x0a, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79,
|
||||
0x52, 0x50, 0x43, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69,
|
||||
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x49, 0x64, 0x12, 0x26, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28,
|
||||
0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x51, 0x75,
|
||||
0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2f, 0x0a, 0x08, 0x72, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70,
|
||||
0x62, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_waku_store_proto_rawDescOnce sync.Once
|
||||
file_waku_store_proto_rawDescData = file_waku_store_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_waku_store_proto_rawDescGZIP() []byte {
|
||||
file_waku_store_proto_rawDescOnce.Do(func() {
|
||||
file_waku_store_proto_rawDescData = protoimpl.X.CompressGZIP(file_waku_store_proto_rawDescData)
|
||||
})
|
||||
return file_waku_store_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_waku_store_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
|
||||
var file_waku_store_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
|
||||
var file_waku_store_proto_goTypes = []interface{}{
|
||||
(PagingInfo_Direction)(0), // 0: pb.PagingInfo.Direction
|
||||
(HistoryResponse_Error)(0), // 1: pb.HistoryResponse.Error
|
||||
(*Index)(nil), // 2: pb.Index
|
||||
(*PagingInfo)(nil), // 3: pb.PagingInfo
|
||||
(*ContentFilter)(nil), // 4: pb.ContentFilter
|
||||
(*HistoryQuery)(nil), // 5: pb.HistoryQuery
|
||||
(*HistoryResponse)(nil), // 6: pb.HistoryResponse
|
||||
(*HistoryRPC)(nil), // 7: pb.HistoryRPC
|
||||
(*pb.WakuMessage)(nil), // 8: pb.WakuMessage
|
||||
}
|
||||
var file_waku_store_proto_depIdxs = []int32{
|
||||
2, // 0: pb.PagingInfo.cursor:type_name -> pb.Index
|
||||
0, // 1: pb.PagingInfo.direction:type_name -> pb.PagingInfo.Direction
|
||||
4, // 2: pb.HistoryQuery.contentFilters:type_name -> pb.ContentFilter
|
||||
3, // 3: pb.HistoryQuery.pagingInfo:type_name -> pb.PagingInfo
|
||||
8, // 4: pb.HistoryResponse.messages:type_name -> pb.WakuMessage
|
||||
3, // 5: pb.HistoryResponse.pagingInfo:type_name -> pb.PagingInfo
|
||||
1, // 6: pb.HistoryResponse.error:type_name -> pb.HistoryResponse.Error
|
||||
5, // 7: pb.HistoryRPC.query:type_name -> pb.HistoryQuery
|
||||
6, // 8: pb.HistoryRPC.response:type_name -> pb.HistoryResponse
|
||||
9, // [9:9] is the sub-list for method output_type
|
||||
9, // [9:9] is the sub-list for method input_type
|
||||
9, // [9:9] is the sub-list for extension type_name
|
||||
9, // [9:9] is the sub-list for extension extendee
|
||||
0, // [0:9] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_waku_store_proto_init() }
|
||||
func file_waku_store_proto_init() {
|
||||
if File_waku_store_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_waku_store_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Index); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_waku_store_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*PagingInfo); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_waku_store_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ContentFilter); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_waku_store_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*HistoryQuery); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_waku_store_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*HistoryResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_waku_store_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*HistoryRPC); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_waku_store_proto_rawDesc,
|
||||
NumEnums: 2,
|
||||
NumMessages: 6,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_waku_store_proto_goTypes,
|
||||
DependencyIndexes: file_waku_store_proto_depIdxs,
|
||||
EnumInfos: file_waku_store_proto_enumTypes,
|
||||
MessageInfos: file_waku_store_proto_msgTypes,
|
||||
}.Build()
|
||||
File_waku_store_proto = out.File
|
||||
file_waku_store_proto_rawDesc = nil
|
||||
file_waku_store_proto_goTypes = nil
|
||||
file_waku_store_proto_depIdxs = nil
|
||||
}
|
|
@ -1,51 +0,0 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package pb;
|
||||
|
||||
import "waku_message.proto";
|
||||
|
||||
message Index {
|
||||
bytes digest = 1;
|
||||
sint64 receiverTime = 2;
|
||||
sint64 senderTime = 3;
|
||||
string pubsubTopic = 4;
|
||||
}
|
||||
|
||||
message PagingInfo {
|
||||
uint64 pageSize = 1;
|
||||
Index cursor = 2;
|
||||
enum Direction {
|
||||
BACKWARD = 0;
|
||||
FORWARD = 1;
|
||||
}
|
||||
Direction direction = 3;
|
||||
}
|
||||
|
||||
message ContentFilter {
|
||||
string contentTopic = 1;
|
||||
}
|
||||
|
||||
message HistoryQuery {
|
||||
string pubsubTopic = 2;
|
||||
repeated ContentFilter contentFilters = 3;
|
||||
PagingInfo pagingInfo = 4; // used for pagination
|
||||
sint64 startTime = 5;
|
||||
sint64 endTime = 6;
|
||||
}
|
||||
|
||||
message HistoryResponse {
|
||||
// the first field is reserved for future use
|
||||
repeated WakuMessage messages = 2;
|
||||
PagingInfo pagingInfo = 3;
|
||||
enum Error {
|
||||
NONE = 0;
|
||||
INVALID_CURSOR = 1;
|
||||
}
|
||||
Error error = 4;
|
||||
}
|
||||
|
||||
message HistoryRPC {
|
||||
string request_id = 1;
|
||||
HistoryQuery query = 2;
|
||||
HistoryResponse response = 3;
|
||||
}
|
130
vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/waku_store_client.go
generated
vendored
130
vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/waku_store_client.go
generated
vendored
|
@ -8,20 +8,22 @@ import (
|
|||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-msgio/pbio"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/waku-org/go-waku/logging"
|
||||
"github.com/waku-org/go-waku/waku/v2/peermanager"
|
||||
"github.com/waku-org/go-waku/waku/v2/peerstore"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
wpb "github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/store/pb"
|
||||
)
|
||||
|
||||
type Query struct {
|
||||
Topic string
|
||||
PubsubTopic string
|
||||
ContentTopics []string
|
||||
StartTime int64
|
||||
EndTime int64
|
||||
StartTime *int64
|
||||
EndTime *int64
|
||||
}
|
||||
|
||||
// Result represents a valid response from a store node
|
||||
|
@ -82,6 +84,7 @@ type criteriaFN = func(msg *wpb.WakuMessage) (bool, error)
|
|||
|
||||
type HistoryRequestParameters struct {
|
||||
selectedPeer peer.ID
|
||||
peerAddr multiaddr.Multiaddr
|
||||
peerSelectionType peermanager.PeerSelection
|
||||
preferredPeers peer.IDSlice
|
||||
localQuery bool
|
||||
|
@ -93,12 +96,31 @@ type HistoryRequestParameters struct {
|
|||
s *WakuStore
|
||||
}
|
||||
|
||||
type HistoryRequestOption func(*HistoryRequestParameters)
|
||||
type HistoryRequestOption func(*HistoryRequestParameters) error
|
||||
|
||||
// WithPeer is an option used to specify the peerID to request the message history
|
||||
// WithPeer is an option used to specify the peerID to request the message history.
|
||||
// Note that this option is mutually exclusive to WithPeerAddr, only one of them can be used.
|
||||
func WithPeer(p peer.ID) HistoryRequestOption {
|
||||
return func(params *HistoryRequestParameters) {
|
||||
return func(params *HistoryRequestParameters) error {
|
||||
params.selectedPeer = p
|
||||
if params.peerAddr != nil {
|
||||
return errors.New("peerId and peerAddr options are mutually exclusive")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
//WithPeerAddr is an option used to specify a peerAddress to request the message history.
|
||||
// This new peer will be added to peerStore.
|
||||
// Note that this option is mutually exclusive to WithPeerAddr, only one of them can be used.
|
||||
|
||||
func WithPeerAddr(pAddr multiaddr.Multiaddr) HistoryRequestOption {
|
||||
return func(params *HistoryRequestParameters) error {
|
||||
params.peerAddr = pAddr
|
||||
if params.selectedPeer != "" {
|
||||
return errors.New("peerAddr and peerId options are mutually exclusive")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -108,9 +130,10 @@ func WithPeer(p peer.ID) HistoryRequestOption {
|
|||
// from the node peerstore
|
||||
// Note: This option is avaiable only with peerManager
|
||||
func WithAutomaticPeerSelection(fromThesePeers ...peer.ID) HistoryRequestOption {
|
||||
return func(params *HistoryRequestParameters) {
|
||||
return func(params *HistoryRequestParameters) error {
|
||||
params.peerSelectionType = peermanager.Automatic
|
||||
params.preferredPeers = fromThesePeers
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -120,44 +143,50 @@ func WithAutomaticPeerSelection(fromThesePeers ...peer.ID) HistoryRequestOption
|
|||
// from the node peerstore
|
||||
// Note: This option is avaiable only with peerManager
|
||||
func WithFastestPeerSelection(fromThesePeers ...peer.ID) HistoryRequestOption {
|
||||
return func(params *HistoryRequestParameters) {
|
||||
return func(params *HistoryRequestParameters) error {
|
||||
params.peerSelectionType = peermanager.LowestRTT
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithRequestID is an option to set a specific request ID to be used when
|
||||
// creating a store request
|
||||
func WithRequestID(requestID []byte) HistoryRequestOption {
|
||||
return func(params *HistoryRequestParameters) {
|
||||
return func(params *HistoryRequestParameters) error {
|
||||
params.requestID = requestID
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithAutomaticRequestID is an option to automatically generate a request ID
|
||||
// when creating a store request
|
||||
func WithAutomaticRequestID() HistoryRequestOption {
|
||||
return func(params *HistoryRequestParameters) {
|
||||
return func(params *HistoryRequestParameters) error {
|
||||
params.requestID = protocol.GenerateRequestID()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithCursor(c *pb.Index) HistoryRequestOption {
|
||||
return func(params *HistoryRequestParameters) {
|
||||
return func(params *HistoryRequestParameters) error {
|
||||
params.cursor = c
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithPaging is an option used to specify the order and maximum number of records to return
|
||||
func WithPaging(asc bool, pageSize uint64) HistoryRequestOption {
|
||||
return func(params *HistoryRequestParameters) {
|
||||
return func(params *HistoryRequestParameters) error {
|
||||
params.asc = asc
|
||||
params.pageSize = pageSize
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithLocalQuery() HistoryRequestOption {
|
||||
return func(params *HistoryRequestParameters) {
|
||||
return func(params *HistoryRequestParameters) error {
|
||||
params.localQuery = true
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -247,35 +276,62 @@ func (store *WakuStore) localQuery(historyQuery *pb.HistoryRPC) (*pb.HistoryResp
|
|||
}
|
||||
|
||||
func (store *WakuStore) Query(ctx context.Context, query Query, opts ...HistoryRequestOption) (*Result, error) {
|
||||
|
||||
params := new(HistoryRequestParameters)
|
||||
params.s = store
|
||||
|
||||
optList := DefaultOptions()
|
||||
optList = append(optList, opts...)
|
||||
for _, opt := range optList {
|
||||
opt(params)
|
||||
}
|
||||
if store.pm != nil && params.selectedPeer == "" {
|
||||
var err error
|
||||
params.selectedPeer, err = store.pm.SelectPeer(
|
||||
peermanager.PeerSelectionCriteria{
|
||||
SelectionType: params.peerSelectionType,
|
||||
Proto: StoreID_v20beta4,
|
||||
PubsubTopic: query.Topic,
|
||||
SpecificPeers: params.preferredPeers,
|
||||
Ctx: ctx,
|
||||
},
|
||||
)
|
||||
err := opt(params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if !params.localQuery {
|
||||
pubsubTopics := []string{}
|
||||
if query.PubsubTopic == "" {
|
||||
for _, cTopic := range query.ContentTopics {
|
||||
pubsubTopic, err := protocol.GetPubSubTopicFromContentTopic(cTopic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pubsubTopics = append(pubsubTopics, pubsubTopic)
|
||||
}
|
||||
} else {
|
||||
pubsubTopics = append(pubsubTopics, query.PubsubTopic)
|
||||
}
|
||||
|
||||
//Add Peer to peerstore.
|
||||
if store.pm != nil && params.peerAddr != nil {
|
||||
pData, err := store.pm.AddPeer(params.peerAddr, peerstore.Static, pubsubTopics, StoreID_v20beta4)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
store.pm.Connect(pData)
|
||||
params.selectedPeer = pData.AddrInfo.ID
|
||||
}
|
||||
if store.pm != nil && params.selectedPeer == "" {
|
||||
var err error
|
||||
params.selectedPeer, err = store.pm.SelectPeer(
|
||||
peermanager.PeerSelectionCriteria{
|
||||
SelectionType: params.peerSelectionType,
|
||||
Proto: StoreID_v20beta4,
|
||||
PubsubTopics: pubsubTopics,
|
||||
SpecificPeers: params.preferredPeers,
|
||||
Ctx: ctx,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
historyRequest := &pb.HistoryRPC{
|
||||
RequestId: hex.EncodeToString(params.requestID),
|
||||
Query: &pb.HistoryQuery{
|
||||
PubsubTopic: query.Topic,
|
||||
PubsubTopic: query.PubsubTopic,
|
||||
ContentFilters: []*pb.ContentFilter{},
|
||||
StartTime: query.StartTime,
|
||||
EndTime: query.EndTime,
|
||||
|
@ -396,23 +452,9 @@ func (store *WakuStore) Next(ctx context.Context, r *Result) (*Result, error) {
|
|||
|
||||
historyRequest := &pb.HistoryRPC{
|
||||
RequestId: hex.EncodeToString(protocol.GenerateRequestID()),
|
||||
Query: &pb.HistoryQuery{
|
||||
PubsubTopic: r.Query().PubsubTopic,
|
||||
ContentFilters: r.Query().ContentFilters,
|
||||
StartTime: r.Query().StartTime,
|
||||
EndTime: r.Query().EndTime,
|
||||
PagingInfo: &pb.PagingInfo{
|
||||
PageSize: r.Query().PagingInfo.PageSize,
|
||||
Direction: r.Query().PagingInfo.Direction,
|
||||
Cursor: &pb.Index{
|
||||
Digest: r.Cursor().Digest,
|
||||
ReceiverTime: r.Cursor().ReceiverTime,
|
||||
SenderTime: r.Cursor().SenderTime,
|
||||
PubsubTopic: r.Cursor().PubsubTopic,
|
||||
},
|
||||
},
|
||||
},
|
||||
Query: r.Query(),
|
||||
}
|
||||
historyRequest.Query.PagingInfo.Cursor = r.Cursor()
|
||||
|
||||
response, err := store.queryFrom(ctx, historyRequest, r.PeerID())
|
||||
if err != nil {
|
||||
|
|
|
@ -16,6 +16,7 @@ import (
|
|||
|
||||
// StoreID_v20beta4 is the current Waku Store protocol identifier
|
||||
const StoreID_v20beta4 = libp2pProtocol.ID("/vac/waku/store/2.0.0-beta4")
|
||||
const StoreENRField = uint8(1 << 1)
|
||||
|
||||
// MaxPageSize is the maximum number of waku messages to return per page
|
||||
const MaxPageSize = 20
|
||||
|
@ -64,5 +65,8 @@ func NewWakuStore(p MessageProvider, pm *peermanager.PeerManager, timesource tim
|
|||
wakuStore.pm = pm
|
||||
wakuStore.metrics = newMetrics(reg)
|
||||
|
||||
if pm != nil {
|
||||
pm.RegisterWakuProtocol(StoreID_v20beta4, StoreENRField)
|
||||
}
|
||||
return wakuStore
|
||||
}
|
||||
|
|
6
vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/waku_store_protocol.go
generated
vendored
6
vendor/github.com/waku-org/go-waku/waku/v2/protocol/store/waku_store_protocol.go
generated
vendored
|
@ -133,7 +133,7 @@ func (store *WakuStore) Start(ctx context.Context, sub *relay.Subscription) erro
|
|||
|
||||
func (store *WakuStore) storeMessage(env *protocol.Envelope) error {
|
||||
|
||||
if env.Message().Ephemeral {
|
||||
if env.Message().GetEphemeral() {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -337,8 +337,8 @@ func (store *WakuStore) Resume(ctx context.Context, pubsubTopic string, peerList
|
|||
|
||||
rpc := &pb.HistoryQuery{
|
||||
PubsubTopic: pubsubTopic,
|
||||
StartTime: lastSeenTime,
|
||||
EndTime: currentTime,
|
||||
StartTime: &lastSeenTime,
|
||||
EndTime: ¤tTime,
|
||||
PagingInfo: &pb.PagingInfo{
|
||||
PageSize: 0,
|
||||
Direction: pb.PagingInfo_BACKWARD,
|
||||
|
|
122
vendor/github.com/waku-org/go-waku/waku/v2/protocol/subscription/subscription_details.go
generated
vendored
Normal file
122
vendor/github.com/waku-org/go-waku/waku/v2/protocol/subscription/subscription_details.go
generated
vendored
Normal file
|
@ -0,0 +1,122 @@
|
|||
package subscription
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"sync"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
)
|
||||
|
||||
// Map of SubscriptionDetails.ID to subscriptions
|
||||
type SubscriptionSet map[string]*SubscriptionDetails
|
||||
|
||||
type PeerSubscription struct {
|
||||
PeerID peer.ID
|
||||
SubsPerPubsubTopic map[string]SubscriptionSet
|
||||
}
|
||||
|
||||
type PeerContentFilter struct {
|
||||
PeerID peer.ID `json:"peerID"`
|
||||
PubsubTopic string `json:"pubsubTopics"`
|
||||
ContentTopics []string `json:"contentTopics"`
|
||||
}
|
||||
|
||||
type SubscriptionDetails struct {
|
||||
sync.RWMutex
|
||||
|
||||
ID string `json:"subscriptionID"`
|
||||
mapRef *SubscriptionsMap
|
||||
Closed bool `json:"-"`
|
||||
once sync.Once
|
||||
|
||||
PeerID peer.ID `json:"peerID"`
|
||||
ContentFilter protocol.ContentFilter `json:"contentFilters"`
|
||||
C chan *protocol.Envelope `json:"-"`
|
||||
}
|
||||
|
||||
func (s *SubscriptionDetails) Add(contentTopics ...string) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
for _, ct := range contentTopics {
|
||||
if _, ok := s.ContentFilter.ContentTopics[ct]; !ok {
|
||||
s.ContentFilter.ContentTopics[ct] = struct{}{}
|
||||
// Increase the number of subscriptions for this (pubsubTopic, contentTopic) pair
|
||||
s.mapRef.Lock()
|
||||
s.mapRef.increaseSubFor(s.ContentFilter.PubsubTopic, ct)
|
||||
s.mapRef.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SubscriptionDetails) Remove(contentTopics ...string) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
for _, ct := range contentTopics {
|
||||
if _, ok := s.ContentFilter.ContentTopics[ct]; ok {
|
||||
delete(s.ContentFilter.ContentTopics, ct)
|
||||
// Decrease the number of subscriptions for this (pubsubTopic, contentTopic) pair
|
||||
s.mapRef.Lock()
|
||||
s.mapRef.decreaseSubFor(s.ContentFilter.PubsubTopic, ct)
|
||||
s.mapRef.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
if len(s.ContentFilter.ContentTopics) == 0 {
|
||||
// err doesn't matter
|
||||
_ = s.mapRef.Delete(s)
|
||||
}
|
||||
}
|
||||
|
||||
// C1 if contentFilter is empty, it means that given subscription is part of contentFilter
|
||||
// C2 if not empty, check matching pubsubsTopic and atleast 1 contentTopic
|
||||
func (s *SubscriptionDetails) isPartOf(contentFilter protocol.ContentFilter) bool {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
if contentFilter.PubsubTopic != "" && // C1
|
||||
s.ContentFilter.PubsubTopic != contentFilter.PubsubTopic { // C2
|
||||
return false
|
||||
}
|
||||
// C1
|
||||
if len(contentFilter.ContentTopics) == 0 {
|
||||
return true
|
||||
}
|
||||
// C2
|
||||
for cTopic := range contentFilter.ContentTopics {
|
||||
if _, ok := s.ContentFilter.ContentTopics[cTopic]; ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *SubscriptionDetails) CloseC() {
|
||||
s.once.Do(func() {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
s.Closed = true
|
||||
close(s.C)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *SubscriptionDetails) Close() error {
|
||||
s.CloseC()
|
||||
return s.mapRef.Delete(s)
|
||||
}
|
||||
|
||||
func (s *SubscriptionDetails) MarshalJSON() ([]byte, error) {
|
||||
result := struct {
|
||||
PeerID peer.ID `json:"peerID"`
|
||||
PubsubTopic string `json:"pubsubTopics"`
|
||||
ContentTopics []string `json:"contentTopics"`
|
||||
}{
|
||||
PeerID: s.PeerID,
|
||||
PubsubTopic: s.ContentFilter.PubsubTopic,
|
||||
ContentTopics: s.ContentFilter.ContentTopics.ToList(),
|
||||
}
|
||||
|
||||
return json.Marshal(result)
|
||||
}
|
165
vendor/github.com/waku-org/go-waku/waku/v2/protocol/subscription/subscriptions_map.go
generated
vendored
165
vendor/github.com/waku-org/go-waku/waku/v2/protocol/subscription/subscriptions_map.go
generated
vendored
|
@ -1,7 +1,6 @@
|
|||
package subscription
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
|
@ -12,53 +11,57 @@ import (
|
|||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
type SubscriptionDetails struct {
|
||||
sync.RWMutex
|
||||
|
||||
ID string `json:"subscriptionID"`
|
||||
mapRef *SubscriptionsMap
|
||||
Closed bool `json:"-"`
|
||||
once sync.Once
|
||||
|
||||
PeerID peer.ID `json:"peerID"`
|
||||
ContentFilter protocol.ContentFilter `json:"contentFilters"`
|
||||
C chan *protocol.Envelope `json:"-"`
|
||||
}
|
||||
|
||||
// Map of SubscriptionDetails.ID to subscriptions
|
||||
type SubscriptionSet map[string]*SubscriptionDetails
|
||||
|
||||
type PeerSubscription struct {
|
||||
PeerID peer.ID
|
||||
SubsPerPubsubTopic map[string]SubscriptionSet
|
||||
}
|
||||
|
||||
type SubscriptionsMap struct {
|
||||
sync.RWMutex
|
||||
logger *zap.Logger
|
||||
Items map[peer.ID]*PeerSubscription
|
||||
logger *zap.Logger
|
||||
items map[peer.ID]*PeerSubscription
|
||||
noOfSubs map[string]map[string]int
|
||||
}
|
||||
|
||||
var ErrNotFound = errors.New("not found")
|
||||
|
||||
func NewSubscriptionMap(logger *zap.Logger) *SubscriptionsMap {
|
||||
return &SubscriptionsMap{
|
||||
logger: logger.Named("subscription-map"),
|
||||
Items: make(map[peer.ID]*PeerSubscription),
|
||||
logger: logger.Named("subscription-map"),
|
||||
items: make(map[peer.ID]*PeerSubscription),
|
||||
noOfSubs: map[string]map[string]int{},
|
||||
}
|
||||
}
|
||||
|
||||
func (m *SubscriptionsMap) Count() int {
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
return len(m.items)
|
||||
}
|
||||
|
||||
func (m *SubscriptionsMap) IsListening(pubsubTopic, contentTopic string) bool {
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
return m.noOfSubs[pubsubTopic] != nil && m.noOfSubs[pubsubTopic][contentTopic] > 0
|
||||
}
|
||||
|
||||
func (m *SubscriptionsMap) increaseSubFor(pubsubTopic, contentTopic string) {
|
||||
if m.noOfSubs[pubsubTopic] == nil {
|
||||
m.noOfSubs[pubsubTopic] = map[string]int{}
|
||||
}
|
||||
m.noOfSubs[pubsubTopic][contentTopic] = m.noOfSubs[pubsubTopic][contentTopic] + 1
|
||||
}
|
||||
|
||||
func (m *SubscriptionsMap) decreaseSubFor(pubsubTopic, contentTopic string) {
|
||||
m.noOfSubs[pubsubTopic][contentTopic] = m.noOfSubs[pubsubTopic][contentTopic] - 1
|
||||
}
|
||||
|
||||
func (sub *SubscriptionsMap) NewSubscription(peerID peer.ID, cf protocol.ContentFilter) *SubscriptionDetails {
|
||||
sub.Lock()
|
||||
defer sub.Unlock()
|
||||
|
||||
peerSubscription, ok := sub.Items[peerID]
|
||||
peerSubscription, ok := sub.items[peerID]
|
||||
if !ok {
|
||||
peerSubscription = &PeerSubscription{
|
||||
PeerID: peerID,
|
||||
SubsPerPubsubTopic: make(map[string]SubscriptionSet),
|
||||
}
|
||||
sub.Items[peerID] = peerSubscription
|
||||
sub.items[peerID] = peerSubscription
|
||||
}
|
||||
|
||||
_, ok = peerSubscription.SubsPerPubsubTopic[cf.PubsubTopic]
|
||||
|
@ -74,7 +77,12 @@ func (sub *SubscriptionsMap) NewSubscription(peerID peer.ID, cf protocol.Content
|
|||
ContentFilter: protocol.ContentFilter{PubsubTopic: cf.PubsubTopic, ContentTopics: maps.Clone(cf.ContentTopics)},
|
||||
}
|
||||
|
||||
sub.Items[peerID].SubsPerPubsubTopic[cf.PubsubTopic][details.ID] = details
|
||||
// Increase the number of subscriptions for this (pubsubTopic, contentTopic) pair
|
||||
for contentTopic := range cf.ContentTopics {
|
||||
sub.increaseSubFor(cf.PubsubTopic, contentTopic)
|
||||
}
|
||||
|
||||
sub.items[peerID].SubsPerPubsubTopic[cf.PubsubTopic][details.ID] = details
|
||||
|
||||
return details
|
||||
}
|
||||
|
@ -83,7 +91,7 @@ func (sub *SubscriptionsMap) IsSubscribedTo(peerID peer.ID) bool {
|
|||
sub.RLock()
|
||||
defer sub.RUnlock()
|
||||
|
||||
_, ok := sub.Items[peerID]
|
||||
_, ok := sub.items[peerID]
|
||||
return ok
|
||||
}
|
||||
|
||||
|
@ -93,7 +101,7 @@ func (sub *SubscriptionsMap) Has(peerID peer.ID, cf protocol.ContentFilter) bool
|
|||
defer sub.RUnlock()
|
||||
|
||||
// Check if peer exits
|
||||
peerSubscription, ok := sub.Items[peerID]
|
||||
peerSubscription, ok := sub.items[peerID]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
@ -125,67 +133,24 @@ func (sub *SubscriptionsMap) Delete(subscription *SubscriptionDetails) error {
|
|||
sub.Lock()
|
||||
defer sub.Unlock()
|
||||
|
||||
peerSubscription, ok := sub.Items[subscription.PeerID]
|
||||
peerSubscription, ok := sub.items[subscription.PeerID]
|
||||
if !ok {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
delete(peerSubscription.SubsPerPubsubTopic[subscription.ContentFilter.PubsubTopic], subscription.ID)
|
||||
contentFilter := subscription.ContentFilter
|
||||
delete(peerSubscription.SubsPerPubsubTopic[contentFilter.PubsubTopic], subscription.ID)
|
||||
|
||||
// Decrease the number of subscriptions for this (pubsubTopic, contentTopic) pair
|
||||
for contentTopic := range contentFilter.ContentTopics {
|
||||
sub.decreaseSubFor(contentFilter.PubsubTopic, contentTopic)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SubscriptionDetails) Add(contentTopics ...string) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
for _, ct := range contentTopics {
|
||||
s.ContentFilter.ContentTopics[ct] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SubscriptionDetails) Remove(contentTopics ...string) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
for _, ct := range contentTopics {
|
||||
delete(s.ContentFilter.ContentTopics, ct)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SubscriptionDetails) CloseC() {
|
||||
s.once.Do(func() {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
s.Closed = true
|
||||
close(s.C)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *SubscriptionDetails) Close() error {
|
||||
s.CloseC()
|
||||
return s.mapRef.Delete(s)
|
||||
}
|
||||
|
||||
func (s *SubscriptionDetails) Clone() *SubscriptionDetails {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
|
||||
result := &SubscriptionDetails{
|
||||
ID: uuid.NewString(),
|
||||
mapRef: s.mapRef,
|
||||
Closed: false,
|
||||
PeerID: s.PeerID,
|
||||
ContentFilter: protocol.ContentFilter{PubsubTopic: s.ContentFilter.PubsubTopic, ContentTopics: maps.Clone(s.ContentFilter.ContentTopics)},
|
||||
C: make(chan *protocol.Envelope),
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func (sub *SubscriptionsMap) clear() {
|
||||
for _, peerSubscription := range sub.Items {
|
||||
for _, peerSubscription := range sub.items {
|
||||
for _, subscriptionSet := range peerSubscription.SubsPerPubsubTopic {
|
||||
for _, subscription := range subscriptionSet {
|
||||
subscription.CloseC()
|
||||
|
@ -193,7 +158,7 @@ func (sub *SubscriptionsMap) clear() {
|
|||
}
|
||||
}
|
||||
|
||||
sub.Items = make(map[peer.ID]*PeerSubscription)
|
||||
sub.items = make(map[peer.ID]*PeerSubscription)
|
||||
}
|
||||
|
||||
func (sub *SubscriptionsMap) Clear() {
|
||||
|
@ -206,7 +171,7 @@ func (sub *SubscriptionsMap) Notify(peerID peer.ID, envelope *protocol.Envelope)
|
|||
sub.RLock()
|
||||
defer sub.RUnlock()
|
||||
|
||||
subscriptions, ok := sub.Items[peerID].SubsPerPubsubTopic[envelope.PubsubTopic()]
|
||||
subscriptions, ok := sub.items[peerID].SubsPerPubsubTopic[envelope.PubsubTopic()]
|
||||
if ok {
|
||||
iterateSubscriptionSet(sub.logger, subscriptions, envelope)
|
||||
}
|
||||
|
@ -234,21 +199,21 @@ func iterateSubscriptionSet(logger *zap.Logger, subscriptions SubscriptionSet, e
|
|||
}
|
||||
}
|
||||
|
||||
func (s *SubscriptionDetails) MarshalJSON() ([]byte, error) {
|
||||
type resultType struct {
|
||||
PeerID string `json:"peerID"`
|
||||
PubsubTopic string `json:"pubsubTopic"`
|
||||
ContentTopics []string `json:"contentTopics"`
|
||||
}
|
||||
func (m *SubscriptionsMap) GetSubscription(peerID peer.ID, contentFilter protocol.ContentFilter) []*SubscriptionDetails {
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
|
||||
result := resultType{
|
||||
PeerID: s.PeerID.Pretty(),
|
||||
PubsubTopic: s.ContentFilter.PubsubTopic,
|
||||
var output []*SubscriptionDetails
|
||||
for _, peerSubs := range m.items {
|
||||
if peerID == "" || peerSubs.PeerID == peerID {
|
||||
for _, subs := range peerSubs.SubsPerPubsubTopic {
|
||||
for _, subscriptionDetail := range subs {
|
||||
if subscriptionDetail.isPartOf(contentFilter) {
|
||||
output = append(output, subscriptionDetail)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for c := range s.ContentFilter.ContentTopics {
|
||||
result.ContentTopics = append(result.ContentTopics, c)
|
||||
}
|
||||
|
||||
return json.Marshal(result)
|
||||
return output
|
||||
}
|
||||
|
|
|
@ -8,9 +8,9 @@ import (
|
|||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
rvs "github.com/waku-org/go-libp2p-rendezvous"
|
||||
"github.com/waku-org/go-waku/waku/v2/peermanager"
|
||||
"github.com/waku-org/go-waku/waku/v2/peerstore"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/service"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
|
@ -31,12 +31,12 @@ type Rendezvous struct {
|
|||
peerConnector PeerConnector
|
||||
|
||||
log *zap.Logger
|
||||
*peermanager.CommonDiscoveryService
|
||||
*service.CommonDiscoveryService
|
||||
}
|
||||
|
||||
// PeerConnector will subscribe to a channel containing the information for all peers found by this discovery protocol
|
||||
type PeerConnector interface {
|
||||
Subscribe(context.Context, <-chan peermanager.PeerData)
|
||||
Subscribe(context.Context, <-chan service.PeerData)
|
||||
}
|
||||
|
||||
// NewRendezvous creates an instance of Rendezvous struct
|
||||
|
@ -46,7 +46,7 @@ func NewRendezvous(db *DB, peerConnector PeerConnector, log *zap.Logger) *Rendez
|
|||
db: db,
|
||||
peerConnector: peerConnector,
|
||||
log: logger,
|
||||
CommonDiscoveryService: peermanager.NewCommonDiscoveryService(),
|
||||
CommonDiscoveryService: service.NewCommonDiscoveryService(),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -104,10 +104,10 @@ func (r *Rendezvous) DiscoverWithNamespace(ctx context.Context, namespace string
|
|||
rp.SetSuccess(cookie)
|
||||
|
||||
for _, p := range addrInfo {
|
||||
peer := peermanager.PeerData{
|
||||
peer := service.PeerData{
|
||||
Origin: peerstore.Rendezvous,
|
||||
AddrInfo: p,
|
||||
PubSubTopics: []string{namespace},
|
||||
PubsubTopics: []string{namespace},
|
||||
}
|
||||
if !r.PushToChan(peer) {
|
||||
r.log.Error("could push to closed channel/context completed")
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
package peermanager
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
@ -7,7 +7,6 @@ import (
|
|||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
wps "github.com/waku-org/go-waku/waku/v2/peerstore"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
)
|
||||
|
||||
// PeerData contains information about a peer useful in establishing connections with it.
|
||||
|
@ -15,17 +14,17 @@ type PeerData struct {
|
|||
Origin wps.Origin
|
||||
AddrInfo peer.AddrInfo
|
||||
ENR *enode.Node
|
||||
PubSubTopics []string
|
||||
PubsubTopics []string
|
||||
}
|
||||
|
||||
type CommonDiscoveryService struct {
|
||||
commonService *protocol.CommonService
|
||||
commonService *CommonService
|
||||
channel chan PeerData
|
||||
}
|
||||
|
||||
func NewCommonDiscoveryService() *CommonDiscoveryService {
|
||||
return &CommonDiscoveryService{
|
||||
commonService: protocol.NewCommonService(),
|
||||
commonService: NewCommonService(),
|
||||
}
|
||||
}
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
package protocol
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
|
@ -9,6 +9,7 @@ import (
|
|||
)
|
||||
|
||||
var log *zap.Logger
|
||||
var messageLoggers map[string]*zap.Logger
|
||||
|
||||
// Logger creates a zap.Logger with some reasonable defaults
|
||||
func Logger() *zap.Logger {
|
||||
|
@ -18,6 +19,20 @@ func Logger() *zap.Logger {
|
|||
return log
|
||||
}
|
||||
|
||||
// MessagesLogger returns a logger used for debug logging of receivent/sent messages
|
||||
func MessagesLogger(prefix string) *zap.Logger {
|
||||
if messageLoggers == nil {
|
||||
messageLoggers = make(map[string]*zap.Logger)
|
||||
}
|
||||
logger := messageLoggers[prefix]
|
||||
if logger == nil {
|
||||
logger = logging.Logger(prefix + ".messages").Desugar()
|
||||
messageLoggers[prefix] = logger
|
||||
}
|
||||
|
||||
return logger
|
||||
}
|
||||
|
||||
// InitLogger initializes a global logger using an specific encoding
|
||||
func InitLogger(encoding string, output string) {
|
||||
cfg := logging.GetConfig()
|
||||
|
@ -50,10 +65,12 @@ func InitLogger(encoding string, output string) {
|
|||
cfg.File = "./waku.log"
|
||||
}
|
||||
}
|
||||
if cfg.Level == logging.LevelError {
|
||||
// Override default level setting
|
||||
cfg.Level = logging.LevelInfo
|
||||
}
|
||||
|
||||
logging.SetupLogging(cfg)
|
||||
|
||||
log = logging.Logger("gowaku").Desugar()
|
||||
|
||||
logging.SetAllLoggers(logging.LevelInfo)
|
||||
}
|
||||
|
|
|
@ -2,11 +2,13 @@ package utils
|
|||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// GetUnixEpochFrom converts a time into a unix timestamp with nanoseconds
|
||||
func GetUnixEpochFrom(now time.Time) int64 {
|
||||
return now.UnixNano()
|
||||
func GetUnixEpochFrom(now time.Time) *int64 {
|
||||
return proto.Int64(now.UnixNano())
|
||||
}
|
||||
|
||||
type Timesource interface {
|
||||
|
@ -16,7 +18,7 @@ type Timesource interface {
|
|||
// GetUnixEpoch returns the current time in unix timestamp with the integer part
|
||||
// representing seconds and the decimal part representing subseconds.
|
||||
// Optionally receives a timesource to obtain the time from
|
||||
func GetUnixEpoch(timesource ...Timesource) int64 {
|
||||
func GetUnixEpoch(timesource ...Timesource) *int64 {
|
||||
if len(timesource) != 0 {
|
||||
return GetUnixEpochFrom(timesource[0].Now())
|
||||
}
|
||||
|
|
|
@ -0,0 +1,665 @@
|
|||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package protojson
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/protobuf/internal/encoding/json"
|
||||
"google.golang.org/protobuf/internal/encoding/messageset"
|
||||
"google.golang.org/protobuf/internal/errors"
|
||||
"google.golang.org/protobuf/internal/flags"
|
||||
"google.golang.org/protobuf/internal/genid"
|
||||
"google.golang.org/protobuf/internal/pragma"
|
||||
"google.golang.org/protobuf/internal/set"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
)
|
||||
|
||||
// Unmarshal reads the given []byte into the given proto.Message.
|
||||
// The provided message must be mutable (e.g., a non-nil pointer to a message).
|
||||
func Unmarshal(b []byte, m proto.Message) error {
|
||||
return UnmarshalOptions{}.Unmarshal(b, m)
|
||||
}
|
||||
|
||||
// UnmarshalOptions is a configurable JSON format parser.
|
||||
type UnmarshalOptions struct {
|
||||
pragma.NoUnkeyedLiterals
|
||||
|
||||
// If AllowPartial is set, input for messages that will result in missing
|
||||
// required fields will not return an error.
|
||||
AllowPartial bool
|
||||
|
||||
// If DiscardUnknown is set, unknown fields are ignored.
|
||||
DiscardUnknown bool
|
||||
|
||||
// Resolver is used for looking up types when unmarshaling
|
||||
// google.protobuf.Any messages or extension fields.
|
||||
// If nil, this defaults to using protoregistry.GlobalTypes.
|
||||
Resolver interface {
|
||||
protoregistry.MessageTypeResolver
|
||||
protoregistry.ExtensionTypeResolver
|
||||
}
|
||||
}
|
||||
|
||||
// Unmarshal reads the given []byte and populates the given proto.Message
// using options in the UnmarshalOptions object.
// It will clear the message first before setting the fields.
// If it returns an error, the given message may be partially set.
// The provided message must be mutable (e.g., a non-nil pointer to a message).
func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error {
	return o.unmarshal(b, m)
}
|
||||
|
||||
// unmarshal is a centralized function that all unmarshal operations go through.
// For profiling purposes, avoid changing the name of this function or
// introducing other code paths for unmarshal that do not go through this.
func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error {
	// Clear the destination first so a partially-applied decode never mixes
	// with pre-existing field values.
	proto.Reset(m)

	if o.Resolver == nil {
		o.Resolver = protoregistry.GlobalTypes
	}

	dec := decoder{json.NewDecoder(b), o}
	if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil {
		return err
	}

	// Check for EOF: any trailing token after the top-level JSON value
	// is rejected as a syntax error.
	tok, err := dec.Read()
	if err != nil {
		return err
	}
	if tok.Kind() != json.EOF {
		return dec.unexpectedTokenError(tok)
	}

	if o.AllowPartial {
		return nil
	}
	// Enforce that all required fields are populated unless the caller
	// explicitly allowed partial messages.
	return proto.CheckInitialized(m)
}
|
||||
|
||||
// decoder bundles the underlying JSON token reader with the unmarshal
// options in effect for this call.
type decoder struct {
	*json.Decoder
	opts UnmarshalOptions
}
|
||||
|
||||
// newError returns an error object with position info.
// pos is a byte offset into the input; it is translated to line:column.
func (d decoder) newError(pos int, f string, x ...interface{}) error {
	line, column := d.Position(pos)
	head := fmt.Sprintf("(line %d:%d): ", line, column)
	return errors.New(head+f, x...)
}

// unexpectedTokenError returns a syntax error for the given unexpected token.
func (d decoder) unexpectedTokenError(tok json.Token) error {
	return d.syntaxError(tok.Pos(), "unexpected token %s", tok.RawString())
}

// syntaxError returns a syntax error for given position.
// Unlike newError, the message is prefixed with "syntax error".
func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
	line, column := d.Position(pos)
	head := fmt.Sprintf("syntax error (line %d:%d): ", line, column)
	return errors.New(head+f, x...)
}
|
||||
|
||||
// unmarshalMessage unmarshals a message into the given protoreflect.Message.
// If skipTypeURL is set, a JSON "@type" member is ignored; it is set when
// decoding the object form of a google.protobuf.Any.
func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error {
	// Types with a registered custom unmarshaler (the well-known types)
	// are delegated to it and bypass the generic object decoding below.
	if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil {
		return unmarshal(d, m)
	}

	tok, err := d.Read()
	if err != nil {
		return err
	}
	if tok.Kind() != json.ObjectOpen {
		return d.unexpectedTokenError(tok)
	}

	messageDesc := m.Descriptor()
	if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) {
		return errors.New("no support for proto1 MessageSets")
	}

	var seenNums set.Ints   // field numbers already decoded; used to reject duplicates
	var seenOneofs set.Ints // oneof indices already populated; used to reject conflicts
	fieldDescs := messageDesc.Fields()
	for {
		// Read field name.
		tok, err := d.Read()
		if err != nil {
			return err
		}
		switch tok.Kind() {
		default:
			return d.unexpectedTokenError(tok)
		case json.ObjectClose:
			return nil
		case json.Name:
			// Continue below.
		}

		name := tok.Name()
		// Unmarshaling a non-custom embedded message in Any will contain the
		// JSON field "@type" which should be skipped because it is not a field
		// of the embedded message, but simply an artifact of the Any format.
		if skipTypeURL && name == "@type" {
			d.Read()
			continue
		}

		// Get the FieldDescriptor.
		var fd protoreflect.FieldDescriptor
		if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") {
			// Only extension names are in [name] format.
			extName := protoreflect.FullName(name[1 : len(name)-1])
			extType, err := d.opts.Resolver.FindExtensionByName(extName)
			if err != nil && err != protoregistry.NotFound {
				return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err)
			}
			if extType != nil {
				fd = extType.TypeDescriptor()
				// The resolved extension must actually extend this message.
				if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() {
					return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName())
				}
			}
		} else {
			// The name can either be the JSON name or the proto field name.
			fd = fieldDescs.ByJSONName(name)
			if fd == nil {
				fd = fieldDescs.ByTextName(name)
			}
		}
		if flags.ProtoLegacy {
			if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() {
				fd = nil // reset since the weak reference is not linked in
			}
		}

		if fd == nil {
			// Field is unknown.
			if d.opts.DiscardUnknown {
				if err := d.skipJSONValue(); err != nil {
					return err
				}
				continue
			}
			return d.newError(tok.Pos(), "unknown field %v", tok.RawString())
		}

		// Do not allow duplicate fields.
		num := uint64(fd.Number())
		if seenNums.Has(num) {
			return d.newError(tok.Pos(), "duplicate field %v", tok.RawString())
		}
		seenNums.Set(num)

		// No need to set values for JSON null unless the field type is
		// google.protobuf.Value or google.protobuf.NullValue.
		if tok, _ := d.Peek(); tok.Kind() == json.Null && !isKnownValue(fd) && !isNullValue(fd) {
			d.Read()
			continue
		}

		switch {
		case fd.IsList():
			list := m.Mutable(fd).List()
			if err := d.unmarshalList(list, fd); err != nil {
				return err
			}
		case fd.IsMap():
			mmap := m.Mutable(fd).Map()
			if err := d.unmarshalMap(mmap, fd); err != nil {
				return err
			}
		default:
			// If field is a oneof, check if it has already been set.
			if od := fd.ContainingOneof(); od != nil {
				idx := uint64(od.Index())
				if seenOneofs.Has(idx) {
					return d.newError(tok.Pos(), "error parsing %s, oneof %v is already set", tok.RawString(), od.FullName())
				}
				seenOneofs.Set(idx)
			}

			// Required or optional fields.
			if err := d.unmarshalSingular(m, fd); err != nil {
				return err
			}
		}
	}
}
|
||||
|
||||
// isKnownValue reports whether fd is a google.protobuf.Value message field.
func isKnownValue(fd protoreflect.FieldDescriptor) bool {
	md := fd.Message()
	return md != nil && md.FullName() == genid.Value_message_fullname
}

// isNullValue reports whether fd is a google.protobuf.NullValue enum field.
func isNullValue(fd protoreflect.FieldDescriptor) bool {
	ed := fd.Enum()
	return ed != nil && ed.FullName() == genid.NullValue_enum_fullname
}
|
||||
|
||||
// unmarshalSingular unmarshals to the non-repeated field specified
// by the given FieldDescriptor.
// The field is only mutated after the value decodes without error.
func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.FieldDescriptor) error {
	var val protoreflect.Value
	var err error
	switch fd.Kind() {
	case protoreflect.MessageKind, protoreflect.GroupKind:
		val = m.NewField(fd)
		err = d.unmarshalMessage(val.Message(), false)
	default:
		val, err = d.unmarshalScalar(fd)
	}

	if err != nil {
		return err
	}
	m.Set(fd, val)
	return nil
}
|
||||
|
||||
// unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by
// the given FieldDescriptor.
// Any token that does not match the field's kind falls through to the common
// "invalid value" error at the bottom.
func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
	const b32 int = 32
	const b64 int = 64

	tok, err := d.Read()
	if err != nil {
		return protoreflect.Value{}, err
	}

	kind := fd.Kind()
	switch kind {
	case protoreflect.BoolKind:
		if tok.Kind() == json.Bool {
			return protoreflect.ValueOfBool(tok.Bool()), nil
		}

	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
		if v, ok := unmarshalInt(tok, b32); ok {
			return v, nil
		}

	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
		if v, ok := unmarshalInt(tok, b64); ok {
			return v, nil
		}

	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
		if v, ok := unmarshalUint(tok, b32); ok {
			return v, nil
		}

	case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
		if v, ok := unmarshalUint(tok, b64); ok {
			return v, nil
		}

	case protoreflect.FloatKind:
		if v, ok := unmarshalFloat(tok, b32); ok {
			return v, nil
		}

	case protoreflect.DoubleKind:
		if v, ok := unmarshalFloat(tok, b64); ok {
			return v, nil
		}

	case protoreflect.StringKind:
		if tok.Kind() == json.String {
			return protoreflect.ValueOfString(tok.ParsedString()), nil
		}

	case protoreflect.BytesKind:
		if v, ok := unmarshalBytes(tok); ok {
			return v, nil
		}

	case protoreflect.EnumKind:
		if v, ok := unmarshalEnum(tok, fd); ok {
			return v, nil
		}

	default:
		// Message/group kinds are handled by unmarshalSingular; reaching
		// here indicates a programmer error, not bad input.
		panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind))
	}

	return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
}
|
||||
|
||||
// unmarshalInt decodes a signed integer of the given bit size from either a
// JSON number token or a JSON string containing a number (per the proto3
// JSON mapping, 64-bit integers are commonly transported as strings).
func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) {
	switch tok.Kind() {
	case json.Number:
		return getInt(tok, bitSize)

	case json.String:
		// Decode number from string.
		// Surrounding whitespace is rejected: the trimmed string must be
		// identical to the original.
		s := strings.TrimSpace(tok.ParsedString())
		if len(s) != len(tok.ParsedString()) {
			return protoreflect.Value{}, false
		}
		dec := json.NewDecoder([]byte(s))
		tok, err := dec.Read()
		if err != nil {
			return protoreflect.Value{}, false
		}
		return getInt(tok, bitSize)
	}
	return protoreflect.Value{}, false
}

// getInt converts a number token into an int32 or int64 protoreflect.Value
// depending on bitSize (32 or 64).
func getInt(tok json.Token, bitSize int) (protoreflect.Value, bool) {
	n, ok := tok.Int(bitSize)
	if !ok {
		return protoreflect.Value{}, false
	}
	if bitSize == 32 {
		return protoreflect.ValueOfInt32(int32(n)), true
	}
	return protoreflect.ValueOfInt64(n), true
}

// unmarshalUint decodes an unsigned integer of the given bit size from either
// a JSON number token or a JSON string containing a number.
func unmarshalUint(tok json.Token, bitSize int) (protoreflect.Value, bool) {
	switch tok.Kind() {
	case json.Number:
		return getUint(tok, bitSize)

	case json.String:
		// Decode number from string.
		// Surrounding whitespace is rejected, as in unmarshalInt.
		s := strings.TrimSpace(tok.ParsedString())
		if len(s) != len(tok.ParsedString()) {
			return protoreflect.Value{}, false
		}
		dec := json.NewDecoder([]byte(s))
		tok, err := dec.Read()
		if err != nil {
			return protoreflect.Value{}, false
		}
		return getUint(tok, bitSize)
	}
	return protoreflect.Value{}, false
}

// getUint converts a number token into a uint32 or uint64 protoreflect.Value
// depending on bitSize (32 or 64).
func getUint(tok json.Token, bitSize int) (protoreflect.Value, bool) {
	n, ok := tok.Uint(bitSize)
	if !ok {
		return protoreflect.Value{}, false
	}
	if bitSize == 32 {
		return protoreflect.ValueOfUint32(uint32(n)), true
	}
	return protoreflect.ValueOfUint64(n), true
}

// unmarshalFloat decodes a float of the given bit size from a JSON number
// token or a JSON string. The string forms "NaN", "Infinity", and "-Infinity"
// map to the corresponding IEEE 754 special values, per the proto3 JSON
// mapping.
func unmarshalFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) {
	switch tok.Kind() {
	case json.Number:
		return getFloat(tok, bitSize)

	case json.String:
		s := tok.ParsedString()
		switch s {
		case "NaN":
			if bitSize == 32 {
				return protoreflect.ValueOfFloat32(float32(math.NaN())), true
			}
			return protoreflect.ValueOfFloat64(math.NaN()), true
		case "Infinity":
			if bitSize == 32 {
				return protoreflect.ValueOfFloat32(float32(math.Inf(+1))), true
			}
			return protoreflect.ValueOfFloat64(math.Inf(+1)), true
		case "-Infinity":
			if bitSize == 32 {
				return protoreflect.ValueOfFloat32(float32(math.Inf(-1))), true
			}
			return protoreflect.ValueOfFloat64(math.Inf(-1)), true
		}

		// Decode number from string.
		// Surrounding whitespace is rejected, as in unmarshalInt.
		if len(s) != len(strings.TrimSpace(s)) {
			return protoreflect.Value{}, false
		}
		dec := json.NewDecoder([]byte(s))
		tok, err := dec.Read()
		if err != nil {
			return protoreflect.Value{}, false
		}
		return getFloat(tok, bitSize)
	}
	return protoreflect.Value{}, false
}

// getFloat converts a number token into a float32 or float64
// protoreflect.Value depending on bitSize (32 or 64).
func getFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) {
	n, ok := tok.Float(bitSize)
	if !ok {
		return protoreflect.Value{}, false
	}
	if bitSize == 32 {
		return protoreflect.ValueOfFloat32(float32(n)), true
	}
	return protoreflect.ValueOfFloat64(n), true
}
|
||||
|
||||
// unmarshalBytes decodes a bytes field from a JSON string.
// Both standard and URL-safe base64 alphabets are accepted, with or without
// padding, so inputs from different encoders all round-trip.
func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) {
	if tok.Kind() != json.String {
		return protoreflect.Value{}, false
	}

	s := tok.ParsedString()
	enc := base64.StdEncoding
	// '-' and '_' only occur in the URL-safe alphabet.
	if strings.ContainsAny(s, "-_") {
		enc = base64.URLEncoding
	}
	// A length that is not a multiple of 4 implies unpadded input.
	if len(s)%4 != 0 {
		enc = enc.WithPadding(base64.NoPadding)
	}
	b, err := enc.DecodeString(s)
	if err != nil {
		return protoreflect.Value{}, false
	}
	return protoreflect.ValueOfBytes(b), true
}
|
||||
|
||||
// unmarshalEnum decodes an enum field from a JSON string (enum value name),
// a JSON number (enum numeric value), or — for google.protobuf.NullValue
// only — a JSON null.
func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.Value, bool) {
	switch tok.Kind() {
	case json.String:
		// Lookup EnumNumber based on name.
		s := tok.ParsedString()
		if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil {
			return protoreflect.ValueOfEnum(enumVal.Number()), true
		}

	case json.Number:
		// Unknown numeric values are preserved as-is.
		if n, ok := tok.Int(32); ok {
			return protoreflect.ValueOfEnum(protoreflect.EnumNumber(n)), true
		}

	case json.Null:
		// This is only valid for google.protobuf.NullValue.
		if isNullValue(fd) {
			return protoreflect.ValueOfEnum(0), true
		}
	}

	return protoreflect.Value{}, false
}
|
||||
|
||||
// unmarshalList unmarshals a JSON array into the given protoreflect.List.
// Message elements and scalar elements are decoded in two separate loops so
// the element-kind dispatch happens once rather than per element.
func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error {
	tok, err := d.Read()
	if err != nil {
		return err
	}
	if tok.Kind() != json.ArrayOpen {
		return d.unexpectedTokenError(tok)
	}

	switch fd.Kind() {
	case protoreflect.MessageKind, protoreflect.GroupKind:
		for {
			tok, err := d.Peek()
			if err != nil {
				return err
			}

			if tok.Kind() == json.ArrayClose {
				d.Read()
				return nil
			}

			val := list.NewElement()
			if err := d.unmarshalMessage(val.Message(), false); err != nil {
				return err
			}
			list.Append(val)
		}
	default:
		for {
			tok, err := d.Peek()
			if err != nil {
				return err
			}

			if tok.Kind() == json.ArrayClose {
				d.Read()
				return nil
			}

			val, err := d.unmarshalScalar(fd)
			if err != nil {
				return err
			}
			list.Append(val)
		}
	}

	// Unreachable: both loops above only exit via return.
	return nil
}
|
||||
|
||||
// unmarshalMap unmarshals a JSON object into the given protoreflect.Map.
// Keys are decoded via unmarshalMapKey; duplicate keys are an error.
func (d decoder) unmarshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error {
	tok, err := d.Read()
	if err != nil {
		return err
	}
	if tok.Kind() != json.ObjectOpen {
		return d.unexpectedTokenError(tok)
	}

	// Determine ahead whether map entry is a scalar type or a message type in
	// order to call the appropriate unmarshalMapValue func inside the for loop
	// below.
	var unmarshalMapValue func() (protoreflect.Value, error)
	switch fd.MapValue().Kind() {
	case protoreflect.MessageKind, protoreflect.GroupKind:
		unmarshalMapValue = func() (protoreflect.Value, error) {
			val := mmap.NewValue()
			if err := d.unmarshalMessage(val.Message(), false); err != nil {
				return protoreflect.Value{}, err
			}
			return val, nil
		}
	default:
		unmarshalMapValue = func() (protoreflect.Value, error) {
			return d.unmarshalScalar(fd.MapValue())
		}
	}

Loop:
	for {
		// Read field name.
		tok, err := d.Read()
		if err != nil {
			return err
		}
		switch tok.Kind() {
		default:
			return d.unexpectedTokenError(tok)
		case json.ObjectClose:
			break Loop
		case json.Name:
			// Continue.
		}

		// Unmarshal field name.
		pkey, err := d.unmarshalMapKey(tok, fd.MapKey())
		if err != nil {
			return err
		}

		// Check for duplicate field name.
		if mmap.Has(pkey) {
			return d.newError(tok.Pos(), "duplicate map key %v", tok.RawString())
		}

		// Read and unmarshal field value.
		pval, err := unmarshalMapValue()
		if err != nil {
			return err
		}

		mmap.Set(pkey, pval)
	}

	return nil
}
|
||||
|
||||
// unmarshalMapKey converts given token of Name kind into a protoreflect.MapKey.
// A map key type is any integral or string type.
// JSON object keys are always strings, so numeric and bool keys are parsed
// from the key's string form.
func (d decoder) unmarshalMapKey(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.MapKey, error) {
	const b32 = 32
	const b64 = 64
	const base10 = 10

	name := tok.Name()
	kind := fd.Kind()
	switch kind {
	case protoreflect.StringKind:
		return protoreflect.ValueOfString(name).MapKey(), nil

	case protoreflect.BoolKind:
		switch name {
		case "true":
			return protoreflect.ValueOfBool(true).MapKey(), nil
		case "false":
			return protoreflect.ValueOfBool(false).MapKey(), nil
		}

	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
		if n, err := strconv.ParseInt(name, base10, b32); err == nil {
			return protoreflect.ValueOfInt32(int32(n)).MapKey(), nil
		}

	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
		if n, err := strconv.ParseInt(name, base10, b64); err == nil {
			return protoreflect.ValueOfInt64(int64(n)).MapKey(), nil
		}

	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
		if n, err := strconv.ParseUint(name, base10, b32); err == nil {
			return protoreflect.ValueOfUint32(uint32(n)).MapKey(), nil
		}

	case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
		if n, err := strconv.ParseUint(name, base10, b64); err == nil {
			return protoreflect.ValueOfUint64(uint64(n)).MapKey(), nil
		}

	default:
		// Float, bytes, enum, and message kinds are not valid map key types
		// in protobuf; reaching here indicates a programmer error.
		panic(fmt.Sprintf("invalid kind for map key: %v", kind))
	}

	return protoreflect.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString())
}
|
|
@ -0,0 +1,11 @@
|
|||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package protojson marshals and unmarshals protocol buffer messages as JSON
|
||||
// format. It follows the guide at
|
||||
// https://protobuf.dev/programming-guides/proto3#json.
|
||||
//
|
||||
// This package produces a different output than the standard "encoding/json"
|
||||
// package, which does not operate correctly on protocol buffer messages.
|
||||
package protojson
|
|
@ -0,0 +1,349 @@
|
|||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package protojson
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/protobuf/internal/encoding/json"
|
||||
"google.golang.org/protobuf/internal/encoding/messageset"
|
||||
"google.golang.org/protobuf/internal/errors"
|
||||
"google.golang.org/protobuf/internal/filedesc"
|
||||
"google.golang.org/protobuf/internal/flags"
|
||||
"google.golang.org/protobuf/internal/genid"
|
||||
"google.golang.org/protobuf/internal/order"
|
||||
"google.golang.org/protobuf/internal/pragma"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
)
|
||||
|
||||
const defaultIndent = " "
|
||||
|
||||
// Format formats the message as a multiline string.
// This function is only intended for human consumption and ignores errors.
// Do not depend on the output being stable. It may change over time across
// different versions of the program.
func Format(m proto.Message) string {
	return MarshalOptions{Multiline: true}.Format(m)
}
|
||||
|
||||
// Marshal writes the given proto.Message in JSON format using default options.
// Do not depend on the output being stable. It may change over time across
// different versions of the program.
func Marshal(m proto.Message) ([]byte, error) {
	return MarshalOptions{}.Marshal(m)
}
|
||||
|
||||
// MarshalOptions is a configurable JSON format marshaler.
// The zero value is a valid configuration with default behavior.
type MarshalOptions struct {
	pragma.NoUnkeyedLiterals

	// Multiline specifies whether the marshaler should format the output in
	// indented-form with every textual element on a new line.
	// If Indent is an empty string, then an arbitrary indent is chosen.
	Multiline bool

	// Indent specifies the set of indentation characters to use in a multiline
	// formatted output such that every entry is preceded by Indent and
	// terminated by a newline. If non-empty, then Multiline is treated as true.
	// Indent can only be composed of space or tab characters.
	Indent string

	// AllowPartial allows messages that have missing required fields to marshal
	// without returning an error. If AllowPartial is false (the default),
	// Marshal will return error if there are any missing required fields.
	AllowPartial bool

	// UseProtoNames uses proto field name instead of lowerCamelCase name in JSON
	// field names.
	UseProtoNames bool

	// UseEnumNumbers emits enum values as numbers.
	UseEnumNumbers bool

	// EmitUnpopulated specifies whether to emit unpopulated fields. It does not
	// emit unpopulated oneof fields or unpopulated extension fields.
	// The JSON value emitted for unpopulated fields are as follows:
	//  ╔═══════╤════════════════════════════╗
	//  ║ JSON  │ Protobuf field             ║
	//  ╠═══════╪════════════════════════════╣
	//  ║ false │ proto3 boolean fields      ║
	//  ║ 0     │ proto3 numeric fields      ║
	//  ║ ""    │ proto3 string/bytes fields ║
	//  ║ null  │ proto2 scalar fields       ║
	//  ║ null  │ message fields             ║
	//  ║ []    │ list fields                ║
	//  ║ {}    │ map fields                 ║
	//  ╚═══════╧════════════════════════════╝
	EmitUnpopulated bool

	// Resolver is used for looking up types when expanding google.protobuf.Any
	// messages. If nil, this defaults to using protoregistry.GlobalTypes.
	Resolver interface {
		protoregistry.ExtensionTypeResolver
		protoregistry.MessageTypeResolver
	}
}
|
||||
|
||||
// Format formats the message as a string.
// This method is only intended for human consumption and ignores errors.
// Do not depend on the output being stable. It may change over time across
// different versions of the program.
func (o MarshalOptions) Format(m proto.Message) string {
	if m == nil || !m.ProtoReflect().IsValid() {
		return "<nil>" // invalid syntax, but okay since this is for debugging
	}
	// Errors (including missing required fields) are deliberately swallowed;
	// the best-effort output is still useful for debugging.
	o.AllowPartial = true
	b, _ := o.Marshal(m)
	return string(b)
}
|
||||
|
||||
// Marshal marshals the given proto.Message in the JSON format using options in
// MarshalOptions. Do not depend on the output being stable. It may change over
// time across different versions of the program.
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
	return o.marshal(nil, m)
}

// MarshalAppend appends the JSON format encoding of m to b,
// returning the result.
func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) {
	return o.marshal(b, m)
}

// marshal is a centralized function that all marshal operations go through.
// For profiling purposes, avoid changing the name of this function or
// introducing other code paths for marshal that do not go through this.
func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) {
	if o.Multiline && o.Indent == "" {
		o.Indent = defaultIndent
	}
	if o.Resolver == nil {
		o.Resolver = protoregistry.GlobalTypes
	}

	internalEnc, err := json.NewEncoder(b, o.Indent)
	if err != nil {
		return nil, err
	}

	// Treat nil message interface as an empty message,
	// in which case the output is an empty JSON object.
	if m == nil {
		return append(b, '{', '}'), nil
	}

	enc := encoder{internalEnc, o}
	if err := enc.marshalMessage(m.ProtoReflect(), ""); err != nil {
		return nil, err
	}
	if o.AllowPartial {
		return enc.Bytes(), nil
	}
	return enc.Bytes(), proto.CheckInitialized(m)
}
|
||||
|
||||
// encoder bundles the underlying JSON writer with the marshal options in
// effect for this call.
type encoder struct {
	*json.Encoder
	opts MarshalOptions
}
|
||||
|
||||
// typeFieldDesc is a synthetic field descriptor used for the "@type" field.
// Index is -1 so that, under index-based field ordering, it sorts before all
// real fields of the expanded message.
var typeFieldDesc = func() protoreflect.FieldDescriptor {
	var fd filedesc.Field
	fd.L0.FullName = "@type"
	fd.L0.Index = -1
	fd.L1.Cardinality = protoreflect.Optional
	fd.L1.Kind = protoreflect.StringKind
	return &fd
}()
|
||||
|
||||
// typeURLFieldRanger wraps a protoreflect.Message and modifies its Range method
// to additionally iterate over a synthetic field for the type URL.
type typeURLFieldRanger struct {
	order.FieldRanger
	typeURL string
}

// Range yields the synthetic "@type" field first, then delegates to the
// wrapped ranger. Iteration stops early if f returns false.
func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
	if !f(typeFieldDesc, protoreflect.ValueOfString(m.typeURL)) {
		return
	}
	m.FieldRanger.Range(f)
}
|
||||
|
||||
// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range
// method to additionally iterate over unpopulated fields.
type unpopulatedFieldRanger struct{ protoreflect.Message }

// Range first yields every unpopulated non-oneof field (with an invalid
// Value where null must be emitted), then delegates to the wrapped message's
// Range for the populated fields.
func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
	fds := m.Descriptor().Fields()
	for i := 0; i < fds.Len(); i++ {
		fd := fds.Get(i)
		if m.Has(fd) || fd.ContainingOneof() != nil {
			continue // ignore populated fields and fields within a oneof
		}

		v := m.Get(fd)
		isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid()
		isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil
		if isProto2Scalar || isSingularMessage {
			v = protoreflect.Value{} // use invalid value to emit null
		}
		if !f(fd, v) {
			return
		}
	}
	m.Message.Range(f)
}
|
||||
|
||||
// marshalMessage marshals the fields in the given protoreflect.Message.
// If the typeURL is non-empty, then a synthetic "@type" field is injected
// containing the URL as the value.
func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error {
	if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) {
		return errors.New("no support for proto1 MessageSets")
	}

	// Types with a registered custom marshaler (the well-known types) are
	// delegated to it and bypass the generic object encoding below.
	if marshal := wellKnownTypeMarshaler(m.Descriptor().FullName()); marshal != nil {
		return marshal(e, m)
	}

	e.StartObject()
	defer e.EndObject()

	// Stack rangers to optionally add unpopulated fields and the "@type"
	// member on top of the message's own populated fields.
	var fields order.FieldRanger = m
	if e.opts.EmitUnpopulated {
		fields = unpopulatedFieldRanger{m}
	}
	if typeURL != "" {
		fields = typeURLFieldRanger{fields, typeURL}
	}

	var err error
	order.RangeFields(fields, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		name := fd.JSONName()
		if e.opts.UseProtoNames {
			name = fd.TextName()
		}

		if err = e.WriteName(name); err != nil {
			return false
		}
		if err = e.marshalValue(v, fd); err != nil {
			return false
		}
		return true
	})
	return err
}
|
||||
|
||||
// marshalValue marshals the given protoreflect.Value, dispatching on whether
// the field is a list, a map, or a singular value.
func (e encoder) marshalValue(val protoreflect.Value, fd protoreflect.FieldDescriptor) error {
	switch {
	case fd.IsList():
		return e.marshalList(val.List(), fd)
	case fd.IsMap():
		return e.marshalMap(val.Map(), fd)
	default:
		return e.marshalSingular(val, fd)
	}
}
|
||||
|
||||
// marshalSingular marshals the given non-repeated field value. This includes
// all scalar types, enums, messages, and groups.
// An invalid val is emitted as JSON null (used for unpopulated fields).
func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error {
	if !val.IsValid() {
		e.WriteNull()
		return nil
	}

	switch kind := fd.Kind(); kind {
	case protoreflect.BoolKind:
		e.WriteBool(val.Bool())

	case protoreflect.StringKind:
		if e.WriteString(val.String()) != nil {
			return errors.InvalidUTF8(string(fd.FullName()))
		}

	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
		e.WriteInt(val.Int())

	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
		e.WriteUint(val.Uint())

	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Uint64Kind,
		protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind:
		// 64-bit integers are written out as JSON string.
		e.WriteString(val.String())

	case protoreflect.FloatKind:
		// Encoder.WriteFloat handles the special numbers NaN and infinites.
		e.WriteFloat(val.Float(), 32)

	case protoreflect.DoubleKind:
		// Encoder.WriteFloat handles the special numbers NaN and infinites.
		e.WriteFloat(val.Float(), 64)

	case protoreflect.BytesKind:
		e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes()))

	case protoreflect.EnumKind:
		if fd.Enum().FullName() == genid.NullValue_enum_fullname {
			e.WriteNull()
		} else {
			desc := fd.Enum().Values().ByNumber(val.Enum())
			// Unknown enum numbers have no name; fall back to the number.
			if e.opts.UseEnumNumbers || desc == nil {
				e.WriteInt(int64(val.Enum()))
			} else {
				e.WriteString(string(desc.Name()))
			}
		}

	case protoreflect.MessageKind, protoreflect.GroupKind:
		if err := e.marshalMessage(val.Message(), ""); err != nil {
			return err
		}

	default:
		panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind))
	}
	return nil
}
|
||||
|
||||
// marshalList marshals the given protoreflect.List as a JSON array,
// preserving element order.
func (e encoder) marshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error {
	e.StartArray()
	defer e.EndArray()

	for i := 0; i < list.Len(); i++ {
		item := list.Get(i)
		if err := e.marshalSingular(item, fd); err != nil {
			return err
		}
	}
	return nil
}
|
||||
|
||||
// marshalMap marshals given protoreflect.Map.
|
||||
func (e encoder) marshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error {
|
||||
e.StartObject()
|
||||
defer e.EndObject()
|
||||
|
||||
var err error
|
||||
order.RangeEntries(mmap, order.GenericKeyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool {
|
||||
if err = e.WriteName(k.String()); err != nil {
|
||||
return false
|
||||
}
|
||||
if err = e.marshalSingular(v, fd.MapValue()); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
return err
|
||||
}
|
895
vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
generated
vendored
Normal file
895
vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
generated
vendored
Normal file
|
@ -0,0 +1,895 @@
|
|||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package protojson
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"google.golang.org/protobuf/internal/encoding/json"
|
||||
"google.golang.org/protobuf/internal/errors"
|
||||
"google.golang.org/protobuf/internal/genid"
|
||||
"google.golang.org/protobuf/internal/strs"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
)
|
||||
|
||||
// marshalFunc is the signature shared by all well-known-type marshalers.
type marshalFunc func(encoder, protoreflect.Message) error

// wellKnownTypeMarshaler returns a marshal function if the message type
// has specialized serialization behavior. It returns nil otherwise.
func wellKnownTypeMarshaler(name protoreflect.FullName) marshalFunc {
	if name.Parent() == genid.GoogleProtobuf_package {
		switch name.Name() {
		case genid.Any_message_name:
			return encoder.marshalAny
		case genid.Timestamp_message_name:
			return encoder.marshalTimestamp
		case genid.Duration_message_name:
			return encoder.marshalDuration
		case genid.BoolValue_message_name,
			genid.Int32Value_message_name,
			genid.Int64Value_message_name,
			genid.UInt32Value_message_name,
			genid.UInt64Value_message_name,
			genid.FloatValue_message_name,
			genid.DoubleValue_message_name,
			genid.StringValue_message_name,
			genid.BytesValue_message_name:
			// All wrapper types share one marshaler for their single field.
			return encoder.marshalWrapperType
		case genid.Struct_message_name:
			return encoder.marshalStruct
		case genid.ListValue_message_name:
			return encoder.marshalListValue
		case genid.Value_message_name:
			return encoder.marshalKnownValue
		case genid.FieldMask_message_name:
			return encoder.marshalFieldMask
		case genid.Empty_message_name:
			return encoder.marshalEmpty
		}
	}
	return nil
}
|
||||
|
||||
// unmarshalFunc is the signature shared by all well-known-type unmarshalers.
type unmarshalFunc func(decoder, protoreflect.Message) error

// wellKnownTypeUnmarshaler returns a unmarshal function if the message type
// has specialized serialization behavior. It returns nil otherwise.
func wellKnownTypeUnmarshaler(name protoreflect.FullName) unmarshalFunc {
	if name.Parent() == genid.GoogleProtobuf_package {
		switch name.Name() {
		case genid.Any_message_name:
			return decoder.unmarshalAny
		case genid.Timestamp_message_name:
			return decoder.unmarshalTimestamp
		case genid.Duration_message_name:
			return decoder.unmarshalDuration
		case genid.BoolValue_message_name,
			genid.Int32Value_message_name,
			genid.Int64Value_message_name,
			genid.UInt32Value_message_name,
			genid.UInt64Value_message_name,
			genid.FloatValue_message_name,
			genid.DoubleValue_message_name,
			genid.StringValue_message_name,
			genid.BytesValue_message_name:
			// All wrapper types share one unmarshaler for their single field.
			return decoder.unmarshalWrapperType
		case genid.Struct_message_name:
			return decoder.unmarshalStruct
		case genid.ListValue_message_name:
			return decoder.unmarshalListValue
		case genid.Value_message_name:
			return decoder.unmarshalKnownValue
		case genid.FieldMask_message_name:
			return decoder.unmarshalFieldMask
		case genid.Empty_message_name:
			return decoder.unmarshalEmpty
		}
	}
	return nil
}
|
||||
|
||||
// The JSON representation of an Any message uses the regular representation of
// the deserialized, embedded message, with an additional field `@type` which
// contains the type URL. If the embedded message type is well-known and has a
// custom JSON representation, that representation will be embedded adding a
// field `value` which holds the custom JSON in addition to the `@type` field.

// marshalAny encodes a google.protobuf.Any by resolving its type_url via the
// configured Resolver, unmarshaling the packed bytes, and emitting the
// embedded message's JSON alongside an "@type" discriminator field.
func (e encoder) marshalAny(m protoreflect.Message) error {
	fds := m.Descriptor().Fields()
	fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
	fdValue := fds.ByNumber(genid.Any_Value_field_number)

	if !m.Has(fdType) {
		if !m.Has(fdValue) {
			// If message is empty, marshal out empty JSON object.
			e.StartObject()
			e.EndObject()
			return nil
		} else {
			// Return error if type_url field is not set, but value is set.
			return errors.New("%s: %v is not set", genid.Any_message_fullname, genid.Any_TypeUrl_field_name)
		}
	}

	typeVal := m.Get(fdType)
	valueVal := m.Get(fdValue)

	// Resolve the type in order to unmarshal value field.
	typeURL := typeVal.String()
	emt, err := e.opts.Resolver.FindMessageByURL(typeURL)
	if err != nil {
		return errors.New("%s: unable to resolve %q: %v", genid.Any_message_fullname, typeURL, err)
	}

	em := emt.New()
	err = proto.UnmarshalOptions{
		AllowPartial: true, // never check required fields inside an Any
		Resolver:     e.opts.Resolver,
	}.Unmarshal(valueVal.Bytes(), em.Interface())
	if err != nil {
		return errors.New("%s: unable to unmarshal %q: %v", genid.Any_message_fullname, typeURL, err)
	}

	// If type of value has custom JSON encoding, marshal out a field "value"
	// with corresponding custom JSON encoding of the embedded message as a
	// field.
	if marshal := wellKnownTypeMarshaler(emt.Descriptor().FullName()); marshal != nil {
		e.StartObject()
		defer e.EndObject()

		// Marshal out @type field.
		e.WriteName("@type")
		if err := e.WriteString(typeURL); err != nil {
			return err
		}

		e.WriteName("value")
		return marshal(e, em)
	}

	// Else, marshal out the embedded message's fields in this Any object.
	if err := e.marshalMessage(em, typeURL); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// unmarshalAny decodes a google.protobuf.Any. It first scans ahead with a
// cloned decoder to find the "@type" field, resolves the embedded type, then
// re-reads the object (or its "value" field for well-known types), finally
// re-serializing the embedded message into Any.value.
func (d decoder) unmarshalAny(m protoreflect.Message) error {
	// Peek to check for json.ObjectOpen to avoid advancing a read.
	start, err := d.Peek()
	if err != nil {
		return err
	}
	if start.Kind() != json.ObjectOpen {
		return d.unexpectedTokenError(start)
	}

	// Use another decoder to parse the unread bytes for @type field. This
	// avoids advancing a read from current decoder because the current JSON
	// object may contain the fields of the embedded type.
	dec := decoder{d.Clone(), UnmarshalOptions{}}
	tok, err := findTypeURL(dec)
	switch err {
	case errEmptyObject:
		// An empty JSON object translates to an empty Any message.
		d.Read() // Read json.ObjectOpen.
		d.Read() // Read json.ObjectClose.
		return nil

	case errMissingType:
		if d.opts.DiscardUnknown {
			// Treat all fields as unknowns, similar to an empty object.
			return d.skipJSONValue()
		}
		// Use start.Pos() for line position.
		return d.newError(start.Pos(), err.Error())

	default:
		if err != nil {
			return err
		}
	}

	typeURL := tok.ParsedString()
	emt, err := d.opts.Resolver.FindMessageByURL(typeURL)
	if err != nil {
		return d.newError(tok.Pos(), "unable to resolve %v: %q", tok.RawString(), err)
	}

	// Create new message for the embedded message type and unmarshal into it.
	em := emt.New()
	if unmarshal := wellKnownTypeUnmarshaler(emt.Descriptor().FullName()); unmarshal != nil {
		// If embedded message is a custom type,
		// unmarshal the JSON "value" field into it.
		if err := d.unmarshalAnyValue(unmarshal, em); err != nil {
			return err
		}
	} else {
		// Else unmarshal the current JSON object into it.
		if err := d.unmarshalMessage(em, true); err != nil {
			return err
		}
	}
	// Serialize the embedded message and assign the resulting bytes to the
	// proto value field.
	b, err := proto.MarshalOptions{
		AllowPartial:  true, // No need to check required fields inside an Any.
		Deterministic: true,
	}.Marshal(em.Interface())
	if err != nil {
		return d.newError(start.Pos(), "error in marshaling Any.value field: %v", err)
	}

	fds := m.Descriptor().Fields()
	fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
	fdValue := fds.ByNumber(genid.Any_Value_field_number)

	m.Set(fdType, protoreflect.ValueOfString(typeURL))
	m.Set(fdValue, protoreflect.ValueOfBytes(b))
	return nil
}
|
||||
|
||||
// Sentinel errors used by findTypeURL; unmarshalAny switches on them to pick
// its recovery strategy.
var errEmptyObject = fmt.Errorf(`empty object`)
var errMissingType = fmt.Errorf(`missing "@type" field`)

// findTypeURL returns the token for the "@type" field value from the given
// JSON bytes. It is expected that the given bytes start with json.ObjectOpen.
// It returns errEmptyObject if the JSON object is empty or errMissingType if
// @type field does not exist. It returns other error if the @type field is not
// valid or other decoding issues.
func findTypeURL(d decoder) (json.Token, error) {
	var typeURL string
	var typeTok json.Token
	numFields := 0
	// Skip start object.
	d.Read()

Loop:
	for {
		tok, err := d.Read()
		if err != nil {
			return json.Token{}, err
		}

		switch tok.Kind() {
		case json.ObjectClose:
			if typeURL == "" {
				// Did not find @type field.
				if numFields > 0 {
					return json.Token{}, errMissingType
				}
				return json.Token{}, errEmptyObject
			}
			break Loop

		case json.Name:
			numFields++
			if tok.Name() != "@type" {
				// Skip value.
				if err := d.skipJSONValue(); err != nil {
					return json.Token{}, err
				}
				continue
			}

			// Return error if this was previously set already.
			if typeURL != "" {
				return json.Token{}, d.newError(tok.Pos(), `duplicate "@type" field`)
			}
			// Read field value.
			tok, err := d.Read()
			if err != nil {
				return json.Token{}, err
			}
			if tok.Kind() != json.String {
				return json.Token{}, d.newError(tok.Pos(), `@type field value is not a string: %v`, tok.RawString())
			}
			typeURL = tok.ParsedString()
			if typeURL == "" {
				return json.Token{}, d.newError(tok.Pos(), `@type field contains empty value`)
			}
			typeTok = tok
		}
	}

	return typeTok, nil
}
|
||||
|
||||
// skipJSONValue parses a JSON value (null, boolean, string, number, object and
// array) in order to advance the read to the next JSON value. It relies on
// the decoder returning an error if the types are not in valid sequence.
func (d decoder) skipJSONValue() error {
	tok, err := d.Read()
	if err != nil {
		return err
	}
	// Only need to continue reading for objects and arrays.
	switch tok.Kind() {
	case json.ObjectOpen:
		for {
			tok, err := d.Read()
			if err != nil {
				return err
			}
			switch tok.Kind() {
			case json.ObjectClose:
				return nil
			case json.Name:
				// Skip object field value (recursively handles nesting).
				if err := d.skipJSONValue(); err != nil {
					return err
				}
			}
		}

	case json.ArrayOpen:
		for {
			// Peek rather than Read so a non-close token can be consumed by
			// the recursive call below.
			tok, err := d.Peek()
			if err != nil {
				return err
			}
			switch tok.Kind() {
			case json.ArrayClose:
				d.Read()
				return nil
			default:
				// Skip array item.
				if err := d.skipJSONValue(); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
|
||||
|
||||
// unmarshalAnyValue unmarshals the given custom-type message from the JSON
// object's "value" field. The "@type" field is skipped here because the
// caller (unmarshalAny) has already parsed it; a missing or duplicate
// "value" field is an error.
func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Message) error {
	// Skip ObjectOpen, and start reading the fields.
	d.Read()

	var found bool // Used for detecting duplicate "value".
	for {
		tok, err := d.Read()
		if err != nil {
			return err
		}
		switch tok.Kind() {
		case json.ObjectClose:
			if !found {
				return d.newError(tok.Pos(), `missing "value" field`)
			}
			return nil

		case json.Name:
			switch tok.Name() {
			case "@type":
				// Skip the value as this was previously parsed already.
				d.Read()

			case "value":
				if found {
					return d.newError(tok.Pos(), `duplicate "value" field`)
				}
				// Unmarshal the field value into the given message.
				if err := unmarshal(d, m); err != nil {
					return err
				}
				found = true

			default:
				if d.opts.DiscardUnknown {
					if err := d.skipJSONValue(); err != nil {
						return err
					}
					continue
				}
				return d.newError(tok.Pos(), "unknown field %v", tok.RawString())
			}
		}
	}
}
|
||||
|
||||
// Wrapper types are encoded as JSON primitives like string, number or boolean.
|
||||
|
||||
func (e encoder) marshalWrapperType(m protoreflect.Message) error {
|
||||
fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number)
|
||||
val := m.Get(fd)
|
||||
return e.marshalSingular(val, fd)
|
||||
}
|
||||
|
||||
func (d decoder) unmarshalWrapperType(m protoreflect.Message) error {
|
||||
fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number)
|
||||
val, err := d.unmarshalScalar(fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.Set(fd, val)
|
||||
return nil
|
||||
}
|
||||
|
||||
// The JSON representation for Empty is an empty JSON object.
|
||||
|
||||
func (e encoder) marshalEmpty(protoreflect.Message) error {
|
||||
e.StartObject()
|
||||
e.EndObject()
|
||||
return nil
|
||||
}
|
||||
|
||||
// unmarshalEmpty consumes a JSON object for google.protobuf.Empty. Any field
// found inside is skipped when DiscardUnknown is set, and rejected otherwise.
func (d decoder) unmarshalEmpty(protoreflect.Message) error {
	tok, err := d.Read()
	if err != nil {
		return err
	}
	if tok.Kind() != json.ObjectOpen {
		return d.unexpectedTokenError(tok)
	}

	for {
		tok, err := d.Read()
		if err != nil {
			return err
		}
		switch tok.Kind() {
		case json.ObjectClose:
			return nil

		case json.Name:
			if d.opts.DiscardUnknown {
				if err := d.skipJSONValue(); err != nil {
					return err
				}
				continue
			}
			return d.newError(tok.Pos(), "unknown field %v", tok.RawString())

		default:
			return d.unexpectedTokenError(tok)
		}
	}
}
|
||||
|
||||
// The JSON representation for Struct is a JSON object that contains the encoded
|
||||
// Struct.fields map and follows the serialization rules for a map.
|
||||
|
||||
func (e encoder) marshalStruct(m protoreflect.Message) error {
|
||||
fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number)
|
||||
return e.marshalMap(m.Get(fd).Map(), fd)
|
||||
}
|
||||
|
||||
func (d decoder) unmarshalStruct(m protoreflect.Message) error {
|
||||
fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number)
|
||||
return d.unmarshalMap(m.Mutable(fd).Map(), fd)
|
||||
}
|
||||
|
||||
// The JSON representation for ListValue is JSON array that contains the encoded
|
||||
// ListValue.values repeated field and follows the serialization rules for a
|
||||
// repeated field.
|
||||
|
||||
func (e encoder) marshalListValue(m protoreflect.Message) error {
|
||||
fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number)
|
||||
return e.marshalList(m.Get(fd).List(), fd)
|
||||
}
|
||||
|
||||
func (d decoder) unmarshalListValue(m protoreflect.Message) error {
|
||||
fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number)
|
||||
return d.unmarshalList(m.Mutable(fd).List(), fd)
|
||||
}
|
||||
|
||||
// The JSON representation for a Value is dependent on the oneof field that is
|
||||
// set. Each of the field in the oneof has its own custom serialization rule. A
|
||||
// Value message needs to be a oneof field set, else it is an error.
|
||||
|
||||
func (e encoder) marshalKnownValue(m protoreflect.Message) error {
|
||||
od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name)
|
||||
fd := m.WhichOneof(od)
|
||||
if fd == nil {
|
||||
return errors.New("%s: none of the oneof fields is set", genid.Value_message_fullname)
|
||||
}
|
||||
if fd.Number() == genid.Value_NumberValue_field_number {
|
||||
if v := m.Get(fd).Float(); math.IsNaN(v) || math.IsInf(v, 0) {
|
||||
return errors.New("%s: invalid %v value", genid.Value_NumberValue_field_fullname, v)
|
||||
}
|
||||
}
|
||||
return e.marshalSingular(m.Get(fd), fd)
|
||||
}
|
||||
|
||||
// unmarshalKnownValue decodes a JSON value into the matching member of a
// google.protobuf.Value "kind" oneof, chosen by the peeked token kind.
func (d decoder) unmarshalKnownValue(m protoreflect.Message) error {
	tok, err := d.Peek()
	if err != nil {
		return err
	}

	var fd protoreflect.FieldDescriptor
	var val protoreflect.Value
	switch tok.Kind() {
	case json.Null:
		d.Read()
		fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number)
		val = protoreflect.ValueOfEnum(0)

	case json.Bool:
		tok, err := d.Read()
		if err != nil {
			return err
		}
		fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number)
		val = protoreflect.ValueOfBool(tok.Bool())

	case json.Number:
		tok, err := d.Read()
		if err != nil {
			return err
		}
		fd = m.Descriptor().Fields().ByNumber(genid.Value_NumberValue_field_number)
		var ok bool
		val, ok = unmarshalFloat(tok, 64)
		if !ok {
			return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString())
		}

	case json.String:
		// A JSON string may have been encoded from the number_value field,
		// e.g. "NaN", "Infinity", etc. Parsing a proto double type also allows
		// for it to be in JSON string form. Given this custom encoding spec,
		// however, there is no way to identify that and hence a JSON string is
		// always assigned to the string_value field, which means that certain
		// encoding cannot be parsed back to the same field.
		tok, err := d.Read()
		if err != nil {
			return err
		}
		fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number)
		val = protoreflect.ValueOfString(tok.ParsedString())

	case json.ObjectOpen:
		fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number)
		val = m.NewField(fd)
		if err := d.unmarshalStruct(val.Message()); err != nil {
			return err
		}

	case json.ArrayOpen:
		fd = m.Descriptor().Fields().ByNumber(genid.Value_ListValue_field_number)
		val = m.NewField(fd)
		if err := d.unmarshalListValue(val.Message()); err != nil {
			return err
		}

	default:
		return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString())
	}

	m.Set(fd, val)
	return nil
}
|
||||
|
||||
// The JSON representation for a Duration is a JSON string that ends in the
// suffix "s" (indicating seconds) and is preceded by the number of seconds,
// with nanoseconds expressed as fractional seconds.
//
// Durations less than one second are represented with a 0 seconds field and a
// positive or negative nanos field. For durations of one second or more, a
// non-zero value for the nanos field must be of the same sign as the seconds
// field.
//
// Duration.seconds must be from -315,576,000,000 to +315,576,000,000 inclusive.
// Duration.nanos must be from -999,999,999 to +999,999,999 inclusive.

// Bounds mandated by the google.protobuf.Duration specification.
const (
	secondsInNanos       = 999999999
	maxSecondsInDuration = 315576000000
)

// marshalDuration encodes a google.protobuf.Duration as a decimal-seconds
// string with an "s" suffix, after validating the spec-mandated ranges and
// sign agreement between seconds and nanos.
func (e encoder) marshalDuration(m protoreflect.Message) error {
	fds := m.Descriptor().Fields()
	fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number)
	fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number)

	secsVal := m.Get(fdSeconds)
	nanosVal := m.Get(fdNanos)
	secs := secsVal.Int()
	nanos := nanosVal.Int()
	if secs < -maxSecondsInDuration || secs > maxSecondsInDuration {
		return errors.New("%s: seconds out of range %v", genid.Duration_message_fullname, secs)
	}
	if nanos < -secondsInNanos || nanos > secondsInNanos {
		return errors.New("%s: nanos out of range %v", genid.Duration_message_fullname, nanos)
	}
	if (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0) {
		return errors.New("%s: signs of seconds and nanos do not match", genid.Duration_message_fullname)
	}
	// Generated output always contains 0, 3, 6, or 9 fractional digits,
	// depending on required precision, followed by the suffix "s".
	var sign string
	if secs < 0 || nanos < 0 {
		sign, secs, nanos = "-", -1*secs, -1*nanos
	}
	x := fmt.Sprintf("%s%d.%09d", sign, secs, nanos)
	// Trim trailing zeros three at a time so precision stays at 0/3/6/9 digits.
	x = strings.TrimSuffix(x, "000")
	x = strings.TrimSuffix(x, "000")
	x = strings.TrimSuffix(x, ".000")
	e.WriteString(x + "s")
	return nil
}
|
||||
|
||||
// unmarshalDuration decodes a JSON duration string (e.g. "1.5s") into the
// seconds and nanos fields of a google.protobuf.Duration.
func (d decoder) unmarshalDuration(m protoreflect.Message) error {
	tok, err := d.Read()
	if err != nil {
		return err
	}
	if tok.Kind() != json.String {
		return d.unexpectedTokenError(tok)
	}

	secs, nanos, ok := parseDuration(tok.ParsedString())
	if !ok {
		return d.newError(tok.Pos(), "invalid %v value %v", genid.Duration_message_fullname, tok.RawString())
	}
	// Validate seconds. No need to validate nanos because parseDuration would
	// have covered that already.
	if secs < -maxSecondsInDuration || secs > maxSecondsInDuration {
		return d.newError(tok.Pos(), "%v value out of range: %v", genid.Duration_message_fullname, tok.RawString())
	}

	fds := m.Descriptor().Fields()
	fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number)
	fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number)

	m.Set(fdSeconds, protoreflect.ValueOfInt64(secs))
	m.Set(fdNanos, protoreflect.ValueOfInt32(nanos))
	return nil
}
|
||||
|
||||
// parseDuration parses the given input string for seconds and nanoseconds value
// for the Duration JSON format. The format is a decimal number with a suffix
// 's'. It can have optional plus/minus sign. There needs to be at least an
// integer or fractional part. Fractional part is limited to 9 digits only for
// nanoseconds precision, regardless of whether there are trailing zero digits.
// Example values are 1s, 0.1s, 1.s, .1s, +1s, -1s, -.1s.
func parseDuration(input string) (int64, int32, bool) {
	s := input
	if len(s) < 2 || s[len(s)-1] != 's' {
		return 0, 0, false
	}
	s = s[:len(s)-1] // drop the mandatory 's' suffix

	// Optional leading sign.
	neg := false
	switch s[0] {
	case '-':
		neg = true
		s = s[1:]
	case '+':
		s = s[1:]
	}
	if s == "" {
		return 0, 0, false
	}

	// Integer part: either a lone '0' or a digit run not starting with '0'.
	var intPart string
	switch {
	case s[0] == '0':
		s = s[1:]
	case '1' <= s[0] && s[0] <= '9':
		n := 1
		for n < len(s) && '0' <= s[n] && s[n] <= '9' {
			n++
		}
		intPart, s = s[:n], s[n:]
	case s[0] == '.':
		// Fractional-only form such as ".5s"; handled below.
	default:
		return 0, 0, false
	}

	// Fractional part: at most 9 digits, right-padded with zeros so the
	// buffer reads directly as nanoseconds.
	var frac [9]byte
	hasFrac := false
	if s != "" {
		if s[0] != '.' {
			return 0, 0, false
		}
		s = s[1:]
		i := 0
		for i < len(s) && i < 9 {
			if s[i] < '0' || s[i] > '9' {
				return 0, 0, false
			}
			frac[i] = s[i]
			i++
		}
		// Anything left over means more than 9 fractional digits.
		if i < len(s) {
			return 0, 0, false
		}
		for ; i < 9; i++ {
			frac[i] = '0'
		}
		hasFrac = true
	}

	var secs int64
	if intPart != "" {
		v, err := strconv.ParseInt(intPart, 10, 64)
		if err != nil {
			return 0, 0, false
		}
		secs = v
	}

	var nanos int64
	if hasFrac {
		digits := bytes.TrimLeft(frac[:], "0")
		if len(digits) > 0 {
			v, err := strconv.ParseInt(string(digits), 10, 32)
			if err != nil {
				return 0, 0, false
			}
			nanos = v
		}
	}

	if neg {
		if secs > 0 {
			secs = -secs
		}
		if nanos > 0 {
			nanos = -nanos
		}
	}
	return secs, int32(nanos), true
}
|
||||
|
||||
// The JSON representation for a Timestamp is a JSON string in the RFC 3339
// format, i.e. "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" where
// {year} is always expressed using four digits while {month}, {day}, {hour},
// {min}, and {sec} are zero-padded to two digits each. The fractional seconds,
// which can go up to 9 digits, up to 1 nanosecond resolution, is optional. The
// "Z" suffix indicates the timezone ("UTC"); the timezone is required. Encoding
// should always use UTC (as indicated by "Z") and a decoder should be able to
// accept both UTC and other timezones (as indicated by an offset).
//
// Timestamp.seconds must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z
// inclusive.
// Timestamp.nanos must be from 0 to 999,999,999 inclusive.

// Unix-second bounds corresponding to 9999-12-31T23:59:59Z and
// 0001-01-01T00:00:00Z respectively.
const (
	maxTimestampSeconds = 253402300799
	minTimestampSeconds = -62135596800
)

// marshalTimestamp encodes a google.protobuf.Timestamp as a Z-normalized
// RFC 3339 string after validating the spec-mandated ranges.
func (e encoder) marshalTimestamp(m protoreflect.Message) error {
	fds := m.Descriptor().Fields()
	fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number)
	fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number)

	secsVal := m.Get(fdSeconds)
	nanosVal := m.Get(fdNanos)
	secs := secsVal.Int()
	nanos := nanosVal.Int()
	if secs < minTimestampSeconds || secs > maxTimestampSeconds {
		return errors.New("%s: seconds out of range %v", genid.Timestamp_message_fullname, secs)
	}
	if nanos < 0 || nanos > secondsInNanos {
		return errors.New("%s: nanos out of range %v", genid.Timestamp_message_fullname, nanos)
	}
	// Uses RFC 3339, where generated output will be Z-normalized and uses 0, 3,
	// 6 or 9 fractional digits.
	t := time.Unix(secs, nanos).UTC()
	x := t.Format("2006-01-02T15:04:05.000000000")
	// Trim trailing zeros three at a time so precision stays at 0/3/6/9 digits.
	x = strings.TrimSuffix(x, "000")
	x = strings.TrimSuffix(x, "000")
	x = strings.TrimSuffix(x, ".000")
	e.WriteString(x + "Z")
	return nil
}
|
||||
|
||||
// unmarshalTimestamp decodes an RFC 3339 string into the seconds and nanos
// fields of a google.protobuf.Timestamp, enforcing the spec's second range
// and the 9-digit limit on fractional seconds.
func (d decoder) unmarshalTimestamp(m protoreflect.Message) error {
	tok, err := d.Read()
	if err != nil {
		return err
	}
	if tok.Kind() != json.String {
		return d.unexpectedTokenError(tok)
	}

	s := tok.ParsedString()
	t, err := time.Parse(time.RFC3339Nano, s)
	if err != nil {
		return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString())
	}
	// Validate seconds.
	secs := t.Unix()
	if secs < minTimestampSeconds || secs > maxTimestampSeconds {
		return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString())
	}
	// Validate subseconds: time.Parse accepts more fractional digits than the
	// proto JSON spec allows, so reject more than 9 by inspecting the string.
	i := strings.LastIndexByte(s, '.')  // start of subsecond field
	j := strings.LastIndexAny(s, "Z-+") // start of timezone field
	if i >= 0 && j >= i && j-i > len(".999999999") {
		return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString())
	}

	fds := m.Descriptor().Fields()
	fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number)
	fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number)

	m.Set(fdSeconds, protoreflect.ValueOfInt64(secs))
	m.Set(fdNanos, protoreflect.ValueOfInt32(int32(t.Nanosecond())))
	return nil
}
|
||||
|
||||
// The JSON representation for a FieldMask is a JSON string where paths are
// separated by a comma. Fields name in each path are converted to/from
// lower-camel naming conventions. Encoding should fail if the path name would
// end up differently after a round-trip.

// marshalFieldMask joins the FieldMask paths with commas, converting each
// path to lowerCamelCase and rejecting any whose conversion would not
// round-trip back to the original snake_case form.
func (e encoder) marshalFieldMask(m protoreflect.Message) error {
	fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number)
	list := m.Get(fd).List()
	paths := make([]string, 0, list.Len())

	for i := 0; i < list.Len(); i++ {
		s := list.Get(i).String()
		if !protoreflect.FullName(s).IsValid() {
			return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s)
		}
		// Return error if conversion to camelCase is not reversible.
		cc := strs.JSONCamelCase(s)
		if s != strs.JSONSnakeCase(cc) {
			return errors.New("%s contains irreversible value %q", genid.FieldMask_Paths_field_fullname, s)
		}
		paths = append(paths, cc)
	}

	e.WriteString(strings.Join(paths, ","))
	return nil
}
|
||||
|
||||
// unmarshalFieldMask splits a comma-separated JSON string into FieldMask
// paths, converting each path from lowerCamelCase back to snake_case.
// Paths already containing '_' are rejected because they could not have been
// produced by a valid encoding.
func (d decoder) unmarshalFieldMask(m protoreflect.Message) error {
	tok, err := d.Read()
	if err != nil {
		return err
	}
	if tok.Kind() != json.String {
		return d.unexpectedTokenError(tok)
	}
	str := strings.TrimSpace(tok.ParsedString())
	if str == "" {
		// An empty string is a valid, empty field mask.
		return nil
	}
	paths := strings.Split(str, ",")

	fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number)
	list := m.Mutable(fd).List()

	for _, s0 := range paths {
		s := strs.JSONSnakeCase(s0)
		if strings.Contains(s0, "_") || !protoreflect.FullName(s).IsValid() {
			return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0)
		}
		list.Append(protoreflect.ValueOfString(s))
	}
	return nil
}
|
340
vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
generated
vendored
Normal file
340
vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
generated
vendored
Normal file
|
@ -0,0 +1,340 @@
|
|||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"regexp"
|
||||
"unicode/utf8"
|
||||
|
||||
"google.golang.org/protobuf/internal/errors"
|
||||
)
|
||||
|
||||
// call specifies which Decoder method was invoked.
|
||||
type call uint8
|
||||
|
||||
const (
|
||||
readCall call = iota
|
||||
peekCall
|
||||
)
|
||||
|
||||
const unexpectedFmt = "unexpected token %s"
|
||||
|
||||
// ErrUnexpectedEOF means that EOF was encountered in the middle of the input.
|
||||
var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF)
|
||||
|
||||
// Decoder is a token-based JSON decoder.
|
||||
type Decoder struct {
|
||||
// lastCall is last method called, either readCall or peekCall.
|
||||
// Initial value is readCall.
|
||||
lastCall call
|
||||
|
||||
// lastToken contains the last read token.
|
||||
lastToken Token
|
||||
|
||||
// lastErr contains the last read error.
|
||||
lastErr error
|
||||
|
||||
// openStack is a stack containing ObjectOpen and ArrayOpen values. The
|
||||
// top of stack represents the object or the array the current value is
|
||||
// directly located in.
|
||||
openStack []Kind
|
||||
|
||||
// orig is used in reporting line and column.
|
||||
orig []byte
|
||||
// in contains the unconsumed input.
|
||||
in []byte
|
||||
}
|
||||
|
||||
// NewDecoder returns a Decoder to read the given []byte.
|
||||
func NewDecoder(b []byte) *Decoder {
|
||||
return &Decoder{orig: b, in: b}
|
||||
}
|
||||
|
||||
// Peek looks ahead and returns the next token kind without advancing a read.
|
||||
func (d *Decoder) Peek() (Token, error) {
|
||||
defer func() { d.lastCall = peekCall }()
|
||||
if d.lastCall == readCall {
|
||||
d.lastToken, d.lastErr = d.Read()
|
||||
}
|
||||
return d.lastToken, d.lastErr
|
||||
}
|
||||
|
||||
// Read returns the next JSON token.
|
||||
// It will return an error if there is no valid token.
|
||||
func (d *Decoder) Read() (Token, error) {
|
||||
const scalar = Null | Bool | Number | String
|
||||
|
||||
defer func() { d.lastCall = readCall }()
|
||||
if d.lastCall == peekCall {
|
||||
return d.lastToken, d.lastErr
|
||||
}
|
||||
|
||||
tok, err := d.parseNext()
|
||||
if err != nil {
|
||||
return Token{}, err
|
||||
}
|
||||
|
||||
switch tok.kind {
|
||||
case EOF:
|
||||
if len(d.openStack) != 0 ||
|
||||
d.lastToken.kind&scalar|ObjectClose|ArrayClose == 0 {
|
||||
return Token{}, ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
case Null:
|
||||
if !d.isValueNext() {
|
||||
return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
|
||||
}
|
||||
|
||||
case Bool, Number:
|
||||
if !d.isValueNext() {
|
||||
return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
|
||||
}
|
||||
|
||||
case String:
|
||||
if d.isValueNext() {
|
||||
break
|
||||
}
|
||||
// This string token should only be for a field name.
|
||||
if d.lastToken.kind&(ObjectOpen|comma) == 0 {
|
||||
return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
|
||||
}
|
||||
if len(d.in) == 0 {
|
||||
return Token{}, ErrUnexpectedEOF
|
||||
}
|
||||
if c := d.in[0]; c != ':' {
|
||||
return Token{}, d.newSyntaxError(d.currPos(), `unexpected character %s, missing ":" after field name`, string(c))
|
||||
}
|
||||
tok.kind = Name
|
||||
d.consume(1)
|
||||
|
||||
case ObjectOpen, ArrayOpen:
|
||||
if !d.isValueNext() {
|
||||
return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
|
||||
}
|
||||
d.openStack = append(d.openStack, tok.kind)
|
||||
|
||||
case ObjectClose:
|
||||
if len(d.openStack) == 0 ||
|
||||
d.lastToken.kind == comma ||
|
||||
d.openStack[len(d.openStack)-1] != ObjectOpen {
|
||||
return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
|
||||
}
|
||||
d.openStack = d.openStack[:len(d.openStack)-1]
|
||||
|
||||
case ArrayClose:
|
||||
if len(d.openStack) == 0 ||
|
||||
d.lastToken.kind == comma ||
|
||||
d.openStack[len(d.openStack)-1] != ArrayOpen {
|
||||
return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
|
||||
}
|
||||
d.openStack = d.openStack[:len(d.openStack)-1]
|
||||
|
||||
case comma:
|
||||
if len(d.openStack) == 0 ||
|
||||
d.lastToken.kind&(scalar|ObjectClose|ArrayClose) == 0 {
|
||||
return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
|
||||
}
|
||||
}
|
||||
|
||||
// Update d.lastToken only after validating token to be in the right sequence.
|
||||
d.lastToken = tok
|
||||
|
||||
if d.lastToken.kind == comma {
|
||||
return d.Read()
|
||||
}
|
||||
return tok, nil
|
||||
}
|
||||
|
||||
// Any sequence that looks like a non-delimiter (for error reporting).
|
||||
var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9]{1,32}|.)`)
|
||||
|
||||
// parseNext parses for the next JSON token. It returns a Token object for
|
||||
// different types, except for Name. It does not handle whether the next token
|
||||
// is in a valid sequence or not.
|
||||
func (d *Decoder) parseNext() (Token, error) {
|
||||
// Trim leading spaces.
|
||||
d.consume(0)
|
||||
|
||||
in := d.in
|
||||
if len(in) == 0 {
|
||||
return d.consumeToken(EOF, 0), nil
|
||||
}
|
||||
|
||||
switch in[0] {
|
||||
case 'n':
|
||||
if n := matchWithDelim("null", in); n != 0 {
|
||||
return d.consumeToken(Null, n), nil
|
||||
}
|
||||
|
||||
case 't':
|
||||
if n := matchWithDelim("true", in); n != 0 {
|
||||
return d.consumeBoolToken(true, n), nil
|
||||
}
|
||||
|
||||
case 'f':
|
||||
if n := matchWithDelim("false", in); n != 0 {
|
||||
return d.consumeBoolToken(false, n), nil
|
||||
}
|
||||
|
||||
case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
|
||||
if n, ok := parseNumber(in); ok {
|
||||
return d.consumeToken(Number, n), nil
|
||||
}
|
||||
|
||||
case '"':
|
||||
s, n, err := d.parseString(in)
|
||||
if err != nil {
|
||||
return Token{}, err
|
||||
}
|
||||
return d.consumeStringToken(s, n), nil
|
||||
|
||||
case '{':
|
||||
return d.consumeToken(ObjectOpen, 1), nil
|
||||
|
||||
case '}':
|
||||
return d.consumeToken(ObjectClose, 1), nil
|
||||
|
||||
case '[':
|
||||
return d.consumeToken(ArrayOpen, 1), nil
|
||||
|
||||
case ']':
|
||||
return d.consumeToken(ArrayClose, 1), nil
|
||||
|
||||
case ',':
|
||||
return d.consumeToken(comma, 1), nil
|
||||
}
|
||||
return Token{}, d.newSyntaxError(d.currPos(), "invalid value %s", errRegexp.Find(in))
|
||||
}
|
||||
|
||||
// newSyntaxError returns an error with line and column information useful for
|
||||
// syntax errors.
|
||||
func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error {
|
||||
e := errors.New(f, x...)
|
||||
line, column := d.Position(pos)
|
||||
return errors.New("syntax error (line %d:%d): %v", line, column, e)
|
||||
}
|
||||
|
||||
// Position returns line and column number of given index of the original input.
|
||||
// It will panic if index is out of range.
|
||||
func (d *Decoder) Position(idx int) (line int, column int) {
|
||||
b := d.orig[:idx]
|
||||
line = bytes.Count(b, []byte("\n")) + 1
|
||||
if i := bytes.LastIndexByte(b, '\n'); i >= 0 {
|
||||
b = b[i+1:]
|
||||
}
|
||||
column = utf8.RuneCount(b) + 1 // ignore multi-rune characters
|
||||
return line, column
|
||||
}
|
||||
|
||||
// currPos returns the current index position of d.in from d.orig.
|
||||
func (d *Decoder) currPos() int {
|
||||
return len(d.orig) - len(d.in)
|
||||
}
|
||||
|
||||
// matchWithDelim matches s with the input b and verifies that the match
|
||||
// terminates with a delimiter of some form (e.g., r"[^-+_.a-zA-Z0-9]").
|
||||
// As a special case, EOF is considered a delimiter. It returns the length of s
|
||||
// if there is a match, else 0.
|
||||
func matchWithDelim(s string, b []byte) int {
|
||||
if !bytes.HasPrefix(b, []byte(s)) {
|
||||
return 0
|
||||
}
|
||||
|
||||
n := len(s)
|
||||
if n < len(b) && isNotDelim(b[n]) {
|
||||
return 0
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// isNotDelim returns true if given byte is a not delimiter character.
|
||||
func isNotDelim(c byte) bool {
|
||||
return (c == '-' || c == '+' || c == '.' || c == '_' ||
|
||||
('a' <= c && c <= 'z') ||
|
||||
('A' <= c && c <= 'Z') ||
|
||||
('0' <= c && c <= '9'))
|
||||
}
|
||||
|
||||
// consume consumes n bytes of input and any subsequent whitespace.
|
||||
func (d *Decoder) consume(n int) {
|
||||
d.in = d.in[n:]
|
||||
for len(d.in) > 0 {
|
||||
switch d.in[0] {
|
||||
case ' ', '\n', '\r', '\t':
|
||||
d.in = d.in[1:]
|
||||
default:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// isValueNext returns true if next type should be a JSON value: Null,
|
||||
// Number, String or Bool.
|
||||
func (d *Decoder) isValueNext() bool {
|
||||
if len(d.openStack) == 0 {
|
||||
return d.lastToken.kind == 0
|
||||
}
|
||||
|
||||
start := d.openStack[len(d.openStack)-1]
|
||||
switch start {
|
||||
case ObjectOpen:
|
||||
return d.lastToken.kind&Name != 0
|
||||
case ArrayOpen:
|
||||
return d.lastToken.kind&(ArrayOpen|comma) != 0
|
||||
}
|
||||
panic(fmt.Sprintf(
|
||||
"unreachable logic in Decoder.isValueNext, lastToken.kind: %v, openStack: %v",
|
||||
d.lastToken.kind, start))
|
||||
}
|
||||
|
||||
// consumeToken constructs a Token for given Kind with raw value derived from
|
||||
// current d.in and given size, and consumes the given size-length of it.
|
||||
func (d *Decoder) consumeToken(kind Kind, size int) Token {
|
||||
tok := Token{
|
||||
kind: kind,
|
||||
raw: d.in[:size],
|
||||
pos: len(d.orig) - len(d.in),
|
||||
}
|
||||
d.consume(size)
|
||||
return tok
|
||||
}
|
||||
|
||||
// consumeBoolToken constructs a Token for a Bool kind with raw value derived from
|
||||
// current d.in and given size.
|
||||
func (d *Decoder) consumeBoolToken(b bool, size int) Token {
|
||||
tok := Token{
|
||||
kind: Bool,
|
||||
raw: d.in[:size],
|
||||
pos: len(d.orig) - len(d.in),
|
||||
boo: b,
|
||||
}
|
||||
d.consume(size)
|
||||
return tok
|
||||
}
|
||||
|
||||
// consumeStringToken constructs a Token for a String kind with raw value derived
|
||||
// from current d.in and given size.
|
||||
func (d *Decoder) consumeStringToken(s string, size int) Token {
|
||||
tok := Token{
|
||||
kind: String,
|
||||
raw: d.in[:size],
|
||||
pos: len(d.orig) - len(d.in),
|
||||
str: s,
|
||||
}
|
||||
d.consume(size)
|
||||
return tok
|
||||
}
|
||||
|
||||
// Clone returns a copy of the Decoder for use in reading ahead the next JSON
|
||||
// object, array or other values without affecting current Decoder.
|
||||
func (d *Decoder) Clone() *Decoder {
|
||||
ret := *d
|
||||
ret.openStack = append([]Kind(nil), ret.openStack...)
|
||||
return &ret
|
||||
}
|
254
vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go
generated
vendored
Normal file
254
vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go
generated
vendored
Normal file
|
@ -0,0 +1,254 @@
|
|||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// parseNumber reads the given []byte for a valid JSON number. If it is valid,
|
||||
// it returns the number of bytes. Parsing logic follows the definition in
|
||||
// https://tools.ietf.org/html/rfc7159#section-6, and is based off
|
||||
// encoding/json.isValidNumber function.
|
||||
func parseNumber(input []byte) (int, bool) {
|
||||
var n int
|
||||
|
||||
s := input
|
||||
if len(s) == 0 {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// Optional -
|
||||
if s[0] == '-' {
|
||||
s = s[1:]
|
||||
n++
|
||||
if len(s) == 0 {
|
||||
return 0, false
|
||||
}
|
||||
}
|
||||
|
||||
// Digits
|
||||
switch {
|
||||
case s[0] == '0':
|
||||
s = s[1:]
|
||||
n++
|
||||
|
||||
case '1' <= s[0] && s[0] <= '9':
|
||||
s = s[1:]
|
||||
n++
|
||||
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
|
||||
s = s[1:]
|
||||
n++
|
||||
}
|
||||
|
||||
default:
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// . followed by 1 or more digits.
|
||||
if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
|
||||
s = s[2:]
|
||||
n += 2
|
||||
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
|
||||
s = s[1:]
|
||||
n++
|
||||
}
|
||||
}
|
||||
|
||||
// e or E followed by an optional - or + and
|
||||
// 1 or more digits.
|
||||
if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
|
||||
s = s[1:]
|
||||
n++
|
||||
if s[0] == '+' || s[0] == '-' {
|
||||
s = s[1:]
|
||||
n++
|
||||
if len(s) == 0 {
|
||||
return 0, false
|
||||
}
|
||||
}
|
||||
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
|
||||
s = s[1:]
|
||||
n++
|
||||
}
|
||||
}
|
||||
|
||||
// Check that next byte is a delimiter or it is at the end.
|
||||
if n < len(input) && isNotDelim(input[n]) {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
return n, true
|
||||
}
|
||||
|
||||
// numberParts is the result of parsing out a valid JSON number. It contains
|
||||
// the parts of a number. The parts are used for integer conversion.
|
||||
type numberParts struct {
|
||||
neg bool
|
||||
intp []byte
|
||||
frac []byte
|
||||
exp []byte
|
||||
}
|
||||
|
||||
// parseNumber constructs numberParts from given []byte. The logic here is
|
||||
// similar to consumeNumber above with the difference of having to construct
|
||||
// numberParts. The slice fields in numberParts are subslices of the input.
|
||||
func parseNumberParts(input []byte) (numberParts, bool) {
|
||||
var neg bool
|
||||
var intp []byte
|
||||
var frac []byte
|
||||
var exp []byte
|
||||
|
||||
s := input
|
||||
if len(s) == 0 {
|
||||
return numberParts{}, false
|
||||
}
|
||||
|
||||
// Optional -
|
||||
if s[0] == '-' {
|
||||
neg = true
|
||||
s = s[1:]
|
||||
if len(s) == 0 {
|
||||
return numberParts{}, false
|
||||
}
|
||||
}
|
||||
|
||||
// Digits
|
||||
switch {
|
||||
case s[0] == '0':
|
||||
// Skip first 0 and no need to store.
|
||||
s = s[1:]
|
||||
|
||||
case '1' <= s[0] && s[0] <= '9':
|
||||
intp = s
|
||||
n := 1
|
||||
s = s[1:]
|
||||
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
|
||||
s = s[1:]
|
||||
n++
|
||||
}
|
||||
intp = intp[:n]
|
||||
|
||||
default:
|
||||
return numberParts{}, false
|
||||
}
|
||||
|
||||
// . followed by 1 or more digits.
|
||||
if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
|
||||
frac = s[1:]
|
||||
n := 1
|
||||
s = s[2:]
|
||||
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
|
||||
s = s[1:]
|
||||
n++
|
||||
}
|
||||
frac = frac[:n]
|
||||
}
|
||||
|
||||
// e or E followed by an optional - or + and
|
||||
// 1 or more digits.
|
||||
if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
|
||||
s = s[1:]
|
||||
exp = s
|
||||
n := 0
|
||||
if s[0] == '+' || s[0] == '-' {
|
||||
s = s[1:]
|
||||
n++
|
||||
if len(s) == 0 {
|
||||
return numberParts{}, false
|
||||
}
|
||||
}
|
||||
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
|
||||
s = s[1:]
|
||||
n++
|
||||
}
|
||||
exp = exp[:n]
|
||||
}
|
||||
|
||||
return numberParts{
|
||||
neg: neg,
|
||||
intp: intp,
|
||||
frac: bytes.TrimRight(frac, "0"), // Remove unnecessary 0s to the right.
|
||||
exp: exp,
|
||||
}, true
|
||||
}
|
||||
|
||||
// normalizeToIntString returns an integer string in normal form without the
|
||||
// E-notation for given numberParts. It will return false if it is not an
|
||||
// integer or if the exponent exceeds than max/min int value.
|
||||
func normalizeToIntString(n numberParts) (string, bool) {
|
||||
intpSize := len(n.intp)
|
||||
fracSize := len(n.frac)
|
||||
|
||||
if intpSize == 0 && fracSize == 0 {
|
||||
return "0", true
|
||||
}
|
||||
|
||||
var exp int
|
||||
if len(n.exp) > 0 {
|
||||
i, err := strconv.ParseInt(string(n.exp), 10, 32)
|
||||
if err != nil {
|
||||
return "", false
|
||||
}
|
||||
exp = int(i)
|
||||
}
|
||||
|
||||
var num []byte
|
||||
if exp >= 0 {
|
||||
// For positive E, shift fraction digits into integer part and also pad
|
||||
// with zeroes as needed.
|
||||
|
||||
// If there are more digits in fraction than the E value, then the
|
||||
// number is not an integer.
|
||||
if fracSize > exp {
|
||||
return "", false
|
||||
}
|
||||
|
||||
// Make sure resulting digits are within max value limit to avoid
|
||||
// unnecessarily constructing a large byte slice that may simply fail
|
||||
// later on.
|
||||
const maxDigits = 20 // Max uint64 value has 20 decimal digits.
|
||||
if intpSize+exp > maxDigits {
|
||||
return "", false
|
||||
}
|
||||
|
||||
// Set cap to make a copy of integer part when appended.
|
||||
num = n.intp[:len(n.intp):len(n.intp)]
|
||||
num = append(num, n.frac...)
|
||||
for i := 0; i < exp-fracSize; i++ {
|
||||
num = append(num, '0')
|
||||
}
|
||||
} else {
|
||||
// For negative E, shift digits in integer part out.
|
||||
|
||||
// If there are fractions, then the number is not an integer.
|
||||
if fracSize > 0 {
|
||||
return "", false
|
||||
}
|
||||
|
||||
// index is where the decimal point will be after adjusting for negative
|
||||
// exponent.
|
||||
index := intpSize + exp
|
||||
if index < 0 {
|
||||
return "", false
|
||||
}
|
||||
|
||||
num = n.intp
|
||||
// If any of the digits being shifted to the right of the decimal point
|
||||
// is non-zero, then the number is not an integer.
|
||||
for i := index; i < intpSize; i++ {
|
||||
if num[i] != '0' {
|
||||
return "", false
|
||||
}
|
||||
}
|
||||
num = num[:index]
|
||||
}
|
||||
|
||||
if n.neg {
|
||||
return "-" + string(num), true
|
||||
}
|
||||
return string(num), true
|
||||
}
|
91
vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go
generated
vendored
Normal file
91
vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go
generated
vendored
Normal file
|
@ -0,0 +1,91 @@
|
|||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"unicode"
|
||||
"unicode/utf16"
|
||||
"unicode/utf8"
|
||||
|
||||
"google.golang.org/protobuf/internal/strs"
|
||||
)
|
||||
|
||||
func (d *Decoder) parseString(in []byte) (string, int, error) {
|
||||
in0 := in
|
||||
if len(in) == 0 {
|
||||
return "", 0, ErrUnexpectedEOF
|
||||
}
|
||||
if in[0] != '"' {
|
||||
return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q at start of string", in[0])
|
||||
}
|
||||
in = in[1:]
|
||||
i := indexNeedEscapeInBytes(in)
|
||||
in, out := in[i:], in[:i:i] // set cap to prevent mutations
|
||||
for len(in) > 0 {
|
||||
switch r, n := utf8.DecodeRune(in); {
|
||||
case r == utf8.RuneError && n == 1:
|
||||
return "", 0, d.newSyntaxError(d.currPos(), "invalid UTF-8 in string")
|
||||
case r < ' ':
|
||||
return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q in string", r)
|
||||
case r == '"':
|
||||
in = in[1:]
|
||||
n := len(in0) - len(in)
|
||||
return string(out), n, nil
|
||||
case r == '\\':
|
||||
if len(in) < 2 {
|
||||
return "", 0, ErrUnexpectedEOF
|
||||
}
|
||||
switch r := in[1]; r {
|
||||
case '"', '\\', '/':
|
||||
in, out = in[2:], append(out, r)
|
||||
case 'b':
|
||||
in, out = in[2:], append(out, '\b')
|
||||
case 'f':
|
||||
in, out = in[2:], append(out, '\f')
|
||||
case 'n':
|
||||
in, out = in[2:], append(out, '\n')
|
||||
case 'r':
|
||||
in, out = in[2:], append(out, '\r')
|
||||
case 't':
|
||||
in, out = in[2:], append(out, '\t')
|
||||
case 'u':
|
||||
if len(in) < 6 {
|
||||
return "", 0, ErrUnexpectedEOF
|
||||
}
|
||||
v, err := strconv.ParseUint(string(in[2:6]), 16, 16)
|
||||
if err != nil {
|
||||
return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6])
|
||||
}
|
||||
in = in[6:]
|
||||
|
||||
r := rune(v)
|
||||
if utf16.IsSurrogate(r) {
|
||||
if len(in) < 6 {
|
||||
return "", 0, ErrUnexpectedEOF
|
||||
}
|
||||
v, err := strconv.ParseUint(string(in[2:6]), 16, 16)
|
||||
r = utf16.DecodeRune(r, rune(v))
|
||||
if in[0] != '\\' || in[1] != 'u' ||
|
||||
r == unicode.ReplacementChar || err != nil {
|
||||
return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6])
|
||||
}
|
||||
in = in[6:]
|
||||
}
|
||||
out = append(out, string(r)...)
|
||||
default:
|
||||
return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:2])
|
||||
}
|
||||
default:
|
||||
i := indexNeedEscapeInBytes(in[n:])
|
||||
in, out = in[n+i:], append(out, in[:n+i]...)
|
||||
}
|
||||
}
|
||||
return "", 0, ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
// indexNeedEscapeInBytes returns the index of the character that needs
|
||||
// escaping. If no characters need escaping, this returns the input length.
|
||||
func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) }
|
192
vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go
generated
vendored
Normal file
192
vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go
generated
vendored
Normal file
|
@ -0,0 +1,192 @@
|
|||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Kind represents a token kind expressible in the JSON format.
|
||||
type Kind uint16
|
||||
|
||||
const (
|
||||
Invalid Kind = (1 << iota) / 2
|
||||
EOF
|
||||
Null
|
||||
Bool
|
||||
Number
|
||||
String
|
||||
Name
|
||||
ObjectOpen
|
||||
ObjectClose
|
||||
ArrayOpen
|
||||
ArrayClose
|
||||
|
||||
// comma is only for parsing in between tokens and
|
||||
// does not need to be exported.
|
||||
comma
|
||||
)
|
||||
|
||||
func (k Kind) String() string {
|
||||
switch k {
|
||||
case EOF:
|
||||
return "eof"
|
||||
case Null:
|
||||
return "null"
|
||||
case Bool:
|
||||
return "bool"
|
||||
case Number:
|
||||
return "number"
|
||||
case String:
|
||||
return "string"
|
||||
case ObjectOpen:
|
||||
return "{"
|
||||
case ObjectClose:
|
||||
return "}"
|
||||
case Name:
|
||||
return "name"
|
||||
case ArrayOpen:
|
||||
return "["
|
||||
case ArrayClose:
|
||||
return "]"
|
||||
case comma:
|
||||
return ","
|
||||
}
|
||||
return "<invalid>"
|
||||
}
|
||||
|
||||
// Token provides a parsed token kind and value.
|
||||
//
|
||||
// Values are provided by the difference accessor methods. The accessor methods
|
||||
// Name, Bool, and ParsedString will panic if called on the wrong kind. There
|
||||
// are different accessor methods for the Number kind for converting to the
|
||||
// appropriate Go numeric type and those methods have the ok return value.
|
||||
type Token struct {
|
||||
// Token kind.
|
||||
kind Kind
|
||||
// pos provides the position of the token in the original input.
|
||||
pos int
|
||||
// raw bytes of the serialized token.
|
||||
// This is a subslice into the original input.
|
||||
raw []byte
|
||||
// boo is parsed boolean value.
|
||||
boo bool
|
||||
// str is parsed string value.
|
||||
str string
|
||||
}
|
||||
|
||||
// Kind returns the token kind.
|
||||
func (t Token) Kind() Kind {
|
||||
return t.kind
|
||||
}
|
||||
|
||||
// RawString returns the read value in string.
|
||||
func (t Token) RawString() string {
|
||||
return string(t.raw)
|
||||
}
|
||||
|
||||
// Pos returns the token position from the input.
|
||||
func (t Token) Pos() int {
|
||||
return t.pos
|
||||
}
|
||||
|
||||
// Name returns the object name if token is Name, else it panics.
|
||||
func (t Token) Name() string {
|
||||
if t.kind == Name {
|
||||
return t.str
|
||||
}
|
||||
panic(fmt.Sprintf("Token is not a Name: %v", t.RawString()))
|
||||
}
|
||||
|
||||
// Bool returns the bool value if token kind is Bool, else it panics.
|
||||
func (t Token) Bool() bool {
|
||||
if t.kind == Bool {
|
||||
return t.boo
|
||||
}
|
||||
panic(fmt.Sprintf("Token is not a Bool: %v", t.RawString()))
|
||||
}
|
||||
|
||||
// ParsedString returns the string value for a JSON string token or the read
|
||||
// value in string if token is not a string.
|
||||
func (t Token) ParsedString() string {
|
||||
if t.kind == String {
|
||||
return t.str
|
||||
}
|
||||
panic(fmt.Sprintf("Token is not a String: %v", t.RawString()))
|
||||
}
|
||||
|
||||
// Float returns the floating-point number if token kind is Number.
|
||||
//
|
||||
// The floating-point precision is specified by the bitSize parameter: 32 for
|
||||
// float32 or 64 for float64. If bitSize=32, the result still has type float64,
|
||||
// but it will be convertible to float32 without changing its value. It will
|
||||
// return false if the number exceeds the floating point limits for given
|
||||
// bitSize.
|
||||
func (t Token) Float(bitSize int) (float64, bool) {
|
||||
if t.kind != Number {
|
||||
return 0, false
|
||||
}
|
||||
f, err := strconv.ParseFloat(t.RawString(), bitSize)
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
return f, true
|
||||
}
|
||||
|
||||
// Int returns the signed integer number if token is Number.
|
||||
//
|
||||
// The given bitSize specifies the integer type that the result must fit into.
|
||||
// It returns false if the number is not an integer value or if the result
|
||||
// exceeds the limits for given bitSize.
|
||||
func (t Token) Int(bitSize int) (int64, bool) {
|
||||
s, ok := t.getIntStr()
|
||||
if !ok {
|
||||
return 0, false
|
||||
}
|
||||
n, err := strconv.ParseInt(s, 10, bitSize)
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
return n, true
|
||||
}
|
||||
|
||||
// Uint returns the signed integer number if token is Number.
|
||||
//
|
||||
// The given bitSize specifies the unsigned integer type that the result must
|
||||
// fit into. It returns false if the number is not an unsigned integer value
|
||||
// or if the result exceeds the limits for given bitSize.
|
||||
func (t Token) Uint(bitSize int) (uint64, bool) {
|
||||
s, ok := t.getIntStr()
|
||||
if !ok {
|
||||
return 0, false
|
||||
}
|
||||
n, err := strconv.ParseUint(s, 10, bitSize)
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
return n, true
|
||||
}
|
||||
|
||||
func (t Token) getIntStr() (string, bool) {
|
||||
if t.kind != Number {
|
||||
return "", false
|
||||
}
|
||||
parts, ok := parseNumberParts(t.raw)
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
return normalizeToIntString(parts)
|
||||
}
|
||||
|
||||
// TokenEquals returns true if given Tokens are equal, else false.
|
||||
func TokenEquals(x, y Token) bool {
|
||||
return x.kind == y.kind &&
|
||||
x.pos == y.pos &&
|
||||
bytes.Equal(x.raw, y.raw) &&
|
||||
x.boo == y.boo &&
|
||||
x.str == y.str
|
||||
}
|
278
vendor/google.golang.org/protobuf/internal/encoding/json/encode.go
generated
vendored
Normal file
278
vendor/google.golang.org/protobuf/internal/encoding/json/encode.go
generated
vendored
Normal file
|
@ -0,0 +1,278 @@
|
|||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
import (
|
||||
"math"
|
||||
"math/bits"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"google.golang.org/protobuf/internal/detrand"
|
||||
"google.golang.org/protobuf/internal/errors"
|
||||
)
|
||||
|
||||
// kind represents an encoding type.
|
||||
type kind uint8
|
||||
|
||||
const (
|
||||
_ kind = (1 << iota) / 2
|
||||
name
|
||||
scalar
|
||||
objectOpen
|
||||
objectClose
|
||||
arrayOpen
|
||||
arrayClose
|
||||
)
|
||||
|
||||
// Encoder provides methods to write out JSON constructs and values. The user is
|
||||
// responsible for producing valid sequences of JSON constructs and values.
|
||||
type Encoder struct {
|
||||
indent string
|
||||
lastKind kind
|
||||
indents []byte
|
||||
out []byte
|
||||
}
|
||||
|
||||
// NewEncoder returns an Encoder.
|
||||
//
|
||||
// If indent is a non-empty string, it causes every entry for an Array or Object
|
||||
// to be preceded by the indent and trailed by a newline.
|
||||
func NewEncoder(buf []byte, indent string) (*Encoder, error) {
|
||||
e := &Encoder{
|
||||
out: buf,
|
||||
}
|
||||
if len(indent) > 0 {
|
||||
if strings.Trim(indent, " \t") != "" {
|
||||
return nil, errors.New("indent may only be composed of space or tab characters")
|
||||
}
|
||||
e.indent = indent
|
||||
}
|
||||
return e, nil
|
||||
}
|
||||
|
||||
// Bytes returns the content of the written bytes.
|
||||
func (e *Encoder) Bytes() []byte {
|
||||
return e.out
|
||||
}
|
||||
|
||||
// WriteNull writes out the null value.
|
||||
func (e *Encoder) WriteNull() {
|
||||
e.prepareNext(scalar)
|
||||
e.out = append(e.out, "null"...)
|
||||
}
|
||||
|
||||
// WriteBool writes out the given boolean value.
|
||||
func (e *Encoder) WriteBool(b bool) {
|
||||
e.prepareNext(scalar)
|
||||
if b {
|
||||
e.out = append(e.out, "true"...)
|
||||
} else {
|
||||
e.out = append(e.out, "false"...)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteString writes out the given string in JSON string value. Returns error
|
||||
// if input string contains invalid UTF-8.
|
||||
func (e *Encoder) WriteString(s string) error {
|
||||
e.prepareNext(scalar)
|
||||
var err error
|
||||
if e.out, err = appendString(e.out, s); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sentinel error used for indicating invalid UTF-8.
|
||||
var errInvalidUTF8 = errors.New("invalid UTF-8")
|
||||
|
||||
func appendString(out []byte, in string) ([]byte, error) {
|
||||
out = append(out, '"')
|
||||
i := indexNeedEscapeInString(in)
|
||||
in, out = in[i:], append(out, in[:i]...)
|
||||
for len(in) > 0 {
|
||||
switch r, n := utf8.DecodeRuneInString(in); {
|
||||
case r == utf8.RuneError && n == 1:
|
||||
return out, errInvalidUTF8
|
||||
case r < ' ' || r == '"' || r == '\\':
|
||||
out = append(out, '\\')
|
||||
switch r {
|
||||
case '"', '\\':
|
||||
out = append(out, byte(r))
|
||||
case '\b':
|
||||
out = append(out, 'b')
|
||||
case '\f':
|
||||
out = append(out, 'f')
|
||||
case '\n':
|
||||
out = append(out, 'n')
|
||||
case '\r':
|
||||
out = append(out, 'r')
|
||||
case '\t':
|
||||
out = append(out, 't')
|
||||
default:
|
||||
out = append(out, 'u')
|
||||
out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...)
|
||||
out = strconv.AppendUint(out, uint64(r), 16)
|
||||
}
|
||||
in = in[n:]
|
||||
default:
|
||||
i := indexNeedEscapeInString(in[n:])
|
||||
in, out = in[n+i:], append(out, in[:n+i]...)
|
||||
}
|
||||
}
|
||||
out = append(out, '"')
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// indexNeedEscapeInString returns the index of the character that needs
|
||||
// escaping. If no characters need escaping, this returns the input length.
|
||||
func indexNeedEscapeInString(s string) int {
|
||||
for i, r := range s {
|
||||
if r < ' ' || r == '\\' || r == '"' || r == utf8.RuneError {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return len(s)
|
||||
}
|
||||
|
||||
// WriteFloat writes out the given float and bitSize in JSON number value.
|
||||
func (e *Encoder) WriteFloat(n float64, bitSize int) {
|
||||
e.prepareNext(scalar)
|
||||
e.out = appendFloat(e.out, n, bitSize)
|
||||
}
|
||||
|
||||
// appendFloat formats given float in bitSize, and appends to the given []byte.
|
||||
func appendFloat(out []byte, n float64, bitSize int) []byte {
|
||||
switch {
|
||||
case math.IsNaN(n):
|
||||
return append(out, `"NaN"`...)
|
||||
case math.IsInf(n, +1):
|
||||
return append(out, `"Infinity"`...)
|
||||
case math.IsInf(n, -1):
|
||||
return append(out, `"-Infinity"`...)
|
||||
}
|
||||
|
||||
// JSON number formatting logic based on encoding/json.
|
||||
// See floatEncoder.encode for reference.
|
||||
fmt := byte('f')
|
||||
if abs := math.Abs(n); abs != 0 {
|
||||
if bitSize == 64 && (abs < 1e-6 || abs >= 1e21) ||
|
||||
bitSize == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
|
||||
fmt = 'e'
|
||||
}
|
||||
}
|
||||
out = strconv.AppendFloat(out, n, fmt, -1, bitSize)
|
||||
if fmt == 'e' {
|
||||
n := len(out)
|
||||
if n >= 4 && out[n-4] == 'e' && out[n-3] == '-' && out[n-2] == '0' {
|
||||
out[n-2] = out[n-1]
|
||||
out = out[:n-1]
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// WriteInt writes out the given signed integer in JSON number value.
// The value is appended as base-10 ASCII, which never needs escaping.
func (e *Encoder) WriteInt(n int64) {
	e.prepareNext(scalar)
	e.out = strconv.AppendInt(e.out, n, 10)
}
|
||||
|
||||
// WriteUint writes out the given unsigned integer in JSON number value.
// The value is appended as base-10 ASCII, which never needs escaping.
func (e *Encoder) WriteUint(n uint64) {
	e.prepareNext(scalar)
	e.out = strconv.AppendUint(e.out, n, 10)
}
|
||||
|
||||
// StartObject writes out the '{' symbol.
// prepareNext inserts any comma/indentation owed before this value and
// records objectOpen as the last written kind.
func (e *Encoder) StartObject() {
	e.prepareNext(objectOpen)
	e.out = append(e.out, '{')
}
|
||||
|
||||
// EndObject writes out the '}' symbol.
// prepareNext unwinds one level of indentation (when indenting) and records
// objectClose as the last written kind.
func (e *Encoder) EndObject() {
	e.prepareNext(objectClose)
	e.out = append(e.out, '}')
}
|
||||
|
||||
// WriteName writes out the given string in JSON string value and the name
// separator ':'. Returns error if input string contains invalid UTF-8, which
// should not be likely as protobuf field names should be valid.
func (e *Encoder) WriteName(s string) error {
	e.prepareNext(name)
	var err error
	// Append to output regardless of error. The ':' separator is also always
	// written, so the output stays structurally valid JSON even on error.
	e.out, err = appendString(e.out, s)
	e.out = append(e.out, ':')
	return err
}
|
||||
|
||||
// StartArray writes out the '[' symbol.
// prepareNext inserts any comma/indentation owed before this value and
// records arrayOpen as the last written kind.
func (e *Encoder) StartArray() {
	e.prepareNext(arrayOpen)
	e.out = append(e.out, '[')
}
|
||||
|
||||
// EndArray writes out the ']' symbol.
// prepareNext unwinds one level of indentation (when indenting) and records
// arrayClose as the last written kind.
func (e *Encoder) EndArray() {
	e.prepareNext(arrayClose)
	e.out = append(e.out, ']')
}
|
||||
|
||||
// prepareNext adds possible comma and indentation for the next value based
// on last type and indent option. It also updates lastKind to next.
//
// e.indents acts as a stack of accumulated indentation: one copy of e.indent
// is pushed per open object/array and popped per close, so the current nesting
// depth is always len(e.indents)/len(e.indent).
func (e *Encoder) prepareNext(next kind) {
	defer func() {
		// Set lastKind to next. Done in a defer so every return path below
		// records the transition.
		e.lastKind = next
	}()

	// An empty indent means single-line (compact) output.
	if len(e.indent) == 0 {
		// Need to add comma on the following condition: a completed value
		// (scalar or closed container) is followed by another value or name.
		if e.lastKind&(scalar|objectClose|arrayClose) != 0 &&
			next&(name|scalar|objectOpen|arrayOpen) != 0 {
			e.out = append(e.out, ',')
			// For single-line output, add a random extra space after each
			// comma to make output unstable.
			if detrand.Bool() {
				e.out = append(e.out, ' ')
			}
		}
		return
	}

	// Multi-line (indented) output.
	switch {
	case e.lastKind&(objectOpen|arrayOpen) != 0:
		// If next type is NOT closing, add indent and newline.
		// (An immediately-closed container stays on one line: "{}" / "[]".)
		if next&(objectClose|arrayClose) == 0 {
			e.indents = append(e.indents, e.indent...)
			e.out = append(e.out, '\n')
			e.out = append(e.out, e.indents...)
		}

	case e.lastKind&(scalar|objectClose|arrayClose) != 0:
		switch {
		// If next type is either a value or name, add comma and newline.
		case next&(name|scalar|objectOpen|arrayOpen) != 0:
			e.out = append(e.out, ',', '\n')

		// If next type is a closing object or array, adjust indentation.
		// Pops one e.indent-sized chunk off the indents stack before the
		// closing brace/bracket is written.
		case next&(objectClose|arrayClose) != 0:
			e.indents = e.indents[:len(e.indents)-len(e.indent)]
			e.out = append(e.out, '\n')
		}
		e.out = append(e.out, e.indents...)

	case e.lastKind&name != 0:
		e.out = append(e.out, ' ')
		// For multi-line output, add a random extra space after key: to make
		// output unstable.
		if detrand.Bool() {
			e.out = append(e.out, ' ')
		}
	}
}
|
|
@ -1011,7 +1011,7 @@ github.com/waku-org/go-discover/discover/v5wire
|
|||
github.com/waku-org/go-libp2p-rendezvous
|
||||
github.com/waku-org/go-libp2p-rendezvous/db
|
||||
github.com/waku-org/go-libp2p-rendezvous/pb
|
||||
# github.com/waku-org/go-waku v0.8.1-0.20231103161423-351dd55a1498
|
||||
# github.com/waku-org/go-waku v0.8.1-0.20231201063231-bdd5d02a91a3
|
||||
## explicit; go 1.19
|
||||
github.com/waku-org/go-waku/logging
|
||||
github.com/waku-org/go-waku/waku/persistence
|
||||
|
@ -1042,11 +1042,13 @@ github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager
|
|||
github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/dynamic
|
||||
github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/static
|
||||
github.com/waku-org/go-waku/waku/v2/protocol/rln/keystore
|
||||
github.com/waku-org/go-waku/waku/v2/protocol/rln/pb
|
||||
github.com/waku-org/go-waku/waku/v2/protocol/rln/web3
|
||||
github.com/waku-org/go-waku/waku/v2/protocol/store
|
||||
github.com/waku-org/go-waku/waku/v2/protocol/store/pb
|
||||
github.com/waku-org/go-waku/waku/v2/protocol/subscription
|
||||
github.com/waku-org/go-waku/waku/v2/rendezvous
|
||||
github.com/waku-org/go-waku/waku/v2/service
|
||||
github.com/waku-org/go-waku/waku/v2/timesource
|
||||
github.com/waku-org/go-waku/waku/v2/utils
|
||||
# github.com/waku-org/go-zerokit-rln v0.1.14-0.20230916173259-d284a3d8f2fd
|
||||
|
@ -1296,12 +1298,14 @@ golang.org/x/xerrors/internal
|
|||
google.golang.org/protobuf/cmd/protoc-gen-go
|
||||
google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo
|
||||
google.golang.org/protobuf/compiler/protogen
|
||||
google.golang.org/protobuf/encoding/protojson
|
||||
google.golang.org/protobuf/encoding/prototext
|
||||
google.golang.org/protobuf/encoding/protowire
|
||||
google.golang.org/protobuf/internal/descfmt
|
||||
google.golang.org/protobuf/internal/descopts
|
||||
google.golang.org/protobuf/internal/detrand
|
||||
google.golang.org/protobuf/internal/encoding/defval
|
||||
google.golang.org/protobuf/internal/encoding/json
|
||||
google.golang.org/protobuf/internal/encoding/messageset
|
||||
google.golang.org/protobuf/internal/encoding/tag
|
||||
google.golang.org/protobuf/internal/encoding/text
|
||||
|
|
|
@ -35,6 +35,8 @@ import (
|
|||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// List of errors
|
||||
|
@ -246,11 +248,11 @@ func (api *PublicWakuAPI) Post(ctx context.Context, req NewMessage) (hexutil.Byt
|
|||
|
||||
wakuMsg := &pb.WakuMessage{
|
||||
Payload: payload,
|
||||
Version: version,
|
||||
Version: &version,
|
||||
ContentTopic: req.ContentTopic.ContentTopic(),
|
||||
Timestamp: api.w.timestamp(),
|
||||
Timestamp: proto.Int64(api.w.timestamp()),
|
||||
Meta: []byte{}, // TODO: empty for now. Once we use Waku Archive v2, we should deprecate the timestamp and use an ULID here
|
||||
Ephemeral: req.Ephemeral,
|
||||
Ephemeral: &req.Ephemeral,
|
||||
}
|
||||
|
||||
hash, err := api.w.Send(req.PubsubTopic, wakuMsg)
|
||||
|
|
|
@ -166,7 +166,7 @@ func NewReceivedMessage(env *protocol.Envelope, msgType MessageType) *ReceivedMe
|
|||
return &ReceivedMessage{
|
||||
Envelope: env,
|
||||
MsgType: msgType,
|
||||
Sent: uint32(env.Message().Timestamp / int64(time.Second)),
|
||||
Sent: uint32(env.Message().GetTimestamp() / int64(time.Second)),
|
||||
ContentTopic: ct,
|
||||
PubsubTopic: env.PubsubTopic(),
|
||||
}
|
||||
|
@ -240,7 +240,7 @@ func (msg *ReceivedMessage) Open(watcher *Filter) (result *ReceivedMessage) {
|
|||
result.Signature = raw.Signature
|
||||
result.Src = raw.PubKey
|
||||
|
||||
result.Sent = uint32(msg.Envelope.Message().Timestamp / int64(time.Second))
|
||||
result.Sent = uint32(msg.Envelope.Message().GetTimestamp() / int64(time.Second))
|
||||
|
||||
ct, err := ExtractTopicFromContentTopic(msg.Envelope.Message().ContentTopic)
|
||||
if err != nil {
|
||||
|
|
|
@ -99,11 +99,11 @@ func (d *DBStore) Validate(env *protocol.Envelope) error {
|
|||
lowerBound := n.Add(-MaxTimeVariance)
|
||||
|
||||
// Ensure that messages don't "jump" to the front of the queue with future timestamps
|
||||
if env.Message().Timestamp > upperBound.UnixNano() {
|
||||
if env.Message().GetTimestamp() > upperBound.UnixNano() {
|
||||
return ErrFutureMessage
|
||||
}
|
||||
|
||||
if env.Message().Timestamp < lowerBound.UnixNano() {
|
||||
if env.Message().GetTimestamp() < lowerBound.UnixNano() {
|
||||
return ErrMessageTooOld
|
||||
}
|
||||
|
||||
|
@ -255,21 +255,21 @@ func (d *DBStore) Query(query *storepb.HistoryQuery) (*storepb.Index, []gowakuPe
|
|||
}
|
||||
}
|
||||
|
||||
if query.StartTime != 0 {
|
||||
if query.GetStartTime() != 0 {
|
||||
if !usesCursor || query.PagingInfo.Direction == storepb.PagingInfo_BACKWARD {
|
||||
paramCnt++
|
||||
conditions = append(conditions, fmt.Sprintf("id >= $%d", paramCnt))
|
||||
startTimeDBKey := gowakuPersistence.NewDBKey(uint64(query.StartTime), uint64(query.StartTime), "", []byte{})
|
||||
startTimeDBKey := gowakuPersistence.NewDBKey(uint64(query.GetStartTime()), uint64(query.GetStartTime()), "", []byte{})
|
||||
parameters = append(parameters, startTimeDBKey.Bytes())
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if query.EndTime != 0 {
|
||||
if query.GetEndTime() != 0 {
|
||||
if !usesCursor || query.PagingInfo.Direction == storepb.PagingInfo_FORWARD {
|
||||
paramCnt++
|
||||
conditions = append(conditions, fmt.Sprintf("id <= $%d", paramCnt))
|
||||
endTimeDBKey := gowakuPersistence.NewDBKey(uint64(query.EndTime), uint64(query.EndTime), "", []byte{})
|
||||
endTimeDBKey := gowakuPersistence.NewDBKey(uint64(query.GetEndTime()), uint64(query.GetEndTime()), "", []byte{})
|
||||
parameters = append(parameters, endTimeDBKey.Bytes())
|
||||
}
|
||||
}
|
||||
|
@ -407,8 +407,8 @@ func (d *DBStore) GetStoredMessage(row *sql.Rows) (gowakuPersistence.StoredMessa
|
|||
msg := new(pb.WakuMessage)
|
||||
msg.ContentTopic = contentTopic
|
||||
msg.Payload = payload
|
||||
msg.Timestamp = senderTimestamp
|
||||
msg.Version = version
|
||||
msg.Timestamp = &senderTimestamp
|
||||
msg.Version = &version
|
||||
|
||||
record := gowakuPersistence.StoredMessage{
|
||||
ID: id,
|
||||
|
|
|
@ -35,6 +35,7 @@ import (
|
|||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/p2p/protocol/identify"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
|
@ -1024,7 +1025,7 @@ func (w *Waku) broadcast() {
|
|||
case envelope := <-w.sendQueue:
|
||||
pubsubTopic := envelope.PubsubTopic()
|
||||
var err error
|
||||
logger := w.logger.With(zap.String("envelopeHash", hexutil.Encode(envelope.Hash())), zap.String("pubsubTopic", pubsubTopic), zap.String("contentTopic", envelope.Message().ContentTopic), zap.Int64("timestamp", envelope.Message().Timestamp))
|
||||
logger := w.logger.With(zap.String("envelopeHash", hexutil.Encode(envelope.Hash())), zap.String("pubsubTopic", pubsubTopic), zap.String("contentTopic", envelope.Message().ContentTopic), zap.Int64("timestamp", envelope.Message().GetTimestamp()))
|
||||
// For now only used in testing to simulate going offline
|
||||
if w.settings.SkipPublishToTopic {
|
||||
err = errors.New("Test send failure")
|
||||
|
@ -1077,7 +1078,7 @@ func (w *Waku) Send(pubsubTopic string, msg *pb.WakuMessage) ([]byte, error) {
|
|||
}
|
||||
}
|
||||
|
||||
envelope := protocol.NewEnvelope(msg, msg.Timestamp, pubsubTopic)
|
||||
envelope := protocol.NewEnvelope(msg, msg.GetTimestamp(), pubsubTopic)
|
||||
|
||||
w.sendQueue <- envelope
|
||||
|
||||
|
@ -1102,10 +1103,10 @@ func (w *Waku) query(ctx context.Context, peerID peer.ID, pubsubTopic string, to
|
|||
opts = append(opts, store.WithPeer(peerID))
|
||||
|
||||
query := store.Query{
|
||||
StartTime: int64(from) * int64(time.Second),
|
||||
EndTime: int64(to) * int64(time.Second),
|
||||
StartTime: proto.Int64(int64(from) * int64(time.Second)),
|
||||
EndTime: proto.Int64(int64(to) * int64(time.Second)),
|
||||
ContentTopics: strTopics,
|
||||
Topic: pubsubTopic,
|
||||
PubsubTopic: pubsubTopic,
|
||||
}
|
||||
|
||||
return w.node.Store().Query(ctx, query, opts...)
|
||||
|
@ -1129,7 +1130,7 @@ func (w *Waku) Query(ctx context.Context, peerID peer.ID, pubsubTopic string, to
|
|||
// See https://github.com/vacp2p/rfc/issues/563
|
||||
msg.RateLimitProof = nil
|
||||
|
||||
envelope := protocol.NewEnvelope(msg, msg.Timestamp, pubsubTopic)
|
||||
envelope := protocol.NewEnvelope(msg, msg.GetTimestamp(), pubsubTopic)
|
||||
w.logger.Info("received waku2 store message", zap.Any("envelopeHash", hexutil.Encode(envelope.Hash())), zap.String("pubsubTopic", pubsubTopic))
|
||||
err = w.OnNewEnvelopes(envelope, common.StoreMessageType)
|
||||
if err != nil {
|
||||
|
@ -1335,7 +1336,7 @@ func (w *Waku) OnNewEnvelopes(envelope *protocol.Envelope, msgType common.Messag
|
|||
logger := w.logger.With(
|
||||
zap.String("envelopeHash", hexutil.Encode(envelope.Hash())),
|
||||
zap.String("contentTopic", envelope.Message().ContentTopic),
|
||||
zap.Int64("timestamp", envelope.Message().Timestamp),
|
||||
zap.Int64("timestamp", envelope.Message().GetTimestamp()),
|
||||
)
|
||||
|
||||
logger.Debug("received new envelope")
|
||||
|
@ -1413,7 +1414,7 @@ func (w *Waku) processQueue() {
|
|||
zap.String("envelopeHash", hexutil.Encode(e.Envelope.Hash())),
|
||||
zap.String("pubsubTopic", e.PubsubTopic),
|
||||
zap.String("contentTopic", e.ContentTopic.ContentTopic()),
|
||||
zap.Int64("timestamp", e.Envelope.Message().Timestamp),
|
||||
zap.Int64("timestamp", e.Envelope.Message().GetTimestamp()),
|
||||
)
|
||||
if e.MsgType == common.StoreMessageType {
|
||||
// We need to insert it first, and then remove it if not matched,
|
||||
|
|
|
@ -10,6 +10,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff/v3"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"golang.org/x/exp/maps"
|
||||
|
@ -158,8 +159,8 @@ func TestBasicWakuV2(t *testing.T) {
|
|||
_, err = w.Send(relay.DefaultWakuTopic, &pb.WakuMessage{
|
||||
Payload: []byte{1, 2, 3, 4, 5},
|
||||
ContentTopic: contentTopic.ContentTopic(),
|
||||
Version: 0,
|
||||
Timestamp: msgTimestamp,
|
||||
Version: proto.Uint32(0),
|
||||
Timestamp: &msgTimestamp,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -240,8 +241,8 @@ func TestWakuV2Filter(t *testing.T) {
|
|||
_, err = w.Send("", &pb.WakuMessage{
|
||||
Payload: []byte{1, 2, 3, 4, 5},
|
||||
ContentTopic: contentTopic.ContentTopic(),
|
||||
Version: 0,
|
||||
Timestamp: msgTimestamp,
|
||||
Version: proto.Uint32(0),
|
||||
Timestamp: &msgTimestamp,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -354,8 +355,8 @@ func TestWakuV2Store(t *testing.T) {
|
|||
_, err = w1.Send(relay.DefaultWakuTopic, &pb.WakuMessage{
|
||||
Payload: []byte{1, 2, 3, 4, 5},
|
||||
ContentTopic: contentTopic.ContentTopic(),
|
||||
Version: 0,
|
||||
Timestamp: msgTimestamp,
|
||||
Version: proto.Uint32(0),
|
||||
Timestamp: &msgTimestamp,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
|
|
Loading…
Reference in New Issue