mirror of https://github.com/status-im/go-waku.git
feat: improve metrics for filter, lightpush, dns discovery and store protocols
This commit is contained in:
parent 6550565afc
commit 68b615a87e
@@ -51,11 +51,24 @@ func NewMetricsServer(address string, port int, log *zap.Logger) *Server {
     // Register the views
     if err := view.Register(
         metrics.MessageView,
-        metrics.StoreMessagesView,
+        metrics.LegacyFilterErrorTypesView,
+        metrics.LegacyFilterMessagesView,
+        metrics.LegacyFilterSubscribersView,
+        metrics.LegacyFilterSubscriptionsView,
         metrics.FilterSubscriptionsView,
+        metrics.FilterErrorTypesView,
+        metrics.FilterHandleMessageDurationView,
+        metrics.FilterMessagesView,
+        metrics.FilterRequestDurationView,
+        metrics.FilterRequestsView,
         metrics.StoreErrorTypesView,
+        metrics.LightpushMessagesView,
         metrics.LightpushErrorTypesView,
+        metrics.DnsDiscoveryNodesView,
+        metrics.DnsDiscoveryErrorTypesView,
         metrics.StoreMessagesView,
+        metrics.StoreErrorTypesView,
+        metrics.StoreQueriesView,
         metrics.PeersView,
         metrics.DialsView,
         metrics.VersionView,
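Note (illustrative, not part of this commit): views registered as above only become visible once an exporter is attached to the OpenCensus view package. A minimal sketch of exposing them with the Prometheus exporter follows; the namespace, port and handler path are assumptions, not necessarily how go-waku's metrics server is wired.

    package main

    import (
        "log"
        "net/http"

        "contrib.go.opencensus.io/exporter/prometheus"
        "go.opencensus.io/stats/view"
    )

    func main() {
        // Create a Prometheus exporter and register it so view data is exported.
        pe, err := prometheus.NewExporter(prometheus.Options{Namespace: "gowaku"})
        if err != nil {
            log.Fatalf("creating prometheus exporter: %v", err)
        }
        view.RegisterExporter(pe)

        // The exporter implements http.Handler and serves the metrics endpoint.
        http.Handle("/metrics", pe)
        log.Fatal(http.ListenAndServe(":8008", nil))
    }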
@@ -7,6 +7,7 @@ import (
     "github.com/ethereum/go-ethereum/p2p/enode"
     "github.com/ethereum/go-ethereum/p2p/enr"
     "github.com/libp2p/go-libp2p/core/peer"
+    "github.com/waku-org/go-waku/waku/v2/metrics"
     "github.com/waku-org/go-waku/waku/v2/utils"

     ma "github.com/multiformats/go-multiaddr"
@@ -46,12 +47,14 @@ func RetrieveNodes(ctx context.Context, url string, opts ...DnsDiscoveryOption)

     tree, err := client.SyncTree(url)
     if err != nil {
+        metrics.RecordDnsDiscoveryError(ctx, "tree_sync_failure")
         return nil, err
     }

     for _, node := range tree.Nodes() {
         peerID, m, err := utils.Multiaddress(node)
         if err != nil {
+            metrics.RecordDnsDiscoveryError(ctx, "peer_info_failure")
             return nil, err
         }
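Note (illustrative usage sketch, not part of this commit): a caller of RetrieveNodes now causes a gowaku_dnsdisc_errors sample tagged "tree_sync_failure" or "peer_info_failure" whenever discovery fails. The import path, enrtree URL and return shape below are assumptions.

    package main

    import (
        "context"
        "fmt"

        // Path assumed for illustration; adjust to the actual module layout.
        "github.com/waku-org/go-waku/waku/v2/dnsdisc"
    )

    func main() {
        ctx := context.Background()
        // Placeholder enrtree URL; a failure here is counted in gowaku_dnsdisc_errors.
        nodes, err := dnsdisc.RetrieveNodes(ctx, "enrtree://<public-key>@<fqdn>")
        if err != nil {
            fmt.Println("dns discovery failed:", err)
            return
        }
        // Assumes the result is a slice of discovered nodes.
        fmt.Println("discovered", len(nodes), "nodes")
    }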
@@ -3,6 +3,7 @@ package metrics
 import (
     "context"
     "fmt"
+    "time"

     "github.com/waku-org/go-waku/waku/v2/utils"
     "go.opencensus.io/stats"
@@ -12,16 +13,34 @@ import (
 )

 var (
     WakuVersion = stats.Int64("waku_version", "", stats.UnitDimensionless)
     Messages = stats.Int64("node_messages", "Number of messages received", stats.UnitDimensionless)
     Peers = stats.Int64("peers", "Number of connected peers", stats.UnitDimensionless)
     Dials = stats.Int64("dials", "Number of peer dials", stats.UnitDimensionless)
-    StoreMessages = stats.Int64("store_messages", "Number of historical messages", stats.UnitDimensionless)
-    FilterSubscriptions = stats.Int64("filter_subscriptions", "Number of filter subscriptions", stats.UnitDimensionless)
-    StoreErrors = stats.Int64("errors", "Number of errors in store protocol", stats.UnitDimensionless)
-    StoreQueries = stats.Int64("store_queries", "Number of store queries", stats.UnitDimensionless)
-    LightpushErrors = stats.Int64("errors", "Number of errors in lightpush protocol", stats.UnitDimensionless)
-    PeerExchangeError = stats.Int64("errors", "Number of errors in peer exchange protocol", stats.UnitDimensionless)
+
+    LegacyFilterMessages = stats.Int64("legacy_filter_messages", "Number of legacy filter messages", stats.UnitDimensionless)
+    LegacyFilterSubscribers = stats.Int64("legacy_filter_subscribers", "Number of legacy filter subscribers", stats.UnitDimensionless)
+    LegacyFilterSubscriptions = stats.Int64("legacy_filter_subscriptions", "Number of legacy filter subscriptions", stats.UnitDimensionless)
+    LegacyFilterErrors = stats.Int64("legacy_filter_errors", "Number of errors in legacy filter protocol", stats.UnitDimensionless)
+
+    FilterMessages = stats.Int64("filter_messages", "Number of filter messages", stats.UnitDimensionless)
+    FilterRequests = stats.Int64("filter_requests", "Number of filter requests", stats.UnitDimensionless)
+    FilterSubscriptions = stats.Int64("filter_subscriptions", "Number of filter subscriptions", stats.UnitDimensionless)
+    FilterErrors = stats.Int64("filter_errors", "Number of errors in filter protocol", stats.UnitDimensionless)
+    FilterRequestDurationSeconds = stats.Int64("filter_request_duration_seconds", "Duration of Filter Subscribe Requests", stats.UnitSeconds)
+    FilterHandleMessageDurationSeconds = stats.Int64("filter_handle_msessageduration_seconds", "Duration to Push Message to Filter Subscribers", stats.UnitSeconds)
+
+    StoredMessages = stats.Int64("store_messages", "Number of historical messages", stats.UnitDimensionless)
+    StoreErrors = stats.Int64("errors", "Number of errors in store protocol", stats.UnitDimensionless)
+    StoreQueries = stats.Int64("store_queries", "Number of store queries", stats.UnitDimensionless)
+
+    LightpushMessages = stats.Int64("lightpush_messages", "Number of messages sent via lightpush protocol", stats.UnitDimensionless)
+    LightpushErrors = stats.Int64("errors", "Number of errors in lightpush protocol", stats.UnitDimensionless)
+
+    PeerExchangeError = stats.Int64("errors", "Number of errors in peer exchange protocol", stats.UnitDimensionless)
+
+    DnsDiscoveryNodes = stats.Int64("dnsdisc_nodes", "Number of discovered nodes", stats.UnitDimensionless)
+    DnsDiscoveryErrors = stats.Int64("errors", "Number of errors in dns discovery", stats.UnitDimensionless)
 )

 var (
@@ -49,6 +68,7 @@ var (
         Description: "The number of the messages received",
         Aggregation: view.Count(),
     }
+
     StoreQueriesView = &view.View{
         Name: "gowaku_store_queries",
         Measure: StoreQueries,
@@ -57,17 +77,11 @@ var (
     }
     StoreMessagesView = &view.View{
         Name: "gowaku_store_messages",
-        Measure: StoreMessages,
+        Measure: StoredMessages,
         Description: "The distribution of the store protocol messages",
         Aggregation: view.LastValue(),
         TagKeys: []tag.Key{KeyType},
     }
-    FilterSubscriptionsView = &view.View{
-        Name: "gowaku_filter_subscriptions",
-        Measure: FilterSubscriptions,
-        Description: "The number of content filter subscriptions",
-        Aggregation: view.LastValue(),
-    }
     StoreErrorTypesView = &view.View{
         Name: "gowaku_store_errors",
         Measure: StoreErrors,
@@ -75,6 +89,82 @@ var (
         Aggregation: view.Count(),
         TagKeys: []tag.Key{ErrorType},
     }
+
+    LegacyFilterSubscriptionsView = &view.View{
+        Name: "gowaku_legacy_filter_subscriptions",
+        Measure: LegacyFilterSubscriptions,
+        Description: "The number of legacy filter subscriptions",
+        Aggregation: view.Count(),
+    }
+    LegacyFilterSubscribersView = &view.View{
+        Name: "gowaku_legacy_filter_subscribers",
+        Measure: LegacyFilterSubscribers,
+        Description: "The number of legacy filter subscribers",
+        Aggregation: view.LastValue(),
+    }
+    LegacyFilterMessagesView = &view.View{
+        Name: "gowaku_legacy_filter_messages",
+        Measure: LegacyFilterMessages,
+        Description: "The distribution of the legacy filter protocol messages received",
+        Aggregation: view.Count(),
+        TagKeys: []tag.Key{KeyType},
+    }
+    LegacyFilterErrorTypesView = &view.View{
+        Name: "gowaku_legacy_filter_errors",
+        Measure: LegacyFilterErrors,
+        Description: "The distribution of the legacy filter protocol errors",
+        Aggregation: view.Count(),
+        TagKeys: []tag.Key{ErrorType},
+    }
+
+    FilterSubscriptionsView = &view.View{
+        Name: "gowaku_filter_subscriptions",
+        Measure: FilterSubscriptions,
+        Description: "The number of filter subscriptions",
+        Aggregation: view.Count(),
+    }
+    FilterRequestsView = &view.View{
+        Name: "gowaku_filter_requests",
+        Measure: FilterRequests,
+        Description: "The number of filter requests",
+        Aggregation: view.Count(),
+    }
+    FilterMessagesView = &view.View{
+        Name: "gowaku_filter_messages",
+        Measure: FilterMessages,
+        Description: "The distribution of the filter protocol messages received",
+        Aggregation: view.Count(),
+        TagKeys: []tag.Key{KeyType},
+    }
+    FilterErrorTypesView = &view.View{
+        Name: "gowaku_filter_errors",
+        Measure: FilterErrors,
+        Description: "The distribution of the filter protocol errors",
+        Aggregation: view.Count(),
+        TagKeys: []tag.Key{ErrorType},
+    }
+    FilterRequestDurationView = &view.View{
+        Name: "gowaku_filter_request_duration_seconds",
+        Measure: FilterRequestDurationSeconds,
+        Description: "Duration of Filter Subscribe Requests",
+        Aggregation: view.Count(),
+        TagKeys: []tag.Key{ErrorType},
+    }
+    FilterHandleMessageDurationView = &view.View{
+        Name: "gowaku_filter_handle_msessageduration_seconds",
+        Measure: FilterHandleMessageDurationSeconds,
+        Description: "Duration to Push Message to Filter Subscribers",
+        Aggregation: view.Count(),
+        TagKeys: []tag.Key{ErrorType},
+    }
+
+    LightpushMessagesView = &view.View{
+        Name: "gowaku_lightpush_messages",
+        Measure: StoredMessages,
+        Description: "The distribution of the lightpush protocol messages",
+        Aggregation: view.LastValue(),
+        TagKeys: []tag.Key{KeyType},
+    }
     LightpushErrorTypesView = &view.View{
         Name: "gowaku_lightpush_errors",
         Measure: LightpushErrors,
@@ -89,6 +179,19 @@ var (
         Aggregation: view.LastValue(),
         TagKeys: []tag.Key{GitVersion},
     }
+    DnsDiscoveryNodesView = &view.View{
+        Name: "gowaku_dnsdisc_discovered",
+        Measure: DnsDiscoveryNodes,
+        Description: "The number of nodes discovered via DNS discovery",
+        Aggregation: view.Count(),
+    }
+    DnsDiscoveryErrorTypesView = &view.View{
+        Name: "gowaku_dnsdisc_errors",
+        Measure: DnsDiscoveryErrors,
+        Description: "The distribution of the dns discovery protocol errors",
+        Aggregation: view.Count(),
+        TagKeys: []tag.Key{ErrorType},
+    }
 )

 func recordWithTags(ctx context.Context, tagKey tag.Key, tagType string, ms stats.Measurement) {
@@ -97,16 +200,53 @@ func recordWithTags(ctx context.Context, tagKey tag.Key, tagType string, ms stat
     }
 }

+func RecordLightpushMessage(ctx context.Context, tagType string) {
+    if err := stats.RecordWithTags(ctx, []tag.Mutator{tag.Insert(KeyType, tagType)}, LightpushMessages.M(1)); err != nil {
+        utils.Logger().Error("failed to record with tags", zap.Error(err))
+    }
+}
+
 func RecordLightpushError(ctx context.Context, tagType string) {
     recordWithTags(ctx, ErrorType, tagType, LightpushErrors.M(1))
 }

+func RecordLegacyFilterError(ctx context.Context, tagType string) {
+    recordWithTags(ctx, ErrorType, tagType, LegacyFilterErrors.M(1))
+}
+
+func RecordFilterError(ctx context.Context, tagType string) {
+    recordWithTags(ctx, ErrorType, tagType, FilterErrors.M(1))
+}
+
+func RecordFilterRequest(ctx context.Context, tagType string, duration time.Duration) {
+    if err := stats.RecordWithTags(ctx, []tag.Mutator{tag.Insert(KeyType, tagType)}, FilterRequests.M(1)); err != nil {
+        utils.Logger().Error("failed to record with tags", zap.Error(err))
+    }
+    FilterRequestDurationSeconds.M(int64(duration.Seconds()))
+}
+
+func RecordFilterMessage(ctx context.Context, tagType string, len int) {
+    if err := stats.RecordWithTags(ctx, []tag.Mutator{tag.Insert(KeyType, tagType)}, FilterMessages.M(int64(len))); err != nil {
+        utils.Logger().Error("failed to record with tags", zap.Error(err))
+    }
+}
+
+func RecordLegacyFilterMessage(ctx context.Context, tagType string, len int) {
+    if err := stats.RecordWithTags(ctx, []tag.Mutator{tag.Insert(KeyType, tagType)}, LegacyFilterMessages.M(int64(len))); err != nil {
+        utils.Logger().Error("failed to record with tags", zap.Error(err))
+    }
+}
+
 func RecordPeerExchangeError(ctx context.Context, tagType string) {
     recordWithTags(ctx, ErrorType, tagType, PeerExchangeError.M(1))
 }

-func RecordMessage(ctx context.Context, tagType string, len int) {
-    if err := stats.RecordWithTags(ctx, []tag.Mutator{tag.Insert(KeyType, tagType)}, StoreMessages.M(int64(len))); err != nil {
+func RecordDnsDiscoveryError(ctx context.Context, tagType string) {
+    recordWithTags(ctx, ErrorType, tagType, DnsDiscoveryErrors.M(1))
+}
+
+func RecordStoreMessage(ctx context.Context, tagType string, len int) {
+    if err := stats.RecordWithTags(ctx, []tag.Mutator{tag.Insert(KeyType, tagType)}, StoredMessages.M(int64(len))); err != nil {
         utils.Logger().Error("failed to record with tags", zap.Error(err))
     }
 }
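Note (illustrative, not part of this commit): the helpers above all follow the same OpenCensus pattern — an Int64 measure, a view that aggregates it, and tagged recordings. A minimal self-contained sketch of that pattern; the measure, view and tag names here are made up for the example.

    package main

    import (
        "context"
        "log"

        "go.opencensus.io/stats"
        "go.opencensus.io/stats/view"
        "go.opencensus.io/tag"
    )

    var (
        errorType  = tag.MustNewKey("error")
        demoErrors = stats.Int64("demo_errors", "Number of demo errors", stats.UnitDimensionless)

        demoErrorsView = &view.View{
            Name:        "demo_errors",
            Measure:     demoErrors,
            Description: "The distribution of demo errors",
            Aggregation: view.Count(),
            TagKeys:     []tag.Key{errorType},
        }
    )

    func main() {
        if err := view.Register(demoErrorsView); err != nil {
            log.Fatal(err)
        }
        // Record one error tagged with its type, mirroring recordWithTags above.
        err := stats.RecordWithTags(context.Background(),
            []tag.Mutator{tag.Insert(errorType, "dial_failure")},
            demoErrors.M(1))
        if err != nil {
            log.Println("failed to record with tags:", err)
        }
    }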
@@ -119,9 +119,12 @@ func (wf *WakuFilterLightnode) onRequest(ctx context.Context) func(s network.Str
     err := reader.ReadMsg(messagePush)
     if err != nil {
         logger.Error("reading message push", zap.Error(err))
+        metrics.RecordFilterError(ctx, "decode_rpc_failure")
         return
     }

+    metrics.RecordFilterMessage(ctx, "PushMessage", 1)
+
     wf.notify(s.Conn().RemotePeer(), messagePush.PubsubTopic, messagePush.WakuMessage)

     logger.Info("received message push")
@@ -141,12 +144,14 @@ func (wf *WakuFilterLightnode) notify(remotePeerID peer.ID, pubsubTopic string,
 func (wf *WakuFilterLightnode) request(ctx context.Context, params *FilterSubscribeParameters, reqType pb.FilterSubscribeRequest_FilterSubscribeType, contentFilter ContentFilter) error {
     err := wf.h.Connect(ctx, wf.h.Peerstore().PeerInfo(params.selectedPeer))
     if err != nil {
+        metrics.RecordFilterError(ctx, "dial_failure")
         return err
     }

     var conn network.Stream
     conn, err = wf.h.NewStream(ctx, params.selectedPeer, FilterSubscribeID_v20beta1)
     if err != nil {
+        metrics.RecordFilterError(ctx, "dial_failure")
         return err
     }
     defer conn.Close()
@@ -164,6 +169,7 @@ func (wf *WakuFilterLightnode) request(ctx context.Context, params *FilterSubscr
     wf.log.Debug("sending FilterSubscribeRequest", zap.Stringer("request", request))
     err = writer.WriteMsg(request)
     if err != nil {
+        metrics.RecordFilterError(ctx, "write_request_failure")
         wf.log.Error("sending FilterSubscribeRequest", zap.Error(err))
         return err
     }
@@ -172,10 +178,19 @@ func (wf *WakuFilterLightnode) request(ctx context.Context, params *FilterSubscr
     err = reader.ReadMsg(filterSubscribeResponse)
     if err != nil {
         wf.log.Error("receiving FilterSubscribeResponse", zap.Error(err))
+        metrics.RecordFilterError(ctx, "decode_rpc_failure")
         return err
     }

+    if filterSubscribeResponse.RequestId != request.RequestId {
+        wf.log.Error("requestId mismatch", zap.String("expected", request.RequestId), zap.String("received", filterSubscribeResponse.RequestId))
+        metrics.RecordFilterError(ctx, "request_id_mismatch")
+        err := NewFilterError(300, "request_id_mismatch")
+        return &err
+    }
+
     if filterSubscribeResponse.StatusCode != http.StatusOK {
+        metrics.RecordFilterError(ctx, "error_response")
         err := NewFilterError(int(filterSubscribeResponse.StatusCode), filterSubscribeResponse.StatusDesc)
         return &err
     }
@@ -208,6 +223,7 @@ func (wf *WakuFilterLightnode) Subscribe(ctx context.Context, contentFilter Cont
     }

     if params.selectedPeer == "" {
+        metrics.RecordFilterError(ctx, "peer_not_found_failure")
         return nil, ErrNoPeersAvailable
     }

@@ -7,6 +7,7 @@ import (
     "math"
     "net/http"
     "sync"
+    "time"

     "github.com/libp2p/go-libp2p/core/host"
     "github.com/libp2p/go-libp2p/core/network"
@@ -19,6 +20,7 @@ import (
     "github.com/waku-org/go-waku/waku/v2/protocol"
     "github.com/waku-org/go-waku/waku/v2/protocol/filter/pb"
     "github.com/waku-org/go-waku/waku/v2/timesource"
+    "go.opencensus.io/stats"
     "go.opencensus.io/tag"
     "go.uber.org/zap"
 )
@@ -101,28 +103,33 @@ func (wf *WakuFilterFullNode) onRequest(ctx context.Context) func(s network.Stre
     subscribeRequest := &pb.FilterSubscribeRequest{}
     err := reader.ReadMsg(subscribeRequest)
     if err != nil {
+        metrics.RecordFilterError(ctx, "decode_rpc_failure")
         logger.Error("reading request", zap.Error(err))
         return
     }

     logger = logger.With(zap.String("requestID", subscribeRequest.RequestId))

+    start := time.Now()
+
     switch subscribeRequest.FilterSubscribeType {
     case pb.FilterSubscribeRequest_SUBSCRIBE:
-        wf.subscribe(s, logger, subscribeRequest)
+        wf.subscribe(ctx, s, logger, subscribeRequest)
     case pb.FilterSubscribeRequest_SUBSCRIBER_PING:
-        wf.ping(s, logger, subscribeRequest)
+        wf.ping(ctx, s, logger, subscribeRequest)
     case pb.FilterSubscribeRequest_UNSUBSCRIBE:
-        wf.unsubscribe(s, logger, subscribeRequest)
+        wf.unsubscribe(ctx, s, logger, subscribeRequest)
     case pb.FilterSubscribeRequest_UNSUBSCRIBE_ALL:
-        wf.unsubscribeAll(s, logger, subscribeRequest)
+        wf.unsubscribeAll(ctx, s, logger, subscribeRequest)
     }

+    metrics.RecordFilterRequest(ctx, subscribeRequest.FilterSubscribeType.String(), time.Since(start))
+
     logger.Info("received request")
     }
 }

-func reply(s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequest, statusCode int, description ...string) {
+func reply(ctx context.Context, s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequest, statusCode int, description ...string) {
     response := &pb.FilterSubscribeResponse{
         RequestId: request.RequestId,
         StatusCode: uint32(statusCode),
@@ -137,37 +144,38 @@ func reply(s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequ
     writer := pbio.NewDelimitedWriter(s)
     err := writer.WriteMsg(response)
     if err != nil {
+        metrics.RecordFilterError(ctx, "write_response_failure")
         logger.Error("sending response", zap.Error(err))
     }
 }

-func (wf *WakuFilterFullNode) ping(s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequest) {
+func (wf *WakuFilterFullNode) ping(ctx context.Context, s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequest) {
     exists := wf.subscriptions.Has(s.Conn().RemotePeer())

     if exists {
-        reply(s, logger, request, http.StatusOK)
+        reply(ctx, s, logger, request, http.StatusOK)
     } else {
-        reply(s, logger, request, http.StatusNotFound, peerHasNoSubscription)
+        reply(ctx, s, logger, request, http.StatusNotFound, peerHasNoSubscription)
     }
 }

-func (wf *WakuFilterFullNode) subscribe(s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequest) {
+func (wf *WakuFilterFullNode) subscribe(ctx context.Context, s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequest) {
     if request.PubsubTopic == "" {
-        reply(s, logger, request, http.StatusBadRequest, "pubsubtopic can't be empty")
+        reply(ctx, s, logger, request, http.StatusBadRequest, "pubsubtopic can't be empty")
         return
     }

     if len(request.ContentTopics) == 0 {
-        reply(s, logger, request, http.StatusBadRequest, "at least one contenttopic should be specified")
+        reply(ctx, s, logger, request, http.StatusBadRequest, "at least one contenttopic should be specified")
         return
     }

     if len(request.ContentTopics) > MaxContentTopicsPerRequest {
-        reply(s, logger, request, http.StatusBadRequest, fmt.Sprintf("exceeds maximum content topics: %d", MaxContentTopicsPerRequest))
+        reply(ctx, s, logger, request, http.StatusBadRequest, fmt.Sprintf("exceeds maximum content topics: %d", MaxContentTopicsPerRequest))
     }

     if wf.subscriptions.Count() >= wf.maxSubscriptions {
-        reply(s, logger, request, http.StatusServiceUnavailable, "node has reached maximum number of subscriptions")
+        reply(ctx, s, logger, request, http.StatusServiceUnavailable, "node has reached maximum number of subscriptions")
         return
     }

@@ -180,45 +188,49 @@ func (wf *WakuFilterFullNode) subscribe(s network.Stream, logger *zap.Logger, re
     }

     if ctTotal+len(request.ContentTopics) > MaxCriteriaPerSubscription {
-        reply(s, logger, request, http.StatusServiceUnavailable, "peer has reached maximum number of filter criteria")
+        reply(ctx, s, logger, request, http.StatusServiceUnavailable, "peer has reached maximum number of filter criteria")
         return
     }
     }

     wf.subscriptions.Set(peerID, request.PubsubTopic, request.ContentTopics)

-    reply(s, logger, request, http.StatusOK)
+    stats.Record(ctx, metrics.FilterSubscriptions.M(int64(wf.subscriptions.Count())))
+
+    reply(ctx, s, logger, request, http.StatusOK)
 }

-func (wf *WakuFilterFullNode) unsubscribe(s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequest) {
+func (wf *WakuFilterFullNode) unsubscribe(ctx context.Context, s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequest) {
     if request.PubsubTopic == "" {
-        reply(s, logger, request, http.StatusBadRequest, "pubsubtopic can't be empty")
+        reply(ctx, s, logger, request, http.StatusBadRequest, "pubsubtopic can't be empty")
         return
     }

     if len(request.ContentTopics) == 0 {
-        reply(s, logger, request, http.StatusBadRequest, "at least one contenttopic should be specified")
+        reply(ctx, s, logger, request, http.StatusBadRequest, "at least one contenttopic should be specified")
         return
     }

     if len(request.ContentTopics) > MaxContentTopicsPerRequest {
-        reply(s, logger, request, http.StatusBadRequest, fmt.Sprintf("exceeds maximum content topics: %d", MaxContentTopicsPerRequest))
+        reply(ctx, s, logger, request, http.StatusBadRequest, fmt.Sprintf("exceeds maximum content topics: %d", MaxContentTopicsPerRequest))
     }

     err := wf.subscriptions.Delete(s.Conn().RemotePeer(), request.PubsubTopic, request.ContentTopics)
     if err != nil {
-        reply(s, logger, request, http.StatusNotFound, peerHasNoSubscription)
+        reply(ctx, s, logger, request, http.StatusNotFound, peerHasNoSubscription)
     } else {
-        reply(s, logger, request, http.StatusOK)
+        stats.Record(ctx, metrics.FilterSubscriptions.M(int64(wf.subscriptions.Count())))
+        reply(ctx, s, logger, request, http.StatusOK)
     }
 }

-func (wf *WakuFilterFullNode) unsubscribeAll(s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequest) {
+func (wf *WakuFilterFullNode) unsubscribeAll(ctx context.Context, s network.Stream, logger *zap.Logger, request *pb.FilterSubscribeRequest) {
     err := wf.subscriptions.DeleteAll(s.Conn().RemotePeer())
     if err != nil {
-        reply(s, logger, request, http.StatusNotFound, peerHasNoSubscription)
+        reply(ctx, s, logger, request, http.StatusNotFound, peerHasNoSubscription)
     } else {
-        reply(s, logger, request, http.StatusOK)
+        stats.Record(ctx, metrics.FilterSubscriptions.M(int64(wf.subscriptions.Count())))
+        reply(ctx, s, logger, request, http.StatusOK)
     }
 }

@@ -242,10 +254,14 @@ func (wf *WakuFilterFullNode) filterListener(ctx context.Context) {
     wf.wg.Add(1)
     go func(subscriber peer.ID) {
         defer wf.wg.Done()
+        start := time.Now()
         err := wf.pushMessage(ctx, subscriber, envelope)
         if err != nil {
             logger.Error("pushing message", zap.Error(err))
+            return
         }
+        ellapsed := time.Since(start)
+        metrics.FilterHandleMessageDurationSeconds.M(int64(ellapsed.Seconds()))
     }(subscriber)
 }

@@ -274,6 +290,11 @@ func (wf *WakuFilterFullNode) pushMessage(ctx context.Context, peerID peer.ID, e
     err := wf.h.Connect(ctx, wf.h.Peerstore().PeerInfo(peerID))
     if err != nil {
         wf.subscriptions.FlagAsFailure(peerID)
+        if errors.Is(context.DeadlineExceeded, err) {
+            metrics.RecordFilterError(ctx, "push_timeout_failure")
+        } else {
+            metrics.RecordFilterError(ctx, "dial_failure")
+        }
         logger.Error("connecting to peer", zap.Error(err))
         return err
     }
@@ -281,9 +302,12 @@
     conn, err := wf.h.NewStream(ctx, peerID, FilterPushID_v20beta1)
     if err != nil {
         wf.subscriptions.FlagAsFailure(peerID)
+        if errors.Is(context.DeadlineExceeded, err) {
+            metrics.RecordFilterError(ctx, "push_timeout_failure")
+        } else {
+            metrics.RecordFilterError(ctx, "dial_failure")
+        }
         logger.Error("opening peer stream", zap.Error(err))
-        //waku_filter_errors.inc(labelValues = [dialFailure])
         return err
     }

@@ -291,6 +315,11 @@
     writer := pbio.NewDelimitedWriter(conn)
     err = writer.WriteMsg(messagePush)
     if err != nil {
+        if errors.Is(context.DeadlineExceeded, err) {
+            metrics.RecordFilterError(ctx, "push_timeout_failure")
+        } else {
+            metrics.RecordFilterError(ctx, "response_write_failure")
+        }
         logger.Error("pushing messages to peer", zap.Error(err))
         wf.subscriptions.FlagAsFailure(peerID)
         return nil
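Note (illustrative, not part of this commit): the push paths above tag each failure as either a timeout or a dial/write error. A small self-contained helper sketching that classification in the conventional errors.Is argument order; the helper name is made up for the example.

    package main

    import (
        "context"
        "errors"
        "fmt"
    )

    // filterErrorTag maps a dial/push error onto the ErrorType tag values
    // recorded by metrics.RecordFilterError in the handlers above.
    func filterErrorTag(err error) string {
        if errors.Is(err, context.DeadlineExceeded) {
            return "push_timeout_failure"
        }
        return "dial_failure"
    }

    func main() {
        fmt.Println(filterErrorTag(context.DeadlineExceeded))         // push_timeout_failure
        fmt.Println(filterErrorTag(errors.New("connection refused"))) // dial_failure
    }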
@@ -125,6 +125,7 @@ func (wf *WakuFilter) onRequest(ctx context.Context) func(s network.Stream) {

     err := reader.ReadMsg(filterRPCRequest)
     if err != nil {
+        metrics.RecordLegacyFilterError(ctx, "decode_rpc_failure")
         logger.Error("reading request", zap.Error(err))
         return
     }
@@ -139,7 +140,7 @@
         }

         logger.Info("received a message push", zap.Int("messages", len(filterRPCRequest.Push.Messages)))
-        stats.Record(ctx, metrics.Messages.M(int64(len(filterRPCRequest.Push.Messages))))
+        metrics.RecordLegacyFilterMessage(ctx, "FilterRequest", len(filterRPCRequest.Push.Messages))
     } else if filterRPCRequest.Request != nil && wf.isFullNode {
         // We're on a full node.
         // This is a filter request coming from a light node.
@@ -152,13 +153,13 @@
             len := wf.subscribers.Append(subscriber)

             logger.Info("adding subscriber")
-            stats.Record(ctx, metrics.FilterSubscriptions.M(int64(len)))
+            stats.Record(ctx, metrics.LegacyFilterSubscribers.M(int64(len)))
         } else {
             peerId := s.Conn().RemotePeer()
             wf.subscribers.RemoveContentFilters(peerId, filterRPCRequest.RequestId, filterRPCRequest.Request.ContentFilters)

             logger.Info("removing subscriber")
-            stats.Record(ctx, metrics.FilterSubscriptions.M(int64(wf.subscribers.Length())))
+            stats.Record(ctx, metrics.LegacyFilterSubscribers.M(int64(wf.subscribers.Length())))
         }
     } else {
         logger.Error("can't serve request")
@@ -176,15 +177,15 @@ func (wf *WakuFilter) pushMessage(ctx context.Context, subscriber Subscriber, ms
     if err != nil {
         wf.subscribers.FlagAsFailure(subscriber.peer)
         logger.Error("connecting to peer", zap.Error(err))
+        metrics.RecordLegacyFilterError(ctx, "dial_failure")
         return err
     }

     conn, err := wf.h.NewStream(ctx, subscriber.peer, FilterID_v20beta1)
     if err != nil {
         wf.subscribers.FlagAsFailure(subscriber.peer)
-
         logger.Error("opening peer stream", zap.Error(err))
-        //waku_filter_errors.inc(labelValues = [dialFailure])
+        metrics.RecordLegacyFilterError(ctx, "dial_failure")
         return err
     }

@@ -194,6 +195,7 @@
     if err != nil {
         logger.Error("pushing messages to peer", zap.Error(err))
         wf.subscribers.FlagAsFailure(subscriber.peer)
+        metrics.RecordLegacyFilterError(ctx, "push_write_error")
         return nil
     }

@@ -259,6 +261,7 @@ func (wf *WakuFilter) requestSubscription(ctx context.Context, filter ContentFil
     }

     if params.selectedPeer == "" {
+        metrics.RecordLegacyFilterError(ctx, "peer_not_found_failure")
         return nil, ErrNoPeersAvailable
     }

@@ -270,6 +273,7 @@
     // We connect first so dns4 addresses are resolved (NewStream does not do it)
     err = wf.h.Connect(ctx, wf.h.Peerstore().PeerInfo(params.selectedPeer))
     if err != nil {
+        metrics.RecordLegacyFilterError(ctx, "dial_failure")
         return
     }

@@ -282,6 +286,7 @@
     var conn network.Stream
     conn, err = wf.h.NewStream(ctx, params.selectedPeer, FilterID_v20beta1)
     if err != nil {
+        metrics.RecordLegacyFilterError(ctx, "dial_failure")
         return
     }

@@ -295,6 +300,7 @@
     wf.log.Debug("sending filterRPC", zap.Stringer("rpc", filterRPC))
     err = writer.WriteMsg(filterRPC)
     if err != nil {
+        metrics.RecordLegacyFilterError(ctx, "request_write_error")
         wf.log.Error("sending filterRPC", zap.Error(err))
         return
     }
@@ -311,11 +317,13 @@ func (wf *WakuFilter) Unsubscribe(ctx context.Context, contentFilter ContentFilt
     // We connect first so dns4 addresses are resolved (NewStream does not do it)
     err := wf.h.Connect(ctx, wf.h.Peerstore().PeerInfo(peer))
     if err != nil {
+        metrics.RecordLegacyFilterError(ctx, "dial_failure")
         return err
     }

     conn, err := wf.h.NewStream(ctx, peer, FilterID_v20beta1)
     if err != nil {
+        metrics.RecordLegacyFilterError(ctx, "dial_failure")
         return err
     }

@@ -339,6 +347,7 @@
     filterRPC := &pb.FilterRPC{RequestId: hex.EncodeToString(id), Request: request}
     err = writer.WriteMsg(filterRPC)
     if err != nil {
+        metrics.RecordLegacyFilterError(ctx, "request_write_error")
         return err
     }

@@ -81,7 +81,7 @@ func (wakuLP *WakuLightPush) onRequest(ctx context.Context) func(s network.Strea
     err := reader.ReadMsg(requestPushRPC)
     if err != nil {
         logger.Error("reading request", zap.Error(err))
-        metrics.RecordLightpushError(ctx, "decodeRpcFailure")
+        metrics.RecordLightpushError(ctx, "decode_rpc_failure")
         return
     }

@@ -93,6 +93,8 @@
     pubSubTopic := requestPushRPC.Query.PubsubTopic
     message := requestPushRPC.Query.Message

+    metrics.RecordLightpushMessage(ctx, "PushRequest")
+
     // TODO: Assumes success, should probably be extended to check for network, peers, etc
     // It might make sense to use WithReadiness option here?

@@ -100,6 +102,7 @@ func (wakuLP *WakuLightPush) onRequest(ctx context.Context) func(s network.Strea

     if err != nil {
         logger.Error("publishing message", zap.Error(err))
+        metrics.RecordLightpushError(ctx, "message_push_failure")
         response.Info = "Could not publish message"
     } else {
         response.IsSuccess = true
@@ -112,11 +115,14 @@ func (wakuLP *WakuLightPush) onRequest(ctx context.Context) func(s network.Strea

         err = writer.WriteMsg(responsePushRPC)
         if err != nil {
+            metrics.RecordLightpushError(ctx, "response_write_failure")
             logger.Error("writing response", zap.Error(err))
             _ = s.Reset()
         } else {
             logger.Info("response sent")
         }
+    } else {
+        metrics.RecordLightpushError(ctx, "empty_request_body_failure")
     }

     if requestPushRPC.Response != nil {
@@ -125,6 +131,8 @@ func (wakuLP *WakuLightPush) onRequest(ctx context.Context) func(s network.Strea
         } else {
             logger.Info("request failure", zap.String("info=", requestPushRPC.Response.Info))
         }
+    } else {
+        metrics.RecordLightpushError(ctx, "empty_response_body_failure")
     }
     }
 }
@@ -140,7 +148,7 @@ func (wakuLP *WakuLightPush) request(ctx context.Context, req *pb.PushRequest, o
     }

     if params.selectedPeer == "" {
-        metrics.RecordLightpushError(ctx, "dialError")
+        metrics.RecordLightpushError(ctx, "peer_not_found_failure")
         return nil, ErrNoPeersAvailable
     }

@@ -152,6 +160,7 @@
     // We connect first so dns4 addresses are resolved (NewStream does not do it)
     err := wakuLP.h.Connect(ctx, wakuLP.h.Peerstore().PeerInfo(params.selectedPeer))
     if err != nil {
+        metrics.RecordLightpushError(ctx, "dial_failure")
         logger.Error("connecting peer", zap.Error(err))
         return nil, err
     }
@@ -159,7 +168,7 @@
     connOpt, err := wakuLP.h.NewStream(ctx, params.selectedPeer, LightPushID_v20beta1)
     if err != nil {
         logger.Error("creating stream to peer", zap.Error(err))
-        metrics.RecordLightpushError(ctx, "dialError")
+        metrics.RecordLightpushError(ctx, "dial_failure")
         return nil, err
     }

@@ -167,7 +176,7 @@
     defer func() {
         err := connOpt.Reset()
         if err != nil {
-            metrics.RecordLightpushError(ctx, "dialError")
+            metrics.RecordLightpushError(ctx, "dial_failure")
             logger.Error("resetting connection", zap.Error(err))
         }
     }()
@@ -179,6 +188,7 @@

     err = writer.WriteMsg(pushRequestRPC)
     if err != nil {
+        metrics.RecordLightpushError(ctx, "request_write_failure")
         logger.Error("writing request", zap.Error(err))
         return nil, err
     }
@@ -187,7 +197,7 @@
     err = reader.ReadMsg(pushResponseRPC)
     if err != nil {
         logger.Error("reading response", zap.Error(err))
-        metrics.RecordLightpushError(ctx, "decodeRPCFailure")
+        metrics.RecordLightpushError(ctx, "decode_rpc_failure")
         return nil, err
     }

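Note (illustrative, not part of this commit): the lightpush error counters can be read back in-process, for example from a test that exercises an error path. The view name matches the gowaku_lightpush_errors view registered above; everything else is a sketch.

    package main

    import (
        "fmt"

        "go.opencensus.io/stats/view"
    )

    // dumpLightpushErrors prints the rows currently aggregated for the
    // lightpush error view, one per ErrorType tag value (Count aggregation).
    func dumpLightpushErrors() {
        rows, err := view.RetrieveData("gowaku_lightpush_errors")
        if err != nil {
            fmt.Println("view not registered:", err)
            return
        }
        for _, row := range rows {
            fmt.Println(row.Tags, row.Data)
        }
    }

    func main() { dumpLightpushErrors() }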
@@ -180,12 +180,14 @@ func (store *WakuStore) queryFrom(ctx context.Context, q *pb.HistoryQuery, selec
     err := store.h.Connect(ctx, store.h.Peerstore().PeerInfo(selectedPeer))
     if err != nil {
         logger.Error("connecting to peer", zap.Error(err))
+        metrics.RecordStoreError(store.ctx, "dial_failure")
         return nil, err
     }

     connOpt, err := store.h.NewStream(ctx, selectedPeer, StoreID_v20beta4)
     if err != nil {
         logger.Error("creating stream to peer", zap.Error(err))
+        metrics.RecordStoreError(store.ctx, "dial_failure")
         return nil, err
     }

@@ -202,6 +204,7 @@
     err = writer.WriteMsg(historyRequest)
     if err != nil {
         logger.Error("writing request", zap.Error(err))
+        metrics.RecordStoreError(store.ctx, "write_request_failure")
         return nil, err
     }

@@ -209,7 +212,7 @@
     err = reader.ReadMsg(historyResponseRPC)
     if err != nil {
         logger.Error("reading response", zap.Error(err))
-        metrics.RecordStoreError(store.ctx, "decodeRPCFailure")
+        metrics.RecordStoreError(store.ctx, "decode_rpc_failure")
         return nil, err
     }

@@ -220,7 +223,7 @@
         }, nil
     }

-    metrics.RecordMessage(ctx, "retrieved", len(historyResponseRPC.Response.Messages))
+    metrics.RecordStoreMessage(ctx, "retrieved", len(historyResponseRPC.Response.Messages))

     return historyResponseRPC.Response, nil
 }
@@ -275,6 +278,7 @@ func (store *WakuStore) Query(ctx context.Context, query Query, opts ...HistoryR
     }

     if !params.localQuery && params.selectedPeer == "" {
+        metrics.RecordStoreError(ctx, "peer_not_found_failure")
         return nil, ErrNoPeersAvailable
     }

@@ -120,7 +120,7 @@ func (store *WakuStore) Start(ctx context.Context) error {
     err := store.msgProvider.Start(ctx, store.timesource) // TODO: store protocol should not start a message provider
     if err != nil {
         store.log.Error("Error starting message provider", zap.Error(err))
-        return nil
+        return err
     }

     store.started = true
@@ -179,7 +179,7 @@ func (store *WakuStore) updateMetrics(ctx context.Context) {
         if err != nil {
             store.log.Error("updating store metrics", zap.Error(err))
         } else {
-            metrics.RecordMessage(store.ctx, "stored", msgCount)
+            metrics.RecordStoreMessage(store.ctx, "stored", msgCount)
         }
     case <-ctx.Done():
         return
@@ -198,7 +198,7 @@ func (store *WakuStore) onRequest(s network.Stream) {
     err := reader.ReadMsg(historyRPCRequest)
     if err != nil {
         logger.Error("reading request", zap.Error(err))
-        metrics.RecordStoreError(store.ctx, "decodeRPCFailure")
+        metrics.RecordStoreError(store.ctx, "decode_rpc_failure")
         return
     }

@@ -207,7 +207,7 @@
         logger = logger.With(logging.Filters(query.GetContentFilters()))
     } else {
         logger.Error("reading request", zap.Error(err))
-        metrics.RecordStoreError(store.ctx, "emptyRpcQueryFailure")
+        metrics.RecordStoreError(store.ctx, "empty_rpc_query_failure")
         return
     }

@@ -222,6 +222,7 @@
     err = writer.WriteMsg(historyResponseRPC)
     if err != nil {
         logger.Error("writing response", zap.Error(err), logging.PagingInfo(historyResponseRPC.Response.PagingInfo))
+        metrics.RecordStoreError(store.ctx, "response_write_failure")
         _ = s.Reset()
     } else {
         logger.Info("response sent")
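Note (illustrative, not part of this commit): updateMetrics above follows a periodic gauge pattern — the stored-message count is re-recorded on each tick and the store messages view keeps only the latest sample via LastValue. A sketch of that loop, assuming the metrics import path shown earlier in this diff; countStoredMessages is a hypothetical stand-in for the real message-provider call.

    package example

    import (
        "context"
        "time"

        "github.com/waku-org/go-waku/waku/v2/metrics"
    )

    // updateStoredGauge re-records the stored-message count every tick so the
    // view's LastValue aggregation always reflects the most recent count.
    func updateStoredGauge(ctx context.Context, countStoredMessages func() (int, error)) {
        ticker := time.NewTicker(3 * time.Second)
        defer ticker.Stop()
        for {
            select {
            case <-ticker.C:
                if msgCount, err := countStoredMessages(); err == nil {
                    metrics.RecordStoreMessage(ctx, "stored", msgCount)
                }
            case <-ctx.Done():
                return
            }
        }
    }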