Mirror of https://github.com/logos-messaging/logos-messaging-go.git, synced 2026-01-02 14:03:06 +00:00

Merge branch 'master' into e2e-reliability
This commit is contained in commit 14155d87f6.

.github/docker-compose/nwaku.yml (vendored), 2 lines changed
@@ -1,6 +1,6 @@
 services:
   nwaku:
     image: "harbor.status.im/wakuorg/nwaku:latest"
-    command: ["--relay", "--store", "--nodekey=1122334455667788990011223344556677889900112233445566778899001122"]
+    command: ["--relay", "--store", "--nodekey=1122334455667788990011223344556677889900112233445566778899001122", "--cluster-id=99", "--pubsub-topic=/waku/2/rs/99/1"]
     ports:
       - "60000"

.github/workflows/ci.yml (vendored), 2 lines changed

@@ -163,7 +163,7 @@ jobs:
       - name: "Run storev3 tests"
         run: |
           docker compose -f .github/docker-compose/nwaku.yml up -d
-          NWAKU_HOST=$(docker-compose -f .github/docker-compose/nwaku.yml port nwaku 60000)
+          NWAKU_HOST=$(docker compose -f .github/docker-compose/nwaku.yml port nwaku 60000)
           NWAKU_PORT=$(echo $NWAKU_HOST | cut -d ":" -f 2)
           sleep 5
           make test-storev3 TEST_STOREV3_NODE="/ip4/127.0.0.1/tcp/${NWAKU_PORT}/p2p/16Uiu2HAmMGhfSTUzKbsjMWxc6T1X4wiTWSF1bEWSLjAukCm7KiHV"
@@ -1,4 +1,4 @@
-library 'status-jenkins-lib@v1.7.0'
+library 'status-jenkins-lib@v1.9.3'

 pipeline {
   agent {
@@ -27,10 +27,7 @@ pipeline {
     stages {
       stage('Build') {
         steps { script {
-          sh("""#!/usr/bin/env bash
-            ${nix._sourceProfileInline()}
-            nix build --print-out-paths .#node
-          """)
+          nix.flake('node')
         } }
       }
       stage('Check') {
@@ -45,15 +42,12 @@ pipeline {
     stages {
       stage('Build') {
         steps { script {
-          sh("""#!/usr/bin/env bash
-            ${nix._sourceProfileInline()}
-            nix build --print-out-paths .#library
-          """)
+          nix.flake('static-library')
         } }
       }
       stage('Check') {
         steps {
-          sh 'ldd ./result/bin/c'
+          sh 'readelf -h ./result/bin/libgowaku.a'
         }
       }
     }
@@ -134,7 +134,7 @@ func Execute(options NodeOptions) error {
 		node.WithLogLevel(lvl),
 		node.WithPrivateKey(prvKey),
 		node.WithHostAddress(hostAddr),
-		node.WithKeepAlive(options.KeepAlive),
+		node.WithKeepAlive(10*time.Second, options.KeepAlive),
 		node.WithMaxPeerConnections(options.MaxPeerConnections),
 		node.WithPrometheusRegisterer(prometheus.DefaultRegisterer),
 		node.WithPeerStoreCapacity(options.PeerStoreCapacity),
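For callers updating their own code against this change, a minimal, hypothetical sketch of the old versus new keep-alive option follows. The two-argument form is taken from the hunk above; the option type name and the meaning of each duration are assumptions to verify against the node package.

```go
package main

import (
	"time"

	"github.com/waku-org/go-waku/waku/v2/node"
)

// buildOptions shows only the keep-alive portion of the option list above.
// Assumption: WakuNodeOption is the option type exported by the node package,
// and the first argument of the new WithKeepAlive is a ping interval.
func buildOptions(keepAlive time.Duration) []node.WakuNodeOption {
	return []node.WakuNodeOption{
		// Before this change: node.WithKeepAlive(keepAlive)
		node.WithKeepAlive(10*time.Second, keepAlive),
	}
}
```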
default.nix (new file, 33 lines)

@@ -0,0 +1,33 @@
+{
+  pkgs ? import <nixpkgs> { },
+  self ? ./.,
+  subPkgs ? "cmd/waku",
+  ldflags ? [],
+  output ? null,
+  commit ? builtins.substring 0 7 (self.rev or "dirty"),
+  version ? builtins.readFile ./VERSION,
+}:
+
+pkgs.buildGo121Module {
+  name = "go-waku";
+  src = self;
+
+  subPackages = subPkgs;
+  tags = ["gowaku_no_rln"];
+  ldflags = [
+    "-X github.com/waku-org/go-waku/waku/v2/node.GitCommit=${commit}"
+    "-X github.com/waku-org/go-waku/waku/v2/node.Version=${version}"
+  ] ++ ldflags;
+  doCheck = false;
+
+  # Otherwise library would be just called bin/c.
+  postInstall = if builtins.isString output then ''
+    mv $out/bin/* $out/bin/${output}
+  '' else "";
+
+  # FIXME: This needs to be manually changed when updating modules.
+  vendorHash = "sha256-cOh9LNmcaBnBeMFM1HS2pdH5TTraHfo8PXL37t/A3gQ=";
+
+  # Fix for 'nix run' trying to execute 'go-waku'.
+  meta = { mainProgram = "waku"; };
+}
@@ -38,7 +38,7 @@ One of these options must be specified when instantiating a node supporting the
 ```go
 ...

-peerAddr, err := multiaddr.NewMultiaddr("/dns4/node-01.do-ams3.waku.test.statusim.net/tcp/30303/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W")
+peerAddr, err := multiaddr.NewMultiaddr("/dns4/node-01.do-ams3.waku.test.status.im/tcp/30303/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W")
 if err != nil {
 	panic(err)
 }
@@ -17,12 +17,12 @@ or store and serve historical messages itself.

 Ensure that `store` is enabled (this is `true` by default) and provide at least one store service node address with the `--storenode` CLI option.

-See the following example, using the peer at `/dns4/node-01.ac-cn-hongkong-c.waku.test.statusim.net/tcp/30303/p2p/16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp` as store service node.
+See the following example, using the peer at `/dns4/node-01.ac-cn-hongkong-c.waku.test.status.im/tcp/30303/p2p/16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp` as store service node.

 ```sh
 ./build/waku \
   --store=true \
-  --storenode=/dns4/node-01.ac-cn-hongkong-c.waku.test.statusim.net/tcp/30303/p2p/16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp
+  --storenode=/dns4/node-01.ac-cn-hongkong-c.waku.test.status.im/tcp/30303/p2p/16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp
 ```

 Your node can now send queries to retrieve historical messages
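Beyond the CLI flags shown in the hunk above, a Go client can split that storenode multiaddr into a dialable address and a peer ID before issuing queries. The sketch below is not part of this diff; it only uses go-libp2p and go-multiaddr helpers, and the store query API itself is deliberately omitted because it differs between go-waku versions (legacy store vs. store v3).

```go
package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/multiformats/go-multiaddr"
)

func main() {
	// The store service node used in the example above.
	addr, err := multiaddr.NewMultiaddr("/dns4/node-01.ac-cn-hongkong-c.waku.test.status.im/tcp/30303/p2p/16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp")
	if err != nil {
		panic(err)
	}
	// Extract the peer ID and transport address, e.g. to target the
	// store peer explicitly when querying historical messages.
	info, err := peer.AddrInfoFromP2pAddr(addr)
	if err != nil {
		panic(err)
	}
	fmt.Println("store peer:", info.ID, "addrs:", info.Addrs)
}
```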
@@ -59,7 +59,7 @@ class MainActivity : AppCompatActivity() {
         lbl.text = (lbl.text.toString() + ">>> Default pubsub topic: " + defaultPubsubTopic() + "\n");

         try {
-            node.connect("/dns4/node-01.gc-us-central1-a.waku.test.statusim.net/tcp/30303/p2p/16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG")
+            node.connect("/dns4/node-01.gc-us-central1-a.waku.test.status.im/tcp/30303/p2p/16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG")
             lbl.text = (lbl.text.toString() + ">>> Connected to Peer" + "\n")

             node.peers().forEach {
@@ -12,7 +12,7 @@ replace github.com/libp2p/go-libp2p-pubsub v0.11.0 => github.com/waku-org/go-lib

 require (
 	github.com/ethereum/go-ethereum v1.10.26
-	github.com/libp2p/go-libp2p v0.35.0
+	github.com/libp2p/go-libp2p v0.35.2
 	github.com/multiformats/go-multiaddr v0.12.4
 	github.com/urfave/cli/v2 v2.27.2
 	github.com/waku-org/go-waku v0.2.3-0.20221109195301-b2a5a68d28ba
@@ -30,6 +30,7 @@ require (
 	github.com/btcsuite/btcd v0.20.1-beta // indirect
 	github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect
 	github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d // indirect
+	github.com/cenkalti/backoff/v3 v3.2.2 // indirect
 	github.com/cenkalti/backoff/v4 v4.1.2 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/containerd/cgroups v1.1.0 // indirect
@@ -53,7 +54,7 @@ require (
 	github.com/google/gopacket v1.1.19 // indirect
 	github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect
 	github.com/google/uuid v1.4.0 // indirect
-	github.com/gorilla/websocket v1.5.1 // indirect
+	github.com/gorilla/websocket v1.5.3 // indirect
 	github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
 	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
 	github.com/huin/goupnp v1.3.0 // indirect
@@ -94,7 +95,7 @@ require (
 	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
 	github.com/pion/datachannel v1.5.6 // indirect
 	github.com/pion/dtls/v2 v2.2.11 // indirect
-	github.com/pion/ice/v2 v2.3.24 // indirect
+	github.com/pion/ice/v2 v2.3.25 // indirect
 	github.com/pion/interceptor v0.1.29 // indirect
 	github.com/pion/logging v0.2.2 // indirect
 	github.com/pion/mdns v0.0.12 // indirect
@@ -136,7 +137,7 @@ require (
 	github.com/wk8/go-ordered-map v1.0.0 // indirect
 	github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect
 	go.uber.org/dig v1.17.1 // indirect
-	go.uber.org/fx v1.21.1 // indirect
+	go.uber.org/fx v1.22.1 // indirect
 	go.uber.org/mock v0.4.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	golang.org/x/crypto v0.23.0 // indirect
@@ -94,6 +94,8 @@ github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtE
 github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
 github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
 github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
+github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M=
+github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
 github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo=
 github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@@ -283,8 +285,8 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
 github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
-github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
+github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
+github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
 github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
@@ -382,8 +384,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6
 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
 github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
 github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
-github.com/libp2p/go-libp2p v0.35.0 h1:1xS1Bkr9X7GtdvV6ntLnDV9xB1kNjHK1lZ0eaO6gnhc=
-github.com/libp2p/go-libp2p v0.35.0/go.mod h1:snyJQix4ET6Tj+LeI0VPjjxTtdWpeOhYt5lEY0KirkQ=
+github.com/libp2p/go-libp2p v0.35.2 h1:287oHbuplkrLdAF+syB0n/qDgd50AUBtEODqS0e0HDs=
+github.com/libp2p/go-libp2p v0.35.2/go.mod h1:RKCDNt30IkFipGL0tl8wQW/3zVWEGFUZo8g2gAKxwjU=
 github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
 github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
 github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
@@ -523,8 +525,8 @@ github.com/pion/datachannel v1.5.6/go.mod h1:1eKT6Q85pRnr2mHiWHxJwO50SfZRtWHTsNI
 github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
 github.com/pion/dtls/v2 v2.2.11 h1:9U/dpCYl1ySttROPWJgqWKEylUdT0fXp/xst6JwY5Ks=
 github.com/pion/dtls/v2 v2.2.11/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
-github.com/pion/ice/v2 v2.3.24 h1:RYgzhH/u5lH0XO+ABatVKCtRd+4U1GEaCXSMjNr13tI=
-github.com/pion/ice/v2 v2.3.24/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw=
+github.com/pion/ice/v2 v2.3.25 h1:M5rJA07dqhi3nobJIg+uPtcVjFECTrhcR3n0ns8kDZs=
+github.com/pion/ice/v2 v2.3.25/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw=
 github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M=
 github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4=
 github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY=
@@ -732,8 +734,8 @@ go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
 go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
 go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc=
 go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
-go.uber.org/fx v1.21.1 h1:RqBh3cYdzZS0uqwVeEjOX2p73dddLpym315myy/Bpb0=
-go.uber.org/fx v1.21.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48=
+go.uber.org/fx v1.22.1 h1:nvvln7mwyT5s1q201YE29V/BFrGor6vMiDNpU/78Mys=
+go.uber.org/fx v1.22.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48=
 go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
@@ -29,6 +29,7 @@ require (
 	github.com/btcsuite/btcd v0.20.1-beta // indirect
 	github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect
 	github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d // indirect
+	github.com/cenkalti/backoff/v3 v3.2.2 // indirect
 	github.com/cenkalti/backoff/v4 v4.1.2 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/containerd/cgroups v1.1.0 // indirect
@@ -52,7 +53,7 @@ require (
 	github.com/google/gopacket v1.1.19 // indirect
 	github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect
 	github.com/google/uuid v1.4.0 // indirect
-	github.com/gorilla/websocket v1.5.1 // indirect
+	github.com/gorilla/websocket v1.5.3 // indirect
 	github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
 	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
 	github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c // indirect
@@ -66,7 +67,7 @@ require (
 	github.com/koron/go-ssdp v0.0.4 // indirect
 	github.com/libp2p/go-buffer-pool v0.1.0 // indirect
 	github.com/libp2p/go-flow-metrics v0.1.0 // indirect
-	github.com/libp2p/go-libp2p v0.35.0 // indirect
+	github.com/libp2p/go-libp2p v0.35.2 // indirect
 	github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
 	github.com/libp2p/go-libp2p-pubsub v0.11.0 // indirect
 	github.com/libp2p/go-msgio v0.3.0 // indirect
@@ -95,7 +96,7 @@ require (
 	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
 	github.com/pion/datachannel v1.5.6 // indirect
 	github.com/pion/dtls/v2 v2.2.11 // indirect
-	github.com/pion/ice/v2 v2.3.24 // indirect
+	github.com/pion/ice/v2 v2.3.25 // indirect
 	github.com/pion/interceptor v0.1.29 // indirect
 	github.com/pion/logging v0.2.2 // indirect
 	github.com/pion/mdns v0.0.12 // indirect
@@ -137,7 +138,7 @@ require (
 	github.com/wk8/go-ordered-map v1.0.0 // indirect
 	github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect
 	go.uber.org/dig v1.17.1 // indirect
-	go.uber.org/fx v1.21.1 // indirect
+	go.uber.org/fx v1.22.1 // indirect
 	go.uber.org/mock v0.4.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	golang.org/x/crypto v0.23.0 // indirect
[go.sum accompanying the go.mod above: identical checksum updates to the first go.sum in this diff (cenkalti/backoff/v3 v3.2.2 added; gorilla/websocket bumped to v1.5.3, libp2p/go-libp2p to v0.35.2, pion/ice/v2 to v2.3.25, go.uber.org/fx to v1.22.1).]
@@ -159,7 +159,7 @@ int main(int argc, char *argv[])
     printf("Discovered nodes: %s\n", discoveredNodes);

     // Connect to a node
-    waku_connect(ctx, "/dns4/node-01.do-ams3.waku.test.statusim.net/tcp/30303/"
+    waku_connect(ctx, "/dns4/node-01.do-ams3.waku.test.status.im/tcp/30303/"
                       "p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W",
                       0, on_response, NULL);

@@ -28,7 +28,7 @@ You may need to set DNS server if behind a VPN,
 In order to connect to a *specific* node as [`relay`](https://specs.vac.dev/specs/waku/v2/waku-relay) peer, define that node's `multiaddr` as a `staticnode` when starting the app:

 ```
-./build/chat2 -staticnode=/dns4/node-01.do-ams3.waku.test.statusim.net/tcp/30303/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W
+./build/chat2 -staticnode=/dns4/node-01.do-ams3.waku.test.status.im/tcp/30303/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W
 ```

 This will bypass the random peer selection process and connect to the specified node.
@@ -16,7 +16,7 @@ require (
 	github.com/charmbracelet/lipgloss v0.5.0
 	github.com/ethereum/go-ethereum v1.10.26
 	github.com/ipfs/go-log/v2 v2.5.1
-	github.com/libp2p/go-libp2p v0.35.0
+	github.com/libp2p/go-libp2p v0.35.2
 	github.com/muesli/reflow v0.3.0
 	github.com/multiformats/go-multiaddr v0.12.4
 	github.com/urfave/cli/v2 v2.27.2
@@ -36,6 +36,7 @@ require (
 	github.com/btcsuite/btcd v0.20.1-beta // indirect
 	github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect
 	github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d // indirect
+	github.com/cenkalti/backoff/v3 v3.2.2 // indirect
 	github.com/cenkalti/backoff/v4 v4.1.2 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/containerd/cgroups v1.1.0 // indirect
@@ -60,7 +61,7 @@ require (
 	github.com/google/gopacket v1.1.19 // indirect
 	github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect
 	github.com/google/uuid v1.4.0 // indirect
-	github.com/gorilla/websocket v1.5.1 // indirect
+	github.com/gorilla/websocket v1.5.3 // indirect
 	github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
 	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
 	github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c // indirect
@@ -106,7 +107,7 @@ require (
 	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
 	github.com/pion/datachannel v1.5.6 // indirect
 	github.com/pion/dtls/v2 v2.2.11 // indirect
-	github.com/pion/ice/v2 v2.3.24 // indirect
+	github.com/pion/ice/v2 v2.3.25 // indirect
 	github.com/pion/interceptor v0.1.29 // indirect
 	github.com/pion/logging v0.2.2 // indirect
 	github.com/pion/mdns v0.0.12 // indirect
@@ -149,7 +150,7 @@ require (
 	github.com/wk8/go-ordered-map v1.0.0 // indirect
 	github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect
 	go.uber.org/dig v1.17.1 // indirect
-	go.uber.org/fx v1.21.1 // indirect
+	go.uber.org/fx v1.22.1 // indirect
 	go.uber.org/mock v0.4.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
[go.sum accompanying the go.mod above: identical checksum updates to the first go.sum in this diff (cenkalti/backoff/v3 v3.2.2 added; gorilla/websocket bumped to v1.5.3, libp2p/go-libp2p to v0.35.2, pion/ice/v2 to v2.3.25, go.uber.org/fx to v1.22.1).]
@@ -26,6 +26,7 @@ require (
 	github.com/btcsuite/btcd v0.20.1-beta // indirect
 	github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect
 	github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d // indirect
+	github.com/cenkalti/backoff/v3 v3.2.2 // indirect
 	github.com/cenkalti/backoff/v4 v4.1.2 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/containerd/cgroups v1.1.0 // indirect
@@ -48,7 +49,7 @@ require (
 	github.com/google/gopacket v1.1.19 // indirect
 	github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect
 	github.com/google/uuid v1.4.0 // indirect
-	github.com/gorilla/websocket v1.5.1 // indirect
+	github.com/gorilla/websocket v1.5.3 // indirect
 	github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
 	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
 	github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c // indirect
@@ -61,7 +62,7 @@ require (
 	github.com/koron/go-ssdp v0.0.4 // indirect
 	github.com/libp2p/go-buffer-pool v0.1.0 // indirect
 	github.com/libp2p/go-flow-metrics v0.1.0 // indirect
-	github.com/libp2p/go-libp2p v0.35.0 // indirect
+	github.com/libp2p/go-libp2p v0.35.2 // indirect
 	github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
 	github.com/libp2p/go-libp2p-pubsub v0.11.0 // indirect
 	github.com/libp2p/go-msgio v0.3.0 // indirect
@@ -91,7 +92,7 @@ require (
 	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
 	github.com/pion/datachannel v1.5.6 // indirect
 	github.com/pion/dtls/v2 v2.2.11 // indirect
-	github.com/pion/ice/v2 v2.3.24 // indirect
+	github.com/pion/ice/v2 v2.3.25 // indirect
 	github.com/pion/interceptor v0.1.29 // indirect
 	github.com/pion/logging v0.2.2 // indirect
 	github.com/pion/mdns v0.0.12 // indirect
@@ -131,7 +132,7 @@ require (
 	github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230916171518-2a77c3734dd1 // indirect
 	github.com/wk8/go-ordered-map v1.0.0 // indirect
 	go.uber.org/dig v1.17.1 // indirect
-	go.uber.org/fx v1.21.1 // indirect
+	go.uber.org/fx v1.22.1 // indirect
 	go.uber.org/mock v0.4.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
[go.sum accompanying the go.mod above: identical checksum updates to the first go.sum in this diff (cenkalti/backoff/v3 v3.2.2 added; gorilla/websocket bumped to v1.5.3, libp2p/go-libp2p to v0.35.2, pion/ice/v2 to v2.3.25, go.uber.org/fx to v1.22.1).]
@@ -12,7 +12,7 @@ replace github.com/libp2p/go-libp2p-pubsub v0.11.0 => github.com/waku-org/go-lib

 require (
 	github.com/ipfs/go-log/v2 v2.5.1
-	github.com/libp2p/go-libp2p v0.35.0
+	github.com/libp2p/go-libp2p v0.35.2
 	github.com/waku-org/go-noise v0.0.4
 	github.com/waku-org/go-waku v0.2.3-0.20221109195301-b2a5a68d28ba
 	go.uber.org/zap v1.27.0
@@ -28,6 +28,7 @@ require (
 	github.com/btcsuite/btcd v0.20.1-beta // indirect
 	github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect
 	github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d // indirect
+	github.com/cenkalti/backoff/v3 v3.2.2 // indirect
 	github.com/cenkalti/backoff/v4 v4.1.2 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/containerd/cgroups v1.1.0 // indirect
@@ -51,7 +52,7 @@ require (
 	github.com/google/gopacket v1.1.19 // indirect
 	github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect
 	github.com/google/uuid v1.4.0 // indirect
-	github.com/gorilla/websocket v1.5.1 // indirect
+	github.com/gorilla/websocket v1.5.3 // indirect
 	github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
 	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
 	github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c // indirect
@@ -93,7 +94,7 @@ require (
 	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
 	github.com/pion/datachannel v1.5.6 // indirect
 	github.com/pion/dtls/v2 v2.2.11 // indirect
-	github.com/pion/ice/v2 v2.3.24 // indirect
+	github.com/pion/ice/v2 v2.3.25 // indirect
 	github.com/pion/interceptor v0.1.29 // indirect
 	github.com/pion/logging v0.2.2 // indirect
 	github.com/pion/mdns v0.0.12 // indirect
@@ -133,7 +134,7 @@ require (
 	github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230916171518-2a77c3734dd1 // indirect
 	github.com/wk8/go-ordered-map v1.0.0 // indirect
 	go.uber.org/dig v1.17.1 // indirect
-	go.uber.org/fx v1.21.1 // indirect
+	go.uber.org/fx v1.22.1 // indirect
 	go.uber.org/mock v0.4.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	golang.org/x/crypto v0.23.0 // indirect
[go.sum accompanying the go.mod above: identical checksum updates to the first go.sum in this diff (cenkalti/backoff/v3 v3.2.2 added; gorilla/websocket bumped to v1.5.3, libp2p/go-libp2p to v0.35.2, pion/ice/v2 to v2.3.25, go.uber.org/fx to v1.22.1).]
@@ -26,6 +26,7 @@ require (
 	github.com/btcsuite/btcd v0.20.1-beta // indirect
 	github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect
 	github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d // indirect
+	github.com/cenkalti/backoff/v3 v3.2.2 // indirect
 	github.com/cenkalti/backoff/v4 v4.1.2 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/containerd/cgroups v1.1.0 // indirect
@@ -48,7 +49,7 @@ require (
 	github.com/google/gopacket v1.1.19 // indirect
 	github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect
 	github.com/google/uuid v1.4.0 // indirect
-	github.com/gorilla/websocket v1.5.1 // indirect
+	github.com/gorilla/websocket v1.5.3 // indirect
 	github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
 	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
 	github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c // indirect
@@ -62,7 +63,7 @@ require (
 	github.com/koron/go-ssdp v0.0.4 // indirect
 	github.com/libp2p/go-buffer-pool v0.1.0 // indirect
 	github.com/libp2p/go-flow-metrics v0.1.0 // indirect
-	github.com/libp2p/go-libp2p v0.35.0 // indirect
+	github.com/libp2p/go-libp2p v0.35.2 // indirect
 	github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
 	github.com/libp2p/go-libp2p-pubsub v0.11.0 // indirect
 	github.com/libp2p/go-msgio v0.3.0 // indirect
@@ -92,7 +93,7 @@ require (
 	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
 	github.com/pion/datachannel v1.5.6 // indirect
 	github.com/pion/dtls/v2 v2.2.11 // indirect
-	github.com/pion/ice/v2 v2.3.24 // indirect
+	github.com/pion/ice/v2 v2.3.25 // indirect
 	github.com/pion/interceptor v0.1.29 // indirect
 	github.com/pion/logging v0.2.2 // indirect
 	github.com/pion/mdns v0.0.12 // indirect
@@ -132,7 +133,7 @@ require (
 	github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230916171518-2a77c3734dd1 // indirect
 	github.com/wk8/go-ordered-map v1.0.0 // indirect
 	go.uber.org/dig v1.17.1 // indirect
-	go.uber.org/fx v1.21.1 // indirect
+	go.uber.org/fx v1.22.1 // indirect
 	go.uber.org/mock v0.4.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	golang.org/x/crypto v0.23.0 // indirect
[go.sum accompanying the go.mod above: identical checksum updates to the first go.sum in this diff (cenkalti/backoff/v3 v3.2.2 added; gorilla/websocket bumped to v1.5.3, libp2p/go-libp2p to v0.35.2, pion/ice/v2 to v2.3.25, go.uber.org/fx to v1.22.1).]
@@ -45,7 +45,7 @@ Console.WriteLine(">>> Default pubsub topic: " + Waku.Utils.DefaultPubsubTopic()

 try
 {
-    node.Connect("/dns4/node-01.gc-us-central1-a.waku.test.statusim.net/tcp/30303/p2p/16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG");
+    node.Connect("/dns4/node-01.gc-us-central1-a.waku.test.status.im/tcp/30303/p2p/16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG");
     Console.WriteLine(">>> Connected to Peer");

     foreach (Waku.Peer peer in node.Peers())
flake.nix, 42 lines changed

@@ -11,11 +11,11 @@
     ];
     forAllSystems = f: nixpkgs.lib.genAttrs supportedSystems (system: f system);

-    nixpkgsFor = forAllSystems (system: import nixpkgs { inherit system; });
+    pkgsFor = forAllSystems (system: import nixpkgs { inherit system; });

     buildPackage = system: subPackages:
       let
-        pkgs = nixpkgsFor.${system};
+        pkgs = pkgsFor.${system};
         commit = builtins.substring 0 7 (self.rev or "dirty");
         version = builtins.readFile ./VERSION;
       in pkgs.buildGo121Module {
@@ -29,22 +29,42 @@
       ];
       doCheck = false;
       # FIXME: This needs to be manually changed when updating modules.
-      vendorHash = "sha256-9AnVgIcsQyB8xfxJqj17TrdWqQYeAHrUaUDQe10gAzE=";
+      vendorHash = "sha256-cOh9LNmcaBnBeMFM1HS2pdH5TTraHfo8PXL37t/A3gQ=";
       # Fix for 'nix run' trying to execute 'go-waku'.
       meta = { mainProgram = "waku"; };
     };
   in rec {
-    packages = forAllSystems (system: {
-      node = buildPackage system ["cmd/waku"];
-      library = buildPackage system ["library/c"];
+    packages = forAllSystems (system: let
+      pkgs = pkgsFor.${system};
+      os = pkgs.stdenv.hostPlatform.uname.system;
+      sttLibExtMap = { Windows = "lib"; Darwin = "a"; Linux = "a"; };
+      dynLibExtMap = { Windows = "dll"; Darwin = "dylib"; Linux = "so"; };
+      buildPackage = pkgs.callPackage ./default.nix;
+    in rec {
+      default = node;
+      node = buildPackage {
+        inherit self;
+        subPkgs = ["cmd/waku"];
+      };
+      static-library = buildPackage {
+        inherit self;
+        subPkgs = ["library/c"];
+        ldflags = ["-buildmode=c-archive"];
+        output = "libgowaku.${sttLibExtMap.${os}}";
+      };
+      # FIXME: Compilation fails with:
+      # relocation R_X86_64_TPOFF32 against runtime.tlsg can not be
+      # used when making a shared object; recompile with -fPIC
+      dynamic-library = buildPackage {
+        inherit self;
+        subPkgs = ["library/c"];
+        ldflags = ["-buildmode=c-shared"];
+        output = "libgowaku.${dynLibExtMap.${os}}";
+      };
     });

-    defaultPackage = forAllSystems (system:
-      buildPackage system ["cmd/waku"]
-    );
-
     devShells = forAllSystems (system: let
-      pkgs = nixpkgsFor.${system};
+      pkgs = pkgsFor.${system};
       inherit (pkgs) lib stdenv mkShell;
     in {
       default = mkShell {
go.mod, 11 lines changed

@@ -15,7 +15,7 @@ require (
 	github.com/golang-migrate/migrate/v4 v4.15.2
 	github.com/ipfs/go-ds-sql v0.3.0
 	github.com/ipfs/go-log/v2 v2.5.1
-	github.com/libp2p/go-libp2p v0.35.0
+	github.com/libp2p/go-libp2p v0.35.2
 	github.com/libp2p/go-libp2p-pubsub v0.11.0
 	github.com/libp2p/go-msgio v0.3.0
 	github.com/mattn/go-sqlite3 v1.14.17
@@ -35,6 +35,7 @@ require (

 require (
 	github.com/avast/retry-go/v4 v4.5.1
+	github.com/cenkalti/backoff/v3 v3.2.2
 	github.com/cenkalti/backoff/v4 v4.1.2
 	github.com/dustin/go-humanize v1.0.1
 	github.com/go-chi/chi/v5 v5.0.0
@@ -68,7 +69,7 @@ require (
 	github.com/onsi/ginkgo/v2 v2.15.0 // indirect
 	github.com/pion/datachannel v1.5.6 // indirect
 	github.com/pion/dtls/v2 v2.2.11 // indirect
-	github.com/pion/ice/v2 v2.3.24 // indirect
+	github.com/pion/ice/v2 v2.3.25 // indirect
 	github.com/pion/interceptor v0.1.29 // indirect
 	github.com/pion/logging v0.2.2 // indirect
 	github.com/pion/mdns v0.0.12 // indirect
@@ -91,7 +92,7 @@ require (
 	github.com/waku-org/go-zerokit-rln-arm v0.0.0-20230916171929-1dd9494ff065 // indirect
 	github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230916171518-2a77c3734dd1 // indirect
 	go.uber.org/dig v1.17.1 // indirect
-	go.uber.org/fx v1.21.1 // indirect
+	go.uber.org/fx v1.22.1 // indirect
 	go.uber.org/mock v0.4.0 // indirect
 	golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect
 )
@@ -120,7 +121,7 @@ require (
 	github.com/golang/snappy v0.0.4 // indirect
 	github.com/google/gopacket v1.1.19 // indirect
 	github.com/google/uuid v1.4.0
-	github.com/gorilla/websocket v1.5.1 // indirect
+	github.com/gorilla/websocket v1.5.3 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d
@@ -154,7 +155,7 @@ require (
 	github.com/multiformats/go-multibase v0.2.0 // indirect
 	github.com/multiformats/go-multicodec v0.9.0 // indirect
 	github.com/multiformats/go-multihash v0.2.3 // indirect
-	github.com/multiformats/go-multistream v0.5.0
+	github.com/multiformats/go-multistream v0.5.0 // indirect
 	github.com/multiformats/go-varint v0.0.7 // indirect
 	github.com/opencontainers/runtime-spec v1.2.0 // indirect
 	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
18
go.sum
@ -245,6 +245,8 @@ github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8n
|
||||
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
|
||||
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
|
||||
github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
|
||||
github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M=
|
||||
github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
|
||||
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||
github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo=
|
||||
github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||
@ -785,8 +787,8 @@ github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB7
|
||||
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
|
||||
github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
@ -1040,8 +1042,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6
|
||||
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
|
||||
github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
|
||||
github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
|
||||
github.com/libp2p/go-libp2p v0.35.0 h1:1xS1Bkr9X7GtdvV6ntLnDV9xB1kNjHK1lZ0eaO6gnhc=
|
||||
github.com/libp2p/go-libp2p v0.35.0/go.mod h1:snyJQix4ET6Tj+LeI0VPjjxTtdWpeOhYt5lEY0KirkQ=
|
||||
github.com/libp2p/go-libp2p v0.35.2 h1:287oHbuplkrLdAF+syB0n/qDgd50AUBtEODqS0e0HDs=
|
||||
github.com/libp2p/go-libp2p v0.35.2/go.mod h1:RKCDNt30IkFipGL0tl8wQW/3zVWEGFUZo8g2gAKxwjU=
|
||||
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
|
||||
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
|
||||
github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
|
||||
@ -1302,8 +1304,8 @@ github.com/pion/datachannel v1.5.6/go.mod h1:1eKT6Q85pRnr2mHiWHxJwO50SfZRtWHTsNI
|
||||
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
|
||||
github.com/pion/dtls/v2 v2.2.11 h1:9U/dpCYl1ySttROPWJgqWKEylUdT0fXp/xst6JwY5Ks=
|
||||
github.com/pion/dtls/v2 v2.2.11/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
|
||||
github.com/pion/ice/v2 v2.3.24 h1:RYgzhH/u5lH0XO+ABatVKCtRd+4U1GEaCXSMjNr13tI=
|
||||
github.com/pion/ice/v2 v2.3.24/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw=
|
||||
github.com/pion/ice/v2 v2.3.25 h1:M5rJA07dqhi3nobJIg+uPtcVjFECTrhcR3n0ns8kDZs=
|
||||
github.com/pion/ice/v2 v2.3.25/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw=
|
||||
github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M=
|
||||
github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4=
|
||||
github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY=
|
||||
@ -1680,8 +1682,8 @@ go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
||||
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc=
|
||||
go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
|
||||
go.uber.org/fx v1.21.1 h1:RqBh3cYdzZS0uqwVeEjOX2p73dddLpym315myy/Bpb0=
|
||||
go.uber.org/fx v1.21.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48=
|
||||
go.uber.org/fx v1.22.1 h1:nvvln7mwyT5s1q201YE29V/BFrGor6vMiDNpU/78Mys=
|
||||
go.uber.org/fx v1.22.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48=
|
||||
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
|
||||
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
|
||||
@ -1340,8 +1340,8 @@ If the function is executed successfully, `onOkCb` will receive an array of objects
|
||||
{
|
||||
"peerID":"16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W",
|
||||
"multiaddrs":[
|
||||
"/dns4/node-01.do-ams3.waku.test.statusim.net/tcp/30303/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W",
|
||||
"/dns4/node-01.do-ams3.waku.test.statusim.net/tcp/8000/wss/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W"
|
||||
"/dns4/node-01.do-ams3.waku.test.status.im/tcp/30303/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W",
|
||||
"/dns4/node-01.do-ams3.waku.test.status.im/tcp/8000/wss/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W"
|
||||
],
|
||||
"enr":"enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Ugl_r25UHQJ3f1rIRrpzxJXSMaJe4yk1XFSAYJpZIJ2NIJpcISygI2rim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQJATXRSRSUyTw_QLB6H_U3oziVQgNRgrXpK7wp2AMyNxYN0Y3CCdl-DdWRwgiMohXdha3UyDw"
|
||||
},
|
||||
|
||||
@ -163,7 +163,7 @@ func NewNode(instance *WakuInstance, configJSON string) error {
|
||||
opts := []node.WakuNodeOption{
|
||||
node.WithPrivateKey(prvKey),
|
||||
node.WithHostAddress(hostAddr),
|
||||
node.WithKeepAlive(time.Duration(*config.KeepAliveInterval) * time.Second),
|
||||
node.WithKeepAlive(10*time.Second, time.Duration(*config.KeepAliveInterval)*time.Second),
|
||||
}
|
||||
|
||||
if *config.EnableRelay {
|
||||
|
||||
@ -74,6 +74,10 @@ func (t timestamp) String() string {
|
||||
return time.Unix(0, int64(t)).Format(time.RFC3339)
|
||||
}
|
||||
|
||||
func Epoch(key string, time time.Time) zap.Field {
|
||||
return zap.String(key, fmt.Sprintf("%d", time.UnixNano()))
|
||||
}
|
||||
|
||||
// History Query Filters
|
||||
type historyFilters []*pb.ContentFilter
|
||||
|
||||
|
||||
@ -21,6 +21,7 @@ import (
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/cenkalti/backoff/v3"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
|
||||
gcrypto "github.com/ethereum/go-ethereum/crypto"
|
||||
@ -437,3 +438,21 @@ func WaitForTimeout(t *testing.T, ctx context.Context, timeout time.Duration, wg
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
type BackOffOption func(*backoff.ExponentialBackOff)
|
||||
|
||||
func RetryWithBackOff(o func() error, options ...BackOffOption) error {
|
||||
b := backoff.ExponentialBackOff{
|
||||
InitialInterval: time.Millisecond * 100,
|
||||
RandomizationFactor: 0.1,
|
||||
Multiplier: 1,
|
||||
MaxInterval: time.Second,
|
||||
MaxElapsedTime: time.Second * 10,
|
||||
Clock: backoff.SystemClock,
|
||||
}
|
||||
for _, option := range options {
|
||||
option(&b)
|
||||
}
|
||||
b.Reset()
|
||||
return backoff.Retry(o, &b)
|
||||
}
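// Illustrative usage sketch (not part of this change): retry a flaky operation,
// overriding the default 10s elapsed-time cap with a BackOffOption. Assumes the
// standard "errors" package is imported in this file; the operation below is hypothetical.
func exampleRetryWithBackOff() error {
	attempts := 0
	return RetryWithBackOff(func() error {
		attempts++
		if attempts < 3 {
			return errors.New("not ready yet") // fail twice, then succeed
		}
		return nil
	}, func(b *backoff.ExponentialBackOff) {
		b.MaxElapsedTime = 30 * time.Second
	})
}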
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
package api
|
||||
package filter
|
||||
|
||||
import (
|
||||
"context"
|
||||
@ -14,7 +14,6 @@ import (
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const FilterPingTimeout = 5 * time.Second
|
||||
const MultiplexChannelBuffer = 100
|
||||
|
||||
type FilterConfig struct {
|
||||
248
waku/v2/api/filter/filter_manager.go
Normal file
@ -0,0 +1,248 @@
|
||||
package filter
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/exp/maps"
|
||||
|
||||
"github.com/waku-org/go-waku/waku/v2/onlinechecker"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/filter"
|
||||
)
|
||||
|
||||
// Methods on FilterManager just aggregate filters from the application and subscribe to them
//
// startFilterSubLoop runs a loop in which it waits for an interval to batch subscriptions
//
// runFilterSubscriptionLoop runs a loop that receives messages from the underlying subscriptions and invokes onNewEnvelopes
//
// filterConfigs is the map of filter IDs to filter configs
// filterSubscriptions is the map of filter subscription IDs to subscriptions
|
||||
|
||||
const filterSubBatchSize = 90
|
||||
|
||||
type appFilterMap map[string]filterConfig
|
||||
|
||||
type FilterManager struct {
|
||||
sync.Mutex
|
||||
ctx context.Context
|
||||
minPeersPerFilter int
|
||||
onlineChecker *onlinechecker.DefaultOnlineChecker
|
||||
filterSubscriptions map[string]SubDetails // map of aggregated filters to apiSub details
|
||||
logger *zap.Logger
|
||||
node *filter.WakuFilterLightNode
|
||||
filterSubBatchDuration time.Duration
|
||||
incompleteFilterBatch map[string]filterConfig
|
||||
filterConfigs appFilterMap // map of application filterID to {aggregatedFilterID, application ContentFilter}
|
||||
waitingToSubQueue chan filterConfig
|
||||
envProcessor EnevelopeProcessor
|
||||
}
|
||||
|
||||
type SubDetails struct {
|
||||
cancel func()
|
||||
sub *Sub
|
||||
}
|
||||
|
||||
type filterConfig struct {
|
||||
ID string
|
||||
contentFilter protocol.ContentFilter
|
||||
}
|
||||
|
||||
// EnevelopeProcessor is responsible for processing received messages.
// This is application specific.
|
||||
type EnevelopeProcessor interface {
|
||||
OnNewEnvelope(env *protocol.Envelope) error
|
||||
}
|
||||
|
||||
func NewFilterManager(ctx context.Context, logger *zap.Logger, minPeersPerFilter int, envProcessor EnevelopeProcessor, node *filter.WakuFilterLightNode) *FilterManager {
|
||||
// This fn is being mocked in test
|
||||
mgr := new(FilterManager)
|
||||
mgr.ctx = ctx
|
||||
mgr.logger = logger
|
||||
mgr.minPeersPerFilter = minPeersPerFilter
|
||||
mgr.envProcessor = envProcessor
|
||||
mgr.filterSubscriptions = make(map[string]SubDetails)
|
||||
mgr.node = node
|
||||
mgr.onlineChecker = onlinechecker.NewDefaultOnlineChecker(false).(*onlinechecker.DefaultOnlineChecker)
|
||||
mgr.node.SetOnlineChecker(mgr.onlineChecker)
|
||||
mgr.filterSubBatchDuration = 5 * time.Second
|
||||
mgr.incompleteFilterBatch = make(map[string]filterConfig)
|
||||
mgr.filterConfigs = make(appFilterMap)
|
||||
mgr.waitingToSubQueue = make(chan filterConfig, 100)
|
||||
go mgr.startFilterSubLoop()
|
||||
return mgr
|
||||
}
|
||||
|
||||
func (mgr *FilterManager) startFilterSubLoop() {
|
||||
ticker := time.NewTicker(mgr.filterSubBatchDuration)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-mgr.ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
// TODO: Optimization, handle case where 1st addFilter happens just before ticker expires.
|
||||
if mgr.onlineChecker.IsOnline() {
|
||||
mgr.Lock()
|
||||
for _, af := range mgr.incompleteFilterBatch {
|
||||
mgr.logger.Debug("ticker hit, hence subscribing", zap.String("agg-filter-id", af.ID), zap.Int("batch-size", len(af.contentFilter.ContentTopics)),
|
||||
zap.Stringer("agg-content-filter", af.contentFilter))
|
||||
go mgr.subscribeAndRunLoop(af)
|
||||
}
|
||||
mgr.incompleteFilterBatch = make(map[string]filterConfig)
|
||||
mgr.Unlock()
|
||||
}
|
||||
subs := mgr.node.Subscriptions()
|
||||
mgr.logger.Debug("filter stats", zap.Int("agg filters count", len(mgr.filterSubscriptions)), zap.Int("filter subs count", len(subs)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SubscribeFilter checks whether there is an existing batch of waiting filters for the pubsubTopic and adds the new filter to that batch.
// Once the batch limit is hit, all filters in the batch are subscribed to and a new batch is created.
// If the node is not online, the batch is pushed to a queue to be picked up later for subscription and a new batch is created.
|
||||
|
||||
func (mgr *FilterManager) SubscribeFilter(filterID string, cf protocol.ContentFilter) {
|
||||
mgr.logger.Debug("adding filter", zap.String("filter-id", filterID))
|
||||
|
||||
mgr.Lock()
|
||||
defer mgr.Unlock()
|
||||
|
||||
afilter, ok := mgr.incompleteFilterBatch[cf.PubsubTopic]
|
||||
if !ok {
|
||||
// no existing batch for pubsubTopic
|
||||
mgr.logger.Debug("new pubsubTopic batch", zap.String("topic", cf.PubsubTopic))
|
||||
afilter = filterConfig{uuid.NewString(), cf}
|
||||
mgr.incompleteFilterBatch[cf.PubsubTopic] = afilter
|
||||
mgr.filterConfigs[filterID] = filterConfig{afilter.ID, cf}
|
||||
} else {
|
||||
mgr.logger.Debug("existing pubsubTopic batch", zap.String("agg-filter-id", afilter.ID), zap.String("topic", cf.PubsubTopic))
|
||||
if len(afilter.contentFilter.ContentTopics)+len(cf.ContentTopics) > filterSubBatchSize {
|
||||
// filter batch limit is hit
|
||||
if mgr.onlineChecker.IsOnline() {
|
||||
// node is online, go ahead and subscribe the batch
|
||||
mgr.logger.Debug("crossed pubsubTopic batchsize and online, subscribing to filters", zap.String("agg-filter-id", afilter.ID), zap.String("topic", cf.PubsubTopic), zap.Int("batch-size", len(afilter.contentFilter.ContentTopics)+len(cf.ContentTopics)))
|
||||
go mgr.subscribeAndRunLoop(afilter)
|
||||
} else {
|
||||
mgr.logger.Debug("crossed pubsubTopic batchsize and offline, queuing filters", zap.String("agg-filter-id", afilter.ID), zap.String("topic", cf.PubsubTopic), zap.Int("batch-size", len(afilter.contentFilter.ContentTopics)+len(cf.ContentTopics)))
|
||||
// queue existing batch as node is not online
|
||||
mgr.waitingToSubQueue <- afilter
|
||||
}
|
||||
afilter = filterConfig{uuid.NewString(), cf}
|
||||
mgr.logger.Debug("creating a new pubsubTopic batch", zap.String("agg-filter-id", afilter.ID), zap.String("topic", cf.PubsubTopic), zap.Stringer("content-filter", cf))
|
||||
mgr.incompleteFilterBatch[cf.PubsubTopic] = afilter
|
||||
mgr.filterConfigs[filterID] = filterConfig{afilter.ID, cf}
|
||||
} else {
|
||||
// add to existing batch as batch limit not reached
|
||||
for _, ct := range maps.Keys(cf.ContentTopics) {
|
||||
afilter.contentFilter.ContentTopics[ct] = struct{}{}
|
||||
}
|
||||
mgr.logger.Debug("adding to existing pubsubTopic batch", zap.String("agg-filter-id", afilter.ID), zap.Stringer("content-filter", cf), zap.Int("batch-size", len(afilter.contentFilter.ContentTopics)))
|
||||
mgr.filterConfigs[filterID] = filterConfig{afilter.ID, cf}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (mgr *FilterManager) subscribeAndRunLoop(f filterConfig) {
|
||||
ctx, cancel := context.WithCancel(mgr.ctx)
|
||||
config := FilterConfig{MaxPeers: mgr.minPeersPerFilter}
|
||||
sub, err := Subscribe(ctx, mgr.node, f.contentFilter, config, mgr.logger)
|
||||
mgr.Lock()
|
||||
mgr.filterSubscriptions[f.ID] = SubDetails{cancel, sub}
|
||||
mgr.Unlock()
|
||||
if err == nil {
|
||||
mgr.logger.Debug("subscription successful, running loop", zap.String("agg-filter-id", f.ID), zap.Stringer("content-filter", f.contentFilter))
|
||||
mgr.runFilterSubscriptionLoop(sub)
|
||||
} else {
|
||||
mgr.logger.Error("subscription fail, need to debug issue", zap.String("agg-filter-id", f.ID), zap.Stringer("content-filter", f.contentFilter), zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
// NetworkChange is to be invoked when the application detects a change in the network.
// It retriggers a ping to verify that subscriptions are still alive.
|
||||
func (mgr *FilterManager) NetworkChange() {
|
||||
mgr.node.PingPeers() // ping all peers to check if subscriptions are alive
|
||||
}
|
||||
|
||||
// OnConnectionStatusChange is to be triggered when a connection status change is detected, either from offline to online or vice versa.
// Note that a pubsubTopic-specific change can be signalled by specifying the pubsubTopic;
// an empty pubsubTopic indicates a complete connection status change, such as the node going offline or coming back online.
|
||||
func (mgr *FilterManager) OnConnectionStatusChange(pubsubTopic string, newStatus bool) {
|
||||
subs := mgr.node.Subscriptions()
|
||||
mgr.logger.Debug("inside on connection status change", zap.Bool("new-status", newStatus),
|
||||
zap.Int("agg filters count", len(mgr.filterSubscriptions)), zap.Int("filter subs count", len(subs)))
|
||||
if newStatus && !mgr.onlineChecker.IsOnline() { // switched from offline to Online
|
||||
mgr.NetworkChange()
|
||||
mgr.logger.Debug("switching from offline to online")
|
||||
mgr.Lock()
|
||||
if len(mgr.waitingToSubQueue) > 0 {
|
||||
for af := range mgr.waitingToSubQueue {
|
||||
// TODO: change the below logic once topic specific health is implemented for lightClients
|
||||
if pubsubTopic == "" || pubsubTopic == af.contentFilter.PubsubTopic {
|
||||
// check if any filter subs are pending and subscribe them
|
||||
mgr.logger.Debug("subscribing from filter queue", zap.String("filter-id", af.ID), zap.Stringer("content-filter", af.contentFilter))
|
||||
go mgr.subscribeAndRunLoop(af)
|
||||
} else {
|
||||
mgr.waitingToSubQueue <- af
|
||||
}
|
||||
if len(mgr.waitingToSubQueue) == 0 {
|
||||
mgr.logger.Debug("no pending subscriptions")
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
mgr.Unlock()
|
||||
}
|
||||
|
||||
mgr.onlineChecker.SetOnline(newStatus)
|
||||
}
|
||||
|
||||
func (mgr *FilterManager) UnsubscribeFilter(filterID string) {
|
||||
mgr.Lock()
|
||||
defer mgr.Unlock()
|
||||
mgr.logger.Debug("removing filter", zap.String("filter-id", filterID))
|
||||
filterConfig, ok := mgr.filterConfigs[filterID]
|
||||
if !ok {
|
||||
mgr.logger.Debug("filter removal: filter not found", zap.String("filter-id", filterID))
|
||||
return
|
||||
}
|
||||
af, ok := mgr.filterSubscriptions[filterConfig.ID]
|
||||
if ok {
|
||||
delete(mgr.filterConfigs, filterID)
|
||||
for ct := range filterConfig.contentFilter.ContentTopics {
|
||||
delete(af.sub.ContentFilter.ContentTopics, ct)
|
||||
}
|
||||
if len(af.sub.ContentFilter.ContentTopics) == 0 {
|
||||
af.cancel()
|
||||
} else {
|
||||
go af.sub.Unsubscribe(filterConfig.contentFilter)
|
||||
}
|
||||
} else {
|
||||
mgr.logger.Debug("filter removal: aggregated filter not found", zap.String("filter-id", filterID), zap.String("agg-filter-id", filterConfig.ID))
|
||||
}
|
||||
}
|
||||
|
||||
func (mgr *FilterManager) runFilterSubscriptionLoop(sub *Sub) {
|
||||
for {
|
||||
select {
|
||||
case <-mgr.ctx.Done():
|
||||
mgr.logger.Debug("subscription loop ended", zap.Stringer("content-filter", sub.ContentFilter))
|
||||
return
|
||||
case env, ok := <-sub.DataCh:
|
||||
if ok {
|
||||
err := mgr.envProcessor.OnNewEnvelope(env)
|
||||
if err != nil {
|
||||
mgr.logger.Error("invoking onNewEnvelopes error", zap.Error(err))
|
||||
}
|
||||
} else {
|
||||
mgr.logger.Debug("filter sub is closed", zap.Any("content-filter", sub.ContentFilter))
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
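// Usage sketch (illustrative, not part of this change): an application-side
// EnevelopeProcessor plus a typical subscribe/unsubscribe flow. The names
// printProcessor and exampleFilterManager are hypothetical; the pubsub and
// content topics are placeholders.
type printProcessor struct{ logger *zap.Logger }

func (p *printProcessor) OnNewEnvelope(env *protocol.Envelope) error {
	p.logger.Info("received message", zap.String("contentTopic", env.Message().ContentTopic))
	return nil
}

func exampleFilterManager(ctx context.Context, logger *zap.Logger, lightNode *filter.WakuFilterLightNode) {
	mgr := NewFilterManager(ctx, logger, 2, &printProcessor{logger: logger}, lightNode)
	mgr.OnConnectionStatusChange("", true) // mark the node as online so batches get subscribed

	filterID := uuid.NewString()
	mgr.SubscribeFilter(filterID, protocol.ContentFilter{
		PubsubTopic:   "/waku/2/rs/0/0",
		ContentTopics: protocol.NewContentTopicSet("/app/1/example/proto"),
	})

	// ... later, when the application no longer needs this filter:
	mgr.UnsubscribeFilter(filterID)
}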
|
||||
@ -1,10 +1,15 @@
|
||||
package api
|
||||
//go:build !race
|
||||
|
||||
package filter
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/stretchr/testify/suite"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
@ -19,6 +24,7 @@ func TestFilterApiSuite(t *testing.T) {
|
||||
|
||||
type FilterApiTestSuite struct {
|
||||
filter.FilterTestSuite
|
||||
msgRcvd chan bool
|
||||
}
|
||||
|
||||
func (s *FilterApiTestSuite) SetupTest() {
|
||||
@ -96,3 +102,92 @@ func (s *FilterApiTestSuite) TestSubscribe() {
|
||||
s.Log.Info("DataCh is closed")
|
||||
|
||||
}
|
||||
|
||||
func (s *FilterApiTestSuite) OnNewEnvelope(env *protocol.Envelope) error {
|
||||
if env.Message().ContentTopic == s.ContentFilter.ContentTopicsList()[0] {
|
||||
s.Log.Info("received message via filter")
|
||||
s.msgRcvd <- true
|
||||
} else {
|
||||
s.Log.Info("received message via filter but doesn't match contentTopic")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *FilterApiTestSuite) TestFilterManager() {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
testPubsubTopic := s.TestTopic
|
||||
contentTopicBytes := make([]byte, 4)
|
||||
_, err := rand.Read(contentTopicBytes)
|
||||
|
||||
s.Require().NoError(err)
|
||||
|
||||
s.ContentFilter = protocol.ContentFilter{
|
||||
PubsubTopic: testPubsubTopic,
|
||||
ContentTopics: protocol.NewContentTopicSet("/test/filtermgr" + hex.EncodeToString(contentTopicBytes) + "/topic/proto"),
|
||||
}
|
||||
|
||||
s.msgRcvd = make(chan bool, 1)
|
||||
|
||||
s.Log.Info("creating filterManager")
|
||||
fm := NewFilterManager(ctx, s.Log, 2, s, s.LightNode)
|
||||
fm.filterSubBatchDuration = 1 * time.Second
|
||||
fm.onlineChecker.SetOnline(true)
|
||||
fID := uuid.NewString()
|
||||
fm.SubscribeFilter(fID, s.ContentFilter)
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
// Ensure there is at least 1 active filter subscription
|
||||
subscriptions := s.LightNode.Subscriptions()
|
||||
s.Require().Greater(len(subscriptions), 0)
|
||||
|
||||
s.Log.Info("publishing msg")
|
||||
|
||||
s.PublishMsg(&filter.WakuMsg{
|
||||
Payload: "filtermgr testMsg",
|
||||
ContentTopic: s.ContentFilter.ContentTopicsList()[0],
|
||||
PubSubTopic: testPubsubTopic,
|
||||
})
|
||||
t := time.NewTicker(2 * time.Second)
|
||||
select {
|
||||
case received := <-s.msgRcvd:
|
||||
s.Require().True(received)
|
||||
s.Log.Info("unsubscribe 1")
|
||||
case <-t.C:
|
||||
s.Log.Error("timed out waiting for message")
|
||||
s.Fail("timed out waiting for message")
|
||||
}
|
||||
// Mock peers going down
|
||||
s.LightNodeHost.Peerstore().RemovePeer(s.FullNodeHost.ID())
|
||||
|
||||
fm.OnConnectionStatusChange("", false)
|
||||
time.Sleep(2 * time.Second)
|
||||
fm.OnConnectionStatusChange("", true)
|
||||
s.ConnectToFullNode(s.LightNode, s.FullNode)
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
// Ensure there is at least 1 active filter subscription
|
||||
subscriptions = s.LightNode.Subscriptions()
|
||||
s.Require().Greater(len(subscriptions), 0)
|
||||
s.Log.Info("publish message 2")
|
||||
|
||||
// Ensure that messages are retrieved with a fresh sub
|
||||
s.PublishMsg(&filter.WakuMsg{
|
||||
Payload: "filtermgr testMsg2",
|
||||
ContentTopic: s.ContentFilter.ContentTopicsList()[0],
|
||||
PubSubTopic: testPubsubTopic,
|
||||
})
|
||||
t = time.NewTicker(2 * time.Second)
|
||||
|
||||
select {
|
||||
case received := <-s.msgRcvd:
|
||||
s.Require().True(received)
|
||||
s.Log.Info("received message 2")
|
||||
case <-t.C:
|
||||
s.Log.Error("timed out waiting for message 2")
|
||||
s.Fail("timed out waiting for message 2")
|
||||
}
|
||||
|
||||
fm.UnsubscribeFilter(fID)
|
||||
cancel()
|
||||
}
|
||||
47
waku/v2/api/missing/criteria_interest.go
Normal file
@ -0,0 +1,47 @@
|
||||
package missing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
)
|
||||
|
||||
type criteriaInterest struct {
|
||||
peerID peer.ID
|
||||
contentFilter protocol.ContentFilter
|
||||
lastChecked time.Time
|
||||
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
}
|
||||
|
||||
func (c criteriaInterest) equals(other criteriaInterest) bool {
|
||||
if c.peerID != other.peerID {
|
||||
return false
|
||||
}
|
||||
|
||||
if c.contentFilter.PubsubTopic != other.contentFilter.PubsubTopic {
|
||||
return false
|
||||
}
|
||||
|
||||
contentTopics := c.contentFilter.ContentTopics.ToList()
|
||||
otherContentTopics := other.contentFilter.ContentTopics.ToList()
|
||||
|
||||
slices.Sort(contentTopics)
|
||||
slices.Sort(otherContentTopics)
|
||||
|
||||
if len(contentTopics) != len(otherContentTopics) {
|
||||
return false
|
||||
}
|
||||
|
||||
for i, contentTopic := range contentTopics {
|
||||
if contentTopic != otherContentTopics[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
302
waku/v2/api/missing/missing_messages.go
Normal file
@ -0,0 +1,302 @@
|
||||
package missing
|
||||
|
||||
// test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/waku-org/go-waku/logging"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/store"
|
||||
"github.com/waku-org/go-waku/waku/v2/timesource"
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
const maxContentTopicsPerRequest = 10
|
||||
const maxMsgHashesPerRequest = 50
|
||||
|
||||
// MessageTracker should keep track of messages it has seen before and
|
||||
// provide a way to determine whether a message exists or not. This
|
||||
// is application specific
|
||||
type MessageTracker interface {
|
||||
MessageExists(pb.MessageHash) (bool, error)
|
||||
}
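// Illustrative sketch (not part of this change): a minimal in-memory MessageTracker
// an application could provide, assuming pb.MessageHash is a fixed-size, comparable
// hash type. Real applications would typically back this with their message store.
type inMemoryTracker struct {
	mu   sync.RWMutex
	seen map[pb.MessageHash]struct{}
}

func (t *inMemoryTracker) MessageExists(hash pb.MessageHash) (bool, error) {
	t.mu.RLock()
	defer t.mu.RUnlock()
	_, ok := t.seen[hash]
	return ok, nil
}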
|
||||
|
||||
// MissingMessageVerifier periodically retrieves, from store nodes, missing messages that match registered criteria
|
||||
type MissingMessageVerifier struct {
|
||||
ctx context.Context
|
||||
params missingMessageVerifierParams
|
||||
|
||||
messageTracker MessageTracker
|
||||
|
||||
criteriaInterest map[string]criteriaInterest // Track message verification requests and the last time each pubsub topic was verified for missing messages
|
||||
criteriaInterestMu sync.Mutex
|
||||
|
||||
C <-chan *protocol.Envelope
|
||||
|
||||
store *store.WakuStore
|
||||
timesource timesource.Timesource
|
||||
logger *zap.Logger
|
||||
}
|
||||
|
||||
// NewMissingMessageVerifier creates an instance of a MissingMessageVerifier
|
||||
func NewMissingMessageVerifier(store *store.WakuStore, messageTracker MessageTracker, timesource timesource.Timesource, logger *zap.Logger, options ...MissingMessageVerifierOption) *MissingMessageVerifier {
|
||||
options = append(defaultMissingMessagesVerifierOptions, options...)
|
||||
params := missingMessageVerifierParams{}
|
||||
for _, opt := range options {
|
||||
opt(¶ms)
|
||||
}
|
||||
|
||||
return &MissingMessageVerifier{
|
||||
store: store,
|
||||
timesource: timesource,
|
||||
messageTracker: messageTracker,
|
||||
logger: logger.Named("missing-msg-verifier"),
|
||||
params: params,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MissingMessageVerifier) SetCriteriaInterest(peerID peer.ID, contentFilter protocol.ContentFilter) {
|
||||
m.criteriaInterestMu.Lock()
|
||||
defer m.criteriaInterestMu.Unlock()
|
||||
|
||||
ctx, cancel := context.WithCancel(m.ctx)
|
||||
criteriaInterest := criteriaInterest{
|
||||
peerID: peerID,
|
||||
contentFilter: contentFilter,
|
||||
lastChecked: m.timesource.Now().Add(-m.params.delay),
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
}
|
||||
|
||||
currMessageVerificationRequest, ok := m.criteriaInterest[contentFilter.PubsubTopic]
|
||||
|
||||
if ok && currMessageVerificationRequest.equals(criteriaInterest) {
|
||||
return
|
||||
}
|
||||
|
||||
if ok {
|
||||
// If there is an ongoing request, we cancel it before replacing it
|
||||
// by the new list. This can be probably optimized further by tracking
|
||||
// the last time a content topic was synced, but might not be necessary
|
||||
// since cancelling an ongoing request would mean cancelling just a single
|
||||
// page of results
|
||||
currMessageVerificationRequest.cancel()
|
||||
}
|
||||
|
||||
m.criteriaInterest[contentFilter.PubsubTopic] = criteriaInterest
|
||||
}
|
||||
|
||||
func (m *MissingMessageVerifier) Start(ctx context.Context) {
|
||||
m.ctx = ctx
|
||||
m.criteriaInterest = make(map[string]criteriaInterest)
|
||||
|
||||
c := make(chan *protocol.Envelope, 1000)
|
||||
m.C = c
|
||||
|
||||
go func() {
|
||||
t := time.NewTicker(m.params.interval)
|
||||
defer t.Stop()
|
||||
|
||||
var semaphore = make(chan struct{}, 5)
|
||||
for {
|
||||
select {
|
||||
case <-t.C:
|
||||
m.logger.Debug("checking for missing messages...")
|
||||
m.criteriaInterestMu.Lock()
|
||||
for _, interest := range m.criteriaInterest {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
semaphore <- struct{}{}
|
||||
go func(interest criteriaInterest) {
|
||||
m.fetchHistory(c, interest)
|
||||
<-semaphore
|
||||
}(interest)
|
||||
}
|
||||
}
|
||||
m.criteriaInterestMu.Unlock()
|
||||
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (m *MissingMessageVerifier) fetchHistory(c chan<- *protocol.Envelope, interest criteriaInterest) {
|
||||
contentTopics := interest.contentFilter.ContentTopics.ToList()
|
||||
for i := 0; i < len(contentTopics); i += maxContentTopicsPerRequest {
|
||||
j := i + maxContentTopicsPerRequest
|
||||
if j > len(contentTopics) {
|
||||
j = len(contentTopics)
|
||||
}
|
||||
|
||||
now := m.timesource.Now()
|
||||
err := m.fetchMessagesBatch(c, interest, i, j, now)
|
||||
if err != nil {
|
||||
if errors.Is(err, context.Canceled) {
|
||||
return
|
||||
}
|
||||
|
||||
m.logger.Error("could not fetch history",
|
||||
zap.Stringer("peerID", interest.peerID),
|
||||
zap.String("pubsubTopic", interest.contentFilter.PubsubTopic),
|
||||
zap.Strings("contentTopics", contentTopics))
|
||||
continue
|
||||
}
|
||||
|
||||
m.criteriaInterestMu.Lock()
|
||||
c := m.criteriaInterest[interest.contentFilter.PubsubTopic]
|
||||
if c.equals(interest) {
|
||||
c.lastChecked = now
|
||||
m.criteriaInterest[interest.contentFilter.PubsubTopic] = c
|
||||
}
|
||||
m.criteriaInterestMu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MissingMessageVerifier) storeQueryWithRetry(ctx context.Context, queryFunc func(ctx context.Context) (*store.Result, error), logger *zap.Logger, logMsg string) (*store.Result, error) {
|
||||
retry := true
|
||||
count := 1
|
||||
for retry && count <= m.params.maxAttemptsToRetrieveHistory {
|
||||
logger.Debug(logMsg, zap.Int("attempt", count))
|
||||
tCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
||||
result, err := queryFunc(tCtx)
|
||||
cancel()
|
||||
if err != nil {
|
||||
logger.Error("could not query storenode", zap.Error(err), zap.Int("attempt", count))
|
||||
select {
|
||||
case <-m.ctx.Done():
|
||||
return nil, m.ctx.Err()
|
||||
case <-time.After(2 * time.Second):
|
||||
}
|
||||
} else {
|
||||
return result, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("storenode not available")
|
||||
}
|
||||
|
||||
func (m *MissingMessageVerifier) fetchMessagesBatch(c chan<- *protocol.Envelope, interest criteriaInterest, batchFrom int, batchTo int, now time.Time) error {
|
||||
contentTopics := interest.contentFilter.ContentTopics.ToList()
|
||||
|
||||
logger := m.logger.With(
|
||||
zap.Stringer("peerID", interest.peerID),
|
||||
zap.Strings("contentTopics", contentTopics[batchFrom:batchTo]),
|
||||
zap.String("pubsubTopic", interest.contentFilter.PubsubTopic),
|
||||
logging.Epoch("from", interest.lastChecked),
|
||||
logging.Epoch("to", now),
|
||||
)
|
||||
|
||||
result, err := m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (*store.Result, error) {
|
||||
return m.store.Query(ctx, store.FilterCriteria{
|
||||
ContentFilter: protocol.NewContentFilter(interest.contentFilter.PubsubTopic, contentTopics[batchFrom:batchTo]...),
|
||||
TimeStart: proto.Int64(interest.lastChecked.Add(-m.params.delay).UnixNano()),
|
||||
TimeEnd: proto.Int64(now.Add(-m.params.delay).UnixNano()),
|
||||
}, store.WithPeer(interest.peerID), store.WithPaging(false, 100), store.IncludeData(false))
|
||||
}, logger, "retrieving history to check for missing messages")
|
||||
if err != nil {
|
||||
if !errors.Is(err, context.Canceled) {
|
||||
logger.Error("storenode not available", zap.Error(err))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
var missingHashes []pb.MessageHash
|
||||
|
||||
for !result.IsComplete() {
|
||||
for _, mkv := range result.Messages() {
|
||||
hash := pb.ToMessageHash(mkv.MessageHash)
|
||||
exists, err := m.messageTracker.MessageExists(hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if exists {
|
||||
continue
|
||||
}
|
||||
|
||||
missingHashes = append(missingHashes, hash)
|
||||
}
|
||||
|
||||
result, err = m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (*store.Result, error) {
|
||||
if err = result.Next(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}, logger.With(zap.String("cursor", hex.EncodeToString(result.Cursor()))), "retrieving next page")
|
||||
if err != nil {
|
||||
if !errors.Is(err, context.Canceled) {
|
||||
logger.Error("storenode not available", zap.Error(err))
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if len(missingHashes) == 0 {
|
||||
// Nothing to do here
|
||||
return nil
|
||||
}
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
// Split into batches
|
||||
for i := 0; i < len(missingHashes); i += maxMsgHashesPerRequest {
|
||||
j := i + maxMsgHashesPerRequest
|
||||
if j > len(missingHashes) {
|
||||
j = len(missingHashes)
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func(messageHashes []pb.MessageHash) {
|
||||
defer wg.Done() // signal completion of this batch goroutine; calling Wait here would deadlock
|
||||
|
||||
result, err = m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (*store.Result, error) {
|
||||
return m.store.QueryByHash(ctx, messageHashes, store.WithPeer(interest.peerID), store.WithPaging(false, maxMsgHashesPerRequest))
|
||||
}, logger, "retrieving missing messages")
|
||||
if err != nil {
|
||||
if !errors.Is(err, context.Canceled) {
|
||||
logger.Error("storenode not available", zap.Error(err))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
for !result.IsComplete() {
|
||||
for _, mkv := range result.Messages() {
|
||||
select {
|
||||
case c <- protocol.NewEnvelope(mkv.Message, mkv.Message.GetTimestamp(), mkv.GetPubsubTopic()):
|
||||
default:
|
||||
m.logger.Warn("subscriber is too slow!")
|
||||
}
|
||||
}
|
||||
|
||||
result, err = m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (*store.Result, error) {
|
||||
if err = result.Next(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}, logger.With(zap.String("cursor", hex.EncodeToString(result.Cursor()))), "retrieving next page")
|
||||
if err != nil {
|
||||
if !errors.Is(err, context.Canceled) {
|
||||
logger.Error("storenode not available", zap.Error(err))
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
}(missingHashes[i:j])
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
return nil
|
||||
}
|
||||
39
waku/v2/api/missing/options.go
Normal file
@ -0,0 +1,39 @@
|
||||
package missing
|
||||
|
||||
import "time"
|
||||
|
||||
type missingMessageVerifierParams struct {
|
||||
delay time.Duration
|
||||
interval time.Duration
|
||||
maxAttemptsToRetrieveHistory int
|
||||
}
|
||||
|
||||
// MissingMessageVerifierOption is an option that can be used to customize the MissingMessageVerifier behavior
|
||||
type MissingMessageVerifierOption func(*missingMessageVerifierParams)
|
||||
|
||||
// WithVerificationInterval is an option used to setup the verification interval
|
||||
func WithVerificationInterval(t time.Duration) MissingMessageVerifierOption {
|
||||
return func(params *missingMessageVerifierParams) {
|
||||
params.interval = t
|
||||
}
|
||||
}
|
||||
|
||||
// WithDelay is an option used to indicate the delay to apply for verifying messages
|
||||
func WithDelay(t time.Duration) MissingMessageVerifierOption {
|
||||
return func(params *missingMessageVerifierParams) {
|
||||
params.delay = t
|
||||
}
|
||||
}
|
||||
|
||||
// WithMaxRetryAttempts indicates how many times the message verifier will retry a failed storenode request
|
||||
func WithMaxRetryAttempts(max int) MissingMessageVerifierOption {
|
||||
return func(params *missingMessageVerifierParams) {
|
||||
params.maxAttemptsToRetrieveHistory = max
|
||||
}
|
||||
}
|
||||
|
||||
var defaultMissingMessagesVerifierOptions = []MissingMessageVerifierOption{
|
||||
WithVerificationInterval(time.Minute),
|
||||
WithDelay(20 * time.Second),
|
||||
WithMaxRetryAttempts(3),
|
||||
}
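// Usage sketch (illustrative, not part of this change): wire the verifier to a store
// node and consume recovered envelopes. Assumes the caller already has a *store.WakuStore,
// a timesource.Timesource, a storenode peer.ID, a MessageTracker implementation and a
// logger; the extra imports (context, peer, protocol, store, timesource, zap) are not
// part of this file, and the topics below are placeholders.
func exampleMissingMessageVerifier(ctx context.Context, wakuStore *store.WakuStore, ts timesource.Timesource, storenode peer.ID, tracker MessageTracker, logger *zap.Logger) {
	verifier := NewMissingMessageVerifier(wakuStore, tracker, ts, logger,
		WithVerificationInterval(30*time.Second), // check twice per minute instead of the default
		WithDelay(20*time.Second),
		WithMaxRetryAttempts(2),
	)
	verifier.Start(ctx) // must be called before SetCriteriaInterest

	verifier.SetCriteriaInterest(storenode, protocol.ContentFilter{
		PubsubTopic:   "/waku/2/rs/0/0",
		ContentTopics: protocol.NewContentTopicSet("/app/1/example/proto"),
	})

	go func() {
		for env := range verifier.C {
			logger.Info("recovered missing message", zap.String("contentTopic", env.Message().ContentTopic))
		}
	}()
}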
|
||||
9
waku/v2/api/publish/common.go
Normal file
@ -0,0 +1,9 @@
|
||||
package publish
|
||||
|
||||
import (
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// PublishFn represents a function that will publish a message.
|
||||
type PublishFn = func(envelope *protocol.Envelope, logger *zap.Logger) error
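// Illustrative sketch (not part of this change): a PublishFn that only logs, useful
// as a stand-in while wiring up a sender in tests. logOnlyPublish is a hypothetical name.
var logOnlyPublish PublishFn = func(envelope *protocol.Envelope, logger *zap.Logger) error {
	logger.Debug("dropping message instead of publishing",
		zap.String("pubsubTopic", envelope.PubsubTopic()),
		zap.String("contentTopic", envelope.Message().ContentTopic))
	return nil
}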
|
||||
255
waku/v2/api/publish/message_check.go
Normal file
@ -0,0 +1,255 @@
|
||||
package publish
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/store"
|
||||
"github.com/waku-org/go-waku/waku/v2/timesource"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const DefaultMaxHashQueryLength = 50
|
||||
const DefaultHashQueryInterval = 3 * time.Second
|
||||
const DefaultMessageSentPeriod = 3 // in seconds
|
||||
const DefaultMessageExpiredPerid = 10 // in seconds
|
||||
|
||||
type MessageSentCheckOption func(*MessageSentCheck) error
|
||||
|
||||
type ISentCheck interface {
|
||||
Start()
|
||||
Add(topic string, messageID common.Hash, sentTime uint32)
|
||||
DeleteByMessageIDs(messageIDs []common.Hash)
|
||||
SetStorePeerID(peerID peer.ID)
|
||||
}
|
||||
|
||||
// MessageSentCheck tracks outgoing messages and checks them against a store node.
// If a message's sent time has passed `messageSentPeriod`, its message id is included in the next query.
// If a message is still missing after `messageExpiredPerid`, the message id is marked as expired.
|
||||
type MessageSentCheck struct {
|
||||
messageIDs map[string]map[common.Hash]uint32
|
||||
messageIDsMu sync.RWMutex
|
||||
storePeerID peer.ID
|
||||
messageStoredChan chan common.Hash
|
||||
messageExpiredChan chan common.Hash
|
||||
ctx context.Context
|
||||
store *store.WakuStore
|
||||
timesource timesource.Timesource
|
||||
logger *zap.Logger
|
||||
maxHashQueryLength uint64
|
||||
hashQueryInterval time.Duration
|
||||
messageSentPeriod uint32
|
||||
messageExpiredPerid uint32
|
||||
}
|
||||
|
||||
// NewMessageSentCheck creates a new instance of MessageSentCheck with default parameters
|
||||
func NewMessageSentCheck(ctx context.Context, store *store.WakuStore, timesource timesource.Timesource, msgStoredChan chan common.Hash, msgExpiredChan chan common.Hash, logger *zap.Logger) *MessageSentCheck {
|
||||
return &MessageSentCheck{
|
||||
messageIDs: make(map[string]map[common.Hash]uint32),
|
||||
messageIDsMu: sync.RWMutex{},
|
||||
messageStoredChan: msgStoredChan,
|
||||
messageExpiredChan: msgExpiredChan,
|
||||
ctx: ctx,
|
||||
store: store,
|
||||
timesource: timesource,
|
||||
logger: logger,
|
||||
maxHashQueryLength: DefaultMaxHashQueryLength,
|
||||
hashQueryInterval: DefaultHashQueryInterval,
|
||||
messageSentPeriod: DefaultMessageSentPeriod,
|
||||
messageExpiredPerid: DefaultMessageExpiredPerid,
|
||||
}
|
||||
}
|
||||
|
||||
// WithMaxHashQueryLength sets the maximum number of message hashes to query in one request
|
||||
func WithMaxHashQueryLength(count uint64) MessageSentCheckOption {
|
||||
return func(params *MessageSentCheck) error {
|
||||
params.maxHashQueryLength = count
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithHashQueryInterval sets the interval to query the store node
|
||||
func WithHashQueryInterval(interval time.Duration) MessageSentCheckOption {
|
||||
return func(params *MessageSentCheck) error {
|
||||
params.hashQueryInterval = interval
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithMessageSentPeriod sets the delay period before querying the store node after a message is published
|
||||
func WithMessageSentPeriod(period uint32) MessageSentCheckOption {
|
||||
return func(params *MessageSentCheck) error {
|
||||
params.messageSentPeriod = period
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithMessageExpiredPerid sets the period after which a missing message is considered expired
|
||||
func WithMessageExpiredPerid(period uint32) MessageSentCheckOption {
|
||||
return func(params *MessageSentCheck) error {
|
||||
params.messageExpiredPerid = period
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Add adds a message for message sent check
|
||||
func (m *MessageSentCheck) Add(topic string, messageID common.Hash, sentTime uint32) {
|
||||
m.messageIDsMu.Lock()
|
||||
defer m.messageIDsMu.Unlock()
|
||||
|
||||
if _, ok := m.messageIDs[topic]; !ok {
|
||||
m.messageIDs[topic] = make(map[common.Hash]uint32)
|
||||
}
|
||||
m.messageIDs[topic][messageID] = sentTime
|
||||
}
|
||||
|
||||
// DeleteByMessageIDs deletes the message ids from the message sent check, used by scenarios like message acked with MVDS
|
||||
func (m *MessageSentCheck) DeleteByMessageIDs(messageIDs []common.Hash) {
|
||||
m.messageIDsMu.Lock()
|
||||
defer m.messageIDsMu.Unlock()
|
||||
|
||||
for pubsubTopic, subMsgs := range m.messageIDs {
|
||||
for _, hash := range messageIDs {
|
||||
delete(subMsgs, hash)
|
||||
if len(subMsgs) == 0 {
|
||||
delete(m.messageIDs, pubsubTopic)
|
||||
} else {
|
||||
m.messageIDs[pubsubTopic] = subMsgs
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetStorePeerID sets the peer id of store node
|
||||
func (m *MessageSentCheck) SetStorePeerID(peerID peer.ID) {
|
||||
m.storePeerID = peerID
|
||||
}
|
||||
|
||||
// Start periodically checks whether the tracked outgoing messages have been stored
|
||||
func (m *MessageSentCheck) Start() {
|
||||
ticker := time.NewTicker(m.hashQueryInterval)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-m.ctx.Done():
|
||||
m.logger.Debug("stop the look for message stored check")
|
||||
return
|
||||
case <-ticker.C:
|
||||
m.messageIDsMu.Lock()
|
||||
m.logger.Debug("running loop for messages stored check", zap.Any("messageIds", m.messageIDs))
|
||||
pubsubTopics := make([]string, 0, len(m.messageIDs))
|
||||
pubsubMessageIds := make([][]common.Hash, 0, len(m.messageIDs))
|
||||
pubsubMessageTime := make([][]uint32, 0, len(m.messageIDs))
|
||||
for pubsubTopic, subMsgs := range m.messageIDs {
|
||||
var queryMsgIds []common.Hash
|
||||
var queryMsgTime []uint32
|
||||
for msgID, sendTime := range subMsgs {
|
||||
if uint64(len(queryMsgIds)) >= m.maxHashQueryLength {
|
||||
break
|
||||
}
|
||||
// the message was sent more than messageSentPeriod seconds ago, so check whether it has been stored
|
||||
if uint32(m.timesource.Now().Unix()) > sendTime+m.messageSentPeriod {
|
||||
queryMsgIds = append(queryMsgIds, msgID)
|
||||
queryMsgTime = append(queryMsgTime, sendTime)
|
||||
}
|
||||
}
|
||||
m.logger.Debug("store query for message hashes", zap.Any("queryMsgIds", queryMsgIds), zap.String("pubsubTopic", pubsubTopic))
|
||||
if len(queryMsgIds) > 0 {
|
||||
pubsubTopics = append(pubsubTopics, pubsubTopic)
|
||||
pubsubMessageIds = append(pubsubMessageIds, queryMsgIds)
|
||||
pubsubMessageTime = append(pubsubMessageTime, queryMsgTime)
|
||||
}
|
||||
}
|
||||
m.messageIDsMu.Unlock()
|
||||
|
||||
pubsubProcessedMessages := make([][]common.Hash, len(pubsubTopics))
|
||||
for i, pubsubTopic := range pubsubTopics {
|
||||
processedMessages := m.messageHashBasedQuery(m.ctx, pubsubMessageIds[i], pubsubMessageTime[i], pubsubTopic)
|
||||
pubsubProcessedMessages[i] = processedMessages
|
||||
}
|
||||
|
||||
m.messageIDsMu.Lock()
|
||||
for i, pubsubTopic := range pubsubTopics {
|
||||
subMsgs, ok := m.messageIDs[pubsubTopic]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
for _, hash := range pubsubProcessedMessages[i] {
|
||||
delete(subMsgs, hash)
|
||||
if len(subMsgs) == 0 {
|
||||
delete(m.messageIDs, pubsubTopic)
|
||||
} else {
|
||||
m.messageIDs[pubsubTopic] = subMsgs
|
||||
}
|
||||
}
|
||||
}
|
||||
m.logger.Debug("messages for next store hash query", zap.Any("messageIds", m.messageIDs))
|
||||
m.messageIDsMu.Unlock()
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MessageSentCheck) messageHashBasedQuery(ctx context.Context, hashes []common.Hash, relayTime []uint32, pubsubTopic string) []common.Hash {
|
||||
selectedPeer := m.storePeerID
|
||||
if selectedPeer == "" {
|
||||
m.logger.Error("no store peer id available", zap.String("pubsubTopic", pubsubTopic))
|
||||
return []common.Hash{}
|
||||
}
|
||||
|
||||
var opts []store.RequestOption
|
||||
requestID := protocol.GenerateRequestID()
|
||||
opts = append(opts, store.WithRequestID(requestID))
|
||||
opts = append(opts, store.WithPeer(selectedPeer))
|
||||
opts = append(opts, store.WithPaging(false, m.maxHashQueryLength))
|
||||
opts = append(opts, store.IncludeData(false))
|
||||
|
||||
messageHashes := make([]pb.MessageHash, len(hashes))
|
||||
for i, hash := range hashes {
|
||||
messageHashes[i] = pb.ToMessageHash(hash.Bytes())
|
||||
}
|
||||
|
||||
m.logger.Debug("store.queryByHash request", zap.String("requestID", hexutil.Encode(requestID)), zap.Stringer("peerID", selectedPeer), zap.Stringers("messageHashes", messageHashes))
|
||||
|
||||
result, err := m.store.QueryByHash(ctx, messageHashes, opts...)
|
||||
if err != nil {
|
||||
m.logger.Error("store.queryByHash failed", zap.String("requestID", hexutil.Encode(requestID)), zap.Stringer("peerID", selectedPeer), zap.Error(err))
|
||||
return []common.Hash{}
|
||||
}
|
||||
|
||||
m.logger.Debug("store.queryByHash result", zap.String("requestID", hexutil.Encode(requestID)), zap.Int("messages", len(result.Messages())))
|
||||
|
||||
var ackHashes []common.Hash
|
||||
var missedHashes []common.Hash
|
||||
for i, hash := range hashes {
|
||||
found := false
|
||||
for _, msg := range result.Messages() {
|
||||
if bytes.Equal(msg.GetMessageHash(), hash.Bytes()) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if found {
|
||||
ackHashes = append(ackHashes, hash)
|
||||
m.messageStoredChan <- hash
|
||||
}
|
||||
|
||||
if !found && uint32(m.timesource.Now().Unix()) > relayTime[i]+m.messageExpiredPerid {
|
||||
missedHashes = append(missedHashes, hash)
|
||||
m.messageExpiredChan <- hash
|
||||
}
|
||||
}
|
||||
|
||||
m.logger.Debug("ack message hashes", zap.Stringers("ackHashes", ackHashes))
|
||||
m.logger.Debug("missed message hashes", zap.Stringers("missedHashes", missedHashes))
|
||||
|
||||
return append(ackHashes, missedHashes...)
|
||||
}
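// Usage sketch (illustrative, not part of this change): track an outgoing message and
// react when the store node confirms it (or never stores it). The topic, hash and peer
// values below are hypothetical placeholders.
func exampleMessageSentCheck(ctx context.Context, wakuStore *store.WakuStore, ts timesource.Timesource, storenode peer.ID, logger *zap.Logger) {
	storedCh := make(chan common.Hash, 16)
	expiredCh := make(chan common.Hash, 16)

	check := NewMessageSentCheck(ctx, wakuStore, ts, storedCh, expiredCh, logger)
	check.SetStorePeerID(storenode)
	go check.Start()

	msgHash := common.Hash{0x11, 0x22} // hash of a message just published
	check.Add("/waku/2/rs/0/0", msgHash, uint32(ts.Now().Unix()))

	select {
	case h := <-storedCh:
		logger.Info("message confirmed by store node", zap.Stringer("hash", h))
	case h := <-expiredCh:
		logger.Warn("message never reached the store node", zap.Stringer("hash", h))
	case <-ctx.Done():
	}
}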
|
||||
33
waku/v2/api/publish/message_check_test.go
Normal file
@ -0,0 +1,33 @@
|
||||
package publish
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAddAndDelete(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
messageSentCheck := NewMessageSentCheck(ctx, nil, nil, nil, nil, nil)
|
||||
|
||||
messageSentCheck.Add("topic", [32]byte{1}, 1)
|
||||
messageSentCheck.Add("topic", [32]byte{2}, 2)
|
||||
messageSentCheck.Add("topic", [32]byte{3}, 3)
|
||||
messageSentCheck.Add("another-topic", [32]byte{4}, 4)
|
||||
|
||||
require.Equal(t, uint32(1), messageSentCheck.messageIDs["topic"][[32]byte{1}])
|
||||
require.Equal(t, uint32(2), messageSentCheck.messageIDs["topic"][[32]byte{2}])
|
||||
require.Equal(t, uint32(3), messageSentCheck.messageIDs["topic"][[32]byte{3}])
|
||||
require.Equal(t, uint32(4), messageSentCheck.messageIDs["another-topic"][[32]byte{4}])
|
||||
|
||||
messageSentCheck.DeleteByMessageIDs([]common.Hash{[32]byte{1}, [32]byte{2}})
|
||||
require.NotNil(t, messageSentCheck.messageIDs["topic"])
|
||||
require.Equal(t, uint32(3), messageSentCheck.messageIDs["topic"][[32]byte{3}])
|
||||
|
||||
messageSentCheck.DeleteByMessageIDs([]common.Hash{[32]byte{3}})
|
||||
require.Nil(t, messageSentCheck.messageIDs["topic"])
|
||||
|
||||
require.Equal(t, uint32(4), messageSentCheck.messageIDs["another-topic"][[32]byte{4}])
|
||||
}
|
||||
168
waku/v2/api/publish/message_queue.go
Normal file
@ -0,0 +1,168 @@
|
||||
package publish
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"context"
|
||||
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
)
|
||||
|
||||
// MessagePriority determines the ordering for the message priority queue
|
||||
type MessagePriority = int
|
||||
|
||||
const (
|
||||
LowPriority MessagePriority = 1
|
||||
NormalPriority MessagePriority = 2
|
||||
HighPriority MessagePriority = 3
|
||||
)
|
||||
|
||||
type envelopePriority struct {
|
||||
envelope *protocol.Envelope
|
||||
priority int
|
||||
index int
|
||||
}
|
||||
|
||||
type envelopePriorityQueue []*envelopePriority
|
||||
|
||||
func (pq envelopePriorityQueue) Len() int { return len(pq) }
|
||||
|
||||
func (pq envelopePriorityQueue) Less(i, j int) bool {
|
||||
if pq[i].priority > pq[j].priority {
|
||||
return true
|
||||
} else if pq[i].priority == pq[j].priority {
|
||||
return pq[i].envelope.Message().GetTimestamp() < pq[j].envelope.Message().GetTimestamp()
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (pq envelopePriorityQueue) Swap(i, j int) {
|
||||
pq[i], pq[j] = pq[j], pq[i]
|
||||
pq[i].index = i
|
||||
pq[j].index = j
|
||||
}
|
||||
|
||||
func (pq *envelopePriorityQueue) Push(x any) {
|
||||
n := len(*pq)
|
||||
item := x.(*envelopePriority)
|
||||
item.index = n
|
||||
*pq = append(*pq, item)
|
||||
}
|
||||
|
||||
func (pq *envelopePriorityQueue) Pop() any {
|
||||
old := *pq
|
||||
n := len(old)
|
||||
item := old[n-1]
|
||||
old[n-1] = nil // avoid memory leak
|
||||
item.index = -1 // for safety
|
||||
*pq = old[0 : n-1]
|
||||
return item
|
||||
}
|
||||
|
||||
// MessageQueue is a structure used to handle the ordering of the messages to publish
|
||||
type MessageQueue struct {
|
||||
usePriorityQueue bool
|
||||
|
||||
toSendChan chan *protocol.Envelope
|
||||
throttledPrioritySendQueue chan *envelopePriority
|
||||
envelopeAvailableOnPriorityQueueSignal chan struct{}
|
||||
envelopePriorityQueue envelopePriorityQueue
|
||||
}
|
||||
|
||||
// NewMessageQueue returns a new instance of MessageQueue. The MessageQueue can internally use a
|
||||
// priority queue to handle the ordering of the messages, or use a simple FIFO queue.
|
||||
func NewMessageQueue(bufferSize int, usePriorityQueue bool) *MessageQueue {
|
||||
m := &MessageQueue{
|
||||
usePriorityQueue: usePriorityQueue,
|
||||
}
|
||||
|
||||
if m.usePriorityQueue {
|
||||
m.envelopePriorityQueue = make(envelopePriorityQueue, 0)
|
||||
m.throttledPrioritySendQueue = make(chan *envelopePriority, bufferSize)
|
||||
m.envelopeAvailableOnPriorityQueueSignal = make(chan struct{}, bufferSize)
|
||||
heap.Init(&m.envelopePriorityQueue)
|
||||
} else {
|
||||
m.toSendChan = make(chan *protocol.Envelope, bufferSize)
|
||||
}
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
// Start must be called to handle the lifetime of the internals of the message queue
|
||||
func (m *MessageQueue) Start(ctx context.Context) {
|
||||
|
||||
for {
|
||||
select {
|
||||
case envelopePriority, ok := <-m.throttledPrioritySendQueue:
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
heap.Push(&m.envelopePriorityQueue, envelopePriority)
|
||||
|
||||
m.envelopeAvailableOnPriorityQueueSignal <- struct{}{}
|
||||
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Push an envelope into the message queue. The priority is optional, and will be ignored
|
||||
// if the message queue does not use a priority queue
|
||||
func (m *MessageQueue) Push(ctx context.Context, envelope *protocol.Envelope, priority ...MessagePriority) error {
|
||||
if m.usePriorityQueue {
|
||||
msgPriority := NormalPriority
|
||||
if len(priority) != 0 {
|
||||
msgPriority = priority[0]
|
||||
}
|
||||
|
||||
pEnvelope := &envelopePriority{
|
||||
envelope: envelope,
|
||||
priority: msgPriority,
|
||||
}
|
||||
|
||||
select {
|
||||
case m.throttledPrioritySendQueue <- pEnvelope:
|
||||
// Do nothing
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
} else {
|
||||
select {
|
||||
case m.toSendChan <- envelope:
|
||||
// Do nothing
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Pop will return a channel on which a message can be retrieved from the message queue
|
||||
func (m *MessageQueue) Pop(ctx context.Context) <-chan *protocol.Envelope {
|
||||
ch := make(chan *protocol.Envelope)
|
||||
|
||||
go func() {
|
||||
defer close(ch)
|
||||
|
||||
select {
|
||||
case _, ok := <-m.envelopeAvailableOnPriorityQueueSignal:
|
||||
if ok {
|
||||
ch <- heap.Pop(&m.envelopePriorityQueue).(*envelopePriority).envelope
|
||||
}
|
||||
|
||||
case envelope, ok := <-m.toSendChan:
|
||||
if ok {
|
||||
ch <- envelope
|
||||
}
|
||||
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
|
||||
}()
|
||||
|
||||
return ch
|
||||
}
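// Usage sketch (illustrative, not part of this change): a producer pushes prioritised
// envelopes while a consumer drains the queue; Pop yields the highest-priority envelope
// available. The envelopes channel is a hypothetical source of messages to publish.
func exampleMessageQueue(ctx context.Context, envelopes <-chan *protocol.Envelope) {
	queue := NewMessageQueue(100, true) // priority queue with a buffer of 100
	go queue.Start(ctx)

	// Producer: urgent messages jump ahead of the default NormalPriority.
	go func() {
		for env := range envelopes {
			_ = queue.Push(ctx, env, HighPriority)
		}
	}()

	// Consumer: Pop returns a channel that yields at most one envelope.
	for {
		env, ok := <-queue.Pop(ctx)
		if !ok {
			return // context cancelled
		}
		_ = env // hand the envelope to the actual publisher here
	}
}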
|
||||
109
waku/v2/api/publish/message_queue_test.go
Normal file
@ -0,0 +1,109 @@
package publish

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"github.com/waku-org/go-waku/waku/v2/protocol"
	"github.com/waku-org/go-waku/waku/v2/protocol/pb"
	"google.golang.org/protobuf/proto"
)

func TestFifoQueue(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())

	queue := NewMessageQueue(10, false)
	go queue.Start(ctx)

	err := queue.Push(ctx, protocol.NewEnvelope(&pb.WakuMessage{}, 0, "A"))
	require.NoError(t, err)

	err = queue.Push(ctx, protocol.NewEnvelope(&pb.WakuMessage{}, 0, "B"))
	require.NoError(t, err)

	err = queue.Push(ctx, protocol.NewEnvelope(&pb.WakuMessage{}, 0, "C"))
	require.NoError(t, err)

	envelope, ok := <-queue.Pop(ctx)
	require.True(t, ok)
	require.Equal(t, "A", envelope.PubsubTopic())

	envelope, ok = <-queue.Pop(ctx)
	require.True(t, ok)
	require.Equal(t, "B", envelope.PubsubTopic())

	envelope, ok = <-queue.Pop(ctx)
	require.True(t, ok)
	require.Equal(t, "C", envelope.PubsubTopic())

	cancel()

	_, ok = <-queue.Pop(ctx)
	require.False(t, ok)
}

func TestPriorityQueue(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())

	queue := NewMessageQueue(10, true)
	go queue.Start(ctx)

	err := queue.Push(ctx, protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(0)}, 0, "A"), LowPriority)
	require.NoError(t, err)

	err = queue.Push(ctx, protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(1)}, 0, "B"), LowPriority)
	require.NoError(t, err)

	err = queue.Push(ctx, protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(2)}, 0, "C"), HighPriority)
	require.NoError(t, err)

	err = queue.Push(ctx, protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(3)}, 0, "D"), NormalPriority)
	require.NoError(t, err)

	err = queue.Push(ctx, protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(4)}, 0, "E"), HighPriority)
	require.NoError(t, err)

	err = queue.Push(ctx, protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(5)}, 0, "F"), LowPriority)
	require.NoError(t, err)

	err = queue.Push(ctx, protocol.NewEnvelope(&pb.WakuMessage{Timestamp: proto.Int64(6)}, 0, "G"), NormalPriority)
	require.NoError(t, err)

	time.Sleep(2 * time.Second)

	envelope, ok := <-queue.Pop(ctx)
	require.True(t, ok)
	require.Equal(t, "C", envelope.PubsubTopic())

	envelope, ok = <-queue.Pop(ctx)
	require.True(t, ok)
	require.Equal(t, "E", envelope.PubsubTopic())

	envelope, ok = <-queue.Pop(ctx)
	require.True(t, ok)
	require.Equal(t, "D", envelope.PubsubTopic())

	envelope, ok = <-queue.Pop(ctx)
	require.True(t, ok)
	require.Equal(t, "G", envelope.PubsubTopic())

	envelope, ok = <-queue.Pop(ctx)
	require.True(t, ok)
	require.Equal(t, "A", envelope.PubsubTopic())

	envelope, ok = <-queue.Pop(ctx)
	require.True(t, ok)
	require.Equal(t, "B", envelope.PubsubTopic())

	envelope, ok = <-queue.Pop(ctx)
	require.True(t, ok)
	require.Equal(t, "F", envelope.PubsubTopic())

	cancel()

	_, ok = <-queue.Pop(ctx)
	require.False(t, ok)

}
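The expected order in TestPriorityQueue (C, E, D, G, A, B, F) comes from draining higher-priority messages first and, within the same priority, oldest first. The heap comparison itself is not part of this diff; a Less function consistent with that ordering would look roughly like the sketch below (the slice type, the field access and the assumption that HighPriority has the largest numeric value are all illustrative):

// Sketch only: the real envelopePriorityQueue and its Less method are defined elsewhere in this package.
func (pq envelopePriorityQueue) Less(i, j int) bool {
	if pq[i].priority != pq[j].priority {
		// Assumes HighPriority > NormalPriority > LowPriority numerically.
		return pq[i].priority > pq[j].priority
	}
	// Same priority class: deliver older messages first (FIFO, approximated here by timestamp).
	return pq[i].envelope.Message().GetTimestamp() < pq[j].envelope.Message().GetTimestamp()
}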
170
waku/v2/api/publish/message_sender.go
Normal file
@ -0,0 +1,170 @@
package publish

import (
	"context"
	"errors"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/waku-org/go-waku/waku/v2/protocol"
	"github.com/waku-org/go-waku/waku/v2/protocol/lightpush"
	"github.com/waku-org/go-waku/waku/v2/protocol/relay"
	"go.uber.org/zap"
	"golang.org/x/time/rate"
)

const DefaultPeersToPublishForLightpush = 2
const DefaultPublishingLimiterRate = rate.Limit(2)
const DefaultPublishingLimitBurst = 4

type PublishMethod int

const (
	LightPush PublishMethod = iota
	Relay
	UnknownMethod
)

func (pm PublishMethod) String() string {
	switch pm {
	case LightPush:
		return "LightPush"
	case Relay:
		return "Relay"
	default:
		return "Unknown"
	}
}

type MessageSender struct {
	publishMethod    PublishMethod
	lightPush        *lightpush.WakuLightPush
	relay            *relay.WakuRelay
	messageSentCheck ISentCheck
	rateLimiter      *PublishRateLimiter
	logger           *zap.Logger
}

type Request struct {
	ctx           context.Context
	envelope      *protocol.Envelope
	publishMethod PublishMethod
}

func NewRequest(ctx context.Context, envelope *protocol.Envelope) *Request {
	return &Request{
		ctx:           ctx,
		envelope:      envelope,
		publishMethod: UnknownMethod,
	}
}

func (r *Request) WithPublishMethod(publishMethod PublishMethod) *Request {
	r.publishMethod = publishMethod
	return r
}

func NewMessageSender(publishMethod PublishMethod, lightPush *lightpush.WakuLightPush, relay *relay.WakuRelay, logger *zap.Logger) (*MessageSender, error) {
	if publishMethod == UnknownMethod {
		return nil, errors.New("publish method is required")
	}
	return &MessageSender{
		publishMethod: publishMethod,
		lightPush:     lightPush,
		relay:         relay,
		rateLimiter:   NewPublishRateLimiter(DefaultPublishingLimiterRate, DefaultPublishingLimitBurst),
		logger:        logger,
	}, nil
}

func (ms *MessageSender) WithMessageSentCheck(messageSentCheck ISentCheck) *MessageSender {
	ms.messageSentCheck = messageSentCheck
	return ms
}

func (ms *MessageSender) WithRateLimiting(rateLimiter *PublishRateLimiter) *MessageSender {
	ms.rateLimiter = rateLimiter
	return ms
}

func (ms *MessageSender) Send(req *Request) error {
	logger := ms.logger.With(
		zap.Stringer("envelopeHash", req.envelope.Hash()),
		zap.String("pubsubTopic", req.envelope.PubsubTopic()),
		zap.String("contentTopic", req.envelope.Message().ContentTopic),
		zap.Int64("timestamp", req.envelope.Message().GetTimestamp()),
	)

	if ms.rateLimiter != nil {
		if err := ms.rateLimiter.Check(req.ctx, logger); err != nil {
			return err
		}
	}

	publishMethod := req.publishMethod
	if publishMethod == UnknownMethod {
		publishMethod = ms.publishMethod
	}

	switch publishMethod {
	case LightPush:
		if ms.lightPush == nil {
			return errors.New("lightpush is not available")
		}
		logger.Info("publishing message via lightpush")
		_, err := ms.lightPush.Publish(
			req.ctx,
			req.envelope.Message(),
			lightpush.WithPubSubTopic(req.envelope.PubsubTopic()),
			lightpush.WithMaxPeers(DefaultPeersToPublishForLightpush),
		)
		if err != nil {
			return err
		}
	case Relay:
		if ms.relay == nil {
			return errors.New("relay is not available")
		}
		peerCnt := len(ms.relay.PubSub().ListPeers(req.envelope.PubsubTopic()))
		logger.Info("publishing message via relay", zap.Int("peerCnt", peerCnt))
		_, err := ms.relay.Publish(req.ctx, req.envelope.Message(), relay.WithPubSubTopic(req.envelope.PubsubTopic()))
		if err != nil {
			return err
		}
	default:
		return errors.New("unknown publish method")
	}

	if ms.messageSentCheck != nil && !req.envelope.Message().GetEphemeral() {
		ms.messageSentCheck.Add(
			req.envelope.PubsubTopic(),
			common.BytesToHash(req.envelope.Hash().Bytes()),
			uint32(req.envelope.Message().GetTimestamp()/int64(time.Second)),
		)
	}

	return nil
}

func (ms *MessageSender) Start() {
	if ms.messageSentCheck != nil {
		go ms.messageSentCheck.Start()
	}
}

func (ms *MessageSender) PublishMethod() PublishMethod {
	return ms.publishMethod
}

func (ms *MessageSender) MessagesDelivered(messageIDs []common.Hash) {
	if ms.messageSentCheck != nil {
		ms.messageSentCheck.DeleteByMessageIDs(messageIDs)
	}
}

func (ms *MessageSender) SetStorePeerID(peerID peer.ID) {
	if ms.messageSentCheck != nil {
		ms.messageSentCheck.SetStorePeerID(peerID)
	}
}
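A minimal wiring sketch for the sender above (the lightpush instance and the sent-message check are placeholders; only the MessageSender and Request API shown above is assumed):

// Illustrative only: a lightpush-based sender with rate limiting and a sent-message check.
func exampleSend(ctx context.Context, lp *lightpush.WakuLightPush, check ISentCheck, env *protocol.Envelope, logger *zap.Logger) error {
	sender, err := NewMessageSender(LightPush, lp, nil, logger)
	if err != nil {
		return err
	}
	sender.WithMessageSentCheck(check).
		WithRateLimiting(NewPublishRateLimiter(DefaultPublishingLimiterRate, DefaultPublishingLimitBurst))
	sender.Start() // starts the sent-message check loop when one is configured

	// The per-request publish method overrides the sender default when set.
	req := NewRequest(ctx, env).WithPublishMethod(LightPush)
	return sender.Send(req)
}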
123
waku/v2/api/publish/message_sender_test.go
Normal file
@ -0,0 +1,123 @@
|
||||
package publish
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/waku-org/go-waku/tests"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
|
||||
"github.com/waku-org/go-waku/waku/v2/timesource"
|
||||
"github.com/waku-org/go-waku/waku/v2/utils"
|
||||
)
|
||||
|
||||
type MockMessageSentCheck struct {
|
||||
Messages map[string]map[common.Hash]uint32
|
||||
}
|
||||
|
||||
func (m *MockMessageSentCheck) Add(topic string, messageID common.Hash, time uint32) {
|
||||
if m.Messages[topic] == nil {
|
||||
m.Messages[topic] = make(map[common.Hash]uint32)
|
||||
}
|
||||
m.Messages[topic][messageID] = time
|
||||
}
|
||||
|
||||
func (m *MockMessageSentCheck) DeleteByMessageIDs(messageIDs []common.Hash) {
|
||||
}
|
||||
|
||||
func (m *MockMessageSentCheck) SetStorePeerID(peerID peer.ID) {
|
||||
}
|
||||
|
||||
func (m *MockMessageSentCheck) Start() {
|
||||
}
|
||||
|
||||
func TestNewSenderWithUnknownMethod(t *testing.T) {
|
||||
sender, err := NewMessageSender(UnknownMethod, nil, nil, nil)
|
||||
require.NotNil(t, err)
|
||||
require.Nil(t, sender)
|
||||
}
|
||||
|
||||
func TestNewSenderWithRelay(t *testing.T) {
|
||||
_, relayNode := createRelayNode(t)
|
||||
err := relayNode.Start(context.Background())
|
||||
require.Nil(t, err)
|
||||
defer relayNode.Stop()
|
||||
sender, err := NewMessageSender(Relay, nil, relayNode, utils.Logger())
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, sender)
|
||||
require.Nil(t, sender.messageSentCheck)
|
||||
require.Equal(t, Relay, sender.publishMethod)
|
||||
|
||||
msg := &pb.WakuMessage{
|
||||
Payload: []byte{1, 2, 3},
|
||||
Timestamp: utils.GetUnixEpoch(),
|
||||
ContentTopic: "test-content-topic",
|
||||
}
|
||||
envelope := protocol.NewEnvelope(msg, *utils.GetUnixEpoch(), "test-pubsub-topic")
|
||||
req := NewRequest(context.TODO(), envelope)
|
||||
err = sender.Send(req)
|
||||
require.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestNewSenderWithRelayAndMessageSentCheck(t *testing.T) {
|
||||
_, relayNode := createRelayNode(t)
|
||||
err := relayNode.Start(context.Background())
|
||||
require.Nil(t, err)
|
||||
defer relayNode.Stop()
|
||||
sender, err := NewMessageSender(Relay, nil, relayNode, utils.Logger())
|
||||
|
||||
check := &MockMessageSentCheck{Messages: make(map[string]map[common.Hash]uint32)}
|
||||
sender.WithMessageSentCheck(check)
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, sender)
|
||||
require.NotNil(t, sender.messageSentCheck)
|
||||
require.Equal(t, Relay, sender.publishMethod)
|
||||
|
||||
msg := &pb.WakuMessage{
|
||||
Payload: []byte{1, 2, 3},
|
||||
Timestamp: utils.GetUnixEpoch(),
|
||||
ContentTopic: "test-content-topic",
|
||||
}
|
||||
envelope := protocol.NewEnvelope(msg, *utils.GetUnixEpoch(), "test-pubsub-topic")
|
||||
req := NewRequest(context.TODO(), envelope)
|
||||
|
||||
require.Equal(t, 0, len(check.Messages))
|
||||
|
||||
err = sender.Send(req)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 1, len(check.Messages))
|
||||
require.Equal(
|
||||
t,
|
||||
uint32(msg.GetTimestamp()/int64(time.Second)),
|
||||
check.Messages["test-pubsub-topic"][common.BytesToHash(envelope.Hash().Bytes())],
|
||||
)
|
||||
}
|
||||
|
||||
func TestNewSenderWithLightPush(t *testing.T) {
|
||||
sender, err := NewMessageSender(LightPush, nil, nil, nil)
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, sender)
|
||||
require.Equal(t, LightPush, sender.publishMethod)
|
||||
}
|
||||
|
||||
func createRelayNode(t *testing.T) (host.Host, *relay.WakuRelay) {
|
||||
port, err := tests.FindFreePort(t, "", 5)
|
||||
require.NoError(t, err)
|
||||
host, err := tests.MakeHost(context.Background(), port, rand.Reader)
|
||||
require.NoError(t, err)
|
||||
bcaster := relay.NewBroadcaster(10)
|
||||
relay := relay.NewWakuRelay(bcaster, 0, timesource.NewDefaultClock(), prometheus.DefaultRegisterer, utils.Logger())
|
||||
relay.SetHost(host)
|
||||
err = bcaster.Start(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
return host, relay
|
||||
}
|
||||
44
waku/v2/api/publish/rate_limiting.go
Normal file
@ -0,0 +1,44 @@
package publish

import (
	"context"
	"errors"

	"github.com/waku-org/go-waku/waku/v2/protocol"
	"go.uber.org/zap"
	"golang.org/x/time/rate"
)

// PublishRateLimiter is used to decorate publish functions to limit the
// number of messages per second that can be published
type PublishRateLimiter struct {
	limiter *rate.Limiter
}

// NewPublishRateLimiter will create a new instance of PublishRateLimiter.
// You can specify a rate.Inf value to effectively disable rate limiting
func NewPublishRateLimiter(r rate.Limit, b int) *PublishRateLimiter {
	return &PublishRateLimiter{
		limiter: rate.NewLimiter(r, b),
	}
}

// ThrottlePublishFn is used to decorate a PublishFn so rate limiting is applied
func (p *PublishRateLimiter) ThrottlePublishFn(ctx context.Context, publishFn PublishFn) PublishFn {
	return func(envelope *protocol.Envelope, logger *zap.Logger) error {
		if err := p.Check(ctx, logger); err != nil {
			return err
		}
		return publishFn(envelope, logger)
	}
}

// Check blocks until the limiter allows another publish or the context is cancelled
func (p *PublishRateLimiter) Check(ctx context.Context, logger *zap.Logger) error {
	if err := p.limiter.Wait(ctx); err != nil {
		if !errors.Is(err, context.Canceled) {
			logger.Error("could not send message (limiter)", zap.Error(err))
		}
		return err
	}
	return nil
}
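A short sketch of the decorator in use (PublishFn itself is defined elsewhere in this package; here it is only assumed to match the signature used by ThrottlePublishFn above):

// Illustrative only: wrap a publish function so it is limited to 2 msg/s with a burst of 4.
func examplePublishWithLimit(ctx context.Context, envelope *protocol.Envelope, logger *zap.Logger) error {
	limiter := NewPublishRateLimiter(rate.Limit(2), 4)
	publish := func(env *protocol.Envelope, log *zap.Logger) error {
		// ... the actual relay/lightpush publish would go here ...
		return nil
	}
	throttled := limiter.ThrottlePublishFn(ctx, publish)
	return throttled(envelope, logger)
}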
36
waku/v2/api/publish/rate_limiting_test.go
Normal file
@ -0,0 +1,36 @@
package publish

import (
	"context"
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"github.com/waku-org/go-waku/waku/v2/protocol"
	"github.com/waku-org/go-waku/waku/v2/utils"
	"go.uber.org/zap"
	"golang.org/x/time/rate"
)

func TestRateLimit(t *testing.T) {
	r := NewPublishRateLimiter(rate.Limit(1), 1)
	l := utils.Logger()

	var counter atomic.Int32
	fn := r.ThrottlePublishFn(context.Background(), func(envelope *protocol.Envelope, logger *zap.Logger) error {
		counter.Add(1)
		return nil
	})

	go func() {
		for i := 0; i <= 10; i++ {
			err := fn(nil, l)
			require.NoError(t, err)
		}
	}()

	<-time.After(2 * time.Second)

	require.LessOrEqual(t, counter.Load(), int32(3))
}
@ -10,18 +10,18 @@ import (
|
||||
)
|
||||
|
||||
func TestExternalAddressSelection(t *testing.T) {
|
||||
a1, _ := ma.NewMultiaddr("/ip4/192.168.0.106/tcp/60000/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Valid
|
||||
a2, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/60000/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Valid but should not be prefered
|
||||
a3, _ := ma.NewMultiaddr("/ip4/192.168.1.20/tcp/19710/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Valid
|
||||
a4, _ := ma.NewMultiaddr("/dns4/www.status.im/tcp/2012/ws/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Invalid (it's useless)
|
||||
a5, _ := ma.NewMultiaddr("/dns4/www.status.im/tcp/443/wss/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Valid
|
||||
a6, _ := ma.NewMultiaddr("/ip4/192.168.1.20/tcp/19710/wss/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Invalid (local + wss)
|
||||
a7, _ := ma.NewMultiaddr("/ip4/192.168.1.20/tcp/19710/ws/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Invalid (it's useless)
|
||||
a8, _ := ma.NewMultiaddr("/dns4/node-02.gc-us-central1-a.status.prod.statusim.net/tcp/30303/p2p/16Uiu2HAmDQugwDHM3YeUp86iGjrUvbdw3JPRgikC7YoGBsT2ymMg/p2p-circuit/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID
|
||||
a9, _ := ma.NewMultiaddr("/dns4/node-02.gc-us-central1-a.status.prod.statusim.net/tcp/443/wss/p2p/16Uiu2HAmDQugwDHM3YeUp86iGjrUvbdw3JPRgikC7YoGBsT2ymMg/p2p-circuit/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID
|
||||
a10, _ := ma.NewMultiaddr("/dns4/node-01.gc-us-central1-a.waku.test.statusim.net/tcp/8000/wss/p2p/16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG/p2p-circuit/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID
|
||||
a11, _ := ma.NewMultiaddr("/dns4/node-01.gc-us-central1-a.waku.test.statusim.net/tcp/30303/p2p/16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG/p2p-circuit/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID
|
||||
a12, _ := ma.NewMultiaddr("/ip4/188.23.1.8/tcp/30303/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID
|
||||
a1, _ := ma.NewMultiaddr("/ip4/192.168.0.106/tcp/60000/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Valid
|
||||
a2, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/60000/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Valid but should not be prefered
|
||||
a3, _ := ma.NewMultiaddr("/ip4/192.168.1.20/tcp/19710/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Valid
|
||||
a4, _ := ma.NewMultiaddr("/dns4/www.status.im/tcp/2012/ws/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Invalid (it's useless)
|
||||
a5, _ := ma.NewMultiaddr("/dns4/www.status.im/tcp/443/wss/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Valid
|
||||
a6, _ := ma.NewMultiaddr("/ip4/192.168.1.20/tcp/19710/wss/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Invalid (local + wss)
|
||||
a7, _ := ma.NewMultiaddr("/ip4/192.168.1.20/tcp/19710/ws/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // Invalid (it's useless)
|
||||
a8, _ := ma.NewMultiaddr("/dns4/store-01.gc-us-central1-a.status.prod.status.im/tcp/30303/p2p/16Uiu2HAmDQugwDHM3YeUp86iGjrUvbdw3JPRgikC7YoGBsT2ymMg/p2p-circuit/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID
|
||||
a9, _ := ma.NewMultiaddr("/dns4/store-01.gc-us-central1-a.status.prod.status.im/tcp/443/wss/p2p/16Uiu2HAmDQugwDHM3YeUp86iGjrUvbdw3JPRgikC7YoGBsT2ymMg/p2p-circuit/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID
|
||||
a10, _ := ma.NewMultiaddr("/dns4/node-01.gc-us-central1-a.waku.test.status.im/tcp/8000/wss/p2p/16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG/p2p-circuit/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID
|
||||
a11, _ := ma.NewMultiaddr("/dns4/node-01.gc-us-central1-a.waku.test.status.im/tcp/30303/p2p/16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG/p2p-circuit/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID
|
||||
a12, _ := ma.NewMultiaddr("/ip4/188.23.1.8/tcp/30303/p2p/16Uiu2HAmUVVrJo1KMw4QwUANYF7Ws4mfcRqf9xHaaGP87GbMuY2f") // VALID
|
||||
|
||||
addrs := []ma.Multiaddr{a1, a2, a3, a4, a5, a6, a7}
|
||||
|
||||
|
||||
@ -2,14 +2,18 @@ package node
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/p2p/protocol/ping"
|
||||
"github.com/waku-org/go-waku/logging"
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
const maxAllowedPingFailures = 2
|
||||
@ -19,86 +23,199 @@ const maxAllowedPingFailures = 2
|
||||
// the peers if they don't reply back
|
||||
const sleepDetectionIntervalFactor = 3
|
||||
|
||||
const maxPeersToPingPerProtocol = 10
|
||||
|
||||
const maxAllowedSubsequentPingFailures = 2
|
||||
|
||||
func disconnectAllPeers(host host.Host, logger *zap.Logger) {
|
||||
for _, p := range host.Network().Peers() {
|
||||
err := host.Network().ClosePeer(p)
|
||||
if err != nil {
|
||||
logger.Debug("closing conn to peer", zap.Error(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// startKeepAlive creates a goroutine that periodically pings connected peers.
// This is necessary because idle TCP connections can otherwise be dropped (by the OS, NATs or firewalls),
// and a periodic ping avoids this (with a small bandwidth cost)
|
||||
func (w *WakuNode) startKeepAlive(ctx context.Context, t time.Duration) {
|
||||
func (w *WakuNode) startKeepAlive(ctx context.Context, randomPeersPingDuration time.Duration, allPeersPingDuration time.Duration) {
|
||||
defer w.wg.Done()
|
||||
w.log.Info("setting up ping protocol", zap.Duration("duration", t))
|
||||
ticker := time.NewTicker(t)
|
||||
defer ticker.Stop()
|
||||
|
||||
if !w.opts.enableRelay {
|
||||
return
|
||||
}
|
||||
|
||||
w.log.Info("setting up ping protocol", zap.Duration("randomPeersPingDuration", randomPeersPingDuration), zap.Duration("allPeersPingDuration", allPeersPingDuration))
|
||||
|
||||
randomPeersTickerC := make(<-chan time.Time)
|
||||
if randomPeersPingDuration != 0 {
|
||||
randomPeersTicker := time.NewTicker(randomPeersPingDuration)
|
||||
defer randomPeersTicker.Stop()
|
||||
randomPeersTickerC = randomPeersTicker.C
|
||||
}
|
||||
|
||||
allPeersTickerC := make(<-chan time.Time)
|
||||
if allPeersPingDuration != 0 {
|
||||
allPeersTicker := time.NewTicker(allPeersPingDuration)
|
||||
defer allPeersTicker.Stop()
|
||||
allPeersTickerC = allPeersTicker.C
|
||||
}
|
||||
|
||||
lastTimeExecuted := w.timesource.Now()
|
||||
|
||||
sleepDetectionInterval := int64(t) * sleepDetectionIntervalFactor
|
||||
sleepDetectionInterval := int64(randomPeersPingDuration) * sleepDetectionIntervalFactor
|
||||
|
||||
var iterationFailure int
|
||||
for {
|
||||
peersToPing := []peer.ID{}
|
||||
|
||||
select {
|
||||
case <-ticker.C:
|
||||
case <-allPeersTickerC:
|
||||
if w.opts.enableRelay {
|
||||
relayPeersSet := make(map[peer.ID]struct{})
|
||||
for _, t := range w.Relay().Topics() {
|
||||
for _, p := range w.Relay().PubSub().ListPeers(t) {
|
||||
relayPeersSet[p] = struct{}{}
|
||||
}
|
||||
}
|
||||
peersToPing = append(peersToPing, maps.Keys(relayPeersSet)...)
|
||||
}
|
||||
|
||||
case <-randomPeersTickerC:
|
||||
difference := w.timesource.Now().UnixNano() - lastTimeExecuted.UnixNano()
|
||||
forceDisconnectOnPingFailure := false
|
||||
if difference > sleepDetectionInterval {
|
||||
forceDisconnectOnPingFailure = true
|
||||
lastTimeExecuted = w.timesource.Now()
|
||||
w.log.Warn("keep alive hasnt been executed recently. Killing connections to peers if ping fails")
|
||||
w.log.Warn("keep alive hasnt been executed recently. Killing all connections")
|
||||
disconnectAllPeers(w.host, w.log)
|
||||
continue
|
||||
} else if iterationFailure >= maxAllowedSubsequentPingFailures {
|
||||
iterationFailure = 0
|
||||
w.log.Warn("Pinging random peers failed, node is likely disconnected. Killing all connections")
|
||||
disconnectAllPeers(w.host, w.log)
|
||||
continue
|
||||
}
|
||||
|
||||
// Network's peers collection,
|
||||
// contains only currently active peers
|
||||
pingWg := sync.WaitGroup{}
|
||||
peersToPing := w.host.Network().Peers()
|
||||
pingWg.Add(len(peersToPing))
|
||||
for _, p := range peersToPing {
|
||||
if p != w.host.ID() {
|
||||
go w.pingPeer(ctx, &pingWg, p, forceDisconnectOnPingFailure)
|
||||
if w.opts.enableRelay {
|
||||
// Prioritize mesh peers
|
||||
meshPeersSet := make(map[peer.ID]struct{})
|
||||
for _, t := range w.Relay().Topics() {
|
||||
for _, p := range w.Relay().PubSub().MeshPeers(t) {
|
||||
meshPeersSet[p] = struct{}{}
|
||||
}
|
||||
}
|
||||
peersToPing = append(peersToPing, maps.Keys(meshPeersSet)...)
|
||||
|
||||
// Ping also some random relay peers
|
||||
if maxPeersToPingPerProtocol-len(peersToPing) > 0 {
|
||||
relayPeersSet := make(map[peer.ID]struct{})
|
||||
for _, t := range w.Relay().Topics() {
|
||||
for _, p := range w.Relay().PubSub().ListPeers(t) {
|
||||
if _, ok := meshPeersSet[p]; !ok {
|
||||
relayPeersSet[p] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
relayPeers := maps.Keys(relayPeersSet)
|
||||
rand.Shuffle(len(relayPeers), func(i, j int) { relayPeers[i], relayPeers[j] = relayPeers[j], relayPeers[i] })
|
||||
|
||||
peerLen := maxPeersToPingPerProtocol - len(peersToPing)
|
||||
if peerLen > len(relayPeers) {
|
||||
peerLen = len(relayPeers)
|
||||
}
|
||||
peersToPing = append(peersToPing, relayPeers[0:peerLen]...)
|
||||
}
|
||||
}
|
||||
pingWg.Wait()
|
||||
|
||||
lastTimeExecuted = w.timesource.Now()
|
||||
if w.opts.enableFilterLightNode {
|
||||
// We also ping all filter nodes
|
||||
filterPeersSet := make(map[peer.ID]struct{})
|
||||
for _, s := range w.FilterLightnode().Subscriptions() {
|
||||
filterPeersSet[s.PeerID] = struct{}{}
|
||||
}
|
||||
peersToPing = append(peersToPing, maps.Keys(filterPeersSet)...)
|
||||
}
|
||||
case <-ctx.Done():
|
||||
w.log.Info("stopping ping protocol")
|
||||
return
|
||||
}
|
||||
|
||||
pingWg := sync.WaitGroup{}
|
||||
pingWg.Add(len(peersToPing))
|
||||
pingResultChan := make(chan bool, len(peersToPing))
|
||||
for _, p := range peersToPing {
|
||||
go w.pingPeer(ctx, &pingWg, p, pingResultChan)
|
||||
}
|
||||
pingWg.Wait()
|
||||
close(pingResultChan)
|
||||
|
||||
failureCounter := 0
|
||||
for couldPing := range pingResultChan {
|
||||
if !couldPing {
|
||||
failureCounter++
|
||||
}
|
||||
}
|
||||
|
||||
if len(peersToPing) > 0 && failureCounter == len(peersToPing) {
|
||||
iterationFailure++
|
||||
} else {
|
||||
iterationFailure = 0
|
||||
}
|
||||
|
||||
lastTimeExecuted = w.timesource.Now()
|
||||
}
|
||||
}
|
||||
|
||||
func (w *WakuNode) pingPeer(ctx context.Context, wg *sync.WaitGroup, peerID peer.ID, forceDisconnectOnFail bool) {
|
||||
func (w *WakuNode) pingPeer(ctx context.Context, wg *sync.WaitGroup, peerID peer.ID, resultChan chan bool) {
|
||||
defer wg.Done()
|
||||
|
||||
logger := w.log.With(logging.HostID("peer", peerID))
|
||||
|
||||
for i := 0; i < maxAllowedPingFailures; i++ {
|
||||
if w.host.Network().Connectedness(peerID) != network.Connected {
|
||||
// Peer is no longer connected. No need to ping
|
||||
resultChan <- false
|
||||
return
|
||||
}
|
||||
|
||||
logger.Debug("pinging")
|
||||
|
||||
if w.tryPing(ctx, peerID, logger) {
|
||||
resultChan <- true
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if w.host.Network().Connectedness(peerID) != network.Connected {
|
||||
resultChan <- false
|
||||
return
|
||||
}
|
||||
|
||||
logger.Info("disconnecting dead peer")
|
||||
if err := w.host.Network().ClosePeer(peerID); err != nil {
|
||||
logger.Debug("closing conn to peer", zap.Error(err))
|
||||
}
|
||||
|
||||
resultChan <- false
|
||||
}
|
||||
|
||||
func (w *WakuNode) tryPing(ctx context.Context, peerID peer.ID, logger *zap.Logger) bool {
|
||||
ctx, cancel := context.WithTimeout(ctx, 7*time.Second)
|
||||
defer cancel()
|
||||
|
||||
logger := w.log.With(logging.HostID("peer", peerID))
|
||||
logger.Debug("pinging")
|
||||
pr := ping.Ping(ctx, w.host, peerID)
|
||||
select {
|
||||
case res := <-pr:
|
||||
if res.Error != nil {
|
||||
w.keepAliveMutex.Lock()
|
||||
w.keepAliveFails[peerID]++
|
||||
w.keepAliveMutex.Unlock()
|
||||
logger.Debug("could not ping", zap.Error(res.Error))
|
||||
} else {
|
||||
w.keepAliveMutex.Lock()
|
||||
delete(w.keepAliveFails, peerID)
|
||||
w.keepAliveMutex.Unlock()
|
||||
return false
|
||||
}
|
||||
case <-ctx.Done():
|
||||
w.keepAliveMutex.Lock()
|
||||
w.keepAliveFails[peerID]++
|
||||
w.keepAliveMutex.Unlock()
|
||||
logger.Debug("could not ping (context done)", zap.Error(ctx.Err()))
|
||||
}
|
||||
|
||||
w.keepAliveMutex.Lock()
|
||||
if (forceDisconnectOnFail || w.keepAliveFails[peerID] > maxAllowedPingFailures) && w.host.Network().Connectedness(peerID) == network.Connected {
|
||||
logger.Info("disconnecting peer")
|
||||
if err := w.host.Network().ClosePeer(peerID); err != nil {
|
||||
logger.Debug("closing conn to peer", zap.Error(err))
|
||||
if !errors.Is(ctx.Err(), context.Canceled) {
|
||||
logger.Debug("could not ping (context)", zap.Error(ctx.Err()))
|
||||
}
|
||||
w.keepAliveFails[peerID] = 0
|
||||
return false
|
||||
}
|
||||
w.keepAliveMutex.Unlock()
|
||||
return true
|
||||
}
|
||||
|
||||
@ -9,7 +9,6 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/libp2p/go-libp2p"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/peerstore"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"github.com/stretchr/testify/require"
|
||||
@ -40,17 +39,17 @@ func TestKeepAlive(t *testing.T) {
|
||||
wg := &sync.WaitGroup{}
|
||||
|
||||
w := &WakuNode{
|
||||
host: host1,
|
||||
wg: wg,
|
||||
log: utils.Logger(),
|
||||
keepAliveMutex: sync.Mutex{},
|
||||
keepAliveFails: make(map[peer.ID]int),
|
||||
host: host1,
|
||||
wg: wg,
|
||||
log: utils.Logger(),
|
||||
}
|
||||
|
||||
w.wg.Add(1)
|
||||
w.pingPeer(ctx2, w.wg, peerID2, false)
|
||||
|
||||
peerFailureSignalChan := make(chan bool, 1)
|
||||
w.pingPeer(ctx2, w.wg, peerID2, peerFailureSignalChan)
|
||||
require.NoError(t, ctx.Err())
|
||||
close(peerFailureSignalChan)
|
||||
}
|
||||
|
||||
func TestPeriodicKeepAlive(t *testing.T) {
|
||||
@ -70,7 +69,7 @@ func TestPeriodicKeepAlive(t *testing.T) {
|
||||
WithPrivateKey(prvKey),
|
||||
WithHostAddress(hostAddr),
|
||||
WithWakuRelay(),
|
||||
WithKeepAlive(time.Second),
|
||||
WithKeepAlive(time.Minute, time.Second),
|
||||
)
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
@ -338,6 +338,14 @@ func (w *WakuNode) setupENR(ctx context.Context, addrs []ma.Multiaddr) error {
|
||||
|
||||
}
|
||||
|
||||
func (w *WakuNode) SetRelayShards(rs protocol.RelayShards) error {
|
||||
err := wenr.Update(w.log, w.localNode, wenr.WithWakuRelaySharding(rs))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *WakuNode) watchTopicShards(ctx context.Context) error {
|
||||
evtRelaySubscribed, err := w.Relay().Events().Subscribe(new(relay.EvtRelaySubscribed))
|
||||
if err != nil {
|
||||
@ -374,7 +382,7 @@ func (w *WakuNode) watchTopicShards(ctx context.Context) error {
|
||||
}
|
||||
|
||||
if len(rs) == 1 {
|
||||
w.log.Info("updating advertised relay shards in ENR")
|
||||
w.log.Info("updating advertised relay shards in ENR", zap.Any("newShardInfo", rs[0]))
|
||||
if len(rs[0].ShardIDs) != len(topics) {
|
||||
w.log.Warn("A mix of named and static shards found. ENR shard will contain only the following shards", zap.Any("shards", rs[0]))
|
||||
}
|
||||
|
||||
@ -116,9 +116,6 @@ type WakuNode struct {
|
||||
addressChangesSub event.Subscription
|
||||
enrChangeCh chan struct{}
|
||||
|
||||
keepAliveMutex sync.Mutex
|
||||
keepAliveFails map[peer.ID]int
|
||||
|
||||
cancel context.CancelFunc
|
||||
wg *sync.WaitGroup
|
||||
|
||||
@ -193,7 +190,6 @@ func New(opts ...WakuNodeOption) (*WakuNode, error) {
|
||||
w.opts = params
|
||||
w.log = params.logger.Named("node2")
|
||||
w.wg = &sync.WaitGroup{}
|
||||
w.keepAliveFails = make(map[peer.ID]int)
|
||||
w.wakuFlag = enr.NewWakuEnrBitfield(w.opts.enableLightPush, w.opts.enableFilterFullNode, w.opts.enableStore, w.opts.enableRelay)
|
||||
w.circuitRelayNodes = make(chan peer.AddrInfo)
|
||||
w.metrics = newMetrics(params.prometheusReg)
|
||||
@ -276,7 +272,7 @@ func New(opts ...WakuNodeOption) (*WakuNode, error) {
|
||||
}
|
||||
}
|
||||
|
||||
w.peerExchange, err = peer_exchange.NewWakuPeerExchange(w.DiscV5(), w.opts.clusterID, w.peerConnector, w.peermanager, w.opts.prometheusReg, w.log)
|
||||
w.peerExchange, err = peer_exchange.NewWakuPeerExchange(w.DiscV5(), w.opts.clusterID, w.peerConnector, w.peermanager, w.opts.prometheusReg, w.log, w.opts.peerExchangeOptions...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -382,11 +378,6 @@ func (w *WakuNode) Start(ctx context.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if w.opts.keepAliveInterval > time.Duration(0) {
|
||||
w.wg.Add(1)
|
||||
go w.startKeepAlive(ctx, w.opts.keepAliveInterval)
|
||||
}
|
||||
|
||||
w.metadata.SetHost(host)
|
||||
err = w.metadata.Start(ctx)
|
||||
if err != nil {
|
||||
@ -461,16 +452,30 @@ func (w *WakuNode) Start(ctx context.Context) error {
|
||||
}
|
||||
|
||||
w.filterLightNode.SetHost(host)
|
||||
|
||||
err = w.setupENR(ctx, w.ListenAddresses())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if w.opts.enableFilterLightNode {
|
||||
err := w.filterLightNode.Start(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//TODO: setting this up temporarily to improve connectivity success for lightNode in status.
|
||||
//This will have to be removed or changed when community sharding is implemented.
|
||||
if w.opts.shards != nil {
|
||||
err = w.SetRelayShards(*w.opts.shards)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = w.setupENR(ctx, w.ListenAddresses())
|
||||
if err != nil {
|
||||
return err
|
||||
if w.opts.keepAliveRandomPeersInterval > time.Duration(0) || w.opts.keepAliveAllPeersInterval > time.Duration(0) {
|
||||
w.wg.Add(1)
|
||||
go w.startKeepAlive(ctx, w.opts.keepAliveRandomPeersInterval, w.opts.keepAliveAllPeersInterval)
|
||||
}
|
||||
|
||||
w.peerExchange.SetHost(host)
|
||||
@ -794,6 +799,17 @@ func (w *WakuNode) ClosePeerByAddress(address string) error {
|
||||
return w.ClosePeerById(info.ID)
|
||||
}
|
||||
|
||||
func (w *WakuNode) DisconnectAllPeers() {
|
||||
w.host.Network().StopNotify(w.connectionNotif)
|
||||
for _, peerID := range w.host.Network().Peers() {
|
||||
err := w.ClosePeerById(peerID)
|
||||
if err != nil {
|
||||
w.log.Info("failed to close peer", zap.Stringer("peer", peerID), zap.Error(err))
|
||||
}
|
||||
}
|
||||
w.host.Network().Notify(w.connectionNotif)
|
||||
}
|
||||
|
||||
// ClosePeerById is used to close a connection to a peer
|
||||
func (w *WakuNode) ClosePeerById(id peer.ID) error {
|
||||
err := w.host.Network().ClosePeer(id)
|
||||
|
||||
@ -13,6 +13,7 @@ import (
|
||||
"time"
|
||||
|
||||
wenr "github.com/waku-org/go-waku/waku/v2/protocol/enr"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange"
|
||||
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
@ -540,3 +541,62 @@ func TestStaticShardingLimits(t *testing.T) {
|
||||
tests.WaitForMsg(t, 2*time.Second, &wg, s2.Ch)
|
||||
|
||||
}
|
||||
|
||||
func TestPeerExchangeRatelimit(t *testing.T) {
|
||||
log := utils.Logger()
|
||||
|
||||
if os.Getenv("RUN_FLAKY_TESTS") != "true" {
|
||||
|
||||
log.Info("Skipping", zap.String("test", t.Name()),
|
||||
zap.String("reason", "RUN_FLAKY_TESTS environment variable is not set to true"))
|
||||
t.SkipNow()
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
|
||||
defer cancel()
|
||||
|
||||
testClusterID := uint16(21)
|
||||
|
||||
// Node1 with Relay
|
||||
hostAddr1, err := net.ResolveTCPAddr("tcp", "0.0.0.0:0")
|
||||
require.NoError(t, err)
|
||||
wakuNode1, err := New(
|
||||
WithHostAddress(hostAddr1),
|
||||
WithWakuRelay(),
|
||||
WithClusterID(testClusterID),
|
||||
WithPeerExchange(peer_exchange.WithRateLimiter(1, 1)),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
err = wakuNode1.Start(ctx)
|
||||
require.NoError(t, err)
|
||||
defer wakuNode1.Stop()
|
||||
|
||||
// Node2 with Relay
|
||||
hostAddr2, err := net.ResolveTCPAddr("tcp", "0.0.0.0:0")
|
||||
require.NoError(t, err)
|
||||
wakuNode2, err := New(
|
||||
WithHostAddress(hostAddr2),
|
||||
WithWakuRelay(),
|
||||
WithClusterID(testClusterID),
|
||||
WithPeerExchange(peer_exchange.WithRateLimiter(1, 1)),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
err = wakuNode2.Start(ctx)
|
||||
require.NoError(t, err)
|
||||
defer wakuNode2.Stop()
|
||||
|
||||
err = wakuNode2.DialPeer(ctx, wakuNode1.ListenAddresses()[0].String())
|
||||
require.NoError(t, err)
|
||||
|
||||
//time.Sleep(1 * time.Second)
|
||||
|
||||
err = wakuNode1.PeerExchange().Request(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = wakuNode1.PeerExchange().Request(ctx, 1)
|
||||
require.Error(t, err)
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
err = wakuNode1.PeerExchange().Request(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@ -27,10 +27,12 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/waku-org/go-waku/waku/v2/onlinechecker"
|
||||
"github.com/waku-org/go-waku/waku/v2/peermanager"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/filter"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/legacy_store"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/lightpush"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange"
|
||||
"github.com/waku-org/go-waku/waku/v2/rendezvous"
|
||||
"github.com/waku-org/go-waku/waku/v2/timesource"
|
||||
"github.com/waku-org/go-waku/waku/v2/utils"
|
||||
@ -52,6 +54,7 @@ type WakuNodeParameters struct {
|
||||
hostAddr *net.TCPAddr
|
||||
maxConnectionsPerIP int
|
||||
clusterID uint16
|
||||
shards *protocol.RelayShards
|
||||
dns4Domain string
|
||||
advertiseAddrs []multiaddr.Multiaddr
|
||||
multiAddr []multiaddr.Multiaddr
|
||||
@ -102,7 +105,8 @@ type WakuNodeParameters struct {
|
||||
discV5bootnodes []*enode.Node
|
||||
discV5autoUpdate bool
|
||||
|
||||
enablePeerExchange bool
|
||||
enablePeerExchange bool
|
||||
peerExchangeOptions []peer_exchange.Option
|
||||
|
||||
enableRLN bool
|
||||
rlnRelayMemIndex *uint
|
||||
@ -114,7 +118,8 @@ type WakuNodeParameters struct {
|
||||
rlnTreePath string
|
||||
rlnMembershipContractAddress common.Address
|
||||
|
||||
keepAliveInterval time.Duration
|
||||
keepAliveRandomPeersInterval time.Duration
|
||||
keepAliveAllPeersInterval time.Duration
|
||||
|
||||
enableLightPush bool
|
||||
|
||||
@ -314,6 +319,23 @@ func WithClusterID(clusterID uint16) WakuNodeOption {
|
||||
}
|
||||
}
|
||||
|
||||
func WithPubSubTopics(topics []string) WakuNodeOption {
|
||||
return func(params *WakuNodeParameters) error {
|
||||
rs, err := protocol.TopicsToRelayShards(topics...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(rs) == 0 {
|
||||
return nil
|
||||
}
|
||||
if rs[0].ClusterID != params.clusterID {
return errors.New("pubsub topics have a different clusterID than the configured clusterID")
}
params.shards = &rs[0] // Only consider index 0, as a node can only support one cluster as of now
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
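For illustration, static-sharding pubsub topics encode both the cluster and the shard, and WithPubSubTopics requires that cluster to match WithClusterID (the topic values below are made-up examples):

// Illustrative only: both topics belong to cluster 16, so they are consistent with WithClusterID(16).
opts := []WakuNodeOption{
	WithClusterID(16),
	WithPubSubTopics([]string{"/waku/2/rs/16/32", "/waku/2/rs/16/64"}),
	WithWakuRelay(),
}
node, err := New(opts...)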
// WithMaxConnectionsPerIP sets the max number of allowed peers from the same IP
|
||||
func WithMaxConnectionsPerIP(limit int) WakuNodeOption {
|
||||
return func(params *WakuNodeParameters) error {
|
||||
@ -410,9 +432,10 @@ func WithDiscoveryV5(udpPort uint, bootnodes []*enode.Node, autoUpdate bool) Wak
|
||||
}
|
||||
|
||||
// WithPeerExchange is a WakuOption used to enable Peer Exchange
|
||||
func WithPeerExchange() WakuNodeOption {
|
||||
func WithPeerExchange(options ...peer_exchange.Option) WakuNodeOption {
|
||||
return func(params *WakuNodeParameters) error {
|
||||
params.enablePeerExchange = true
|
||||
params.peerExchangeOptions = options
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@ -476,10 +499,14 @@ func WithLightPush(lightpushOpts ...lightpush.Option) WakuNodeOption {
|
||||
}
|
||||
|
||||
// WithKeepAlive is a WakuNodeOption used to set the interval of time when
// each peer will be pinged to keep the TCP connection alive
func WithKeepAlive(t time.Duration) WakuNodeOption {
// each peer will be pinged to keep the TCP connection alive. The option accepts two
// intervals: `randomPeersInterval`, which is used to ping full mesh
// peers (if using relay) and random connected peers, and `allPeersInterval`,
// which is used to ping all connected peers
func WithKeepAlive(randomPeersInterval time.Duration, allPeersInterval time.Duration) WakuNodeOption {
|
||||
return func(params *WakuNodeParameters) error {
|
||||
params.keepAliveInterval = t
|
||||
params.keepAliveRandomPeersInterval = randomPeersInterval
|
||||
params.keepAliveAllPeersInterval = allPeersInterval
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
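A usage sketch for the two keep-alive intervals (the interval values below are arbitrary examples):

// Illustrative only: ping mesh/random peers every 10 seconds, and every connected peer every 5 minutes.
node, err := New(
	WithWakuRelay(),
	WithKeepAlive(10*time.Second, 5*time.Minute),
)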
@ -58,7 +58,7 @@ func TestWakuOptions(t *testing.T) {
|
||||
WithWakuStore(),
|
||||
WithMessageProvider(&persistence.DBStore{}),
|
||||
WithLightPush(),
|
||||
WithKeepAlive(time.Hour),
|
||||
WithKeepAlive(time.Minute, time.Hour),
|
||||
WithTopicHealthStatusChannel(topicHealthStatusChan),
|
||||
WithWakuStoreFactory(storeFactory),
|
||||
}
|
||||
@ -107,7 +107,7 @@ func TestWakuRLNOptions(t *testing.T) {
|
||||
WithWakuStore(),
|
||||
WithMessageProvider(&persistence.DBStore{}),
|
||||
WithLightPush(),
|
||||
WithKeepAlive(time.Hour),
|
||||
WithKeepAlive(time.Minute, time.Hour),
|
||||
WithTopicHealthStatusChannel(topicHealthStatusChan),
|
||||
WithWakuStoreFactory(storeFactory),
|
||||
WithStaticRLNRelay(&index, handleSpam),
|
||||
@ -147,7 +147,7 @@ func TestWakuRLNOptions(t *testing.T) {
|
||||
WithWakuStore(),
|
||||
WithMessageProvider(&persistence.DBStore{}),
|
||||
WithLightPush(),
|
||||
WithKeepAlive(time.Hour),
|
||||
WithKeepAlive(time.Minute, time.Hour),
|
||||
WithTopicHealthStatusChannel(topicHealthStatusChan),
|
||||
WithWakuStoreFactory(storeFactory),
|
||||
WithDynamicRLNRelay(keystorePath, keystorePassword, rlnTreePath, common.HexToAddress(contractAddress), &index, handleSpam, ethClientAddress),
|
||||
|
||||
@ -112,7 +112,7 @@ func (pm *PeerManager) discoverPeersByPubsubTopics(pubsubTopics []string, proto
|
||||
for _, shardInfo := range shardsInfo {
|
||||
err = pm.DiscoverAndConnectToPeers(ctx, shardInfo.ClusterID, shardInfo.ShardIDs[0], proto, maxCount)
|
||||
if err != nil {
|
||||
pm.logger.Error("failed to discover and connect to peers", zap.Error(err))
|
||||
pm.logger.Warn("failed to discover and connect to peers", zap.Error(err))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
||||
@ -309,17 +309,15 @@ func (pm *PeerManager) ensureMinRelayConnsPerTopic() {
|
||||
defer pm.topicMutex.RUnlock()
|
||||
for topicStr, topicInst := range pm.subRelayTopics {
|
||||
|
||||
// @cammellos reported that ListPeers returned an invalid number of
|
||||
// peers. This will ensure that the peers returned by this function
|
||||
// match those peers that are currently connected
|
||||
meshPeerLen := pm.checkAndUpdateTopicHealth(topicInst)
|
||||
curConnectedPeerLen := pm.getPeersBasedOnconnectionStatus(topicStr, network.Connected).Len()
|
||||
|
||||
curPeerLen := pm.checkAndUpdateTopicHealth(topicInst)
|
||||
if curPeerLen < pm.OutPeersTarget {
|
||||
if meshPeerLen < waku_proto.GossipSubDMin || curConnectedPeerLen < pm.OutPeersTarget {
|
||||
pm.logger.Debug("subscribed topic has not reached target peers, initiating more connections to maintain healthy mesh",
|
||||
zap.String("pubSubTopic", topicStr), zap.Int("connectedPeerCount", curPeerLen),
|
||||
zap.String("pubSubTopic", topicStr), zap.Int("connectedPeerCount", curConnectedPeerLen),
|
||||
zap.Int("targetPeers", pm.OutPeersTarget))
|
||||
//Find not connected peers.
|
||||
notConnectedPeers := pm.getNotConnectedPers(topicStr)
|
||||
notConnectedPeers := pm.getPeersBasedOnconnectionStatus(topicStr, network.NotConnected)
|
||||
if notConnectedPeers.Len() == 0 {
|
||||
pm.logger.Debug("could not find any peers in peerstore to connect to, discovering more", zap.String("pubSubTopic", topicStr))
|
||||
go pm.discoverPeersByPubsubTopics([]string{topicStr}, relay.WakuRelayID_v200, pm.ctx, 2)
|
||||
@ -327,12 +325,13 @@ func (pm *PeerManager) ensureMinRelayConnsPerTopic() {
|
||||
}
|
||||
pm.logger.Debug("connecting to eligible peers in peerstore", zap.String("pubSubTopic", topicStr))
|
||||
//Connect to eligible peers.
|
||||
numPeersToConnect := pm.OutPeersTarget - curPeerLen
|
||||
|
||||
if numPeersToConnect > notConnectedPeers.Len() {
|
||||
numPeersToConnect = notConnectedPeers.Len()
|
||||
numPeersToConnect := pm.OutPeersTarget - curConnectedPeerLen
|
||||
if numPeersToConnect > 0 {
|
||||
if numPeersToConnect > notConnectedPeers.Len() {
|
||||
numPeersToConnect = notConnectedPeers.Len()
|
||||
}
|
||||
pm.connectToSpecifiedPeers(notConnectedPeers[0:numPeersToConnect])
|
||||
}
|
||||
pm.connectToSpecifiedPeers(notConnectedPeers[0:numPeersToConnect])
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -372,8 +371,8 @@ func (pm *PeerManager) connectToSpecifiedPeers(peers peer.IDSlice) {
|
||||
}
|
||||
}
|
||||
|
||||
// getNotConnectedPers returns peers for a pubSubTopic that are not connected.
|
||||
func (pm *PeerManager) getNotConnectedPers(pubsubTopic string) (notConnectedPeers peer.IDSlice) {
|
||||
// getPeersBasedOnconnectionStatus returns peers for a pubSubTopic that are either connected/not-connected based on status passed.
|
||||
func (pm *PeerManager) getPeersBasedOnconnectionStatus(pubsubTopic string, connected network.Connectedness) (filteredPeers peer.IDSlice) {
|
||||
var peerList peer.IDSlice
|
||||
if pubsubTopic == "" {
|
||||
peerList = pm.host.Peerstore().Peers()
|
||||
@ -381,8 +380,8 @@ func (pm *PeerManager) getNotConnectedPers(pubsubTopic string) (notConnectedPeer
|
||||
peerList = pm.host.Peerstore().(*wps.WakuPeerstoreImpl).PeersByPubSubTopic(pubsubTopic)
|
||||
}
|
||||
for _, peerID := range peerList {
|
||||
if pm.host.Network().Connectedness(peerID) != network.Connected {
|
||||
notConnectedPeers = append(notConnectedPeers, peerID)
|
||||
if pm.host.Network().Connectedness(peerID) == connected {
|
||||
filteredPeers = append(filteredPeers, peerID)
|
||||
}
|
||||
}
|
||||
return
|
||||
|
||||
@ -75,8 +75,8 @@ func TestMultiaddr(t *testing.T) {
|
||||
wakuFlag := NewWakuEnrBitfield(true, true, true, true)
|
||||
|
||||
//wss, _ := ma.NewMultiaddr("/dns4/www.somedomainname.com/tcp/443/wss")
|
||||
circuit1, _ := ma.NewMultiaddr("/dns4/node-02.gc-us-central1-a.status.prod.statusim.net/tcp/30303/p2p/16Uiu2HAmDQugwDHM3YeUp86iGjrUvbdw3JPRgikC7YoGBsT2ymMg/p2p-circuit")
|
||||
circuit2, _ := ma.NewMultiaddr("/dns4/node-01.gc-us-central1-a.status.prod.statusim.net/tcp/30303/p2p/16Uiu2HAmDQugwDHM3YeUp86iGjrUvbdw3JPRgikC7YoGBsT2ymMg/p2p-circuit")
|
||||
circuit1, _ := ma.NewMultiaddr("/dns4/node-02.gc-us-central1-a.status.prod.status.im/tcp/30303/p2p/16Uiu2HAmDQugwDHM3YeUp86iGjrUvbdw3JPRgikC7YoGBsT2ymMg/p2p-circuit")
|
||||
circuit2, _ := ma.NewMultiaddr("/dns4/node-01.gc-us-central1-a.status.prod.status.im/tcp/30303/p2p/16Uiu2HAmDQugwDHM3YeUp86iGjrUvbdw3JPRgikC7YoGBsT2ymMg/p2p-circuit")
|
||||
|
||||
multiaddrValues := []ma.Multiaddr{
|
||||
//wss,
|
||||
|
||||
@ -156,6 +156,7 @@ func (wf *WakuFilterLightNode) onRequest(ctx context.Context) func(network.Strea
|
||||
if !wf.subscriptions.IsSubscribedTo(peerID) {
|
||||
logger.Warn("received message push from unknown peer", logging.HostID("peerID", peerID))
|
||||
wf.metrics.RecordError(unknownPeerMessagePush)
|
||||
//Send a wildcard unsubscribe to this peer so that further requests are not forwarded to us
|
||||
if err := stream.Reset(); err != nil {
|
||||
wf.log.Error("resetting connection", zap.Error(err))
|
||||
}
|
||||
@ -199,8 +200,8 @@ func (wf *WakuFilterLightNode) onRequest(ctx context.Context) func(network.Strea
|
||||
}
|
||||
|
||||
logger = messagePush.WakuMessage.Logger(logger, pubSubTopic)
|
||||
|
||||
if !wf.subscriptions.Has(peerID, protocol.NewContentFilter(pubSubTopic, messagePush.WakuMessage.ContentTopic)) {
|
||||
cf := protocol.NewContentFilter(pubSubTopic, messagePush.WakuMessage.ContentTopic)
|
||||
if !wf.subscriptions.Has(peerID, cf) {
|
||||
logger.Warn("received messagepush with invalid subscription parameters")
|
||||
wf.metrics.RecordError(invalidSubscriptionMessage)
|
||||
return
|
||||
@ -208,13 +209,13 @@ func (wf *WakuFilterLightNode) onRequest(ctx context.Context) func(network.Strea
|
||||
|
||||
wf.metrics.RecordMessage()
|
||||
|
||||
wf.notify(peerID, pubSubTopic, messagePush.WakuMessage)
|
||||
wf.notify(ctx, peerID, pubSubTopic, messagePush.WakuMessage)
|
||||
|
||||
logger.Info("received message push")
|
||||
}
|
||||
}
|
||||
|
||||
func (wf *WakuFilterLightNode) notify(remotePeerID peer.ID, pubsubTopic string, msg *wpb.WakuMessage) {
|
||||
func (wf *WakuFilterLightNode) notify(ctx context.Context, remotePeerID peer.ID, pubsubTopic string, msg *wpb.WakuMessage) {
|
||||
envelope := protocol.NewEnvelope(msg, wf.timesource.Now().UnixNano(), pubsubTopic)
|
||||
|
||||
if wf.broadcaster != nil {
|
||||
@ -222,11 +223,11 @@ func (wf *WakuFilterLightNode) notify(remotePeerID peer.ID, pubsubTopic string,
|
||||
wf.broadcaster.Submit(envelope)
|
||||
}
|
||||
// Notify filter subscribers
|
||||
wf.subscriptions.Notify(remotePeerID, envelope)
|
||||
wf.subscriptions.Notify(ctx, remotePeerID, envelope)
|
||||
}
|
||||
|
||||
func (wf *WakuFilterLightNode) request(ctx context.Context, requestID []byte,
|
||||
reqType pb.FilterSubscribeRequest_FilterSubscribeType, contentFilter protocol.ContentFilter, peer peer.ID) error {
|
||||
reqType pb.FilterSubscribeRequest_FilterSubscribeType, contentFilter protocol.ContentFilter, peerID peer.ID) error {
|
||||
request := &pb.FilterSubscribeRequest{
|
||||
RequestId: hex.EncodeToString(requestID),
|
||||
FilterSubscribeType: reqType,
|
||||
@ -239,11 +240,14 @@ func (wf *WakuFilterLightNode) request(ctx context.Context, requestID []byte,
|
||||
return err
|
||||
}
|
||||
|
||||
logger := wf.log.With(logging.HostID("peerID", peer))
|
||||
logger := wf.log.With(logging.HostID("peerID", peerID))
|
||||
|
||||
stream, err := wf.h.NewStream(ctx, peer, FilterSubscribeID_v20beta1)
|
||||
stream, err := wf.h.NewStream(ctx, peerID, FilterSubscribeID_v20beta1)
|
||||
if err != nil {
|
||||
wf.metrics.RecordError(dialFailure)
|
||||
if ps, ok := wf.h.Peerstore().(peerstore.WakuPeerstore); ok {
|
||||
ps.AddConnFailure(peer.AddrInfo{ID: peerID})
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@ -403,21 +407,35 @@ func (wf *WakuFilterLightNode) Subscribe(ctx context.Context, contentFilter prot
|
||||
|
||||
paramsCopy := params.Copy()
|
||||
paramsCopy.selectedPeers = selectedPeers
|
||||
for _, peer := range selectedPeers {
|
||||
err := wf.request(
|
||||
ctx,
|
||||
params.requestID,
|
||||
pb.FilterSubscribeRequest_SUBSCRIBE,
|
||||
cFilter,
|
||||
peer)
|
||||
if err != nil {
|
||||
wf.log.Error("Failed to subscribe", zap.String("pubSubTopic", pubSubTopic), zap.Strings("contentTopics", cTopics),
|
||||
zap.Error(err))
|
||||
failedContentTopics = append(failedContentTopics, cTopics...)
|
||||
continue
|
||||
var wg sync.WaitGroup
|
||||
reqCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel()
|
||||
tmpSubs := make([]*subscription.SubscriptionDetails, len(selectedPeers))
|
||||
for i, peerID := range selectedPeers {
|
||||
wg.Add(1)
|
||||
go func(index int, ID peer.ID) {
|
||||
defer wg.Done()
|
||||
err := wf.request(
|
||||
reqCtx,
|
||||
params.requestID,
|
||||
pb.FilterSubscribeRequest_SUBSCRIBE,
|
||||
cFilter,
|
||||
ID)
|
||||
if err != nil {
|
||||
wf.log.Error("Failed to subscribe", zap.String("pubSubTopic", pubSubTopic), zap.Strings("contentTopics", cTopics),
|
||||
zap.Error(err))
|
||||
failedContentTopics = append(failedContentTopics, cTopics...)
|
||||
} else {
|
||||
wf.log.Debug("subscription successful", zap.String("pubSubTopic", pubSubTopic), zap.Strings("contentTopics", cTopics), zap.Stringer("peer", ID))
|
||||
tmpSubs[index] = wf.subscriptions.NewSubscription(ID, cFilter)
|
||||
}
|
||||
}(i, peerID)
|
||||
}
|
||||
wg.Wait()
|
||||
for _, sub := range tmpSubs {
|
||||
if sub != nil {
|
||||
subscriptions = append(subscriptions, sub)
|
||||
}
|
||||
wf.log.Debug("subscription successful", zap.String("pubSubTopic", pubSubTopic), zap.Strings("contentTopics", cTopics), zap.Stringer("peer", peer))
|
||||
subscriptions = append(subscriptions, wf.subscriptions.NewSubscription(peer, cFilter))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -8,6 +8,8 @@ import (
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const PingTimeout = 5 * time.Second
|
||||
|
||||
func (wf *WakuFilterLightNode) PingPeers() {
|
||||
//Send a ping to all the peers and report their status to corresponding subscriptions
|
||||
// Alive or not, or set the state of the subscription?
|
||||
@ -17,17 +19,23 @@ func (wf *WakuFilterLightNode) PingPeers() {
|
||||
}
|
||||
|
||||
func (wf *WakuFilterLightNode) PingPeer(peer peer.ID) {
|
||||
ctxWithTimeout, cancel := context.WithTimeout(wf.CommonService.Context(), wf.peerPingInterval)
|
||||
ctxWithTimeout, cancel := context.WithTimeout(wf.CommonService.Context(), PingTimeout)
|
||||
defer cancel()
|
||||
err := wf.Ping(ctxWithTimeout, peer)
|
||||
if err != nil {
|
||||
wf.log.Warn("Filter ping failed towards peer", zap.Stringer("peer", peer), zap.Error(err))
|
||||
|
||||
subscriptions := wf.subscriptions.GetAllSubscriptionsForPeer(peer)
|
||||
for _, subscription := range subscriptions {
|
||||
wf.log.Debug("Notifying sub closing", zap.String("subID", subscription.ID))
|
||||
//Indicating that subscription is closing,
|
||||
subscription.SetClosing()
|
||||
//quickly retry the ping before marking the subscription as failed
//Note that PingTimeout is a fraction of PingInterval, so this shouldn't cause parallel pings to be sent.
|
||||
ctxWithTimeout, cancel := context.WithTimeout(wf.CommonService.Context(), PingTimeout)
|
||||
defer cancel()
|
||||
err = wf.Ping(ctxWithTimeout, peer)
|
||||
if err != nil {
|
||||
subscriptions := wf.subscriptions.GetAllSubscriptionsForPeer(peer)
|
||||
for _, subscription := range subscriptions {
|
||||
wf.log.Debug("Notifying sub closing", zap.String("subID", subscription.ID))
|
||||
//Indicating that subscription is closing,
|
||||
subscription.SetClosing()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -39,7 +47,9 @@ func (wf *WakuFilterLightNode) FilterHealthCheckLoop() {
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
wf.PingPeers()
|
||||
if wf.onlineChecker.IsOnline() {
|
||||
wf.PingPeers()
|
||||
}
|
||||
case <-wf.CommonService.Context().Done():
|
||||
return
|
||||
}
|
||||
|
||||
@ -26,7 +26,7 @@ func (s *FilterTestSuite) TestUnSubscriptionPing() {
err := s.LightNode.Ping(context.Background(), s.FullNodeHost.ID())
s.Require().NoError(err)

_, err = s.LightNode.Unsubscribe(s.ctx, s.contentFilter, WithPeer(s.FullNodeHost.ID()))
_, err = s.LightNode.Unsubscribe(s.ctx, s.ContentFilter, WithPeer(s.FullNodeHost.ID()))
s.Require().NoError(err)

err = s.LightNode.Ping(context.Background(), s.FullNodeHost.ID())

@ -220,8 +220,8 @@ func (s *FilterTestSuite) TestIncorrectSubscribeIdentifier() {
s.LightNodeHost.Peerstore().AddAddr(s.FullNodeHost.ID(), tests.GetHostAddress(s.FullNodeHost), peerstore.PermanentAddrTTL)

// Subscribe with incorrect SubscribeID
s.contentFilter = protocol.ContentFilter{PubsubTopic: s.TestTopic, ContentTopics: protocol.NewContentTopicSet(s.TestContentTopic)}
_, err := s.LightNode.IncorrectSubscribe(s.ctx, s.contentFilter, WithPeer(s.FullNodeHost.ID()))
s.ContentFilter = protocol.ContentFilter{PubsubTopic: s.TestTopic, ContentTopics: protocol.NewContentTopicSet(s.TestContentTopic)}
_, err := s.LightNode.IncorrectSubscribe(s.ctx, s.ContentFilter, WithPeer(s.FullNodeHost.ID()))
s.Require().Error(err)

_, err = s.LightNode.UnsubscribeAll(s.ctx)

@ -266,8 +266,8 @@ func (s *FilterTestSuite) TestIncorrectPushIdentifier() {
s.Require().NoError(err)

// Subscribe
s.contentFilter = protocol.ContentFilter{PubsubTopic: s.TestTopic, ContentTopics: protocol.NewContentTopicSet(s.TestContentTopic)}
s.subDetails, err = s.LightNode.Subscribe(s.ctx, s.contentFilter, WithPeer(s.FullNodeHost.ID()))
s.ContentFilter = protocol.ContentFilter{PubsubTopic: s.TestTopic, ContentTopics: protocol.NewContentTopicSet(s.TestContentTopic)}
s.subDetails, err = s.LightNode.Subscribe(s.ctx, s.ContentFilter, WithPeer(s.FullNodeHost.ID()))
s.Require().NoError(err)

time.Sleep(1 * time.Second)

@ -26,7 +26,7 @@ func (s *FilterTestSuite) TestWakuFilter() {
// Wrong content topic
s.waitForTimeout(&WakuMsg{s.TestTopic, "TopicB", "second"})

_, err := s.LightNode.Unsubscribe(s.ctx, s.contentFilter, WithPeer(s.FullNodeHost.ID()))
_, err := s.LightNode.Unsubscribe(s.ctx, s.ContentFilter, WithPeer(s.FullNodeHost.ID()))
s.Require().NoError(err)

// Should not receive after unsubscribe

@ -180,8 +180,8 @@ func (s *FilterTestSuite) TestContentTopicsLimit() {
s.ctx, s.ctxCancel = context.WithTimeout(context.Background(), 20*time.Second) // Test can't exceed 10 seconds

// Detect existing content topics from previous test
if len(s.contentFilter.PubsubTopic) > 0 {
existingTopics := len(s.contentFilter.ContentTopicsList())
if len(s.ContentFilter.PubsubTopic) > 0 {
existingTopics := len(s.ContentFilter.ContentTopicsList())
if existingTopics > 0 {
maxContentTopics = maxContentTopics - existingTopics
}

@ -233,13 +233,13 @@ func (s *FilterTestSuite) TestSubscribeErrorHandling() {
})

// Subscribe with empty pubsub
s.contentFilter = protocol.ContentFilter{PubsubTopic: messages[0].PubSubTopic, ContentTopics: protocol.NewContentTopicSet(messages[0].ContentTopic)}
_, err := s.LightNode.Subscribe(s.ctx, s.contentFilter, WithPeer(s.FullNodeHost.ID()))
s.ContentFilter = protocol.ContentFilter{PubsubTopic: messages[0].PubSubTopic, ContentTopics: protocol.NewContentTopicSet(messages[0].ContentTopic)}
_, err := s.LightNode.Subscribe(s.ctx, s.ContentFilter, WithPeer(s.FullNodeHost.ID()))
s.Require().Error(err)

// Subscribe with empty content topic
s.contentFilter = protocol.ContentFilter{PubsubTopic: messages[1].PubSubTopic, ContentTopics: protocol.NewContentTopicSet(messages[1].ContentTopic)}
_, err = s.LightNode.Subscribe(s.ctx, s.contentFilter, WithPeer(s.FullNodeHost.ID()))
s.ContentFilter = protocol.ContentFilter{PubsubTopic: messages[1].PubSubTopic, ContentTopics: protocol.NewContentTopicSet(messages[1].ContentTopic)}
_, err = s.LightNode.Subscribe(s.ctx, s.ContentFilter, WithPeer(s.FullNodeHost.ID()))
s.Require().Error(err)

}

@ -271,8 +271,8 @@ func (s *FilterTestSuite) TestMultipleFullNodeSubscriptions() {
s.Log.Info("Subscribing to second", zap.String("fullNode", string(fullNodeIDHex)))

// Subscribe to the second full node
s.contentFilter = protocol.ContentFilter{PubsubTopic: s.TestTopic, ContentTopics: protocol.NewContentTopicSet(s.TestContentTopic)}
_, err = s.LightNode.Subscribe(s.ctx, s.contentFilter, WithPeer(s.FullNodeHost.ID()))
s.ContentFilter = protocol.ContentFilter{PubsubTopic: s.TestTopic, ContentTopics: protocol.NewContentTopicSet(s.TestContentTopic)}
_, err = s.LightNode.Subscribe(s.ctx, s.ContentFilter, WithPeer(s.FullNodeHost.ID()))
s.Require().NoError(err)

_, err = s.LightNode.UnsubscribeAll(s.ctx)

@ -117,7 +117,7 @@ func (s *FilterTestSuite) TestAutoShard() {
// Wrong content topic
s.waitForTimeout(&WakuMsg{s.TestTopic, "TopicB", "second"})

_, err = s.LightNode.Unsubscribe(s.ctx, s.contentFilter, WithPeer(s.FullNodeHost.ID()))
_, err = s.LightNode.Unsubscribe(s.ctx, s.ContentFilter, WithPeer(s.FullNodeHost.ID()))
s.Require().NoError(err)

time.Sleep(1 * time.Second)

@ -14,6 +14,7 @@ import (
"github.com/libp2p/go-msgio/pbio"
"github.com/prometheus/client_golang/prometheus"
"github.com/waku-org/go-waku/logging"
"github.com/waku-org/go-waku/waku/v2/peerstore"
"github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/protocol/filter/pb"
"github.com/waku-org/go-waku/waku/v2/protocol/relay"

@ -273,6 +274,9 @@ func (wf *WakuFilterFullNode) pushMessage(ctx context.Context, logger *zap.Logge
wf.metrics.RecordError(pushTimeoutFailure)
} else {
wf.metrics.RecordError(dialFailure)
if ps, ok := wf.h.Peerstore().(peerstore.WakuPeerstore); ok {
ps.AddConnFailure(peer.AddrInfo{ID: peerID})
}
}
logger.Error("opening peer stream", zap.Error(err))
return err
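The same dial-failure handling recurs across several protocols in this change (filter, legacy store, lightpush, metadata, peer exchange, store v3): when opening a stream to a peer fails, the failure is attributed to that peer in the peerstore. A minimal sketch of the pattern as it appears in these hunks, assuming a libp2p host and the go-waku WakuPeerstore interface; the helper name is illustrative only:

import (
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/waku-org/go-waku/waku/v2/peerstore"
)

// recordDialFailure notes a failed stream/dial attempt against a peer so that
// peer management can take repeated connection failures into account.
func recordDialFailure(h host.Host, peerID peer.ID) {
	if ps, ok := h.Peerstore().(peerstore.WakuPeerstore); ok {
		ps.AddConnFailure(peer.AddrInfo{ID: peerID})
	}
}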
@ -47,7 +47,7 @@ type FilterTestSuite struct {
ctx context.Context
ctxCancel context.CancelFunc
wg *sync.WaitGroup
contentFilter protocol.ContentFilter
ContentFilter protocol.ContentFilter
subDetails []*subscription.SubscriptionDetails

Log *zap.Logger

@ -63,7 +63,7 @@ type WakuMsg struct {
}

func (s *FilterTestSuite) SetupTest() {
log := utils.Logger() //.Named("filterv2-test")
log := utils.Logger()
s.Log = log

s.Log.Info("SetupTest()")

@ -192,7 +192,7 @@ func (s *FilterTestSuite) waitForMsgFromChan(msg *WakuMsg, ch chan *protocol.Env
defer s.wg.Done()
select {
case env := <-ch:
for _, topic := range s.contentFilter.ContentTopicsList() {
for _, topic := range s.ContentFilter.ContentTopicsList() {
if topic == env.Message().GetContentTopic() {
msgFound = true
}

@ -308,8 +308,8 @@ func (s *FilterTestSuite) subscribe(pubsubTopic string, contentTopic string, pee
for _, sub := range s.subDetails {
if sub.ContentFilter.PubsubTopic == pubsubTopic {
sub.Add(contentTopic)
s.contentFilter = sub.ContentFilter
subDetails, err := s.LightNode.Subscribe(s.ctx, s.contentFilter, WithPeer(peer))
s.ContentFilter = sub.ContentFilter
subDetails, err := s.LightNode.Subscribe(s.ctx, s.ContentFilter, WithPeer(peer))
s.subDetails = subDetails
s.Require().NoError(err)
return

@ -317,7 +317,7 @@ func (s *FilterTestSuite) subscribe(pubsubTopic string, contentTopic string, pee
}

s.subDetails = s.getSub(pubsubTopic, contentTopic, peer)
s.contentFilter = s.subDetails[0].ContentFilter
s.ContentFilter = s.subDetails[0].ContentFilter
}

func (s *FilterTestSuite) unsubscribe(pubsubTopic string, contentTopic string, peer peer.ID) []*subscription.SubscriptionDetails {

@ -331,7 +331,7 @@ func (s *FilterTestSuite) unsubscribe(pubsubTopic string, contentTopic string, p
} else {
sub.Remove(contentTopic)
}
s.contentFilter = sub.ContentFilter
s.ContentFilter = sub.ContentFilter
}
}

@ -207,6 +207,9 @@ func (store *WakuStore) queryFrom(ctx context.Context, historyRequest *pb.Histor
if err != nil {
logger.Error("creating stream to peer", zap.Error(err))
store.metrics.RecordError(dialFailure)
if ps, ok := store.h.Peerstore().(peerstore.WakuPeerstore); ok {
ps.AddConnFailure(peer.AddrInfo{ID: selectedPeer})
}
return nil, err
}
@ -6,6 +6,8 @@ import (
"errors"
"fmt"
"math"
"sync"
"time"

"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"

@ -187,21 +189,17 @@ func (wakuLP *WakuLightPush) reply(stream network.Stream, responsePushRPC *pb.Pu
}

// request sends a message via lightPush protocol to either a specified peer or peer that is selected.
func (wakuLP *WakuLightPush) request(ctx context.Context, req *pb.PushRequest, params *lightPushRequestParameters) (*pb.PushResponse, error) {
if params == nil {
return nil, errors.New("lightpush params are mandatory")
}
func (wakuLP *WakuLightPush) request(ctx context.Context, req *pb.PushRequest, params *lightPushRequestParameters, peerID peer.ID) (*pb.PushResponse, error) {

if len(params.requestID) == 0 {
return nil, ErrInvalidID
}
logger := wakuLP.log.With(logging.HostID("peer", peerID))

logger := wakuLP.log.With(logging.HostID("peer", params.selectedPeer))

stream, err := wakuLP.h.NewStream(ctx, params.selectedPeer, LightPushID_v20beta1)
stream, err := wakuLP.h.NewStream(ctx, peerID, LightPushID_v20beta1)
if err != nil {
logger.Error("creating stream to peer", zap.Error(err))
wakuLP.metrics.RecordError(dialFailure)
if ps, ok := wakuLP.h.Peerstore().(peerstore.WakuPeerstore); ok {
ps.AddConnFailure(peer.AddrInfo{ID: peerID})
}
return nil, err
}
pushRequestRPC := &pb.PushRpc{RequestId: hex.EncodeToString(params.requestID), Request: req}

@ -281,10 +279,10 @@ func (wakuLP *WakuLightPush) handleOpts(ctx context.Context, message *wpb.WakuMe
return nil, err
}
wakuLP.pm.Connect(pData)
params.selectedPeer = pData.AddrInfo.ID
params.selectedPeers = append(params.selectedPeers, pData.AddrInfo.ID)
}

if params.pm != nil && params.selectedPeer == "" {
reqPeerCount := params.maxPeers - len(params.selectedPeers)
if params.pm != nil && reqPeerCount > 0 {
var selectedPeers peer.IDSlice
//TODO: update this to work with multiple peer selection
selectedPeers, err = wakuLP.pm.SelectPeers(

@ -293,17 +291,17 @@ func (wakuLP *WakuLightPush) handleOpts(ctx context.Context, message *wpb.WakuMe
Proto: LightPushID_v20beta1,
PubsubTopics: []string{params.pubsubTopic},
SpecificPeers: params.preferredPeers,
MaxPeers: reqPeerCount,
Ctx: ctx,
},
)
if err == nil {
params.selectedPeer = selectedPeers[0]
params.selectedPeers = append(params.selectedPeers, selectedPeers...)
}

}
if params.selectedPeer == "" {
if len(params.selectedPeers) == 0 {
if err != nil {
params.log.Error("selecting peer", zap.Error(err))
params.log.Error("selecting peers", zap.Error(err))
wakuLP.metrics.RecordError(peerNotFoundFailure)
return nil, ErrNoPeersAvailable
}

@ -327,25 +325,45 @@ func (wakuLP *WakuLightPush) Publish(ctx context.Context, message *wpb.WakuMessa
req.Message = message
req.PubsubTopic = params.pubsubTopic

logger := message.Logger(wakuLP.log, params.pubsubTopic).With(logging.HostID("peerID", params.selectedPeer))
logger := message.Logger(wakuLP.log, params.pubsubTopic)

logger.Debug("publishing message")

response, err := wakuLP.request(ctx, req, params)
if err != nil {
logger.Error("could not publish message", zap.Error(err))
return wpb.MessageHash{}, err
logger.Debug("publishing message", zap.Stringers("peers", params.selectedPeers))
var wg sync.WaitGroup
responses := make([]*pb.PushResponse, params.selectedPeers.Len())
reqCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
for i, peerID := range params.selectedPeers {
wg.Add(1)
go func(index int, id peer.ID) {
paramsValue := *params
paramsValue.requestID = protocol.GenerateRequestID()
defer wg.Done()
response, err := wakuLP.request(reqCtx, req, &paramsValue, id)
if err != nil {
logger.Error("could not publish message", zap.Error(err), zap.Stringer("peer", id))
}
responses[index] = response
}(i, peerID)
}

if response.IsSuccess {
hash := message.Hash(params.pubsubTopic)
utils.MessagesLogger("lightpush").Debug("waku.lightpush published", logging.HexBytes("hash", hash[:]))
return hash, nil
}

wg.Wait()
var successCount int
errMsg := "lightpush error"
if response.Info != nil {
errMsg = *response.Info

for _, response := range responses {
if response.GetIsSuccess() {
successCount++
} else {
if response.GetInfo() != "" {
errMsg += *response.Info
}
}
}

//in case of partial failure, should we retry here or build a layer above that takes care of these things?
if successCount > 0 {
hash := message.Hash(params.pubsubTopic)
utils.MessagesLogger("lightpush").Debug("waku.lightpush published", logging.HexBytes("hash", hash[:]), zap.Int("num-peers", len(responses)))
return hash, nil
}

return wpb.MessageHash{}, errors.New(errMsg)
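The comment about partial failure leaves retries to a layer above Publish. As one possible shape for such a layer (purely illustrative, not part of this change), a small helper that retries an arbitrary publish function with a fixed backoff; publishFn is a stand-in for something like a lightpush Publish call:

import (
	"context"
	"time"
)

// publishWithRetry calls publishFn up to attempts times, waiting backoff between
// tries, and returns the last error if none of the attempts succeed.
func publishWithRetry(ctx context.Context, attempts int, backoff time.Duration, publishFn func(context.Context) error) error {
	var lastErr error
	for i := 0; i < attempts; i++ {
		lastErr = publishFn(ctx)
		if lastErr == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(backoff):
		}
	}
	return lastErr
}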
@ -29,7 +29,8 @@ func WithRateLimiter(r rate.Limit, b int) Option {
type lightPushRequestParameters struct {
host host.Host
peerAddr multiaddr.Multiaddr
selectedPeer peer.ID
selectedPeers peer.IDSlice
maxPeers int
peerSelectionType peermanager.PeerSelection
preferredPeers peer.IDSlice
requestID []byte

@ -41,10 +42,17 @@ type lightPushRequestParameters struct {
// RequestOption is the type of options accepted when performing LightPush protocol requests
type RequestOption func(*lightPushRequestParameters) error

func WithMaxPeers(num int) RequestOption {
return func(params *lightPushRequestParameters) error {
params.maxPeers = num
return nil
}
}

// WithPeer is an option used to specify the peerID to push a waku message to
func WithPeer(p peer.ID) RequestOption {
return func(params *lightPushRequestParameters) error {
params.selectedPeer = p
params.selectedPeers = append(params.selectedPeers, p)
if params.peerAddr != nil {
return errors.New("peerAddr and peerId options are mutually exclusive")
}

@ -58,7 +66,7 @@ func WithPeer(p peer.ID) RequestOption {
func WithPeerAddr(pAddr multiaddr.Multiaddr) RequestOption {
return func(params *lightPushRequestParameters) error {
params.peerAddr = pAddr
if params.selectedPeer != "" {
if len(params.selectedPeers) != 0 {
return errors.New("peerAddr and peerId options are mutually exclusive")
}
return nil

@ -125,7 +133,7 @@ func WithAutomaticRequestID() RequestOption {
// DefaultOptions are the default options to be used when using the lightpush protocol
func DefaultOptions(host host.Host) []RequestOption {
return []RequestOption{
WithAutomaticRequestID(),
WithAutomaticPeerSelection(),
WithMaxPeers(1), //keeping default as 2 for status use-case
}
}
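On the caller side, the new WithMaxPeers option lets a publish fan out to several lightpush service peers while keeping the existing option style. A hedged usage fragment, assuming an already started *WakuLightPush named client, a prepared *wpb.WakuMessage named msg, and a context ctx (these names are illustrative, not defined here):

var lpOptions []RequestOption
lpOptions = append(lpOptions, WithPubSubTopic("/waku/2/go/lightpush/test"))
lpOptions = append(lpOptions, WithMaxPeers(2)) // try up to two lightpush service peers

// A single successful peer response is enough for Publish to return the message hash.
hash, err := client.Publish(ctx, msg, lpOptions...)
if err != nil {
	return err // every selected peer failed
}
_ = hash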
@ -36,7 +36,7 @@ func TestLightPushOption(t *testing.T) {
}

require.Equal(t, host, params.host)
require.NotNil(t, params.selectedPeer)
require.NotEqual(t, 0, len(params.selectedPeers))
require.NotNil(t, params.requestID)

maddr, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/12345/p2p/16Uiu2HAm8KUwGRruseAaEGD6xGg6XKrDo8Py5dwDoL9wUpCxawGy")

@ -43,15 +43,19 @@ func makeWakuRelay(t *testing.T, pusubTopic string) (*relay.WakuRelay, *relay.Su

// Node1: Relay
// Node2: Relay+Lightpush
// Node3: Relay+Lightpush

// Client that will lightpush a message
//
// Node1 and Node 2 are peers
// Node1 and Node 3 are peers
// Client and Node 2 are peers
// Client will use lightpush request, sending the message to Node2
// Client and Node 3 are peers
// Client will use lightpush request, sending the message to Node2 and Node3
//
// Client send a successful message using lightpush
// Node2 receive the message and broadcast it
// Node1 receive the message
// Node2, Node3 receive the message and broadcast it
// Node1 receive the messages
func TestWakuLightPush(t *testing.T) {
testTopic := "/waku/2/go/lightpush/test"
node1, sub1, host1 := makeWakuRelay(t, testTopic)

@ -69,6 +73,16 @@ func TestWakuLightPush(t *testing.T) {
require.NoError(t, err)
defer lightPushNode2.Stop()

node3, sub3, host3 := makeWakuRelay(t, testTopic)
defer node3.Stop()
defer sub3.Unsubscribe()

lightPushNode3 := NewWakuLightPush(node3, nil, prometheus.DefaultRegisterer, utils.Logger())
lightPushNode3.SetHost(host3)
err = lightPushNode3.Start(ctx)
require.NoError(t, err)
defer lightPushNode3.Stop()

port, err := tests.FindFreePort(t, "", 5)
require.NoError(t, err)

@ -84,10 +98,21 @@ func TestWakuLightPush(t *testing.T) {
err = host2.Connect(ctx, host2.Peerstore().PeerInfo(host1.ID()))
require.NoError(t, err)

host3.Peerstore().AddAddr(host1.ID(), tests.GetHostAddress(host1), peerstore.PermanentAddrTTL)
err = host3.Peerstore().AddProtocols(host1.ID(), relay.WakuRelayID_v200)
require.NoError(t, err)

err = host3.Connect(ctx, host3.Peerstore().PeerInfo(host1.ID()))
require.NoError(t, err)

clientHost.Peerstore().AddAddr(host2.ID(), tests.GetHostAddress(host2), peerstore.PermanentAddrTTL)
err = clientHost.Peerstore().AddProtocols(host2.ID(), LightPushID_v20beta1)
require.NoError(t, err)

clientHost.Peerstore().AddAddr(host3.ID(), tests.GetHostAddress(host3), peerstore.PermanentAddrTTL)
err = clientHost.Peerstore().AddProtocols(host3.ID(), LightPushID_v20beta1)
require.NoError(t, err)

msg2 := tests.CreateWakuMessage("test2", utils.GetUnixEpoch())

// Wait for the mesh connection to happen between node1 and node2

@ -109,6 +134,7 @@ func TestWakuLightPush(t *testing.T) {
var lpOptions []RequestOption
lpOptions = append(lpOptions, WithPubSubTopic(testTopic))
lpOptions = append(lpOptions, WithPeer(host2.ID()))
lpOptions = append(lpOptions, WithMaxPeers(2))

// Checking that msg hash is correct
hash, err := client.Publish(ctx, msg2, lpOptions...)
@ -15,9 +15,11 @@ import (
"github.com/libp2p/go-msgio/pbio"
"github.com/multiformats/go-multiaddr"
"github.com/waku-org/go-waku/logging"
"github.com/waku-org/go-waku/waku/v2/peerstore"
"github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/protocol/enr"
"github.com/waku-org/go-waku/waku/v2/protocol/metadata/pb"
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
"go.uber.org/zap"
)

@ -59,11 +61,6 @@ func (wakuM *WakuMetadata) SetHost(h host.Host) {

// Start inits the metadata protocol
func (wakuM *WakuMetadata) Start(ctx context.Context) error {
if wakuM.clusterID == 0 {
wakuM.log.Warn("no clusterID is specified. Protocol will not be initialized")
return nil
}

ctx, cancel := context.WithCancel(ctx)

wakuM.ctx = ctx

@ -83,6 +80,7 @@ func (wakuM *WakuMetadata) RelayShard() (*protocol.RelayShards, error) {
}

func (wakuM *WakuMetadata) ClusterAndShards() (*uint32, []uint32, error) {

shard, err := wakuM.RelayShard()
if err != nil {
return nil, nil, err

@ -100,12 +98,15 @@ func (wakuM *WakuMetadata) ClusterAndShards() (*uint32, []uint32, error) {
return &u32ClusterID, shards, nil
}

func (wakuM *WakuMetadata) Request(ctx context.Context, peerID peer.ID) (*protocol.RelayShards, error) {
func (wakuM *WakuMetadata) Request(ctx context.Context, peerID peer.ID) (*pb.WakuMetadataResponse, error) {
logger := wakuM.log.With(logging.HostID("peer", peerID))

stream, err := wakuM.h.NewStream(ctx, peerID, MetadataID_v1)
if err != nil {
logger.Error("creating stream to peer", zap.Error(err))
if ps, ok := wakuM.h.Peerstore().(peerstore.WakuPeerstore); ok {
ps.AddConnFailure(peer.AddrInfo{ID: peerID})
}
return nil, err
}

@ -120,8 +121,6 @@ func (wakuM *WakuMetadata) Request(ctx context.Context, peerID peer.ID) (*protoc
request := &pb.WakuMetadataRequest{}
request.ClusterId = clusterID
request.Shards = shards
// TODO: remove with nwaku 0.28 deployment
request.ShardsDeprecated = shards // nolint: staticcheck

writer := pbio.NewDelimitedWriter(stream)
reader := pbio.NewDelimitedReader(stream, math.MaxInt32)

@ -149,31 +148,7 @@ func (wakuM *WakuMetadata) Request(ctx context.Context, peerID peer.ID) (*protoc

stream.Close()
logger.Debug("received metadata response")

if response.ClusterId == nil {
return nil, errors.New("node did not provide a waku clusterid")
}

rClusterID := uint16(*response.ClusterId)
var rShardIDs []uint16
if len(response.Shards) != 0 {
for _, i := range response.Shards {
rShardIDs = append(rShardIDs, uint16(i))
}
} else {
// TODO: remove with nwaku 0.28 deployment
for _, i := range response.ShardsDeprecated { // nolint: staticcheck
rShardIDs = append(rShardIDs, uint16(i))
}
}
logger.Debug("getting remote cluster and shards")

rs, err := protocol.NewRelayShards(rClusterID, rShardIDs...)
if err != nil {
return nil, err
}

return &rs, nil
return response, nil
}

func (wakuM *WakuMetadata) onRequest(ctx context.Context) func(network.Stream) {

@ -200,8 +175,6 @@ func (wakuM *WakuMetadata) onRequest(ctx context.Context) func(network.Stream) {
} else {
response.ClusterId = clusterID
response.Shards = shards
// TODO: remove with nwaku 0.28 deployment
response.ShardsDeprecated = shards // nolint: staticcheck
}

err = writer.WriteMsg(response)

@ -259,14 +232,44 @@ func (wakuM *WakuMetadata) Connected(n network.Network, cc network.Conn) {
}

peerID := cc.RemotePeer()
shard, err := wakuM.Request(wakuM.ctx, peerID)
response, err := wakuM.Request(wakuM.ctx, peerID)
if err != nil {
wakuM.disconnectPeer(peerID, err)
return
}
if response.ClusterId == nil {
wakuM.disconnectPeer(peerID, errors.New("node did not provide a waku clusterid"))
return
}

rClusterID := uint16(*response.ClusterId)
var rs protocol.RelayShards

wakuM.log.Debug("relay peer checking cluster and shards")

var rShardIDs []uint16
if len(response.Shards) != 0 {
for _, i := range response.Shards {
rShardIDs = append(rShardIDs, uint16(i))
}
} else {
if proto, err := wakuM.h.Peerstore().FirstSupportedProtocol(peerID, relay.WakuRelayID_v200); err == nil && proto == "" {
wakuM.log.Debug("light peer only checking clusterID")
if rClusterID != wakuM.clusterID {
wakuM.disconnectPeer(peerID, errors.New("different clusterID reported"))
}
return
}
}
wakuM.log.Debug("getting remote cluster and shards")
//if peer supports relay, then check for both clusterID and shards.
rs, err = protocol.NewRelayShards(rClusterID, rShardIDs...)
if err != nil {
wakuM.disconnectPeer(peerID, err)
return
}

if shard.ClusterID != wakuM.clusterID {
if rs.ClusterID != wakuM.clusterID {
wakuM.disconnectPeer(peerID, errors.New("different clusterID reported"))
return
}
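Since Request now returns the raw pb.WakuMetadataResponse instead of a RelayShards value, callers convert the response themselves; the Connected handler above and the updated tests both do this inline. A minimal sketch of such a conversion helper, using the package names that appear in this diff (the helper itself is not part of the change):

// toRelayShards converts a raw metadata response into protocol.RelayShards.
// It mirrors the inline conversion done in Connected() and in the tests.
func toRelayShards(resp *pb.WakuMetadataResponse) (protocol.RelayShards, error) {
	if resp.ClusterId == nil {
		return protocol.RelayShards{}, errors.New("node did not provide a waku clusterid")
	}
	var shardIDs []uint16
	for _, s := range resp.Shards {
		shardIDs = append(shardIDs, uint16(s))
	}
	return protocol.NewRelayShards(uint16(*resp.ClusterId), shardIDs...)
}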
@ -274,7 +277,7 @@ func (wakuM *WakuMetadata) Connected(n network.Network, cc network.Conn) {
// Store shards so they're used to verify if a relay peer supports the same shards we do
wakuM.peerShardsMutex.Lock()
defer wakuM.peerShardsMutex.Unlock()
wakuM.peerShards[peerID] = shard.ShardIDs
wakuM.peerShards[peerID] = rs.ShardIDs
}()
}

@ -336,7 +339,7 @@ func (wakuM *WakuMetadata) DisconnectPeerOnShardMismatch(ctx context.Context, pe
return err
}

if !rs.ContainsAnyShard(rs.ClusterID, peerShards) {
if rs != nil && !rs.ContainsAnyShard(rs.ClusterID, peerShards) {
wakuM.log.Info("shard mismatch", logging.HostID("peerID", peerID), zap.Uint16("clusterID", rs.ClusterID), zap.Uint16s("ourShardIDs", rs.ShardIDs), zap.Uint16s("theirShardIDs", peerShards))
wakuM.disconnect(peerID)
return errors.New("shard mismatch")

@ -3,20 +3,17 @@ package metadata
import (
"context"
"crypto/rand"
"errors"
"strings"
"testing"
"time"

gcrypto "github.com/ethereum/go-ethereum/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/peerstore"
libp2pProtocol "github.com/libp2p/go-libp2p/core/protocol"
"github.com/multiformats/go-multistream"
"github.com/stretchr/testify/require"
"github.com/waku-org/go-waku/tests"
"github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/protocol/enr"
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
"github.com/waku-org/go-waku/waku/v2/utils"
)

@ -46,15 +43,6 @@ func createWakuMetadata(t *testing.T, rs *protocol.RelayShards) *WakuMetadata {
return m1
}

func isProtocolNotSupported(err error) bool {
notSupportedErr := multistream.ErrNotSupported[libp2pProtocol.ID]{}
return errors.Is(err, notSupportedErr)
}

func isStreamReset(err error) bool {
return strings.Contains(err.Error(), "stream reset")
}

func TestWakuMetadataRequest(t *testing.T) {
testShard16 := uint16(16)

@ -68,13 +56,28 @@ func TestWakuMetadataRequest(t *testing.T) {
m_noRS := createWakuMetadata(t, nil)

m16_1.h.Peerstore().AddAddrs(m16_2.h.ID(), m16_2.h.Network().ListenAddresses(), peerstore.PermanentAddrTTL)
err = m16_1.h.Peerstore().AddProtocols(m16_2.h.ID(), relay.WakuRelayID_v200)
require.NoError(t, err)

err = m16_2.h.Peerstore().AddProtocols(m16_1.h.ID(), relay.WakuRelayID_v200)
require.NoError(t, err)

m16_1.h.Peerstore().AddAddrs(m_noRS.h.ID(), m_noRS.h.Network().ListenAddresses(), peerstore.PermanentAddrTTL)

// Query a peer that is subscribed to a shard
result, err := m16_1.Request(context.Background(), m16_2.h.ID())
require.NoError(t, err)
require.Equal(t, testShard16, result.ClusterID)
require.Equal(t, rs16_2.ShardIDs, result.ShardIDs)

var rShardIDs []uint16
if len(result.Shards) != 0 {
for _, i := range result.Shards {
rShardIDs = append(rShardIDs, uint16(i))
}
}
rs, err := protocol.NewRelayShards(uint16(*result.ClusterId), rShardIDs...)
require.NoError(t, err)
require.Equal(t, testShard16, rs.ClusterID)
require.Equal(t, rs16_2.ShardIDs, rs.ShardIDs)

// Updating the peer shards
rs16_2.ShardIDs = append(rs16_2.ShardIDs, 3, 4)

@ -84,12 +87,16 @@ func TestWakuMetadataRequest(t *testing.T) {
// Query same peer, after that peer subscribes to more shards
result, err = m16_1.Request(context.Background(), m16_2.h.ID())
require.NoError(t, err)
require.Equal(t, testShard16, result.ClusterID)
require.ElementsMatch(t, rs16_2.ShardIDs, result.ShardIDs)

// Query a peer not subscribed to any shard
_, err = m16_1.Request(context.Background(), m_noRS.h.ID())
require.True(t, isProtocolNotSupported(err) || isStreamReset(err))
rShardIDs = make([]uint16, 0)
if len(result.Shards) != 0 {
for _, i := range result.Shards {
rShardIDs = append(rShardIDs, uint16(i))
}
}
rs, err = protocol.NewRelayShards(uint16(*result.ClusterId), rShardIDs...)
require.NoError(t, err)
require.Equal(t, testShard16, rs.ClusterID)
require.ElementsMatch(t, rs16_2.ShardIDs, rs.ShardIDs)
}

func TestNoNetwork(t *testing.T) {

@ -159,5 +166,4 @@ func TestDropConnectionOnDiffNetworks(t *testing.T) {
require.Len(t, m3.h.Network().Peers(), 1)
require.Equal(t, []peer.ID{m3.h.ID()}, m2.h.Network().Peers())
require.Equal(t, []peer.ID{m2.h.ID()}, m3.h.Network().Peers())

}

@ -76,6 +76,9 @@ func (wakuPX *WakuPeerExchange) Request(ctx context.Context, numPeers int, opts

stream, err := wakuPX.h.NewStream(ctx, params.selectedPeer, PeerExchangeID_v20alpha1)
if err != nil {
if ps, ok := wakuPX.h.Peerstore().(peerstore.WakuPeerstore); ok {
ps.AddConnFailure(peer.AddrInfo{ID: params.selectedPeer})
}
return err
}

@ -98,8 +98,11 @@ func (wakuPX *WakuPeerExchange) onRequest() func(network.Stream) {

if wakuPX.limiter != nil && !wakuPX.limiter.Allow() {
wakuPX.metrics.RecordError(rateLimitFailure)
wakuPX.log.Error("exceeds the rate limit")
wakuPX.log.Info("exceeds the rate limit")
// TODO: peer exchange protocol should contain an err field
if err := stream.Reset(); err != nil {
wakuPX.log.Error("resetting connection", zap.Error(err))
}
return
}

@ -253,6 +253,9 @@ func (s *WakuStore) queryFrom(ctx context.Context, storeRequest *pb.StoreQueryRe
stream, err := s.h.NewStream(ctx, selectedPeer, StoreQueryID_v300)
if err != nil {
logger.Error("creating stream to peer", zap.Error(err))
if ps, ok := s.h.Peerstore().(peerstore.WakuPeerstore); ok {
ps.AddConnFailure(peer.AddrInfo{ID: selectedPeer})
}
return nil, err
}
@ -10,6 +10,8 @@ import (
"testing"
"time"

"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/multiformats/go-multiaddr"
"github.com/prometheus/client_golang/prometheus"

@ -17,6 +19,8 @@ import (
"github.com/waku-org/go-waku/tests"
"github.com/waku-org/go-waku/waku/v2/peermanager"
"github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/protocol/enr"
"github.com/waku-org/go-waku/waku/v2/protocol/metadata"
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
"github.com/waku-org/go-waku/waku/v2/timesource"

@ -34,6 +38,21 @@ func TestStoreClient(t *testing.T) {
host, err := tests.MakeHost(context.Background(), port, rand.Reader)
require.NoError(t, err)

db, err := enode.OpenDB("")
require.NoError(t, err)
priv, err := crypto.GenerateKey()
require.NoError(t, err)
localnode := enode.NewLocalNode(db, priv)

pubsubTopic := "/waku/2/rs/99/1"
clusterID := uint16(99)
rs, _ := protocol.NewRelayShards(clusterID, 1)
enr.Update(utils.Logger(), localnode, enr.WithWakuRelaySharding(rs))

metadata := metadata.NewWakuMetadata(clusterID, localnode, utils.Logger())
metadata.SetHost(host)
metadata.Start(context.Background())

// Creating a relay instance for pushing messages to the store node
b := relay.NewBroadcaster(10)
require.NoError(t, b.Start(context.Background()))

@ -53,7 +72,7 @@ func TestStoreClient(t *testing.T) {
wakuStore := NewWakuStore(pm, timesource.NewDefaultClock(), utils.Logger())
wakuStore.SetHost(host)

_, err = wakuRelay.Subscribe(context.Background(), protocol.NewContentFilter(protocol.DefaultPubsubTopic{}.String()), relay.WithoutConsumer())
_, err = wakuRelay.Subscribe(context.Background(), protocol.NewContentFilter(pubsubTopic), relay.WithoutConsumer())
require.NoError(t, err)

// Obtain multiaddr from env

@ -83,7 +102,7 @@ func TestStoreClient(t *testing.T) {
Version: proto.Uint32(0),
Timestamp: utils.GetUnixEpoch(timesource.NewDefaultClock()),
}
_, err := wakuRelay.Publish(ctx, msg, relay.WithDefaultPubsubTopic())
_, err := wakuRelay.Publish(ctx, msg, relay.WithPubSubTopic(pubsubTopic))
require.NoError(t, err)

messages = append(messages, msg)

@ -94,7 +113,7 @@ func TestStoreClient(t *testing.T) {
time.Sleep(1 * time.Second)

// Check for message existence
exists, err := wakuStore.Exists(ctx, messages[0].Hash(relay.DefaultWakuTopic), WithPeer(storenode.ID))
exists, err := wakuStore.Exists(ctx, messages[0].Hash(pubsubTopic), WithPeer(storenode.ID))
require.NoError(t, err)
require.True(t, exists)

@ -104,7 +123,7 @@ func TestStoreClient(t *testing.T) {
require.False(t, exists)

// Query messages with forward pagination
response, err := wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(relay.DefaultWakuTopic, "test"), TimeStart: startTime, TimeEnd: endTime}, WithPaging(true, 2))
response, err := wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(pubsubTopic, "test"), TimeStart: startTime, TimeEnd: endTime}, WithPaging(true, 2))
require.NoError(t, err)

// -- First page:

@ -141,7 +160,7 @@ func TestStoreClient(t *testing.T) {
require.NoError(t, err)

// Query messages with backward pagination
response, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(relay.DefaultWakuTopic, "test"), TimeStart: startTime, TimeEnd: endTime}, WithPaging(false, 2))
response, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(pubsubTopic, "test"), TimeStart: startTime, TimeEnd: endTime}, WithPaging(false, 2))
require.NoError(t, err)

// -- First page:

@ -176,46 +195,46 @@ func TestStoreClient(t *testing.T) {
require.True(t, response.IsComplete())

// No cursor should be returned if there are no messages that match the criteria
response, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(relay.DefaultWakuTopic, "no-messages"), TimeStart: startTime, TimeEnd: endTime}, WithPaging(true, 2))
response, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(pubsubTopic, "no-messages"), TimeStart: startTime, TimeEnd: endTime}, WithPaging(true, 2))
require.NoError(t, err)
require.Len(t, response.messages, 0)
require.Empty(t, response.Cursor())

// If the page size is larger than the number of existing messages, it should not return a cursor
response, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(relay.DefaultWakuTopic, "test"), TimeStart: startTime, TimeEnd: endTime}, WithPaging(true, 100))
response, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(pubsubTopic, "test"), TimeStart: startTime, TimeEnd: endTime}, WithPaging(true, 100))
require.NoError(t, err)
require.Len(t, response.messages, 5)
require.Empty(t, response.Cursor())

// Invalid cursors should fail
// TODO: nwaku does not support this feature yet
//_, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(relay.DefaultWakuTopic, "test"), TimeStart: startTime, TimeEnd: endTime}, WithCursor([]byte{1, 2, 3, 4, 5, 6}))
//_, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(pubsubTopic, "test"), TimeStart: startTime, TimeEnd: endTime}, WithCursor([]byte{1, 2, 3, 4, 5, 6}))
//require.Error(t, err)

// Inexistent cursors should return an empty response
// TODO: nwaku does not support this feature yet
//response, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(relay.DefaultWakuTopic, "test"), TimeStart: startTime, TimeEnd: endTime}, WithCursor(make([]byte, 32))) // Requesting cursor 0x00...00
//response, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(pubsubTopic, "test"), TimeStart: startTime, TimeEnd: endTime}, WithCursor(make([]byte, 32))) // Requesting cursor 0x00...00
//require.NoError(t, err)
//require.Len(t, response.messages, 0)
//require.Empty(t, response.Cursor())

// Handle temporal history query with an invalid time window
_, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(relay.DefaultWakuTopic, "test"), TimeStart: endTime, TimeEnd: startTime})
_, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(pubsubTopic, "test"), TimeStart: endTime, TimeEnd: startTime})
require.NotNil(t, err)

// Handle temporal history query with a zero-size time window
response, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(relay.DefaultWakuTopic, "test"), TimeStart: startTime, TimeEnd: startTime})
response, err = wakuStore.Query(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(pubsubTopic, "test"), TimeStart: startTime, TimeEnd: startTime})
require.NoError(t, err)
require.Len(t, response.messages, 0)
require.Empty(t, response.Cursor())

// Should not include data
response, err = wakuStore.Request(ctx, MessageHashCriteria{MessageHashes: []pb.MessageHash{messages[0].Hash(relay.DefaultWakuTopic)}}, IncludeData(false), WithPeer(storenode.ID))
response, err = wakuStore.Request(ctx, MessageHashCriteria{MessageHashes: []pb.MessageHash{messages[0].Hash(pubsubTopic)}}, IncludeData(false), WithPeer(storenode.ID))
require.NoError(t, err)
require.Len(t, response.messages, 1)
require.Nil(t, response.messages[0].Message)

response, err = wakuStore.Request(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(relay.DefaultWakuTopic, "test")}, IncludeData(false))
response, err = wakuStore.Request(ctx, FilterCriteria{ContentFilter: protocol.NewContentFilter(pubsubTopic, "test")}, IncludeData(false))
require.NoError(t, err)
require.GreaterOrEqual(t, len(response.messages), 1)
require.Nil(t, response.messages[0].Message)
@ -1,6 +1,7 @@
package subscription

import (
"context"
"errors"
"sync"

@ -178,17 +179,17 @@ func (sub *SubscriptionsMap) Clear() {
sub.clear()
}

func (sub *SubscriptionsMap) Notify(peerID peer.ID, envelope *protocol.Envelope) {
func (sub *SubscriptionsMap) Notify(ctx context.Context, peerID peer.ID, envelope *protocol.Envelope) {
sub.RLock()
defer sub.RUnlock()

subscriptions, ok := sub.items[peerID].SubsPerPubsubTopic[envelope.PubsubTopic()]
if ok {
iterateSubscriptionSet(sub.logger, subscriptions, envelope)
iterateSubscriptionSet(ctx, sub.logger, subscriptions, envelope)
}
}

func iterateSubscriptionSet(logger *zap.Logger, subscriptions SubscriptionSet, envelope *protocol.Envelope) {
func iterateSubscriptionSet(ctx context.Context, logger *zap.Logger, subscriptions SubscriptionSet, envelope *protocol.Envelope) {
for _, subscription := range subscriptions {
func(subscription *SubscriptionDetails) {
subscription.RLock()

@ -201,6 +202,8 @@ func iterateSubscriptionSet(logger *zap.Logger, subscriptions SubscriptionSet, e

if !subscription.Closed {
select {
case <-ctx.Done():
return
case subscription.C <- envelope:
default:
logger.Warn("can't deliver message to subscription. subscriber too slow")
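Delivery now races the subscriber channel against the caller's context, while keeping the non-blocking default for slow subscribers. A self-contained sketch of that select pattern with generic names (not the package's own types):

package main

import (
	"context"
	"fmt"
	"time"
)

// deliver hands msg to ch without blocking: it gives up if the context is
// cancelled, and drops the message if the receiver is not ready.
func deliver(ctx context.Context, ch chan<- string, msg string) bool {
	select {
	case <-ctx.Done():
		return false // caller gave up
	case ch <- msg:
		return true // delivered
	default:
		fmt.Println("subscriber too slow, dropping message")
		return false
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	ch := make(chan string, 1)
	fmt.Println(deliver(ctx, ch, "hello")) // true: buffered slot available
	fmt.Println(deliver(ctx, ch, "again")) // false: buffer full, message dropped
}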
@ -153,8 +153,8 @@ func TestSubscriptionsNotify(t *testing.T) {
wg.Add(1)
go func() {
defer wg.Done()
fmap.Notify(p1, envTopic1Ct1)
fmap.Notify(p2, envTopic1Ct1)
fmap.Notify(ctx, p1, envTopic1Ct1)
fmap.Notify(ctx, p2, envTopic1Ct1)
}()

<-successChan

@ -177,8 +177,8 @@ func TestSubscriptionsNotify(t *testing.T) {
wg.Add(1)
go func() {
defer wg.Done()
fmap.Notify(p1, envTopic1Ct2)
fmap.Notify(p2, envTopic1Ct2)
fmap.Notify(ctx, p1, envTopic1Ct2)
fmap.Notify(ctx, p2, envTopic1Ct2)
}()

<-successChan

@ -207,8 +207,8 @@ func TestSubscriptionsNotify(t *testing.T) {
wg.Add(1)
go func() {
defer wg.Done()
fmap.Notify(p1, envTopic1Ct1_2)
fmap.Notify(p2, envTopic1Ct1_2)
fmap.Notify(ctx, p1, envTopic1Ct1_2)
fmap.Notify(ctx, p2, envTopic1Ct1_2)
}()

<-successChan // One of these successes is for closing the subscription