package main

import (
    "bytes"
    "context"
    "encoding/binary"
    "flag"
    "fmt"
    "net"
    "os"
    "strconv"
    "time"
    // "math/rand"

    logging "github.com/ipfs/go-log/v2"
    "github.com/multiformats/go-multiaddr"
    "github.com/waku-org/go-waku/waku/v2/dnsdisc"
    "github.com/waku-org/go-waku/waku/v2/node"
    "github.com/waku-org/go-waku/waku/v2/protocol"
    "github.com/waku-org/go-waku/waku/v2/protocol/filter"
    // "github.com/waku-org/go-waku/waku/v2/utils"

    "github.com/logos-co/wadoku/waku/common"

    //"crypto/rand"
    //"encoding/hex"
    //"github.com/ethereum/go-ethereum/crypto"
    //"github.com/waku-org/go-waku/waku/v2/protocol/pb"
    //"github.com/waku-org/go-waku/waku/v2/payload"
)
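
// This program runs a Waku "filter" light node for the wadoku measurement
// harness: it discovers fleet peers via DNS discovery, dials one of them,
// subscribes to a single content topic over the Waku Filter protocol, and
// appends each received message (sequence number, content topic, sender
// timestamp and observed delay) to an output file for a configured duration.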

var log = logging.Logger("filter")
var pubSubTopic = protocol.DefaultPubsubTopic()
var conf = common.Config{}
var nodeType = "filter"

//const dnsDiscoveryUrl = "enrtree://AOGECG2SPND25EEFMAJ5WF3KSGJNSGV356DSTL2YVLLZWIV6SAYBM@prod.waku.nodes.status.im"
//const nameServer = "1.1.1.1" // your local dns provider might be blocking entr

func init() {
    // args
    fmt.Println("Populating CLI params...")
    common.ArgInit(&conf)
}

func main() {
    flag.Parse()

    // setup the log
    lvl, err := logging.LevelFromString(conf.LogLevel)
    if err != nil {
        panic(err)
    }
    logging.SetAllLoggers(lvl)
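
    // Pick a random TCP listen port in [StartPort, StartPort+PortRange); the
    // constants come from the shared common package, presumably so that several
    // instances can run on the same host without their listen addresses clashing.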
    tcpEndPoint := common.LocalHost +
        ":" +
        strconv.Itoa(common.StartPort + common.RandInt(0, common.PortRange))

    // create the waku node
    hostAddr, _ := net.ResolveTCPAddr("tcp", tcpEndPoint)
    ctx := context.Background()
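
    // Build a filter-only light node: relay stays disabled and, in this go-waku
    // version, the boolean passed to WithWakuFilter selects full-node mode, so
    // `false` keeps this node a plain filter client that only receives messages
    // pushed to it by the remote peer.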
    filterNode, err := node.New(ctx,
        //node.WithWakuRelay(),
        //node.WithNTP(), // don't use NTP, fails at msec granularity
        node.WithHostAddress(hostAddr),
        node.WithWakuFilter(false), // we do NOT want a full node
    )
    if err != nil {
        panic(err)
    }

    log.Info("CONFIG : ", conf)

    // find the list of full node fleet peers
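    // DNS discovery (EIP-1459 style enrtree lookup): the discovery URL and the
    // nameserver come from the common package; the commented-out constants near
    // the top of the file show the kind of values expected (the status.im prod
    // fleet and 1.1.1.1).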
    log.Info("attempting DNS discovery with: ", common.DnsDiscoveryUrl)
    nodes, err := dnsdisc.RetrieveNodes(ctx, common.DnsDiscoveryUrl, dnsdisc.WithNameserver(common.NameServer))
    if err != nil {
        panic(err.Error())
    }
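
    // Note: only the first discovered multiaddress is used below; this assumes
    // discovery returned at least one node with at least one address, otherwise
    // nodeList[0] panics with an index-out-of-range error.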
    // connect to the first peer
    var nodeList []multiaddr.Multiaddr
    for _, n := range nodes {
        nodeList = append(nodeList, n.Addresses...)
    }
    log.Info("Discovered and connecting to: ", nodeList[0])
    peerID, err := nodeList[0].ValueForProtocol(multiaddr.P_P2P)
    if err != nil {
        log.Error("could not get peerID: ", err)
        panic(err)
    }

    // initialise the exponential back off
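    // Dial the chosen peer, retrying with exponential back-off on failure. The
    // initial wait and the retry cap live in the common package; e.g. if
    // ExpBackOffInit were 1s and ExpBackOffRetries were 5 (illustrative values
    // only), the successive waits would be 1s, 2s, 4s, 8s, 16s, 32s before
    // giving up.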
    retries, sleepTime := 0, common.ExpBackOffInit
    for {
        // try / retry
        if err = filterNode.DialPeerWithMultiAddress(ctx, nodeList[0]); err == nil {
            break // success! done
        }
        // failed, back off for sleepTime and retry
        log.Error("could not connect to ", peerID, err,
            " : will retry in ", sleepTime, " retry# ", retries)
        time.Sleep(sleepTime) // back off
        retries++
        sleepTime *= 2 // exponential : double the next wait time
        // bail out
        if retries > common.ExpBackOffRetries {
            log.Error("Exhausted retries, could not connect to ", peerID, err,
                " : number of retries performed ", retries)
            panic(err)
        }
    }

    /*
    err = filterNode.DialPeerWithMultiAddress(ctx, nodeList[0])
    if err != nil {
        log.Error("could not connect to ", peerID, err)
        panic(err)
    }*/

    log.Info("Starting the ", nodeType, " node ", conf.ContentTopic)
    // start the light node
    err = filterNode.Start()
    if err != nil {
        log.Error("Could not start the ", nodeType, " node ", conf.ContentTopic)
        panic(err)
    }

    log.Info("Subscribing to the content topic ", conf.ContentTopic)
    // Subscribe to our ContentTopic and send a FilterRequest
    cf := filter.ContentFilter{
        Topic:         pubSubTopic.String(),
        ContentTopics: []string{conf.ContentTopic},
    }
    _, theFilter, err := filterNode.Filter().Subscribe(ctx, cf)
    if err != nil {
        panic(err)
    }

    stopC := make(chan struct{})
    go func() {
        f, err := os.OpenFile(conf.Ofname, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
        if err != nil {
            log.Error("Could not open file: ", err)
            panic(err)
        }
        defer f.Close()

        log.Info("Waiting to receive the message")
        for env := range theFilter.Chan {
            msg := env.Message()
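
            // The payload is assumed to begin with a little-endian int32 sequence
            // number; a matching publisher would presumably have encoded it along
            // the lines of:
            //
            //     buf := new(bytes.Buffer)
            //     binary.Write(buf, binary.LittleEndian, int32(seq))
            //     payload := buf.Bytes()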
            rbuf := bytes.NewBuffer(msg.Payload)
            var r32 int32 //:= make([]int64, (len(msg.Payload)+7)/8)
            err = binary.Read(rbuf, binary.LittleEndian, &r32)
            if err != nil {
                log.Error("binary.Read failed: ", err)
                panic(err)
            }
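
            // The delay is measured against the sender-side timestamp (nanoseconds
            // in msg.Timestamp), so its accuracy depends on how closely the
            // publisher's clock and this host's clock agree. Each output line is:
            // sequence number, content topic, sender timestamp, delay in
            // microseconds, delay in milliseconds.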
            msg_delay := time.Since(time.Unix(0, msg.Timestamp))
            str := fmt.Sprintf("GOT : %d, %s, %d, %d, %d\n", r32, msg.ContentTopic, msg.Timestamp, msg_delay.Microseconds(), msg_delay.Milliseconds())
            //str := fmt.Sprintf("GOT: %d %s %s %s %s\n", r32, msg, utils.GetUnixEpochFrom(lightNode.Timesource().Now()), msg_delay.Microseconds(), msg_delay.Milliseconds())
            //"Received msg, @", string(msg.ContentTopic), "@", msg.Timestamp, "@", utils.GetUnixEpochFrom(lightNode.Timesource().Now()) )
            log.Info(str)
            if _, err = f.WriteString(str); err != nil {
                panic(err)
            }
        }

        log.Error("Out of the Write loop: Message channel closed - timeout")
        stopC <- struct{}{}
    }()
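
    // Note: nothing ever receives from stopC, so if the filter channel closes
    // early the goroutine above simply blocks on the send; shutdown is driven
    // entirely by the timer below.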

    // add extra 20sec + 5% as a grace period to receive as much as possible
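    // (InterPubSubDelay and GraceWait live in the common package; going by the
    // comment above they are 20 (seconds) and 5 (percent), so for a 10-minute
    // run filterWait would come to roughly 10m + 20s + 30s = 10m50s.)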
    filterWait := conf.Duration +
        common.InterPubSubDelay*time.Second +
        conf.Duration/100*common.GraceWait

    log.Info("Will be waiting for ", filterWait, ", excess ", common.GraceWait, "% from ", conf.Duration)
    <-time.After(filterWait)
    log.Error(conf.Duration, " elapsed, closing the "+nodeType+" node!")

    // shut the node down
    filterNode.Stop()
}