2023-01-18 11:57:05 +00:00
|
|
|
package main
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"flag"
|
|
|
|
"fmt"
|
|
|
|
"net"
|
|
|
|
"bytes"
|
|
|
|
// "math/rand"
|
|
|
|
"strconv"
|
|
|
|
"encoding/binary"
|
|
|
|
"os"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
logging "github.com/ipfs/go-log/v2"
|
|
|
|
"github.com/multiformats/go-multiaddr"
|
|
|
|
"github.com/waku-org/go-waku/waku/v2/dnsdisc"
|
|
|
|
"github.com/waku-org/go-waku/waku/v2/node"
|
|
|
|
"github.com/waku-org/go-waku/waku/v2/protocol"
|
|
|
|
// "github.com/waku-org/go-waku/waku/v2/protocol/filter"
|
|
|
|
// "github.com/waku-org/go-waku/waku/v2/utils"
|
|
|
|
"github.com/logos-co/wadoku/waku/common"
|
|
|
|
//"crypto/rand"
|
|
|
|
//"encoding/hex"
|
|
|
|
//"github.com/ethereum/go-ethereum/crypto"
|
|
|
|
//"github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
|
|
|
//"github.com/waku-org/go-waku/waku/v2/payload"
|
|
|
|
)
|
|
|
|
|
|
|
|
// log is the package-wide structured logger, namespaced "subscribe".
var log = logging.Logger("subscribe")

// pubSubTopic is the default waku gossipsub topic.
// NOTE(review): appears unused in this file — it may be referenced
// elsewhere or be leftover; confirm before removing.
var pubSubTopic = protocol.DefaultPubsubTopic()

// conf is the runtime configuration, populated from CLI flags
// (definitions registered in init, values bound by flag.Parse in main).
var conf = common.Config{}

// nodeType labels this binary's role in log output.
var nodeType = "subscribe"

//const dnsDiscoveryUrl = "enrtree://ANEDLO25QVUGJOUTQFRYKWX6P4Z4GKVESBMHML7DZ6YK4LGS5FC5O@prod.wakuv2.nodes.status.im"

//const nameServer = "1.1.1.1" // your local dns provider might be blocking entr
|
|
|
|
|
|
|
|
|
|
|
|
// init registers this node's CLI parameters into conf via the shared
// helper. This must run before main calls flag.Parse, which is why it
// lives in init. The actual flag values are only available after parsing.
func init() {
	// args
	fmt.Println("Populating CLI params...")
	common.ArgInit(&conf)
}
|
|
|
|
|
|
|
|
func main() {
|
|
|
|
|
|
|
|
flag.Parse()
|
|
|
|
|
|
|
|
// setup the log
|
|
|
|
lvl, err := logging.LevelFromString(conf.LogLevel)
|
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
logging.SetAllLoggers(lvl)
|
|
|
|
|
2023-01-21 11:37:33 +00:00
|
|
|
tcpEndPoint := common.LocalHost +
|
|
|
|
":" +
|
|
|
|
strconv.Itoa(common.StartPort + common.RandInt(0, common.PortRange))
|
2023-01-18 11:57:05 +00:00
|
|
|
// create the waku node
|
|
|
|
hostAddr, _ := net.ResolveTCPAddr("tcp", tcpEndPoint)
|
|
|
|
ctx := context.Background()
|
2023-01-21 11:37:33 +00:00
|
|
|
subNode, err := node.New(ctx,
|
2023-01-18 11:57:05 +00:00
|
|
|
node.WithWakuRelay(),
|
2023-01-21 11:37:33 +00:00
|
|
|
//node.WithNTP(), // don't use NTP, fails at msec granularity
|
2023-01-18 11:57:05 +00:00
|
|
|
node.WithHostAddress(hostAddr),
|
2023-01-21 11:37:33 +00:00
|
|
|
//node.WithWakuFilter(false), // we do NOT want a full node
|
2023-01-18 11:57:05 +00:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
log.Info("CONFIG : ", conf)
|
|
|
|
// find the list of full node fleet peers
|
|
|
|
log.Info("attempting DNS discovery with: ", common.DnsDiscoveryUrl)
|
|
|
|
nodes, err := dnsdisc.RetrieveNodes(ctx, common.DnsDiscoveryUrl, dnsdisc.WithNameserver(common.NameServer))
|
|
|
|
if err != nil {
|
|
|
|
panic(err.Error())
|
|
|
|
}
|
|
|
|
|
|
|
|
// connect to the first peer
|
|
|
|
var nodeList []multiaddr.Multiaddr
|
|
|
|
for _, n := range nodes {
|
|
|
|
nodeList = append(nodeList, n.Addresses...)
|
|
|
|
}
|
|
|
|
log.Info("Discovered and connecting to: ", nodeList[0])
|
|
|
|
peerID, err := nodeList[0].ValueForProtocol(multiaddr.P_P2P)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("could not get peerID: ", err)
|
|
|
|
panic(err)
|
|
|
|
}
|
2023-01-22 07:25:32 +00:00
|
|
|
// initialise the exponential back off
|
|
|
|
retries, sleepTime := 0, common.ExpBackOffInit
|
|
|
|
for {
|
|
|
|
// try / retry
|
|
|
|
if err = subNode.DialPeerWithMultiAddress(ctx, nodeList[0]); err == nil {
|
|
|
|
break // success! done
|
|
|
|
}
|
|
|
|
// failed, back off for sleepTime and retry
|
|
|
|
log.Error("could not connect to ", peerID, err,
|
|
|
|
" : will retry in ", sleepTime, " retry# ", retries)
|
|
|
|
time.Sleep(sleepTime) // back off
|
|
|
|
retries++
|
|
|
|
sleepTime *= 2 // exponential : double the next wait time
|
|
|
|
// bail out
|
|
|
|
if retries > common.ExpBackOffRetries {
|
|
|
|
log.Error("Exhausted retries, could not connect to ", peerID, err,
|
|
|
|
"number of retries performed ", retries)
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
/*
|
2023-01-21 11:37:33 +00:00
|
|
|
err = subNode.DialPeerWithMultiAddress(ctx, nodeList[0])
|
2023-01-18 11:57:05 +00:00
|
|
|
if err != nil {
|
|
|
|
log.Error("could not connect to ", peerID, err)
|
|
|
|
panic(err)
|
|
|
|
}
|
2023-01-22 07:25:32 +00:00
|
|
|
*/
|
2023-01-18 11:57:05 +00:00
|
|
|
|
2023-01-21 11:37:33 +00:00
|
|
|
log.Info("Starting the ", nodeType, " node ", conf.ContentTopic)
|
|
|
|
// start the light node
|
|
|
|
err = subNode.Start()
|
2023-01-18 11:57:05 +00:00
|
|
|
if err != nil {
|
2023-01-21 11:37:33 +00:00
|
|
|
log.Error("Could not start the", nodeType, " node ", conf.ContentTopic)
|
2023-01-18 11:57:05 +00:00
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
|
2023-01-21 11:37:33 +00:00
|
|
|
log.Info("Subscribing to the content topic", conf.ContentTopic)
|
|
|
|
// Subscribe to our ContentTopic and send a FilterRequest
|
|
|
|
theSub, err := subNode.Relay().Subscribe(ctx)
|
2023-01-18 11:57:05 +00:00
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
stopC := make(chan struct{})
|
|
|
|
go func() {
|
|
|
|
f, err := os.OpenFile(conf.Ofname, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("Could not open file: ", err)
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
defer f.Close()
|
|
|
|
|
|
|
|
log.Info("Waiting to receive the message")
|
2023-01-21 11:37:33 +00:00
|
|
|
for env := range theSub.C {
|
2023-01-18 11:57:05 +00:00
|
|
|
msg := env.Message()
|
2023-01-21 18:28:45 +00:00
|
|
|
// ignore other relay messages
|
|
|
|
if msg.ContentTopic != conf.ContentTopic {
|
|
|
|
continue
|
|
|
|
}
|
2023-01-18 11:57:05 +00:00
|
|
|
|
|
|
|
rbuf := bytes.NewBuffer(msg.Payload)
|
|
|
|
var r32 int32 //:= make([]int64, (len(msg.Payload)+7)/8)
|
|
|
|
err = binary.Read(rbuf, binary.LittleEndian, &r32)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("binary.Read failed:", err)
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
|
2023-01-21 11:37:33 +00:00
|
|
|
msg_delay := time.Since(time.Unix(0, msg.Timestamp))
|
|
|
|
str := fmt.Sprintf("GOT : %d, %s, %d, %d, %d\n", r32, msg.ContentTopic, msg.Timestamp, msg_delay.Microseconds(), msg_delay.Milliseconds())
|
2023-01-18 11:57:05 +00:00
|
|
|
//str := fmt.Sprintf("GOT: %d %s %s %s %s\n", r32, msg, utils.GetUnixEpochFrom(lightNode.Timesource().Now()), msg_delay.Microseconds(), msg_delay.Milliseconds())
|
|
|
|
//"Received msg, @", string(msg.ContentTopic), "@", msg.Timestamp, "@", utils.GetUnixEpochFrom(lightNode.Timesource().Now()) )
|
|
|
|
log.Info(str)
|
|
|
|
if _, err = f.WriteString(str); err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
}
|
2023-01-21 11:37:33 +00:00
|
|
|
log.Error("Out of the Write loop: Message channel closed - timeout")
|
2023-01-18 11:57:05 +00:00
|
|
|
stopC <- struct{}{}
|
|
|
|
}()
|
2023-01-21 11:37:33 +00:00
|
|
|
// add extra 20sec + 5% as a grace period to receive as much as possible
|
|
|
|
filterWait := conf.Duration +
|
|
|
|
common.InterPubSubDelay * time.Second +
|
|
|
|
conf.Duration/100*common.GraceWait
|
2023-01-18 11:57:05 +00:00
|
|
|
|
2023-01-21 11:37:33 +00:00
|
|
|
log.Info("Will be waiting for ", filterWait, ", excess ", common.GraceWait, "% from ", conf.Duration)
|
|
|
|
<-time.After(filterWait)
|
|
|
|
log.Error(conf.Duration, " elapsed, closing the " + nodeType + " node!");
|
2023-01-18 11:57:05 +00:00
|
|
|
|
|
|
|
// shut the nodes down
|
2023-01-21 11:37:33 +00:00
|
|
|
subNode.Stop()
|
2023-01-18 11:57:05 +00:00
|
|
|
}
|