Removing discovery-proxy from sources (#1445)

It was going to be used as a proxy from discv5 to rendezvous for LES nodes.
We are not using it at the moment, and it is not clear when we will need it,
or whether we will use it at all.

Additionally, it has some memory leaks that would need to be fixed, so it is
better to remove it for now and restore it if/when we need it again.
Dmitry Shulyak 2019-04-18 10:39:55 +03:00 committed by GitHub
parent 2c0c0fff24
commit 442a12e996
7 changed files with 9 additions and 440 deletions
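
For context, the removed component discovered records for a topic via discovery v5 and re-registered them on rendezvous servers. The sketch below is a condensed, hypothetical invocation based on the deleted cmd/proxy/main.go; the listen address, network ID, and the nil bootnode/server slices are illustrative placeholders, not values from this commit.

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"

	"github.com/status-im/status-go/discovery"
	sparams "github.com/status-im/status-go/params"
)

func main() {
	// Start a discovery v5 listener with a throwaway key.
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Crit("unable to generate a key", "error", err)
	}
	v5 := discovery.NewDiscV5(key, "0.0.0.0:31143", nil /* []*discv5.Node bootnodes */)
	if err := v5.Start(); err != nil {
		log.Crit("unable to start discovery v5", "error", err)
	}

	// Mirror every record found for the topic onto rendezvous servers until stop is closed.
	stop := make(chan struct{})
	defer close(stop)
	if err := discovery.ProxyToRendezvous(v5, stop, &event.Feed{}, discovery.ProxyOptions{
		Topic:          sparams.LesTopic(1), // LES topic for a given network ID
		Servers:        nil,                 // rendezvous servers as []ma.Multiaddr
		Limit:          100,                 // cap on proxied records
		LivenessWindow: 10 * time.Minute,    // drop records not seen again within this window
	}); err != nil {
		log.Error("proxying to rendezvous servers failed", "error", err)
	}
}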

Makefile (modified)

@@ -48,7 +48,6 @@ gotest_extraflags =
 DOCKER_IMAGE_NAME ?= statusteam/status-go
 BOOTNODE_IMAGE_NAME ?= statusteam/bootnode
-PROXY_IMAGE_NAME ?= statusteam/discovery-proxy
 STATUSD_PRUNE_IMAGE_NAME ?= statusteam/statusd-prune
 DOCKER_IMAGE_CUSTOM_TAG ?= $(RELEASE_TAG)
@@ -102,10 +101,6 @@ bootnode: ##@build Build discovery v5 bootnode using status-go deps
 	go build -i -o $(GOBIN)/bootnode -v -tags '$(BUILD_TAGS)' $(BUILD_FLAGS) ./cmd/bootnode/
 	@echo "Compilation done."
-proxy: ##@build Build proxy for rendezvous servers using status-go deps
-	go build -i -o $(GOBIN)/proxy -v -tags '$(BUILD_TAGS)' $(BUILD_FLAGS) ./cmd/proxy/
-	@echo "Compilation done."
 node-canary: ##@build Build P2P node canary using status-go deps
 	go build -i -o $(GOBIN)/node-canary -v -tags '$(BUILD_TAGS)' $(BUILD_FLAGS) ./cmd/node-canary/
 	@echo "Compilation done."
@@ -161,14 +156,6 @@ bootnode-image:
 	-t $(BOOTNODE_IMAGE_NAME):$(DOCKER_IMAGE_CUSTOM_TAG) \
 	-t $(BOOTNODE_IMAGE_NAME):latest
-proxy-image:
-	@echo "Building docker image for proxy..."
-	docker build --file _assets/build/Dockerfile-proxy . \
-		--build-arg "build_tags=$(BUILD_TAGS)" \
-		--build-arg "build_flags=$(BUILD_FLAGS)" \
-		-t $(PROXY_IMAGE_NAME):$(DOCKER_IMAGE_CUSTOM_TAG) \
-		-t $(PROXY_IMAGE_NAME):latest
 push-docker-images: docker-image bootnode-image
 	docker push $(BOOTNODE_IMAGE_NAME):$(DOCKER_IMAGE_CUSTOM_TAG)
 	docker push $(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_CUSTOM_TAG)

_assets/build/Dockerfile-proxy (deleted)

@@ -1,15 +0,0 @@
FROM golang:1.10-alpine as builder
ARG build_tags
RUN apk add --no-cache make gcc musl-dev linux-headers
RUN mkdir -p /go/src/github.com/status-im/status-go
ADD . /go/src/github.com/status-im/status-go
RUN cd /go/src/github.com/status-im/status-go && make proxy BUILD_TAGS="$build_tags"
FROM alpine:latest
RUN apk add --no-cache ca-certificates bash
COPY --from=builder /go/src/github.com/status-im/status-go/build/bin/proxy /usr/local/bin/

cmd/proxy: flag helpers (deleted)

@@ -1,44 +0,0 @@
package main
import (
"errors"
"strconv"
"strings"
)
// ErrorEmpty returned when value is empty.
var ErrorEmpty = errors.New("empty value not allowed")
// StringSlice is a type of flag that allows setting multiple string values.
type StringSlice []string
func (s *StringSlice) String() string {
return "string slice"
}
// Set trims space from string and stores it.
func (s *StringSlice) Set(value string) error {
trimmed := strings.TrimSpace(value)
if len(trimmed) == 0 {
return ErrorEmpty
}
*s = append(*s, trimmed)
return nil
}
// IntSlice is a type of flag that allows setting multiple int values.
type IntSlice []int
func (s *IntSlice) String() string {
return "int slice"
}
// Set parses the value as an integer and stores it.
func (s *IntSlice) Set(value string) error {
val, err := strconv.Atoi(value)
if err != nil {
return err
}
*s = append(*s, val)
return nil
}
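
These slice types exist so the proxy can accept repeated command-line flags. Below is a minimal standalone sketch of how they plug into the standard flag package; it assumes StringSlice and IntSlice from above live in the same package, and the flag values shown are examples only.

package main

import (
	"flag"
	"fmt"
)

func main() {
	// Each occurrence of a repeatable flag appends another value.
	topics := StringSlice{}
	les := IntSlice{}
	flag.Var(&topics, "topic", "Topic that will be proxied (repeatable)")
	flag.Var(&les, "les", "Proxy les topic for a given network (repeatable)")
	flag.Parse()

	// e.g. running with: -topic whisper -topic mailserver -les 1 -les 4
	// yields topics == [whisper mailserver] and les == [1 4].
	fmt.Println(topics, les)
}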

cmd/proxy: main (deleted)

@@ -1,112 +0,0 @@
package main
import (
"flag"
"fmt"
"strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/params"
ma "github.com/multiformats/go-multiaddr"
"github.com/status-im/status-go/discovery"
sparams "github.com/status-im/status-go/params"
)
var (
laddr = flag.String("laddr", "0.0.0.0:31143", "Listening address for discovery v5.")
verbosity = flag.String("v", "info", "Logger verbosity")
rendezvousNodes = StringSlice{}
bootnodes = StringSlice{}
topics = StringSlice{}
les = IntSlice{}
useEthereum = flag.Bool("use-ethereum-boot", false, "If true ethereum bootnodes will be used.")
limit = flag.Int("limit", 100, "Limit the number of proxied nodes.")
livenessWindow = flag.Duration("liveness-window", 10*time.Minute, "Stop proxying record if it wasn't found again during specified window.")
)
func main() {
flag.Var(&rendezvousNodes, "rendezvous-node", "Rendezvous server.")
flag.Var(&bootnodes, "bootnode", "Discovery v5 node.")
flag.Var(&les, "les", "Proxy les topic for a given network.")
flag.Var(&topics, "topic", "Topic that will be proxied")
flag.Parse()
level, err := log.LvlFromString(strings.ToLower(*verbosity))
if err != nil {
panic(fmt.Errorf("unable to get logger level from string %s: %v", *verbosity, err))
}
filteredHandler := log.LvlFilterHandler(level, log.StderrHandler)
log.Root().SetHandler(filteredHandler)
for _, net := range les {
if t := sparams.LesTopic(net); len(t) != 0 {
topics = append(topics, t)
}
}
key, err := crypto.GenerateKey()
if err != nil {
log.Crit("unable to generate a key", "error", err)
}
rst := []string(bootnodes)
if *useEthereum {
rst = append(rst, params.DiscoveryV5Bootnodes...)
}
v5 := discovery.NewDiscV5(key, *laddr, parseNodesV5(rst))
if err := v5.Start(); err != nil {
log.Crit("unable to start discovery v5", "address", *laddr, "error", err)
}
rendezvousServers := parseMultiaddrs(rendezvousNodes)
var wg sync.WaitGroup
stop := make(chan struct{})
defer close(stop)
for _, t := range topics {
log.Info("proxying records for", "topic", t, "bootnodes", rst, "rendezvous servers", rendezvousNodes)
t := t
wg.Add(1)
go func() {
if err := discovery.ProxyToRendezvous(v5, stop, &event.Feed{}, discovery.ProxyOptions{
Topic: t,
Servers: rendezvousServers,
Limit: *limit,
LivenessWindow: *livenessWindow,
}); err != nil {
log.Error("proxying to rendezvous servers failed", "servers", rendezvousNodes, "topic", t, "error", err)
}
wg.Done()
}()
}
wg.Wait()
}
func parseMultiaddrs(nodes []string) []ma.Multiaddr {
var (
rst = make([]ma.Multiaddr, len(nodes))
err error
)
for i := range nodes {
rst[i], err = ma.NewMultiaddr(nodes[i])
if err != nil {
log.Crit("unable to parse mutliaddr", "source", nodes[i], "error", err)
}
}
return rst
}
func parseNodesV5(nodes []string) []*discv5.Node {
var (
rst = make([]*discv5.Node, len(nodes))
err error
)
for i := range nodes {
rst[i], err = discv5.ParseNode(nodes[i])
if err != nil {
log.Crit("Failed to parse enode", "source", nodes[i], "err", err)
}
}
return rst
}

discovery: proxy implementation (deleted)

@@ -1,139 +0,0 @@
package discovery
import (
"fmt"
"sync"
"time"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
ma "github.com/multiformats/go-multiaddr"
)
const (
proxyStart = "start"
proxyStop = "stop"
)
type proxyEvent struct {
ID discv5.NodeID
Type string
Time time.Time
}
type ProxyOptions struct {
Topic string
Servers []ma.Multiaddr
Limit int
LivenessWindow time.Duration
}
// ProxyToRendezvous proxies records discovered using original to rendezvous servers for specified topic.
func ProxyToRendezvous(original Discovery, stop chan struct{}, feed *event.Feed, opts ProxyOptions) error {
var (
identities = map[discv5.NodeID]*Rendezvous{}
lastSeen = map[discv5.NodeID]time.Time{}
closers = map[discv5.NodeID]chan struct{}{}
period = make(chan time.Duration, 1)
found = make(chan *discv5.Node, 10)
lookup = make(chan bool)
total = 0
livenessWatcher = time.NewTicker(opts.LivenessWindow / 10)
wg sync.WaitGroup
)
defer livenessWatcher.Stop()
period <- 1 * time.Second
wg.Add(1)
go func() {
if err := original.Discover(opts.Topic, period, found, lookup); err != nil {
log.Error("discover request failed", "topic", opts.Topic, "error", err)
}
wg.Done()
}()
for {
select {
case <-stop:
close(period)
wg.Wait()
return nil
case <-lookup:
case <-livenessWatcher.C:
for n := range identities {
if _, exist := lastSeen[n]; !exist {
continue
}
// A record must be rediscovered within the liveness window after it was last seen;
// otherwise its registration is stopped and the record is removed.
if time.Since(lastSeen[n]) >= opts.LivenessWindow {
close(closers[n])
_ = identities[n].Stop()
delete(identities, n)
delete(lastSeen, n)
delete(closers, n)
total--
log.Info("proxy for a record was removed", "identity", n.String(), "total", total)
feed.Send(proxyEvent{n, proxyStop, time.Now()})
}
}
case n := <-found:
_, exist := identities[n.ID]
// skip new record if we reached a limit.
if !exist && total == opts.Limit {
continue
}
lastSeen[n.ID] = time.Now()
if exist {
log.Debug("received an update for existing identity", "identity", n.String())
continue
}
feed.Send(proxyEvent{n.ID, proxyStart, lastSeen[n.ID]})
total++
log.Info("proxying new record", "topic", opts.Topic, "identity", n.String(), "total", total)
record, err := makeProxiedENR(n)
if err != nil {
log.Error("error converting discovered node to ENR", "node", n.String(), "error", err)
}
r := NewRendezvousWithENR(opts.Servers, record)
identities[n.ID] = r
closers[n.ID] = make(chan struct{})
if err := r.Start(); err != nil {
log.Error("unable to start rendezvous proxying", "servers", opts.Servers, "error", err)
}
wg.Add(1)
go func() {
if err := r.Register(opts.Topic, closers[n.ID]); err != nil {
log.Error("register error", "topic", opts.Topic, "error", err)
}
wg.Done()
}()
}
}
}
func makeProxiedENR(n *discv5.Node) (enr.Record, error) {
record := enr.Record{}
record.Set(enr.IP(n.IP))
record.Set(enr.TCP(n.TCP))
record.Set(enr.UDP(n.UDP))
record.Set(Proxied(n.ID))
key, err := crypto.GenerateKey() // we need separate key for each identity, records are stored based on it
if err != nil {
return record, fmt.Errorf("unable to generate private key. error : %v", err)
}
if err := enode.SignV4(&record, key); err != nil {
return record, fmt.Errorf("unable to sign enr record. error: %v", err)
}
return record, nil
}
// Proxied is an Entry for ENR
type Proxied discv5.NodeID
// ENRKey returns unique key that is used by ENR.
func (p Proxied) ENRKey() string {
return "proxied"
}
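
The Proxied entry above is what connected this file to the enrToNode change at the bottom of this commit: the proxy signed a fresh ENR carrying the original discv5 node ID under the "proxied" key, and the rendezvous reader recovered the ID from that entry instead of deriving it from a secp256k1 key. The helper below is a hypothetical illustration of that (now removed) round trip, not code from the repository.

package discovery

import (
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/discv5"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
)

// roundTripProxied is an illustrative helper (not part of the codebase):
// it writes a discv5 node ID into a freshly signed ENR the way makeProxiedENR did,
// then reads it back the way the removed branch of enrToNode did.
func roundTripProxied(n *discv5.Node) (discv5.NodeID, error) {
	record := enr.Record{}
	record.Set(Proxied(n.ID))
	key, err := crypto.GenerateKey() // separate key per identity, as in makeProxiedENR
	if err != nil {
		return discv5.NodeID{}, err
	}
	if err := enode.SignV4(&record, key); err != nil {
		return discv5.NodeID{}, err
	}

	var proxied Proxied
	if err := record.Load(&proxied); err != nil {
		return discv5.NodeID{}, err
	}
	return discv5.NodeID(proxied), nil
}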

discovery: proxy test (deleted)

@@ -1,103 +0,0 @@
package discovery
import (
"context"
"fmt"
"sync"
"testing"
"time"
"github.com/ethereum/go-ethereum/event"
ma "github.com/multiformats/go-multiaddr"
"github.com/status-im/rendezvous"
"github.com/stretchr/testify/require"
)
func TestProxyToRendezvous(t *testing.T) {
var (
topic = "test"
id = 101
limited = 102
limit = 1
reg = newRegistry()
original = &fake{id: 110, registry: reg, started: true}
srv = makeTestRendezvousServer(t, "/ip4/127.0.0.1/tcp/7788")
stop = make(chan struct{})
feed = &event.Feed{}
liveness = 100 * time.Millisecond
wg sync.WaitGroup
)
client, err := rendezvous.NewEphemeral()
require.NoError(t, err)
reg.Add(topic, id)
reg.Add(topic, limited)
wg.Add(1)
events := make(chan proxyEvent, 10)
sub := feed.Subscribe(events)
defer sub.Unsubscribe()
go func() {
defer wg.Done()
require.NoError(t, ProxyToRendezvous(original, stop, feed, ProxyOptions{
Topic: topic,
Servers: []ma.Multiaddr{srv.Addr()},
Limit: limit,
LivenessWindow: liveness,
}))
}()
require.NoError(t, Consistently(func() (bool, error) {
records, err := client.Discover(context.TODO(), srv.Addr(), topic, 10)
if err != nil && len(records) < limit {
return true, nil
}
if len(records) > limit {
return false, fmt.Errorf("more records than expected: %d != %d", len(records), limit)
}
var proxied Proxied
if err := records[0].Load(&proxied); err != nil {
return false, err
}
if proxied[0] != byte(id) {
return false, fmt.Errorf("returned %v instead of %v", proxied[0], id)
}
return true, nil
}, time.Second, 100*time.Millisecond))
close(stop)
wg.Wait()
eventSlice := []proxyEvent{}
func() {
for {
select {
case e := <-events:
eventSlice = append(eventSlice, e)
default:
return
}
}
}()
require.Len(t, eventSlice, 2)
require.Equal(t, byte(id), eventSlice[0].ID[0])
require.Equal(t, proxyStart, eventSlice[0].Type)
require.Equal(t, byte(id), eventSlice[1].ID[0])
require.Equal(t, proxyStop, eventSlice[1].Type)
require.True(t, eventSlice[1].Time.Sub(eventSlice[0].Time) > liveness)
}
func Consistently(f func() (bool, error), timeout, period time.Duration) (err error) {
timer := time.After(timeout)
ticker := time.Tick(period)
var cont bool
for {
select {
case <-timer:
return err
case <-ticker:
cont, err = f()
if cont {
continue
}
if err != nil {
return err
}
}
}
}

discovery: rendezvous record conversion (modified)

@@ -232,22 +232,17 @@ func (r *Rendezvous) Discover(
 func enrToNode(record enr.Record) (*discv5.Node, error) {
 	var (
 		key enode.Secp256k1
 		ip enr.IP
 		tport enr.TCP
 		uport enr.UDP
-		proxied Proxied
-		nodeID discv5.NodeID
+		nodeID discv5.NodeID
 	)
-	if err := record.Load(&proxied); err == nil {
-		nodeID = discv5.NodeID(proxied)
-	} else {
-		if err := record.Load(&key); err != nil {
-			return nil, err
-		}
-		ecdsaKey := ecdsa.PublicKey(key)
-		nodeID = discv5.PubkeyID(&ecdsaKey)
+	if err := record.Load(&key); err != nil {
+		return nil, err
 	}
+	ecdsaKey := ecdsa.PublicKey(key)
+	nodeID = discv5.PubkeyID(&ecdsaKey)
 	if err := record.Load(&ip); err != nil {
 		return nil, err
 	}