status-go/services/shhext/mailservers/connmanager_test.go

314 lines
8.3 KiB
Go
Raw Normal View History

Mail peer store and connection manager (#1295) This change implements connection manager that monitors 3 types of events: 1. update of the selected mail servers 2. disconnect from a mail server 3. errors for requesting mail history When selected mail servers are provided we will try to connect with as many as possible, and later disconnect the surplus. For example if we want to connect with one mail server and 3 were selected, we try to connect with all (3), and later disconnect with 2. It will help to establish connection with a live mail server faster. If mail server disconnects we will choose any other mail server from the list of selected. Unless we have only one mail server. In such case we don't have any other choice and we will leave things as is. If request for history was expired we will disconnect such peer and try to find another one. We will follow same rules as described above. We will have two components that will rely on this logic: 1. requesting history If target peer is provided we will use that peer, otherwise we will request history from any selected mail server that is connected at the time of request. 2. confirmation from selected mail server Confirmation from any selected mail server will be used to send feedback that envelope was sent. I will add several extensions, but probably in separate PRs: 1. prioritize connection with mail server that was used before reboot 2. disconnect from mail servers if history request wasn't expired but failed. 3. wait some time in RequestsMessage RPC to establish connection with any mail server Currently this feature is hidden, as certain changes will be necessary in status-react. partially implements: https://github.com/status-im/status-go/issues/1285
2018-12-05 13:57:05 +00:00
package mailservers
import (
"fmt"
"sync"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/status-im/status-go/t/utils"
"github.com/status-im/whisper/whisperv6"
"github.com/stretchr/testify/require"
)
// fakePeerEvents is a test double for the p2p server / peer pool: it records
// added and removed peers, and, when input is non-nil, emits the corresponding
// p2p.PeerEvent for subscribers created via SubscribeEvents.
type fakePeerEvents struct {
	mu    sync.Mutex               // guards nodes
	nodes map[enode.ID]struct{}    // set of currently "connected" peers
	input chan *p2p.PeerEvent      // optional event feed; nil disables event emission
}
// Nodes returns a snapshot of the IDs currently recorded as connected.
func (f *fakePeerEvents) Nodes() []enode.ID {
	f.mu.Lock()
	defer f.mu.Unlock()
	ids := make([]enode.ID, 0, len(f.nodes))
	for id := range f.nodes {
		ids = append(ids, id)
	}
	return ids
}
// AddPeer records node as connected and, if an event channel is configured,
// publishes a matching add event to subscribers.
func (f *fakePeerEvents) AddPeer(node *enode.Node) {
	f.mu.Lock()
	f.nodes[node.ID()] = struct{}{}
	f.mu.Unlock()
	if f.input != nil {
		f.input <- &p2p.PeerEvent{
			Type: p2p.PeerEventTypeAdd,
			Peer: node.ID(),
		}
	}
}
// RemovePeer forgets node and, if an event channel is configured, publishes a
// matching drop event to subscribers.
func (f *fakePeerEvents) RemovePeer(node *enode.Node) {
	f.mu.Lock()
	delete(f.nodes, node.ID())
	f.mu.Unlock()
	if f.input != nil {
		f.input <- &p2p.PeerEvent{
			Type: p2p.PeerEventTypeDrop,
			Peer: node.ID(),
		}
	}
}
// newFakePeerAdderRemover builds a fake peer pool without an event channel,
// so AddPeer/RemovePeer only mutate state and never emit events.
func newFakePeerAdderRemover() *fakePeerEvents {
	return &fakePeerEvents{
		nodes: make(map[enode.ID]struct{}),
	}
}
// SubscribeEvents forwards peer events from the fake's input channel to
// output until the subscription is cancelled.
func (f *fakePeerEvents) SubscribeEvents(output chan *p2p.PeerEvent) event.Subscription {
	forward := func(quit <-chan struct{}) error {
		for {
			select {
			case ev := <-f.input:
				// will block the same way as in any feed
				output <- ev
			case <-quit:
				return nil
			}
		}
	}
	return event.NewSubscription(forward)
}
// newFakeServer builds a fake peer pool with a buffered event channel, so
// tests can push a number of peer events without blocking.
func newFakeServer() *fakePeerEvents {
	return &fakePeerEvents{
		nodes: make(map[enode.ID]struct{}),
		input: make(chan *p2p.PeerEvent, 20),
	}
}
// fakeEnvelopeEvents is a test double for a whisper envelope event source;
// events pushed into input are delivered to subscribers.
type fakeEnvelopeEvents struct {
	input chan whisperv6.EnvelopeEvent // unbuffered feed of envelope events
}
// SubscribeEnvelopeEvents forwards envelope events from the fake's input
// channel to output until the subscription is cancelled.
func (f fakeEnvelopeEvents) SubscribeEnvelopeEvents(output chan<- whisperv6.EnvelopeEvent) event.Subscription {
	forward := func(quit <-chan struct{}) error {
		for {
			select {
			case ev := <-f.input:
				// will block the same way as in any feed
				output <- ev
			case <-quit:
				return nil
			}
		}
	}
	return event.NewSubscription(forward)
}
// newFakeEnvelopesEvents builds a fake envelope event source with an
// unbuffered channel; sends block until a subscriber consumes the event.
func newFakeEnvelopesEvents() fakeEnvelopeEvents {
	return fakeEnvelopeEvents{input: make(chan whisperv6.EnvelopeEvent)}
}
// getNRandomNodes generates n random nodes and returns them keyed by ID.
// It fails the test immediately if node generation errors.
// Fix: the loop previously declared a node variable named n, shadowing the
// count parameter n; renamed to node. Also pre-sizes the result map.
// NOTE(review): RandomeNode (sic) is a project helper declared elsewhere.
func getNRandomNodes(t *testing.T, n int) map[enode.ID]*enode.Node {
	rst := make(map[enode.ID]*enode.Node, n)
	for i := 0; i < n; i++ {
		node, err := RandomeNode()
		require.NoError(t, err)
		rst[node.ID()] = node
	}
	return rst
}
// mergeOldIntoNew copies every entry from src into dst, mutating dst in
// place (the caller's second argument receives the union of both maps).
// Fix: the second parameter was named new, shadowing the Go builtin;
// renamed to src/dst. Call sites are unaffected (positional arguments).
func mergeOldIntoNew(src, dst map[enode.ID]*enode.Node) {
	for id, node := range src {
		dst[id] = node
	}
}
// TestReplaceNodes checks that replaceNodes first connects the pool to the
// old set and then fully swaps it for the new set, in two table-driven cases.
func TestReplaceNodes(t *testing.T) {
	type testCase struct {
		description string
		old         map[enode.ID]*enode.Node
		new         map[enode.ID]*enode.Node
		target      int
	}
	cases := []testCase{
		{
			description: "InitialReplace",
			old:         getNRandomNodes(t, 0),
			new:         getNRandomNodes(t, 3),
			target:      2,
		},
		{
			description: "FullReplace",
			old:         getNRandomNodes(t, 3),
			new:         getNRandomNodes(t, 3),
			target:      2,
		},
	}
	for _, tc := range cases {
		t.Run(tc.description, func(t *testing.T) {
			peers := newFakePeerAdderRemover()
			// First pass: populate the pool from scratch with the old set.
			replaceNodes(peers, tc.target, peers.nodes, nil, tc.old)
			require.Len(t, peers.nodes, len(tc.old))
			for id := range peers.nodes {
				require.Contains(t, tc.old, id)
			}
			// Second pass: replace the old set with the new one.
			replaceNodes(peers, tc.target, peers.nodes, tc.old, tc.new)
			require.Len(t, peers.nodes, len(tc.new))
			for id := range peers.nodes {
				require.Contains(t, tc.new, id)
			}
		})
	}
}
// TestPartialReplaceNodesBelowTarget: with target 2 and only 1 node connected,
// replacing with a superset keeps the existing node and adds the new ones.
// Fix: local variable new shadowed the Go builtin; renamed to oldNodes/newNodes.
func TestPartialReplaceNodesBelowTarget(t *testing.T) {
	peers := newFakePeerAdderRemover()
	oldNodes := getNRandomNodes(t, 1)
	newNodes := getNRandomNodes(t, 2)
	replaceNodes(peers, 2, peers.nodes, nil, oldNodes)
	// The new set includes the old node, so nothing should be dropped.
	mergeOldIntoNew(oldNodes, newNodes)
	replaceNodes(peers, 2, peers.nodes, oldNodes, newNodes)
	require.Len(t, peers.nodes, len(newNodes))
}
// TestPartialReplaceNodesAboveTarget: with target 1, replacing with a larger
// superset must still leave exactly one connected node.
// Fix: local variable new shadowed the Go builtin; renamed to oldNodes/newNodes.
func TestPartialReplaceNodesAboveTarget(t *testing.T) {
	peers := newFakePeerAdderRemover()
	oldNodes := getNRandomNodes(t, 1)
	newNodes := getNRandomNodes(t, 2)
	replaceNodes(peers, 1, peers.nodes, nil, oldNodes)
	mergeOldIntoNew(oldNodes, newNodes)
	replaceNodes(peers, 1, peers.nodes, oldNodes, newNodes)
	// Surplus connections above the target must be dropped.
	require.Len(t, peers.nodes, 1)
}
// TestConnectionManagerAddDrop verifies that when the single connected mail
// server drops, the manager reconnects to a different node from the list.
func TestConnectionManagerAddDrop(t *testing.T) {
	server := newFakeServer()
	envelopes := newFakeEnvelopesEvents()
	target := 1
	connmanager := NewConnectionManager(server, envelopes, target)
	connmanager.Start()
	defer connmanager.Stop()
	var nodes []*enode.Node
	for _, node := range getNRandomNodes(t, 3) {
		nodes = append(nodes, node)
	}
	// Send 3 random nodes to connection manager.
	connmanager.Notify(nodes)
	var initial enode.ID
	// Wait till connection manager establishes connection with 1 peer.
	require.NoError(t, utils.Eventually(func() error {
		connected := server.Nodes()
		if len(connected) != target {
			return fmt.Errorf("unexpected number of connected servers: %d", len(connected))
		}
		initial = connected[0]
		return nil
	}, time.Second, 100*time.Millisecond))
	// Send an event that peer was dropped.
	select {
	case server.input <- &p2p.PeerEvent{Peer: initial, Type: p2p.PeerEventTypeDrop}:
	case <-time.After(time.Second):
		require.FailNow(t, "can't send a drop event")
	}
	// Connection manager should establish connection with any other peer from initial list.
	require.NoError(t, utils.Eventually(func() error {
		connected := server.Nodes()
		if len(connected) != target {
			return fmt.Errorf("unexpected number of connected servers: %d", len(connected))
		}
		if connected[0] == initial {
			return fmt.Errorf("connected node wasn't changed from %s", initial)
		}
		return nil
	}, time.Second, 100*time.Millisecond))
}
// TestConnectionManagerReplace verifies that notifying the manager with a new
// list of mail servers replaces the previously connected peer with one of them.
func TestConnectionManagerReplace(t *testing.T) {
	server := newFakeServer()
	envelopes := newFakeEnvelopesEvents()
	target := 1
	connmanager := NewConnectionManager(server, envelopes, target)
	connmanager.Start()
	defer connmanager.Stop()
	var nodes []*enode.Node
	for _, node := range getNRandomNodes(t, 3) {
		nodes = append(nodes, node)
	}
	// Send a single node to connection manager.
	connmanager.Notify(nodes[:1])
	// Wait until this node will get connected.
	require.NoError(t, utils.Eventually(func() error {
		connected := server.Nodes()
		if len(connected) != target {
			return fmt.Errorf("unexpected number of connected servers: %d", len(connected))
		}
		if nodes[0].ID() != connected[0] {
			return fmt.Errorf("connected with a wrong peer. expected %s, got %s", nodes[0].ID(), connected[0])
		}
		return nil
	}, time.Second, 100*time.Millisecond))
	// Replace previously sent node with 2 different nodes.
	connmanager.Notify(nodes[1:])
	// Wait until connection manager replaces node connected in the first round.
	require.NoError(t, utils.Eventually(func() error {
		connected := server.Nodes()
		if len(connected) != target {
			return fmt.Errorf("unexpected number of connected servers: %d", len(connected))
		}
		// The surviving connection must be one of the two replacement nodes.
		if connected[0] != nodes[1].ID() && connected[0] != nodes[2].ID() {
			return fmt.Errorf("connected with unexpected peer. got %s, expected %+v", connected[0], nodes[1:])
		}
		return nil
	}, time.Second, 100*time.Millisecond))
}
// TestConnectionChangedAfterExpiry verifies that when a history request to the
// connected mail server expires, the manager switches to a different peer.
func TestConnectionChangedAfterExpiry(t *testing.T) {
	server := newFakeServer()
	envelopes := newFakeEnvelopesEvents()
	target := 1
	connmanager := NewConnectionManager(server, envelopes, target)
	connmanager.Start()
	defer connmanager.Stop()
	var nodes []*enode.Node
	for _, node := range getNRandomNodes(t, 2) {
		nodes = append(nodes, node)
	}
	// Send two random nodes to connection manager.
	connmanager.Notify(nodes)
	var initial enode.ID
	// Wait until connection manager establishes connection with one node.
	require.NoError(t, utils.Eventually(func() error {
		connected := server.Nodes()
		if len(connected) != target {
			return fmt.Errorf("unexpected number of connected servers: %d", len(connected))
		}
		initial = connected[0]
		return nil
	}, time.Second, 100*time.Millisecond))
	hash := common.Hash{1}
	// Send event that history request for connected peer was sent.
	select {
	case envelopes.input <- whisperv6.EnvelopeEvent{
		Event: whisperv6.EventMailServerRequestSent, Peer: initial, Hash: hash}:
	case <-time.After(time.Second):
		require.FailNow(t, "can't send a 'sent' event")
	}
	// And eventually expired.
	select {
	case envelopes.input <- whisperv6.EnvelopeEvent{
		Event: whisperv6.EventMailServerRequestExpired, Peer: initial, Hash: hash}:
	case <-time.After(time.Second):
		require.FailNow(t, "can't send an 'expiry' event")
	}
	// The manager must have moved on to the other notified node.
	require.NoError(t, utils.Eventually(func() error {
		connected := server.Nodes()
		if len(connected) != target {
			return fmt.Errorf("unexpected number of connected servers: %d", len(connected))
		}
		if connected[0] == initial {
			return fmt.Errorf("connected node wasn't changed from %s", initial)
		}
		return nil
	}, time.Second, 100*time.Millisecond))
}