Move concurrent backend test from e2e to unit (#849)

Adam Babik 2018-04-20 17:39:53 +02:00 committed by GitHub
parent 2f2dfe16c0
commit 07b2c3d750
9 changed files with 344 additions and 209 deletions


@@ -7,6 +7,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
"sync"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
@@ -31,7 +32,9 @@ type GethServiceProvider interface {
// Manager represents account manager interface.
type Manager struct {
geth GethServiceProvider
mu sync.RWMutex
selectedAccount *SelectedExtKey // account that was processed during the last call to SelectAccount()
}
@@ -73,6 +76,9 @@ func (m *Manager) CreateAccount(password string) (address, pubKey, mnemonic stri
// CKD#2 is used as root for master accounts (when parentAddress is "").
// Otherwise (when parentAddress != ""), child is derived directly from parent.
func (m *Manager) CreateChildAccount(parentAddress, password string) (address, pubKey string, err error) {
m.mu.Lock()
defer m.mu.Unlock()
keyStore, err := m.geth.AccountKeyStore()
if err != nil {
return "", "", err
@@ -206,6 +212,9 @@ func (m *Manager) VerifyAccountPassword(keyStoreDir, address, password string) (
// SelectAccount selects the current account by verifying that the address has a corresponding account which can be decrypted
// using the provided password. Once verification is done, all previous identities are removed.
func (m *Manager) SelectAccount(address, password string) error {
m.mu.Lock()
defer m.mu.Unlock()
keyStore, err := m.geth.AccountKeyStore()
if err != nil {
return err
@@ -237,6 +246,9 @@ func (m *Manager) SelectAccount(address, password string) error {
// SelectedAccount returns currently selected account
func (m *Manager) SelectedAccount() (*SelectedExtKey, error) {
m.mu.RLock()
defer m.mu.RUnlock()
if m.selectedAccount == nil {
return nil, ErrNoAccountSelected
}
@@ -244,10 +256,11 @@ func (m *Manager) SelectedAccount() (*SelectedExtKey, error) {
}
// Logout clears selectedAccount.
func (m *Manager) Logout() error {
m.selectedAccount = nil
func (m *Manager) Logout() {
m.mu.Lock()
defer m.mu.Unlock()
return nil
m.selectedAccount = nil
}
// importExtendedKey processes incoming extended key, extracts required info and creates corresponding account key.
@@ -278,6 +291,9 @@ func (m *Manager) importExtendedKey(extKey *extkeys.ExtendedKey, password string
// Accounts returns list of addresses for selected account, including
// subaccounts.
func (m *Manager) Accounts() ([]gethcommon.Address, error) {
m.mu.RLock()
defer m.mu.RUnlock()
am, err := m.geth.AccountManager()
if err != nil {
return nil, err
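
The common thread in the Manager hunks above is that every method touching account state now goes through the new sync.RWMutex: methods that may change state (CreateChildAccount, SelectAccount, Logout) take Lock, while read-only accessors (SelectedAccount, Accounts) take RLock. A minimal, self-contained sketch of that split, with illustrative types rather than the real status-go Manager:

package account

import "sync"

// SelectedExtKey stands in for the real type; only the locking pattern matters here.
type SelectedExtKey struct{ Address string }

type manager struct {
	mu              sync.RWMutex
	selectedAccount *SelectedExtKey
}

// Writers take the exclusive lock.
func (m *manager) Select(key *SelectedExtKey) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.selectedAccount = key
}

func (m *manager) Logout() {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.selectedAccount = nil
}

// Readers share the read lock, so concurrent getters do not serialize each other.
func (m *manager) Selected() *SelectedExtKey {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return m.selectedAccount
}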


@@ -328,8 +328,7 @@ func (s *ManagerTestSuite) TestCreateChildAccount() {
}
func (s *ManagerTestSuite) TestLogout() {
err := s.accManager.Logout()
s.Nil(err)
s.accManager.Logout()
s.Nil(s.accManager.selectedAccount)
}


@@ -110,7 +110,19 @@ func (b *StatusBackend) IsNodeRunning() bool {
func (b *StatusBackend) StartNode(config *params.NodeConfig) error {
b.mu.Lock()
defer b.mu.Unlock()
return b.startNode(config)
if err := b.startNode(config); err != nil {
signal.Send(signal.Envelope{
Type: signal.EventNodeCrashed,
Event: signal.NodeCrashEvent{
Error: err,
},
})
return err
}
return nil
}
func (b *StatusBackend) startNode(config *params.NodeConfig) (err error) {
@@ -120,29 +132,29 @@ func (b *StatusBackend) startNode(config *params.NodeConfig) (err error) {
}
}()
err = b.statusNode.Start(config)
if err != nil {
signal.Send(signal.Envelope{
Type: signal.EventNodeCrashed,
Event: signal.NodeCrashEvent{
Error: err,
},
})
return err
if err = b.statusNode.Start(config); err != nil {
return
}
signal.Send(signal.Envelope{Type: signal.EventNodeStarted})
b.transactor.SetNetworkID(config.NetworkID)
b.transactor.SetRPC(b.statusNode.RPCClient(), rpc.DefaultCallTimeout)
b.personalAPI.SetRPC(b.statusNode.RPCPrivateClient(), rpc.DefaultCallTimeout)
if err := b.registerHandlers(); err != nil {
if err = b.registerHandlers(); err != nil {
b.log.Error("Handler registration failed", "err", err)
return
}
if err := b.ReSelectAccount(); err != nil {
b.log.Info("Handlers registered")
if err = b.ReSelectAccount(); err != nil {
b.log.Error("Reselect account failed", "err", err)
return
}
b.log.Info("Account reselected")
signal.Send(signal.Envelope{Type: signal.EventNodeReady})
return nil
}
@@ -164,9 +176,13 @@ func (b *StatusBackend) stopNode() error {
// RestartNode restarts the running Status node; it fails if the node is not running
func (b *StatusBackend) RestartNode() error {
b.mu.Lock()
defer b.mu.Unlock()
if !b.IsNodeRunning() {
return node.ErrNoRunningNode
}
newcfg := *(b.statusNode.Config())
if err := b.stopNode(); err != nil {
return err
@@ -290,6 +306,9 @@ func (b *StatusBackend) registerHandlers() error {
// ConnectionChange handles network state change logic.
func (b *StatusBackend) ConnectionChange(state ConnectionState) {
b.mu.Lock()
defer b.mu.Unlock()
b.log.Info("Network state change", "old", b.connectionState, "new", state)
b.connectionState = state
@@ -308,15 +327,19 @@ func (b *StatusBackend) AppStateChange(state AppState) {
// Logout clears whisper identities.
func (b *StatusBackend) Logout() error {
whisperService, err := b.statusNode.WhisperService()
if err != nil {
switch err {
case node.ErrServiceUnknown: // Whisper was never registered
case nil:
if err := whisperService.DeleteKeyPairs(); err != nil {
return fmt.Errorf("%s: %v", ErrWhisperClearIdentitiesFailure, err)
}
default:
return err
}
err = whisperService.DeleteKeyPairs()
if err != nil {
return fmt.Errorf("%s: %v", ErrWhisperClearIdentitiesFailure, err)
}
return b.AccountManager().Logout()
b.AccountManager().Logout()
return nil
}
// ReSelectAccount selects the previously selected account, typically after a node restart.
@@ -326,13 +349,16 @@ func (b *StatusBackend) ReSelectAccount() error {
return nil
}
whisperService, err := b.statusNode.WhisperService()
if err != nil {
switch err {
case node.ErrServiceUnknown: // Whisper was never registered
case nil:
if err := whisperService.SelectKeyPair(selectedAccount.AccountKey.PrivateKey); err != nil {
return ErrWhisperIdentityInjectionFailure
}
default:
return err
}
if err := whisperService.SelectKeyPair(selectedAccount.AccountKey.PrivateKey); err != nil {
return ErrWhisperIdentityInjectionFailure
}
return nil
}
@@ -350,14 +376,15 @@ func (b *StatusBackend) SelectAccount(address, password string) error {
}
whisperService, err := b.statusNode.WhisperService()
if err != nil {
switch err {
case node.ErrServiceUnknown: // Whisper was never registered
case nil:
if err := whisperService.SelectKeyPair(acc.AccountKey.PrivateKey); err != nil {
return ErrWhisperIdentityInjectionFailure
}
default:
return err
}
err = whisperService.SelectKeyPair(acc.AccountKey.PrivateKey)
if err != nil {
return ErrWhisperIdentityInjectionFailure
}
return nil
}
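
Two things fall out of the StatusBackend changes above: the exported StartNode now owns both the mutex and the crash signalling (startNode only performs the start), and the account-related methods tolerate a missing Whisper service instead of failing. A rough, runnable sketch of the first shape; the types, and the Println calls standing in for signal.Send, are illustrative only:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// Illustrative stand-ins, not the real status-go types.
type nodeConfig struct{ NetworkID uint64 }

type backend struct {
	mu      sync.Mutex
	running bool
}

var errAlreadyRunning = errors.New("node is already running")

// Start mirrors the shape of the new StatusBackend.StartNode: serialize callers
// with the mutex and turn any failure of the unexported start into a crash event.
func (b *backend) Start(cfg *nodeConfig) error {
	b.mu.Lock()
	defer b.mu.Unlock()
	if err := b.start(cfg); err != nil {
		fmt.Println("node.crashed:", err) // stands in for signal.Send(signal.EventNodeCrashed)
		return err
	}
	return nil
}

func (b *backend) start(cfg *nodeConfig) error {
	if b.running {
		return errAlreadyRunning
	}
	b.running = true
	fmt.Println("node.started") // stands in for signal.Send(signal.EventNodeStarted)
	return nil
}

func main() {
	b := &backend{}
	fmt.Println(b.Start(&nodeConfig{NetworkID: 1})) // <nil>
	fmt.Println(b.Start(&nodeConfig{NetworkID: 1})) // node is already running
}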

geth/api/backend_test.go (new file, 250 lines)

@@ -0,0 +1,250 @@
package api
import (
"fmt"
"math/rand"
"sync"
"testing"
"github.com/status-im/status-go/geth/node"
"github.com/status-im/status-go/geth/params"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestBackendStartNodeConcurrently(t *testing.T) {
backend := NewStatusBackend()
config := params.NodeConfig{}
count := 2
resultCh := make(chan error)
var wg sync.WaitGroup
wg.Add(count)
for i := 0; i < count; i++ {
go func() {
resultCh <- backend.StartNode(&config)
wg.Done()
}()
}
// close the channel, otherwise the range loop below never finishes
go func() { wg.Wait(); close(resultCh) }()
var results []error
for err := range resultCh {
results = append(results, err)
}
require.Contains(t, results, nil)
require.Contains(t, results, node.ErrNodeRunning)
err := backend.StopNode()
require.NoError(t, err)
}
func TestBackendRestartNodeConcurrently(t *testing.T) {
backend := NewStatusBackend()
config := params.NodeConfig{}
count := 3
err := backend.StartNode(&config)
require.NoError(t, err)
defer func() {
require.NoError(t, backend.StopNode())
}()
var wg sync.WaitGroup
wg.Add(count)
for i := 0; i < count; i++ {
go func(idx int) {
assert.NoError(t, backend.RestartNode())
wg.Done()
}(i)
}
wg.Wait()
}
// TODO(adam): add concurrent tests for ResetChainData()
func TestBackendGettersConcurrently(t *testing.T) {
backend := NewStatusBackend()
config := params.NodeConfig{}
err := backend.StartNode(&config)
require.NoError(t, err)
defer func() {
require.NoError(t, backend.StopNode())
}()
var wg sync.WaitGroup
wg.Add(1)
go func() {
assert.NotNil(t, backend.StatusNode())
wg.Done()
}()
wg.Add(1)
go func() {
assert.NotNil(t, backend.AccountManager())
wg.Done()
}()
wg.Add(1)
go func() {
assert.NotNil(t, backend.JailManager())
wg.Done()
}()
wg.Add(1)
go func() {
assert.NotNil(t, backend.PersonalAPI())
wg.Done()
}()
wg.Add(1)
go func() {
assert.NotNil(t, backend.Transactor())
wg.Done()
}()
wg.Add(1)
go func() {
assert.NotNil(t, backend.PendingSignRequests())
wg.Done()
}()
wg.Add(1)
go func() {
assert.True(t, backend.IsNodeRunning())
wg.Done()
}()
wg.Add(1)
go func() {
assert.True(t, backend.IsNodeRunning())
wg.Done()
}()
wg.Wait()
}
func TestBackendAccountsConcurrently(t *testing.T) {
backend := NewStatusBackend()
config := params.NodeConfig{}
err := backend.StartNode(&config)
require.NoError(t, err)
defer func() {
require.NoError(t, backend.StopNode())
}()
var wgCreateAccounts sync.WaitGroup
count := 3
addressCh := make(chan [2]string, count) // use buffered channel to avoid blocking
// create new accounts concurrently
for i := 0; i < count; i++ {
wgCreateAccounts.Add(1)
go func(pass string) {
address, _, _, err := backend.AccountManager().CreateAccount(pass)
assert.NoError(t, err)
addressCh <- [...]string{address, pass}
wgCreateAccounts.Done()
}("password-00" + string(i))
}
// close addressCh, otherwise the range loop below never finishes
go func() { wgCreateAccounts.Wait(); close(addressCh) }()
// select, reselect or logout concurrently
var wg sync.WaitGroup
for tuple := range addressCh {
wg.Add(1)
go func(tuple [2]string) {
assert.NoError(t, backend.SelectAccount(tuple[0], tuple[1]))
wg.Done()
}(tuple)
wg.Add(1)
go func() {
assert.NoError(t, backend.ReSelectAccount())
wg.Done()
}()
wg.Add(1)
go func() {
assert.NoError(t, backend.Logout())
wg.Done()
}()
}
wg.Wait()
}
func TestBackendConnectionChangesConcurrently(t *testing.T) {
connections := []ConnectionType{ConnectionUnknown, ConnectionCellular, ConnectionWifi}
backend := NewStatusBackend()
count := 3
var wg sync.WaitGroup
for i := 0; i < count; i++ {
wg.Add(1)
go func() {
connIdx := rand.Intn(len(connections))
backend.ConnectionChange(ConnectionState{
Offline: false,
Type: connections[connIdx],
Expensive: false,
})
wg.Done()
}()
}
wg.Wait()
}
func TestBackendCallRPCConcurrently(t *testing.T) {
backend := NewStatusBackend()
config := params.NodeConfig{}
count := 3
err := backend.StartNode(&config)
require.NoError(t, err)
defer func() {
require.NoError(t, backend.StopNode())
}()
var wg sync.WaitGroup
for i := 0; i < count; i++ {
wg.Add(1)
go func(idx int) {
result := backend.CallRPC(fmt.Sprintf(
`{"jsonrpc":"2.0","method":"web3_clientVersion","params":[],"id":%d}`,
idx+1,
))
assert.NotContains(t, result, "error")
wg.Done()
}(i)
wg.Add(1)
go func(idx int) {
result := backend.CallPrivateRPC(fmt.Sprintf(
`{"jsonrpc":"2.0","method":"web3_clientVersion","params":[],"id":%d}`,
idx+1,
))
assert.NotContains(t, result, "error")
wg.Done()
}(i)
}
wg.Wait()
}
// TODO(adam): add concurrent tests for: SendTransaction, ApproveSignRequest, DiscardSignRequest
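
A detail worth noting in the new tests: testify's require is only called from the test goroutine, while code inside spawned goroutines uses assert. require triggers t.FailNow, which is only safe on the goroutine running the test, whereas assert merely records the failure. A small hypothetical test illustrating the convention (assuming the same testify imports):

package api

import (
	"sync"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Sketch of the convention the new tests follow: require.* stays on the test
// goroutine, assert.* is used from goroutines the test spawns.
func TestGoroutineAssertions(t *testing.T) {
	require.NotNil(t, t) // fine: runs on the test goroutine

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		assert.True(t, true) // safe off the test goroutine: only records a failure
	}()
	wg.Wait()
}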


@@ -33,6 +33,7 @@ var (
ErrNoGethNode = errors.New("geth node is not available")
ErrNoRunningNode = errors.New("there is no running node")
ErrAccountKeyStoreMissing = errors.New("account key store is not set")
ErrServiceUnknown = errors.New("service unknown")
)
// StatusNode abstracts contained geth node and provides helper methods to
@@ -348,7 +349,7 @@ func (n *StatusNode) gethService(serviceInstance interface{}) error {
}
if err := n.gethNode.Service(serviceInstance); err != nil {
return fmt.Errorf("service unavailable: %v", err)
return err
}
return nil
@@ -356,12 +357,22 @@ func (n *StatusNode) gethService(serviceInstance interface{}) error {
// LightEthereumService exposes reference to LES service running on top of the node
func (n *StatusNode) LightEthereumService() (l *les.LightEthereum, err error) {
return l, n.gethService(&l)
err = n.gethService(&l)
if err == node.ErrServiceUnknown {
err = ErrServiceUnknown
}
return
}
// WhisperService exposes reference to Whisper service running on top of the node
func (n *StatusNode) WhisperService() (w *whisper.Whisper, err error) {
return w, n.gethService(&w)
err = n.gethService(&w)
if err == node.ErrServiceUnknown {
err = ErrServiceUnknown
}
return
}
// AccountManager exposes reference to node's accounts manager
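
The mapping above exists so callers (for example the switch statements added to the StatusBackend methods) can compare against the newly added ErrServiceUnknown without importing geth's node package. A standalone sketch of the sentinel-translation idea, with all names illustrative:

package main

import (
	"errors"
	"fmt"
)

// Illustrative sentinels: errUpstream stands in for geth's node.ErrServiceUnknown,
// ErrServiceUnknown for the new status-go level sentinel added in this diff.
var (
	errUpstream       = errors.New("unknown service")
	ErrServiceUnknown = errors.New("service unknown")
)

func lookup(name string) error {
	if name != "whisper" {
		return errUpstream
	}
	return nil
}

// whisperService translates the upstream sentinel so callers can compare
// against ErrServiceUnknown without knowing about the upstream package.
func whisperService() error {
	err := lookup("les")
	if err == errUpstream {
		err = ErrServiceUnknown
	}
	return err
}

func main() {
	switch err := whisperService(); err {
	case ErrServiceUnknown:
		fmt.Println("whisper was never registered")
	case nil:
		fmt.Println("whisper is available")
	default:
		fmt.Println("unexpected error:", err)
	}
}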


@@ -244,7 +244,7 @@ func (s *AccountsTestSuite) TestSelectedAccountOnRestart() {
defer s.StopTestBackend()
// now logout, and make sure that on restart no account is selected (i.e. logout works properly)
s.NoError(s.Backend.AccountManager().Logout())
s.NoError(s.Backend.Logout())
s.RestartTestNode()
selectedAccount, err = s.Backend.AccountManager().SelectedAccount()


@@ -2,15 +2,9 @@ package api_test
import (
"io/ioutil"
"math/rand"
"os"
"testing"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/status-im/status-go/geth/account"
"github.com/status-im/status-go/geth/jail"
"github.com/status-im/status-go/geth/node"
"github.com/status-im/status-go/geth/params"
"github.com/status-im/status-go/t/e2e"
. "github.com/status-im/status-go/t/utils"
@@ -25,168 +19,6 @@ type APIBackendTestSuite struct {
e2e.BackendTestSuite
}
// FIXME(tiabc): There's also a test with the same name in geth/node/manager_test.go
// so this test should only check StatusBackend logic with a mocked version of the underlying StatusNode.
func (s *APIBackendTestSuite) TestRaceConditions() {
require := s.Require()
require.NotNil(s.Backend)
cnt := 25
progress := make(chan struct{}, cnt)
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
nodeConfig1, err := MakeTestNodeConfig(GetNetworkID())
require.NoError(err)
nodeConfig2, err := MakeTestNodeConfig(GetNetworkID())
require.NoError(err)
nodeConfigs := []*params.NodeConfig{nodeConfig1, nodeConfig2}
var funcsToTest = []func(*params.NodeConfig){
func(config *params.NodeConfig) {
log.Info("StartNode()")
err := s.Backend.StartNode(config)
s.T().Logf("StartNode() for network: %d, error: %v", config.NetworkID, err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("StopNode()")
err := s.Backend.StopNode()
s.T().Logf("StopNode() for network: %d, error: %v", config.NetworkID, err)
progress <- struct{}{}
},
// func(config *params.NodeConfig) {
// log.Info("ResetChainData()")
// _, err := s.Backend.ResetChainData()
// s.T().Logf("ResetChainData(), error: %v", err)
// progress <- struct{}{}
// },
func(config *params.NodeConfig) {
log.Info("RestartNode()")
err := s.Backend.RestartNode()
s.T().Logf("RestartNode(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("StatusNode()")
instance := s.Backend.StatusNode()
s.NotNil(instance)
s.IsType(&node.StatusNode{}, instance)
s.T().Logf("StatusNode(), result: %v", instance)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("AccountManager()")
instance := s.Backend.AccountManager()
s.NotNil(instance)
s.IsType(&account.Manager{}, instance)
s.T().Logf("Manager(), result: %v", instance)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("JailManager()")
instance := s.Backend.JailManager()
s.NotNil(instance)
s.IsType(&jail.Jail{}, instance)
s.T().Logf("JailManager(), result: %v", instance)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("CreateAccount()")
address, pubKey, mnemonic, err := s.Backend.AccountManager().CreateAccount("password")
s.T().Logf("CreateAccount(), error: %v (address: %v, pubKey: %v, mnemonic: %v)", err, address, pubKey, mnemonic)
if err != nil {
progress <- struct{}{}
return
}
// SelectAccount
log.Info("CreateAccount()")
err = s.Backend.SelectAccount(address, "password")
s.T().Logf("SelectAccount(%v, %v), error: %v", address, "password", err)
// CreateChildAccount
log.Info("CreateChildAccount()")
address, pubKey, err = s.Backend.AccountManager().CreateChildAccount(address, "password")
s.T().Logf("CreateAccount(), error: %v (address: %v, pubKey: %v)", err, address, pubKey)
// RecoverAccount
log.Info("RecoverAccount()")
address, pubKey, err = s.Backend.AccountManager().RecoverAccount("password", mnemonic)
s.T().Logf("RecoverAccount(), error: %v (address: %v, pubKey: %v)", err, address, pubKey)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("VerifyAccountPassword()")
_, err := s.Backend.AccountManager().VerifyAccountPassword(config.KeyStoreDir, "0x0", "bar")
s.T().Logf("VerifyAccountPassword(), err: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("Logout()")
s.T().Logf("Logout(), result: %v", s.Backend.AccountManager().Logout())
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("IsNodeRunning()")
s.T().Logf("IsNodeRunning(), result: %v", s.Backend.IsNodeRunning())
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("ApproveSignRequest()")
result := s.Backend.ApproveSignRequest("id", "password")
s.T().Logf("ApproveSignRequest(), error: %v", result.Error)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("DiscardSignRequest()")
s.T().Logf("DiscardSignRequest(), error: %v", s.Backend.DiscardSignRequest("id"))
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("ApproveSignRequests()")
ids := []string{"id1", "id2"}
s.T().Logf("ApproveSignRequests(), result: %v", s.Backend.ApproveSignRequests(ids, "password"))
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("DiscardSignRequests()")
ids := []string{"id1", "id2"}
s.T().Logf("DiscardSignRequests(), result: %v", s.Backend.DiscardSignRequests(ids))
progress <- struct{}{}
},
}
// increase StartNode()/StopNode() population
for i := 0; i < 5; i++ {
funcsToTest = append(funcsToTest, funcsToTest[0], funcsToTest[1])
}
for i := 0; i < cnt; i++ {
randConfig := nodeConfigs[rnd.Intn(len(nodeConfigs))]
randFunc := funcsToTest[rnd.Intn(len(funcsToTest))]
if rnd.Intn(100) > 75 { // introduce random delays
time.Sleep(500 * time.Millisecond)
}
go randFunc(randConfig)
}
for range progress {
cnt--
if cnt <= 0 {
break
}
}
time.Sleep(2 * time.Second) // so that we see some logs
if err := s.Backend.StopNode(); err != node.ErrNoRunningNode && err != nil {
s.NoError(err, "unexpected error")
}
}
// FIXME(tiabc): There's also a test with the same name in geth/node/manager_test.go
// so this test should only check StatusBackend logic with a mocked version of the underlying StatusNode.
func (s *APIBackendTestSuite) TestNetworkSwitching() {


@@ -218,8 +218,6 @@ func (s *TransactionsTestSuite) TestSendContractTxCollision() {
}
s.testSendContractTx(initFunc, nil, "")
s.NoError(s.Backend.AccountManager().Logout())
// Scenario 2: Both fields are filled with different values, expect an error
inverted := func(source []byte) []byte {
inverse := make([]byte, len(source))
@@ -349,6 +347,8 @@ func (s *TransactionsTestSuite) testSendContractTx(setInputAndDataValue initFunc
s.Equal(txHashCheck.Bytes(), signRequestResult, "transaction hash returned from SendTransaction is invalid")
s.False(reflect.DeepEqual(txHashCheck, gethcommon.Hash{}), "transaction was never queued or completed")
s.Zero(s.PendingSignRequests().Count(), "tx queue must be empty at this point")
s.NoError(s.Backend.Logout())
}
func (s *TransactionsTestSuite) TestSendEther() {


@@ -195,7 +195,7 @@ func (s *WhisperTestSuite) TestSelectedAccountOnRestart() {
s.False(whisperService.HasKeyPair(pubKey1), "identity should not be present, but it is still present in whisper")
// now logout, and make sure that on restart no account is selected (i.e. logout works properly)
s.NoError(s.Backend.AccountManager().Logout())
s.NoError(s.Backend.Logout())
s.RestartTestNode()
whisperService = s.WhisperService()
s.False(whisperService.HasKeyPair(pubKey2), "identity not injected into whisper")