node manager: make API async, fully cover with tests

- API is async (a usage sketch follows the commit header below)
- Node manager, backend and API modules have more than 90% test coverage
- For each level (node manager, backend, public API) randomized concurrent
  testing was used to ensure that we don't have race conditions
Victor Farazdagi 2017-05-25 16:14:52 +03:00
parent 35eb15fed7
commit 0e2839b705
16 changed files with 880 additions and 424 deletions
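
For reference, a minimal sketch of the new call pattern from client code (not
part of the diff; error handling trimmed, and the blocking receives are only
needed when the caller actually wants to wait for the node):

    package main

    import (
        "github.com/status-im/status-go/geth/api"
        "github.com/status-im/status-go/geth/params"
    )

    func runNode(config *params.NodeConfig) error {
        statusAPI := api.NewStatusAPI()

        // StartNodeAsync returns immediately; the returned channel is
        // closed once the backend is fully ready (node.ready signal).
        nodeReady, err := statusAPI.StartNodeAsync(config)
        if err != nil {
            return err
        }
        <-nodeReady

        // StopNodeAsync mirrors the pattern for shutdown (node.stopped).
        nodeStopped, err := statusAPI.StopNodeAsync()
        if err != nil {
            return err
        }
        <-nodeStopped
        return nil
    }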

@@ -32,13 +32,20 @@ func StartNode(configJSON *C.char) *C.char {
return makeJSONResponse(err)
}
err = statusAPI.StartNodeNonBlocking(config)
_, err = statusAPI.StartNodeAsync(config)
return makeJSONResponse(err)
}
//export StopNode
func StopNode() *C.char {
return makeJSONResponse(statusAPI.StopNode())
_, err := statusAPI.StopNodeAsync()
return makeJSONResponse(err)
}
//export ResetChainData
func ResetChainData() *C.char {
_, err := statusAPI.ResetChainDataAsync()
return makeJSONResponse(err)
}
//export ResumeNode
@@ -47,11 +54,6 @@ func ResumeNode() *C.char {
return makeJSONResponse(err)
}
//export ResetChainData
func ResetChainData() *C.char {
return makeJSONResponse(statusAPI.ResetChainData())
}
//export StopNodeRPCServer
func StopNodeRPCServer() *C.char {
err := fmt.Errorf("%v: %v", common.ErrDeprecatedMethod.Error(), "StopNodeRPCServer")

@@ -335,7 +335,6 @@ func testStopResumeNode(t *testing.T) bool {
response := common.APIResponse{}
rawResponse = StartNode(C.CString(nodeConfigJSON))
if err = json.Unmarshal([]byte(C.GoString(rawResponse)), &response); err != nil {
t.Errorf("cannot decode StartNode response (%s): %v", C.GoString(rawResponse), err)
return false

@@ -1,8 +1,6 @@
package api
import (
"sync"
"github.com/ethereum/go-ethereum/accounts/keystore"
gethcommon "github.com/ethereum/go-ethereum/common"
"github.com/status-im/status-go/geth/common"
@@ -11,7 +9,6 @@ import (
// StatusAPI provides API to access Status related functionality.
type StatusAPI struct {
sync.Mutex
b *StatusBackend
}
@@ -24,70 +21,53 @@ func NewStatusAPI() *StatusAPI {
// NodeManager returns reference to node manager
func (api *StatusAPI) NodeManager() common.NodeManager {
api.Lock()
defer api.Unlock()
return api.b.NodeManager()
}
// AccountManager returns reference to account manager
func (api *StatusAPI) AccountManager() common.AccountManager {
api.Lock()
defer api.Unlock()
return api.b.AccountManager()
}
// JailManager returns reference to jail
func (api *StatusAPI) JailManager() common.JailManager {
api.Lock()
defer api.Unlock()
return api.b.JailManager()
}
// StartNode start Status node, fails if node is already started
func (api *StatusAPI) StartNode(config *params.NodeConfig) error {
api.Lock()
defer api.Unlock()
nodeStarted, err := api.b.StartNode(config)
if err != nil {
return err
}
<-nodeStarted // do not return up until backend is ready
<-nodeStarted
return nil
}
// StartNodeNonBlocking start Status node, fails if node is already started
// Returns immediately w/o waiting for node to start (relies on listening
// for node.started signal)
func (api *StatusAPI) StartNodeNonBlocking(config *params.NodeConfig) error {
api.Lock()
defer api.Unlock()
_, err := api.b.StartNode(config)
if err != nil {
return err
}
return nil
// StartNodeAsync start Status node, fails if node is already started
// Returns immediately w/o waiting for node to start (see node.ready)
func (api *StatusAPI) StartNodeAsync(config *params.NodeConfig) (<-chan struct{}, error) {
return api.b.StartNode(config)
}
// StopNode stop Status node. Stopped node cannot be resumed.
func (api *StatusAPI) StopNode() error {
api.Lock()
defer api.Unlock()
nodeStopped, err := api.b.StopNode()
if err != nil {
return err
}
<-nodeStopped
return nil
}
// StopNodeAsync stop Status node. Stopped node cannot be resumed.
// Returns immediately, w/o waiting for node to stop (see node.stopped)
func (api *StatusAPI) StopNodeAsync() (<-chan struct{}, error) {
return api.b.StopNode()
}
// RestartNode restart running Status node, fails if node is not running
func (api *StatusAPI) RestartNode() error {
api.Lock()
defer api.Unlock()
nodeStarted, err := api.b.RestartNode()
if err != nil {
return err
@@ -96,12 +76,14 @@ func (api *StatusAPI) RestartNode() error {
return nil
}
// RestartNodeAsync restart running Status node, in async manner
func (api *StatusAPI) RestartNodeAsync() (<-chan struct{}, error) {
return api.b.RestartNode()
}
// ResetChainData remove chain data from data directory.
// Node is stopped, and new node is started, with clean data directory.
func (api *StatusAPI) ResetChainData() error {
api.Lock()
defer api.Unlock()
nodeStarted, err := api.b.ResetChainData()
if err != nil {
return err
@@ -110,20 +92,9 @@ func (api *StatusAPI) ResetChainData() error {
return nil
}
// PopulateStaticPeers connects current node with our publicly available LES/SHH/Swarm cluster
func (api *StatusAPI) PopulateStaticPeers() error {
api.Lock()
defer api.Unlock()
return api.b.nodeManager.PopulateStaticPeers()
}
// AddPeer adds new static peer node
func (api *StatusAPI) AddPeer(url string) error {
api.Lock()
defer api.Unlock()
return api.b.nodeManager.AddPeer(url)
// ResetChainDataAsync remove chain data from data directory, in async manner
func (api *StatusAPI) ResetChainDataAsync() (<-chan struct{}, error) {
return api.b.ResetChainData()
}
// CreateAccount creates an internal geth account
@@ -131,113 +102,73 @@ func (api *StatusAPI) AddPeer(url string) error {
// Public key of CKD#1 is returned, with CKD#2 securely encoded into account key file (to be used for
// sub-account derivations)
func (api *StatusAPI) CreateAccount(password string) (address, pubKey, mnemonic string, err error) {
api.Lock()
defer api.Unlock()
return api.b.CreateAccount(password)
return api.b.AccountManager().CreateAccount(password)
}
// CreateChildAccount creates sub-account for an account identified by parent address.
// CKD#2 is used as root for master accounts (when parentAddress is "").
// Otherwise (when parentAddress != ""), child is derived directly from parent.
func (api *StatusAPI) CreateChildAccount(parentAddress, password string) (address, pubKey string, err error) {
api.Lock()
defer api.Unlock()
return api.b.CreateChildAccount(parentAddress, password)
return api.b.AccountManager().CreateChildAccount(parentAddress, password)
}
// RecoverAccount re-creates master key using given details.
// Once master key is re-generated, it is inserted into keystore (if not already there).
func (api *StatusAPI) RecoverAccount(password, mnemonic string) (address, pubKey string, err error) {
api.Lock()
defer api.Unlock()
return api.b.RecoverAccount(password, mnemonic)
return api.b.AccountManager().RecoverAccount(password, mnemonic)
}
// VerifyAccountPassword tries to decrypt a given account key file, with a provided password.
// If no error is returned, then account is considered verified.
func (api *StatusAPI) VerifyAccountPassword(keyStoreDir, address, password string) (*keystore.Key, error) {
api.Lock()
defer api.Unlock()
return api.b.VerifyAccountPassword(keyStoreDir, address, password)
return api.b.AccountManager().VerifyAccountPassword(keyStoreDir, address, password)
}
// SelectAccount selects current account, by verifying that address has corresponding account which can be decrypted
// using provided password. Once verification is done, decrypted key is injected into Whisper (as a single identity,
// all previous identities are removed).
func (api *StatusAPI) SelectAccount(address, password string) error {
api.Lock()
defer api.Unlock()
return api.b.SelectAccount(address, password)
return api.b.AccountManager().SelectAccount(address, password)
}
// Logout clears whisper identities
func (api *StatusAPI) Logout() error {
api.Lock()
defer api.Unlock()
return api.b.Logout()
return api.b.AccountManager().Logout()
}
// CompleteTransaction instructs backend to complete sending of a given transaction
func (api *StatusAPI) CompleteTransaction(id, password string) (gethcommon.Hash, error) {
api.Lock()
defer api.Unlock()
return api.b.CompleteTransaction(id, password)
}
// CompleteTransactions instructs backend to complete sending of multiple transactions
func (api *StatusAPI) CompleteTransactions(ids, password string) map[string]common.RawCompleteTransactionResult {
api.Lock()
defer api.Unlock()
return api.b.CompleteTransactions(ids, password)
}
// DiscardTransaction discards a given transaction from transaction queue
func (api *StatusAPI) DiscardTransaction(id string) error {
api.Lock()
defer api.Unlock()
return api.b.DiscardTransaction(id)
}
// DiscardTransactions discards given multiple transactions from transaction queue
func (api *StatusAPI) DiscardTransactions(ids string) map[string]common.RawDiscardTransactionResult {
api.Lock()
defer api.Unlock()
return api.b.DiscardTransactions(ids)
}
// Parse creates a new jail cell context, with the given chatID as identifier.
// New context executes provided JavaScript code, right after the initialization.
func (api *StatusAPI) JailParse(chatID string, js string) string {
api.Lock()
defer api.Unlock()
return api.b.jailManager.Parse(chatID, js)
}
// Call executes given JavaScript function w/i a jail cell context identified by the chatID.
// Jail cell is cloned before call is executed i.e. all calls execute w/i their own contexts.
func (api *StatusAPI) JailCall(chatID string, path string, args string) string {
api.Lock()
defer api.Unlock()
return api.b.jailManager.Call(chatID, path, args)
}
// BaseJS allows to setup initial JavaScript to be loaded on each jail.Parse()
func (api *StatusAPI) JailBaseJS(js string) {
api.Lock()
defer api.Unlock()
api.b.jailManager.BaseJS(js)
}
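
With the API-level mutex gone, callers that prefer callbacks over channels can
still track lifecycle transitions through the signal envelopes the backend
emits (node.started, node.ready, node.stopped). A sketch, not part of the
commit, assuming the envelope serializes its Type field as "type" in JSON:

    package main

    import (
        "encoding/json"

        "github.com/ethereum/go-ethereum/log"
        "github.com/status-im/status-go/geth/node"
    )

    func watchLifecycle() {
        node.SetDefaultNodeNotificationHandler(func(jsonEvent string) {
            var envelope struct {
                Type string `json:"type"`
            }
            if err := json.Unmarshal([]byte(jsonEvent), &envelope); err != nil {
                return // not a well-formed envelope
            }
            switch envelope.Type {
            case node.EventNodeReady:
                log.Info("backend is ready")
            case node.EventNodeStopped:
                log.Info("node has stopped")
            }
        })
    }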

@@ -1,10 +1,11 @@
package api_test
import (
"fmt"
"math/rand"
"testing"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/status-im/status-go/geth/api"
"github.com/status-im/status-go/geth/params"
. "github.com/status-im/status-go/geth/testing"
@@ -27,40 +28,71 @@ func (s *APITestSuite) SetupTest() {
require.IsType(&api.StatusAPI{}, statusAPI)
s.api = statusAPI
}
func (s *APITestSuite) TestStartStopRaces() {
func (s *APITestSuite) TestRaceConditions() {
require := s.Require()
require.NotNil(s.api)
nodeConfig, err := MakeTestNodeConfig(params.RinkebyNetworkID)
cnt := 25
progress := make(chan struct{}, cnt)
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
nodeConfig1, err := MakeTestNodeConfig(params.RopstenNetworkID)
require.NoError(err)
progress := make(chan struct{}, 100)
nodeConfig2, err := MakeTestNodeConfig(params.RinkebyNetworkID)
require.NoError(err)
start := func() {
fmt.Println("start node")
s.api.StartNode(nodeConfig)
progress <- struct{}{}
}
stop := func() {
fmt.Println("stop node")
s.api.StopNode()
progress <- struct{}{}
nodeConfigs := []*params.NodeConfig{nodeConfig1, nodeConfig2}
var funcsToTest = []func(*params.NodeConfig){
func(config *params.NodeConfig) {
log.Info("StartNodeAsync()")
_, err := s.api.StartNodeAsync(config)
s.T().Logf("StartNodeAsync() for network: %d, error: %v", config.NetworkID, err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("StopNodeAsync()")
_, err := s.api.StopNodeAsync()
s.T().Logf("StopNodeAsync(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("RestartNodeAsync()")
_, err := s.api.RestartNodeAsync()
s.T().Logf("RestartNodeAsync(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("ResetChainDataAsync()")
_, err := s.api.ResetChainDataAsync()
s.T().Logf("ResetChainDataAsync(), error: %v", err)
progress <- struct{}{}
},
}
// start one node and sync it a bit
start()
time.Sleep(5 * time.Second)
for i := 0; i < 20; i++ {
go start()
go stop()
// increase StartNode()/StopNode() population
for i := 0; i < 5; i++ {
funcsToTest = append(funcsToTest, funcsToTest[0], funcsToTest[1])
}
for i := 0; i < cnt; i++ {
randConfig := nodeConfigs[rnd.Intn(len(nodeConfigs))]
randFunc := funcsToTest[rnd.Intn(len(funcsToTest))]
if rnd.Intn(100) > 75 { // introduce random delays
time.Sleep(500 * time.Millisecond)
}
go randFunc(randConfig)
}
cnt := 0
for range progress {
cnt += 1
if cnt >= 40 {
cnt -= 1
if cnt <= 0 {
break
}
}
time.Sleep(2 * time.Second) // so that we see some logs
s.api.StopNode() // just in case we have a node running
}
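
The synchronization idiom in this test (fan out goroutines, have each one
report on a buffered channel, then drain a fixed count before tearing down)
distills to the sketch below; it pays off most when the suite is run under
Go's race detector (go test -race):

    // runConcurrently is a distilled sketch of the pattern above, not part
    // of the commit: it launches n goroutines and returns only after every
    // one has reported completion on the buffered progress channel.
    func runConcurrently(n int, job func(i int)) {
        progress := make(chan struct{}, n)
        for i := 0; i < n; i++ {
            go func(i int) {
                job(i)
                progress <- struct{}{}
            }(i)
        }
        for i := 0; i < n; i++ {
            <-progress // one receive per goroutine, then we are done
        }
    }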

@@ -1,7 +1,8 @@
package api
import (
"github.com/ethereum/go-ethereum/accounts/keystore"
"sync"
gethcommon "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/les"
"github.com/ethereum/go-ethereum/log"
@@ -13,6 +14,8 @@ import (
// StatusBackend implements Status.im service
type StatusBackend struct {
sync.Mutex
nodeReady chan struct{} // channel to wait for when node is fully ready
nodeManager common.NodeManager
accountManager common.AccountManager
txQueueManager common.TxQueueManager
@@ -55,102 +58,110 @@ func (m *StatusBackend) IsNodeRunning() bool {
// StartNode start Status node, fails if node is already started
func (m *StatusBackend) StartNode(config *params.NodeConfig) (<-chan struct{}, error) {
backendReady := make(chan struct{})
m.Lock()
defer m.Unlock()
if m.nodeReady != nil {
return nil, node.ErrNodeExists
}
nodeStarted, err := m.nodeManager.StartNode(config)
if err != nil {
return nil, err
}
go m.onNodeStart(backendReady, nodeStarted)
return backendReady, err
m.nodeReady = make(chan struct{}, 1)
go m.onNodeStart(nodeStarted, m.nodeReady) // waits on nodeStarted, writes to backendReady
return m.nodeReady, err
}
func (m *StatusBackend) onNodeStart(backendReady chan struct{}, nodeStarted <-chan struct{}) {
defer close(backendReady)
// onNodeStart does everything required to prepare backend
func (m *StatusBackend) onNodeStart(nodeStarted <-chan struct{}, backendReady chan struct{}) {
<-nodeStarted
if err := m.registerHandlers(); err != nil {
log.Error("Handler registration failed", "err", err)
}
m.accountManager.ReSelectAccount()
log.Info("Account reselected")
close(backendReady)
node.SendSignal(node.SignalEnvelope{
Type: node.EventNodeReady,
Event: struct{}{},
})
}
// StopNode stop Status node. Stopped node cannot be resumed.
func (m *StatusBackend) StopNode() (<-chan struct{}, error) {
m.Lock()
defer m.Unlock()
if m.nodeReady == nil {
return nil, node.ErrNoRunningNode
}
<-m.nodeReady
nodeStopped, err := m.nodeManager.StopNode()
if err != nil {
return nil, err
}
backendStopped := make(chan struct{}, 1)
go func() {
<-nodeStopped
m.Lock()
m.nodeReady = nil
m.Unlock()
close(backendStopped)
}()
return backendStopped, nil
}
// RestartNode restart running Status node, fails if node is not running
func (m *StatusBackend) RestartNode() (<-chan struct{}, error) {
backendReady := make(chan struct{})
m.Lock()
defer m.Unlock()
if m.nodeReady == nil {
return nil, node.ErrNoRunningNode
}
<-m.nodeReady
nodeRestarted, err := m.nodeManager.RestartNode()
if err != nil {
return nil, err
}
go m.onNodeStart(backendReady, nodeRestarted)
return backendReady, err
}
m.nodeReady = make(chan struct{}, 1)
go m.onNodeStart(nodeRestarted, m.nodeReady) // waits on nodeRestarted, writes to backendReady
// StopNode stop Status node. Stopped node cannot be resumed.
func (m *StatusBackend) StopNode() error {
return m.nodeManager.StopNode()
return m.nodeReady, err
}
// ResetChainData remove chain data from data directory.
// Node is stopped, and new node is started, with clean data directory.
func (m *StatusBackend) ResetChainData() (<-chan struct{}, error) {
backendReady := make(chan struct{})
nodeRestarted, err := m.nodeManager.ResetChainData()
m.Lock()
defer m.Unlock()
if m.nodeReady == nil {
return nil, node.ErrNoRunningNode
}
<-m.nodeReady
nodeReset, err := m.nodeManager.ResetChainData()
if err != nil {
return nil, err
}
go m.onNodeStart(backendReady, nodeRestarted)
return backendReady, err
}
m.nodeReady = make(chan struct{}, 1)
go m.onNodeStart(nodeReset, m.nodeReady) // waits on nodeReset, writes to backendReady
// CreateAccount creates an internal geth account
// BIP44-compatible keys are generated: CKD#1 is stored as account key, CKD#2 stored as sub-account root
// Public key of CKD#1 is returned, with CKD#2 securely encoded into account key file (to be used for
// sub-account derivations)
func (m *StatusBackend) CreateAccount(password string) (address, pubKey, mnemonic string, err error) {
return m.accountManager.CreateAccount(password)
}
// CreateChildAccount creates sub-account for an account identified by parent address.
// CKD#2 is used as root for master accounts (when parentAddress is "").
// Otherwise (when parentAddress != ""), child is derived directly from parent.
func (m *StatusBackend) CreateChildAccount(parentAddress, password string) (address, pubKey string, err error) {
return m.accountManager.CreateChildAccount(parentAddress, password)
}
// RecoverAccount re-creates master key using given details.
// Once master key is re-generated, it is inserted into keystore (if not already there).
func (m *StatusBackend) RecoverAccount(password, mnemonic string) (address, pubKey string, err error) {
return m.accountManager.RecoverAccount(password, mnemonic)
}
// VerifyAccountPassword tries to decrypt a given account key file, with a provided password.
// If no error is returned, then account is considered verified.
func (m *StatusBackend) VerifyAccountPassword(keyStoreDir, address, password string) (*keystore.Key, error) {
return m.accountManager.VerifyAccountPassword(keyStoreDir, address, password)
}
// SelectAccount selects current account, by verifying that address has corresponding account which can be decrypted
// using provided password. Once verification is done, decrypted key is injected into Whisper (as a single identity,
// all previous identities are removed).
func (m *StatusBackend) SelectAccount(address, password string) error {
return m.accountManager.SelectAccount(address, password)
}
// ReSelectAccount selects previously selected account, often, after node restart.
func (m *StatusBackend) ReSelectAccount() error {
return m.accountManager.ReSelectAccount()
}
// Logout clears whisper identities
func (m *StatusBackend) Logout() error {
return m.accountManager.Logout()
}
// SelectedAccount returns currently selected account
func (m *StatusBackend) SelectedAccount() (*common.SelectedExtKey, error) {
return m.accountManager.SelectedAccount()
return m.nodeReady, err
}
// CompleteTransaction instructs backend to complete sending of a given transaction
@@ -194,13 +205,5 @@ func (m *StatusBackend) registerHandlers() error {
lightEthereum.StatusBackend.SetTransactionReturnHandler(m.txQueueManager.TransactionReturnHandler())
log.Info("Registered handler", "fn", "TransactionReturnHandler")
m.ReSelectAccount()
log.Info("Account reselected")
node.SendSignal(node.SignalEnvelope{
Type: node.EventNodeReady,
Event: struct{}{},
})
return nil
}
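
The concurrency discipline introduced here is worth spelling out: nodeReady is
published under the backend mutex, every lifecycle method first receives from
it (blocking until onNodeStart closes it), and StopNode resets it to nil so
the next StartNode can run. A distilled sketch of this guard pattern, with
hypothetical names:

    package main

    import (
        "errors"
        "sync"
    )

    // guarded is a hypothetical distillation of StatusBackend's nodeReady
    // discipline: a nil channel means "not running"; a non-nil channel is
    // closed once startup completes, so receiving from it doubles as
    // "wait until fully started".
    type guarded struct {
        sync.Mutex
        ready chan struct{}
    }

    func (g *guarded) start(begin func() <-chan struct{}) (<-chan struct{}, error) {
        g.Lock()
        defer g.Unlock()
        if g.ready != nil {
            return nil, errors.New("already running")
        }
        started := begin()
        g.ready = make(chan struct{})
        go func(ready chan struct{}) {
            <-started    // wait for the underlying node
            close(ready) // unblock everyone waiting on readiness
        }(g.ready)
        return g.ready, nil
    }

    func (g *guarded) stop(end func()) error {
        g.Lock()
        defer g.Unlock()
        if g.ready == nil {
            return errors.New("no running node")
        }
        <-g.ready // operate only on a fully started instance
        end()
        g.ready = nil
        return nil
    }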

@@ -35,7 +35,7 @@ func (s *BackendTestSuite) TestAccountsList() {
require.Zero(len(accounts), "accounts returned, while there should be none (we haven't logged in yet)")
// create an account
address, _, _, err := s.backend.CreateAccount(TestConfig.Account1.Password)
address, _, _, err := s.backend.AccountManager().CreateAccount(TestConfig.Account1.Password)
require.NoError(err)
// ensure that there is still no accounts returned
@@ -43,7 +43,7 @@ func (s *BackendTestSuite) TestAccountsList() {
require.Zero(len(accounts), "accounts returned, while there should be none (we haven't logged in yet)")
// select account (sub-accounts will be created for this key)
err = s.backend.SelectAccount(address, TestConfig.Account1.Password)
err = s.backend.AccountManager().SelectAccount(address, TestConfig.Account1.Password)
require.NoError(err, "account selection failed")
// at this point main account should show up
@@ -53,7 +53,7 @@ func (s *BackendTestSuite) TestAccountsList() {
fmt.Sprintf("main account is not retured as the first key: got %s, expected %s", accounts[0].Hex(), "0x"+address))
// create sub-account 1
subAccount1, subPubKey1, err := s.backend.CreateChildAccount("", TestConfig.Account1.Password)
subAccount1, subPubKey1, err := s.backend.AccountManager().CreateChildAccount("", TestConfig.Account1.Password)
require.NoError(err, "cannot create sub-account")
// now we expect to see both main account and sub-account 1
@@ -63,7 +63,7 @@ func (s *BackendTestSuite) TestAccountsList() {
require.Equal(string(accounts[1].Hex()), "0x"+subAccount1, "subAcount1 not returned")
// create sub-account 2, index automatically progresses
subAccount2, subPubKey2, err := s.backend.CreateChildAccount("", TestConfig.Account1.Password)
subAccount2, subPubKey2, err := s.backend.AccountManager().CreateChildAccount("", TestConfig.Account1.Password)
require.NoError(err, "cannot create sub-account")
require.False(subAccount1 == subAccount2 || subPubKey1 == subPubKey2, "sub-account index auto-increment failed")
@@ -93,7 +93,7 @@ func (s *BackendTestSuite) TestCreateChildAccount() {
require.NotNil(keyStore)
// create an account
address, pubKey, mnemonic, err := s.backend.CreateAccount(TestConfig.Account1.Password)
address, pubKey, mnemonic, err := s.backend.AccountManager().CreateAccount(TestConfig.Account1.Password)
require.NoError(err)
s.T().Logf("Account created: {address: %s, key: %s, mnemonic:%s}", address, pubKey, mnemonic)
@@ -106,28 +106,28 @@ func (s *BackendTestSuite) TestCreateChildAccount() {
require.NotNil(key.ExtendedKey, "CKD#2 has not been generated for new account")
// try creating sub-account, w/o selecting main account i.e. w/o login to main account
_, _, err = s.backend.CreateChildAccount("", TestConfig.Account1.Password)
_, _, err = s.backend.AccountManager().CreateChildAccount("", TestConfig.Account1.Password)
require.EqualError(node.ErrNoAccountSelected, err.Error(), "expected error is not returned (tried to create sub-account w/o login)")
err = s.backend.SelectAccount(address, TestConfig.Account1.Password)
err = s.backend.AccountManager().SelectAccount(address, TestConfig.Account1.Password)
require.NoError(err, "cannot select account")
// try to create sub-account with wrong password
_, _, err = s.backend.CreateChildAccount("", "wrong password")
_, _, err = s.backend.AccountManager().CreateChildAccount("", "wrong password")
expectedErr := errors.New("cannot retrieve a valid key for a given account: could not decrypt key with given passphrase")
require.EqualError(expectedErr, err.Error(), "create sub-account with wrong password")
// create sub-account (from implicit parent)
subAccount1, subPubKey1, err := s.backend.CreateChildAccount("", TestConfig.Account1.Password)
subAccount1, subPubKey1, err := s.backend.AccountManager().CreateChildAccount("", TestConfig.Account1.Password)
require.NoError(err, "cannot create sub-account")
// make sure that sub-account index automatically progresses
subAccount2, subPubKey2, err := s.backend.CreateChildAccount("", TestConfig.Account1.Password)
subAccount2, subPubKey2, err := s.backend.AccountManager().CreateChildAccount("", TestConfig.Account1.Password)
require.NoError(err)
require.False(subAccount1 == subAccount2 || subPubKey1 == subPubKey2, "sub-account index auto-increment failed")
// create sub-account (from explicit parent)
subAccount3, subPubKey3, err := s.backend.CreateChildAccount(subAccount2, TestConfig.Account1.Password)
subAccount3, subPubKey3, err := s.backend.AccountManager().CreateChildAccount(subAccount2, TestConfig.Account1.Password)
require.NoError(err)
require.False(subAccount1 == subAccount3 || subPubKey1 == subPubKey3 || subAccount2 == subAccount3 || subPubKey2 == subPubKey3)
}
@@ -144,12 +144,12 @@ func (s *BackendTestSuite) TestRecoverAccount() {
require.NotNil(keyStore)
// create an account
address, pubKey, mnemonic, err := s.backend.CreateAccount(TestConfig.Account1.Password)
address, pubKey, mnemonic, err := s.backend.AccountManager().CreateAccount(TestConfig.Account1.Password)
require.NoError(err)
s.T().Logf("Account created: {address: %s, key: %s, mnemonic:%s}", address, pubKey, mnemonic)
// try recovering using password + mnemonic
addressCheck, pubKeyCheck, err := s.backend.RecoverAccount(TestConfig.Account1.Password, mnemonic)
addressCheck, pubKeyCheck, err := s.backend.AccountManager().RecoverAccount(TestConfig.Account1.Password, mnemonic)
require.NoError(err, "recover account failed")
require.False(address != addressCheck || pubKey != pubKeyCheck, "incorrect account details recovered")
@@ -163,7 +163,7 @@ func (s *BackendTestSuite) TestRecoverAccount() {
require.NoError(keyStore.Delete(account, TestConfig.Account1.Password), "cannot remove account")
addressCheck, pubKeyCheck, err = s.backend.RecoverAccount(TestConfig.Account1.Password, mnemonic)
addressCheck, pubKeyCheck, err = s.backend.AccountManager().RecoverAccount(TestConfig.Account1.Password, mnemonic)
require.NoError(err, "recover account failed (for non-cached account)")
require.False(address != addressCheck || pubKey != pubKeyCheck,
"incorrect account details recovered (for non-cached account)")
@@ -174,7 +174,7 @@ func (s *BackendTestSuite) TestRecoverAccount() {
require.Equal(extChild2String, key.ExtendedKey.String(), "CKD#2 key mismatch")
// make sure that calling import several times, just returns from cache (no error is expected)
addressCheck, pubKeyCheck, err = s.backend.RecoverAccount(TestConfig.Account1.Password, mnemonic)
addressCheck, pubKeyCheck, err = s.backend.AccountManager().RecoverAccount(TestConfig.Account1.Password, mnemonic)
require.NoError(err, "recover account failed (for non-cached account)")
require.False(address != addressCheck || pubKey != pubKeyCheck,
"incorrect account details recovered (for non-cached account)")
@@ -184,7 +184,7 @@ func (s *BackendTestSuite) TestRecoverAccount() {
// make sure that identity is not (yet injected)
require.False(whisperService.HasKeyPair(pubKeyCheck), "identity already present in whisper")
require.NoError(s.backend.SelectAccount(addressCheck, TestConfig.Account1.Password))
require.NoError(s.backend.AccountManager().SelectAccount(addressCheck, TestConfig.Account1.Password))
require.True(whisperService.HasKeyPair(pubKeyCheck), "identity not injected into whisper")
}
@@ -199,11 +199,11 @@ func (s *BackendTestSuite) TestSelectAccount() {
whisperService := s.WhisperService()
// create an account
address1, pubKey1, _, err := s.backend.CreateAccount(TestConfig.Account1.Password)
address1, pubKey1, _, err := s.backend.AccountManager().CreateAccount(TestConfig.Account1.Password)
require.NoError(err)
s.T().Logf("Account created: {address: %s, key: %s}", address1, pubKey1)
address2, pubKey2, _, err := s.backend.CreateAccount(TestConfig.Account1.Password)
address2, pubKey2, _, err := s.backend.AccountManager().CreateAccount(TestConfig.Account1.Password)
require.NoError(err)
s.T().Logf("Account created: {address: %s, key: %s}", address2, pubKey2)
@@ -211,17 +211,17 @@ func (s *BackendTestSuite) TestSelectAccount() {
require.False(whisperService.HasKeyPair(pubKey1), "identity already present in whisper")
// try selecting with wrong password
err = s.backend.SelectAccount(address1, "wrongPassword")
err = s.backend.AccountManager().SelectAccount(address1, "wrongPassword")
expectedErr := errors.New("cannot retrieve a valid key for a given account: could not decrypt key with given passphrase")
require.EqualError(expectedErr, err.Error(), "select account is expected to throw error: wrong password used")
err = s.backend.SelectAccount(address1, TestConfig.Account1.Password)
err = s.backend.AccountManager().SelectAccount(address1, TestConfig.Account1.Password)
require.NoError(err)
require.True(whisperService.HasKeyPair(pubKey1), "identity not injected into whisper")
// select another account, make sure that previous account is wiped out from Whisper cache
require.False(whisperService.HasKeyPair(pubKey2), "identity already present in whisper")
require.NoError(s.backend.SelectAccount(address2, TestConfig.Account1.Password))
require.NoError(s.backend.AccountManager().SelectAccount(address2, TestConfig.Account1.Password))
require.True(whisperService.HasKeyPair(pubKey2), "identity not injected into whisper")
require.False(whisperService.HasKeyPair(pubKey1), "identity should be removed, but it is still present in whisper")
}
@@ -236,15 +236,15 @@ func (s *BackendTestSuite) TestLogout() {
whisperService := s.WhisperService()
// create an account
address, pubKey, _, err := s.backend.CreateAccount(TestConfig.Account1.Password)
address, pubKey, _, err := s.backend.AccountManager().CreateAccount(TestConfig.Account1.Password)
require.NoError(err)
// make sure that identity doesn't exist (yet) in Whisper
require.False(whisperService.HasKeyPair(pubKey), "identity already present in whisper")
require.NoError(s.backend.SelectAccount(address, TestConfig.Account1.Password))
require.NoError(s.backend.AccountManager().SelectAccount(address, TestConfig.Account1.Password))
require.True(whisperService.HasKeyPair(pubKey), "identity not injected into whisper")
require.NoError(s.backend.Logout())
require.NoError(s.backend.AccountManager().Logout())
require.False(whisperService.HasKeyPair(pubKey), "identity not cleared from whisper")
}
@@ -258,42 +258,44 @@ func (s *BackendTestSuite) TestSelectedAccountOnRestart() {
whisperService := s.WhisperService()
// create test accounts
address1, pubKey1, _, err := s.backend.CreateAccount(TestConfig.Account1.Password)
address1, pubKey1, _, err := s.backend.AccountManager().CreateAccount(TestConfig.Account1.Password)
require.NoError(err)
address2, pubKey2, _, err := s.backend.CreateAccount(TestConfig.Account1.Password)
address2, pubKey2, _, err := s.backend.AccountManager().CreateAccount(TestConfig.Account1.Password)
require.NoError(err)
// make sure that identity is not (yet injected)
require.False(whisperService.HasKeyPair(pubKey1), "identity already present in whisper")
// make sure that no account is selected by default
selectedAccount, err := s.backend.SelectedAccount()
selectedAccount, err := s.backend.AccountManager().SelectedAccount()
require.EqualError(node.ErrNoAccountSelected, err.Error(), "account selected, but should not be")
require.Nil(selectedAccount)
// select account
err = s.backend.SelectAccount(address1, "wrongPassword")
err = s.backend.AccountManager().SelectAccount(address1, "wrongPassword")
expectedErr := errors.New("cannot retrieve a valid key for a given account: could not decrypt key with given passphrase")
require.EqualError(expectedErr, err.Error())
require.NoError(s.backend.SelectAccount(address1, TestConfig.Account1.Password))
require.NoError(s.backend.AccountManager().SelectAccount(address1, TestConfig.Account1.Password))
require.True(whisperService.HasKeyPair(pubKey1), "identity not injected into whisper")
// select another account, make sure that previous account is wiped out from Whisper cache
require.False(whisperService.HasKeyPair(pubKey2), "identity already present in whisper")
require.NoError(s.backend.SelectAccount(address2, TestConfig.Account1.Password))
require.NoError(s.backend.AccountManager().SelectAccount(address2, TestConfig.Account1.Password))
require.True(whisperService.HasKeyPair(pubKey2), "identity not injected into whisper")
require.False(whisperService.HasKeyPair(pubKey1), "identity should be removed, but it is still present in whisper")
// stop node (and all of its sub-protocols)
nodeConfig, err := s.NodeManager.NodeConfig()
nodeConfig, err := s.backend.NodeManager().NodeConfig()
require.NoError(err)
require.NotNil(nodeConfig)
preservedNodeConfig := *nodeConfig
require.NoError(s.NodeManager.StopNode())
nodeStopped, err := s.backend.StopNode()
require.NoError(err)
<-nodeStopped
// make sure that account is still selected
selectedAccount, err = s.backend.SelectedAccount()
selectedAccount, err = s.backend.AccountManager().SelectedAccount()
require.NoError(err)
require.NotNil(selectedAccount)
require.Equal(selectedAccount.Address.Hex(), "0x"+address2, "incorrect address selected")
@@ -304,7 +306,7 @@ func (s *BackendTestSuite) TestSelectedAccountOnRestart() {
<-nodeStarted
// re-check selected account (account2 MUST be selected)
selectedAccount, err = s.backend.SelectedAccount()
selectedAccount, err = s.backend.AccountManager().SelectedAccount()
require.NoError(err)
require.NotNil(selectedAccount)
require.Equal(selectedAccount.Address.Hex(), "0x"+address2, "incorrect address selected")
@@ -323,13 +325,13 @@ func (s *BackendTestSuite) TestSelectedAccountOnRestart() {
require.False(whisperService.HasKeyPair(pubKey1), "identity should not be present, but it is still present in whisper")
// now logout, and make sure that on restart no account is selected (i.e. logout works properly)
require.NoError(s.backend.Logout())
require.NoError(s.backend.AccountManager().Logout())
s.RestartTestNode()
whisperService = s.WhisperService()
require.False(whisperService.HasKeyPair(pubKey2), "identity not injected into whisper")
require.False(whisperService.HasKeyPair(pubKey1), "identity should not be present, but it is still present in whisper")
selectedAccount, err = s.backend.SelectedAccount()
selectedAccount, err = s.backend.AccountManager().SelectedAccount()
require.EqualError(node.ErrNoAccountSelected, err.Error())
require.Nil(selectedAccount)
}

@@ -65,7 +65,7 @@ func (s *BackendTestSuite) TestJailContractDeployment() {
//t.Logf("Transaction queued (will be completed shortly): {id: %s}\n", event["id"].(string))
s.NoError(s.backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
s.NoError(s.backend.AccountManager().SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
var err error
txHash, err = s.backend.CompleteTransaction(event["id"].(string), TestConfig.Account1.Password)
@@ -117,7 +117,7 @@ func (s *BackendTestSuite) TestJailSendQueuedTransaction() {
require.NotNil(jailInstance)
// log into account from which transactions will be sent
require.NoError(s.backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
require.NoError(s.backend.AccountManager().SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
txParams := `{
"from": "` + TestConfig.Account1.Address + `",
@@ -310,7 +310,7 @@ func (s *BackendTestSuite) TestGasEstimation() {
//t.Logf("Transaction queued (will be completed shortly): {id: %s}\n", event["id"].(string))
s.NoError(s.backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
s.NoError(s.backend.AccountManager().SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
var err error
txHash, err = s.backend.CompleteTransaction(event["id"].(string), TestConfig.Account1.Password)

@@ -1,13 +1,16 @@
package api_test
import (
"math/rand"
"testing"
"time"
"github.com/ethereum/go-ethereum/les"
"github.com/ethereum/go-ethereum/log"
whisper "github.com/ethereum/go-ethereum/whisper/whisperv5"
"github.com/status-im/status-go/geth/api"
"github.com/status-im/status-go/geth/common"
"github.com/status-im/status-go/geth/jail"
"github.com/status-im/status-go/geth/node"
"github.com/status-im/status-go/geth/params"
. "github.com/status-im/status-go/geth/testing"
@@ -19,7 +22,7 @@ func TestBackendTestSuite(t *testing.T) {
}
type BackendTestSuite struct {
BaseTestSuite
suite.Suite
backend *api.StatusBackend
}
@@ -29,7 +32,6 @@ func (s *BackendTestSuite) SetupTest() {
require.NotNil(backend)
require.IsType(&api.StatusBackend{}, backend)
s.backend = backend
s.NodeManager = backend.NodeManager()
}
func (s *BackendTestSuite) StartTestBackend(networkID int) {
@@ -54,7 +56,9 @@ func (s *BackendTestSuite) StopTestBackend() {
require := s.Require()
require.NotNil(s.backend)
require.True(s.backend.IsNodeRunning())
require.NoError(s.backend.StopNode())
backendStopped, err := s.backend.StopNode()
require.NoError(err)
<-backendStopped
require.False(s.backend.IsNodeRunning())
}
@@ -110,38 +114,195 @@ func (s *BackendTestSuite) TestNodeStartStop() {
// try stopping non-started node
require.False(s.backend.IsNodeRunning())
err = s.backend.StopNode()
if s.Error(err) {
require.IsType(node.ErrNoRunningNode, err)
}
nodeStopped, err := s.backend.StopNode()
require.EqualError(err, node.ErrNoRunningNode.Error())
require.Nil(nodeStopped)
require.False(s.backend.IsNodeRunning())
nodeStarted, err := s.backend.StartNode(nodeConfig)
require.NoError(err)
require.NotNil(nodeStarted)
<-nodeStarted // wait till node is started
require.True(s.backend.IsNodeRunning())
// try starting another node (w/o stopping the previously started node)
_, err = s.backend.StartNode(nodeConfig)
if s.Error(err) {
require.IsType(node.ErrNodeAlreadyExists, err)
}
nodeStarted, err = s.backend.StartNode(nodeConfig)
require.EqualError(err, node.ErrNodeExists.Error())
require.Nil(nodeStarted)
// now stop node, and make sure that a new node, on different network can be started
err = s.backend.StopNode()
nodeStopped, err = s.backend.StopNode()
require.NoError(err)
require.NotNil(nodeStopped)
<-nodeStopped
// start new node with exactly the same config
require.False(s.backend.IsNodeRunning())
nodeStarted, err = s.backend.StartNode(nodeConfig)
require.NoError(err)
defer s.StopTestNode()
require.NotNil(nodeStarted)
defer s.backend.StopNode()
<-nodeStarted
require.True(s.backend.IsNodeRunning())
}
func (s *BackendTestSuite) TestRaceConditions() {
require := s.Require()
require.NotNil(s.backend)
cnt := 25
progress := make(chan struct{}, cnt)
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
nodeConfig1, err := MakeTestNodeConfig(params.RopstenNetworkID)
require.NoError(err)
nodeConfig2, err := MakeTestNodeConfig(params.RinkebyNetworkID)
require.NoError(err)
nodeConfigs := []*params.NodeConfig{nodeConfig1, nodeConfig2}
var funcsToTest = []func(*params.NodeConfig){
func(config *params.NodeConfig) {
log.Info("StartNode()")
_, err := s.backend.StartNode(config)
s.T().Logf("StartNode() for network: %d, error: %v", config.NetworkID, err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("StopNode()")
_, err := s.backend.StopNode()
s.T().Logf("StopNode() for network: %d, error: %v", config.NetworkID, err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("ResetChainData()")
_, err := s.backend.ResetChainData()
s.T().Logf("ResetChainData(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("RestartNode()")
_, err := s.backend.RestartNode()
s.T().Logf("RestartNode(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("NodeManager()")
instance := s.backend.NodeManager()
s.NotNil(instance)
s.IsType(&node.NodeManager{}, instance)
s.T().Logf("NodeManager(), result: %v", instance)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("AccountManager()")
instance := s.backend.AccountManager()
s.NotNil(instance)
s.IsType(&node.AccountManager{}, instance)
s.T().Logf("AccountManager(), result: %v", instance)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("JailManager()")
instance := s.backend.JailManager()
s.NotNil(instance)
s.IsType(&jail.Jail{}, instance)
s.T().Logf("JailManager(), result: %v", instance)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("CreateAccount()")
address, pubKey, mnemonic, err := s.backend.AccountManager().CreateAccount("password")
s.T().Logf("CreateAccount(), error: %v (address: %v, pubKey: %v, mnemonic: %v)", err, address, pubKey, mnemonic)
if err == nil {
// SelectAccount
log.Info("CreateAccount()")
err = s.backend.AccountManager().SelectAccount(address, "password")
s.T().Logf("SelectAccount(%v, %v), error: %v", address, "password", err)
// CreateChildAccount
log.Info("CreateChildAccount()")
address, pubKey, err := s.backend.AccountManager().CreateChildAccount(address, "password")
s.T().Logf("CreateAccount(), error: %v (address: %v, pubKey: %v)", err, address, pubKey)
// RecoverAccount
log.Info("RecoverAccount()")
address, pubKey, err = s.backend.AccountManager().RecoverAccount("password", mnemonic)
s.T().Logf("RecoverAccount(), error: %v (address: %v, pubKey: %v)", err, address, pubKey)
}
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("VerifyAccountPassword()")
_, err := s.backend.AccountManager().VerifyAccountPassword(config.KeyStoreDir, "0x0", "bar")
s.T().Logf("VerifyAccountPassword(), err: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("Logout()")
s.T().Logf("Logout(), result: %v", s.backend.AccountManager().Logout())
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("IsNodeRunning()")
s.T().Logf("IsNodeRunning(), result: %v", s.backend.IsNodeRunning())
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("CompleteTransaction()")
_, err := s.backend.CompleteTransaction("id", "password")
s.T().Logf("CompleteTransaction(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("DiscardTransaction()")
s.T().Logf("DiscardTransaction(), error: %v", s.backend.DiscardTransaction("id"))
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("CompleteTransactions()")
s.T().Logf("CompleteTransactions(), result: %v", s.backend.CompleteTransactions(`["id1","id2"]`, "password"))
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("DiscardTransactions()")
s.T().Logf("DiscardTransactions(), result: %v", s.backend.DiscardTransactions(`["id1","id2"]`))
progress <- struct{}{}
},
}
// increase StartNode()/StopNode() population
for i := 0; i < 5; i++ {
funcsToTest = append(funcsToTest, funcsToTest[0], funcsToTest[1])
}
for i := 0; i < cnt; i++ {
randConfig := nodeConfigs[rnd.Intn(len(nodeConfigs))]
randFunc := funcsToTest[rnd.Intn(len(funcsToTest))]
if rnd.Intn(100) > 75 { // introduce random delays
time.Sleep(500 * time.Millisecond)
}
go randFunc(randConfig)
}
for range progress {
cnt -= 1
if cnt <= 0 {
break
}
}
time.Sleep(2 * time.Second) // so that we see some logs
nodeStopped, _ := s.backend.StopNode() // just in case we have a node running
if nodeStopped != nil {
<-nodeStopped
}
}
func (s *BackendTestSuite) TestNetworkSwitching() {
require := s.Require()
require.NotNil(s.backend)
@@ -157,11 +318,12 @@ func (s *BackendTestSuite) TestNetworkSwitching() {
<-nodeStarted // wait till node is started
require.True(s.backend.IsNodeRunning())
s.FirstBlockHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d")
FirstBlockHash(require, s.backend.NodeManager(), "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d")
// now stop node, and make sure that a new node, on different network can be started
err = s.backend.StopNode()
nodeStopped, err := s.backend.StopNode()
require.NoError(err)
<-nodeStopped
// start new node with completely different config
nodeConfig, err = MakeTestNodeConfig(params.RinkebyNetworkID)
@@ -175,9 +337,11 @@ func (s *BackendTestSuite) TestNetworkSwitching() {
require.True(s.backend.IsNodeRunning())
// make sure we are on another network indeed
s.FirstBlockHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
FirstBlockHash(require, s.backend.NodeManager(), "0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
require.NoError(s.backend.StopNode())
nodeStopped, err = s.backend.StopNode()
require.NoError(err)
<-nodeStopped
}
func (s *BackendTestSuite) TestResetChainData() {
@@ -196,7 +360,7 @@ func (s *BackendTestSuite) TestResetChainData() {
s.True(s.backend.IsNodeRunning()) // new node, with previous config should be running
// make sure we can read the first byte, and it is valid (for Rinkeby)
s.FirstBlockHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
FirstBlockHash(require, s.backend.NodeManager(), "0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
}
func (s *BackendTestSuite) TestRestartNode() {
@@ -206,7 +370,7 @@ func (s *BackendTestSuite) TestRestartNode() {
s.StartTestBackend(params.RinkebyNetworkID)
defer s.StopTestBackend()
s.FirstBlockHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
FirstBlockHash(require, s.backend.NodeManager(), "0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
s.True(s.backend.IsNodeRunning())
nodeRestarted, err := s.backend.RestartNode()
@@ -215,5 +379,5 @@ func (s *BackendTestSuite) TestRestartNode() {
s.True(s.backend.IsNodeRunning()) // new node, with previous config should be running
// make sure we can read the first byte, and it is valid (for Rinkeby)
s.FirstBlockHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
FirstBlockHash(require, s.backend.NodeManager(), "0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
}

@@ -31,7 +31,7 @@ func (s *BackendTestSuite) TestSendContractTx() {
require.NotNil(backend)
// create an account
sampleAddress, _, _, err := s.backend.CreateAccount(TestConfig.Account1.Password)
sampleAddress, _, _, err := s.backend.AccountManager().CreateAccount(TestConfig.Account1.Password)
require.NoError(err)
// make sure you panic if transaction complete doesn't return
@@ -58,7 +58,7 @@ func (s *BackendTestSuite) TestSendContractTx() {
// the second call will also fail (we are logged in as different user)
log.Info("trying to complete with invalid user")
err = s.backend.SelectAccount(sampleAddress, TestConfig.Account1.Password)
err = s.backend.AccountManager().SelectAccount(sampleAddress, TestConfig.Account1.Password)
s.NoError(err)
txHash, err = s.backend.CompleteTransaction(event["id"].(string), TestConfig.Account1.Password)
s.EqualError(err, status.ErrInvalidCompleteTxSender.Error(),
@@ -66,7 +66,7 @@ func (s *BackendTestSuite) TestSendContractTx() {
// the third call will work as expected (as we are logged in with correct credentials)
log.Info("trying to complete with correct user, this should suceed")
s.NoError(s.backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
s.NoError(s.backend.AccountManager().SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
txHash, err = s.backend.CompleteTransaction(event["id"].(string), TestConfig.Account1.Password)
s.NoError(err, fmt.Sprintf("cannot complete queued transaction[%v]", event["id"]))
@@ -109,7 +109,7 @@ func (s *BackendTestSuite) TestSendEtherTx() {
require.NotNil(backend)
// create an account
sampleAddress, _, _, err := s.backend.CreateAccount(TestConfig.Account1.Password)
sampleAddress, _, _, err := s.backend.AccountManager().CreateAccount(TestConfig.Account1.Password)
require.NoError(err)
// make sure you panic if transaction complete doesn't return
@@ -136,7 +136,7 @@ func (s *BackendTestSuite) TestSendEtherTx() {
// the second call will also fail (we are logged in as different user)
log.Info("trying to complete with invalid user")
err = s.backend.SelectAccount(sampleAddress, TestConfig.Account1.Password)
err = s.backend.AccountManager().SelectAccount(sampleAddress, TestConfig.Account1.Password)
s.NoError(err)
txHash, err = s.backend.CompleteTransaction(event["id"].(string), TestConfig.Account1.Password)
s.EqualError(err, status.ErrInvalidCompleteTxSender.Error(),
@@ -144,7 +144,7 @@ func (s *BackendTestSuite) TestSendEtherTx() {
// the third call will work as expected (as we are logged in with correct credentials)
log.Info("trying to complete with correct user, this should suceed")
s.NoError(s.backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
s.NoError(s.backend.AccountManager().SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
txHash, err = s.backend.CompleteTransaction(event["id"].(string), TestConfig.Account1.Password)
s.NoError(err, fmt.Sprintf("cannot complete queued transaction[%v]", event["id"]))
@@ -181,7 +181,7 @@ func (s *BackendTestSuite) TestDoubleCompleteQueuedTransactions() {
require.NotNil(backend)
// log into account from which transactions will be sent
require.NoError(s.backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
require.NoError(s.backend.AccountManager().SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
// make sure you panic if transaction complete doesn't return
completeQueuedTransaction := make(chan struct{}, 1)
@@ -265,7 +265,7 @@ func (s *BackendTestSuite) TestDiscardQueuedTransaction() {
backend.TransactionQueue().Reset()
// log into account from which transactions will be sent
require.NoError(s.backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
require.NoError(s.backend.AccountManager().SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
// make sure you panic if transaction complete doesn't return
completeQueuedTransaction := make(chan struct{}, 1)
@@ -346,7 +346,7 @@ func (s *BackendTestSuite) TestCompleteMultipleQueuedTransactions() {
backend.TransactionQueue().Reset()
// log into account from which transactions will be sent
require.NoError(s.backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
require.NoError(s.backend.AccountManager().SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
// make sure you panic if transaction complete doesn't return
testTxCount := 3
@@ -461,7 +461,7 @@ func (s *BackendTestSuite) TestDiscardMultipleQueuedTransactions() {
backend.TransactionQueue().Reset()
// log into account from which transactions will be sent
require.NoError(s.backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
require.NoError(s.backend.AccountManager().SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
// make sure you panic if transaction complete doesn't return
testTxCount := 3
@@ -594,7 +594,7 @@ func (s *BackendTestSuite) TestNonExistentQueuedTransactions() {
require.NotNil(backend)
// log into account from which transactions will be sent
require.NoError(s.backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
require.NoError(s.backend.AccountManager().SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
// replace transaction notification handler
node.SetDefaultNodeNotificationHandler(func(string) {})
@@ -619,13 +619,13 @@ func (s *BackendTestSuite) TestEvictionOfQueuedTransactions() {
backend.TransactionQueue().Reset()
// log into account from which transactions will be sent
require.NoError(s.backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
require.NoError(s.backend.AccountManager().SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
txQueue := backend.TransactionQueue()
var i = 0
txIDs := [status.DefaultTxQueueCap + 5 + 10]status.QueuedTxID{}
backend.SetTransactionQueueHandler(func(queuedTx status.QueuedTx) {
log.Info("tx enqueued", "i", i + 1, "queue size", txQueue.Count(), "id", queuedTx.ID)
log.Info("tx enqueued", "i", i+1, "queue size", txQueue.Count(), "id", queuedTx.ID)
txIDs[i] = queuedTx.ID
i++
})

@@ -8,8 +8,8 @@ import (
"testing"
"github.com/ethereum/go-ethereum/log"
"github.com/status-im/status-go/geth/params"
"github.com/status-im/status-go/geth/common"
"github.com/status-im/status-go/geth/params"
)
func TestLogger(t *testing.T) {

@@ -45,7 +45,7 @@ type NodeManager interface {
// StopNode stop the running Status node.
// Stopped node cannot be resumed, one starts a new node instead.
StopNode() error
StopNode() (<-chan struct{}, error)
// RestartNode restart running Status node, fails if node is not running
RestartNode() (<-chan struct{}, error)
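
With this interface change, call sites that previously relied on StopNode
blocking must drain the returned channel themselves. A hypothetical adapter,
not part of this commit:

    // stopAndWait adapts the new asynchronous StopNode signature back into
    // a blocking call; nm is any implementation of common.NodeManager.
    func stopAndWait(nm common.NodeManager) error {
        nodeStopped, err := nm.StopNode()
        if err != nil {
            return err
        }
        <-nodeStopped // returns once the node has fully terminated
        return nil
    }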

@@ -4,10 +4,8 @@ import (
"errors"
"fmt"
"os"
"os/signal"
"path/filepath"
"sync"
"time"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
@@ -17,16 +15,13 @@ import (
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/rpc"
whisper "github.com/ethereum/go-ethereum/whisper/whisperv5"
"github.com/status-im/status-go/geth/common"
"github.com/status-im/status-go/geth/params"
)
// errors
var (
ErrNodeAlreadyExists = errors.New("there is a running node already, stop it before starting another one")
ErrNodeExists = errors.New("node is already running")
ErrNoRunningNode = errors.New("there is no running node")
ErrNodeOpTimedOut = errors.New("operation takes too long, timed out")
ErrInvalidRunningNode = errors.New("running node is not correctly initialized")
ErrInvalidNodeManager = errors.New("node manager is not properly initialized")
ErrInvalidWhisperService = errors.New("whisper service is unavailable")
ErrInvalidLightEthereumService = errors.New("LES service is unavailable")
@@ -40,6 +35,7 @@ type NodeManager struct {
sync.RWMutex
config *params.NodeConfig // Status node configuration
node *node.Node // reference to Geth P2P stack/node
nodeStarted chan struct{} // channel to wait for start up notifications
nodeStopped chan struct{} // channel to wait for termination notifications
whisperService *whisper.Whisper // reference to Whisper service
lesService *les.LightEthereum // reference to LES service
@@ -49,26 +45,7 @@ type NodeManager struct {
// NewNodeManager makes new instance of node manager
func NewNodeManager() *NodeManager {
m := &NodeManager{}
// allow interrupting running nodes
go func() {
sigc := make(chan os.Signal, 1)
signal.Notify(sigc, os.Interrupt)
defer signal.Stop(sigc)
<-sigc
if m.node == nil {
return
}
log.Info("Got interrupt, shutting down...")
go m.node.Stop() // nolint: errcheck
for i := 3; i > 0; i-- {
<-sigc
if i > 1 {
log.Info(fmt.Sprintf("Already shutting down, interrupt %d more times for panic.", i-1))
}
}
panic("interrupted!")
}()
go HaltOnInterruptSignal(m) // allow interrupting running nodes
return m
}
@@ -87,30 +64,23 @@ func (m *NodeManager) StartNode(config *params.NodeConfig) (<-chan struct{}, err
// startNode start Status node, fails if node is already started
func (m *NodeManager) startNode(config *params.NodeConfig) (<-chan struct{}, error) {
if m.node != nil || m.nodeStopped != nil {
return nil, ErrNodeAlreadyExists
if m.node != nil || m.nodeStarted != nil {
return nil, ErrNodeExists
}
var err error
m.node, err = MakeNode(config)
ethNode, err := MakeNode(config)
if err != nil {
return nil, err
}
m.config = config // preserve config of successfully created node
nodeStarted := make(chan struct{})
m.nodeStopped = make(chan struct{})
m.nodeStarted = make(chan struct{}, 1)
go func() {
defer HaltOnPanic()
if err := m.node.Start(); err != nil {
m.Lock() // TODO potential deadlock (add test case to prove otherwise)
m.config = nil
m.lesService = nil
m.whisperService = nil
m.rpcClient = nil
m.nodeStopped = nil
m.node = nil
// start underlying node
if err := ethNode.Start(); err != nil {
close(m.nodeStarted)
m.Lock()
m.nodeStarted = nil
m.Unlock()
SendSignal(SignalEnvelope{
Type: EventNodeCrashed,
@@ -118,48 +88,91 @@ func (m *NodeManager) startNode(config *params.NodeConfig) (<-chan struct{}, err
Error: fmt.Errorf("%v: %v", ErrNodeStartFailure, err).Error(),
},
})
close(nodeStarted)
return
}
// node is ready, use it
m.onNodeStarted(nodeStarted)
m.Lock()
m.node = ethNode
m.nodeStopped = make(chan struct{}, 1)
m.config = config
m.Unlock()
// underlying node is started; every method can use it, and we use it immediately
go func() {
if err := m.PopulateStaticPeers(); err != nil {
log.Error("Static peers population", "error", err)
}
}()
// notify all subscribers that Status node is started
close(m.nodeStarted)
SendSignal(SignalEnvelope{
Type: EventNodeStarted,
Event: struct{}{},
})
// wait until the underlying node is stopped
m.node.Wait()
// notify m.Stop() that node has been stopped
close(m.nodeStopped)
log.Info("Node is stopped")
}()
return nodeStarted, nil
return m.nodeStarted, nil
}
// onNodeStarted extra processing once we have running node
func (m *NodeManager) onNodeStarted(nodeStarted chan struct{}) {
// post-start processing
if err := m.populateStaticPeers(); err != nil {
log.Error("Static peers population", "error", err)
// StopNode stop Status node. Stopped node cannot be resumed.
func (m *NodeManager) StopNode() (<-chan struct{}, error) {
if m == nil {
return nil, ErrInvalidNodeManager
}
// obtain node info
enode := "none"
if server := m.node.Server(); server != nil {
if nodeInfo := server.NodeInfo(); nodeInfo != nil {
enode = nodeInfo.Enode
log.Info("Node is ready", "enode", enode)
}
m.Lock()
defer m.Unlock()
return m.stopNode()
}
// stopNode stop Status node. Stopped node cannot be resumed.
func (m *NodeManager) stopNode() (<-chan struct{}, error) {
if m.node == nil || m.nodeStarted == nil || m.nodeStopped == nil {
return nil, ErrNoRunningNode
}
<-m.nodeStarted // make sure you operate on fully started node
// now attempt to stop
if err := m.node.Stop(); err != nil {
return nil, err
}
// notify all subscribers that node is started
SendSignal(SignalEnvelope{
Type: EventNodeStarted,
Event: struct{}{},
})
close(nodeStarted)
nodeStopped := make(chan struct{}, 1)
go func() {
<-m.nodeStopped // Status node is stopped (code after Wait() is executed)
log.Info("Ready to reset node")
// wait until the node is stopped
m.node.Wait()
SendSignal(SignalEnvelope{
Type: EventNodeStopped,
Event: struct{}{},
})
close(m.nodeStopped)
log.Info("Node is stopped", "enode", enode)
// reset node params
m.Lock()
m.config = nil
m.lesService = nil
m.whisperService = nil
m.rpcClient = nil
m.nodeStarted = nil
m.node = nil
m.Unlock()
close(nodeStopped) // Status node is stopped, and we can create another
log.Info("Node manager resets node params")
// notify application that it can send more requests now
SendSignal(SignalEnvelope{
Type: EventNodeStopped,
Event: struct{}{},
})
log.Info("Node manager notifed app, that node has stopped")
}()
return nodeStopped, nil
}
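The same shape is reusable in isolation: run the blocking wait in a goroutine and hand the caller a channel that closes only after cleanup. A distilled sketch, independent of NodeManager (all names are illustrative):

// stopAsync wraps a synchronous stop/wait pair into the async style used
// above: the returned channel is closed only after teardown completes.
func stopAsync(stop func() error, wait func()) (<-chan struct{}, error) {
	if err := stop(); err != nil {
		return nil, err
	}
	done := make(chan struct{})
	go func() {
		wait()      // block until the node has fully exited
		close(done) // only now is it safe to start a new node
	}()
	return done, nil
}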
// IsNodeRunning confirms that the node is running
@ -171,50 +184,13 @@ func (m *NodeManager) IsNodeRunning() bool {
m.RLock()
defer m.RUnlock()
return m.node != nil && m.nodeStopped != nil
}
// StopNode stop Status node. Stopped node cannot be resumed.
func (m *NodeManager) StopNode() error {
if m == nil {
return ErrInvalidNodeManager
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return false
}
<-m.nodeStarted
m.Lock()
defer m.Unlock()
return m.stopNode()
}
// stopNode stop Status node. Stopped node cannot be resumed.
func (m *NodeManager) stopNode() error {
if m.node == nil {
return ErrNoRunningNode
}
if m.nodeStopped == nil { // node may be running, but required channel not set
return ErrInvalidRunningNode
}
if err := m.node.Stop(); err != nil {
return err
}
// wait till the previous node is fully stopped
select {
case <-m.nodeStopped:
// pass
case <-time.After(30 * time.Second):
return fmt.Errorf("%v: %s", ErrNodeOpTimedOut, common.NameOf(m.StopNode))
}
m.config = nil
m.lesService = nil
m.whisperService = nil
m.rpcClient = nil
m.nodeStopped = nil
m.node = nil
return nil
return true
}
// Node returns underlying Status node
@ -226,9 +202,11 @@ func (m *NodeManager) Node() (*node.Node, error) {
m.RLock()
defer m.RUnlock()
if m.node == nil {
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return nil, ErrNoRunningNode
}
<-m.nodeStarted
return m.node, nil
}
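Every read accessor from here on repeats the same guard: check both m.node and m.nodeStarted under the read lock, then block on the channel so callers never observe a half-started node. A hypothetical helper capturing the pattern (not part of this commit):

// withStartedNode rejects calls made before StartNode(), then blocks
// until startup has finished; receiving from a closed channel returns
// immediately, so fully started nodes pay no cost.
func (m *NodeManager) withStartedNode(fn func(n *node.Node) error) error {
	m.RLock()
	defer m.RUnlock()
	if m.node == nil || m.nodeStarted == nil {
		return ErrNoRunningNode
	}
	<-m.nodeStarted
	return fn(m.node)
}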
@ -247,9 +225,11 @@ func (m *NodeManager) PopulateStaticPeers() error {
// populateStaticPeers connects current node with our publicly available LES/SHH/Swarm cluster
func (m *NodeManager) populateStaticPeers() error {
if m.node == nil {
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return ErrNoRunningNode
}
<-m.nodeStarted
if !m.config.BootClusterConfig.Enabled {
log.Info("Boot cluster is disabled")
@ -286,9 +266,11 @@ func (m *NodeManager) AddPeer(url string) error {
// addPeer adds new static peer node
func (m *NodeManager) addPeer(url string) error {
if m == nil || m.node == nil {
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return ErrNoRunningNode
}
<-m.nodeStarted
server := m.node.Server()
if server == nil {
@ -315,15 +297,28 @@ func (m *NodeManager) ResetChainData() (<-chan struct{}, error) {
m.Lock()
defer m.Unlock()
if m.node == nil {
return m.resetChainData()
}
// resetChainData removes chain data from the data directory.
// The node is stopped, and a new node is started with a clean data directory.
func (m *NodeManager) resetChainData() (<-chan struct{}, error) {
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return nil, ErrNoRunningNode
}
<-m.nodeStarted
prevConfig := *m.config
if err := m.stopNode(); err != nil {
nodeStopped, err := m.stopNode()
if err != nil {
return nil, err
}
m.Unlock()
<-nodeStopped
m.Lock()
chainDataDir := filepath.Join(prevConfig.DataDir, prevConfig.Name, "lightchaindata")
if _, err := os.Stat(chainDataDir); os.IsNotExist(err) {
return nil, err
@ -336,7 +331,7 @@ func (m *NodeManager) ResetChainData() (<-chan struct{}, error) {
Type: EventChainDataRemoved,
Event: struct{}{},
})
log.Info("chaindata removed", "dir", chainDataDir)
log.Info("Chain data has been removed", "dir", chainDataDir)
return m.startNode(&prevConfig)
}
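Both resetChainData above and restartNode below release the write lock before waiting on nodeStopped: the cleanup goroutine spawned by stopNode() must itself take m.Lock() to reset the manager's fields before closing the channel, so waiting while still holding the lock would deadlock both goroutines. In annotated sketch form (illustrative only):

nodeStopped, err := m.stopNode()
if err != nil {
	return nil, err
}
m.Unlock()    // let stopNode's cleanup goroutine acquire the mutex
<-nodeStopped // closed only after the manager's fields are reset
m.Lock()      // reacquire so the caller's deferred Unlock stays balanced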
@ -350,15 +345,27 @@ func (m *NodeManager) RestartNode() (<-chan struct{}, error) {
m.Lock()
defer m.Unlock()
if m.node == nil {
return m.restartNode()
}
// restartNode restarts a running Status node; fails if node is not running
func (m *NodeManager) restartNode() (<-chan struct{}, error) {
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return nil, ErrNoRunningNode
}
<-m.nodeStarted
prevConfig := *m.config
if err := m.stopNode(); err != nil {
nodeStopped, err := m.stopNode()
if err != nil {
return nil, err
}
m.Unlock()
<-nodeStopped
m.Lock()
return m.startNode(&prevConfig)
}
@ -371,9 +378,11 @@ func (m *NodeManager) NodeConfig() (*params.NodeConfig, error) {
m.RLock()
defer m.RUnlock()
if m.node == nil {
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return nil, ErrNoRunningNode
}
<-m.nodeStarted
return m.config, nil
}
@ -387,9 +396,11 @@ func (m *NodeManager) LightEthereumService() (*les.LightEthereum, error) {
m.RLock()
defer m.RUnlock()
if m.node == nil {
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return nil, ErrNoRunningNode
}
<-m.nodeStarted
if m.lesService == nil {
if err := m.node.Service(&m.lesService); err != nil {
@ -414,9 +425,11 @@ func (m *NodeManager) WhisperService() (*whisper.Whisper, error) {
m.RLock()
defer m.RUnlock()
if m.node == nil {
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return nil, ErrNoRunningNode
}
<-m.nodeStarted
if m.whisperService == nil {
if err := m.node.Service(&m.whisperService); err != nil {
@ -441,9 +454,11 @@ func (m *NodeManager) AccountManager() (*accounts.Manager, error) {
m.RLock()
defer m.RUnlock()
if m.node == nil {
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return nil, ErrNoRunningNode
}
<-m.nodeStarted
accountManager := m.node.AccountManager()
if accountManager == nil {
@ -462,9 +477,11 @@ func (m *NodeManager) AccountKeyStore() (*keystore.KeyStore, error) {
m.RLock()
defer m.RUnlock()
if m.node == nil {
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return nil, ErrNoRunningNode
}
<-m.nodeStarted
accountManager := m.node.AccountManager()
if accountManager == nil {
@ -493,9 +510,11 @@ func (m *NodeManager) RPCClient() (*rpc.Client, error) {
m.RLock()
defer m.RUnlock()
if m.node == nil {
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return nil, ErrNoRunningNode
}
<-m.nodeStarted
if m.rpcClient == nil {
var err error

View File

@ -1,12 +1,16 @@
package node_test
import (
"encoding/json"
"fmt"
"math/rand"
"testing"
"time"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/les"
"github.com/ethereum/go-ethereum/log"
gethnode "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc"
whisper "github.com/ethereum/go-ethereum/whisper/whisperv5"
@ -30,7 +34,7 @@ func (s *ManagerTestSuite) SetupTest() {
s.Require().IsType(&node.NodeManager{}, s.NodeManager)
}
func (s *ManagerTestSuite) TestGettingReferencedServices() {
func (s *ManagerTestSuite) TestReferences() {
s.Require().NotNil(s.NodeManager)
var nilNodeManager *node.NodeManager
@ -41,6 +45,60 @@ func (s *ManagerTestSuite) TestGettingReferencedServices() {
initFn func() (interface{}, error)
expectedErr error
}{
{
"null manager, StartNode()",
func() (interface{}, error) {
return nilNodeManager.StartNode(nil)
},
node.ErrInvalidNodeManager,
},
{
"null manager, StopNode()",
func() (interface{}, error) {
return nilNodeManager.StopNode()
},
node.ErrInvalidNodeManager,
},
{
"null manager, RestartNode()",
func() (interface{}, error) {
return nilNodeManager.RestartNode()
},
node.ErrInvalidNodeManager,
},
{
"null manager, ResetChainData()",
func() (interface{}, error) {
return nilNodeManager.ResetChainData()
},
node.ErrInvalidNodeManager,
},
{
"null manager, IsNodeRunning()",
func() (interface{}, error) {
result := nilNodeManager.IsNodeRunning()
var err error
if !result {
err = node.ErrInvalidNodeManager
}
return nil, err
},
node.ErrInvalidNodeManager,
},
{
"null manager, PopulateStaticPeers()",
func() (interface{}, error) {
return nil, nilNodeManager.PopulateStaticPeers()
},
node.ErrInvalidNodeManager,
},
{
"null manager, AddPeer()",
func() (interface{}, error) {
return nil, nilNodeManager.AddPeer("enode://da3bf389a031f33fb55c9f5f54fde8473912402d27fffaa50efd74c0d0515f3a61daf6d52151f2876b19c15828e6f670352bff432b5ec457652e74755e8c864f@51.15.62.116:30303")
},
node.ErrInvalidNodeManager,
},
{
"null manager, get NodeConfig",
func() (interface{}, error) {
@ -90,6 +148,34 @@ func (s *ManagerTestSuite) TestGettingReferencedServices() {
},
node.ErrInvalidNodeManager,
},
{
"non-null manager, no running node, RestartNode()",
func() (interface{}, error) {
return s.NodeManager.RestartNode()
},
node.ErrNoRunningNode,
},
{
"non-null manager, no running node, ResetChainData()",
func() (interface{}, error) {
return s.NodeManager.ResetChainData()
},
node.ErrNoRunningNode,
},
{
"non-null manager, no running node, PopulateStaticPeers()",
func() (interface{}, error) {
return nil, s.NodeManager.PopulateStaticPeers()
},
node.ErrNoRunningNode,
},
{
"non-null manager, no running node, AddPeer()",
func() (interface{}, error) {
return nil, s.NodeManager.AddPeer("enode://da3bf389a031f33fb55c9f5f54fde8473912402d27fffaa50efd74c0d0515f3a61daf6d52151f2876b19c15828e6f670352bff432b5ec457652e74755e8c864f@51.15.62.116:30303")
},
node.ErrNoRunningNode,
},
{
"non-null manager, no running node, get NodeConfig",
func() (interface{}, error) {
@ -223,10 +309,8 @@ func (s *ManagerTestSuite) TestNodeStartStop() {
// try stopping non-started node
require.False(s.NodeManager.IsNodeRunning())
err = s.NodeManager.StopNode()
if s.Error(err) {
require.IsType(node.ErrNoRunningNode, err)
}
_, err = s.NodeManager.StopNode()
require.EqualError(err, node.ErrNoRunningNode.Error())
require.False(s.NodeManager.IsNodeRunning())
nodeStarted, err := s.NodeManager.StartNode(nodeConfig)
@ -237,13 +321,12 @@ func (s *ManagerTestSuite) TestNodeStartStop() {
// try starting another node (w/o stopping the previously started node)
_, err = s.NodeManager.StartNode(nodeConfig)
if s.Error(err) {
require.IsType(node.ErrNodeAlreadyExists, err)
}
require.EqualError(err, node.ErrNodeExists.Error())
// now stop node, and make sure that a new node, on different network can be started
err = s.NodeManager.StopNode()
nodeStopped, err := s.NodeManager.StopNode()
require.NoError(err)
<-nodeStopped
// start new node with exactly the same config
require.False(s.NodeManager.IsNodeRunning())
@ -271,11 +354,12 @@ func (s *ManagerTestSuite) TestNetworkSwitching() {
<-nodeStarted // wait till node is started
require.True(s.NodeManager.IsNodeRunning())
s.FirstBlockHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d")
FirstBlockHash(require, s.NodeManager, "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d")
// now stop node, and make sure that a new node, on different network can be started
err = s.NodeManager.StopNode()
nodeStopped, err := s.NodeManager.StopNode()
require.NoError(err)
<-nodeStopped
// start new node with completely different config
nodeConfig, err = MakeTestNodeConfig(params.RinkebyNetworkID)
@ -289,7 +373,7 @@ func (s *ManagerTestSuite) TestNetworkSwitching() {
require.True(s.NodeManager.IsNodeRunning())
// make sure we are on another network indeed
s.FirstBlockHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
FirstBlockHash(require, s.NodeManager, "0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
s.StopTestNode()
}
@ -310,5 +394,195 @@ func (s *ManagerTestSuite) TestResetChainData() {
s.True(s.NodeManager.IsNodeRunning()) // new node, with previous config should be running
// make sure we can read the first byte, and it is valid (for Rinkeby)
s.FirstBlockHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
FirstBlockHash(require, s.NodeManager, "0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
}
func (s *ManagerTestSuite) TestRestartNode() {
require := s.Require()
require.NotNil(s.NodeManager)
s.StartTestNode(params.RinkebyNetworkID)
defer s.StopTestNode()
s.True(s.NodeManager.IsNodeRunning())
nodeReady, err := s.NodeManager.RestartNode()
require.NoError(err)
<-nodeReady
s.True(s.NodeManager.IsNodeRunning()) // new node, with previous config should be running
// make sure we can read the first byte, and it is valid (for Rinkeby)
FirstBlockHash(require, s.NodeManager, "0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
}
func (s *ManagerTestSuite) TestRaceConditions() {
require := s.Require()
require.NotNil(s.NodeManager)
cnt := 25
progress := make(chan struct{}, cnt)
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
nodeConfig1, err := MakeTestNodeConfig(params.RopstenNetworkID)
require.NoError(err)
nodeConfig2, err := MakeTestNodeConfig(params.RinkebyNetworkID)
require.NoError(err)
nodeConfigs := []*params.NodeConfig{nodeConfig1, nodeConfig2}
var funcsToTest = []func(*params.NodeConfig){
func(config *params.NodeConfig) {
log.Info("StartNode()")
_, err := s.NodeManager.StartNode(config)
s.T().Logf("StartNode() for network: %d, error: %v", config.NetworkID, err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("StopNode()")
_, err := s.NodeManager.StopNode()
s.T().Logf("StopNode() for network: %d, error: %v", config.NetworkID, err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("Node()")
_, err := s.NodeManager.Node()
s.T().Logf("Node(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("IsNodeRunning()")
s.T().Logf("IsNodeRunning(), result: %v", s.NodeManager.IsNodeRunning())
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("PopulateStaticPeers()")
s.T().Logf("PopulateStaticPeers(), error: %v", s.NodeManager.PopulateStaticPeers())
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("ResetChainData()")
_, err := s.NodeManager.ResetChainData()
s.T().Logf("ResetChainData(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("RestartNode()")
_, err := s.NodeManager.RestartNode()
s.T().Logf("RestartNode(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("NodeConfig()")
_, err := s.NodeManager.NodeConfig()
s.T().Logf("NodeConfig(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("LightEthereumService()")
_, err := s.NodeManager.LightEthereumService()
s.T().Logf("LightEthereumService(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("WhisperService()")
_, err := s.NodeManager.WhisperService()
s.T().Logf("WhisperService(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("AccountManager()")
_, err := s.NodeManager.AccountManager()
s.T().Logf("AccountManager(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("AccountKeyStore()")
_, err := s.NodeManager.AccountKeyStore()
s.T().Logf("AccountKeyStore(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("RPCClient()")
_, err := s.NodeManager.RPCClient()
s.T().Logf("RPCClient(), error: %v", err)
progress <- struct{}{}
},
}
// increase StartNode()/StopNode() population
for i := 0; i < 5; i++ {
funcsToTest = append(funcsToTest, funcsToTest[0], funcsToTest[1])
}
for i := 0; i < cnt; i++ {
randConfig := nodeConfigs[rnd.Intn(len(nodeConfigs))]
randFunc := funcsToTest[rnd.Intn(len(funcsToTest))]
if rnd.Intn(100) > 75 { // introduce random delays
time.Sleep(500 * time.Millisecond)
}
go randFunc(randConfig)
}
for range progress {
cnt--
if cnt <= 0 {
break
}
}
time.Sleep(2 * time.Second) // so that we see some logs
nodeStopped, _ := s.NodeManager.StopNode() // just in case we have a node running
if nodeStopped != nil {
<-nodeStopped
}
}
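The randomized test is a plain fan-out/collect scheme and is most useful under the race detector (go test -race). Distilled, with n, rnd, and ops as illustrative names:

// Fire n random operations concurrently, then wait for all of them.
progress := make(chan struct{}, n)
for i := 0; i < n; i++ {
	go func() {
		ops[rnd.Intn(len(ops))]() // any public NodeManager method
		progress <- struct{}{}
	}()
}
for i := 0; i < n; i++ {
	<-progress // wait for every goroutine to report back
}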
func (s *ManagerTestSuite) TestNodeStartCrash() {
require := s.Require()
require.NotNil(s.NodeManager)
nodeConfig, err := MakeTestNodeConfig(params.RinkebyNetworkID)
require.NoError(err)
// start node outside the manager (on the same port), so that manager node.Start() method fails
outsideNode, err := node.MakeNode(nodeConfig)
require.NoError(outsideNode.Start())
// let's listen for node.crashed signal
signalReceived := false
node.SetDefaultNodeNotificationHandler(func(jsonEvent string) {
log.Info("Notification Received", "event", jsonEvent)
var envelope node.SignalEnvelope
err := json.Unmarshal([]byte(jsonEvent), &envelope)
s.NoError(err, fmt.Sprintf("cannot unmarshal JSON: %s", jsonEvent))
if envelope.Type == node.EventNodeCrashed {
signalReceived = true
}
})
// now try starting using node manager
nodeStarted, err := s.NodeManager.StartNode(nodeConfig)
require.NoError(err) // no error is thrown, as node is started in separate routine
<-nodeStarted // no deadlock either, as manager should close the channel on error
require.False(s.NodeManager.IsNodeRunning())
time.Sleep(2 * time.Second) // allow signal to propagate
require.True(signalReceived, "node crash signal is expected")
// stop outside node, and re-try
require.NoError(outsideNode.Stop())
signalReceived = false
nodeStarted, err = s.NodeManager.StartNode(nodeConfig)
require.NoError(err) // again, no error
<-nodeStarted // no deadlock, and no signal this time, manager should be able to start node
require.True(s.NodeManager.IsNodeRunning())
time.Sleep(2 * time.Second) // allow signal to propagate
require.False(signalReceived, "node should start w/o crash signal")
// cleanup
s.NodeManager.StopNode()
node.ResetDefaultNodeNotificationHandler()
}
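Outside tests, an application would install a similar handler to react to node.crashed events; a sketch of the consumer side, where the recovery policy in the comment is an assumption:

node.SetDefaultNodeNotificationHandler(func(jsonEvent string) {
	var envelope node.SignalEnvelope
	if err := json.Unmarshal([]byte(jsonEvent), &envelope); err != nil {
		return // ignore malformed events
	}
	if envelope.Type == node.EventNodeCrashed {
		// e.g. surface the error to the user and retry StartNode with backoff
	}
})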

View File

@ -51,6 +51,11 @@ func SetDefaultNodeNotificationHandler(fn NodeNotificationHandler) {
notificationHandler = fn
}
// ResetDefaultNodeNotificationHandler sets the notification handler back to the default one
func ResetDefaultNodeNotificationHandler() {
notificationHandler = TriggerDefaultNodeNotificationHandler
}
// TriggerDefaultNodeNotificationHandler triggers default notification handler (helpful in tests)
func TriggerDefaultNodeNotificationHandler(jsonEvent string) {
log.Info("Notification received", "event", jsonEvent)

View File

@ -2,7 +2,10 @@ package node
import (
"fmt"
"os"
"os/signal"
"github.com/ethereum/go-ethereum/log"
"github.com/status-im/status-go/geth/common"
)
@ -22,3 +25,23 @@ func HaltOnPanic() {
common.Fatalf(err) // os.exit(1) is called internally
}
}
// HaltOnInterruptSignal stops node and panics if you press Ctrl-C enough times
func HaltOnInterruptSignal(nodeManager *NodeManager) {
sigc := make(chan os.Signal, 1)
signal.Notify(sigc, os.Interrupt)
defer signal.Stop(sigc)
<-sigc
if nodeManager.node == nil {
return
}
log.Info("Got interrupt, shutting down...")
go nodeManager.node.Stop() // nolint: errcheck
for i := 3; i > 0; i-- {
<-sigc
if i > 1 {
log.Info(fmt.Sprintf("Already shutting down, interrupt %d more times for panic.", i-1))
}
}
panic("interrupted!")
}
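HaltOnInterruptSignal only reacts to a real SIGINT; in a test, one way to exercise it is to deliver the signal to the current process. A Unix-only sketch, not part of this commit:

// Deliver SIGINT to ourselves so the handler above runs without a real Ctrl-C.
if p, err := os.FindProcess(os.Getpid()); err == nil {
	p.Signal(os.Interrupt) // nolint: errcheck — first interrupt triggers node.Stop()
}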

View File

@ -12,6 +12,7 @@ import (
gethcommon "github.com/ethereum/go-ethereum/common"
"github.com/status-im/status-go/geth/common"
"github.com/status-im/status-go/geth/params"
assertions "github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
)
@ -81,20 +82,21 @@ func (s *BaseTestSuite) StopTestNode() {
require := s.Require()
require.NotNil(s.NodeManager)
require.True(s.NodeManager.IsNodeRunning())
require.NoError(s.NodeManager.StopNode())
nodeStopped, err := s.NodeManager.StopNode()
require.NoError(err)
<-nodeStopped
require.False(s.NodeManager.IsNodeRunning())
}
func (s *BaseTestSuite) FirstBlockHash(expectedHash string) {
require := s.Require()
require.NotNil(s.NodeManager)
func FirstBlockHash(require *assertions.Assertions, nodeManager common.NodeManager, expectedHash string) {
require.NotNil(nodeManager)
var firstBlock struct {
Hash gethcommon.Hash `json:"hash"`
}
// obtain RPC client for running node
runningNode, err := s.NodeManager.Node()
runningNode, err := nodeManager.Node()
require.NoError(err)
require.NotNil(runningNode)
@ -105,7 +107,7 @@ func (s *BaseTestSuite) FirstBlockHash(expectedHash string) {
err = rpcClient.CallContext(context.Background(), &firstBlock, "eth_getBlockByNumber", "0x0", true)
require.NoError(err)
s.Equal(expectedHash, firstBlock.Hash.Hex())
require.Equal(expectedHash, firstBlock.Hash.Hex())
}
func MakeTestNodeConfig(networkID int) (*params.NodeConfig, error) {