Merge pull request #175 from farazdagi/feature/expose-in-proc-rpc-server

Feature: expose in-proc RPC server
Roman Volosovskyi 2017-05-29 18:04:01 +03:00 committed by GitHub
commit d1d62686d9
27 changed files with 1359 additions and 364 deletions

View File

@ -32,13 +32,26 @@ func StartNode(configJSON *C.char) *C.char {
return makeJSONResponse(err)
}
err = statusAPI.StartNodeNonBlocking(config)
_, err = statusAPI.StartNodeAsync(config)
return makeJSONResponse(err)
}
//export StopNode
func StopNode() *C.char {
return makeJSONResponse(statusAPI.StopNode())
_, err := statusAPI.StopNodeAsync()
return makeJSONResponse(err)
}
//export ResetChainData
func ResetChainData() *C.char {
_, err := statusAPI.ResetChainDataAsync()
return makeJSONResponse(err)
}
//export CallRPC
func CallRPC(inputJSON *C.char) *C.char {
outputJSON := statusAPI.CallRPC(C.GoString(inputJSON))
return C.CString(outputJSON)
}
//export ResumeNode
@ -47,11 +60,6 @@ func ResumeNode() *C.char {
return makeJSONResponse(err)
}
//export ResetChainData
func ResetChainData() *C.char {
return makeJSONResponse(statusAPI.ResetChainData())
}
//export StopNodeRPCServer
func StopNodeRPCServer() *C.char {
err := fmt.Errorf("%v: %v", common.ErrDeprecatedMethod.Error(), "StopNodeRPCServer")
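
Taken together, the bindings above now expose the async node controls plus a raw JSON-RPC entry point. A minimal driver sketch, in the style of the exported-API tests in the next file (nodeConfigJSON is a hypothetical JSON node configuration; the response shape follows common.APIResponse as decoded in those tests):

    rawResponse := StartNode(C.CString(nodeConfigJSON)) // JSON-encoded common.APIResponse
    rpcResponse := CallRPC(C.CString(`{"jsonrpc":"2.0","method":"web3_sha3","params":["0x68656c6c6f20776f726c64"],"id":64}`))
    received := C.GoString(rpcResponse) // raw response produced by the node's in-proc RPC server
    StopNode()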

View File

@ -55,6 +55,10 @@ func testExportedAPI(t *testing.T, done chan struct{}) {
"stop/resume node",
testStopResumeNode,
},
{
"call RPC on in-proc handler",
testCallRPC,
},
{
"create main and child accounts",
testCreateChildAccount,
@ -335,7 +339,6 @@ func testStopResumeNode(t *testing.T) bool {
response := common.APIResponse{}
rawResponse = StartNode(C.CString(nodeConfigJSON))
if err = json.Unmarshal([]byte(C.GoString(rawResponse)), &response); err != nil {
t.Errorf("cannot decode StartNode response (%s): %v", C.GoString(rawResponse), err)
return false
@ -372,6 +375,18 @@ func testStopResumeNode(t *testing.T) bool {
return true
}
func testCallRPC(t *testing.T) bool {
expected := `{"jsonrpc":"2.0","id":64,"result":"0x47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad"}` + "\n"
rawResponse := CallRPC(C.CString(`{"jsonrpc":"2.0","method":"web3_sha3","params":["0x68656c6c6f20776f726c64"],"id":64}`))
received := C.GoString(rawResponse)
if expected != received {
t.Errorf("unexpected reponse: expected: %v, got: %v", expected, received)
return false
}
return true
}
func testCreateChildAccount(t *testing.T) bool {
// to make sure that we start with an empty account (which might have been populated during previous tests)
if err := statusAPI.Logout(); err != nil {

View File

@ -40,25 +40,29 @@ func (api *StatusAPI) StartNode(config *params.NodeConfig) error {
if err != nil {
return err
}
<-nodeStarted // do not return up until backend is ready
<-nodeStarted
return nil
}
// StartNodeNonBlocking start Status node, fails if node is already started
// Returns immediately w/o waiting for node to start (relies on listening
// for node.started signal)
func (api *StatusAPI) StartNodeNonBlocking(config *params.NodeConfig) error {
_, err := api.b.StartNode(config)
if err != nil {
return err
}
return nil
// StartNodeAsync start Status node, fails if node is already started
// Returns immediately w/o waiting for node to start (see node.ready)
func (api *StatusAPI) StartNodeAsync(config *params.NodeConfig) (<-chan struct{}, error) {
return api.b.StartNode(config)
}
// StopNode stop Status node. Stopped node cannot be resumed.
func (api *StatusAPI) StopNode() error {
nodeStopped, err := api.b.StopNode()
if err != nil {
return err
}
<-nodeStopped
return nil
}
// StopNodeAsync stop Status node. Stopped node cannot be resumed.
// Returns immediately, w/o waiting for node to stop (see node.stopped)
func (api *StatusAPI) StopNodeAsync() (<-chan struct{}, error) {
return api.b.StopNode()
}
@ -72,6 +76,11 @@ func (api *StatusAPI) RestartNode() error {
return nil
}
// RestartNodeAsync restart running Status node, in async manner
func (api *StatusAPI) RestartNodeAsync() (<-chan struct{}, error) {
return api.b.RestartNode()
}
// ResetChainData remove chain data from data directory.
// Node is stopped, and new node is started, with clean data directory.
func (api *StatusAPI) ResetChainData() error {
@ -83,14 +92,14 @@ func (api *StatusAPI) ResetChainData() error {
return nil
}
// PopulateStaticPeers connects current node with our publicly available LES/SHH/Swarm cluster
func (api *StatusAPI) PopulateStaticPeers() error {
return api.b.nodeManager.PopulateStaticPeers()
// ResetChainDataAsync remove chain data from data directory, in async manner
func (api *StatusAPI) ResetChainDataAsync() (<-chan struct{}, error) {
return api.b.ResetChainData()
}
// AddPeer adds new static peer node
func (api *StatusAPI) AddPeer(url string) error {
return api.b.nodeManager.AddPeer(url)
// CallRPC executes RPC request on node's in-proc RPC server
func (api *StatusAPI) CallRPC(inputJSON string) string {
return api.b.CallRPC(inputJSON)
}
// CreateAccount creates an internal geth account
@ -98,38 +107,38 @@ func (api *StatusAPI) AddPeer(url string) error {
// Public key of CKD#1 is returned, with CKD#2 securely encoded into account key file (to be used for
// sub-account derivations)
func (api *StatusAPI) CreateAccount(password string) (address, pubKey, mnemonic string, err error) {
return api.b.CreateAccount(password)
return api.b.AccountManager().CreateAccount(password)
}
// CreateChildAccount creates sub-account for an account identified by parent address.
// CKD#2 is used as root for master accounts (when parentAddress is "").
// Otherwise (when parentAddress != ""), child is derived directly from parent.
func (api *StatusAPI) CreateChildAccount(parentAddress, password string) (address, pubKey string, err error) {
return api.b.CreateChildAccount(parentAddress, password)
return api.b.AccountManager().CreateChildAccount(parentAddress, password)
}
// RecoverAccount re-creates master key using given details.
// Once master key is re-generated, it is inserted into keystore (if not already there).
func (api *StatusAPI) RecoverAccount(password, mnemonic string) (address, pubKey string, err error) {
return api.b.RecoverAccount(password, mnemonic)
return api.b.AccountManager().RecoverAccount(password, mnemonic)
}
// VerifyAccountPassword tries to decrypt a given account key file, with a provided password.
// If no error is returned, then account is considered verified.
func (api *StatusAPI) VerifyAccountPassword(keyStoreDir, address, password string) (*keystore.Key, error) {
return api.b.VerifyAccountPassword(keyStoreDir, address, password)
return api.b.AccountManager().VerifyAccountPassword(keyStoreDir, address, password)
}
// SelectAccount selects current account, by verifying that address has corresponding account which can be decrypted
// using provided password. Once verification is done, decrypted key is injected into Whisper (as a single identity,
// all previous identities are removed).
func (api *StatusAPI) SelectAccount(address, password string) error {
return api.b.SelectAccount(address, password)
return api.b.AccountManager().SelectAccount(address, password)
}
// Logout clears whisper identities
func (api *StatusAPI) Logout() error {
return api.b.Logout()
return api.b.AccountManager().Logout()
}
// CompleteTransaction instructs backend to complete sending of a given transaction
@ -152,20 +161,19 @@ func (api *StatusAPI) DiscardTransactions(ids string) map[string]common.RawDisca
return api.b.DiscardTransactions(ids)
}
// Parse creates a new jail cell context, with the given chatID as identifier.
// JailParse creates a new jail cell context, with the given chatID as identifier.
// New context executes provided JavaScript code, right after the initialization.
func (api *StatusAPI) JailParse(chatID string, js string) string {
return api.b.jailManager.Parse(chatID, js)
}
// Call executes given JavaScript function w/i a jail cell context identified by the chatID.
// JailCall executes given JavaScript function w/i a jail cell context identified by the chatID.
// Jail cell is cloned before call is executed, i.e. all calls execute w/i their own contexts.
func (api *StatusAPI) JailCall(chatID string, path string, args string) string {
return api.b.jailManager.Call(chatID, path, args)
}
// BaseJS allows to setup initial JavaScript to be loaded on each jail.Parse()
// JailBaseJS allows setting up initial JavaScript to be loaded on each jail.Parse()
func (api *StatusAPI) JailBaseJS(js string) {
api.b.jailManager.BaseJS(js)
}
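
For callers migrating from the blocking API, each *Async variant hands back the completion channel instead of draining it. A minimal usage sketch against the methods defined in this file (statusAPI is assumed to be a *StatusAPI instance):

    // Start without blocking; wait on the returned channel only where
    // synchronous semantics are needed (StartNode above does exactly this).
    nodeStarted, err := statusAPI.StartNodeAsync(config)
    if err != nil {
        return err
    }
    <-nodeStarted

    // Execute a raw JSON-RPC request on the node's in-proc RPC server.
    response := statusAPI.CallRPC(`{"jsonrpc":"2.0","method":"web3_sha3","params":["0x68656c6c6f20776f726c64"],"id":64}`)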

View File

@ -1,8 +1,14 @@
package api_test
import (
"io/ioutil"
"math/rand"
"os"
"strconv"
"testing"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/status-im/status-go/geth/api"
"github.com/status-im/status-go/geth/params"
. "github.com/status-im/status-go/geth/testing"
@ -26,33 +32,99 @@ func (s *APITestSuite) SetupTest() {
s.api = statusAPI
}
func (s *APITestSuite) TestStartStopRaces() {
func (s *APITestSuite) TestCHTUpdate() {
require := s.Require()
require.NotNil(s.api)
nodeConfig, err := MakeTestNodeConfig(params.RinkebyNetworkID)
tmpDir, err := ioutil.TempDir(os.TempDir(), "cht-updates")
require.NoError(err)
defer os.RemoveAll(tmpDir)
url := "https://gist.githubusercontent.com/farazdagi/3d05d1d3bfa36db7b650c955e23fd7ae/raw/?u=" + strconv.Itoa(int(time.Now().Unix()))
configJSON := `{
"NetworkId": ` + strconv.Itoa(params.RopstenNetworkID) + `,
"DataDir": "` + tmpDir + `",
"LogEnabled": true,
"LogLevel": "INFO",
"LightEthConfig": {
"CHTRootConfigURL": "` + url + `"
}
}`
nodeConfig, err := params.LoadNodeConfig(configJSON)
require.NoError(err)
progress := make(chan struct{}, 100)
// start node
nodeConfig.DevMode = true
s.api.StartNode(nodeConfig)
time.Sleep(TestConfig.Node.SyncSeconds * time.Second)
s.api.StopNode()
}
start := func() {
s.api.StartNode(nodeConfig)
progress <- struct{}{}
}
stop := func() {
s.api.StopNode()
progress <- struct{}{}
func (s *APITestSuite) TestRaceConditions() {
require := s.Require()
require.NotNil(s.api)
cnt := 25
progress := make(chan struct{}, cnt)
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
nodeConfig1, err := MakeTestNodeConfig(params.RopstenNetworkID)
require.NoError(err)
nodeConfig2, err := MakeTestNodeConfig(params.RinkebyNetworkID)
require.NoError(err)
nodeConfigs := []*params.NodeConfig{nodeConfig1, nodeConfig2}
var funcsToTest = []func(*params.NodeConfig){
func(config *params.NodeConfig) {
log.Info("StartNodeAsync()")
_, err := s.api.StartNodeAsync(config)
s.T().Logf("StartNodeAsync() for network: %d, error: %v", config.NetworkID, err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("StopNodeAsync()")
_, err := s.api.StopNodeAsync()
s.T().Logf("StopNodeAsync(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("RestartNodeAsync()")
_, err := s.api.RestartNodeAsync()
s.T().Logf("RestartNodeAsync(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("ResetChainDataAsync()")
_, err := s.api.ResetChainDataAsync()
s.T().Logf("ResetChainDataAsync(), error: %v", err)
progress <- struct{}{}
},
}
for i := 0; i < 50; i++ {
go start()
go stop()
// increase StartNode()/StopNode() population
for i := 0; i < 5; i++ {
funcsToTest = append(funcsToTest, funcsToTest[0], funcsToTest[1])
}
for i := 0; i < cnt; i++ {
randConfig := nodeConfigs[rnd.Intn(len(nodeConfigs))]
randFunc := funcsToTest[rnd.Intn(len(funcsToTest))]
if rnd.Intn(100) > 75 { // introduce random delays
time.Sleep(500 * time.Millisecond)
}
go randFunc(randConfig)
}
cnt := 0
for range progress {
cnt += 1
if cnt >= 100 {
cnt -= 1
if cnt <= 0 {
break
}
}
time.Sleep(2 * time.Second) // so that we see some logs
s.api.StopNode() // just in case we have a node running
}

View File

@ -1,7 +1,8 @@
package api
import (
"github.com/ethereum/go-ethereum/accounts/keystore"
"sync"
gethcommon "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/les"
"github.com/ethereum/go-ethereum/log"
@ -13,10 +14,13 @@ import (
// StatusBackend implements Status.im service
type StatusBackend struct {
sync.Mutex
nodeReady chan struct{} // channel to wait for when node is fully ready
nodeManager common.NodeManager
accountManager common.AccountManager
txQueueManager common.TxQueueManager
jailManager common.JailManager
rpcManager common.RPCManager
}
// NewStatusBackend create a new NewStatusBackend instance
@ -30,6 +34,7 @@ func NewStatusBackend() *StatusBackend {
accountManager: accountManager,
txQueueManager: node.NewTxQueueManager(nodeManager, accountManager),
jailManager: jail.New(nodeManager),
rpcManager: node.NewRPCManager(nodeManager),
}
}
@ -55,102 +60,115 @@ func (m *StatusBackend) IsNodeRunning() bool {
// StartNode start Status node, fails if node is already started
func (m *StatusBackend) StartNode(config *params.NodeConfig) (<-chan struct{}, error) {
backendReady := make(chan struct{})
m.Lock()
defer m.Unlock()
if m.nodeReady != nil {
return nil, node.ErrNodeExists
}
nodeStarted, err := m.nodeManager.StartNode(config)
if err != nil {
return nil, err
}
go m.onNodeStart(backendReady, nodeStarted)
return backendReady, err
m.nodeReady = make(chan struct{}, 1)
go m.onNodeStart(nodeStarted, m.nodeReady) // waits on nodeStarted, writes to backendReady
return m.nodeReady, err
}
func (m *StatusBackend) onNodeStart(backendReady chan struct{}, nodeStarted <-chan struct{}) {
defer close(backendReady)
// onNodeStart does everything required to prepare backend
func (m *StatusBackend) onNodeStart(nodeStarted <-chan struct{}, backendReady chan struct{}) {
<-nodeStarted
if err := m.registerHandlers(); err != nil {
log.Error("Handler registration failed", "err", err)
}
m.accountManager.ReSelectAccount()
log.Info("Account reselected")
close(backendReady)
node.SendSignal(node.SignalEnvelope{
Type: node.EventNodeReady,
Event: struct{}{},
})
}
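
Because node.ready is now emitted only after handlers are registered and the account is reselected, clients may also key off the signal rather than holding the channel. A hedged sketch using the notification handler seen in the tests (the envelope is assumed to be delivered as a JSON-encoded SignalEnvelope):

    node.SetDefaultNodeNotificationHandler(func(jsonEvent string) {
        // fires for every SignalEnvelope, including Type == node.EventNodeReady
        log.Info("signal received", "event", jsonEvent)
    })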
// StopNode stop Status node. Stopped node cannot be resumed.
func (m *StatusBackend) StopNode() (<-chan struct{}, error) {
m.Lock()
defer m.Unlock()
if m.nodeReady == nil {
return nil, node.ErrNoRunningNode
}
<-m.nodeReady
nodeStopped, err := m.nodeManager.StopNode()
if err != nil {
return nil, err
}
backendStopped := make(chan struct{}, 1)
go func() {
<-nodeStopped
m.Lock()
m.nodeReady = nil
m.Unlock()
close(backendStopped)
}()
return backendStopped, nil
}
// RestartNode restart running Status node, fails if node is not running
func (m *StatusBackend) RestartNode() (<-chan struct{}, error) {
backendReady := make(chan struct{})
m.Lock()
defer m.Unlock()
if m.nodeReady == nil {
return nil, node.ErrNoRunningNode
}
<-m.nodeReady
nodeRestarted, err := m.nodeManager.RestartNode()
if err != nil {
return nil, err
}
go m.onNodeStart(backendReady, nodeRestarted)
return backendReady, err
}
m.nodeReady = make(chan struct{}, 1)
go m.onNodeStart(nodeRestarted, m.nodeReady) // waits on nodeRestarted, writes to backendReady
// StopNode stop Status node. Stopped node cannot be resumed.
func (m *StatusBackend) StopNode() error {
return m.nodeManager.StopNode()
return m.nodeReady, err
}
// ResetChainData remove chain data from data directory.
// Node is stopped, and new node is started, with clean data directory.
func (m *StatusBackend) ResetChainData() (<-chan struct{}, error) {
backendReady := make(chan struct{})
nodeRestarted, err := m.nodeManager.ResetChainData()
m.Lock()
defer m.Unlock()
if m.nodeReady == nil {
return nil, node.ErrNoRunningNode
}
<-m.nodeReady
nodeReset, err := m.nodeManager.ResetChainData()
if err != nil {
return nil, err
}
go m.onNodeStart(backendReady, nodeRestarted)
return backendReady, err
m.nodeReady = make(chan struct{}, 1)
go m.onNodeStart(nodeReset, m.nodeReady) // waits on nodeReset, writes to backendReady
return m.nodeReady, err
}
// CreateAccount creates an internal geth account
// BIP44-compatible keys are generated: CKD#1 is stored as account key, CKD#2 stored as sub-account root
// Public key of CKD#1 is returned, with CKD#2 securely encoded into account key file (to be used for
// sub-account derivations)
func (m *StatusBackend) CreateAccount(password string) (address, pubKey, mnemonic string, err error) {
return m.accountManager.CreateAccount(password)
}
// CreateChildAccount creates sub-account for an account identified by parent address.
// CKD#2 is used as root for master accounts (when parentAddress is "").
// Otherwise (when parentAddress != ""), child is derived directly from parent.
func (m *StatusBackend) CreateChildAccount(parentAddress, password string) (address, pubKey string, err error) {
return m.accountManager.CreateChildAccount(parentAddress, password)
}
// RecoverAccount re-creates master key using given details.
// Once master key is re-generated, it is inserted into keystore (if not already there).
func (m *StatusBackend) RecoverAccount(password, mnemonic string) (address, pubKey string, err error) {
return m.accountManager.RecoverAccount(password, mnemonic)
}
// VerifyAccountPassword tries to decrypt a given account key file, with a provided password.
// If no error is returned, then account is considered verified.
func (m *StatusBackend) VerifyAccountPassword(keyStoreDir, address, password string) (*keystore.Key, error) {
return m.accountManager.VerifyAccountPassword(keyStoreDir, address, password)
}
// SelectAccount selects current account, by verifying that address has corresponding account which can be decrypted
// using provided password. Once verification is done, decrypted key is injected into Whisper (as a single identity,
// all previous identities are removed).
func (m *StatusBackend) SelectAccount(address, password string) error {
return m.accountManager.SelectAccount(address, password)
}
// ReSelectAccount selects previously selected account, often, after node restart.
func (m *StatusBackend) ReSelectAccount() error {
return m.accountManager.ReSelectAccount()
}
// Logout clears whisper identities
func (m *StatusBackend) Logout() error {
return m.accountManager.Logout()
}
// SelectedAccount returns currently selected account
func (m *StatusBackend) SelectedAccount() (*common.SelectedExtKey, error) {
return m.accountManager.SelectedAccount()
// CallRPC executes RPC request on node's in-proc RPC server
func (m *StatusBackend) CallRPC(inputJSON string) string {
return m.rpcManager.Call(inputJSON)
}
// CompleteTransaction instructs backend to complete sending of a given transaction
@ -194,13 +212,5 @@ func (m *StatusBackend) registerHandlers() error {
lightEthereum.StatusBackend.SetTransactionReturnHandler(m.txQueueManager.TransactionReturnHandler())
log.Info("Registered handler", "fn", "TransactionReturnHandler")
m.ReSelectAccount()
log.Info("Account reselected")
node.SendSignal(node.SignalEnvelope{
Type: node.EventNodeReady,
Event: struct{}{},
})
return nil
}
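
StopNode, RestartNode, and ResetChainData now all return completion channels; synchronous callers simply drain them, as the backend tests below do:

    backendStopped, err := backend.StopNode()
    if err != nil {
        return err
    }
    <-backendStopped // node fully stopped, nodeReady cleared for the next start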

View File

@ -35,7 +35,7 @@ func (s *BackendTestSuite) TestAccountsList() {
require.Zero(len(accounts), "accounts returned, while there should be none (we haven't logged in yet)")
// create an account
address, _, _, err := s.backend.CreateAccount(TestConfig.Account1.Password)
address, _, _, err := s.backend.AccountManager().CreateAccount(TestConfig.Account1.Password)
require.NoError(err)
// ensure that there is still no accounts returned
@ -43,7 +43,7 @@ func (s *BackendTestSuite) TestAccountsList() {
require.Zero(len(accounts), "accounts returned, while there should be none (we haven't logged in yet)")
// select account (sub-accounts will be created for this key)
err = s.backend.SelectAccount(address, TestConfig.Account1.Password)
err = s.backend.AccountManager().SelectAccount(address, TestConfig.Account1.Password)
require.NoError(err, "account selection failed")
// at this point main account should show up
@ -53,7 +53,7 @@ func (s *BackendTestSuite) TestAccountsList() {
fmt.Sprintf("main account is not retured as the first key: got %s, expected %s", accounts[0].Hex(), "0x"+address))
// create sub-account 1
subAccount1, subPubKey1, err := s.backend.CreateChildAccount("", TestConfig.Account1.Password)
subAccount1, subPubKey1, err := s.backend.AccountManager().CreateChildAccount("", TestConfig.Account1.Password)
require.NoError(err, "cannot create sub-account")
// now we expect to see both main account and sub-account 1
@ -63,7 +63,7 @@ func (s *BackendTestSuite) TestAccountsList() {
require.Equal(string(accounts[1].Hex()), "0x"+subAccount1, "subAccount1 not returned")
// create sub-account 2, index automatically progresses
subAccount2, subPubKey2, err := s.backend.CreateChildAccount("", TestConfig.Account1.Password)
subAccount2, subPubKey2, err := s.backend.AccountManager().CreateChildAccount("", TestConfig.Account1.Password)
require.NoError(err, "cannot create sub-account")
require.False(subAccount1 == subAccount2 || subPubKey1 == subPubKey2, "sub-account index auto-increment failed")
@ -93,7 +93,7 @@ func (s *BackendTestSuite) TestCreateChildAccount() {
require.NotNil(keyStore)
// create an account
address, pubKey, mnemonic, err := s.backend.CreateAccount(TestConfig.Account1.Password)
address, pubKey, mnemonic, err := s.backend.AccountManager().CreateAccount(TestConfig.Account1.Password)
require.NoError(err)
s.T().Logf("Account created: {address: %s, key: %s, mnemonic:%s}", address, pubKey, mnemonic)
@ -106,28 +106,28 @@ func (s *BackendTestSuite) TestCreateChildAccount() {
require.NotNil(key.ExtendedKey, "CKD#2 has not been generated for new account")
// try creating sub-account, w/o selecting main account i.e. w/o login to main account
_, _, err = s.backend.CreateChildAccount("", TestConfig.Account1.Password)
_, _, err = s.backend.AccountManager().CreateChildAccount("", TestConfig.Account1.Password)
require.EqualError(node.ErrNoAccountSelected, err.Error(), "expected error is not returned (tried to create sub-account w/o login)")
err = s.backend.SelectAccount(address, TestConfig.Account1.Password)
err = s.backend.AccountManager().SelectAccount(address, TestConfig.Account1.Password)
require.NoError(err, "cannot select account")
// try to create sub-account with wrong password
_, _, err = s.backend.CreateChildAccount("", "wrong password")
_, _, err = s.backend.AccountManager().CreateChildAccount("", "wrong password")
expectedErr := errors.New("cannot retrieve a valid key for a given account: could not decrypt key with given passphrase")
require.EqualError(expectedErr, err.Error(), "create sub-account with wrong password")
// create sub-account (from implicit parent)
subAccount1, subPubKey1, err := s.backend.CreateChildAccount("", TestConfig.Account1.Password)
subAccount1, subPubKey1, err := s.backend.AccountManager().CreateChildAccount("", TestConfig.Account1.Password)
require.NoError(err, "cannot create sub-account")
// make sure that sub-account index automatically progresses
subAccount2, subPubKey2, err := s.backend.CreateChildAccount("", TestConfig.Account1.Password)
subAccount2, subPubKey2, err := s.backend.AccountManager().CreateChildAccount("", TestConfig.Account1.Password)
require.NoError(err)
require.False(subAccount1 == subAccount2 || subPubKey1 == subPubKey2, "sub-account index auto-increment failed")
// create sub-account (from explicit parent)
subAccount3, subPubKey3, err := s.backend.CreateChildAccount(subAccount2, TestConfig.Account1.Password)
subAccount3, subPubKey3, err := s.backend.AccountManager().CreateChildAccount(subAccount2, TestConfig.Account1.Password)
require.NoError(err)
require.False(subAccount1 == subAccount3 || subPubKey1 == subPubKey3 || subAccount2 == subAccount3 || subPubKey2 == subPubKey3)
}
@ -144,12 +144,12 @@ func (s *BackendTestSuite) TestRecoverAccount() {
require.NotNil(keyStore)
// create an account
address, pubKey, mnemonic, err := s.backend.CreateAccount(TestConfig.Account1.Password)
address, pubKey, mnemonic, err := s.backend.AccountManager().CreateAccount(TestConfig.Account1.Password)
require.NoError(err)
s.T().Logf("Account created: {address: %s, key: %s, mnemonic:%s}", address, pubKey, mnemonic)
// try recovering using password + mnemonic
addressCheck, pubKeyCheck, err := s.backend.RecoverAccount(TestConfig.Account1.Password, mnemonic)
addressCheck, pubKeyCheck, err := s.backend.AccountManager().RecoverAccount(TestConfig.Account1.Password, mnemonic)
require.NoError(err, "recover account failed")
require.False(address != addressCheck || pubKey != pubKeyCheck, "incorrect account details recovered")
@ -163,7 +163,7 @@ func (s *BackendTestSuite) TestRecoverAccount() {
require.NoError(keyStore.Delete(account, TestConfig.Account1.Password), "cannot remove account")
addressCheck, pubKeyCheck, err = s.backend.RecoverAccount(TestConfig.Account1.Password, mnemonic)
addressCheck, pubKeyCheck, err = s.backend.AccountManager().RecoverAccount(TestConfig.Account1.Password, mnemonic)
require.NoError(err, "recover account failed (for non-cached account)")
require.False(address != addressCheck || pubKey != pubKeyCheck,
"incorrect account details recovered (for non-cached account)")
@ -174,7 +174,7 @@ func (s *BackendTestSuite) TestRecoverAccount() {
require.Equal(extChild2String, key.ExtendedKey.String(), "CKD#2 key mismatch")
// make sure that calling import several times, just returns from cache (no error is expected)
addressCheck, pubKeyCheck, err = s.backend.RecoverAccount(TestConfig.Account1.Password, mnemonic)
addressCheck, pubKeyCheck, err = s.backend.AccountManager().RecoverAccount(TestConfig.Account1.Password, mnemonic)
require.NoError(err, "recover account failed (for non-cached account)")
require.False(address != addressCheck || pubKey != pubKeyCheck,
"incorrect account details recovered (for non-cached account)")
@ -184,7 +184,7 @@ func (s *BackendTestSuite) TestRecoverAccount() {
// make sure that identity is not (yet) injected
require.False(whisperService.HasKeyPair(pubKeyCheck), "identity already present in whisper")
require.NoError(s.backend.SelectAccount(addressCheck, TestConfig.Account1.Password))
require.NoError(s.backend.AccountManager().SelectAccount(addressCheck, TestConfig.Account1.Password))
require.True(whisperService.HasKeyPair(pubKeyCheck), "identity not injected into whisper")
}
@ -199,11 +199,11 @@ func (s *BackendTestSuite) TestSelectAccount() {
whisperService := s.WhisperService()
// create an account
address1, pubKey1, _, err := s.backend.CreateAccount(TestConfig.Account1.Password)
address1, pubKey1, _, err := s.backend.AccountManager().CreateAccount(TestConfig.Account1.Password)
require.NoError(err)
s.T().Logf("Account created: {address: %s, key: %s}", address1, pubKey1)
address2, pubKey2, _, err := s.backend.CreateAccount(TestConfig.Account1.Password)
address2, pubKey2, _, err := s.backend.AccountManager().CreateAccount(TestConfig.Account1.Password)
require.NoError(err)
s.T().Logf("Account created: {address: %s, key: %s}", address2, pubKey2)
@ -211,17 +211,17 @@ func (s *BackendTestSuite) TestSelectAccount() {
require.False(whisperService.HasKeyPair(pubKey1), "identity already present in whisper")
// try selecting with wrong password
err = s.backend.SelectAccount(address1, "wrongPassword")
err = s.backend.AccountManager().SelectAccount(address1, "wrongPassword")
expectedErr := errors.New("cannot retrieve a valid key for a given account: could not decrypt key with given passphrase")
require.EqualError(expectedErr, err.Error(), "select account is expected to throw error: wrong password used")
err = s.backend.SelectAccount(address1, TestConfig.Account1.Password)
err = s.backend.AccountManager().SelectAccount(address1, TestConfig.Account1.Password)
require.NoError(err)
require.True(whisperService.HasKeyPair(pubKey1), "identity not injected into whisper")
// select another account, make sure that previous account is wiped out from Whisper cache
require.False(whisperService.HasKeyPair(pubKey2), "identity already present in whisper")
require.NoError(s.backend.SelectAccount(address2, TestConfig.Account1.Password))
require.NoError(s.backend.AccountManager().SelectAccount(address2, TestConfig.Account1.Password))
require.True(whisperService.HasKeyPair(pubKey2), "identity not injected into whisper")
require.False(whisperService.HasKeyPair(pubKey1), "identity should be removed, but it is still present in whisper")
}
@ -236,15 +236,15 @@ func (s *BackendTestSuite) TestLogout() {
whisperService := s.WhisperService()
// create an account
address, pubKey, _, err := s.backend.CreateAccount(TestConfig.Account1.Password)
address, pubKey, _, err := s.backend.AccountManager().CreateAccount(TestConfig.Account1.Password)
require.NoError(err)
// make sure that identity doesn't exist (yet) in Whisper
require.False(whisperService.HasKeyPair(pubKey), "identity already present in whisper")
require.NoError(s.backend.SelectAccount(address, TestConfig.Account1.Password))
require.NoError(s.backend.AccountManager().SelectAccount(address, TestConfig.Account1.Password))
require.True(whisperService.HasKeyPair(pubKey), "identity not injected into whisper")
require.NoError(s.backend.Logout())
require.NoError(s.backend.AccountManager().Logout())
require.False(whisperService.HasKeyPair(pubKey), "identity not cleared from whisper")
}
@ -258,42 +258,44 @@ func (s *BackendTestSuite) TestSelectedAccountOnRestart() {
whisperService := s.WhisperService()
// create test accounts
address1, pubKey1, _, err := s.backend.CreateAccount(TestConfig.Account1.Password)
address1, pubKey1, _, err := s.backend.AccountManager().CreateAccount(TestConfig.Account1.Password)
require.NoError(err)
address2, pubKey2, _, err := s.backend.CreateAccount(TestConfig.Account1.Password)
address2, pubKey2, _, err := s.backend.AccountManager().CreateAccount(TestConfig.Account1.Password)
require.NoError(err)
// make sure that identity is not (yet) injected
require.False(whisperService.HasKeyPair(pubKey1), "identity already present in whisper")
// make sure that no account is selected by default
selectedAccount, err := s.backend.SelectedAccount()
selectedAccount, err := s.backend.AccountManager().SelectedAccount()
require.EqualError(node.ErrNoAccountSelected, err.Error(), "account selected, but should not be")
require.Nil(selectedAccount)
// select account
err = s.backend.SelectAccount(address1, "wrongPassword")
err = s.backend.AccountManager().SelectAccount(address1, "wrongPassword")
expectedErr := errors.New("cannot retrieve a valid key for a given account: could not decrypt key with given passphrase")
require.EqualError(expectedErr, err.Error())
require.NoError(s.backend.SelectAccount(address1, TestConfig.Account1.Password))
require.NoError(s.backend.AccountManager().SelectAccount(address1, TestConfig.Account1.Password))
require.True(whisperService.HasKeyPair(pubKey1), "identity not injected into whisper")
// select another account, make sure that previous account is wiped out from Whisper cache
require.False(whisperService.HasKeyPair(pubKey2), "identity already present in whisper")
require.NoError(s.backend.SelectAccount(address2, TestConfig.Account1.Password))
require.NoError(s.backend.AccountManager().SelectAccount(address2, TestConfig.Account1.Password))
require.True(whisperService.HasKeyPair(pubKey2), "identity not injected into whisper")
require.False(whisperService.HasKeyPair(pubKey1), "identity should be removed, but it is still present in whisper")
// stop node (and all of its sub-protocols)
nodeConfig, err := s.NodeManager.NodeConfig()
nodeConfig, err := s.backend.NodeManager().NodeConfig()
require.NoError(err)
require.NotNil(nodeConfig)
preservedNodeConfig := *nodeConfig
require.NoError(s.NodeManager.StopNode())
nodeStopped, err := s.backend.StopNode()
require.NoError(err)
<-nodeStopped
// make sure that account is still selected
selectedAccount, err = s.backend.SelectedAccount()
selectedAccount, err = s.backend.AccountManager().SelectedAccount()
require.NoError(err)
require.NotNil(selectedAccount)
require.Equal(selectedAccount.Address.Hex(), "0x"+address2, "incorrect address selected")
@ -304,7 +306,7 @@ func (s *BackendTestSuite) TestSelectedAccountOnRestart() {
<-nodeStarted
// re-check selected account (account2 MUST be selected)
selectedAccount, err = s.backend.SelectedAccount()
selectedAccount, err = s.backend.AccountManager().SelectedAccount()
require.NoError(err)
require.NotNil(selectedAccount)
require.Equal(selectedAccount.Address.Hex(), "0x"+address2, "incorrect address selected")
@ -323,13 +325,13 @@ func (s *BackendTestSuite) TestSelectedAccountOnRestart() {
require.False(whisperService.HasKeyPair(pubKey1), "identity should not be present, but it is still present in whisper")
// now logout, and make sure that on restart no account is selected (i.e. logout works properly)
require.NoError(s.backend.Logout())
require.NoError(s.backend.AccountManager().Logout())
s.RestartTestNode()
whisperService = s.WhisperService()
require.False(whisperService.HasKeyPair(pubKey2), "identity not injected into whisper")
require.False(whisperService.HasKeyPair(pubKey1), "identity should not be present, but it is still present in whisper")
selectedAccount, err = s.backend.SelectedAccount()
selectedAccount, err = s.backend.AccountManager().SelectedAccount()
require.EqualError(node.ErrNoAccountSelected, err.Error())
require.Nil(selectedAccount)
}

View File

@ -65,7 +65,7 @@ func (s *BackendTestSuite) TestJailContractDeployment() {
//t.Logf("Transaction queued (will be completed shortly): {id: %s}\n", event["id"].(string))
s.NoError(s.backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
s.NoError(s.backend.AccountManager().SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
var err error
txHash, err = s.backend.CompleteTransaction(event["id"].(string), TestConfig.Account1.Password)
@ -117,7 +117,7 @@ func (s *BackendTestSuite) TestJailSendQueuedTransaction() {
require.NotNil(jailInstance)
// log into account from which transactions will be sent
require.NoError(s.backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
require.NoError(s.backend.AccountManager().SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
txParams := `{
"from": "` + TestConfig.Account1.Address + `",
@ -310,7 +310,7 @@ func (s *BackendTestSuite) TestGasEstimation() {
//t.Logf("Transaction queued (will be completed shortly): {id: %s}\n", event["id"].(string))
s.NoError(s.backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
s.NoError(s.backend.AccountManager().SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
var err error
txHash, err = s.backend.CompleteTransaction(event["id"].(string), TestConfig.Account1.Password)

View File

@ -1,13 +1,16 @@
package api_test
import (
"math/rand"
"testing"
"time"
"github.com/ethereum/go-ethereum/les"
"github.com/ethereum/go-ethereum/log"
whisper "github.com/ethereum/go-ethereum/whisper/whisperv5"
"github.com/status-im/status-go/geth/api"
"github.com/status-im/status-go/geth/common"
"github.com/status-im/status-go/geth/jail"
"github.com/status-im/status-go/geth/node"
"github.com/status-im/status-go/geth/params"
. "github.com/status-im/status-go/geth/testing"
@ -19,7 +22,7 @@ func TestBackendTestSuite(t *testing.T) {
}
type BackendTestSuite struct {
BaseTestSuite
suite.Suite
backend *api.StatusBackend
}
@ -29,7 +32,6 @@ func (s *BackendTestSuite) SetupTest() {
require.NotNil(backend)
require.IsType(&api.StatusBackend{}, backend)
s.backend = backend
s.NodeManager = backend.NodeManager()
}
func (s *BackendTestSuite) StartTestBackend(networkID int) {
@ -54,7 +56,9 @@ func (s *BackendTestSuite) StopTestBackend() {
require := s.Require()
require.NotNil(s.backend)
require.True(s.backend.IsNodeRunning())
require.NoError(s.backend.StopNode())
backendStopped, err := s.backend.StopNode()
require.NoError(err)
<-backendStopped
require.False(s.backend.IsNodeRunning())
}
@ -110,38 +114,274 @@ func (s *BackendTestSuite) TestNodeStartStop() {
// try stopping non-started node
require.False(s.backend.IsNodeRunning())
err = s.backend.StopNode()
if s.Error(err) {
require.IsType(node.ErrNoRunningNode, err)
}
nodeStopped, err := s.backend.StopNode()
require.EqualError(err, node.ErrNoRunningNode.Error())
require.Nil(nodeStopped)
require.False(s.backend.IsNodeRunning())
nodeStarted, err := s.backend.StartNode(nodeConfig)
require.NoError(err)
require.NotNil(nodeStarted)
<-nodeStarted // wait till node is started
require.True(s.backend.IsNodeRunning())
// try starting another node (w/o stopping the previously started node)
_, err = s.backend.StartNode(nodeConfig)
if s.Error(err) {
require.IsType(node.ErrNodeAlreadyExists, err)
}
nodeStarted, err = s.backend.StartNode(nodeConfig)
require.EqualError(err, node.ErrNodeExists.Error())
require.Nil(nodeStarted)
// now stop node, and make sure that a new node, on different network can be started
err = s.backend.StopNode()
nodeStopped, err = s.backend.StopNode()
require.NoError(err)
require.NotNil(nodeStopped)
<-nodeStopped
// start new node with exactly the same config
require.False(s.backend.IsNodeRunning())
nodeStarted, err = s.backend.StartNode(nodeConfig)
require.NoError(err)
defer s.StopTestNode()
require.NotNil(nodeStarted)
defer s.backend.StopNode()
<-nodeStarted
require.True(s.backend.IsNodeRunning())
}
func (s *BackendTestSuite) TestCallRPC() {
require := s.Require()
require.NotNil(s.backend)
nodeConfig, err := MakeTestNodeConfig(params.RinkebyNetworkID)
require.NoError(err)
nodeStarted, err := s.backend.StartNode(nodeConfig)
require.NoError(err)
require.NotNil(nodeStarted)
defer s.backend.StopNode()
<-nodeStarted
progress := make(chan struct{}, 25)
type rpcCall struct {
inputJSON string
validator func(resultJSON string)
}
var rpcCalls = []rpcCall{
{
`{"jsonrpc":"2.0","method":"eth_sendTransaction","params":[{
"from": "0xb60e8dd61c5d32be8058bb8eb970870f07233155",
"to": "0xd46e8dd67c5d32be8058bb8eb970870f07244567",
"gas": "0x76c0",
"gasPrice": "0x9184e72a000",
"value": "0x9184e72a",
"data": "0xd46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675"}],"id":1}`,
func(resultJSON string) {
log.Info("eth_sendTransaction")
s.T().Log("GOT: ", resultJSON)
progress <- struct{}{}
},
},
{
`{"jsonrpc":"2.0","method":"shh_version","params":[],"id":67}`,
func(resultJSON string) {
expected := `{"jsonrpc":"2.0","id":67,"result":"0x5"}` + "\n"
s.Equal(expected, resultJSON)
s.T().Log("shh_version: ", resultJSON)
progress <- struct{}{}
},
},
{
`{"jsonrpc":"2.0","method":"web3_sha3","params":["0x68656c6c6f20776f726c64"],"id":64}`,
func(resultJSON string) {
expected := `{"jsonrpc":"2.0","id":64,"result":"0x47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad"}` + "\n"
s.Equal(expected, resultJSON)
s.T().Log("web3_sha3: ", resultJSON)
progress <- struct{}{}
},
},
{
`{"jsonrpc":"2.0","method":"net_version","params":[],"id":67}`,
func(resultJSON string) {
expected := `{"jsonrpc":"2.0","id":67,"result":"4"}` + "\n"
s.Equal(expected, resultJSON)
s.T().Log("net_version: ", resultJSON)
progress <- struct{}{}
},
},
}
cnt := len(rpcCalls) - 1 // eth_sendTransaction blocks until the tx is completed, discarded, or timed out
for _, r := range rpcCalls {
go func(r rpcCall) {
s.T().Logf("Run test: %v", r.inputJSON)
resultJSON := s.backend.CallRPC(r.inputJSON)
r.validator(resultJSON)
}(r)
}
for range progress {
cnt -= 1
if cnt <= 0 {
break
}
}
}
func (s *BackendTestSuite) TestRaceConditions() {
require := s.Require()
require.NotNil(s.backend)
cnt := 25
progress := make(chan struct{}, cnt)
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
nodeConfig1, err := MakeTestNodeConfig(params.RopstenNetworkID)
require.NoError(err)
nodeConfig2, err := MakeTestNodeConfig(params.RinkebyNetworkID)
require.NoError(err)
nodeConfigs := []*params.NodeConfig{nodeConfig1, nodeConfig2}
var funcsToTest = []func(*params.NodeConfig){
func(config *params.NodeConfig) {
log.Info("StartNode()")
_, err := s.backend.StartNode(config)
s.T().Logf("StartNode() for network: %d, error: %v", config.NetworkID, err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("StopNode()")
_, err := s.backend.StopNode()
s.T().Logf("StopNode() for network: %d, error: %v", config.NetworkID, err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("ResetChainData()")
_, err := s.backend.ResetChainData()
s.T().Logf("ResetChainData(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("RestartNode()")
_, err := s.backend.RestartNode()
s.T().Logf("RestartNode(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("NodeManager()")
instance := s.backend.NodeManager()
s.NotNil(instance)
s.IsType(&node.NodeManager{}, instance)
s.T().Logf("NodeManager(), result: %v", instance)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("AccountManager()")
instance := s.backend.AccountManager()
s.NotNil(instance)
s.IsType(&node.AccountManager{}, instance)
s.T().Logf("AccountManager(), result: %v", instance)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("JailManager()")
instance := s.backend.JailManager()
s.NotNil(instance)
s.IsType(&jail.Jail{}, instance)
s.T().Logf("JailManager(), result: %v", instance)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("CreateAccount()")
address, pubKey, mnemonic, err := s.backend.AccountManager().CreateAccount("password")
s.T().Logf("CreateAccount(), error: %v (address: %v, pubKey: %v, mnemonic: %v)", err, address, pubKey, mnemonic)
if err == nil {
// SelectAccount
log.Info("CreateAccount()")
err = s.backend.AccountManager().SelectAccount(address, "password")
s.T().Logf("SelectAccount(%v, %v), error: %v", address, "password", err)
// CreateChildAccount
log.Info("CreateChildAccount()")
address, pubKey, err := s.backend.AccountManager().CreateChildAccount(address, "password")
s.T().Logf("CreateAccount(), error: %v (address: %v, pubKey: %v)", err, address, pubKey)
// RecoverAccount
log.Info("RecoverAccount()")
address, pubKey, err = s.backend.AccountManager().RecoverAccount("password", mnemonic)
s.T().Logf("RecoverAccount(), error: %v (address: %v, pubKey: %v)", err, address, pubKey)
}
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("VerifyAccountPassword()")
_, err := s.backend.AccountManager().VerifyAccountPassword(config.KeyStoreDir, "0x0", "bar")
s.T().Logf("VerifyAccountPassword(), err: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("Logout()")
s.T().Logf("Logout(), result: %v", s.backend.AccountManager().Logout())
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("IsNodeRunning()")
s.T().Logf("IsNodeRunning(), result: %v", s.backend.IsNodeRunning())
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("CompleteTransaction()")
_, err := s.backend.CompleteTransaction("id", "password")
s.T().Logf("CompleteTransaction(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("DiscardTransaction()")
s.T().Logf("DiscardTransaction(), error: %v", s.backend.DiscardTransaction("id"))
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("CompleteTransactions()")
s.T().Logf("CompleteTransactions(), result: %v", s.backend.CompleteTransactions(`["id1","id2"]`, "password"))
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("DiscardTransactions()")
s.T().Logf("DiscardTransactions(), result: %v", s.backend.DiscardTransactions(`["id1","id2"]`))
progress <- struct{}{}
},
}
// increase StartNode()/StopNode() population
for i := 0; i < 5; i++ {
funcsToTest = append(funcsToTest, funcsToTest[0], funcsToTest[1])
}
for i := 0; i < cnt; i++ {
randConfig := nodeConfigs[rnd.Intn(len(nodeConfigs))]
randFunc := funcsToTest[rnd.Intn(len(funcsToTest))]
if rnd.Intn(100) > 75 { // introduce random delays
time.Sleep(500 * time.Millisecond)
}
go randFunc(randConfig)
}
for range progress {
cnt -= 1
if cnt <= 0 {
break
}
}
time.Sleep(2 * time.Second) // so that we see some logs
nodeStopped, _ := s.backend.StopNode() // just in case we have a node running
if nodeStopped != nil {
<-nodeStopped
}
}
func (s *BackendTestSuite) TestNetworkSwitching() {
require := s.Require()
require.NotNil(s.backend)
@ -157,11 +397,12 @@ func (s *BackendTestSuite) TestNetworkSwitching() {
<-nodeStarted // wait till node is started
require.True(s.backend.IsNodeRunning())
s.FirstBlockHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d")
FirstBlockHash(require, s.backend.NodeManager(), "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d")
// now stop node, and make sure that a new node, on different network can be started
err = s.backend.StopNode()
nodeStopped, err := s.backend.StopNode()
require.NoError(err)
<-nodeStopped
// start new node with completely different config
nodeConfig, err = MakeTestNodeConfig(params.RinkebyNetworkID)
@ -175,9 +416,11 @@ func (s *BackendTestSuite) TestNetworkSwitching() {
require.True(s.backend.IsNodeRunning())
// make sure we are on another network indeed
s.FirstBlockHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
FirstBlockHash(require, s.backend.NodeManager(), "0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
require.NoError(s.backend.StopNode())
nodeStopped, err = s.backend.StopNode()
require.NoError(err)
<-nodeStopped
}
func (s *BackendTestSuite) TestResetChainData() {
@ -196,7 +439,7 @@ func (s *BackendTestSuite) TestResetChainData() {
s.True(s.backend.IsNodeRunning()) // new node, with previous config should be running
// make sure we can read the first byte, and it is valid (for Rinkeby)
s.FirstBlockHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
FirstBlockHash(require, s.backend.NodeManager(), "0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
}
func (s *BackendTestSuite) TestRestartNode() {
@ -206,7 +449,7 @@ func (s *BackendTestSuite) TestRestartNode() {
s.StartTestBackend(params.RinkebyNetworkID)
defer s.StopTestBackend()
s.FirstBlockHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
FirstBlockHash(require, s.backend.NodeManager(), "0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
s.True(s.backend.IsNodeRunning())
nodeRestarted, err := s.backend.RestartNode()
@ -215,5 +458,5 @@ func (s *BackendTestSuite) TestRestartNode() {
s.True(s.backend.IsNodeRunning()) // new node, with previous config should be running
// make sure we can read the first byte, and it is valid (for Rinkeby)
s.FirstBlockHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
FirstBlockHash(require, s.backend.NodeManager(), "0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
}

View File

@ -31,7 +31,7 @@ func (s *BackendTestSuite) TestSendContractTx() {
require.NotNil(backend)
// create an account
sampleAddress, _, _, err := s.backend.CreateAccount(TestConfig.Account1.Password)
sampleAddress, _, _, err := s.backend.AccountManager().CreateAccount(TestConfig.Account1.Password)
require.NoError(err)
// make sure you panic if transaction complete doesn't return
@ -58,7 +58,7 @@ func (s *BackendTestSuite) TestSendContractTx() {
// the second call will also fail (we are logged in as different user)
log.Info("trying to complete with invalid user")
err = s.backend.SelectAccount(sampleAddress, TestConfig.Account1.Password)
err = s.backend.AccountManager().SelectAccount(sampleAddress, TestConfig.Account1.Password)
s.NoError(err)
txHash, err = s.backend.CompleteTransaction(event["id"].(string), TestConfig.Account1.Password)
s.EqualError(err, status.ErrInvalidCompleteTxSender.Error(),
@ -66,7 +66,7 @@ func (s *BackendTestSuite) TestSendContractTx() {
// the third call will work as expected (as we are logged in with correct credentials)
log.Info("trying to complete with correct user, this should suceed")
s.NoError(s.backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
s.NoError(s.backend.AccountManager().SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
txHash, err = s.backend.CompleteTransaction(event["id"].(string), TestConfig.Account1.Password)
s.NoError(err, fmt.Sprintf("cannot complete queued transaction[%v]", event["id"]))
@ -109,7 +109,7 @@ func (s *BackendTestSuite) TestSendEtherTx() {
require.NotNil(backend)
// create an account
sampleAddress, _, _, err := s.backend.CreateAccount(TestConfig.Account1.Password)
sampleAddress, _, _, err := s.backend.AccountManager().CreateAccount(TestConfig.Account1.Password)
require.NoError(err)
// make sure you panic if transaction complete doesn't return
@ -136,7 +136,7 @@ func (s *BackendTestSuite) TestSendEtherTx() {
// the second call will also fail (we are logged in as different user)
log.Info("trying to complete with invalid user")
err = s.backend.SelectAccount(sampleAddress, TestConfig.Account1.Password)
err = s.backend.AccountManager().SelectAccount(sampleAddress, TestConfig.Account1.Password)
s.NoError(err)
txHash, err = s.backend.CompleteTransaction(event["id"].(string), TestConfig.Account1.Password)
s.EqualError(err, status.ErrInvalidCompleteTxSender.Error(),
@ -144,7 +144,7 @@ func (s *BackendTestSuite) TestSendEtherTx() {
// the third call will work as expected (as we are logged in with correct credentials)
log.Info("trying to complete with correct user, this should suceed")
s.NoError(s.backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
s.NoError(s.backend.AccountManager().SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
txHash, err = s.backend.CompleteTransaction(event["id"].(string), TestConfig.Account1.Password)
s.NoError(err, fmt.Sprintf("cannot complete queued transaction[%v]", event["id"]))
@ -181,7 +181,7 @@ func (s *BackendTestSuite) TestDoubleCompleteQueuedTransactions() {
require.NotNil(backend)
// log into account from which transactions will be sent
require.NoError(s.backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
require.NoError(s.backend.AccountManager().SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
// make sure you panic if transaction complete doesn't return
completeQueuedTransaction := make(chan struct{}, 1)
@ -265,7 +265,7 @@ func (s *BackendTestSuite) TestDiscardQueuedTransaction() {
backend.TransactionQueue().Reset()
// log into account from which transactions will be sent
require.NoError(s.backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
require.NoError(s.backend.AccountManager().SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
// make sure you panic if transaction complete doesn't return
completeQueuedTransaction := make(chan struct{}, 1)
@ -346,7 +346,7 @@ func (s *BackendTestSuite) TestCompleteMultipleQueuedTransactions() {
backend.TransactionQueue().Reset()
// log into account from which transactions will be sent
require.NoError(s.backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
require.NoError(s.backend.AccountManager().SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
// make sure you panic if transaction complete doesn't return
testTxCount := 3
@ -461,7 +461,7 @@ func (s *BackendTestSuite) TestDiscardMultipleQueuedTransactions() {
backend.TransactionQueue().Reset()
// log into account from which transactions will be sent
require.NoError(s.backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
require.NoError(s.backend.AccountManager().SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
// make sure you panic if transaction complete doesn't return
testTxCount := 3
@ -594,7 +594,7 @@ func (s *BackendTestSuite) TestNonExistentQueuedTransactions() {
require.NotNil(backend)
// log into account from which transactions will be sent
require.NoError(s.backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
require.NoError(s.backend.AccountManager().SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
// replace transaction notification handler
node.SetDefaultNodeNotificationHandler(func(string) {})
@ -619,13 +619,13 @@ func (s *BackendTestSuite) TestEvictionOfQueuedTransactions() {
backend.TransactionQueue().Reset()
// log into account from which transactions will be sent
require.NoError(s.backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
require.NoError(s.backend.AccountManager().SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))
txQueue := backend.TransactionQueue()
var i = 0
txIDs := [status.DefaultTxQueueCap + 5 + 10]status.QueuedTxID{}
backend.SetTransactionQueueHandler(func(queuedTx status.QueuedTx) {
log.Info("tx enqueued", "i", i + 1, "queue size", txQueue.Count(), "id", queuedTx.ID)
log.Info("tx enqueued", "i", i+1, "queue size", txQueue.Count(), "id", queuedTx.ID)
txIDs[i] = queuedTx.ID
i++
})

View File

@ -8,8 +8,8 @@ import (
"testing"
"github.com/ethereum/go-ethereum/log"
"github.com/status-im/status-go/geth/params"
"github.com/status-im/status-go/geth/common"
"github.com/status-im/status-go/geth/params"
)
func TestLogger(t *testing.T) {

View File

@ -18,6 +18,7 @@ import (
"github.com/status-im/status-go/static"
)
// errors
var (
ErrDeprecatedMethod = errors.New("Method is deprecated and will be removed in a future release")
)
@ -45,7 +46,7 @@ type NodeManager interface {
// StopNode stop the running Status node.
// Stopped node cannot be resumed, one starts a new node instead.
StopNode() error
StopNode() (<-chan struct{}, error)
// RestartNode restart running Status node, fails if node is not running
RestartNode() (<-chan struct{}, error)
@ -83,6 +84,9 @@ type NodeManager interface {
// RPCClient exposes reference to RPC client connected to the running node
RPCClient() (*rpc.Client, error)
// RPCServer exposes reference to running node's in-proc RPC server/handler
RPCServer() (*rpc.Server, error)
}
// AccountManager defines expected methods for managing Status accounts
@ -129,6 +133,12 @@ type AccountManager interface {
AddressToDecryptedAccount(address, password string) (accounts.Account, *keystore.Key, error)
}
// RPCManager defines expected methods for managing RPC client/server
type RPCManager interface {
// Call executes RPC request on node's in-proc RPC server
Call(inputJSON string) string
}
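
A hedged sketch of consuming the new interface; NewRPCManager is the constructor wired into StatusBackend above, and nodeManager is assumed to be the common.NodeManager of a started node:

    // Call proxies the raw JSON-RPC request to the node's in-proc RPC
    // server and returns the raw JSON response.
    var rpcManager common.RPCManager = node.NewRPCManager(nodeManager)
    result := rpcManager.Call(`{"jsonrpc":"2.0","method":"shh_version","params":[],"id":67}`)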
// RawCompleteTransactionResult is a JSON returned from transaction complete function (used internally)
type RawCompleteTransactionResult struct {
Hash common.Hash

View File

@ -4,10 +4,8 @@ import (
"errors"
"fmt"
"os"
"os/signal"
"path/filepath"
"sync"
"time"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
@ -17,22 +15,20 @@ import (
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/rpc"
whisper "github.com/ethereum/go-ethereum/whisper/whisperv5"
"github.com/status-im/status-go/geth/common"
"github.com/status-im/status-go/geth/params"
)
// errors
var (
ErrNodeAlreadyExists = errors.New("there is a running node already, stop it before starting another one")
ErrNodeExists = errors.New("node is already running")
ErrNoRunningNode = errors.New("there is no running node")
ErrNodeOpTimedOut = errors.New("operation takes too long, timed out")
ErrInvalidRunningNode = errors.New("running node is not correctly initialized")
ErrInvalidNodeManager = errors.New("node manager is not properly initialized")
ErrInvalidWhisperService = errors.New("whisper service is unavailable")
ErrInvalidLightEthereumService = errors.New("LES service is unavailable")
ErrInvalidAccountManager = errors.New("could not retrieve account manager")
ErrAccountKeyStoreMissing = errors.New("account key store is not set")
ErrInvalidRPCClient = errors.New("RPC service is unavailable")
ErrInvalidRPCClient = errors.New("RPC client is unavailable")
ErrInvalidRPCServer = errors.New("RPC server is unavailable")
)
// NodeManager manages Status node (which abstracts contained geth node)
@ -40,35 +36,18 @@ type NodeManager struct {
sync.RWMutex
config *params.NodeConfig // Status node configuration
node *node.Node // reference to Geth P2P stack/node
nodeStarted chan struct{} // channel to wait for start up notifications
nodeStopped chan struct{} // channel to wait for termination notifications
whisperService *whisper.Whisper // reference to Whisper service
lesService *les.LightEthereum // reference to LES service
rpcClient *rpc.Client // reference to RPC client
rpcServer *rpc.Server // reference to RPC server
}
// NewNodeManager makes new instance of node manager
func NewNodeManager() *NodeManager {
m := &NodeManager{}
// allow interrupting running nodes
go func() {
sigc := make(chan os.Signal, 1)
signal.Notify(sigc, os.Interrupt)
defer signal.Stop(sigc)
<-sigc
if m.node == nil {
return
}
log.Info("Got interrupt, shutting down...")
go m.node.Stop() // nolint: errcheck
for i := 3; i > 0; i-- {
<-sigc
if i > 1 {
log.Info(fmt.Sprintf("Already shutting down, interrupt %d more times for panic.", i-1))
}
}
panic("interrupted!")
}()
go HaltOnInterruptSignal(m) // allow interrupting running nodes
return m
}
@ -87,30 +66,23 @@ func (m *NodeManager) StartNode(config *params.NodeConfig) (<-chan struct{}, err
// startNode starts Status node, fails if node is already started
func (m *NodeManager) startNode(config *params.NodeConfig) (<-chan struct{}, error) {
if m.node != nil || m.nodeStopped != nil {
return nil, ErrNodeAlreadyExists
if m.node != nil || m.nodeStarted != nil {
return nil, ErrNodeExists
}
var err error
m.node, err = MakeNode(config)
ethNode, err := MakeNode(config)
if err != nil {
return nil, err
}
m.config = config // preserve config of successfully created node
nodeStarted := make(chan struct{})
m.nodeStopped = make(chan struct{})
m.nodeStarted = make(chan struct{}, 1)
go func() {
defer HaltOnPanic()
if err := m.node.Start(); err != nil {
m.Lock() // TODO potential deadlock (add test case to prove otherwise)
m.config = nil
m.lesService = nil
m.whisperService = nil
m.rpcClient = nil
m.nodeStopped = nil
m.node = nil
// start underlying node
if err := ethNode.Start(); err != nil {
close(m.nodeStarted)
m.Lock()
m.nodeStarted = nil
m.Unlock()
SendSignal(SignalEnvelope{
Type: EventNodeCrashed,
@ -118,48 +90,92 @@ func (m *NodeManager) startNode(config *params.NodeConfig) (<-chan struct{}, err
Error: fmt.Errorf("%v: %v", ErrNodeStartFailure, err).Error(),
},
})
close(nodeStarted)
return
}
// node is ready, use it
m.onNodeStarted(nodeStarted)
m.Lock()
m.node = ethNode
m.nodeStopped = make(chan struct{}, 1)
m.config = config
m.Unlock()
// underlying node is started; every method can use it now, and we use it immediately
go func() {
if err := m.PopulateStaticPeers(); err != nil {
log.Error("Static peers population", "error", err)
}
}()
// notify all subscribers that Status node is started
close(m.nodeStarted)
SendSignal(SignalEnvelope{
Type: EventNodeStarted,
Event: struct{}{},
})
// wait until the underlying node is stopped
m.node.Wait()
// notify m.Stop() that node has been stopped
close(m.nodeStopped)
log.Info("Node is stopped")
}()
return nodeStarted, nil
return m.nodeStarted, nil
}
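For illustration, here is a minimal caller-side sketch of this async start contract (API as introduced in this diff; the data dir and network ID are placeholder values):

package main

import (
	"log"

	"github.com/status-im/status-go/geth/node"
	"github.com/status-im/status-go/geth/params"
)

func main() {
	manager := node.NewNodeManager()

	// placeholder values: data dir and network ID (3 = Ropsten)
	config, err := params.NewNodeConfig("/tmp/status-data", 3, true)
	if err != nil {
		log.Fatal(err)
	}

	nodeStarted, err := manager.StartNode(config)
	if err != nil {
		log.Fatal(err) // e.g. ErrNodeExists when a node is already running
	}
	<-nodeStarted // closed once the underlying node is up (or failed to start)
}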
// onNodeStarted extra processing once we have running node
func (m *NodeManager) onNodeStarted(nodeStarted chan struct{}) {
// post-start processing
if err := m.populateStaticPeers(); err != nil {
log.Error("Static peers population", "error", err)
// StopNode stops Status node. Stopped node cannot be resumed.
func (m *NodeManager) StopNode() (<-chan struct{}, error) {
if m == nil {
return nil, ErrInvalidNodeManager
}
// obtain node info
enode := "none"
if server := m.node.Server(); server != nil {
if nodeInfo := server.NodeInfo(); nodeInfo != nil {
enode = nodeInfo.Enode
log.Info("Node is ready", "enode", enode)
}
m.Lock()
defer m.Unlock()
return m.stopNode()
}
// stopNode stops Status node. Stopped node cannot be resumed.
func (m *NodeManager) stopNode() (<-chan struct{}, error) {
if m.node == nil || m.nodeStarted == nil || m.nodeStopped == nil {
return nil, ErrNoRunningNode
}
<-m.nodeStarted // make sure you operate on fully started node
// now attempt to stop
if err := m.node.Stop(); err != nil {
return nil, err
}
// notify all subscribers that node is started
SendSignal(SignalEnvelope{
Type: EventNodeStarted,
Event: struct{}{},
})
close(nodeStarted)
nodeStopped := make(chan struct{}, 1)
go func() {
<-m.nodeStopped // Status node is stopped (code after Wait() is executed)
log.Info("Ready to reset node")
// wait until node is stopped
m.node.Wait()
SendSignal(SignalEnvelope{
Type: EventNodeStopped,
Event: struct{}{},
})
close(m.nodeStopped)
log.Info("Node is stopped", "enode", enode)
// reset node params
m.Lock()
m.config = nil
m.lesService = nil
m.whisperService = nil
m.rpcClient = nil
m.rpcServer = nil
m.nodeStarted = nil
m.node = nil
m.Unlock()
close(nodeStopped) // Status node is stopped, and we can create another
log.Info("Node manager resets node params")
// notify application that it can send more requests now
SendSignal(SignalEnvelope{
Type: EventNodeStopped,
Event: struct{}{},
})
log.Info("Node manager notifed app, that node has stopped")
}()
return nodeStopped, nil
}
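The matching stop side, as a hypothetical helper (imports as in the start sketch above); note that the returned channel is closed only after node params are reset and EventNodeStopped has been sent:

// stopAndWait demonstrates the async stop contract.
func stopAndWait(manager *node.NodeManager) error {
	nodeStopped, err := manager.StopNode()
	if err != nil {
		return err // ErrNoRunningNode when nothing is running
	}
	<-nodeStopped // closed after teardown completes and EventNodeStopped fires
	return nil
}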
// IsNodeRunning confirm that node is running
@ -171,50 +187,13 @@ func (m *NodeManager) IsNodeRunning() bool {
m.RLock()
defer m.RUnlock()
return m.node != nil && m.nodeStopped != nil
}
// StopNode stops Status node. Stopped node cannot be resumed.
func (m *NodeManager) StopNode() error {
if m == nil {
return ErrInvalidNodeManager
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return false
}
<-m.nodeStarted
m.Lock()
defer m.Unlock()
return m.stopNode()
}
// stopNode stops Status node. Stopped node cannot be resumed.
func (m *NodeManager) stopNode() error {
if m.node == nil {
return ErrNoRunningNode
}
if m.nodeStopped == nil { // node may be running, but required channel not set
return ErrInvalidRunningNode
}
if err := m.node.Stop(); err != nil {
return err
}
// wait till the previous node is fully stopped
select {
case <-m.nodeStopped:
// pass
case <-time.After(30 * time.Second):
return fmt.Errorf("%v: %s", ErrNodeOpTimedOut, common.NameOf(m.StopNode))
}
m.config = nil
m.lesService = nil
m.whisperService = nil
m.rpcClient = nil
m.nodeStopped = nil
m.node = nil
return nil
return true
}
// Node returns underlying Status node
@ -226,9 +205,11 @@ func (m *NodeManager) Node() (*node.Node, error) {
m.RLock()
defer m.RUnlock()
if m.node == nil {
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return nil, ErrNoRunningNode
}
<-m.nodeStarted
return m.node, nil
}
@ -247,9 +228,11 @@ func (m *NodeManager) PopulateStaticPeers() error {
// populateStaticPeers connects current node with our publicly available LES/SHH/Swarm cluster
func (m *NodeManager) populateStaticPeers() error {
if m.node == nil {
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return ErrNoRunningNode
}
<-m.nodeStarted
if !m.config.BootClusterConfig.Enabled {
log.Info("Boot cluster is disabled")
@ -286,9 +269,11 @@ func (m *NodeManager) AddPeer(url string) error {
// addPeer adds new static peer node
func (m *NodeManager) addPeer(url string) error {
if m == nil || m.node == nil {
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return ErrNoRunningNode
}
<-m.nodeStarted
server := m.node.Server()
if server == nil {
@ -315,15 +300,28 @@ func (m *NodeManager) ResetChainData() (<-chan struct{}, error) {
m.Lock()
defer m.Unlock()
if m.node == nil {
return m.resetChainData()
}
// resetChainData removes chain data from the data directory.
// Node is stopped, and a new node is started with a clean data directory.
func (m *NodeManager) resetChainData() (<-chan struct{}, error) {
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return nil, ErrNoRunningNode
}
<-m.nodeStarted
prevConfig := *m.config
if err := m.stopNode(); err != nil {
nodeStopped, err := m.stopNode()
if err != nil {
return nil, err
}
m.Unlock()
<-nodeStopped
m.Lock()
chainDataDir := filepath.Join(prevConfig.DataDir, prevConfig.Name, "lightchaindata")
if _, err := os.Stat(chainDataDir); os.IsNotExist(err) {
return nil, err
@ -336,7 +334,7 @@ func (m *NodeManager) ResetChainData() (<-chan struct{}, error) {
Type: EventChainDataRemoved,
Event: struct{}{},
})
log.Info("chaindata removed", "dir", chainDataDir)
log.Info("Chain data has been removed", "dir", chainDataDir)
return m.startNode(&prevConfig)
}
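Caller-side, resetting chain data follows the same channel pattern; a brief hypothetical helper, mirroring TestResetChainData later in this diff:

// resetAndWait wipes chain data and waits for the node to come back.
func resetAndWait(manager *node.NodeManager) error {
	nodeReady, err := manager.ResetChainData()
	if err != nil {
		return err
	}
	<-nodeReady // chain data dir removed, node restarted with previous config
	return nil
}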
@ -350,15 +348,27 @@ func (m *NodeManager) RestartNode() (<-chan struct{}, error) {
m.Lock()
defer m.Unlock()
if m.node == nil {
return m.restartNode()
}
// restartNode restarts running Status node, fails if node is not running
func (m *NodeManager) restartNode() (<-chan struct{}, error) {
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return nil, ErrNoRunningNode
}
<-m.nodeStarted
prevConfig := *m.config
if err := m.stopNode(); err != nil {
nodeStopped, err := m.stopNode()
if err != nil {
return nil, err
}
m.Unlock()
<-nodeStopped
m.Lock()
return m.startNode(&prevConfig)
}
@ -371,9 +381,11 @@ func (m *NodeManager) NodeConfig() (*params.NodeConfig, error) {
m.RLock()
defer m.RUnlock()
if m.node == nil {
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return nil, ErrNoRunningNode
}
<-m.nodeStarted
return m.config, nil
}
@ -387,9 +399,11 @@ func (m *NodeManager) LightEthereumService() (*les.LightEthereum, error) {
m.RLock()
defer m.RUnlock()
if m.node == nil {
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return nil, ErrNoRunningNode
}
<-m.nodeStarted
if m.lesService == nil {
if err := m.node.Service(&m.lesService); err != nil {
@ -414,9 +428,11 @@ func (m *NodeManager) WhisperService() (*whisper.Whisper, error) {
m.RLock()
defer m.RUnlock()
if m.node == nil {
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return nil, ErrNoRunningNode
}
<-m.nodeStarted
if m.whisperService == nil {
if err := m.node.Service(&m.whisperService); err != nil {
@ -441,9 +457,11 @@ func (m *NodeManager) AccountManager() (*accounts.Manager, error) {
m.RLock()
defer m.RUnlock()
if m.node == nil {
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return nil, ErrNoRunningNode
}
<-m.nodeStarted
accountManager := m.node.AccountManager()
if accountManager == nil {
@ -462,9 +480,11 @@ func (m *NodeManager) AccountKeyStore() (*keystore.KeyStore, error) {
m.RLock()
defer m.RUnlock()
if m.node == nil {
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return nil, ErrNoRunningNode
}
<-m.nodeStarted
accountManager := m.node.AccountManager()
if accountManager == nil {
@ -493,9 +513,11 @@ func (m *NodeManager) RPCClient() (*rpc.Client, error) {
m.RLock()
defer m.RUnlock()
if m.node == nil {
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return nil, ErrNoRunningNode
}
<-m.nodeStarted
if m.rpcClient == nil {
var err error
@ -512,3 +534,34 @@ func (m *NodeManager) RPCClient() (*rpc.Client, error) {
return m.rpcClient, nil
}
// RPCServer exposes reference to running node's in-proc RPC server/handler
func (m *NodeManager) RPCServer() (*rpc.Server, error) {
if m == nil {
return nil, ErrInvalidNodeManager
}
m.RLock()
defer m.RUnlock()
// make sure that node is fully started
if m.node == nil || m.nodeStarted == nil {
return nil, ErrNoRunningNode
}
<-m.nodeStarted
if m.rpcServer == nil {
var err error
m.rpcServer, err = m.node.InProcRPC()
if err != nil {
log.Error("Cannot expose on-proc RPC server", "error", err)
return nil, ErrInvalidRPCServer
}
}
if m.rpcServer == nil {
return nil, ErrInvalidRPCServer
}
return m.rpcServer, nil
}
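As a sketch of what this getter enables (the same mechanics that RPCManager.Call wraps later in this diff), the in-proc server can be driven with a synthetic HTTP request; the helper name is illustrative, and net/http/httptest and strings imports are assumed:

func callInProc(manager *node.NodeManager, requestJSON string) (string, error) {
	server, err := manager.RPCServer()
	if err != nil {
		return "", err
	}
	req := httptest.NewRequest("POST", "/", strings.NewReader(requestJSON))
	rec := httptest.NewRecorder()
	server.ServeHTTP(rec, req) // full JSON-RPC round trip, no sockets involved
	return rec.Body.String(), nil
}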

View File

@ -1,12 +1,16 @@
package node_test
import (
"encoding/json"
"fmt"
"math/rand"
"testing"
"time"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/les"
"github.com/ethereum/go-ethereum/log"
gethnode "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc"
whisper "github.com/ethereum/go-ethereum/whisper/whisperv5"
@ -30,7 +34,7 @@ func (s *ManagerTestSuite) SetupTest() {
s.Require().IsType(&node.NodeManager{}, s.NodeManager)
}
func (s *ManagerTestSuite) TestGettingReferencedServices() {
func (s *ManagerTestSuite) TestReferences() {
s.Require().NotNil(s.NodeManager)
var nilNodeManager *node.NodeManager
@ -41,6 +45,60 @@ func (s *ManagerTestSuite) TestGettingReferencedServices() {
initFn func() (interface{}, error)
expectedErr error
}{
{
"null manager, StartNode()",
func() (interface{}, error) {
return nilNodeManager.StartNode(nil)
},
node.ErrInvalidNodeManager,
},
{
"null manager, StopNode()",
func() (interface{}, error) {
return nilNodeManager.StopNode()
},
node.ErrInvalidNodeManager,
},
{
"null manager, RestartNode()",
func() (interface{}, error) {
return nilNodeManager.RestartNode()
},
node.ErrInvalidNodeManager,
},
{
"null manager, ResetChainData()",
func() (interface{}, error) {
return nilNodeManager.ResetChainData()
},
node.ErrInvalidNodeManager,
},
{
"null manager, IsNodeRunning()",
func() (interface{}, error) {
result := nilNodeManager.IsNodeRunning()
var err error
if !result {
err = node.ErrInvalidNodeManager
}
return nil, err
},
node.ErrInvalidNodeManager,
},
{
"null manager, PopulateStaticPeers()",
func() (interface{}, error) {
return nil, nilNodeManager.PopulateStaticPeers()
},
node.ErrInvalidNodeManager,
},
{
"null manager, AddPeer()",
func() (interface{}, error) {
return nil, nilNodeManager.AddPeer("enode://da3bf389a031f33fb55c9f5f54fde8473912402d27fffaa50efd74c0d0515f3a61daf6d52151f2876b19c15828e6f670352bff432b5ec457652e74755e8c864f@51.15.62.116:30303")
},
node.ErrInvalidNodeManager,
},
{
"null manager, get NodeConfig",
func() (interface{}, error) {
@ -90,6 +148,41 @@ func (s *ManagerTestSuite) TestGettingReferencedServices() {
},
node.ErrInvalidNodeManager,
},
{
"null manager, get RPC Server",
func() (interface{}, error) {
return nilNodeManager.RPCServer()
},
node.ErrInvalidNodeManager,
},
{
"non-null manager, no running node, RestartNode()",
func() (interface{}, error) {
return s.NodeManager.RestartNode()
},
node.ErrNoRunningNode,
},
{
"non-null manager, no running node, ResetChainData()",
func() (interface{}, error) {
return s.NodeManager.ResetChainData()
},
node.ErrNoRunningNode,
},
{
"non-null manager, no running node, PopulateStaticPeers()",
func() (interface{}, error) {
return nil, s.NodeManager.PopulateStaticPeers()
},
node.ErrNoRunningNode,
},
{
"non-null manager, no running node, AddPeer()",
func() (interface{}, error) {
return nil, s.NodeManager.AddPeer("enode://da3bf389a031f33fb55c9f5f54fde8473912402d27fffaa50efd74c0d0515f3a61daf6d52151f2876b19c15828e6f670352bff432b5ec457652e74755e8c864f@51.15.62.116:30303")
},
node.ErrNoRunningNode,
},
{
"non-null manager, no running node, get NodeConfig",
func() (interface{}, error) {
@ -139,6 +232,13 @@ func (s *ManagerTestSuite) TestGettingReferencedServices() {
},
node.ErrNoRunningNode,
},
{
"non-null manager, no running node, get RPC Server",
func() (interface{}, error) {
return s.NodeManager.RPCServer()
},
node.ErrNoRunningNode,
},
}
for _, testCase := range noNodeTests {
s.T().Log(testCase.name)
@ -204,6 +304,13 @@ func (s *ManagerTestSuite) TestGettingReferencedServices() {
},
&rpc.Client{},
},
{
"node is running, get RPC Server",
func() (interface{}, error) {
return s.NodeManager.RPCServer()
},
&rpc.Server{},
},
}
for _, testCase := range nodeReadyTestCases {
obj, err := testCase.initFn()
@ -223,10 +330,8 @@ func (s *ManagerTestSuite) TestNodeStartStop() {
// try stopping non-started node
require.False(s.NodeManager.IsNodeRunning())
err = s.NodeManager.StopNode()
if s.Error(err) {
require.IsType(node.ErrNoRunningNode, err)
}
_, err = s.NodeManager.StopNode()
require.EqualError(err, node.ErrNoRunningNode.Error())
require.False(s.NodeManager.IsNodeRunning())
nodeStarted, err := s.NodeManager.StartNode(nodeConfig)
@ -237,13 +342,12 @@ func (s *ManagerTestSuite) TestNodeStartStop() {
// try starting another node (w/o stopping the previously started node)
_, err = s.NodeManager.StartNode(nodeConfig)
if s.Error(err) {
require.IsType(node.ErrNodeAlreadyExists, err)
}
require.EqualError(err, node.ErrNodeExists.Error())
// now stop node, and make sure that a new node, on different network can be started
err = s.NodeManager.StopNode()
nodeStopped, err := s.NodeManager.StopNode()
require.NoError(err)
<-nodeStopped
// start new node with exactly the same config
require.False(s.NodeManager.IsNodeRunning())
@ -271,11 +375,12 @@ func (s *ManagerTestSuite) TestNetworkSwitching() {
<-nodeStarted // wait till node is started
require.True(s.NodeManager.IsNodeRunning())
s.FirstBlockHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d")
FirstBlockHash(require, s.NodeManager, "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d")
// now stop node, and make sure that a new node, on different network can be started
err = s.NodeManager.StopNode()
nodeStopped, err := s.NodeManager.StopNode()
require.NoError(err)
<-nodeStopped
// start new node with completely different config
nodeConfig, err = MakeTestNodeConfig(params.RinkebyNetworkID)
@ -289,7 +394,7 @@ func (s *ManagerTestSuite) TestNetworkSwitching() {
require.True(s.NodeManager.IsNodeRunning())
// make sure we are on another network indeed
s.FirstBlockHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
FirstBlockHash(require, s.NodeManager, "0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
s.StopTestNode()
}
@ -310,5 +415,201 @@ func (s *ManagerTestSuite) TestResetChainData() {
s.True(s.NodeManager.IsNodeRunning()) // new node, with previous config should be running
// make sure we can read the first byte, and it is valid (for Rinkeby)
s.FirstBlockHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
FirstBlockHash(require, s.NodeManager, "0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
}
func (s *ManagerTestSuite) TestRestartNode() {
require := s.Require()
require.NotNil(s.NodeManager)
s.StartTestNode(params.RinkebyNetworkID)
defer s.StopTestNode()
s.True(s.NodeManager.IsNodeRunning())
nodeReady, err := s.NodeManager.RestartNode()
require.NoError(err)
<-nodeReady
s.True(s.NodeManager.IsNodeRunning()) // new node, with previous config should be running
// make sure we can read the first byte, and it is valid (for Rinkeby)
FirstBlockHash(require, s.NodeManager, "0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
}
func (s *ManagerTestSuite) TestRaceConditions() {
require := s.Require()
require.NotNil(s.NodeManager)
cnt := 25
progress := make(chan struct{}, cnt)
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
nodeConfig1, err := MakeTestNodeConfig(params.RopstenNetworkID)
require.NoError(err)
nodeConfig2, err := MakeTestNodeConfig(params.RinkebyNetworkID)
require.NoError(err)
nodeConfigs := []*params.NodeConfig{nodeConfig1, nodeConfig2}
var funcsToTest = []func(*params.NodeConfig){
func(config *params.NodeConfig) {
log.Info("StartNode()")
_, err := s.NodeManager.StartNode(config)
s.T().Logf("StartNode() for network: %d, error: %v", config.NetworkID, err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("StopNode()")
_, err := s.NodeManager.StopNode()
s.T().Logf("StopNode() for network: %d, error: %v", config.NetworkID, err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("Node()")
_, err := s.NodeManager.Node()
s.T().Logf("Node(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("IsNodeRunning()")
s.T().Logf("IsNodeRunning(), result: %v", s.NodeManager.IsNodeRunning())
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("PopulateStaticPeers()")
s.T().Logf("PopulateStaticPeers(), error: %v", s.NodeManager.PopulateStaticPeers())
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("ResetChainData()")
_, err := s.NodeManager.ResetChainData()
s.T().Logf("ResetChainData(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("RestartNode()")
_, err := s.NodeManager.RestartNode()
s.T().Logf("RestartNode(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("NodeConfig()")
_, err := s.NodeManager.NodeConfig()
s.T().Logf("NodeConfig(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("LightEthereumService()")
_, err := s.NodeManager.LightEthereumService()
s.T().Logf("LightEthereumService(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("WhisperService()")
_, err := s.NodeManager.WhisperService()
s.T().Logf("WhisperService(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("AccountManager()")
_, err := s.NodeManager.AccountManager()
s.T().Logf("AccountManager(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("AccountKeyStore()")
_, err := s.NodeManager.AccountKeyStore()
s.T().Logf("AccountKeyStore(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("RPCClient()")
_, err := s.NodeManager.RPCClient()
s.T().Logf("RPCClient(), error: %v", err)
progress <- struct{}{}
},
func(config *params.NodeConfig) {
log.Info("RPCServer()")
_, err := s.NodeManager.RPCServer()
s.T().Logf("RPCServer(), error: %v", err)
progress <- struct{}{}
},
}
// over-represent StartNode()/StopNode() so they are exercised more often
for i := 0; i < 5; i++ {
funcsToTest = append(funcsToTest, funcsToTest[0], funcsToTest[1])
}
for i := 0; i < cnt; i++ {
randConfig := nodeConfigs[rnd.Intn(len(nodeConfigs))]
randFunc := funcsToTest[rnd.Intn(len(funcsToTest))]
if rnd.Intn(100) > 75 { // introduce random delays
time.Sleep(500 * time.Millisecond)
}
go randFunc(randConfig)
}
for range progress {
cnt--
if cnt <= 0 {
break
}
}
time.Sleep(2 * time.Second) // so that we see some logs
nodeStopped, _ := s.NodeManager.StopNode() // just in case we have a node running
if nodeStopped != nil {
<-nodeStopped
}
}
func (s *ManagerTestSuite) TestNodeStartCrash() {
require := s.Require()
require.NotNil(s.NodeManager)
nodeConfig, err := MakeTestNodeConfig(params.RinkebyNetworkID)
require.NoError(err)
// start node outside the manager (on the same port), so that manager node.Start() method fails
outsideNode, err := node.MakeNode(nodeConfig)
require.NoError(outsideNode.Start())
// let's listen for node.crashed signal
signalReceived := false
node.SetDefaultNodeNotificationHandler(func(jsonEvent string) {
log.Info("Notification Received", "event", jsonEvent)
var envelope node.SignalEnvelope
err := json.Unmarshal([]byte(jsonEvent), &envelope)
s.NoError(err, fmt.Sprintf("cannot unmarshal JSON: %s", jsonEvent))
if envelope.Type == node.EventNodeCrashed {
signalReceived = true
}
})
// now try starting using node manager
nodeStarted, err := s.NodeManager.StartNode(nodeConfig)
require.NoError(err) // no error is thrown, as node is started in separate routine
<-nodeStarted // no deadlock either, as manager should close the channel on error
require.False(s.NodeManager.IsNodeRunning())
time.Sleep(2 * time.Second) // allow signal to propagate
require.True(signalReceived, "node crash signal is expected")
// stop outside node, and re-try
require.NoError(outsideNode.Stop())
signalReceived = false
nodeStarted, err = s.NodeManager.StartNode(nodeConfig)
require.NoError(err) // again, no error
<-nodeStarted // no deadlock, and no signal this time, manager should be able to start node
require.True(s.NodeManager.IsNodeRunning())
time.Sleep(2 * time.Second) // allow signal to propagate
require.False(signalReceived, "node should start w/o crash signal")
// cleanup
s.NodeManager.StopNode()
node.ResetDefaultNodeNotificationHandler()
}

View File

@ -149,7 +149,7 @@ func updateCHT(eth *les.LightEthereum, config *params.NodeConfig) {
Dev string `json:"dev"`
}
loadCHTLists := func() ([]MsgCHTRoot, error) {
url := "https://gist.githubusercontent.com/farazdagi/a8d36e2818b3b2b6074d691da63a0c36/raw/?u=" + strconv.Itoa(int(time.Now().Unix()))
url := config.LightEthConfig.CHTRootConfigURL + "?u=" + strconv.Itoa(int(time.Now().Unix()))
client := &http.Client{Timeout: 5 * time.Second}
r, err := client.Get(url)
if err != nil {
@ -181,7 +181,7 @@ func updateCHT(eth *les.LightEthereum, config *params.NodeConfig) {
Number: root.Number,
Root: gethcommon.HexToHash(chtRoot),
})
log.Info("Loaded CHT from net", "CHT", chtRoot, "number", root.Number)
log.Info("Loaded CHT from net", "CHT", chtRoot, "number", root.Number, "dev", config.DevMode)
return
}
}

108
geth/node/rpc.go Normal file
View File

@ -0,0 +1,108 @@
package node
import (
"encoding/json"
"errors"
"net/http"
"net/http/httptest"
"strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/les/status"
"github.com/ethereum/go-ethereum/log"
"github.com/status-im/status-go/geth/common"
)
const (
jsonrpcVersion = "2.0"
)
type jsonRequest struct {
Method string `json:"method"`
Version string `json:"jsonrpc"`
ID int `json:"id,omitempty"`
Payload json.RawMessage `json:"params,omitempty"`
}
type jsonError struct {
Code int `json:"code"`
Message string `json:"message"`
Data interface{} `json:"data,omitempty"`
}
type jsonErrResponse struct {
Version string `json:"jsonrpc"`
ID interface{} `json:"id,omitempty"`
Error jsonError `json:"error"`
}
// RPCManager abstracts the RPC management API (for both client and server)
type RPCManager struct {
sync.Mutex
requestID int
nodeManager common.NodeManager
}
// errors
var (
ErrInvalidMethod = errors.New("method does not exist")
ErrRPCServerTimeout = errors.New("RPC server cancelled call due to timeout")
ErrRPCServerCallFailed = errors.New("RPC server cannot complete request")
)
// NewRPCManager returns new instance of RPC manager
func NewRPCManager(nodeManager common.NodeManager) *RPCManager {
return &RPCManager{
nodeManager: nodeManager,
}
}
// Call executes RPC request on node's in-proc RPC server
func (c *RPCManager) Call(inputJSON string) string {
server, err := c.nodeManager.RPCServer()
if err != nil {
return c.makeJSONErrorResponse(err)
}
// execute the request in a goroutine, so that even a long-blocking call is bounded by the timeout below
outputJSON := make(chan string, 1)
go func() {
httpReq := httptest.NewRequest("POST", "/", strings.NewReader(inputJSON))
rr := httptest.NewRecorder()
server.ServeHTTP(rr, httpReq)
// Check the status code is what we expect.
if respStatus := rr.Code; respStatus != http.StatusOK {
log.Error("handler returned wrong status code", "got", respStatus, "want", http.StatusOK)
outputJSON <- c.makeJSONErrorResponse(ErrRPCServerCallFailed)
return
}
// everything is ok, return
outputJSON <- rr.Body.String()
}()
// wait till call is complete
select {
case out := <-outputJSON:
return out
case <-time.After((status.DefaultTxSendCompletionTimeout + 10) * time.Minute): // give up eventually
// pass
}
return c.makeJSONErrorResponse(ErrRPCServerTimeout)
}
// makeJSONErrorResponse returns error as JSON response
func (c *RPCManager) makeJSONErrorResponse(err error) string {
response := jsonErrResponse{
Version: jsonrpcVersion,
Error: jsonError{
Message: err.Error(),
},
}
outBytes, _ := json.Marshal(&response)
return string(outBytes)
}
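A usage sketch for this manager, with the request/response pair taken from the web3_sha3 fixture in the tests below (nodeManager stands for any started common.NodeManager):

rpcManager := node.NewRPCManager(nodeManager)
result := rpcManager.Call(`{"jsonrpc":"2.0","method":"web3_sha3","params":["0x68656c6c6f20776f726c64"],"id":64}`)
// result == `{"jsonrpc":"2.0","id":64,"result":"0x47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad"}` + "\n"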

111
geth/node/rpc_test.go Normal file
View File

@ -0,0 +1,111 @@
package node_test
import (
"testing"
"github.com/ethereum/go-ethereum/log"
"github.com/status-im/status-go/geth/node"
"github.com/status-im/status-go/geth/params"
. "github.com/status-im/status-go/geth/testing"
"github.com/stretchr/testify/suite"
)
func TestRPCTestSuite(t *testing.T) {
suite.Run(t, new(RPCTestSuite))
}
type RPCTestSuite struct {
BaseTestSuite
}
func (s *RPCTestSuite) SetupTest() {
require := s.Require()
s.NodeManager = node.NewNodeManager()
require.NotNil(s.NodeManager)
require.IsType(&node.NodeManager{}, s.NodeManager)
}
func (s *RPCTestSuite) TestCallRPC() {
require := s.Require()
require.NotNil(s.NodeManager)
rpcClient := node.NewRPCManager(s.NodeManager)
require.NotNil(rpcClient)
nodeConfig, err := MakeTestNodeConfig(params.RinkebyNetworkID)
require.NoError(err)
nodeConfig.IPCEnabled = false
nodeConfig.WSEnabled = false
nodeConfig.HTTPHost = "" // to make sure that no HTTP interface is started
nodeStarted, err := s.NodeManager.StartNode(nodeConfig)
require.NoError(err)
require.NotNil(nodeStarted)
defer s.NodeManager.StopNode()
<-nodeStarted
progress := make(chan struct{}, 25)
type rpcCall struct {
inputJSON string
validator func(resultJSON string)
}
var rpcCalls = []rpcCall{
{
`{"jsonrpc":"2.0","method":"eth_sendTransaction","params":[{
"from": "0xb60e8dd61c5d32be8058bb8eb970870f07233155",
"to": "0xd46e8dd67c5d32be8058bb8eb970870f07244567",
"gas": "0x76c0",
"gasPrice": "0x9184e72a000",
"value": "0x9184e72a",
"data": "0xd46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675"}],"id":1}`,
func(resultJSON string) {
log.Info("eth_sendTransaction")
s.T().Log("GOT: ", resultJSON)
progress <- struct{}{}
},
},
{
`{"jsonrpc":"2.0","method":"shh_version","params":[],"id":67}`,
func(resultJSON string) {
expected := `{"jsonrpc":"2.0","id":67,"result":"0x5"}` + "\n"
s.Equal(expected, resultJSON)
s.T().Log("shh_version: ", resultJSON)
progress <- struct{}{}
},
},
{
`{"jsonrpc":"2.0","method":"web3_sha3","params":["0x68656c6c6f20776f726c64"],"id":64}`,
func(resultJSON string) {
expected := `{"jsonrpc":"2.0","id":64,"result":"0x47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad"}` + "\n"
s.Equal(expected, resultJSON)
s.T().Log("web3_sha3: ", resultJSON)
progress <- struct{}{}
},
},
{
`{"jsonrpc":"2.0","method":"net_version","params":[],"id":67}`,
func(resultJSON string) {
expected := `{"jsonrpc":"2.0","id":67,"result":"4"}` + "\n"
s.Equal(expected, resultJSON)
s.T().Log("net_version: ", resultJSON)
progress <- struct{}{}
},
},
}
cnt := len(rpcCalls) - 1 // eth_sendTransaction blocks until tx is completed/discarded/timed out, so we do not wait for it
for _, r := range rpcCalls {
go func(r rpcCall) {
s.T().Logf("Run test: %v", r.inputJSON)
resultJSON := rpcClient.Call(r.inputJSON)
r.validator(resultJSON)
}(r)
}
for range progress {
cnt--
if cnt <= 0 {
break
}
}
}

View File

@ -51,6 +51,11 @@ func SetDefaultNodeNotificationHandler(fn NodeNotificationHandler) {
notificationHandler = fn
}
// ResetDefaultNodeNotificationHandler sets notification handler to the default one
func ResetDefaultNodeNotificationHandler() {
notificationHandler = TriggerDefaultNodeNotificationHandler
}
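A sketch of the intended usage from inside a test function (mirroring TestNodeStartCrash in this diff; node and encoding/json imports assumed): install a temporary handler, then restore the default when done:

node.SetDefaultNodeNotificationHandler(func(jsonEvent string) {
	var envelope node.SignalEnvelope
	if err := json.Unmarshal([]byte(jsonEvent), &envelope); err != nil {
		return
	}
	if envelope.Type == node.EventNodeCrashed {
		// react to the crash event here
	}
})
defer node.ResetDefaultNodeNotificationHandler()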
// TriggerDefaultNodeNotificationHandler triggers default notification handler (helpful in tests)
func TriggerDefaultNodeNotificationHandler(jsonEvent string) {
log.Info("Notification received", "event", jsonEvent)

View File

@ -2,7 +2,10 @@ package node
import (
"fmt"
"os"
"os/signal"
"github.com/ethereum/go-ethereum/log"
"github.com/status-im/status-go/geth/common"
)
@ -22,3 +25,23 @@ func HaltOnPanic() {
common.Fatalf(err) // os.exit(1) is called internally
}
}
// HaltOnInterruptSignal stops node and panics if you press Ctrl-C enough times
func HaltOnInterruptSignal(nodeManager *NodeManager) {
sigc := make(chan os.Signal, 1)
signal.Notify(sigc, os.Interrupt)
defer signal.Stop(sigc)
<-sigc
if nodeManager.node == nil {
return
}
log.Info("Got interrupt, shutting down...")
go nodeManager.node.Stop() // nolint: errcheck
for i := 3; i > 0; i-- {
<-sigc
if i > 1 {
log.Info(fmt.Sprintf("Already shutting down, interrupt %d more times for panic.", i-1))
}
}
panic("interrupted!")
}

View File

@ -49,6 +49,10 @@ type LightEthConfig struct {
// DatabaseCache is memory (in MBs) allocated to internal caching (min 16MB / database forced)
DatabaseCache int
// CHTRootConfigURL defines URL to file containing hard-coded CHT roots
// TODO remove this hack, once CHT sync is implemented on LES side
CHTRootConfigURL string
}
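Since the roots URL is now part of the config, tests and forks can point it at their own file; a hypothetical helper with placeholder values:

// configWithCustomCHT overrides the default gist-hosted roots file.
func configWithCustomCHT() (*params.NodeConfig, error) {
	config, err := params.NewNodeConfig("/tmp/status-data", 3, true) // 3 = Ropsten
	if err != nil {
		return nil, err
	}
	config.LightEthConfig.CHTRootConfigURL = "https://example.org/cht-roots.json"
	return config, nil
}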
// FirebaseConfig holds FCM-related configuration
@ -233,8 +237,9 @@ func NewNodeConfig(dataDir string, networkID uint64, devMode bool) (*NodeConfig,
LogLevel: LogLevel,
LogToStderr: LogToStderr,
LightEthConfig: &LightEthConfig{
Enabled: true,
DatabaseCache: DatabaseCache,
Enabled: true,
DatabaseCache: DatabaseCache,
CHTRootConfigURL: CHTRootConfigURL,
},
BootClusterConfig: &BootClusterConfig{
Enabled: true,

View File

@ -51,6 +51,10 @@ const (
// DatabaseCache is memory (in MBs) allocated to internal caching (min 16MB / database forced)
DatabaseCache = 16
// CHTRootConfigURL defines URL to file containing hard-coded CHT roots
// TODO remove this hack, once CHT sync is implemented on LES side
CHTRootConfigURL = "https://gist.githubusercontent.com/farazdagi/a8d36e2818b3b2b6074d691da63a0c36/raw/"
// LogFile defines where to write logs to
LogFile = "geth.log"

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -12,6 +12,7 @@ import (
gethcommon "github.com/ethereum/go-ethereum/common"
"github.com/status-im/status-go/geth/common"
"github.com/status-im/status-go/geth/params"
assertions "github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
)
@ -81,20 +82,21 @@ func (s *BaseTestSuite) StopTestNode() {
require := s.Require()
require.NotNil(s.NodeManager)
require.True(s.NodeManager.IsNodeRunning())
require.NoError(s.NodeManager.StopNode())
nodeStopped, err := s.NodeManager.StopNode()
require.NoError(err)
<-nodeStopped
require.False(s.NodeManager.IsNodeRunning())
}
func (s *BaseTestSuite) FirstBlockHash(expectedHash string) {
require := s.Require()
require.NotNil(s.NodeManager)
func FirstBlockHash(require *assertions.Assertions, nodeManager common.NodeManager, expectedHash string) {
require.NotNil(nodeManager)
var firstBlock struct {
Hash gethcommon.Hash `json:"hash"`
}
// obtain RPC client for running node
runningNode, err := s.NodeManager.Node()
runningNode, err := nodeManager.Node()
require.NoError(err)
require.NotNil(runningNode)
@ -105,7 +107,7 @@ func (s *BaseTestSuite) FirstBlockHash(expectedHash string) {
err = rpcClient.CallContext(context.Background(), &firstBlock, "eth_getBlockByNumber", "0x0", true)
require.NoError(err)
s.Equal(expectedHash, firstBlock.Hash.Hex())
require.Equal(expectedHash, firstBlock.Hash.Hex())
}
func MakeTestNodeConfig(networkID int) (*params.NodeConfig, error) {

View File

@ -124,7 +124,7 @@ func scriptsWeb3Js() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "scripts/web3.js", size: 496217, mode: os.FileMode(420), modTime: time.Unix(1495573395, 0)}
info := bindataFileInfo{name: "scripts/web3.js", size: 496217, mode: os.FileMode(420), modTime: time.Unix(1495916191, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -269,7 +269,7 @@ func configLinter_exclude_listTxt() (*asset, error) {
return a, nil
}
var _configTestDataJson = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x84\x8d\x31\x6b\xc3\x40\x0c\x46\x77\xff\x0a\xa1\xb9\xc3\xc9\xae\xe4\xb3\xb7\x6c\x9d\x8a\xc1\x81\xce\x3a\x9d\x3c\xfa\xc0\x97\xd2\x96\x92\xff\x5e\x8e\x06\x02\x85\x92\x41\x20\x1e\x7c\xef\x7d\x77\x00\xf8\x5a\xb2\xe3\x0c\xed\x07\xc0\xf5\x6b\xb7\xd5\xad\xec\xb9\xe2\x0c\x14\x9e\x7e\xf1\xcb\xf9\xbc\x2c\xe5\xb8\xe0\x0c\x51\x9e\xf9\x46\xdf\xd6\x3b\x93\x0e\xe0\xda\x38\x9e\xcc\xca\xfb\x7e\xa1\xbb\xf4\x94\xf3\xe1\xb5\x09\x31\x7c\x6a\xd6\x8d\x38\xa4\x29\xb0\x6d\xec\xa2\xe3\x18\x9d\x79\x70\x62\xa5\x61\x4a\x22\x14\x53\x4a\x23\xde\x22\x8b\xd6\xfa\x51\x8e\xdc\xd6\x5a\xf3\xd6\x0e\xff\xc6\xfa\x7f\x62\xc2\x16\x88\xa3\xa8\x06\x73\xe2\x3e\x0e\x6c\x63\x8c\x6a\x2e\xc2\x3e\x91\xa6\x81\xfb\x31\xc5\x87\xb1\xee\xfa\x13\x00\x00\xff\xff\x93\xde\xa0\x86\x2d\x01\x00\x00")
var _configTestDataJson = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x84\x8d\x31\x6b\xc4\x30\x0c\x46\xf7\xfc\x0a\xa1\xb9\x83\x1d\x57\xb2\x93\xed\xb6\x4e\x25\x90\x83\xce\xb2\xac\x8c\x31\xc4\x57\xda\x52\xee\xbf\x17\xd3\x83\x83\x42\xb9\x41\x20\x1e\x7c\xef\x7d\x0f\x00\xf8\x5a\x8b\xe1\x0c\xfd\x07\xc0\xf5\x6b\xd7\xd5\xb4\xee\xa5\xe1\x0c\xf1\xe9\x97\xbe\x9c\xcf\xcb\x52\x8f\x0b\xce\x90\xf8\x99\x6e\xf4\x6d\xbd\x33\x1e\x00\xae\x9d\xe3\x49\xb5\xbe\xef\x17\x7f\x77\x9e\x4a\x39\xac\x75\x1f\xba\x4f\x29\xb2\x79\x72\x79\x72\xa4\x1b\x19\x4b\x8c\xc9\x88\x82\x79\x12\x1f\xa6\xcc\xec\x53\xce\x39\xe2\x2d\xb2\x48\x6b\x1f\xf5\x28\x7d\x2d\xad\x6c\xfd\xf0\x6f\x6c\xfc\x27\xc6\xa4\xce\x53\x62\x11\xa7\xe6\x69\x4c\x81\x34\xa6\x24\x6a\xcc\x64\x93\x97\x1c\x68\x8c\x39\x3d\x8c\x0d\xd7\x9f\x00\x00\x00\xff\xff\xb4\xe1\xf5\x0c\x2c\x01\x00\x00")
func configTestDataJsonBytes() ([]byte, error) {
return bindataRead(
@ -284,7 +284,7 @@ func configTestDataJson() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "config/test-data.json", size: 301, mode: os.FileMode(420), modTime: time.Unix(1495573389, 0)}
info := bindataFileInfo{name: "config/test-data.json", size: 300, mode: os.FileMode(420), modTime: time.Unix(1495916186, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -404,7 +404,7 @@ func testdataJailCommandsJs() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "testdata/jail/commands.js", size: 7677, mode: os.FileMode(420), modTime: time.Unix(1492580435, 0)}
info := bindataFileInfo{name: "testdata/jail/commands.js", size: 7677, mode: os.FileMode(420), modTime: time.Unix(1495640010, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -424,7 +424,7 @@ func testdataJailStatusJs() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "testdata/jail/status.js", size: 3402, mode: os.FileMode(420), modTime: time.Unix(1492580435, 0)}
info := bindataFileInfo{name: "testdata/jail/status.js", size: 3402, mode: os.FileMode(420), modTime: time.Unix(1495640010, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -444,7 +444,7 @@ func testdataJailTxSendContextNoMessageIdJs() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "testdata/jail/tx-send/context-no-message-id.js", size: 1793, mode: os.FileMode(420), modTime: time.Unix(1492580435, 0)}
info := bindataFileInfo{name: "testdata/jail/tx-send/context-no-message-id.js", size: 1793, mode: os.FileMode(420), modTime: time.Unix(1495640010, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -464,7 +464,7 @@ func testdataJailTxSendMessageIdNoContextJs() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "testdata/jail/tx-send/message-id-no-context.js", size: 1875, mode: os.FileMode(420), modTime: time.Unix(1492580435, 0)}
info := bindataFileInfo{name: "testdata/jail/tx-send/message-id-no-context.js", size: 1875, mode: os.FileMode(420), modTime: time.Unix(1495640010, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -484,7 +484,7 @@ func testdataJailTxSendNoMessageIdOrContextJs() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "testdata/jail/tx-send/no-message-id-or-context.js", size: 1354, mode: os.FileMode(420), modTime: time.Unix(1492580435, 0)}
info := bindataFileInfo{name: "testdata/jail/tx-send/no-message-id-or-context.js", size: 1354, mode: os.FileMode(420), modTime: time.Unix(1495640010, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -504,7 +504,7 @@ func testdataJailTxSendTxSendJs() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "testdata/jail/tx-send/tx-send.js", size: 2987, mode: os.FileMode(420), modTime: time.Unix(1492580435, 0)}
info := bindataFileInfo{name: "testdata/jail/tx-send/tx-send.js", size: 2987, mode: os.FileMode(420), modTime: time.Unix(1495640010, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
@ -524,7 +524,7 @@ func testdataNodeTestSol() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "testdata/node/test.sol", size: 119, mode: os.FileMode(420), modTime: time.Unix(1488290438, 0)}
info := bindataFileInfo{name: "testdata/node/test.sol", size: 119, mode: os.FileMode(420), modTime: time.Unix(1495640010, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}

View File

@ -1,6 +1,6 @@
{
"Node": {
"SyncSeconds": 10,
"SyncSeconds": 7,
"HTTPPort": 8645,
"WSPort": 8646
},

View File

@ -568,6 +568,18 @@ func (n *Node) Attach() (*rpc.Client, error) {
return rpc.DialInProc(n.inprocHandler), nil
}
// InProcRPC exposes in-proc RPC server
func (n *Node) InProcRPC() (*rpc.Server, error) {
n.lock.RLock()
defer n.lock.RUnlock()
if n.server == nil || n.inprocHandler == nil {
return nil, ErrNodeStopped
}
return n.inprocHandler, nil
}
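For contrast with Attach() above, a hypothetical helper showing the two in-proc handles a node now exposes, a client end and a server end of the same pipe (go-ethereum node and rpc imports assumed):

func inProcHandles(n *node.Node) (*rpc.Client, *rpc.Server, error) {
	client, err := n.Attach() // client end: issue calls over the in-proc pipe
	if err != nil {
		return nil, nil, err
	}
	server, err := n.InProcRPC() // server end: dispatch raw requests directly
	if err != nil {
		return nil, nil, err
	}
	return client, server, nil
}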
// Server retrieves the currently running P2P network layer. This method is meant
// only to inspect fields of the currently running server, life cycle management
// should be left to this Node entity.