[#797] Simplify node.Manager public api and rename it to StatusNode
- [x] [#797]: Remove unused methods PopulateStaticPeers, ReconnectStaticPeers, removeStaticPeers, removePeer
- [x] [#797]: Rename node.Manager to node.StatusNode and simplify its public API
- [x] [#797]: Rename all references to nodeManager to statusNode
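At a glance, the rename maps the old `node.Manager` API onto `node.StatusNode` as follows. A minimal before/after sketch, assembled only from call sites visible in this diff (error handling elided; `config` stands in for a `*params.NodeConfig`):

```go
// Before: node.Manager (removed by this commit)
nodeManager := node.NewManager()
err := nodeManager.StartNode(config)  // start the node
gethNode, err := nodeManager.Node()   // underlying geth node
cfg, err := nodeManager.NodeConfig()  // running node's configuration
err = nodeManager.StopNode()          // stop the node

// After: node.StatusNode
statusNode := node.New()
err = statusNode.Start(config)        // start the node
gethNode, err = statusNode.GethNode() // underlying geth node
cfg, err = statusNode.Config()        // running node's configuration
err = statusNode.Stop()               // stop the node

_, _, _ = gethNode, cfg, err // silence unused variables in this sketch
```

`IsNodeRunning()` likewise becomes `IsRunning()`. Note that of the peer methods listed in the checklist, only the public `PopulateStaticPeers` is dropped in the hunks below; `ReconnectStaticPeers`, `removeStaticPeers`, and `removePeer` survive with the new receiver.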
parent 74ce515ab2
commit 359b3621e9
@@ -129,7 +129,7 @@ func main() {
 	}

 	// handle interrupt signals
-	interruptCh := haltOnInterruptSignal(backend.NodeManager())
+	interruptCh := haltOnInterruptSignal(backend.StatusNode())

 	// Check if debugging CLI connection shall be enabled.
 	if *cliEnabled {

@@ -147,15 +147,15 @@ func main() {

 	// Run stats server.
 	if *statsEnabled {
-		go startCollectingStats(interruptCh, backend.NodeManager())
+		go startCollectingStats(interruptCh, backend.StatusNode())
 	}

 	// Sync blockchain and stop.
 	if *syncAndExit >= 0 {
-		exitCode := syncAndStopNode(interruptCh, backend.NodeManager(), *syncAndExit)
+		exitCode := syncAndStopNode(interruptCh, backend.StatusNode(), *syncAndExit)
 		// Call was interrupted. Wait for graceful shutdown.
 		if exitCode == -1 {
-			if node, err := backend.NodeManager().Node(); err == nil && node != nil {
+			if node, err := backend.StatusNode().GethNode(); err == nil && node != nil {
 				node.Wait()
 			}
 			return

@@ -164,7 +164,7 @@ func main() {
 		os.Exit(exitCode)
 	}

-	node, err := backend.NodeManager().Node()
+	node, err := backend.StatusNode().GethNode()
 	if err != nil {
 		logger.Error("Getting node failed", "error", err)
 		return

@@ -182,11 +182,11 @@ func startDebug(backend *api.StatusBackend) error {
 }

 // startCollectingStats collects various stats about the node and other protocols like Whisper.
-func startCollectingStats(interruptCh <-chan struct{}, nodeManager *node.Manager) {
+func startCollectingStats(interruptCh <-chan struct{}, statusNode *node.StatusNode) {

 	logger.Info("Starting stats", "stats", *statsAddr)

-	node, err := nodeManager.Node()
+	node, err := statusNode.GethNode()
 	if err != nil {
 		logger.Error("Failed to run metrics because could not get node", "error", err)
 		return

@@ -332,7 +332,7 @@ Options:
 // haltOnInterruptSignal catches interrupt signal (SIGINT) and
 // stops the node. It times out after 5 seconds
 // if the node can not be stopped.
-func haltOnInterruptSignal(nodeManager *node.Manager) <-chan struct{} {
+func haltOnInterruptSignal(statusNode *node.StatusNode) <-chan struct{} {
 	interruptCh := make(chan struct{})
 	go func() {
 		signalCh := make(chan os.Signal, 1)

@@ -341,7 +341,7 @@ func haltOnInterruptSignal(nodeManager *node.Manager) <-chan struct{} {
 		<-signalCh
 		close(interruptCh)
 		logger.Info("Got interrupt, shutting down...")
-		if err := nodeManager.StopNode(); err != nil {
+		if err := statusNode.Stop(); err != nil {
 			logger.Error("Failed to stop node", "error", err)
 			os.Exit(1)
 		}

@@ -19,7 +19,7 @@ func createContextFromTimeout(timeout int) (context.Context, context.CancelFunc) {
 // It returns an exit code (`0` if successful or `1` in case of error)
 // that can be used in `os.Exit` to exit immediately when the function returns.
 // The special exit code `-1` is used if execution was interrupted.
-func syncAndStopNode(interruptCh <-chan struct{}, nodeManager *node.Manager, timeout int) (exitCode int) {
+func syncAndStopNode(interruptCh <-chan struct{}, statusNode *node.StatusNode, timeout int) (exitCode int) {

 	logger.Info("syncAndStopNode: node will synchronize the chain and exit", "timeoutInMins", timeout)

@@ -29,7 +29,7 @@ func syncAndStopNode(interruptCh <-chan struct{}, nodeManager *node.Manager, timeout int) (exitCode int) {
 	doneSync := make(chan struct{})
 	errSync := make(chan error)
 	go func() {
-		if err := nodeManager.EnsureSync(ctx); err != nil {
+		if err := statusNode.EnsureSync(ctx); err != nil {
 			errSync <- err
 		}
 		close(doneSync)

@@ -46,7 +46,7 @@ func syncAndStopNode(interruptCh <-chan struct{}, nodeManager *node.Manager, timeout int) (exitCode int) {
 		return -1
 	}

-	if err := nodeManager.StopNode(); err != nil {
+	if err := statusNode.Stop(); err != nil {
 		logger.Error("syncAndStopNode: failed to stop the node", "error", err)
 		return 1
 	}

@@ -34,9 +34,9 @@ func NewStatusAPIWithBackend(b *StatusBackend) *StatusAPI {
 	}
 }

-// NodeManager returns reference to node manager
-func (api *StatusAPI) NodeManager() *node.Manager {
-	return api.b.NodeManager()
+// StatusNode returns reference to StatusNode.
+func (api *StatusAPI) StatusNode() *node.StatusNode {
+	return api.b.StatusNode()
 }

 // AccountManager returns reference to account manager

@@ -33,7 +33,7 @@ var (
 // StatusBackend implements Status.im service
 type StatusBackend struct {
 	mu sync.Mutex
-	nodeManager *node.Manager
+	statusNode *node.StatusNode
 	accountManager *account.Manager
 	txQueueManager *transactions.Manager
 	jailManager jail.Manager

@@ -46,14 +46,14 @@ type StatusBackend struct {
 func NewStatusBackend() *StatusBackend {
 	defer log.Info("Status backend initialized")

-	nodeManager := node.NewManager()
-	accountManager := account.NewManager(nodeManager)
-	txQueueManager := transactions.NewManager(nodeManager)
-	jailManager := jail.New(nodeManager)
+	statusNode := node.New()
+	accountManager := account.NewManager(statusNode)
+	txQueueManager := transactions.NewManager(statusNode)
+	jailManager := jail.New(statusNode)
 	notificationManager := fcm.NewNotification(fcmServerKey)

 	return &StatusBackend{
-		nodeManager: nodeManager,
+		statusNode: statusNode,
 		accountManager: accountManager,
 		jailManager: jailManager,
 		txQueueManager: txQueueManager,

@@ -62,9 +62,9 @@ func NewStatusBackend() *StatusBackend {
 	}
 }

-// NodeManager returns reference to node manager
-func (b *StatusBackend) NodeManager() *node.Manager {
-	return b.nodeManager
+// StatusNode returns reference to a status node
+func (b *StatusBackend) StatusNode() *node.StatusNode {
+	return b.statusNode
 }

 // AccountManager returns reference to account manager

@@ -84,7 +84,7 @@ func (b *StatusBackend) TxQueueManager() *transactions.Manager {

 // IsNodeRunning confirm that node is running
 func (b *StatusBackend) IsNodeRunning() bool {
-	return b.nodeManager.IsNodeRunning()
+	return b.statusNode.IsRunning()
 }

 // StartNode start Status node, fails if node is already started

@@ -100,7 +100,7 @@ func (b *StatusBackend) startNode(config *params.NodeConfig) (err error) {
 			err = fmt.Errorf("node crashed on start: %v", err)
 		}
 	}()
-	err = b.nodeManager.StartNode(config)
+	err = b.statusNode.Start(config)
 	if err != nil {
 		switch err.(type) {
 		case node.RPCClientError:

@@ -145,7 +145,7 @@ func (b *StatusBackend) stopNode() error {
 	b.txQueueManager.Stop()
 	b.jailManager.Stop()
 	defer signal.Send(signal.Envelope{Type: signal.EventNodeStopped})
-	return b.nodeManager.StopNode()
+	return b.statusNode.Stop()
 }

 // RestartNode restart running Status node, fails if node is not running

@@ -153,7 +153,7 @@ func (b *StatusBackend) RestartNode() error {
 	if !b.IsNodeRunning() {
 		return node.ErrNoRunningNode
 	}
-	config, err := b.nodeManager.NodeConfig()
+	config, err := b.statusNode.Config()
 	if err != nil {
 		return err
 	}

@@ -169,7 +169,7 @@ func (b *StatusBackend) RestartNode() error {
 func (b *StatusBackend) ResetChainData() error {
 	b.mu.Lock()
 	defer b.mu.Unlock()
-	config, err := b.nodeManager.NodeConfig()
+	config, err := b.statusNode.Config()
 	if err != nil {
 		return err
 	}

@@ -178,7 +178,7 @@ func (b *StatusBackend) ResetChainData() error {
 		return err
 	}
 	// config is cleaned when node is stopped
-	if err := b.nodeManager.ResetChainData(&newcfg); err != nil {
+	if err := b.statusNode.ResetChainData(&newcfg); err != nil {
 		return err
 	}
 	signal.Send(signal.Envelope{Type: signal.EventChainDataRemoved})

@@ -187,7 +187,7 @@ func (b *StatusBackend) ResetChainData() error {

 // CallRPC executes RPC request on node's in-proc RPC server
 func (b *StatusBackend) CallRPC(inputJSON string) string {
-	client := b.nodeManager.RPCClient()
+	client := b.statusNode.RPCClient()
 	return client.CallRaw(inputJSON)
 }

@@ -213,7 +213,7 @@ func (b *StatusBackend) getVerifiedAccount(password string) (*account.SelectedEx
 		b.log.Error("failed to get a selected account", "err", err)
 		return nil, err
 	}
-	config, err := b.NodeManager().NodeConfig()
+	config, err := b.StatusNode().Config()
 	if err != nil {
 		return nil, err
 	}

@@ -269,7 +269,7 @@ func (b *StatusBackend) DiscardTransactions(ids []string) map[string]error {

 // registerHandlers attaches Status callback handlers to running node
 func (b *StatusBackend) registerHandlers() error {
-	rpcClient := b.NodeManager().RPCClient()
+	rpcClient := b.StatusNode().RPCClient()
 	if rpcClient == nil {
 		return node.ErrRPCClient
 	}

@@ -299,7 +299,7 @@ func (b *StatusBackend) AppStateChange(state AppState) {

 // Logout clears whisper identities.
 func (b *StatusBackend) Logout() error {
-	whisperService, err := b.nodeManager.WhisperService()
+	whisperService, err := b.statusNode.WhisperService()
 	if err != nil {
 		return err
 	}

@@ -317,7 +317,7 @@ func (b *StatusBackend) ReSelectAccount() error {
 	if selectedAccount == nil || err == account.ErrNoAccountSelected {
 		return nil
 	}
-	whisperService, err := b.nodeManager.WhisperService()
+	whisperService, err := b.statusNode.WhisperService()
 	if err != nil {
 		return err
 	}

@@ -341,7 +341,7 @@ func (b *StatusBackend) SelectAccount(address, password string) error {
 		return err
 	}

-	whisperService, err := b.nodeManager.WhisperService()
+	whisperService, err := b.statusNode.WhisperService()
 	if err != nil {
 		return err
 	}

@@ -9,7 +9,7 @@ import (

 // ServiceProvider provides node and required services.
 type ServiceProvider interface {
-	Node() (*node.Node, error)
+	GethNode() (*node.Node, error)
 	WhisperService() (*whisper.Whisper, error)
 }

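The interface rename above matters because `StatusNode` is what backs this provider: it exposes `GethNode()` and `WhisperService()` with exactly these signatures. A compile-time check of that relationship could be written as below (a hypothetical assertion for illustration, not part of the diff; in the real tree it would have to live somewhere that avoids an import cycle):

```go
// *node.StatusNode implements mailservice.ServiceProvider.
var _ mailservice.ServiceProvider = (*node.StatusNode)(nil)
```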
@@ -80,7 +80,7 @@ func (api *PublicAPI) RequestMessages(_ context.Context, r MessagesRequest) (bool, error) {
 		return false, err
 	}

-	node, err := api.provider.Node()
+	node, err := api.provider.GethNode()
 	if err != nil {
 		return false, err
 	}

@@ -43,14 +43,14 @@ func TestRequestMessagesFailures(t *testing.T) {

 	// invalid MailServer enode address
 	provider.EXPECT().WhisperService().Return(nil, nil)
-	provider.EXPECT().Node().Return(nil, nil)
+	provider.EXPECT().GethNode().Return(nil, nil)
 	result, err = api.RequestMessages(context.TODO(), MessagesRequest{MailServerPeer: "invalid-address"})
 	require.False(t, result)
 	require.EqualError(t, err, "invalid mailServerPeer value: invalid URL scheme, want \"enode\"")

 	// non-existent symmetric key
 	provider.EXPECT().WhisperService().Return(shh, nil)
-	provider.EXPECT().Node().Return(nil, nil)
+	provider.EXPECT().GethNode().Return(nil, nil)
 	result, err = api.RequestMessages(context.TODO(), MessagesRequest{
 		MailServerPeer: mailServerPeer,
 	})

@@ -61,7 +61,7 @@
 	symKeyID, symKeyErr := shh.AddSymKeyFromPassword("some-pass")
 	require.NoError(t, symKeyErr)
 	provider.EXPECT().WhisperService().Return(shh, nil)
-	provider.EXPECT().Node().Return(nodeA, nil)
+	provider.EXPECT().GethNode().Return(nodeA, nil)
 	result, err = api.RequestMessages(context.TODO(), MessagesRequest{
 		MailServerPeer: mailServerPeer,
 		SymKeyID: symKeyID,

@@ -5,10 +5,11 @@
 package mailservice

 import (
-	reflect "reflect"
-
 	node "github.com/ethereum/go-ethereum/node"
 	whisperv6 "github.com/ethereum/go-ethereum/whisper/whisperv6"
 	gomock "github.com/golang/mock/gomock"
+	reflect "reflect"
 )

 // MockServiceProvider is a mock of ServiceProvider interface

@@ -34,17 +35,17 @@ func (m *MockServiceProvider) EXPECT() *MockServiceProviderMockRecorder {
 	return m.recorder
 }

-// Node mocks base method
-func (m *MockServiceProvider) Node() (*node.Node, error) {
-	ret := m.ctrl.Call(m, "Node")
+// GethNode mocks base method
+func (m *MockServiceProvider) GethNode() (*node.Node, error) {
+	ret := m.ctrl.Call(m, "GethNode")
 	ret0, _ := ret[0].(*node.Node)
 	ret1, _ := ret[1].(error)
 	return ret0, ret1
 }

-// Node indicates an expected call of Node
-func (mr *MockServiceProviderMockRecorder) Node() *gomock.Call {
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Node", reflect.TypeOf((*MockServiceProvider)(nil).Node))
+// GethNode indicates an expected call of GethNode
+func (mr *MockServiceProviderMockRecorder) GethNode() *gomock.Call {
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GethNode", reflect.TypeOf((*MockServiceProvider)(nil).GethNode))
 }

 // WhisperService mocks base method

@@ -119,8 +119,6 @@ func defaultEmbeddedNodeConfig(config *params.NodeConfig) *node.Config {
 	}

 	if config.ClusterConfig.Enabled {
-		// TODO(themue) Should static nodes always be set? Had been done via
-		// PopulateStaticPeers() before.
 		nc.P2P.StaticNodes = parseNodes(config.ClusterConfig.StaticNodes)
 		nc.P2P.BootstrapNodes = parseNodes(config.ClusterConfig.BootNodes)
 	}

@@ -26,7 +26,7 @@ import (
 var (
 	ErrNodeExists = errors.New("node is already running")
 	ErrNoRunningNode = errors.New("there is no running node")
-	ErrInvalidNodeManager = errors.New("node manager is not properly initialized")
+	ErrInvalidStatusNode = errors.New("status node is not properly initialized")
 	ErrInvalidWhisperService = errors.New("whisper service is unavailable")
 	ErrInvalidLightEthereumService = errors.New("LES service is unavailable")
 	ErrInvalidAccountManager = errors.New("could not retrieve account manager")

@@ -40,10 +40,9 @@ type RPCClientError error
 // EthNodeError is reported when node crashed on start up.
 type EthNodeError error

-// Manager manages Status node (which abstracts contained geth node)
-// nolint: golint
-// should be fixed at https://github.com/status-im/status-go/issues/200
-type Manager struct {
+// StatusNode abstracts contained geth node and provides helper methods to
+// interact with it.
+type StatusNode struct {
 	mu sync.RWMutex
 	config *params.NodeConfig // Status node configuration
 	node *node.Node // reference to Geth P2P stack/node

@@ -54,23 +53,23 @@ type Manager struct {
 	log log.Logger
 }

-// NewManager makes new instance of node manager
-func NewManager() *Manager {
-	return &Manager{
-		log: log.New("package", "status-go/geth/node.Manager"),
+// New makes new instance of StatusNode.
+func New() *StatusNode {
+	return &StatusNode{
+		log: log.New("package", "status-go/geth/node.StatusNode"),
 	}
 }

-// StartNode start Status node, fails if node is already started
-func (m *Manager) StartNode(config *params.NodeConfig) error {
-	m.mu.Lock()
-	defer m.mu.Unlock()
-	return m.startNode(config)
+// Start starts current StatusNode; it fails if the node is already started.
+func (n *StatusNode) Start(config *params.NodeConfig) error {
+	n.mu.Lock()
+	defer n.mu.Unlock()
+	return n.start(config)
 }

-// startNode start Status node, fails if node is already started
-func (m *Manager) startNode(config *params.NodeConfig) error {
-	if err := m.isNodeAvailable(); err == nil {
+// start starts current StatusNode; it fails if the node is already started.
+func (n *StatusNode) start(config *params.NodeConfig) error {
+	if err := n.isAvailable(); err == nil {
 		return ErrNodeExists
 	}

@@ -78,12 +77,12 @@ func (m *Manager) startNode(config *params.NodeConfig) error {
 	if err != nil {
 		return err
 	}
-	m.node = ethNode
-	m.config = config
+	n.node = ethNode
+	n.config = config

 	// activate MailService required for Offline Inboxing
 	if err := ethNode.Register(func(_ *node.ServiceContext) (node.Service, error) {
-		return mailservice.New(m), nil
+		return mailservice.New(n), nil
 	}); err != nil {
 		return err
 	}

@@ -93,47 +92,47 @@ func (m *Manager) startNode(config *params.NodeConfig) error {
 		return EthNodeError(err)
 	}
 	// init RPC client for this node
-	localRPCClient, err := m.node.Attach()
+	localRPCClient, err := n.node.Attach()
 	if err == nil {
-		m.rpcClient, err = rpc.NewClient(localRPCClient, m.config.UpstreamConfig)
+		n.rpcClient, err = rpc.NewClient(localRPCClient, n.config.UpstreamConfig)
 	}
 	if err != nil {
-		m.log.Error("Failed to create an RPC client", "error", err)
+		n.log.Error("Failed to create an RPC client", "error", err)
 		return RPCClientError(err)
 	}
 	return nil
 }

-// StopNode stop Status node. Stopped node cannot be resumed.
-func (m *Manager) StopNode() error {
-	m.mu.Lock()
-	defer m.mu.Unlock()
-	return m.stopNode()
+// Stop will stop current StatusNode. A stopped node cannot be resumed.
+func (n *StatusNode) Stop() error {
+	n.mu.Lock()
+	defer n.mu.Unlock()
+	return n.stop()
 }

-// stopNode stop Status node. Stopped node cannot be resumed.
-func (m *Manager) stopNode() error {
-	if err := m.isNodeAvailable(); err != nil {
+// stop will stop current StatusNode. A stopped node cannot be resumed.
+func (n *StatusNode) stop() error {
+	if err := n.isAvailable(); err != nil {
 		return err
 	}
-	if err := m.node.Stop(); err != nil {
+	if err := n.node.Stop(); err != nil {
 		return err
 	}
-	m.node = nil
-	m.config = nil
-	m.lesService = nil
-	m.whisperService = nil
-	m.rpcClient = nil
+	n.node = nil
+	n.config = nil
+	n.lesService = nil
+	n.whisperService = nil
+	n.rpcClient = nil
 	return nil
 }

 // ResetChainData removes chain data if node is not running.
-func (m *Manager) ResetChainData(config *params.NodeConfig) error {
-	if m.IsNodeRunning() {
+func (n *StatusNode) ResetChainData(config *params.NodeConfig) error {
+	if n.IsRunning() {
 		return ErrNodeExists
 	}
-	m.mu.Lock()
-	defer m.mu.Unlock()
+	n.mu.Lock()
+	defer n.mu.Unlock()
 	chainDataDir := filepath.Join(config.DataDir, config.Name, "lightchaindata")
 	if _, err := os.Stat(chainDataDir); os.IsNotExist(err) {
 		// is it really an error, if we want to remove it as next step?

@@ -141,190 +140,183 @@ func (m *Manager) ResetChainData(config *params.NodeConfig) error {
 	}
 	err := os.RemoveAll(chainDataDir)
 	if err == nil {
-		m.log.Info("Chain data has been removed", "dir", chainDataDir)
+		n.log.Info("Chain data has been removed", "dir", chainDataDir)
 	}
 	return err
 }

-// IsNodeRunning confirm that node is running
-func (m *Manager) IsNodeRunning() bool {
-	m.mu.RLock()
-	defer m.mu.RUnlock()
+// IsRunning confirms that node is running.
+func (n *StatusNode) IsRunning() bool {
+	n.mu.RLock()
+	defer n.mu.RUnlock()

-	if err := m.isNodeAvailable(); err != nil {
+	if err := n.isAvailable(); err != nil {
 		return false
 	}
 	return true
 }

-// Node returns underlying Status node
-func (m *Manager) Node() (*node.Node, error) {
-	m.mu.RLock()
-	defer m.mu.RUnlock()
+// GethNode returns underlying geth node.
+func (n *StatusNode) GethNode() (*node.Node, error) {
+	n.mu.RLock()
+	defer n.mu.RUnlock()

-	if err := m.isNodeAvailable(); err != nil {
+	if err := n.isAvailable(); err != nil {
 		return nil, err
 	}
-	return m.node, nil
-}
-
-// PopulateStaticPeers connects current node with our publicly available LES/SHH/Swarm cluster
-func (m *Manager) PopulateStaticPeers() error {
-	m.mu.RLock()
-	defer m.mu.RUnlock()
-	return m.populateStaticPeers()
+	return n.node, nil
 }

 // populateStaticPeers connects current node with our publicly available LES/SHH/Swarm cluster
-func (m *Manager) populateStaticPeers() error {
-	if err := m.isNodeAvailable(); err != nil {
+func (n *StatusNode) populateStaticPeers() error {
+	if err := n.isAvailable(); err != nil {
 		return err
 	}
-	if !m.config.ClusterConfig.Enabled {
-		m.log.Info("Static peers are disabled")
+	if !n.config.ClusterConfig.Enabled {
+		n.log.Info("Static peers are disabled")
 		return nil
 	}

-	for _, enode := range m.config.ClusterConfig.StaticNodes {
-		err := m.addPeer(enode)
+	for _, enode := range n.config.ClusterConfig.StaticNodes {
+		err := n.addPeer(enode)
 		if err != nil {
-			m.log.Warn("Static peer addition failed", "error", err)
+			n.log.Warn("Static peer addition failed", "error", err)
 			continue
 		}
-		m.log.Info("Static peer added", "enode", enode)
+		n.log.Info("Static peer added", "enode", enode)
 	}

 	return nil
 }

-func (m *Manager) removeStaticPeers() error {
-	if !m.config.ClusterConfig.Enabled {
-		m.log.Info("Static peers are disabled")
+func (n *StatusNode) removeStaticPeers() error {
+	if !n.config.ClusterConfig.Enabled {
+		n.log.Info("Static peers are disabled")
 		return nil
 	}
-	server := m.node.Server()
+	server := n.node.Server()
 	if server == nil {
 		return ErrNoRunningNode
 	}
-	for _, enode := range m.config.ClusterConfig.StaticNodes {
-		err := m.removePeer(enode)
+	for _, enode := range n.config.ClusterConfig.StaticNodes {
+		err := n.removePeer(enode)
 		if err != nil {
-			m.log.Warn("Static peer deletion failed", "error", err)
+			n.log.Warn("Static peer deletion failed", "error", err)
 			return err
 		}
-		m.log.Info("Static peer deleted", "enode", enode)
+		n.log.Info("Static peer deleted", "enode", enode)
 	}
 	return nil
 }

 // ReconnectStaticPeers removes and adds static peers to a server.
-func (m *Manager) ReconnectStaticPeers() error {
-	m.mu.Lock()
-	defer m.mu.Unlock()
-	if err := m.removeStaticPeers(); err != nil {
+func (n *StatusNode) ReconnectStaticPeers() error {
+	n.mu.Lock()
+	defer n.mu.Unlock()
+	if err := n.removeStaticPeers(); err != nil {
 		return err
 	}
-	return m.populateStaticPeers()
+	return n.populateStaticPeers()
 }

 // AddPeer adds new static peer node
-func (m *Manager) AddPeer(url string) error {
-	m.mu.RLock()
-	defer m.mu.RUnlock()
-	if err := m.isNodeAvailable(); err != nil {
+func (n *StatusNode) AddPeer(url string) error {
+	n.mu.RLock()
+	defer n.mu.RUnlock()
+	if err := n.isAvailable(); err != nil {
 		return err
 	}
-	return m.addPeer(url)
+	return n.addPeer(url)
 }

 // addPeer adds new static peer node
-func (m *Manager) addPeer(url string) error {
+func (n *StatusNode) addPeer(url string) error {
 	// Try to add the url as a static peer and return
 	parsedNode, err := discover.ParseNode(url)
 	if err != nil {
 		return err
 	}
-	m.node.Server().AddPeer(parsedNode)
+	n.node.Server().AddPeer(parsedNode)
 	return nil
 }

-func (m *Manager) removePeer(url string) error {
+func (n *StatusNode) removePeer(url string) error {
 	parsedNode, err := discover.ParseNode(url)
 	if err != nil {
 		return err
 	}
-	m.node.Server().RemovePeer(parsedNode)
+	n.node.Server().RemovePeer(parsedNode)
 	return nil
 }

 // PeerCount returns the number of connected peers.
-func (m *Manager) PeerCount() int {
-	if !m.IsNodeRunning() {
+func (n *StatusNode) PeerCount() int {
+	if !n.IsRunning() {
 		return 0
 	}
-	return m.node.Server().PeerCount()
+	return n.node.Server().PeerCount()
 }

-// NodeConfig exposes reference to running node's configuration
-func (m *Manager) NodeConfig() (*params.NodeConfig, error) {
-	m.mu.RLock()
-	defer m.mu.RUnlock()
+// Config exposes reference to running node's configuration
+func (n *StatusNode) Config() (*params.NodeConfig, error) {
+	n.mu.RLock()
+	defer n.mu.RUnlock()

-	if err := m.isNodeAvailable(); err != nil {
+	if err := n.isAvailable(); err != nil {
 		return nil, err
 	}
-	return m.config, nil
+	return n.config, nil
 }

 // LightEthereumService exposes reference to LES service running on top of the node
-func (m *Manager) LightEthereumService() (*les.LightEthereum, error) {
-	m.mu.RLock()
-	defer m.mu.RUnlock()
+func (n *StatusNode) LightEthereumService() (*les.LightEthereum, error) {
+	n.mu.RLock()
+	defer n.mu.RUnlock()

-	if err := m.isNodeAvailable(); err != nil {
+	if err := n.isAvailable(); err != nil {
 		return nil, err
 	}
-	if m.lesService == nil {
-		if err := m.node.Service(&m.lesService); err != nil {
-			m.log.Warn("Cannot obtain LES service", "error", err)
+	if n.lesService == nil {
+		if err := n.node.Service(&n.lesService); err != nil {
+			n.log.Warn("Cannot obtain LES service", "error", err)
 			return nil, ErrInvalidLightEthereumService
 		}
 	}
-	if m.lesService == nil {
+	if n.lesService == nil {
 		return nil, ErrInvalidLightEthereumService
 	}
-	return m.lesService, nil
+	return n.lesService, nil
 }

 // WhisperService exposes reference to Whisper service running on top of the node
-func (m *Manager) WhisperService() (*whisper.Whisper, error) {
-	m.mu.RLock()
-	defer m.mu.RUnlock()
+func (n *StatusNode) WhisperService() (*whisper.Whisper, error) {
+	n.mu.RLock()
+	defer n.mu.RUnlock()

-	if err := m.isNodeAvailable(); err != nil {
+	if err := n.isAvailable(); err != nil {
 		return nil, err
 	}
-	if m.whisperService == nil {
-		if err := m.node.Service(&m.whisperService); err != nil {
-			m.log.Warn("Cannot obtain whisper service", "error", err)
+	if n.whisperService == nil {
+		if err := n.node.Service(&n.whisperService); err != nil {
+			n.log.Warn("Cannot obtain whisper service", "error", err)
 			return nil, ErrInvalidWhisperService
 		}
 	}
-	if m.whisperService == nil {
+	if n.whisperService == nil {
 		return nil, ErrInvalidWhisperService
 	}
-	return m.whisperService, nil
+	return n.whisperService, nil
 }

 // AccountManager exposes reference to node's accounts manager
-func (m *Manager) AccountManager() (*accounts.Manager, error) {
-	m.mu.RLock()
-	defer m.mu.RUnlock()
+func (n *StatusNode) AccountManager() (*accounts.Manager, error) {
+	n.mu.RLock()
+	defer n.mu.RUnlock()

-	if err := m.isNodeAvailable(); err != nil {
+	if err := n.isAvailable(); err != nil {
 		return nil, err
 	}
-	accountManager := m.node.AccountManager()
+	accountManager := n.node.AccountManager()
 	if accountManager == nil {
 		return nil, ErrInvalidAccountManager
 	}

@@ -332,14 +324,14 @@ func (m *Manager) AccountManager() (*accounts.Manager, error) {
 }

 // AccountKeyStore exposes reference to accounts key store
-func (m *Manager) AccountKeyStore() (*keystore.KeyStore, error) {
-	m.mu.RLock()
-	defer m.mu.RUnlock()
+func (n *StatusNode) AccountKeyStore() (*keystore.KeyStore, error) {
+	n.mu.RLock()
+	defer n.mu.RUnlock()

-	if err := m.isNodeAvailable(); err != nil {
+	if err := n.isAvailable(); err != nil {
 		return nil, err
 	}
-	accountManager := m.node.AccountManager()
+	accountManager := n.node.AccountManager()
 	if accountManager == nil {
 		return nil, ErrInvalidAccountManager
 	}

@@ -358,15 +350,15 @@ func (m *Manager) AccountKeyStore() (*keystore.KeyStore, error) {
 }

 // RPCClient exposes reference to RPC client connected to the running node.
-func (m *Manager) RPCClient() *rpc.Client {
-	m.mu.Lock()
-	defer m.mu.Unlock()
-	return m.rpcClient
+func (n *StatusNode) RPCClient() *rpc.Client {
+	n.mu.Lock()
+	defer n.mu.Unlock()
+	return n.rpcClient
 }

-// isNodeAvailable check if we have a node running and make sure is fully started
-func (m *Manager) isNodeAvailable() error {
-	if m.node == nil || m.node.Server() == nil {
+// isAvailable checks if we have a node running and makes sure it is fully started.
+func (n *StatusNode) isAvailable() error {
+	if n.node == nil || n.node.Server() == nil {
 		return ErrNoRunningNode
 	}
 	return nil

@@ -377,18 +369,18 @@ const tickerResolution = time.Second

 // EnsureSync waits until blockchain synchronization
 // is complete and returns.
-func (m *Manager) EnsureSync(ctx context.Context) error {
+func (n *StatusNode) EnsureSync(ctx context.Context) error {
 	// Don't wait for any blockchain sync for the
 	// local private chain as blocks are never mined.
-	if m.config.NetworkID == params.StatusChainNetworkID {
+	if n.config.NetworkID == params.StatusChainNetworkID {
 		return nil
 	}

-	return m.ensureSync(ctx)
+	return n.ensureSync(ctx)
 }

-func (m *Manager) ensureSync(ctx context.Context) error {
-	les, err := m.LightEthereumService()
+func (n *StatusNode) ensureSync(ctx context.Context) error {
+	les, err := n.LightEthereumService()
 	if err != nil {
 		return fmt.Errorf("failed to get LES service: %v", err)
 	}

@@ -399,8 +391,8 @@ func (m *Manager) ensureSync(ctx context.Context) error {
 	}

 	progress := downloader.Progress()
-	if m.PeerCount() > 0 && progress.CurrentBlock >= progress.HighestBlock {
-		m.log.Debug("Synchronization completed", "current block", progress.CurrentBlock, "highest block", progress.HighestBlock)
+	if n.PeerCount() > 0 && progress.CurrentBlock >= progress.HighestBlock {
+		n.log.Debug("Synchronization completed", "current block", progress.CurrentBlock, "highest block", progress.HighestBlock)
 		return nil
 	}

@@ -415,23 +407,23 @@ func (m *Manager) ensureSync(ctx context.Context) error {
 		case <-ctx.Done():
 			return errors.New("timeout during node synchronization")
 		case <-ticker.C:
-			if m.PeerCount() == 0 {
-				m.log.Debug("No established connections with any peers, continue waiting for a sync")
+			if n.PeerCount() == 0 {
+				n.log.Debug("No established connections with any peers, continue waiting for a sync")
 				continue
 			}
 			if downloader.Synchronising() {
-				m.log.Debug("Synchronization is in progress")
+				n.log.Debug("Synchronization is in progress")
 				continue
 			}
 			progress = downloader.Progress()
 			if progress.CurrentBlock >= progress.HighestBlock {
-				m.log.Info("Synchronization completed", "current block", progress.CurrentBlock, "highest block", progress.HighestBlock)
+				n.log.Info("Synchronization completed", "current block", progress.CurrentBlock, "highest block", progress.HighestBlock)
 				return nil
 			}
-			m.log.Debug("Synchronization is not finished", "current", progress.CurrentBlock, "highest", progress.HighestBlock)
+			n.log.Debug("Synchronization is not finished", "current", progress.CurrentBlock, "highest", progress.HighestBlock)
 		case <-progressTicker.C:
 			progress = downloader.Progress()
-			m.log.Warn("Synchronization is not finished", "current", progress.CurrentBlock, "highest", progress.HighestBlock)
+			n.log.Warn("Synchronization is not finished", "current", progress.CurrentBlock, "highest", progress.HighestBlock)
 		}
 	}
 }

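Taken together, the renamed type reads naturally at call sites. A minimal lifecycle sketch under the new API, assuming `config` is a valid `*params.NodeConfig` built elsewhere (a hypothetical caller, not part of the commit):

```go
// runOnce is a hypothetical helper illustrating the new StatusNode lifecycle.
func runOnce(config *params.NodeConfig) error {
	statusNode := node.New()

	if err := statusNode.Start(config); err != nil {
		return err // a second Start on a running node returns node.ErrNodeExists
	}

	// Accessors guard themselves: on a stopped (or never started) node
	// they return node.ErrNoRunningNode.
	if whisperService, err := statusNode.WhisperService(); err == nil {
		_ = whisperService // interact with Whisper here
	}

	return statusNode.Stop() // Stop on a stopped node returns node.ErrNoRunningNode
}
```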
@@ -445,7 +445,7 @@ func NotifyUsers(message, payloadJSON, tokensArray *C.char) (outCBytes *C.char)
 // AddPeer adds an enode as a peer.
 //export AddPeer
 func AddPeer(enode *C.char) *C.char {
-	err := statusAPI.NodeManager().AddPeer(C.GoString(enode))
+	err := statusAPI.StatusNode().AddPeer(C.GoString(enode))
 	return makeJSONResponse(err)
 }

@@ -255,7 +255,7 @@ func testResetChainData(t *testing.T) bool {
 		return false
 	}

-	EnsureNodeSync(statusAPI.NodeManager())
+	EnsureNodeSync(statusAPI.StatusNode())
 	testCompleteTransaction(t)

 	return true

@@ -267,7 +267,7 @@ func testStopResumeNode(t *testing.T) bool { //nolint: gocyclo
 		t.Fatal(err)
 	}

-	whisperService, err := statusAPI.NodeManager().WhisperService()
+	whisperService, err := statusAPI.StatusNode().WhisperService()
 	if err != nil {
 		t.Errorf("whisper service not running: %v", err)
 	}

@@ -354,7 +354,7 @@ func testStopResumeNode(t *testing.T) bool { //nolint: gocyclo
 	time.Sleep(10 * time.Second) // allow to start (instead of using blocking version of start, of filter event)

 	// now, verify that we still have account logged in
-	whisperService, err = statusAPI.NodeManager().WhisperService()
+	whisperService, err = statusAPI.StatusNode().WhisperService()
 	if err != nil {
 		t.Errorf("whisper service not running: %v", err)
 	}

@@ -386,7 +386,7 @@ func testCreateChildAccount(t *testing.T) bool { //nolint: gocyclo
 		t.Fatal(err)
 	}

-	keyStore, err := statusAPI.NodeManager().AccountKeyStore()
+	keyStore, err := statusAPI.StatusNode().AccountKeyStore()
 	if err != nil {
 		t.Error(err)
 		return false

@@ -517,7 +517,7 @@ func testCreateChildAccount(t *testing.T) bool { //nolint: gocyclo
 }

 func testRecoverAccount(t *testing.T) bool { //nolint: gocyclo
-	keyStore, _ := statusAPI.NodeManager().AccountKeyStore()
+	keyStore, _ := statusAPI.StatusNode().AccountKeyStore()

 	// create an account
 	address, pubKey, mnemonic, err := statusAPI.CreateAccount(TestConfig.Account1.Password)

@@ -608,7 +608,7 @@ func testRecoverAccount(t *testing.T) bool { //nolint: gocyclo
 	}

 	// time to login with recovered data
-	whisperService, err := statusAPI.NodeManager().WhisperService()
+	whisperService, err := statusAPI.StatusNode().WhisperService()
 	if err != nil {
 		t.Errorf("whisper service not running: %v", err)
 	}

@@ -631,7 +631,7 @@ func testRecoverAccount(t *testing.T) bool { //nolint: gocyclo

 func testAccountSelect(t *testing.T) bool { //nolint: gocyclo
 	// test to see if the account was injected in whisper
-	whisperService, err := statusAPI.NodeManager().WhisperService()
+	whisperService, err := statusAPI.StatusNode().WhisperService()
 	if err != nil {
 		t.Errorf("whisper service not running: %v", err)
 	}

@@ -714,7 +714,7 @@ func testAccountSelect(t *testing.T) bool { //nolint: gocyclo
 }

 func testAccountLogout(t *testing.T) bool {
-	whisperService, err := statusAPI.NodeManager().WhisperService()
+	whisperService, err := statusAPI.StatusNode().WhisperService()
 	if err != nil {
 		t.Errorf("whisper service not running: %v", err)
 		return false

@@ -771,7 +771,7 @@ func testCompleteTransaction(t *testing.T) bool {
 	txQueue := txQueueManager.TransactionQueue()

 	txQueue.Reset()
-	EnsureNodeSync(statusAPI.NodeManager())
+	EnsureNodeSync(statusAPI.StatusNode())

 	// log into account from which transactions will be sent
 	if err := statusAPI.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password); err != nil {

@@ -1439,7 +1439,7 @@ func startTestNode(t *testing.T) <-chan struct{} {
 	// sync
 	if syncRequired {
 		t.Logf("Sync is required")
-		EnsureNodeSync(statusAPI.NodeManager())
+		EnsureNodeSync(statusAPI.StatusNode())
 	} else {
 		time.Sleep(5 * time.Second)
 	}

@@ -57,11 +57,15 @@ func consumeUntil(events <-chan *p2p.PeerEvent, f func(ev *p2p.PeerEvent) bool,
 	}
 }

+// TestStaticPeersReconnect tests how long it takes to reconnect with
+// peers after losing connection. This is something we will have to support
+// in order for mobile devices to reconnect fast if network connectivity
+// is lost for ~30s.
 func (s *PeersTestSuite) TestStaticPeersReconnect() {
 	// both on rinkeby and ropsten we can expect at least 2 peers connected
 	expectedPeersCount := 2
 	events := make(chan *p2p.PeerEvent, 10)
-	node, err := s.backend.NodeManager().Node()
+	node, err := s.backend.StatusNode().GethNode()
 	s.Require().NoError(err)

 	subscription := node.Server().SubscribeEvents(events)

@@ -92,7 +96,7 @@ func (s *PeersTestSuite) TestStaticPeersReconnect() {
 	s.Require().NoError(s.controller.Disable())
 	before = time.Now()
 	go func() {
-		s.NoError(s.backend.NodeManager().ReconnectStaticPeers())
+		s.NoError(s.backend.StatusNode().ReconnectStaticPeers())
 	}()
 	s.Require().NoError(consumeUntil(events, func(ev *p2p.PeerEvent) bool {
 		log.Info("tests", "event", ev)

@@ -76,7 +76,7 @@ func (s *SyncTestSuite) consumeExpectedEvent(subscription *event.TypeMuxSubscription) {
 }

 func (s *SyncTestSuite) TestSyncChain() {
-	les, err := s.backend.NodeManager().LightEthereumService()
+	les, err := s.backend.StatusNode().LightEthereumService()
 	s.Require().NoError(err)
 	subscription := les.EventMux().Subscribe(
 		downloader.StartEvent{}, downloader.DoneEvent{}, downloader.FailedEvent{})

@@ -26,7 +26,7 @@ func (s *AccountsTestSuite) TestRPCEthAccounts() {
 	err := s.Backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password)
 	s.NoError(err)

-	rpcClient := s.Backend.NodeManager().RPCClient()
+	rpcClient := s.Backend.StatusNode().RPCClient()
 	s.NotNil(rpcClient)

 	expectedResponse := `{"jsonrpc":"2.0","id":1,"result":["` + strings.ToLower(TestConfig.Account1.Address) + `"]}`

@@ -53,7 +53,7 @@ func (s *AccountsTestSuite) TestRPCEthAccountsWithUpstream() {
 	err = s.Backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password)
 	s.NoError(err)

-	rpcClient := s.Backend.NodeManager().RPCClient()
+	rpcClient := s.Backend.StatusNode().RPCClient()
 	s.NotNil(rpcClient)

 	expectedResponse := `{"jsonrpc":"2.0","id":1,"result":["` + strings.ToLower(TestConfig.Account1.Address) + `"]}`

@@ -84,7 +84,7 @@ func (s *AccountsTestSuite) TestCreateChildAccount() {
 	s.StartTestBackend()
 	defer s.StopTestBackend()

-	keyStore, err := s.Backend.NodeManager().AccountKeyStore()
+	keyStore, err := s.Backend.StatusNode().AccountKeyStore()
 	s.NoError(err)
 	s.NotNil(keyStore)

@@ -132,7 +132,7 @@ func (s *AccountsTestSuite) TestRecoverAccount() {
 	s.StartTestBackend()
 	defer s.StopTestBackend()

-	keyStore, err := s.Backend.NodeManager().AccountKeyStore()
+	keyStore, err := s.Backend.StatusNode().AccountKeyStore()
 	s.NoError(err)

 	// create an acc

@@ -219,7 +219,7 @@ func (s *AccountsTestSuite) TestSelectedAccountOnRestart() {
 	s.NoError(s.Backend.SelectAccount(address2, TestConfig.Account1.Password))

 	// stop node (and all of its sub-protocols)
-	nodeConfig, err := s.Backend.NodeManager().NodeConfig()
+	nodeConfig, err := s.Backend.StatusNode().Config()
 	s.NoError(err)
 	preservedNodeConfig := *nodeConfig
 	s.NoError(s.Backend.StopNode())

@@ -26,7 +26,7 @@ type APIBackendTestSuite struct {
 }

 // FIXME(tiabc): There's also a test with the same name in geth/node/manager_test.go
-// so this test should only check StatusBackend logic with a mocked version of the underlying NodeManager.
+// so this test should only check StatusBackend logic with a mocked version of the underlying StatusNode.
 func (s *APIBackendTestSuite) TestRaceConditions() {
 	require := s.Require()
 	require.NotNil(s.Backend)

@@ -69,11 +69,11 @@ func (s *APIBackendTestSuite) TestRaceConditions() {
 			progress <- struct{}{}
 		},
 		func(config *params.NodeConfig) {
-			log.Info("NodeManager()")
-			instance := s.Backend.NodeManager()
+			log.Info("StatusNode()")
+			instance := s.Backend.StatusNode()
 			s.NotNil(instance)
-			s.IsType(&node.Manager{}, instance)
-			s.T().Logf("NodeManager(), result: %v", instance)
+			s.IsType(&node.StatusNode{}, instance)
+			s.T().Logf("StatusNode(), result: %v", instance)
 			progress <- struct{}{}
 		},
 		func(config *params.NodeConfig) {

@@ -188,7 +188,7 @@ func (s *APIBackendTestSuite) TestRaceConditions() {
 }

 // FIXME(tiabc): There's also a test with the same name in geth/node/manager_test.go
-// so this test should only check StatusBackend logic with a mocked version of the underlying NodeManager.
+// so this test should only check StatusBackend logic with a mocked version of the underlying StatusNode.
 func (s *APIBackendTestSuite) TestNetworkSwitching() {
 	// get Ropsten config
 	nodeConfig, err := MakeTestNodeConfig(GetNetworkID())

@@ -198,7 +198,7 @@ func (s *APIBackendTestSuite) TestNetworkSwitching() {
 	s.NoError(s.Backend.StartNode(nodeConfig))
 	s.True(s.Backend.IsNodeRunning())

-	firstHash, err := e2e.FirstBlockHash(s.Backend.NodeManager())
+	firstHash, err := e2e.FirstBlockHash(s.Backend.StatusNode())
 	s.NoError(err)
 	s.Equal(GetHeadHash(), firstHash)

@@ -214,7 +214,7 @@ func (s *APIBackendTestSuite) TestNetworkSwitching() {
 	s.True(s.Backend.IsNodeRunning())

 	// make sure we are on another network indeed
-	firstHash, err = e2e.FirstBlockHash(s.Backend.NodeManager())
+	firstHash, err = e2e.FirstBlockHash(s.Backend.StatusNode())
 	s.NoError(err)
 	s.Equal(GetHeadHash(), firstHash)

@@ -234,20 +234,20 @@ func (s *APIBackendTestSuite) TestResetChainData() {
 	s.StartTestBackend(e2e.WithDataDir(path))
 	defer s.StopTestBackend()

-	EnsureNodeSync(s.Backend.NodeManager())
+	EnsureNodeSync(s.Backend.StatusNode())

 	require.NoError(s.Backend.ResetChainData())

 	s.True(s.Backend.IsNodeRunning()) // new node, with previous config should be running

 	// make sure we can read the first byte, and it is valid (for Rinkeby)
-	firstHash, err := e2e.FirstBlockHash(s.Backend.NodeManager())
+	firstHash, err := e2e.FirstBlockHash(s.Backend.StatusNode())
 	s.NoError(err)
 	s.Equal(GetHeadHash(), firstHash)
 }

 // FIXME(tiabc): There's also a test with the same name in geth/node/manager_test.go
-// so this test should only check StatusBackend logic with a mocked version of the underlying NodeManager.
+// so this test should only check StatusBackend logic with a mocked version of the underlying StatusNode.
 func (s *APIBackendTestSuite) TestRestartNode() {
 	require := s.Require()
 	require.NotNil(s.Backend)

@@ -260,7 +260,7 @@ func (s *APIBackendTestSuite) TestRestartNode() {
 	s.NoError(s.Backend.StartNode(nodeConfig))
 	s.True(s.Backend.IsNodeRunning())

-	firstHash, err := e2e.FirstBlockHash(s.Backend.NodeManager())
+	firstHash, err := e2e.FirstBlockHash(s.Backend.StatusNode())
 	s.NoError(err)
 	s.Equal(GetHeadHash(), firstHash)

@@ -269,7 +269,7 @@ func (s *APIBackendTestSuite) TestRestartNode() {
 	s.True(s.Backend.IsNodeRunning()) // new node, with previous config should be running

 	// make sure we can read the first byte, and it is valid (for Rinkeby)
-	firstHash, err = e2e.FirstBlockHash(s.Backend.NodeManager())
+	firstHash, err = e2e.FirstBlockHash(s.Backend.StatusNode())
 	s.NoError(err)
 	s.Equal(GetHeadHash(), firstHash)
 }

@@ -39,7 +39,7 @@ func (s *JailRPCTestSuite) TestJailRPCSend() {
 	s.StartTestBackend()
 	defer s.StopTestBackend()

-	EnsureNodeSync(s.Backend.NodeManager())
+	EnsureNodeSync(s.Backend.StatusNode())

 	// load Status JS and add test command to it
 	s.jail.SetBaseJS(baseStatusJSCode)

@@ -97,7 +97,7 @@ func (s *JailRPCTestSuite) TestRegressionGetTransactionReceipt() {
 	s.StartTestBackend()
 	defer s.StopTestBackend()

-	rpcClient := s.Backend.NodeManager().RPCClient()
+	rpcClient := s.Backend.StatusNode().RPCClient()
 	s.NotNil(rpcClient)

 	// note: transaction hash is assumed to be invalid

@@ -110,7 +110,7 @@ func (s *JailRPCTestSuite) TestContractDeployment() {
 	s.StartTestBackend()
 	defer s.StopTestBackend()

-	EnsureNodeSync(s.Backend.NodeManager())
+	EnsureNodeSync(s.Backend.StatusNode())

 	// obtain VM for a given chat (to send custom JS to jailed version of Send())
 	s.jail.CreateAndInitCell(testChatID)

@@ -193,7 +193,7 @@ func (s *JailRPCTestSuite) TestJailVMPersistence() {
 	s.StartTestBackend()
 	defer s.StopTestBackend()

-	EnsureNodeSync(s.Backend.NodeManager())
+	EnsureNodeSync(s.Backend.StatusNode())

 	// log into account from which transactions will be sent
 	err := s.Backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password)

@@ -31,13 +31,13 @@ func TestJailTestSuite(t *testing.T) {
 }

 type JailTestSuite struct {
-	e2e.NodeManagerTestSuite
+	e2e.StatusNodeTestSuite
 	Jail jail.Manager
 }

 func (s *JailTestSuite) SetupTest() {
-	s.NodeManager = node.NewManager()
-	s.Jail = jail.New(s.NodeManager)
+	s.StatusNode = node.New()
+	s.Jail = jail.New(s.StatusNode)
 }

 func (s *JailTestSuite) TearDownTest() {

@@ -23,11 +23,11 @@ func TestManagerTestSuite(t *testing.T) {
 }

 type ManagerTestSuite struct {
-	e2e.NodeManagerTestSuite
+	e2e.StatusNodeTestSuite
 }

 func (s *ManagerTestSuite) SetupTest() {
-	s.NodeManager = node.NewManager()
+	s.StatusNode = node.New()
 }

 func (s *ManagerTestSuite) TestReferencesWithoutStartedNode() {

@@ -36,66 +36,59 @@ func (s *ManagerTestSuite) TestReferencesWithoutStartedNode() {
 		initFn func() (interface{}, error)
 		expectedErr error
 	}{
-		{
-			"non-null manager, no running node, PopulateStaticPeers()",
-			func() (interface{}, error) {
-				return nil, s.NodeManager.PopulateStaticPeers()
-			},
-			node.ErrNoRunningNode,
-		},
 		{
 			"non-null manager, no running node, AddPeer()",
 			func() (interface{}, error) {
-				return nil, s.NodeManager.AddPeer("enode://da3bf389a031f33fb55c9f5f54fde8473912402d27fffaa50efd74c0d0515f3a61daf6d52151f2876b19c15828e6f670352bff432b5ec457652e74755e8c864f@51.15.62.116:30303")
+				return nil, s.StatusNode.AddPeer("enode://da3bf389a031f33fb55c9f5f54fde8473912402d27fffaa50efd74c0d0515f3a61daf6d52151f2876b19c15828e6f670352bff432b5ec457652e74755e8c864f@51.15.62.116:30303")
 			},
 			node.ErrNoRunningNode,
 		},
 		{
 			"non-null manager, no running node, get NodeConfig",
 			func() (interface{}, error) {
-				return s.NodeManager.NodeConfig()
+				return s.StatusNode.Config()
 			},
 			node.ErrNoRunningNode,
 		},
 		{
 			"non-null manager, no running node, get Node",
 			func() (interface{}, error) {
-				return s.NodeManager.Node()
+				return s.StatusNode.GethNode()
 			},
 			node.ErrNoRunningNode,
 		},
 		{
 			"non-null manager, no running node, get LES",
 			func() (interface{}, error) {
-				return s.NodeManager.LightEthereumService()
+				return s.StatusNode.LightEthereumService()
 			},
 			node.ErrNoRunningNode,
 		},
 		{
 			"non-null manager, no running node, get Whisper",
 			func() (interface{}, error) {
-				return s.NodeManager.WhisperService()
+				return s.StatusNode.WhisperService()
 			},
 			node.ErrNoRunningNode,
 		},
 		{
 			"non-null manager, no running node, get AccountManager",
 			func() (interface{}, error) {
-				return s.NodeManager.AccountManager()
+				return s.StatusNode.AccountManager()
 			},
 			node.ErrNoRunningNode,
 		},
 		{
 			"non-null manager, no running node, get AccountKeyStore",
 			func() (interface{}, error) {
-				return s.NodeManager.AccountKeyStore()
+				return s.StatusNode.AccountKeyStore()
 			},
 			node.ErrNoRunningNode,
 		},
 		{
 			"non-null manager, no running node, get RPC Client",
 			func() (interface{}, error) {
-				return s.NodeManager.RPCClient(), nil
+				return s.StatusNode.RPCClient(), nil
 			},
 			nil,
 		},

@@ -123,49 +116,49 @@ func (s *ManagerTestSuite) TestReferencesWithStartedNode() {
 		{
 			"node is running, get NodeConfig",
 			func() (interface{}, error) {
-				return s.NodeManager.NodeConfig()
+				return s.StatusNode.Config()
 			},
 			&params.NodeConfig{},
 		},
 		{
 			"node is running, get Node",
 			func() (interface{}, error) {
-				return s.NodeManager.Node()
+				return s.StatusNode.GethNode()
 			},
 			&gethnode.Node{},
 		},
 		{
 			"node is running, get LES",
 			func() (interface{}, error) {
-				return s.NodeManager.LightEthereumService()
+				return s.StatusNode.LightEthereumService()
 			},
 			&les.LightEthereum{},
 		},
 		{
 			"node is running, get Whisper",
 			func() (interface{}, error) {
-				return s.NodeManager.WhisperService()
+				return s.StatusNode.WhisperService()
 			},
 			&whisper.Whisper{},
 		},
 		{
 			"node is running, get AccountManager",
 			func() (interface{}, error) {
-				return s.NodeManager.AccountManager()
+				return s.StatusNode.AccountManager()
 			},
 			&accounts.Manager{},
 		},
 		{
 			"node is running, get AccountKeyStore",
 			func() (interface{}, error) {
-				return s.NodeManager.AccountKeyStore()
+				return s.StatusNode.AccountKeyStore()
 			},
 			&keystore.KeyStore{},
 		},
 		{
 			"node is running, get RPC Client",
 			func() (interface{}, error) {
-				return s.NodeManager.RPCClient(), nil
+				return s.StatusNode.RPCClient(), nil
 			},
 			&rpc.Client{},
 		},

@@ -184,67 +177,67 @@ func (s *ManagerTestSuite) TestNodeStartStop() {
 	s.NoError(err)

 	// try stopping non-started node
-	s.False(s.NodeManager.IsNodeRunning())
+	s.False(s.StatusNode.IsRunning())
 	time.Sleep(100 * time.Millisecond) //https://github.com/status-im/status-go/issues/429#issuecomment-339663163
-	s.Equal(node.ErrNoRunningNode, s.NodeManager.StopNode())
+	s.Equal(node.ErrNoRunningNode, s.StatusNode.Stop())

 	// start node
-	s.False(s.NodeManager.IsNodeRunning())
-	s.NoError(s.NodeManager.StartNode(nodeConfig))
+	s.False(s.StatusNode.IsRunning())
+	s.NoError(s.StatusNode.Start(nodeConfig))
 	// wait till node is started
-	s.True(s.NodeManager.IsNodeRunning())
+	s.True(s.StatusNode.IsRunning())

 	// try starting another node (w/o stopping the previously started node)
-	s.Equal(node.ErrNodeExists, s.NodeManager.StartNode(nodeConfig))
+	s.Equal(node.ErrNodeExists, s.StatusNode.Start(nodeConfig))

 	// now stop node
 	time.Sleep(100 * time.Millisecond) //https://github.com/status-im/status-go/issues/429#issuecomment-339663163
-	s.NoError(s.NodeManager.StopNode())
-	s.False(s.NodeManager.IsNodeRunning())
+	s.NoError(s.StatusNode.Stop())
+	s.False(s.StatusNode.IsRunning())

 	// start new node with exactly the same config
-	s.NoError(s.NodeManager.StartNode(nodeConfig))
-	s.True(s.NodeManager.IsNodeRunning())
+	s.NoError(s.StatusNode.Start(nodeConfig))
+	s.True(s.StatusNode.IsRunning())

 	// finally stop the node
 	time.Sleep(100 * time.Millisecond) //https://github.com/status-im/status-go/issues/429#issuecomment-339663163
-	s.NoError(s.NodeManager.StopNode())
+	s.NoError(s.StatusNode.Stop())
 }

 func (s *ManagerTestSuite) TestNetworkSwitching() {
 	// get Ropsten config
 	nodeConfig, err := MakeTestNodeConfig(GetNetworkID())
 	s.NoError(err)
-	s.False(s.NodeManager.IsNodeRunning())
-	s.NoError(s.NodeManager.StartNode(nodeConfig))
+	s.False(s.StatusNode.IsRunning())
+	s.NoError(s.StatusNode.Start(nodeConfig))
 	// wait till node is started
-	s.Require().True(s.NodeManager.IsNodeRunning())
+	s.Require().True(s.StatusNode.IsRunning())

-	firstHash, err := e2e.FirstBlockHash(s.NodeManager)
+	firstHash, err := e2e.FirstBlockHash(s.StatusNode)
 	s.NoError(err)
 	s.Equal(GetHeadHash(), firstHash)

 	// now stop node, and make sure that a new node, on different network can be started
 	time.Sleep(100 * time.Millisecond) //https://github.com/status-im/status-go/issues/429#issuecomment-339663163
-	s.NoError(s.NodeManager.StopNode())
-	s.False(s.NodeManager.IsNodeRunning())
+	s.NoError(s.StatusNode.Stop())
+	s.False(s.StatusNode.IsRunning())

 	// start new node with completely different config
 	nodeConfig, err = MakeTestNodeConfig(params.RinkebyNetworkID)
 	s.NoError(err)
-	s.NoError(s.NodeManager.StartNode(nodeConfig))
-	s.True(s.NodeManager.IsNodeRunning())
+	s.NoError(s.StatusNode.Start(nodeConfig))
+	s.True(s.StatusNode.IsRunning())

 	// make sure we are on another network indeed
-	firstHash, err = e2e.FirstBlockHash(s.NodeManager)
+	firstHash, err = e2e.FirstBlockHash(s.StatusNode)
 	s.NoError(err)
 	s.Equal(GetHeadHashFromNetworkID(params.RinkebyNetworkID), firstHash)

 	time.Sleep(100 * time.Millisecond) //https://github.com/status-im/status-go/issues/429#issuecomment-339663163
-	s.NoError(s.NodeManager.StopNode())
+	s.NoError(s.StatusNode.Stop())
 }

-func (s *ManagerTestSuite) TestStartNodeWithUpstreamEnabled() {
+func (s *ManagerTestSuite) TestStartWithUpstreamEnabled() {
 	if GetNetworkID() == params.StatusChainNetworkID {
 		s.T().Skip()
 	}

@@ -258,11 +251,11 @@ func (s *ManagerTestSuite) TestStartNodeWithUpstreamEnabled() {
 	nodeConfig.UpstreamConfig.Enabled = true
 	nodeConfig.UpstreamConfig.URL = networkURL

-	s.NoError(s.NodeManager.StartNode(nodeConfig))
-	s.True(s.NodeManager.IsNodeRunning())
+	s.NoError(s.StatusNode.Start(nodeConfig))
+	s.True(s.StatusNode.IsRunning())

 	time.Sleep(100 * time.Millisecond) //https://github.com/status-im/status-go/issues/429#issuecomment-339663163
-	s.NoError(s.NodeManager.StopNode())
+	s.NoError(s.StatusNode.Stop())
 }

 // TODO(adam): race conditions should be tested with -race flag and unit tests, if possible.

@@ -283,84 +276,79 @@ func (s *ManagerTestSuite) TestStartNodeWithUpstreamEnabled() {
 //
 // var funcsToTest = []func(*params.NodeConfig){
 // 	func(config *params.NodeConfig) {
-// 		log.Info("StartNode()")
-// 		_, err := s.NodeManager.StartNode(config)
-// 		s.T().Logf("StartNode() for network: %d, error: %v", config.NetworkID, err)
+// 		log.Info("Start()")
+// 		_, err := s.StatusNode.Start(config)
+// 		s.T().Logf("Start() for network: %d, error: %v", config.NetworkID, err)
 // 		progress <- struct{}{}
 // 	},
 // 	func(config *params.NodeConfig) {
-// 		_, err := s.NodeManager.StopNode()
-// 		s.T().Logf("StopNode() for network: %d, error: %v", config.NetworkID, err)
+// 		_, err := s.StatusNode.Stop()
+// 		s.T().Logf("Stop() for network: %d, error: %v", config.NetworkID, err)
 // 		progress <- struct{}{}
 // 	},
 // 	func(config *params.NodeConfig) {
 // 		log.Info("Node()")
-// 		_, err := s.NodeManager.Node()
+// 		_, err := s.StatusNode.GethNode()
 // 		s.T().Logf("Node(), error: %v", err)
 // 		progress <- struct{}{}
 // 	},
 // 	func(config *params.NodeConfig) {
-// 		log.Info("IsNodeRunning()")
-// 		s.T().Logf("IsNodeRunning(), result: %v", s.NodeManager.IsNodeRunning())
-// 		progress <- struct{}{}
-// 	},
-// 	func(config *params.NodeConfig) {
-// 		log.Info("PopulateStaticPeers()")
-// 		s.T().Logf("PopulateBootNodes(), error: %v", s.NodeManager.PopulateStaticPeers())
+// 		log.Info("IsRunning()")
+// 		s.T().Logf("IsRunning(), result: %v", s.StatusNode.IsRunning())
 // 		progress <- struct{}{}
 // 	},
 // 	// TODO(adam): quarantined until it uses a different datadir
 // 	// as otherwise it wipes out cached blockchain data.
 // 	// func(config *params.NodeConfig) {
 // 	// 	log.Info("ResetChainData()")
-// 	// 	_, err := s.NodeManager.ResetChainData()
+// 	// 	_, err := s.StatusNode.ResetChainData()
 // 	// 	s.T().Logf("ResetChainData(), error: %v", err)
 // 	// 	progress <- struct{}{}
 // 	// },
 // 	func(config *params.NodeConfig) {
 // 		log.Info("RestartNode()")
-// 		_, err := s.NodeManager.RestartNode()
+// 		_, err := s.StatusNode.RestartNode()
|
||||
// s.T().Logf("RestartNode(), error: %v", err)
|
||||
// progress <- struct{}{}
|
||||
// },
|
||||
// func(config *params.NodeConfig) {
|
||||
// log.Info("NodeConfig()")
|
||||
// _, err := s.NodeManager.NodeConfig()
|
||||
// s.T().Logf("NodeConfig(), error: %v", err)
|
||||
// log.Info("Config()")
|
||||
// _, err := s.StatusNode.Config()
|
||||
// s.T().Logf("Config(), error: %v", err)
|
||||
// progress <- struct{}{}
|
||||
// },
|
||||
// func(config *params.NodeConfig) {
|
||||
// log.Info("LightEthereumService()")
|
||||
// _, err := s.NodeManager.LightEthereumService()
|
||||
// _, err := s.StatusNode.LightEthereumService()
|
||||
// s.T().Logf("LightEthereumService(), error: %v", err)
|
||||
// progress <- struct{}{}
|
||||
// },
|
||||
// func(config *params.NodeConfig) {
|
||||
// log.Info("WhisperService()")
|
||||
// _, err := s.NodeManager.WhisperService()
|
||||
// _, err := s.StatusNode.WhisperService()
|
||||
// s.T().Logf("WhisperService(), error: %v", err)
|
||||
// progress <- struct{}{}
|
||||
// },
|
||||
// func(config *params.NodeConfig) {
|
||||
// log.Info("AccountManager()")
|
||||
// _, err := s.NodeManager.AccountManager()
|
||||
// _, err := s.StatusNode.AccountManager()
|
||||
// s.T().Logf("AccountManager(), error: %v", err)
|
||||
// progress <- struct{}{}
|
||||
// },
|
||||
// func(config *params.NodeConfig) {
|
||||
// log.Info("AccountKeyStore()")
|
||||
// _, err := s.NodeManager.AccountKeyStore()
|
||||
// _, err := s.StatusNode.AccountKeyStore()
|
||||
// s.T().Logf("AccountKeyStore(), error: %v", err)
|
||||
// progress <- struct{}{}
|
||||
// },
|
||||
// func(config *params.NodeConfig) {
|
||||
// log.Info("RPCClient()")
|
||||
// s.NodeManager.RPCClient()
|
||||
// s.StatusNode.RPCClient()
|
||||
// progress <- struct{}{}
|
||||
// },
|
||||
// }
|
||||
//
|
||||
// // increase StartNode()/StopNode() population
|
||||
// // increase Start()/Stop() population
|
||||
// for i := 0; i < 5; i++ {
|
||||
// funcsToTest = append(funcsToTest, funcsToTest[0], funcsToTest[1])
|
||||
// }
|
||||
|
@ -383,7 +371,7 @@ func (s *ManagerTestSuite) TestStartNodeWithUpstreamEnabled() {
|
|||
// }
|
||||
//
|
||||
// time.Sleep(2 * time.Second) // so that we see some logs
|
||||
// nodeStopped, _ := s.NodeManager.StopNode() // just in case we have a node running
|
||||
// nodeStopped, _ := s.StatusNode.Stop() // just in case we have a node running
|
||||
//
|
||||
// if nodeStopped != nil {
|
||||
// <-nodeStopped
|
||||
|
|
|
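The manager_test.go hunks above exercise the slimmed-down lifecycle surface: Start, Stop, and IsRunning replace StartNode, StopNode, and IsNodeRunning, while the node.ErrNoRunningNode and node.ErrNodeExists sentinels are unchanged. A minimal sketch of one start/stop cycle through the renamed API follows; the import paths are assumptions based on the status-go tree of this era, and the config is taken as a parameter because its construction is not part of this diff.

package main

import (
	"log"

	"github.com/status-im/status-go/geth/node"   // assumed import path
	"github.com/status-im/status-go/geth/params" // assumed import path
)

// runOnce drives one start/stop cycle through the renamed API; callers
// supply a *params.NodeConfig (the tests build theirs with MakeTestNodeConfig).
func runOnce(config *params.NodeConfig) error {
	statusNode := node.New() // replaces node.NewManager()

	// Stopping a node that was never started yields the sentinel error,
	// as asserted in TestNodeStartStop above.
	if err := statusNode.Stop(); err != node.ErrNoRunningNode {
		return err
	}

	if err := statusNode.Start(config); err != nil { // replaces StartNode()
		return err
	}
	log.Println("running:", statusNode.IsRunning()) // replaces IsNodeRunning()

	// A second Start without an intervening Stop is rejected.
	if err := statusNode.Start(config); err != node.ErrNodeExists {
		return err
	}

	return statusNode.Stop() // replaces StopNode()
}

func main() {}
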
@@ -11,7 +11,7 @@ import (
 )

 type RPCClientTestSuite struct {
-e2e.NodeManagerTestSuite
+e2e.StatusNodeTestSuite
 }

 func TestRPCClientTestSuite(t *testing.T) {

@@ -19,8 +19,8 @@ func TestRPCClientTestSuite(t *testing.T) {
 }

 func (s *RPCClientTestSuite) SetupTest() {
-s.NodeManager = node.NewManager()
-s.NotNil(s.NodeManager)
+s.StatusNode = node.New()
+s.NotNil(s.StatusNode)
 }

 func (s *RPCClientTestSuite) TestNewClient() {

@@ -22,12 +22,12 @@ func TestRPCTestSuite(t *testing.T) {
 }

 type RPCTestSuite struct {
-e2e.NodeManagerTestSuite
+e2e.StatusNodeTestSuite
 }

 func (s *RPCTestSuite) SetupTest() {
-s.NodeManager = node.NewManager()
-s.NotNil(s.NodeManager)
+s.StatusNode = node.New()
+s.NotNil(s.StatusNode)
 }

 func (s *RPCTestSuite) TestCallRPC() {

@@ -51,9 +51,9 @@ func (s *RPCTestSuite) TestCallRPC() {
 nodeConfig.UpstreamConfig.URL = networkURL
 }

-s.NoError(s.NodeManager.StartNode(nodeConfig))
+s.NoError(s.StatusNode.Start(nodeConfig))

-rpcClient := s.NodeManager.RPCClient()
+rpcClient := s.StatusNode.RPCClient()
 s.NotNil(rpcClient)

 type rpcCall struct {

@@ -120,7 +120,7 @@ func (s *RPCTestSuite) TestCallRPC() {
 case <-done:
 }

-s.NoError(s.NodeManager.StopNode())
+s.NoError(s.StatusNode.Stop())
 }
 }

@@ -129,15 +129,15 @@ func (s *RPCTestSuite) TestCallRawResult() {
 nodeConfig, err := MakeTestNodeConfig(GetNetworkID())
 s.NoError(err)

-s.NoError(s.NodeManager.StartNode(nodeConfig))
+s.NoError(s.StatusNode.Start(nodeConfig))

-client := s.NodeManager.RPCClient()
+client := s.StatusNode.RPCClient()
 s.NotNil(client)

 jsonResult := client.CallRaw(`{"jsonrpc":"2.0","method":"shh_version","params":[],"id":67}`)
 s.Equal(`{"jsonrpc":"2.0","id":67,"result":"6.0"}`, jsonResult)

-s.NoError(s.NodeManager.StopNode())
+s.NoError(s.StatusNode.Stop())
 }

 // TestCallRawResultGetTransactionReceipt checks if returned response

@@ -147,15 +147,15 @@ func (s *RPCTestSuite) TestCallRawResultGetTransactionReceipt() {
 nodeConfig, err := MakeTestNodeConfig(GetNetworkID())
 s.NoError(err)

-s.NoError(s.NodeManager.StartNode(nodeConfig))
+s.NoError(s.StatusNode.Start(nodeConfig))

-client := s.NodeManager.RPCClient()
+client := s.StatusNode.RPCClient()
 s.NotNil(client)

 jsonResult := client.CallRaw(`{"jsonrpc":"2.0","method":"eth_getTransactionReceipt","params":["0x0ca0d8f2422f62bea77e24ed17db5711a77fa72064cccbb8e53c53b699cd3b34"],"id":5}`)
 s.Equal(`{"jsonrpc":"2.0","id":5,"error":{"code":-32000,"message":"unknown transaction"}}`, jsonResult)

-s.NoError(s.NodeManager.StopNode())
+s.NoError(s.StatusNode.Stop())
 }

 // TestCallContextResult checks if result passed to CallContext

@@ -164,9 +164,9 @@ func (s *RPCTestSuite) TestCallContextResult() {
 s.StartTestNode()
 defer s.StopTestNode()

-EnsureNodeSync(s.NodeManager)
+EnsureNodeSync(s.StatusNode)

-client := s.NodeManager.RPCClient()
+client := s.StatusNode.RPCClient()
 s.NotNil(client)

 ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)

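The RPC test hunks above all follow one pattern: start the node, fetch the in-process client with RPCClient(), and exchange raw JSON-RPC strings through CallRaw. A hedged sketch of that pattern as a standalone helper; the function name is hypothetical, the request and expected response strings are copied from TestCallRawResult, and the import path is an assumption.

package main

import (
	"errors"

	"github.com/status-im/status-go/geth/node" // assumed import path
)

// shhVersion issues a raw JSON-RPC call through a running StatusNode,
// following the pattern of TestCallRawResult above.
func shhVersion(statusNode *node.StatusNode) (string, error) {
	// RPCClient() returns the in-process client the tests assert on with s.NotNil.
	client := statusNode.RPCClient()
	if client == nil {
		return "", errors.New("RPC client unavailable; is the node running?")
	}

	// CallRaw takes a raw JSON-RPC request string and returns the raw response;
	// the shh_version test expects {"jsonrpc":"2.0","id":67,"result":"6.0"}.
	return client.CallRaw(`{"jsonrpc":"2.0","method":"shh_version","params":[],"id":67}`), nil
}

func main() {}
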
@@ -14,10 +14,10 @@ import (
 "github.com/stretchr/testify/suite"
 )

-// NodeManagerTestSuite defines a test suit with NodeManager.
-type NodeManagerTestSuite struct {
+// StatusNodeTestSuite defines a test suite with StatusNode.
+type StatusNodeTestSuite struct {
 suite.Suite
-NodeManager *node.Manager
+StatusNode *node.StatusNode
 }

 // All general log messages in this package should be routed through this logger.

@@ -37,9 +37,9 @@ func init() {
 }
 }

-// StartTestNode initiazes a NodeManager instances with configuration retrieved
+// StartTestNode initializes a StatusNode instance with configuration retrieved
 // from the test config.
-func (s *NodeManagerTestSuite) StartTestNode(opts ...TestNodeOption) {
+func (s *StatusNodeTestSuite) StartTestNode(opts ...TestNodeOption) {
 nodeConfig, err := MakeTestNodeConfig(GetNetworkID())
 s.NoError(err)

@@ -51,17 +51,17 @@ func (s *NodeManagerTestSuite) StartTestNode(opts ...TestNodeOption) {
 // import account keys
 s.NoError(importTestAccounts(nodeConfig.KeyStoreDir))

-s.False(s.NodeManager.IsNodeRunning())
-s.NoError(s.NodeManager.StartNode(nodeConfig))
-s.True(s.NodeManager.IsNodeRunning())
+s.False(s.StatusNode.IsRunning())
+s.NoError(s.StatusNode.Start(nodeConfig))
+s.True(s.StatusNode.IsRunning())
 }

-// StopTestNode attempts to stop initialized NodeManager.
-func (s *NodeManagerTestSuite) StopTestNode() {
-s.NotNil(s.NodeManager)
-s.True(s.NodeManager.IsNodeRunning())
-s.NoError(s.NodeManager.StopNode())
-s.False(s.NodeManager.IsNodeRunning())
+// StopTestNode attempts to stop initialized StatusNode.
+func (s *StatusNodeTestSuite) StopTestNode() {
+s.NotNil(s.StatusNode)
+s.True(s.StatusNode.IsRunning())
+s.NoError(s.StatusNode.Stop())
+s.False(s.StatusNode.IsRunning())
 }

 // BackendTestSuite is a test suite with api.StatusBackend initialized

@@ -117,7 +117,7 @@ func (s *BackendTestSuite) RestartTestNode() {

 // WhisperService returns a reference to the Whisper service.
 func (s *BackendTestSuite) WhisperService() *whisper.Whisper {
-whisperService, err := s.Backend.NodeManager().WhisperService()
+whisperService, err := s.Backend.StatusNode().WhisperService()
 s.NoError(err)
 s.NotNil(whisperService)

@@ -126,7 +126,7 @@ func (s *BackendTestSuite) WhisperService() *whisper.Whisper {

 // LightEthereumService returns a reference to the LES service.
 func (s *BackendTestSuite) LightEthereumService() *les.LightEthereum {
-lightEthereum, err := s.Backend.NodeManager().LightEthereumService()
+lightEthereum, err := s.Backend.StatusNode().LightEthereumService()
 s.NoError(err)
 s.NotNil(lightEthereum)

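With NodeManagerTestSuite renamed to StatusNodeTestSuite, a suite opts into StartTestNode and StopTestNode by embedding it and assigning a fresh node in SetupTest, exactly as RPCClientTestSuite, RPCTestSuite, and MailServiceSuite do in this commit. A minimal sketch; the suite name, package name, and import paths are assumptions.

package mysuite_test

import (
	"testing"

	"github.com/status-im/status-go/e2e"       // assumed import path for the e2e helpers
	"github.com/status-im/status-go/geth/node" // assumed import path
	"github.com/stretchr/testify/suite"
)

// MySuite gets StartTestNode/StopTestNode and the StatusNode field
// from the embedded e2e.StatusNodeTestSuite.
type MySuite struct {
	e2e.StatusNodeTestSuite
}

// SetupTest hands each test a fresh, not-yet-started StatusNode,
// mirroring RPCClientTestSuite.SetupTest in the diff above.
func (s *MySuite) SetupTest() {
	s.StatusNode = node.New()
	s.NotNil(s.StatusNode)
}

func (s *MySuite) TestLifecycle() {
	s.StartTestNode() // asserts IsRunning() goes false -> true
	defer s.StopTestNode()

	s.NotNil(s.StatusNode.RPCClient())
}

func TestMySuite(t *testing.T) {
	suite.Run(t, new(MySuite))
}
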
@@ -26,10 +26,10 @@ func WithDataDir(path string) TestNodeOption {
 }
 }

-// FirstBlockHash validates Attach operation for the NodeManager.
-func FirstBlockHash(nodeManager *node.Manager) (string, error) {
+// FirstBlockHash validates Attach operation for the StatusNode.
+func FirstBlockHash(statusNode *node.StatusNode) (string, error) {
 // obtain RPC client for running node
-runningNode, err := nodeManager.Node()
+runningNode, err := statusNode.GethNode()
 if err != nil {
 return "", err
 }

@@ -37,7 +37,7 @@ func (s *TransactionsTestSuite) TestCallRPCSendTransaction() {
 s.StartTestBackend()
 defer s.StopTestBackend()

-EnsureNodeSync(s.Backend.NodeManager())
+EnsureNodeSync(s.Backend.StatusNode())

 err := s.Backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password)
 s.NoError(err)

@@ -187,7 +187,7 @@ func (s *TransactionsTestSuite) testSendContractTx(setInputAndDataValue initFunc
 s.StartTestBackend()
 defer s.StopTestBackend()

-EnsureNodeSync(s.Backend.NodeManager())
+EnsureNodeSync(s.Backend.StatusNode())

 sampleAddress, _, _, err := s.Backend.AccountManager().CreateAccount(TestConfig.Account1.Password)
 s.NoError(err)

@@ -282,7 +282,7 @@ func (s *TransactionsTestSuite) TestSendEther() {
 s.StartTestBackend()
 defer s.StopTestBackend()

-EnsureNodeSync(s.Backend.NodeManager())
+EnsureNodeSync(s.Backend.StatusNode())

 // create an account
 sampleAddress, _, _, err := s.Backend.AccountManager().CreateAccount(TestConfig.Account1.Password)

@@ -419,7 +419,7 @@ func (s *TransactionsTestSuite) TestDoubleCompleteQueuedTransactions() {
 s.StartTestBackend()
 defer s.StopTestBackend()

-EnsureNodeSync(s.Backend.NodeManager())
+EnsureNodeSync(s.Backend.StatusNode())

 // log into account from which transactions will be sent
 s.NoError(s.Backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password))

@@ -493,7 +493,7 @@ func (s *TransactionsTestSuite) TestDiscardQueuedTransaction() {
 s.StartTestBackend()
 defer s.StopTestBackend()

-EnsureNodeSync(s.Backend.NodeManager())
+EnsureNodeSync(s.Backend.StatusNode())

 // reset queue
 s.Backend.TxQueueManager().TransactionQueue().Reset()

@@ -583,7 +583,7 @@ func (s *TransactionsTestSuite) TestDiscardMultipleQueuedTransactions() {
 s.StartTestBackend()
 defer s.StopTestBackend()

-EnsureNodeSync(s.Backend.NodeManager())
+EnsureNodeSync(s.Backend.StatusNode())

 // reset queue
 s.Backend.TxQueueManager().TransactionQueue().Reset()

@@ -780,7 +780,7 @@ func (s *TransactionsTestSuite) TestCompleteMultipleQueuedTransactionsUpstream()
 func (s *TransactionsTestSuite) setupLocalNode() {
 s.StartTestBackend()

-EnsureNodeSync(s.Backend.NodeManager())
+EnsureNodeSync(s.Backend.StatusNode())
 }

 func (s *TransactionsTestSuite) setupUpstreamNode() {

@@ -17,11 +17,11 @@ func TestMailServiceSuite(t *testing.T) {
 }

 type MailServiceSuite struct {
-e2e.NodeManagerTestSuite
+e2e.StatusNodeTestSuite
 }

 func (s *MailServiceSuite) SetupTest() {
-s.NodeManager = node.NewManager()
+s.StatusNode = node.New()
 }

 // TestShhRequestMessagesRPCMethodAvailability tests if `shh_requestMessages` is available

@@ -34,7 +34,7 @@ func (s *MailServiceSuite) TestShhRequestMessagesRPCMethodAvailability() {
 })
 defer s.StopTestNode()

-client := s.NodeManager.RPCClient()
+client := s.StatusNode.RPCClient()
 r.NotNil(client)

 // This error means that the method is available through inproc communication

@@ -31,23 +31,23 @@ func (s *WhisperMailboxSuite) TestRequestMessageFromMailboxAsync() {
 // Start mailbox and status node.
 mailboxBackend, stop := s.startMailboxBackend()
 defer stop()
-mailboxNode, err := mailboxBackend.NodeManager().Node()
+mailboxNode, err := mailboxBackend.StatusNode().GethNode()
 s.Require().NoError(err)
 mailboxEnode := mailboxNode.Server().NodeInfo().Enode

 sender, stop := s.startBackend("sender")
 defer stop()
-node, err := sender.NodeManager().Node()
+node, err := sender.StatusNode().GethNode()
 s.Require().NoError(err)

 s.Require().NotEqual(mailboxEnode, node.Server().NodeInfo().Enode)

-err = sender.NodeManager().AddPeer(mailboxEnode)
+err = sender.StatusNode().AddPeer(mailboxEnode)
 s.Require().NoError(err)
 // Wait async processes on adding peer.
 time.Sleep(time.Second)

-senderWhisperService, err := sender.NodeManager().WhisperService()
+senderWhisperService, err := sender.StatusNode().WhisperService()
 s.Require().NoError(err)

 // Mark mailbox node trusted.

@@ -63,7 +63,7 @@ func (s *WhisperMailboxSuite) TestRequestMessageFromMailboxAsync() {
 MailServerKeyID, err := senderWhisperService.AddSymKeyFromPassword(password)
 s.Require().NoError(err)

-rpcClient := sender.NodeManager().RPCClient()
+rpcClient := sender.StatusNode().RPCClient()
 s.Require().NotNil(rpcClient)

 // Create topic.

@@ -138,30 +138,30 @@ func (s *WhisperMailboxSuite) TestRequestMessagesInGroupChat() {
 defer stop()

 // Add mailbox to static peers.
-mailboxNode, err := mailboxBackend.NodeManager().Node()
+mailboxNode, err := mailboxBackend.StatusNode().GethNode()
 s.Require().NoError(err)
 mailboxEnode := mailboxNode.Server().NodeInfo().Enode

-err = aliceBackend.NodeManager().AddPeer(mailboxEnode)
+err = aliceBackend.StatusNode().AddPeer(mailboxEnode)
 s.Require().NoError(err)
-err = bobBackend.NodeManager().AddPeer(mailboxEnode)
+err = bobBackend.StatusNode().AddPeer(mailboxEnode)
 s.Require().NoError(err)
-err = charlieBackend.NodeManager().AddPeer(mailboxEnode)
+err = charlieBackend.StatusNode().AddPeer(mailboxEnode)
 s.Require().NoError(err)
 // Wait async processes on adding peer.
 time.Sleep(time.Second)

 // Get whisper service.
-aliceWhisperService, err := aliceBackend.NodeManager().WhisperService()
+aliceWhisperService, err := aliceBackend.StatusNode().WhisperService()
 s.Require().NoError(err)
-bobWhisperService, err := bobBackend.NodeManager().WhisperService()
+bobWhisperService, err := bobBackend.StatusNode().WhisperService()
 s.Require().NoError(err)
-charlieWhisperService, err := charlieBackend.NodeManager().WhisperService()
+charlieWhisperService, err := charlieBackend.StatusNode().WhisperService()
 s.Require().NoError(err)
 // Get rpc client.
-aliceRPCClient := aliceBackend.NodeManager().RPCClient()
-bobRPCClient := bobBackend.NodeManager().RPCClient()
-charlieRPCClient := charlieBackend.NodeManager().RPCClient()
+aliceRPCClient := aliceBackend.StatusNode().RPCClient()
+bobRPCClient := bobBackend.StatusNode().RPCClient()
+charlieRPCClient := charlieBackend.StatusNode().RPCClient()

 // Bob and charlie add the mailserver key.
 password := "status-offline-inbox"

@@ -286,11 +286,11 @@ func (s *WhisperMailboxSuite) TestSendMessageWithoutSubscription() {
 time.Sleep((whisper.DefaultSyncAllowance + 1) * time.Second)

 // Get whisper service.
-aliceWhisperService, err := aliceBackend.NodeManager().WhisperService()
+aliceWhisperService, err := aliceBackend.StatusNode().WhisperService()
 s.Require().NoError(err)

 // Get rpc client.
-aliceRPCClient := aliceBackend.NodeManager().RPCClient()
+aliceRPCClient := aliceBackend.StatusNode().RPCClient()

 // Generate group chat symkey and topic.
 groupChatKeyID, err := aliceWhisperService.GenerateSymKey()

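The mailbox hunks above repeatedly wire two backends together through the trimmed peer API: pull the underlying geth node with GethNode(), read its enode address, and hand it to AddPeer on the other side. A condensed sketch of that wiring; wirePeers is a hypothetical helper, *api.StatusBackend and the accessor chain come straight from the diff, and the import path is an assumption.

package main

import (
	"time"

	"github.com/status-im/status-go/geth/api" // assumed import path
)

// wirePeers is a hypothetical helper following TestRequestMessageFromMailboxAsync.
func wirePeers(sender, mailbox *api.StatusBackend) error {
	// GethNode() replaces the old Node() accessor on the manager.
	mailboxNode, err := mailbox.StatusNode().GethNode()
	if err != nil {
		return err
	}
	mailboxEnode := mailboxNode.Server().NodeInfo().Enode

	// AddPeer survives the API trim; the static-peer helpers
	// (PopulateStaticPeers and friends) were removed by this commit.
	if err := sender.StatusNode().AddPeer(mailboxEnode); err != nil {
		return err
	}

	// The tests sleep briefly so the async peer handshake can settle.
	time.Sleep(time.Second)
	return nil
}

func main() {}
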
@@ -27,10 +27,10 @@ func (s *WhisperTestSuite) TestWhisperFilterRace() {
 s.StartTestBackend()
 defer s.StopTestBackend()

-whisperService, err := s.Backend.NodeManager().WhisperService()
+whisperService, err := s.Backend.StatusNode().WhisperService()
 s.NoError(err)

-accountManager := account.NewManager(s.Backend.NodeManager())
+accountManager := account.NewManager(s.Backend.StatusNode())
 s.NotNil(accountManager)

 whisperAPI := whisper.NewPublicWhisperAPI(whisperService)

@@ -92,7 +92,7 @@ func (s *WhisperTestSuite) TestSelectAccount() {
 s.StartTestBackend()
 defer s.StopTestBackend()

-whisperService, err := s.Backend.NodeManager().WhisperService()
+whisperService, err := s.Backend.StatusNode().WhisperService()
 s.NoError(err)

 // create an acc

@@ -116,7 +116,7 @@ func (s *WhisperTestSuite) TestLogout() {
 s.StartTestBackend()
 defer s.StopTestBackend()

-whisperService, err := s.Backend.NodeManager().WhisperService()
+whisperService, err := s.Backend.StatusNode().WhisperService()
 s.NoError(err)

 // create an account

@@ -167,7 +167,7 @@ func (s *WhisperTestSuite) TestSelectedAccountOnRestart() {
 s.False(whisperService.HasKeyPair(pubKey1), "identity should be removed, but it is still present in whisper")

 // stop node (and all of its sub-protocols)
-nodeConfig, err := s.Backend.NodeManager().NodeConfig()
+nodeConfig, err := s.Backend.StatusNode().Config()
 s.NoError(err)
 preservedNodeConfig := *nodeConfig
 s.NoError(s.Backend.StopNode())

@@ -103,7 +103,7 @@ func LoadFromFile(filename string) string {
 // of unrelated methods.
 type LightEthereumProvider interface {
 // NodeConfig returns reference to running node's configuration
-NodeConfig() (*params.NodeConfig, error)
+Config() (*params.NodeConfig, error)
 // LightEthereumService exposes reference to LES service running on top of the node
 LightEthereumService() (*les.LightEthereum, error)
 // PeerCount returns number of connected peers

@@ -113,7 +113,7 @@ type LightEthereumProvider interface {
 // EnsureNodeSync waits until node synchronization is done to continue
 // with tests afterwards. Panics in case of an error or a timeout.
 func EnsureNodeSync(lesProvider LightEthereumProvider) {
-nc, err := lesProvider.NodeConfig()
+nc, err := lesProvider.Config()
 if err != nil {
 panic("can't retrieve NodeConfig")
 }
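The final hunks show why the rename of NodeConfig() to Config() had to ripple into the test helpers: StatusNode satisfies LightEthereumProvider structurally, so the interface and EnsureNodeSync had to follow the concrete method. A compile-time assertion makes that contract explicit; this sketch restates the interface from the hunk above (with the PeerCount method elided) and is illustrative, not part of the commit.

package main

import (
	"github.com/ethereum/go-ethereum/les"
	"github.com/status-im/status-go/geth/node"   // assumed import path
	"github.com/status-im/status-go/geth/params" // assumed import path
)

// lesProvider restates LightEthereumProvider from the hunk above,
// minus the PeerCount method, which is unaffected by this commit.
type lesProvider interface {
	// Config returns a reference to the running node's configuration.
	Config() (*params.NodeConfig, error)
	// LightEthereumService exposes the LES service running on top of the node.
	LightEthereumService() (*les.LightEthereum, error)
}

// Go checks interface satisfaction structurally at compile time, so this
// line fails to build if StatusNode loses either renamed method.
var _ lesProvider = (*node.StatusNode)(nil)

func main() {}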