Add default peer limits configuration (#830)
* Add default peer limits configuration

  If discovery is enabled for a given cluster, we set a default expected
  number of peers for each enabled service. For example, if the rinkeby
  cluster has discovery enabled, we check which services are enabled:
  - if whisper is enabled, we set min and max limits by default
  - if les is enabled and Infura is not used, we set limits too

  When statusd is used, this configuration must be provided via the
  configuration format supported by statusd.

* Fix deadlock in les peer set
parent a45b05969a
commit 2f2dfe16c0
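For illustration, here is how the new defaults surface through the public API, modeled on the `default peer limits` test case added in this commit. This is a sketch: the import path is assumed from status-go's layout, and the comments are expectations rather than captured output.

```go
package main

import (
	"fmt"

	"github.com/status-im/status-go/geth/params" // assumed import path
)

func main() {
	// Rinkeby (NetworkId 4) has discovery enabled in the default cluster
	// data, so a minimal config is expected to come back with the whisper
	// topic limits already populated.
	config, err := params.LoadNodeConfig(`{
		"NetworkId": 4,
		"DataDir": "/tmp/status-rinkeby"
	}`)
	if err != nil {
		panic(err)
	}
	fmt.Println(config.Discovery)                                // expected: true
	fmt.Println(config.RequireTopics[params.WhisperDiscv5Topic]) // expected: the {2, 2} whisper limits
}
```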
diff --git c/les/peer.go w/les/peer.go
index caf568077..5eb41cff9 100644
--- c/les/peer.go
+++ w/les/peer.go
@@ -543,19 +543,24 @@ func (ps *peerSet) notify(n peerSetNotify) {
 // Register injects a new peer into the working set, or returns an error if the
 // peer is already known.
 func (ps *peerSet) Register(p *peer) error {
-	ps.lock.Lock()
-	if ps.closed {
-		return errClosed
+	peers, err := func() ([]peerSetNotify, error) {
+		ps.lock.Lock()
+		defer ps.lock.Unlock()
+		if ps.closed {
+			return nil, errClosed
+		}
+		if _, ok := ps.peers[p.id]; ok {
+			return nil, errAlreadyRegistered
+		}
+		ps.peers[p.id] = p
+		p.sendQueue = newExecQueue(100)
+		peers := make([]peerSetNotify, len(ps.notifyList))
+		copy(peers, ps.notifyList)
+		return peers, nil
+	}()
+	if err != nil {
+		return err
 	}
-	if _, ok := ps.peers[p.id]; ok {
-		return errAlreadyRegistered
-	}
-	ps.peers[p.id] = p
-	p.sendQueue = newExecQueue(100)
-	peers := make([]peerSetNotify, len(ps.notifyList))
-	copy(peers, ps.notifyList)
-	ps.lock.Unlock()
-
 	for _, n := range peers {
 		n.registerPeer(p)
 	}
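The deadlock fixed above came from the error paths: the old `Register` acquired `ps.lock` and then returned `errClosed` or `errAlreadyRegistered` without unlocking, so the next goroutine touching the peer set blocked forever. The new code scopes the critical section in a closure with a deferred unlock and invokes the `registerPeer` callbacks only after the lock is released. A self-contained sketch of that pattern, with illustrative names rather than the les types:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var (
	errClosed            = errors.New("registry closed")
	errAlreadyRegistered = errors.New("already registered")
)

type registry struct {
	mu        sync.Mutex
	closed    bool
	items     map[string]struct{}
	observers []func(id string)
}

func (r *registry) register(id string) error {
	// The critical section lives in a closure so the deferred unlock runs
	// on every return path, which is what the old code failed to guarantee.
	observers, err := func() ([]func(string), error) {
		r.mu.Lock()
		defer r.mu.Unlock()
		if r.closed {
			return nil, errClosed
		}
		if _, ok := r.items[id]; ok {
			return nil, errAlreadyRegistered
		}
		r.items[id] = struct{}{}
		// Copy the observer list so the callbacks run without the lock held.
		obs := make([]func(string), len(r.observers))
		copy(obs, r.observers)
		return obs, nil
	}()
	if err != nil {
		return err
	}
	for _, notify := range observers {
		notify(id)
	}
	return nil
}

func main() {
	r := &registry{items: map[string]struct{}{}}
	r.observers = append(r.observers, func(id string) { fmt.Println("registered:", id) })
	fmt.Println(r.register("peer-1")) // prints "registered: peer-1", then <nil>
	fmt.Println(r.register("peer-1")) // already registered
}
```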
@@ -126,10 +126,9 @@ func defaultEmbeddedNodeConfig(config *params.NodeConfig) *node.Config {
	}

	if config.ClusterConfig != nil && config.ClusterConfig.Enabled {
		nc.P2P.StaticNodes = parseNodes(config.ClusterConfig.StaticNodes)
		nc.P2P.BootstrapNodesV5 = parseNodesV5(config.ClusterConfig.BootNodes)
		nc.P2P.StaticNodes = parseNodes(config.ClusterConfig.StaticNodes)
	}

	return nc
}
@@ -153,7 +152,6 @@ func activateLightEthService(stack *node.Node, config *params.NodeConfig) error
	ethConf.SyncMode = downloader.LightSync
	ethConf.NetworkId = config.NetworkID
	ethConf.DatabaseCache = config.LightEthConfig.DatabaseCache

	return stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
		return les.New(ctx, &ethConf)
	})
@@ -97,7 +97,7 @@ func (n *StatusNode) Start(config *params.NodeConfig, services ...node.ServiceCo
 		return err
 	}
 
-	if n.gethNode.Server().DiscV5 != nil {
+	if n.config.Discovery {
 		return n.startPeerPool()
 	}
@@ -198,7 +198,7 @@ func (n *StatusNode) stop() error {
 	}
 
 func (n *StatusNode) stopPeerPool() error {
-	if n.gethNode.Server().DiscV5 == nil {
+	if !n.config.Discovery {
 		return nil
 	}
@@ -1,47 +1,36 @@
package params

type subClusterData struct {
	Number int    `json:"number"`
	Hash   string `json:"hash"`
type cluster struct {
	NetworkID   int      `json:"networkID"`
	Discovery   bool     `json:"discovery"`
	StaticNodes []string `json:"staticnodes"`
	BootNodes   []string `json:"bootnodes"`
}

type clusterData struct {
	NetworkID int            `json:"networkID"`
	Prod      subClusterData `json:"prod"`
	Dev       subClusterData `json:"dev"`
}

var ropstenCluster = clusterData{
var ropstenCluster = cluster{
	NetworkID: 3,
	Prod: subClusterData{StaticNodes: []string{
	StaticNodes: []string{
		"enode://dffef3874011709b12d1e540d83ddb19a9db8614ad9151d05bcf813585e45cbebba5aaea223fe315786c401d8cecb1ad2de9f179680c536ea30311fb21fa934b@188.166.100.178:30303",
		"enode://03f3661686d30509d621dbe5ee2e3082923f25e94fd41a2dd8dd34bb12a0c4e8fbde52247c6c55e86dc209a8e7c4a5ae56058c65f7b01734d3ab73818b44e2a3@188.166.33.47:30303",
	}},
	Dev: subClusterData{StaticNodes: []string{
		"enode://dffef3874011709b12d1e540d83ddb19a9db8614ad9151d05bcf813585e45cbebba5aaea223fe315786c401d8cecb1ad2de9f179680c536ea30311fb21fa934b@188.166.100.178:30303",
		"enode://03f3661686d30509d621dbe5ee2e3082923f25e94fd41a2dd8dd34bb12a0c4e8fbde52247c6c55e86dc209a8e7c4a5ae56058c65f7b01734d3ab73818b44e2a3@188.166.33.47:30303",
	}},
	},
}

var rinkebyCluster = clusterData{
var rinkebyCluster = cluster{
	NetworkID: 4,
	Prod: subClusterData{StaticNodes: []string{
		"enode://fda3f6273a0f2da4ac5858d1f52e5afaf9def281121be3d37558c67d4d9ca26c6ad7a0520b2cd7454120fb770e86d5760487c9924b2166e65485f606e56d60fc@51.15.69.144:30303",
	Discovery: true,
	BootNodes: []string{
		"enode://1b843c7697f6fc42a1f606fb3cfaac54e025f06789dc20ad9278be3388967cf21e3a1b1e4be51faecd66c2c3adef12e942b4fcdeb8727657abe60636efb6224f@206.189.6.46:30404",
		"enode://b29100c8468e3e6604817174a15e4d71627458b0dcdbeea169ab2eb4ab2bbc6f24adbb175826726cec69db8fdba6c0dd60b3da598e530ede562180d300728659@206.189.6.48:30404",
	},
	StaticNodes: []string{
		"enode://ba41aa829287a0a9076d9bffed97c8ce2e491b99873288c9e886f16fd575306ac6c656db4fbf814f5a9021aec004ffa9c0ae8650f92fd10c12eeb7c364593eb3@51.15.69.147:30303",
		"enode://28ecf5272b560ca951f4cd7f1eb8bd62da5853b026b46db432c4b01797f5b0114819a090a72acd7f32685365ecd8e00450074fa0673039aefe10f3fb666e0f3f@51.15.76.249:30303",
	}},
	Dev: subClusterData{StaticNodes: []string{
		"enode://7512c8f6e7ffdcc723cf77e602a1de9d8cc2e8ad35db309464819122cd773857131aee390fec33894db13da730c8432bb248eed64039e3810e156e979b2847cb@51.15.78.243:30303",
		"enode://1cc27a5a41130a5c8b90db5b2273dc28f7b56f3edfc0dcc57b665d451274b26541e8de49ea7a074281906a82209b9600239c981163b6ff85c3038a8e2bc5d8b8@51.15.68.93:30303",
		"enode://798d17064141b8f88df718028a8272b943d1cb8e696b3dab56519c70b77b1d3469b56b6f4ce3788457646808f5c7299e9116626f2281f30b959527b969a71e4f@51.15.75.244:30303",
	}},
	},
}

var mainnetCluster = clusterData{
var mainnetCluster = cluster{
	NetworkID: 1,
	Prod: subClusterData{},
	Dev: subClusterData{StaticNodes: []string{
	StaticNodes: []string{
		"enode://3aeaff0868b19e03fabe33e6e0fcc821094e1601be44edd6f45e3f0171ed964e13623e49987bddd6c517304d2a45dfe66da51e47b2e11d59c4b30cd6094db43d@163.172.176.22:30303",
		"enode://687343483ca41132a16c9ab67b49e9997a34ec38ddb6dd60bf45f9a0ea4c50362f902553d813af44ab1cdb246fc384d4c74b4437c15cefe3bb0e87b399dbb5bb@163.172.176.22:30403",
		"enode://2a3d6c1c86546831e5bb2684ff0ed6d931bdacf3c6cd344706452a1e78c41442d38c62317096175dcea6517959f40ac789f76356348e0a17ee53563cbdf2db48@163.172.176.22:30503",
@@ -49,7 +38,7 @@ var mainnetCluster = clusterData{
		"enode://7afd119c549a7ab02b3f7bd77ef3490b6d660d5c49d0734a0c8bb23195ced4ace0bf5cde673cd5cfd07dd8d759277f3d8408eb73dc3c217bbe00f0027d06eee9@51.15.85.243:30403",
		"enode://da8af0869e4e8047f21c1ac016b94a7b7d8e935dddd28d4272f88a1ceaee7c15e7deec9b6fd195ed3bc43748893111ebf2b2479ff44a8025ab8d598f3c97b589@51.15.85.243:30503",
		"enode://7ebaa6a8ce2547f10e34fab9cc5626b86d67934a86e1fb36145c0b89fcc7b9315dd6d0a8cc5808d11a55bdc14c78ff675ca956dfec53837b4f1a97392b15ec23@51.15.35.110:30303",
	}},
	},
}

var defaultClusters = []clusterData{ropstenCluster, rinkebyCluster, mainnetCluster}
var defaultClusters = []cluster{ropstenCluster, rinkebyCluster, mainnetCluster}
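Since the new flat `cluster` struct is also what a file referenced by `ClusterConfigFile` is unmarshalled into (see the `var clusters []cluster` hunk below), an external cluster file is now a plain JSON array keyed by the struct tags above. A sketch of one entry, mirroring the style of the test fixture in this commit (the enode URLs are placeholders, not real nodes):

```go
// Example contents for a file referenced by ClusterConfigFile; the field
// names follow the JSON tags of the cluster struct, the enodes are fake.
var exampleClusterConfig = []byte(`[
	{
		"networkID": 4,
		"discovery": true,
		"bootnodes": [
			"enode://<node-key>@10.1.1.1:30404"
		],
		"staticnodes": [
			"enode://<node-key>@10.1.1.2:30303"
		]
	}
]`)
```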
@@ -206,6 +206,7 @@ type NodeConfig struct {
 	// DevMode is true when given configuration is to be used during development.
 	// For production, this flag should be turned off, so that more strict requirements
 	// are applied to node's configuration
+	// DEPRECATED.
 	DevMode bool
 
 	// NetworkID sets network to use for selecting peers to connect to
@@ -338,7 +339,9 @@ func NewNodeConfig(dataDir string, clstrCfgFile string, networkID uint64, devMod
			NotificationTriggerURL: FirebaseNotificationTriggerURL,
		},
	},
	SwarmConfig: &SwarmConfig{},
	SwarmConfig: &SwarmConfig{},
	RegisterTopics: []discv5.Topic{},
	RequireTopics: map[discv5.Topic]Limits{},
}

// adjust dependent values
@@ -468,7 +471,7 @@ func (c *NodeConfig) updateConfig() error {
 	if err := c.updateClusterConfig(); err != nil {
 		return err
 	}
-
+	c.updatePeerLimits()
 	return c.updateRelativeDirsConfig()
 }
@@ -547,7 +550,7 @@ func (c *NodeConfig) updateClusterConfig() error {
 		return nil
 	}
 
-	var clusters []clusterData
+	var clusters []cluster
 	if c.ClusterConfigFile != "" {
 		// Load cluster configuration from external file.
 		configFile, err := ioutil.ReadFile(c.ClusterConfigFile)
@@ -564,10 +567,9 @@ func (c *NodeConfig) updateClusterConfig() error {
 
 	for _, cluster := range clusters {
 		if cluster.NetworkID == int(c.NetworkID) {
-			c.ClusterConfig.StaticNodes = cluster.Prod.StaticNodes
-			if c.DevMode {
-				c.ClusterConfig.StaticNodes = cluster.Dev.StaticNodes
-			}
+			c.Discovery = cluster.Discovery
+			c.ClusterConfig.BootNodes = cluster.BootNodes
+			c.ClusterConfig.StaticNodes = cluster.StaticNodes
 			break
 		}
 	}
@@ -595,6 +597,17 @@ func (c *NodeConfig) updateRelativeDirsConfig() error {
 	return nil
 }
 
+// updatePeerLimits will set default peer limits expectations based on enabled services.
+func (c *NodeConfig) updatePeerLimits() {
+	if !c.Discovery {
+		return
+	}
+	if c.WhisperConfig.Enabled {
+		c.RequireTopics[WhisperDiscv5Topic] = WhisperDiscv5Limits
+		// TODO(dshulyak) register mailserver limits when we will change how they are handled.
+	}
+}
+
 // String dumps config object as nicely indented JSON
 func (c *NodeConfig) String() string {
 	data, _ := json.MarshalIndent(c, "", " ")
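Note that `updatePeerLimits` above only wires up the Whisper topic; the LES limits mentioned in the commit message are not visible in this excerpt. For orientation, a hedged sketch of how a consumer such as the peer pool might walk the resulting expectations (illustrative code under assumed import paths, not part of this diff):

```go
package main

import (
	"fmt"

	"github.com/status-im/status-go/geth/params" // assumed import path
)

func logPeerExpectations(config *params.NodeConfig) {
	// %v formatting avoids assuming the internal field names of Limits.
	for topic, limits := range config.RequireTopics {
		fmt.Printf("maintaining peers for topic %q within limits %v\n", topic, limits)
	}
}

func main() {
	config, err := params.LoadNodeConfig(`{"NetworkId": 4, "DataDir": "/tmp/status"}`)
	if err != nil {
		panic(err)
	}
	logPeerExpectations(config)
}
```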
@@ -17,23 +17,17 @@ import (
	"github.com/stretchr/testify/require"
)

var clusterConfigData = []byte(`[
var clusterConfigData = []byte(`
[
	{
		"networkID": 3,
		"prod": {
			"staticnodes": [
				"enode://7ab298cedc4185a894d21d8a4615262ec6bdce66c9b6783878258e0d5b31013d30c9038932432f70e5b2b6a5cd323bf820554fcb22fbc7b45367889522e9c449@10.1.1.1:30303",
				"enode://f59e8701f18c79c5cbc7618dc7bb928d44dc2f5405c7d693dad97da2d8585975942ec6fd36d3fe608bfdc7270a34a4dd00f38cfe96b2baa24f7cd0ac28d382a1@10.1.1.2:30303"
			]
		},
		"dev": {
			"staticnodes": [
				"enode://7ab298cedc4185a894d21d8a4615262ec6bdce66c9b6783878258e0d5b31013d30c9038932432f70e5b2b6a5cd323bf820554fcb22fbc7b45367889522e9c449@10.1.1.1:30303",
				"enode://f59e8701f18c79c5cbc7618dc7bb928d44dc2f5405c7d693dad97da2d8585975942ec6fd36d3fe608bfdc7270a34a4dd00f38cfe96b2baa24f7cd0ac28d382a1@10.1.1.2:30303"
			]
		}
		"staticnodes": [
			"enode://7ab298cedc4185a894d21d8a4615262ec6bdce66c9b6783878258e0d5b31013d30c9038932432f70e5b2b6a5cd323bf820554fcb22fbc7b45367889522e9c449@10.1.1.1:30303",
			"enode://f59e8701f18c79c5cbc7618dc7bb928d44dc2f5405c7d693dad97da2d8585975942ec6fd36d3fe608bfdc7270a34a4dd00f38cfe96b2baa24f7cd0ac28d382a1@10.1.1.2:30303"
		]
	}
]`)
]
`)

var loadConfigTestCases = []struct {
	name string
@@ -309,24 +303,8 @@ var loadConfigTestCases = []struct {
		func(t *testing.T, dataDir string, nodeConfig *params.NodeConfig, err error) {
			require.NoError(t, err)
			require.True(t, nodeConfig.ClusterConfig.Enabled, "cluster configuration is expected to be enabled by default")

			enodes := nodeConfig.ClusterConfig.StaticNodes
			require.True(t, len(enodes) >= 3)
		},
	},
	{
		`select cluster configuration (Rinkeby Prod)`,
		`{
			"NetworkId": 4,
			"DataDir": "$TMPDIR",
			"DevMode": false
		}`,
		func(t *testing.T, dataDir string, nodeConfig *params.NodeConfig, err error) {
			require.NoError(t, err)
			require.True(t, nodeConfig.ClusterConfig.Enabled, "cluster configuration is expected to be enabled by default")

			enodes := nodeConfig.ClusterConfig.StaticNodes
			require.True(t, len(enodes) >= 3)
			require.True(t, nodeConfig.Discovery)
			require.True(t, len(nodeConfig.ClusterConfig.BootNodes) >= 2)
		},
	},
	{
@@ -343,21 +321,6 @@ var loadConfigTestCases = []struct {
 			require.True(t, len(enodes) >= 2)
 		},
 	},
-	{
-		`select cluster configuration (Mainnet Prod)`,
-		`{
-			"NetworkId": 1,
-			"DataDir": "$TMPDIR",
-			"DevMode": false
-		}`,
-		func(t *testing.T, dataDir string, nodeConfig *params.NodeConfig, err error) {
-			require.NoError(t, err)
-			require.True(t, nodeConfig.ClusterConfig.Enabled, "cluster configuration is expected to be enabled by default")
-
-			enodes := nodeConfig.ClusterConfig.StaticNodes
-			require.True(t, len(enodes) == 0)
-		},
-	},
 	{
 		`default DevMode (true)`,
 		`{
@@ -397,6 +360,20 @@ var loadConfigTestCases = []struct {
 			require.True(t, nodeConfig.WhisperConfig.LightClient)
 		},
 	},
+	{
+		`default peer limits`,
+		`{
+			"NetworkId": 4,
+			"DataDir": "$TMPDIR"
+		}`,
+		func(t *testing.T, dataDir string, nodeConfig *params.NodeConfig, err error) {
+			require.NoError(t, err)
+			require.NotNil(t, nodeConfig.RequireTopics)
+			require.True(t, nodeConfig.Discovery)
+			require.Contains(t, nodeConfig.RequireTopics, params.WhisperDiscv5Topic)
+			require.Equal(t, params.WhisperDiscv5Limits, nodeConfig.RequireTopics[params.WhisperDiscv5Topic])
+		},
+	},
 }
 
 // TestLoadNodeConfig tests loading JSON configuration and setting default values.
@@ -413,10 +390,11 @@ func TestLoadNodeConfig(t *testing.T) {
 	t.Log(tmpDir)
 
 	for _, testCase := range loadConfigTestCases {
-		t.Log("test: " + testCase.name)
-		testCase.configJSON = strings.Replace(testCase.configJSON, "$TMPDIR", tmpDir, -1)
-		nodeConfig, err := params.LoadNodeConfig(testCase.configJSON)
-		testCase.validator(t, tmpDir, nodeConfig, err)
+		t.Run(testCase.name, func(t *testing.T) {
+			testCase.configJSON = strings.Replace(testCase.configJSON, "$TMPDIR", tmpDir, -1)
+			nodeConfig, err := params.LoadNodeConfig(testCase.configJSON)
+			testCase.validator(t, tmpDir, nodeConfig, err)
+		})
 	}
 }
@@ -1,5 +1,7 @@
 package params
 
+import "github.com/ethereum/go-ethereum/p2p/discv5"
+
 const (
 	// ClientIdentifier is client identifier to advertise over the network
 	ClientIdentifier = "StatusIM"

@@ -109,4 +111,12 @@ const (
 
 	// StatusChainNetworkID is id of a test network (private chain)
 	StatusChainNetworkID = 777
+
+	// WhisperDiscv5Topic used to register and search for whisper peers using discovery v5.
+	WhisperDiscv5Topic = discv5.Topic("whisper")
 )
+
+var (
+	// WhisperDiscv5Limits declares min and max limits for peers with whisper topic.
+	WhisperDiscv5Limits = Limits{2, 2}
+)
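As the commit message notes, when running statusd these expectations must be supplied through its own JSON configuration rather than derived from the embedded defaults. A hedged sketch of such a config, assuming the JSON keys mirror the `NodeConfig` field names and that `Limits` marshals as a min/max object (neither is shown in this diff):

```go
// Hypothetical statusd configuration; the RequireTopics value shape is an
// assumption, since the Limits serialization is not visible in this excerpt.
const statusdConfigJSON = `{
	"NetworkId": 4,
	"DataDir": "/tmp/status",
	"RegisterTopics": ["whisper"],
	"RequireTopics": {"whisper": {"Min": 2, "Max": 2}}
}`
```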
@@ -70,7 +70,6 @@ Update config for tests with new accounts `static/config/public-chain-accounts.j
Embed keys as binary data; you will need to install the `npm` tool and the web3.js lib:

```bash
(cd _assets && npm install web3)
make generate
```
@@ -494,7 +494,7 @@ func (s *TransactionsTestSuite) TestDoubleCompleteQueuedTransactions() {
 
 	receivedErrMessage := event["error_message"].(string)
 	expectedErrMessage := "could not decrypt key with given passphrase"
-	s.Equal(receivedErrMessage, expectedErrMessage)
+	s.Equal(expectedErrMessage, receivedErrMessage)
 
 	receivedErrCode := event["error_code"].(string)
 	s.Equal("2", receivedErrCode)