fix: specify ClusterID as 16 when using the sharding fleet so that metadata protocol negotiation doesn't fail

Prem Chaitanya Prathi 2023-11-23 14:11:05 +05:30 committed by richΛrd
parent d198ac63fc
commit 9510ad0f5d
5 changed files with 10 additions and 1 deletion
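
For context, RFC 51 static sharding derives relay pubsub topics from a cluster ID and a shard index, and cluster 16 is the one reserved for the Status app. A minimal, illustrative Go sketch of the topic format (the shard index 32 below is only an example value, not taken from this commit):

package main

import "fmt"

// staticShardTopic builds an RFC 51 static-sharding pubsub topic of the form
// /waku/2/rs/<cluster-id>/<shard>. Cluster 16 is reserved for the Status app,
// which is why the sharding fleet must advertise ClusterID = 16.
func staticShardTopic(clusterID, shard uint16) string {
	return fmt.Sprintf("/waku/2/rs/%d/%d", clusterID, shard)
}

func main() {
	fmt.Println(staticShardTopic(16, 32)) // /waku/2/rs/16/32
}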

@@ -23,6 +23,7 @@ const defaultMnemonicLength = 12
const walletAccountDefaultName = "Ethereum account"
const keystoreRelativePath = "keystore"
const defaultKeycardPairingDataFile = "/ethereum/mainnet_rpc/keycard/pairings.json"
const wakuClusterID uint16 = 16 // cluster 16 is assigned to the Status app, per https://rfc.vac.dev/spec/51/#static-sharding

var paths = []string{pathWalletRoot, pathEIP1581, pathDefaultChat, pathDefaultWallet}
@@ -115,7 +116,9 @@ func SetFleet(fleet string, nodeConfig *params.NodeConfig) error {
		return err
	}
	nodeConfig.ClusterConfig = *clusterConfig
	if fleet == shardsTest {
		nodeConfig.ClusterConfig.ClusterID = wakuClusterID
	}
	nodeConfig.ClusterConfig.WakuNodes = defaultWakuNodes[fleet]
	nodeConfig.ClusterConfig.DiscV5BootstrapNodes = defaultWakuNodes[fleet]
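
A minimal usage sketch of the new branch, assuming SetFleet is exported from the api package, that the module path is github.com/status-im/status-go, and that the shardsTest constant matches the fleet string "shards.test" (all assumptions, not shown in this diff): after this commit the resulting ClusterConfig.ClusterID is 16 for the sharding fleet and stays at its zero value for the other fleets.

package main

import (
	"fmt"

	"github.com/status-im/status-go/api"
	"github.com/status-im/status-go/params"
)

func main() {
	var cfg params.NodeConfig
	// Assumption: "shards.test" is the fleet name matched by the shardsTest constant.
	if err := api.SetFleet("shards.test", &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.ClusterConfig.ClusterID) // 16 after this commit, 0 before it
}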

@@ -330,6 +330,7 @@ func (b *StatusNode) wakuV2Service(nodeConfig *params.NodeConfig, telemetryServe
		DefaultShardPubsubTopic: shard.DefaultShardPubsubTopic(),
		UseShardAsDefaultTopic: nodeConfig.WakuV2Config.UseShardAsDefaultTopic,
		TelemetryServerURL: telemetryServerURL,
		ClusterId: nodeConfig.ClusterConfig.ClusterID,
	}

	if nodeConfig.WakuV2Config.MaxMessageSize > 0 {

@@ -271,6 +271,9 @@ type ClusterConfig struct {
	// DiscV5BootstrapNodes is a list of ENR records to be used for ambient discovery
	DiscV5BootstrapNodes []string

	// ClusterID is the Waku network identifier
	ClusterID uint16
}

// String dumps config object as nicely indented JSON

@@ -48,6 +48,7 @@ type Config struct {
	EnableFilterFullNode bool `toml:",omitempty"`
	DefaultShardPubsubTopic string `toml:",omitempty"`
	UseShardAsDefaultTopic bool `toml:",omitempty"`
	ClusterId uint16 `toml:",omitempty"`
}

var DefaultConfig = Config{

@@ -285,6 +285,7 @@ func New(nodeKey string, fleet string, cfg *Config, logger *zap.Logger, appDB *s
		node.WithKeepAlive(time.Duration(cfg.KeepAliveInterval) * time.Second),
		node.WithMaxPeerConnections(cfg.DiscoveryLimit),
		node.WithLogger(logger),
		node.WithClusterID(cfg.ClusterId),
	}

	if cfg.EnableDiscV5 {
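
Why this matters for the failure named in the commit title: during connection setup, Waku nodes exchange their cluster ID over the metadata protocol, and a mismatch causes the negotiation to fail. Below is a simplified, illustrative sketch of that check, not go-waku's actual code; before this change the sharding fleet left ClusterId at its zero value, so it never matched peers on cluster 16.

package main

import "fmt"

// negotiateMetadata mimics the cluster-id comparison performed during the Waku
// metadata handshake: peers advertising different cluster IDs reject each other.
func negotiateMetadata(localClusterID, remoteClusterID uint16) error {
	if localClusterID != remoteClusterID {
		return fmt.Errorf("metadata protocol: cluster id mismatch (local %d, remote %d)", localClusterID, remoteClusterID)
	}
	return nil
}

func main() {
	fmt.Println(negotiateMetadata(0, 16))  // fails: the pre-fix behaviour on the sharding fleet
	fmt.Println(negotiateMetadata(16, 16)) // <nil>: both sides on the Status cluster
}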