mirror of
https://github.com/logos-messaging/logos-delivery.git
synced 2026-03-01 22:53:25 +00:00
Added full JSON to NodeConfig support
This commit is contained in:
parent
12d3471355
commit
c269a363ad
@ -1,7 +1,9 @@
|
||||
{.used.}
|
||||
|
||||
import std/options, results, stint, testutils/unittests
|
||||
import json_serialization
|
||||
import waku/api/api_conf, waku/factory/waku_conf, waku/factory/networks_config
|
||||
import waku/common/logging
|
||||
|
||||
suite "LibWaku Conf - toWakuConf":
|
||||
test "Minimal configuration":
|
||||
@ -298,3 +300,709 @@ suite "LibWaku Conf - toWakuConf":
|
||||
check:
|
||||
wakuConf.staticNodes.len == 1
|
||||
wakuConf.staticNodes[0] == entryNodes[1]
|
||||
|
||||
suite "NodeConfig JSON - complete format":
  test "Full NodeConfig from complete JSON with field validation":
    ## Given: a JSON document that populates every NodeConfig field
    let jsonStr =
      """
      {
        "mode": "Core",
        "protocolsConfig": {
          "entryNodes": ["enrtree://TREE@nodes.example.com"],
          "staticStoreNodes": ["/ip4/1.2.3.4/tcp/80/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc"],
          "clusterId": 10,
          "autoShardingConfig": {
            "numShardsInCluster": 4
          },
          "messageValidation": {
            "maxMessageSize": "100 KiB",
            "rlnConfig": null
          }
        },
        "networkingConfig": {
          "listenIpv4": "192.168.1.1",
          "p2pTcpPort": 7000,
          "discv5UdpPort": 7001
        },
        "ethRpcEndpoints": ["http://localhost:8545"],
        "p2pReliability": true,
        "logLevel": "WARN",
        "logFormat": "TEXT"
      }
      """

    ## When
    let config = decodeNodeConfigFromJson(jsonStr)

    ## Then: top-level fields
    check:
      config.mode == WakuMode.Core
      config.ethRpcEndpoints == @["http://localhost:8545"]
      config.p2pReliability == true
      config.logLevel == LogLevel.WARN
      config.logFormat == LogFormat.TEXT

    ## Then: networking fields
    check:
      config.networkingConfig.listenIpv4 == "192.168.1.1"
      config.networkingConfig.p2pTcpPort == 7000
      config.networkingConfig.discv5UdpPort == 7001

    ## Then: protocols fields
    let pc = config.protocolsConfig
    check:
      pc.entryNodes == @["enrtree://TREE@nodes.example.com"]
      pc.staticStoreNodes ==
        @[
          "/ip4/1.2.3.4/tcp/80/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc"
        ]
      pc.clusterId == 10
      pc.autoShardingConfig.numShardsInCluster == 4
      pc.messageValidation.maxMessageSize == "100 KiB"
      pc.messageValidation.rlnConfig.isNone()

  test "Full NodeConfig with RlnConfig present":
    ## Given: rlnConfig is a populated object instead of null
    let jsonStr =
      """
      {
        "mode": "Edge",
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "messageValidation": {
            "maxMessageSize": "150 KiB",
            "rlnConfig": {
              "contractAddress": "0x1234567890ABCDEF1234567890ABCDEF12345678",
              "chainId": 5,
              "epochSizeSec": 600
            }
          }
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """

    ## When
    let config = decodeNodeConfigFromJson(jsonStr)

    ## Then
    check config.mode == WakuMode.Edge

    let mv = config.protocolsConfig.messageValidation
    check:
      mv.maxMessageSize == "150 KiB"
      mv.rlnConfig.isSome()
    let rln = mv.rlnConfig.get()
    check:
      rln.contractAddress == "0x1234567890ABCDEF1234567890ABCDEF12345678"
      rln.chainId == 5'u
      rln.epochSizeSec == 600'u64

  test "Round-trip encode/decode preserves all fields":
    ## Given: a fully populated in-memory NodeConfig
    let original = NodeConfig.init(
      mode = Edge,
      protocolsConfig = ProtocolsConfig.init(
        entryNodes = @["enrtree://TREE@example.com"],
        staticStoreNodes =
          @[
            "/ip4/1.2.3.4/tcp/80/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc"
          ],
        clusterId = 42,
        autoShardingConfig = AutoShardingConfig(numShardsInCluster: 16),
        messageValidation = MessageValidation(
          maxMessageSize: "256 KiB",
          rlnConfig: some(
            RlnConfig(
              contractAddress: "0xAABBCCDDEEFF00112233445566778899AABBCCDD",
              chainId: 137,
              epochSizeSec: 300,
            )
          ),
        ),
      ),
      networkingConfig =
        NetworkingConfig(listenIpv4: "10.0.0.1", p2pTcpPort: 9090, discv5UdpPort: 9091),
      ethRpcEndpoints = @["https://rpc.example.com"],
      p2pReliability = true,
      logLevel = LogLevel.DEBUG,
      logFormat = LogFormat.JSON,
    )

    ## When: encode to JSON and decode it back
    let decoded = decodeNodeConfigFromJson(Json.encode(original))

    ## Then: every field survives the round trip
    check:
      decoded.mode == original.mode
      decoded.ethRpcEndpoints == original.ethRpcEndpoints
      decoded.p2pReliability == original.p2pReliability
      decoded.logLevel == original.logLevel
      decoded.logFormat == original.logFormat
      decoded.networkingConfig.listenIpv4 == original.networkingConfig.listenIpv4
      decoded.networkingConfig.p2pTcpPort == original.networkingConfig.p2pTcpPort
      decoded.networkingConfig.discv5UdpPort == original.networkingConfig.discv5UdpPort
      decoded.protocolsConfig.entryNodes == original.protocolsConfig.entryNodes
      decoded.protocolsConfig.staticStoreNodes ==
        original.protocolsConfig.staticStoreNodes
      decoded.protocolsConfig.clusterId == original.protocolsConfig.clusterId
      decoded.protocolsConfig.autoShardingConfig.numShardsInCluster ==
        original.protocolsConfig.autoShardingConfig.numShardsInCluster
      decoded.protocolsConfig.messageValidation.maxMessageSize ==
        original.protocolsConfig.messageValidation.maxMessageSize
      decoded.protocolsConfig.messageValidation.rlnConfig.isSome()

    let decodedRln = decoded.protocolsConfig.messageValidation.rlnConfig.get()
    let originalRln = original.protocolsConfig.messageValidation.rlnConfig.get()
    check:
      decodedRln.contractAddress == originalRln.contractAddress
      decodedRln.chainId == originalRln.chainId
      decodedRln.epochSizeSec == originalRln.epochSizeSec
|
||||
|
||||
suite "NodeConfig JSON - partial format with defaults":
  test "Minimal NodeConfig - empty object uses all defaults":
    ## Given: an empty JSON object
    let config = decodeNodeConfigFromJson("{}")
    let defaultConfig = NodeConfig.init()

    ## Then: every field matches the programmatic defaults
    check:
      config.mode == defaultConfig.mode
      config.ethRpcEndpoints == defaultConfig.ethRpcEndpoints
      config.p2pReliability == defaultConfig.p2pReliability
      config.logLevel == defaultConfig.logLevel
      config.logFormat == defaultConfig.logFormat
      config.networkingConfig.listenIpv4 == defaultConfig.networkingConfig.listenIpv4
      config.networkingConfig.p2pTcpPort == defaultConfig.networkingConfig.p2pTcpPort
      config.networkingConfig.discv5UdpPort ==
        defaultConfig.networkingConfig.discv5UdpPort
      config.protocolsConfig.entryNodes == defaultConfig.protocolsConfig.entryNodes
      config.protocolsConfig.staticStoreNodes ==
        defaultConfig.protocolsConfig.staticStoreNodes
      config.protocolsConfig.clusterId == defaultConfig.protocolsConfig.clusterId
      config.protocolsConfig.autoShardingConfig.numShardsInCluster ==
        defaultConfig.protocolsConfig.autoShardingConfig.numShardsInCluster
      config.protocolsConfig.messageValidation.maxMessageSize ==
        defaultConfig.protocolsConfig.messageValidation.maxMessageSize
      config.protocolsConfig.messageValidation.rlnConfig.isSome() ==
        defaultConfig.protocolsConfig.messageValidation.rlnConfig.isSome()

  test "Minimal NodeConfig keeps network preset defaults":
    ## Given: an empty JSON object
    let config = decodeNodeConfigFromJson("{}")

    ## Then: the TWN preset is applied for protocols defaults
    check:
      config.protocolsConfig.entryNodes == TheWakuNetworkPreset.entryNodes
      config.protocolsConfig.messageValidation.rlnConfig.isSome()

  test "NodeConfig with only mode specified":
    ## Given: just the mode field
    let config = decodeNodeConfigFromJson("""{"mode": "Edge"}""")

    ## Then: mode is taken from JSON, everything else is defaulted
    check:
      config.mode == WakuMode.Edge
      ## Remaining fields get defaults
      config.logLevel == LogLevel.INFO
      config.logFormat == LogFormat.TEXT
      config.p2pReliability == false
      config.ethRpcEndpoints == newSeq[string]()

  test "ProtocolsConfig partial - optional fields get defaults":
    ## Given: only the required entryNodes and clusterId are provided
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": ["enrtree://X@y.com"],
          "clusterId": 5
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """

    ## When
    let config = decodeNodeConfigFromJson(jsonStr)

    ## Then: required fields are set, optionals fall back to defaults
    check:
      config.protocolsConfig.entryNodes == @["enrtree://X@y.com"]
      config.protocolsConfig.clusterId == 5
      config.protocolsConfig.staticStoreNodes == newSeq[string]()
      config.protocolsConfig.autoShardingConfig.numShardsInCluster ==
        DefaultAutoShardingConfig.numShardsInCluster
      config.protocolsConfig.messageValidation.maxMessageSize ==
        DefaultMessageValidation.maxMessageSize
      config.protocolsConfig.messageValidation.rlnConfig.isNone()

  test "MessageValidation partial - rlnConfig omitted defaults to none":
    ## Given: messageValidation without an rlnConfig key
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "messageValidation": {
            "maxMessageSize": "200 KiB"
          }
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """

    ## When
    let config = decodeNodeConfigFromJson(jsonStr)

    ## Then
    check:
      config.protocolsConfig.messageValidation.maxMessageSize == "200 KiB"
      config.protocolsConfig.messageValidation.rlnConfig.isNone()

  test "logLevel and logFormat omitted use defaults":
    ## Given: a config with no logging fields
    let jsonStr =
      """
      {
        "mode": "Core",
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """

    ## When
    let config = decodeNodeConfigFromJson(jsonStr)

    ## Then: logging falls back to INFO / TEXT
    check:
      config.logLevel == LogLevel.INFO
      config.logFormat == LogFormat.TEXT
|
||||
|
||||
suite "NodeConfig JSON - unsupported fields raise errors":
  ## Each test feeds JSON containing an unknown key at a different nesting
  ## level and asserts that decoding rejects it with a SerializationError.
  ## Uses unittest's `expect` instead of the manual try/except + flag pattern.

  test "Unknown field at NodeConfig level raises":
    let jsonStr =
      """
      {
        "mode": "Core",
        "unknownTopLevel": true
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Typo in NodeConfig field name raises":
    let jsonStr =
      """
      {
        "modes": "Core"
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Unknown field in ProtocolsConfig raises":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "futureField": "something"
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Unknown field in NetworkingConfig raises":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000,
          "futureNetworkField": "value"
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Unknown field in MessageValidation raises":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "messageValidation": {
            "maxMessageSize": "150 KiB",
            "maxMesssageSize": "typo"
          }
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Unknown field in RlnConfig raises":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "messageValidation": {
            "maxMessageSize": "150 KiB",
            "rlnConfig": {
              "contractAddress": "0xABCDEF1234567890ABCDEF1234567890ABCDEF12",
              "chainId": 1,
              "epochSizeSec": 600,
              "unknownRlnField": true
            }
          }
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Unknown field in AutoShardingConfig raises":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "autoShardingConfig": {
            "numShardsInCluster": 8,
            "shardPrefix": "extra"
          }
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)
|
||||
|
||||
suite "NodeConfig JSON - missing required fields":
  ## Each test omits one mandatory key and asserts decoding fails with
  ## a SerializationError. Uses unittest's `expect` instead of the
  ## manual try/except + flag pattern.

  test "Missing 'entryNodes' in ProtocolsConfig":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "clusterId": 1
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Missing 'clusterId' in ProtocolsConfig":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": []
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Missing required fields in NetworkingConfig":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0"
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Missing 'numShardsInCluster' in AutoShardingConfig":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "autoShardingConfig": {}
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Missing required fields in RlnConfig":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "messageValidation": {
            "maxMessageSize": "150 KiB",
            "rlnConfig": {
              "contractAddress": "0xABCD"
            }
          }
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Missing 'maxMessageSize' in MessageValidation":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "messageValidation": {
            "rlnConfig": null
          }
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)
|
||||
|
||||
suite "NodeConfig JSON - invalid values":
  ## Malformed values (bad enum members, wrong types, broken syntax) must
  ## all surface as SerializationError. Uses unittest's `expect` instead
  ## of the manual try/except + flag pattern.

  test "Invalid enum value for mode":
    expect SerializationError:
      discard decodeNodeConfigFromJson("""{"mode": "InvalidMode"}""")

  test "Invalid enum value for logLevel":
    expect SerializationError:
      discard decodeNodeConfigFromJson("""{"logLevel": "SUPERVERBOSE"}""")

  test "Wrong type for clusterId (string instead of number)":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": "not-a-number"
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Completely invalid JSON syntax":
    expect SerializationError:
      discard decodeNodeConfigFromJson("""{ not valid json at all }""")
|
||||
|
||||
suite "NodeConfig JSON -> WakuConf integration":
  test "Decoded config translates to valid WakuConf":
    ## Given: a complete JSON config
    let jsonStr =
      """
      {
        "mode": "Core",
        "protocolsConfig": {
          "entryNodes": [
            "enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im"
          ],
          "staticStoreNodes": [
            "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc"
          ],
          "clusterId": 55,
          "autoShardingConfig": {
            "numShardsInCluster": 6
          },
          "messageValidation": {
            "maxMessageSize": "256 KiB",
            "rlnConfig": null
          }
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        },
        "ethRpcEndpoints": ["http://localhost:8545"],
        "p2pReliability": true,
        "logLevel": "INFO",
        "logFormat": "TEXT"
      }
      """

    ## When: decode, then translate to the factory configuration
    let nodeConfig = decodeNodeConfigFromJson(jsonStr)
    let wakuConfRes = toWakuConf(nodeConfig)

    ## Then: translation succeeds and the result validates
    require wakuConfRes.isOk()
    let wakuConf = wakuConfRes.get()
    require wakuConf.validate().isOk()
    check:
      wakuConf.clusterId == 55
      wakuConf.shardingConf.numShardsInCluster == 6
      wakuConf.maxMessageSizeBytes == 256'u64 * 1024'u64
      wakuConf.staticNodes.len == 1
      wakuConf.p2pReliability == true
|
||||
|
||||
@ -1,6 +1,7 @@
|
||||
import std/[net, options]
|
||||
|
||||
import results
|
||||
import json_serialization, json_serialization/std/options as json_options
|
||||
|
||||
import
|
||||
waku/common/utils/parse_size_units,
|
||||
@ -10,6 +11,8 @@ import
|
||||
waku/factory/networks_config,
|
||||
./entry_nodes
|
||||
|
||||
export json_serialization, json_options
|
||||
|
||||
type AutoShardingConfig* {.requiresInit.} = object
|
||||
numShardsInCluster*: uint16
|
||||
|
||||
@ -111,6 +114,46 @@ proc init*(
|
||||
logFormat: logFormat,
|
||||
)
|
||||
|
||||
# -- Getters for ProtocolsConfig (private fields) - used for testing --
# Pure accessors: declared as `func` so the compiler enforces that they
# have no side effects.

func entryNodes*(c: ProtocolsConfig): seq[string] =
  ## Entry nodes used for peer discovery (e.g. enrtree:// URLs).
  c.entryNodes

func staticStoreNodes*(c: ProtocolsConfig): seq[string] =
  ## Statically configured store-node multiaddresses.
  c.staticStoreNodes

func clusterId*(c: ProtocolsConfig): uint16 =
  ## The Waku cluster identifier.
  c.clusterId

func autoShardingConfig*(c: ProtocolsConfig): AutoShardingConfig =
  ## Auto-sharding parameters (number of shards in the cluster).
  c.autoShardingConfig

func messageValidation*(c: ProtocolsConfig): MessageValidation =
  ## Message-validation settings (max size, optional RLN config).
  c.messageValidation

# -- Getters for NodeConfig (private fields) - used for testing --

func mode*(c: NodeConfig): WakuMode =
  ## The node operation mode (e.g. Core or Edge).
  c.mode

func protocolsConfig*(c: NodeConfig): ProtocolsConfig =
  ## Protocol-level configuration.
  c.protocolsConfig

func networkingConfig*(c: NodeConfig): NetworkingConfig =
  ## Networking (listen address / ports) configuration.
  c.networkingConfig

func ethRpcEndpoints*(c: NodeConfig): seq[string] =
  ## Ethereum RPC endpoints used by RLN.
  c.ethRpcEndpoints

func p2pReliability*(c: NodeConfig): bool =
  ## Whether p2p reliability is enabled.
  c.p2pReliability

func logLevel*(c: NodeConfig): LogLevel =
  ## Configured logging verbosity.
  c.logLevel

func logFormat*(c: NodeConfig): LogFormat =
  ## Configured log output format.
  c.logFormat
|
||||
|
||||
proc toWakuConf*(nodeConfig: NodeConfig): Result[WakuConf, string] =
|
||||
var b = WakuConfBuilder.init()
|
||||
|
||||
@ -225,3 +268,260 @@ proc toWakuConf*(nodeConfig: NodeConfig): Result[WakuConf, string] =
|
||||
return err("Failed to validate configuration: " & error)
|
||||
|
||||
return ok(wakuConf)
|
||||
|
||||
# ---- JSON serialization (writeValue / readValue) ----
|
||||
# ---------- AutoShardingConfig ----------
|
||||
|
||||
proc writeValue*(w: var JsonWriter, val: AutoShardingConfig) {.raises: [IOError].} =
  ## Serializes AutoShardingConfig as `{"numShardsInCluster": N}`.
  w.beginRecord()
  w.writeField("numShardsInCluster", val.numShardsInCluster)
  w.endRecord()

proc readValue*(
    r: var JsonReader, val: var AutoShardingConfig
) {.raises: [SerializationError, IOError].} =
  ## Parses AutoShardingConfig; rejects unknown keys and requires
  ## `numShardsInCluster` to be present.
  var shards = none(uint16)

  for fieldName in readObjectFields(r):
    if fieldName == "numShardsInCluster":
      shards = some(r.readValue(uint16))
    else:
      r.raiseUnexpectedField(fieldName, "AutoShardingConfig")

  if shards.isNone():
    r.raiseUnexpectedValue("Missing required field 'numShardsInCluster'")

  val = AutoShardingConfig(numShardsInCluster: shards.get())
|
||||
|
||||
# ---------- RlnConfig ----------
|
||||
|
||||
proc writeValue*(w: var JsonWriter, val: RlnConfig) {.raises: [IOError].} =
  ## Serializes RlnConfig with its three fields as a flat JSON object.
  w.beginRecord()
  w.writeField("contractAddress", val.contractAddress)
  w.writeField("chainId", val.chainId)
  w.writeField("epochSizeSec", val.epochSizeSec)
  w.endRecord()

proc readValue*(
    r: var JsonReader, val: var RlnConfig
) {.raises: [SerializationError, IOError].} =
  ## Parses RlnConfig; all three fields are mandatory and unknown
  ## keys are rejected.
  var
    addressOpt = none(string)
    chainOpt = none(uint)
    epochOpt = none(uint64)

  for fieldName in readObjectFields(r):
    case fieldName
    of "contractAddress":
      addressOpt = some(r.readValue(string))
    of "chainId":
      chainOpt = some(r.readValue(uint))
    of "epochSizeSec":
      epochOpt = some(r.readValue(uint64))
    else:
      r.raiseUnexpectedField(fieldName, "RlnConfig")

  if addressOpt.isNone():
    r.raiseUnexpectedValue("Missing required field 'contractAddress'")
  if chainOpt.isNone():
    r.raiseUnexpectedValue("Missing required field 'chainId'")
  if epochOpt.isNone():
    r.raiseUnexpectedValue("Missing required field 'epochSizeSec'")

  val = RlnConfig(
    contractAddress: addressOpt.get(),
    chainId: chainOpt.get(),
    epochSizeSec: epochOpt.get(),
  )
|
||||
|
||||
# ---------- NetworkingConfig ----------
|
||||
|
||||
proc writeValue*(w: var JsonWriter, val: NetworkingConfig) {.raises: [IOError].} =
  ## Serializes NetworkingConfig (listen address and ports) as a flat object.
  w.beginRecord()
  w.writeField("listenIpv4", val.listenIpv4)
  w.writeField("p2pTcpPort", val.p2pTcpPort)
  w.writeField("discv5UdpPort", val.discv5UdpPort)
  w.endRecord()

proc readValue*(
    r: var JsonReader, val: var NetworkingConfig
) {.raises: [SerializationError, IOError].} =
  ## Parses NetworkingConfig; every field is mandatory and unknown
  ## keys are rejected.
  var
    ipOpt = none(string)
    tcpOpt = none(uint16)
    udpOpt = none(uint16)

  for fieldName in readObjectFields(r):
    case fieldName
    of "listenIpv4":
      ipOpt = some(r.readValue(string))
    of "p2pTcpPort":
      tcpOpt = some(r.readValue(uint16))
    of "discv5UdpPort":
      udpOpt = some(r.readValue(uint16))
    else:
      r.raiseUnexpectedField(fieldName, "NetworkingConfig")

  if ipOpt.isNone():
    r.raiseUnexpectedValue("Missing required field 'listenIpv4'")
  if tcpOpt.isNone():
    r.raiseUnexpectedValue("Missing required field 'p2pTcpPort'")
  if udpOpt.isNone():
    r.raiseUnexpectedValue("Missing required field 'discv5UdpPort'")

  val = NetworkingConfig(
    listenIpv4: ipOpt.get(),
    p2pTcpPort: tcpOpt.get(),
    discv5UdpPort: udpOpt.get(),
  )
|
||||
|
||||
# ---------- MessageValidation ----------
|
||||
|
||||
proc writeValue*(w: var JsonWriter, val: MessageValidation) {.raises: [IOError].} =
  ## Serializes MessageValidation; `rlnConfig` is written as null when absent.
  w.beginRecord()
  w.writeField("maxMessageSize", val.maxMessageSize)
  w.writeField("rlnConfig", val.rlnConfig)
  w.endRecord()

proc readValue*(
    r: var JsonReader, val: var MessageValidation
) {.raises: [SerializationError, IOError].} =
  ## Parses MessageValidation. `maxMessageSize` is mandatory; `rlnConfig`
  ## defaults to none when the key is omitted (or explicitly null).
  ## Unknown keys are rejected.
  var
    sizeOpt = none(string)
    # Outer Option: was the key present at all? Inner: the null/object value.
    rlnOpt = none(Option[RlnConfig])

  for fieldName in readObjectFields(r):
    case fieldName
    of "maxMessageSize":
      sizeOpt = some(r.readValue(string))
    of "rlnConfig":
      rlnOpt = some(r.readValue(Option[RlnConfig]))
    else:
      r.raiseUnexpectedField(fieldName, "MessageValidation")

  if sizeOpt.isNone():
    r.raiseUnexpectedValue("Missing required field 'maxMessageSize'")

  val = MessageValidation(
    maxMessageSize: sizeOpt.get(), rlnConfig: rlnOpt.get(none(RlnConfig))
  )
|
||||
|
||||
# ---------- ProtocolsConfig ----------
|
||||
|
||||
proc writeValue*(w: var JsonWriter, val: ProtocolsConfig) {.raises: [IOError].} =
  ## Serializes every ProtocolsConfig field as a flat JSON object.
  w.beginRecord()
  w.writeField("entryNodes", val.entryNodes)
  w.writeField("staticStoreNodes", val.staticStoreNodes)
  w.writeField("clusterId", val.clusterId)
  w.writeField("autoShardingConfig", val.autoShardingConfig)
  w.writeField("messageValidation", val.messageValidation)
  w.endRecord()

proc readValue*(
    r: var JsonReader, val: var ProtocolsConfig
) {.raises: [SerializationError, IOError].} =
  ## Parses ProtocolsConfig. `entryNodes` and `clusterId` are mandatory;
  ## the remaining fields fall back to their defaults when omitted.
  ## Unknown keys are rejected.
  var
    entriesOpt: Option[seq[string]]
    storeOpt: Option[seq[string]]
    clusterOpt: Option[uint16]
    shardingOpt: Option[AutoShardingConfig]
    validationOpt: Option[MessageValidation]

  for fieldName in readObjectFields(r):
    case fieldName
    of "entryNodes":
      entriesOpt = some(r.readValue(seq[string]))
    of "staticStoreNodes":
      storeOpt = some(r.readValue(seq[string]))
    of "clusterId":
      clusterOpt = some(r.readValue(uint16))
    of "autoShardingConfig":
      shardingOpt = some(r.readValue(AutoShardingConfig))
    of "messageValidation":
      validationOpt = some(r.readValue(MessageValidation))
    else:
      r.raiseUnexpectedField(fieldName, "ProtocolsConfig")

  if entriesOpt.isNone():
    r.raiseUnexpectedValue("Missing required field 'entryNodes'")
  if clusterOpt.isNone():
    r.raiseUnexpectedValue("Missing required field 'clusterId'")

  val = ProtocolsConfig.init(
    entryNodes = entriesOpt.get(),
    staticStoreNodes = storeOpt.get(@[]),
    clusterId = clusterOpt.get(),
    autoShardingConfig = shardingOpt.get(DefaultAutoShardingConfig),
    messageValidation = validationOpt.get(DefaultMessageValidation),
  )
|
||||
|
||||
# ---------- NodeConfig ----------
|
||||
|
||||
proc writeValue*(w: var JsonWriter, val: NodeConfig) {.raises: [IOError].} =
  ## Serializes every NodeConfig field as a flat JSON object.
  w.beginRecord()
  w.writeField("mode", val.mode)
  w.writeField("protocolsConfig", val.protocolsConfig)
  w.writeField("networkingConfig", val.networkingConfig)
  w.writeField("ethRpcEndpoints", val.ethRpcEndpoints)
  w.writeField("p2pReliability", val.p2pReliability)
  w.writeField("logLevel", val.logLevel)
  w.writeField("logFormat", val.logFormat)
  w.endRecord()

proc readValue*(
    r: var JsonReader, val: var NodeConfig
) {.raises: [SerializationError, IOError].} =
  ## Parses NodeConfig. Every field is optional and falls back to the
  ## same defaults NodeConfig.init would apply; unknown keys are rejected.
  var
    modeOpt: Option[WakuMode]
    protocolsOpt: Option[ProtocolsConfig]
    networkingOpt: Option[NetworkingConfig]
    rpcOpt: Option[seq[string]]
    reliabilityOpt: Option[bool]
    levelOpt: Option[LogLevel]
    formatOpt: Option[LogFormat]

  for fieldName in readObjectFields(r):
    case fieldName
    of "mode":
      modeOpt = some(r.readValue(WakuMode))
    of "protocolsConfig":
      protocolsOpt = some(r.readValue(ProtocolsConfig))
    of "networkingConfig":
      networkingOpt = some(r.readValue(NetworkingConfig))
    of "ethRpcEndpoints":
      rpcOpt = some(r.readValue(seq[string]))
    of "p2pReliability":
      reliabilityOpt = some(r.readValue(bool))
    of "logLevel":
      levelOpt = some(r.readValue(LogLevel))
    of "logFormat":
      formatOpt = some(r.readValue(LogFormat))
    else:
      r.raiseUnexpectedField(fieldName, "NodeConfig")

  val = NodeConfig.init(
    mode = modeOpt.get(WakuMode.Core),
    protocolsConfig = protocolsOpt.get(TheWakuNetworkPreset),
    networkingConfig = networkingOpt.get(DefaultNetworkingConfig),
    ethRpcEndpoints = rpcOpt.get(@[]),
    p2pReliability = reliabilityOpt.get(false),
    logLevel = levelOpt.get(LogLevel.INFO),
    logFormat = formatOpt.get(LogFormat.TEXT),
  )
|
||||
|
||||
# ---------- Decode helper ----------
|
||||
# Json.decode returns T via `result`, which conflicts with {.requiresInit.}
|
||||
# on Nim 2.x. This helper avoids the issue by using readValue into a var.
|
||||
|
||||
proc decodeNodeConfigFromJson*(
    jsonStr: string
): NodeConfig {.raises: [SerializationError].} =
  ## Decodes a NodeConfig from a JSON string.
  ##
  ## Json.decode returns T via `result`, which conflicts with {.requiresInit.}
  ## on Nim 2.x, so this helper reads into an explicitly initialized variable
  ## via readValue instead.
  var parsed = NodeConfig.init() # default-initialized
  try:
    var stream = unsafeMemoryInput(jsonStr)
    var reader = JsonReader[DefaultFlavor].init(stream)
    reader.readValue(parsed)
  except IOError as err:
    # Surface stream errors as serialization failures to keep the
    # raises list narrow.
    raise newException(SerializationError, err.msg)
  return parsed
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user