Improve multinode suite for better debug options, including logging to file

There is a 503 "sales unavailable" error
This commit is contained in:
Eric 2023-09-29 20:02:30 +10:00
parent b5c4be351d
commit f10cf41e4d
No known key found for this signature in database
3 changed files with 134 additions and 75 deletions

View File

@ -83,7 +83,7 @@ proc postAvailability*(
"maxCollateral": maxCollateral,
}
let response = client.http.post(url, $json)
assert response.status == "200 OK"
doAssert response.status == "200 OK", "expected 200 OK, got " & response.status & ", body: " & response.body
Availability.fromJson(response.body.parseJson)
proc getAvailabilities*(client: CodexClient): ?!seq[Availability] =

View File

@ -6,6 +6,7 @@ import std/sequtils
import std/strutils
import std/sequtils
import std/strutils
import std/sugar
import pkg/chronicles
import ../ethertest
import ./codexclient
@ -23,18 +24,23 @@ type
datadir*: string
ethAccount*: Address
StartNodes* = object
clients*: uint
providers*: uint
validators*: uint
DebugNodes* = object
client*: bool
provider*: bool
validator*: bool
topics*: string
clients*: StartNodeConfig
providers*: StartNodeConfig
validators*: StartNodeConfig
StartNodeConfig* = object
numNodes*: int
cliOptions*: seq[CliOption]
logFile*: ?string
logTopics*: seq[string]
debugEnabled*: bool
Role* {.pure.} = enum
Client,
Provider,
Validator
CliOption* = object of RootObj
nodeIdx*: ?int
key*: string
value*: string
proc new*(_: type RunningNode,
role: Role,
@ -48,29 +54,67 @@ proc new*(_: type RunningNode,
datadir: datadir,
ethAccount: ethAccount)
# Pre-change constructor (removed by this commit): built a StartNodes from
# plain per-role node counts. Replaced by the per-role StartNodeConfig
# builder procs (`nodes`, `debug`, `withLogFile`, `withLogTopics`).
proc init*(_: type StartNodes,
clients, providers, validators: uint): StartNodes =
StartNodes(clients: clients, providers: providers, validators: validators)
# Builder: returns a copy of `config` with `numNodes` set.
# Raises ValueError for a negative count.
proc nodes*(config: StartNodeConfig, numNodes: int): StartNodeConfig =
if numNodes < 0:
raise newException(ValueError, "numNodes must be >= 0")
# NOTE(review): the following DebugNodes constructor is the removed
# (pre-change) code interleaved by the diff view; the `nodes` body
# continues after it.
proc init*(_: type DebugNodes,
client, provider, validator: bool,
topics: string = "validator,proving,market"): DebugNodes =
DebugNodes(client: client, provider: provider, validator: validator,
topics: topics)
# remainder of `nodes`: copy, update, return (value-type object copy).
var startConfig = config
startConfig.numNodes = numNodes
return startConfig
proc simulateProofFailuresFor*(
  config: StartNodeConfig,
  providerIdx: int,
  failEveryNProofs: int
): StartNodeConfig =
  ## Returns a copy of `config` with a `--simulate-proof-failures` CLI option
  ## targeted at the provider node at `providerIdx` (via `nodeIdx`).
  ##
  ## Raises ValueError when `providerIdx` is outside `[0, config.numNodes)`.
  ## (The original check only rejected indices past the end; a negative index
  ## would have slipped through and silently matched no node.)
  if providerIdx < 0 or providerIdx >= config.numNodes:
    raise newException(ValueError, "provider index out of bounds")

  var startConfig = config
  startConfig.cliOptions.add(
    CliOption(
      nodeIdx: some providerIdx,
      key: "--simulate-proof-failures",
      value: $failEveryNProofs
    )
  )
  return startConfig
proc debug*(config: StartNodeConfig, enabled = true): StartNodeConfig =
  ## Returns a copy of `config` with stdout debug logging toggled
  ## on (default) or off.
  result = config
  result.debugEnabled = enabled
proc withLogFile*(
  config: StartNodeConfig,
  file: bool | string
): StartNodeConfig =
  ## Returns a copy of `config` with file logging configured.
  ##
  ## - `file: string` — use that path; raises ValueError when empty.
  ## - `file: true`   — use a default path next to the test sources.
  ## - `file: false`  — disable file logging.
  var startConfig = config
  when file is bool:
    if file:
      # NOTE(review): the original generated "codex" & $index & ".log" here,
      # but `index` is not declared in this proc — a compile error. The node
      # index is only known when the node process starts, so the default path
      # chosen at config time is index-free; a per-node suffix must be added
      # by the caller that knows the index (e.g. newNodeProcess).
      startConfig.logFile = some(currentSourcePath.parentDir() / "codex.log")
    else:
      startConfig.logFile = none string
  else:
    if file.len == 0:
      raise newException(ValueError, "file path length must be > 0")
    startConfig.logFile = some file
  return startConfig
proc withLogTopics*(
  config: StartNodeConfig,
  topics: varargs[string]
): StartNodeConfig =
  ## Returns a copy of `config` with the given chronicles log topics
  ## appended to any topics already configured.
  result = config
  for topic in topics:
    result.logTopics.add topic
template multinodesuite*(name: string,
startNodes: StartNodes, debugConfig: DebugConfig, body: untyped) =
if (debugConfig.client or debugConfig.provider) and
(enabledLogLevel > LogLevel.TRACE or
enabledLogLevel == LogLevel.NONE):
echo ""
echo "More test debug logging is available by running the tests with " &
"'-d:chronicles_log_level=TRACE " &
"-d:chronicles_disabled_topics=websock " &
"-d:chronicles_default_output_device=stdout " &
"-d:chronicles_sinks=textlines'"
echo ""
startNodes: StartNodes, body: untyped) =
ethersuite name:
@ -78,26 +122,28 @@ template multinodesuite*(name: string,
var bootstrap: string
proc newNodeProcess(index: int,
addlOptions: seq[string],
debug: bool): (NodeProcess, string, Address) =
config: StartNodeConfig
): (NodeProcess, string, Address) =
if index > accounts.len - 1:
raiseAssert("Cannot start node at index " & $index &
", not enough eth accounts.")
let datadir = getTempDir() / "Codex" & $index
let logdir = currentSourcePath.parentDir()
# let logdir = currentSourcePath.parentDir()
var options = @[
"--api-port=" & $(8080 + index),
"--data-dir=" & datadir,
"--nat=127.0.0.1",
"--disc-ip=127.0.0.1",
"--disc-port=" & $(8090 + index),
"--eth-account=" & $accounts[index],
"--log-file=" & (logdir / "codex" & $index & ".log")]
.concat(addlOptions)
if debug: options.add "--log-level=INFO;TRACE: " & debugConfig.topics
let node = startNode(options, debug = debug)
"--eth-account=" & $accounts[index]]
if logFile =? config.logFile:
options.add "--log-file=" & logFile
if config.logTopics.len > 0:
options.add "--log-level=INFO;TRACE: " & config.logTopics.join(",")
let node = startNode(options, config.debugEnabled)
node.waitUntilStarted()
(node, datadir, accounts[index])
@ -106,48 +152,47 @@ template multinodesuite*(name: string,
proc startClientNode() =
let index = running.len
let (node, datadir, account) = newNodeProcess(
index, @["--persistence"], debugConfig.client)
var config = startNodes.clients
config.cliOptions.add CliOption(key: "--persistence")
let (node, datadir, account) = newNodeProcess(index, config)
let restClient = newCodexClient(index)
running.add RunningNode.new(Role.Client, node, restClient, datadir,
account)
if debugConfig.client:
if config.debugEnabled:
debug "started new client node and codex client",
restApiPort = 8080 + index, discPort = 8090 + index, account
proc startProviderNode(cliOptions: seq[CliOption]) =
proc startProviderNode(cliOptions: seq[CliOption] = @[]) =
let index = running.len
var options = @[
"--bootstrap-node=" & bootstrap,
"--persistence"
]
var config = startNodes.providers
config.cliOptions = config.cliOptions.concat(cliOptions)
config.cliOptions.add CliOption(key: "--bootstrap-node", value: bootstrap)
config.cliOptions.add CliOption(key: "--persistence")
for cliOption in cliOptions:
var option = cliOption.key
if cliOption.value.len > 0:
option &= "=" & cliOption.value
options.add option
config.cliOptions = config.cliOptions.filter(
o => (let idx = o.nodeIdx |? index; idx == index)
)
let (node, datadir, account) = newNodeProcess(index, options,
debugConfig.provider)
let (node, datadir, account) = newNodeProcess(index, config)
let restClient = newCodexClient(index)
running.add RunningNode.new(Role.Provider, node, restClient, datadir,
account)
if debugConfig.provider:
if config.debugEnabled:
debug "started new provider node and codex client",
restApiPort = 8080 + index, discPort = 8090 + index, account,
cliOptions = options.join(",")
cliOptions = config.cliOptions.join(",")
proc startValidatorNode() =
let index = running.len
let (node, datadir, account) = newNodeProcess(index, @[
"--bootstrap-node=" & bootstrap,
"--validator"],
debugConfig.validator)
var config = startNodes.validators
config.cliOptions.add CliOption(key: "--bootstrap-node", value: bootstrap)
config.cliOptions.add CliOption(key: "--validator")
let (node, datadir, account) = newNodeProcess(index, config)
let restClient = newCodexClient(index)
running.add RunningNode.new(Role.Validator, node, restClient, datadir,
account)
if debugConfig.validator:
if config.debugEnabled:
debug "started new validator node and codex client",
restApiPort = 8080 + index, discPort = 8090 + index, account
@ -161,18 +206,15 @@ template multinodesuite*(name: string,
running.filter(proc(r: RunningNode): bool = r.role == Role.Validator)
setup:
for i in 0..<startNodes.clients:
for i in 0..<startNodes.clients.numNodes:
startClientNode()
if i == 0:
bootstrap = running[0].restClient.info()["spr"].getStr()
for i in 0..<startNodes.providers:
let cliOptions = startNodes.providerCliOptions.filter(
proc(o: CliOption): bool = o.nodeIdx == i
)
startProviderNode(cliOptions)
for i in 0..<startNodes.providers.numNodes:
startProviderNode()
for i in 0..<startNodes.validators:
for i in 0..<startNodes.validators.numNodes:
startValidatorNode()
teardown:

View File

@ -236,22 +236,40 @@ multinodesuite "Simulate invalid proofs",
await subscription.unsubscribe()
multinodesuite "Simulate invalid proofs",
StartNodes.init(clients=1, providers=2, validators=1)
.simulateProofFailuresFor(providerIdx = 0, failEveryNProofs = 2),
DebugConfig.init(client=false, provider=true, validator=false, topics="marketplace,sales,proving,reservations,node,JSONRPC-HTTP-CLIENT,JSONRPC-WS-CLIENT,ethers"):
StartNodes(
clients: StartNodeConfig().nodes(1),
providers:
StartNodeConfig()
.nodes(2)
.simulateProofFailuresFor(providerIdx=0, failEveryNProofs=2)
.debug()
.withLogTopics("marketplace",
"sales",
"proving",
"reservations",
"node",
"JSONRPC-HTTP-CLIENT",
"JSONRPC-WS-CLIENT",
"ethers"
),
validators: StartNodeConfig().nodes(1)
):
# .simulateProofFailuresFor(providerIdx = 0, failEveryNProofs = 2),
# DebugConfig.init(client=false, provider=true, validator=false, topics="marketplace,sales,proving,reservations,node,JSONRPC-HTTP-CLIENT,JSONRPC-WS-CLIENT,ethers"):
proc purchaseStateIs(client: CodexClient, id: PurchaseId, state: string): bool =
client.getPurchase(id).option.?state == some state
var marketplace: Marketplace
var period: uint64
var slotId: SlotId
setup:
marketplace = Marketplace.new(Marketplace.address, provider)
let config = await marketplace.config()
period = config.proofs.period.truncate(uint64)
slotId = SlotId(array[32, byte].default) # ensure we aren't reusing from prev test
# Our Hardhat configuration does use automine, which means that time tracked by `provider.currentTime()` is not
# advanced until blocks are mined and that happens only when transaction is submitted.
@ -259,9 +277,7 @@ multinodesuite "Simulate invalid proofs",
await provider.advanceTime(1.u256)
proc periods(p: int): uint64 =
# when p is uint:
p.uint64 * period
# else: p.uint * period
p.uint64 * period
proc advanceToNextPeriod {.async.} =
let periodicity = Periodicity(seconds: period.u256)
@ -295,7 +311,8 @@ multinodesuite "Simulate invalid proofs",
duration=duration.u256,
proofProbability=proofProbability.u256,
collateral=100.u256,
reward=400.u256
reward=400.u256,
nodes=2'u
).get
check eventually client.purchaseStateIs(id, "started")
return id