Update integration tests to use CLI commands

Integration tests have been updated to use the new CLI commands. The API used by the integration tests has also changed slightly.

The proofs tests have been updated to use 5 nodes and 8 blocks of data. The remaining integration tests still need to be updated.
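For reference, a minimal sketch of the reworked test-configuration API, assembled from the diffs below (the node counts, log topics, and proof-failure rate are illustrative only):

import std/options
import pkg/questionable
import ./codexconfig
import ./hardhatconfig
import ./nodeconfigs

let configs = NodeConfigs(
  # all groups are optional; leave hardhat as none to reuse an
  # externally-started instance
  hardhat: HardhatConfig.none,
  clients: CodexConfigs.init(nodes=1)
    .withLogFile()
    .withLogTopics("node")
    .some,
  providers: CodexConfigs.init(nodes=5)
    .withSimulateProofFailures(idx=0, failEveryNProofs=1)
    .some
)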

# Conflicts:
#	tests/integration/multinodes.nim
#	tests/integration/testproofs.nim
Eric 2024-02-21 16:09:43 +11:00
parent 14bf49ed81
commit dd8af58314
10 changed files with 620 additions and 350 deletions

View File

@@ -38,7 +38,7 @@ import ./stores
import ./units
import ./utils
export units, net, codextypes
export units, net, codextypes, logutils
export
DefaultQuotaBytes,

View File

@@ -1,10 +1,7 @@
import pkg/questionable
type
CliOption* = object of RootObj
nodeIdx*: ?int
key*: string
value*: string
CliOption* = object
key*: string # option key, including `--`
value*: string # option value
proc `$`*(option: CliOption): string =
var res = option.key
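The per-node index is gone from CliOption; an option now applies to whichever config it is added to. A short orientation sketch (the `$` body is truncated by the hunk above, so the `key=value` rendering shown here is an assumption):

let opt = CliOption(key: "--storage-quota", value: "8589934592")
echo $opt   # assumed to render "--storage-quota=8589934592"
let flag = CliOption(key: "--validator") # value defaults to ""
echo $flag  # assumed to render just "--validator"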

View File

@@ -1,61 +1,294 @@
import std/options
import std/os
import std/sequtils
import std/strutils
import std/sugar
import std/tables
from pkg/chronicles import LogLevel
import pkg/codex/conf
import pkg/codex/units
import pkg/confutils
import pkg/confutils/defs
import libp2p except setup
import pkg/questionable
import ./clioption
import ./nodeconfig
export nodeconfig
export clioption
export confutils
type
CodexConfig* = ref object of NodeConfig
numNodes*: int
cliOptions*: seq[CliOption]
logTopics*: seq[string]
CodexConfigs* = object
configs*: seq[CodexConfig]
CodexConfig* = object
cliOptions: Table[StartUpCmd, Table[string, CliOption]]
cliPersistenceOptions: Table[PersistenceCmd, Table[string, CliOption]]
parsedCli: CodexConf
debugEnabled*: bool
CodexConfigError* = object of CatchableError
proc nodes*(config: CodexConfig, numNodes: int): CodexConfig =
if numNodes < 0:
raise newException(ValueError, "numNodes must be >= 0")
proc cliArgs*(config: CodexConfig): seq[string] {.gcsafe, raises: [CodexConfigError].}
var startConfig = config
startConfig.numNodes = numNodes
proc raiseCodexConfigError(msg: string) {.raises: [CodexConfigError].} =
raise newException(CodexConfigError, msg)
template convertError(body) =
try:
body
except CatchableError as e:
raiseCodexConfigError e.msg
proc init*(_: type CodexConfigs, nodes = 1): CodexConfigs {.raises: [].} =
CodexConfigs(configs: newSeq[CodexConfig](nodes))
func nodes*(self: CodexConfigs): int =
self.configs.len
proc checkBounds(self: CodexConfigs, idx: int) {.raises: [CodexConfigError].} =
if idx notin 0..<self.configs.len:
raiseCodexConfigError "index must be in bounds of the number of nodes"
proc validateCliArgs(
config: var CodexConfig,
msg: string) {.gcsafe, raises: [CodexConfigError].} =
proc postFix(msg: string): string =
if msg.len > 0:
": " & msg
else: ""
try:
config.parsedCli = CodexConf.load(cmdLine = config.cliArgs,
quitOnFailure = false)
except ConfigurationError as e:
raiseCodexConfigError msg & e.msg.postFix
except Exception as e:
## TODO: remove once proper exception handling added to nim-confutils
raiseCodexConfigError msg & e.msg.postFix
proc addCliOption*(
config: var CodexConfig,
group = PersistenceCmd.noCmd,
cliOption: CliOption) {.gcsafe, raises: [CodexConfigError].} =
var options = config.cliPersistenceOptions.getOrDefault(group)
options[cliOption.key] = cliOption # overwrite if already exists
config.cliPersistenceOptions[group] = options
config.validateCliArgs("Invalid cli arg " & $cliOption)
proc addCliOption*(
config: var CodexConfig,
group = PersistenceCmd.noCmd,
key: string, value = "") {.gcsafe, raises: [CodexConfigError].} =
config.addCliOption(group, CliOption(key: key, value: value))
proc addCliOption*(
config: var CodexConfig,
group = StartUpCmd.noCmd,
cliOption: CliOption) {.gcsafe, raises: [CodexConfigError].} =
var options = config.cliOptions.getOrDefault(group)
options[cliOption.key] = cliOption # overwrite if already exists
config.cliOptions[group] = options
config.validateCliArgs("Invalid cli arg " & $cliOption)
proc addCliOption*(
config: var CodexConfig,
group = StartUpCmd.noCmd,
key: string, value = "") {.gcsafe, raises: [CodexConfigError].} =
config.addCliOption(group, CliOption(key: key, value: value))
proc addCliOption*(
config: var CodexConfig,
cliOption: CliOption) {.gcsafe, raises: [CodexConfigError].} =
config.addCliOption(StartUpCmd.noCmd, cliOption)
proc addCliOption*(
config: var CodexConfig,
key: string, value = "") {.gcsafe, raises: [CodexConfigError].} =
config.addCliOption(StartUpCmd.noCmd, CliOption(key: key, value: value))
proc cliArgs*(
config: CodexConfig): seq[string] {.gcsafe, raises: [CodexConfigError].} =
## converts CodexConfig cli options and command groups into a sequence of args
var args: seq[string] = @[]
convertError:
for cmd in StartUpCmd:
if config.cliOptions.hasKey(cmd):
if cmd != StartUpCmd.noCmd:
args.add $cmd
var opts = config.cliOptions[cmd].values.toSeq
args = args.concat( opts.map(o => $o) )
for cmd in PersistenceCmd:
if config.cliPersistenceOptions.hasKey(cmd):
if cmd != PersistenceCmd.noCmd:
args.add $cmd
var opts = config.cliPersistenceOptions[cmd].values.toSeq
args = args.concat( opts.map(o => $o) )
return args
proc logFile*(config: CodexConfig): ?string {.raises: [CodexConfigError].} =
config.parsedCli.logFile
proc logLevel*(config: CodexConfig): LogLevel {.raises: [CodexConfigError].} =
convertError:
return parseEnum[LogLevel](config.parsedCli.logLevel.toUpperAscii)
proc debug*(
self: CodexConfigs,
idx: int,
enabled = true): CodexConfigs {.raises: [CodexConfigError].} =
## output log to stdout for a specific node in the group
self.checkBounds idx
var startConfig = self
startConfig.configs[idx].debugEnabled = enabled
return startConfig
proc simulateProofFailuresFor*(
config: CodexConfig,
providerIdx: int,
proc debug*(self: CodexConfigs, enabled = true): CodexConfigs {.raises: [].} =
## output log to stdout for all nodes in the group
var startConfig = self
for config in startConfig.configs.mitems:
config.debugEnabled = enabled
return startConfig
proc withLogFile*(
self: CodexConfigs,
idx: int): CodexConfigs {.raises: [CodexConfigError].} =
self.checkBounds idx
var startConfig = self
startConfig.configs[idx].addCliOption("--log-file", "<updated_in_test>")
return startConfig
proc withLogFile*(
self: CodexConfigs): CodexConfigs {.raises: [CodexConfigError].} =
## typically called from a test; sets the config such that a log file will
## be created
var startConfig = self
for config in startConfig.configs.mitems:
config.addCliOption("--log-file", "<updated_in_test>")
return startConfig
proc withLogFile*(
self: var CodexConfig,
logFile: string) {.raises: [CodexConfigError].} = #: CodexConfigs =
## typically called internally by the test suite; sets the log file path to
## be used during the test run for a specified node in the group
# var config = self
self.addCliOption("--log-file", logFile)
# return startConfig
proc withLogLevel*(
self: CodexConfig,
level: LogLevel | string): CodexConfig {.raises: [CodexConfigError].} =
var config = self
config.addCliOption("--log-level", $level)
return config
proc withLogLevel*(
self: CodexConfigs,
idx: int,
level: LogLevel | string): CodexConfigs {.raises: [CodexConfigError].} =
self.checkBounds idx
var startConfig = self
startConfig.configs[idx].addCliOption("--log-level", $level)
return startConfig
proc withLogLevel*(
self: CodexConfigs,
level: LogLevel | string): CodexConfigs {.raises: [CodexConfigError].} =
var startConfig = self
for config in startConfig.configs.mitems:
config.addCliOption("--log-level", $level)
return startConfig
proc withSimulateProofFailures*(
self: CodexConfigs,
idx: int,
failEveryNProofs: int
): CodexConfig =
): CodexConfigs {.raises: [CodexConfigError].} =
if providerIdx > config.numNodes - 1:
raise newException(ValueError, "provider index out of bounds")
self.checkBounds idx
var startConfig = config
startConfig.cliOptions.add(
CliOption(
nodeIdx: some providerIdx,
key: "--simulate-proof-failures",
value: $failEveryNProofs
)
)
var startConfig = self
startConfig.configs[idx].addCliOption(
StartUpCmd.persistence, "--simulate-proof-failures", $failEveryNProofs)
return startConfig
proc withSimulateProofFailures*(
self: CodexConfigs,
failEveryNProofs: int): CodexConfigs {.raises: [CodexConfigError].} =
var startConfig = self
for config in startConfig.configs.mitems:
config.addCliOption(
StartUpCmd.persistence, "--simulate-proof-failures", $failEveryNProofs)
return startConfig
proc logLevelWithTopics(
config: CodexConfig,
topics: varargs[string]): string {.raises: [CodexConfigError].} =
convertError:
var logLevel = LogLevel.INFO
logLevel = parseEnum[LogLevel](config.parsedCli.logLevel.toUpperAscii)
let level = $logLevel & ";TRACE: " & topics.join(",")
return level
proc withLogTopics*(
config: CodexConfig,
topics: varargs[string]
): CodexConfig =
self: CodexConfigs,
idx: int,
topics: varargs[string]): CodexConfigs {.raises: [CodexConfigError].} =
var startConfig = config
startConfig.logTopics = startConfig.logTopics.concat(@topics)
self.checkBounds idx
convertError:
let config = self.configs[idx]
let level = config.logLevelWithTopics(topics)
var startConfig = self
return startConfig.withLogLevel(idx, level)
proc withLogTopics*(
self: CodexConfigs,
topics: varargs[string]
): CodexConfigs {.raises: [CodexConfigError].} =
var startConfig = self
for config in startConfig.configs.mitems:
let level = config.logLevelWithTopics(topics)
config = config.withLogLevel(level)
return startConfig
proc withStorageQuota*(
config: CodexConfig,
quota: NBytes
): CodexConfig =
self: CodexConfigs,
idx: int,
quota: NBytes): CodexConfigs {.raises: [CodexConfigError].} =
var startConfig = config
startConfig.cliOptions.add(
CliOption(key: "--storage-quota", value: $quota)
)
self.checkBounds idx
var startConfig = self
startConfig.configs[idx].addCliOption("--storage-quota", $quota)
return startConfig
proc withStorageQuota*(
self: CodexConfigs,
quota: NBytes): CodexConfigs {.raises: [CodexConfigError].} =
var startConfig = self
for config in startConfig.configs.mitems:
config.addCliOption("--storage-quota", $quota)
return startConfig
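A usage sketch of the grouped-option API above (not part of the commit; the eth account is a placeholder). Every addCliOption call re-validates the accumulated argument list via CodexConf.load, so a bad option surfaces immediately as a CodexConfigError:

var config = CodexConfig()
# top-level option (StartUpCmd.noCmd group)
config.addCliOption("--log-level", "INFO")
# option under the `persistence` startup command
config.addCliOption(StartUpCmd.persistence, "--eth-account",
                    "0x0000000000000000000000000000000000000000")
# option under the `prover` persistence command (a real prover config also
# needs --circom-wasm and --circom-zkey, as in the suite changes below)
config.addCliOption(PersistenceCmd.prover, "--circom-r1cs",
                    "tests/circuits/fixtures/proof_main.r1cs")
# cliArgs emits noCmd options first, then each command name followed by its
# options, roughly (assuming `$CliOption` renders "key=value"):
#   @["--log-level=INFO", "persistence", "--eth-account=0x00…00",
#     "prover", "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs"]
echo config.cliArgs()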

View File

@@ -40,17 +40,17 @@ method onOutputLineCaptured(node: CodexProcess, line: string) =
discard
proc dataDir(node: CodexProcess): string =
let config = CodexConf.load(cmdLine = node.arguments)
let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
return config.dataDir.string
proc ethAccount*(node: CodexProcess): Address =
let config = CodexConf.load(cmdLine = node.arguments)
let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
without ethAccount =? config.ethAccount:
raiseAssert "eth account not set"
return Address(ethAccount)
proc apiUrl*(node: CodexProcess): string =
let config = CodexConf.load(cmdLine = node.arguments)
let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
return "http://" & config.apiBindAddress & ":" & $config.apiPort & "/api/codex/v1"
proc client*(node: CodexProcess): CodexClient =

View File

@@ -1,6 +1,15 @@
import ./nodeconfig
export nodeconfig
type
HardhatConfig* = ref object of NodeConfig
HardhatConfig* = object
logFile*: bool
debugEnabled*: bool
proc debug*(self: HardhatConfig, enabled = true): HardhatConfig =
## output log to stdout
var config = self
config.debugEnabled = enabled
return config
proc withLogFile*(self: HardhatConfig, logToFile: bool = true): HardhatConfig =
var config = self
config.logFile = logToFile
return config
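HardhatConfig keeps the same builder style, now as a value type; a short sketch matching the commented-out hint in testproofs.nim below:

let hardhat = HardhatConfig()
  .withLogFile() # write hardhat's output to the test's log directory
  .debug()       # also echo hardhat's output to stdout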

View File

@@ -3,13 +3,16 @@ import std/sequtils
import std/strutils
import std/sugar
import std/times
import pkg/codex/conf
import pkg/codex/logutils
import pkg/chronos/transports/stream
import pkg/ethers
import ./hardhatprocess
import pkg/questionable
import ./codexconfig
import ./codexprocess
import ./hardhatconfig
import ./codexconfig
import ./hardhatprocess
import ./nodeconfigs
import ../asynctest
import ../checktest
@@ -24,11 +27,6 @@ type
RunningNode* = ref object
role*: Role
node*: NodeProcess
NodeConfigs* = object
clients*: CodexConfig
providers*: CodexConfig
validators*: CodexConfig
hardhat*: HardhatConfig
Role* {.pure.} = enum
Client,
Provider,
@@ -69,6 +67,8 @@ template multinodesuite*(name: string, body: untyped) =
var accounts {.inject, used.}: seq[Address]
var snapshot: JsonNode
proc teardownImpl(): Future[void] {.gcsafe.}
template test(tname, startNodeConfigs, tbody) =
currentTestName = tname
nodeConfigs = startNodeConfigs
@@ -79,6 +79,7 @@ template multinodesuite*(name: string, body: untyped) =
var sanitized = pathSegment
for invalid in invalidFilenameChars.items:
sanitized = sanitized.replace(invalid, '_')
.replace(' ', '_')
sanitized
proc getLogFile(role: Role, index: ?int): string =
@@ -87,7 +88,7 @@ template multinodesuite*(name: string, body: untyped) =
var logDir = currentSourcePath.parentDir() /
"logs" /
sanitize($starttime & " " & name) /
sanitize($starttime & "__" & name) /
sanitize($currentTestName)
createDir(logDir)
@@ -116,14 +117,15 @@ template multinodesuite*(name: string, body: untyped) =
return node
proc newCodexProcess(roleIdx: int,
config: CodexConfig,
conf: CodexConfig,
role: Role
): Future[NodeProcess] {.async.} =
let nodeIdx = running.len
var conf = config
var config = conf
if nodeIdx > accounts.len - 1:
await teardownImpl()
raiseAssert("Cannot start node at nodeIdx " & $nodeIdx &
", not enough eth accounts.")
@@ -131,30 +133,30 @@ template multinodesuite*(name: string, body: untyped) =
sanitize($starttime) /
sanitize($role & "_" & $roleIdx)
if conf.logFile:
let updatedLogFile = getLogFile(role, some roleIdx)
conf.cliOptions.add CliOption(key: "--log-file", value: updatedLogFile)
try:
if config.logFile.isSome:
let updatedLogFile = getLogFile(role, some roleIdx)
config.withLogFile(updatedLogFile)
let logLevel = conf.logLevel |? LogLevel.INFO
if conf.logTopics.len > 0:
conf.cliOptions.add CliOption(
key: "--log-level",
value: $logLevel & ";TRACE: " & conf.logTopics.join(",")
)
else:
conf.cliOptions.add CliOption(key: "--log-level", value: $logLevel)
config.addCliOption("--api-port", $ await nextFreePort(8080 + nodeIdx))
config.addCliOption("--data-dir", datadir)
config.addCliOption("--nat", "127.0.0.1")
config.addCliOption("--listen-addrs", "/ip4/127.0.0.1/tcp/0")
config.addCliOption("--disc-ip", "127.0.0.1")
config.addCliOption("--disc-port", $ await nextFreePort(8090 + nodeIdx))
var args = conf.cliOptions.map(o => $o)
.concat(@[
"--api-port=" & $ await nextFreePort(8080 + nodeIdx),
"--data-dir=" & datadir,
"--nat=127.0.0.1",
"--listen-addrs=/ip4/127.0.0.1/tcp/0",
"--disc-ip=127.0.0.1",
"--disc-port=" & $ await nextFreePort(8090 + nodeIdx),
"--eth-account=" & $accounts[nodeIdx]])
except CodexConfigError as e:
fatal "invalid cli option", error = e.msg
echo "[FATAL] invalid cli option ", e.msg
await teardownImpl()
fail()
return
let node = await CodexProcess.startNode(args, conf.debugEnabled, $role & $roleIdx)
let node = await CodexProcess.startNode(
config.cliArgs,
config.debugEnabled,
$role & $roleIdx
)
await node.waitUntilStarted()
trace "node started", nodeName = $role & $roleIdx
@@ -184,80 +186,36 @@ template multinodesuite*(name: string, body: untyped) =
if r.role == Role.Validator:
CodexProcess(r.node)
proc startHardhatNode(): Future[NodeProcess] {.async.} =
var config = nodeConfigs.hardhat
proc startHardhatNode(config: HardhatConfig): Future[NodeProcess] {.async.} =
return await newHardhatProcess(config, Role.Hardhat)
proc startClientNode(): Future[NodeProcess] {.async.} =
proc startClientNode(conf: CodexConfig): Future[NodeProcess] {.async.} =
let clientIdx = clients().len
var config = nodeConfigs.clients
config.cliOptions.add CliOption(key: "persistence")
var config = conf
config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len])
return await newCodexProcess(clientIdx, config, Role.Client)
proc startProviderNode(): Future[NodeProcess] {.async.} =
proc startProviderNode(conf: CodexConfig): Future[NodeProcess] {.async.} =
let providerIdx = providers().len
var config = nodeConfigs.providers
config.cliOptions.add CliOption(key: "--bootstrap-node", value: bootstrap)
config.cliOptions.add CliOption(key: "persistence")
# filter out provider options by provided index
config.cliOptions = config.cliOptions.filter(
o => (let idx = o.nodeIdx |? providerIdx; idx == providerIdx)
)
var config = conf
config.addCliOption("--bootstrap-node", bootstrap)
config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len])
config.addCliOption(PersistenceCmd.prover, "--circom-r1cs", "tests/circuits/fixtures/proof_main.r1cs")
config.addCliOption(PersistenceCmd.prover, "--circom-wasm", "tests/circuits/fixtures/proof_main.wasm")
config.addCliOption(PersistenceCmd.prover, "--circom-zkey", "tests/circuits/fixtures/proof_main.zkey")
return await newCodexProcess(providerIdx, config, Role.Provider)
proc startValidatorNode(): Future[NodeProcess] {.async.} =
proc startValidatorNode(conf: CodexConfig): Future[NodeProcess] {.async.} =
let validatorIdx = validators().len
var config = nodeConfigs.validators
config.cliOptions.add CliOption(key: "--bootstrap-node", value: bootstrap)
config.cliOptions.add CliOption(key: "--validator")
var config = conf
config.addCliOption("--bootstrap-node", bootstrap)
config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len])
config.addCliOption(StartUpCmd.persistence, "--validator")
return await newCodexProcess(validatorIdx, config, Role.Validator)
setup:
if not nodeConfigs.hardhat.isNil:
let node = await startHardhatNode()
running.add RunningNode(role: Role.Hardhat, node: node)
try:
ethProvider = JsonRpcProvider.new("ws://localhost:8545")
# if hardhat was NOT started by the test, take a snapshot so it can be
# reverted in the test teardown
if nodeConfigs.hardhat.isNil:
snapshot = await send(ethProvider, "evm_snapshot")
accounts = await ethProvider.listAccounts()
except CatchableError as e:
fatal "failed to connect to hardhat", error = e.msg
raiseAssert "Hardhat not running. Run hardhat manually before executing tests, or include a HardhatConfig in the test setup."
if not nodeConfigs.clients.isNil:
for i in 0..<nodeConfigs.clients.numNodes:
let node = await startClientNode()
running.add RunningNode(
role: Role.Client,
node: node
)
if i == 0:
bootstrap = CodexProcess(node).client.info()["spr"].getStr()
if not nodeConfigs.providers.isNil:
for i in 0..<nodeConfigs.providers.numNodes:
let node = await startProviderNode()
running.add RunningNode(
role: Role.Provider,
node: node
)
if not nodeConfigs.validators.isNil:
for i in 0..<nodeConfigs.validators.numNodes:
let node = await startValidatorNode()
running.add RunningNode(
role: Role.Validator,
node: node
)
teardown:
proc teardownImpl {.async.} =
for nodes in @[validators(), clients(), providers()]:
for node in nodes:
await node.stop() # also stops rest client
@@ -273,4 +231,52 @@ template multinodesuite*(name: string, body: untyped) =
running = @[]
setup:
if var conf =? nodeConfigs.hardhat:
let node = await startHardhatNode(conf)
running.add RunningNode(role: Role.Hardhat, node: node)
try:
ethProvider = JsonRpcProvider.new("ws://localhost:8545")
# if hardhat was NOT started by the test, take a snapshot so it can be
# reverted in the test teardown
if nodeConfigs.hardhat.isNone:
snapshot = await send(ethProvider, "evm_snapshot")
accounts = await ethProvider.listAccounts()
except CatchableError as e:
fatal "failed to connect to hardhat", error = e.msg
echo "[FATAL] Hardhat not running. Run hardhat manually before executing tests, or include a HardhatConfig in the test setup."
await teardownImpl()
fail()
return
if var clients =? nodeConfigs.clients:
for config in clients.configs:
let node = await startClientNode(config)
running.add RunningNode(
role: Role.Client,
node: node
)
if clients().len == 1:
bootstrap = CodexProcess(node).client.info()["spr"].getStr()
if var providers =? nodeConfigs.providers:
for config in providers.configs.mitems:
let node = await startProviderNode(config)
running.add RunningNode(
role: Role.Provider,
node: node
)
if var validators =? nodeConfigs.validators:
for config in validators.configs.mitems:
let node = await startValidatorNode(config)
running.add RunningNode(
role: Role.Validator,
node: node
)
teardown:
await teardownImpl()
body

View File

@@ -0,0 +1,11 @@
import pkg/questionable
import ./codexconfig
import ./hardhatconfig
type
NodeConfigs* = object
clients*: ?CodexConfigs
providers*: ?CodexConfigs
validators*: ?CodexConfigs
hardhat*: ?HardhatConfig
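Because every group is optional, a test starts only the roles it configures; the suite template unwraps each group with questionable's `=?` binding (a standalone sketch of the same pattern, with illustrative values):

import std/options
import pkg/questionable
import ./codexconfig
import ./nodeconfigs

let configs = NodeConfigs(clients: CodexConfigs.init(nodes=2).some)
if var clients =? configs.clients:
  echo "starting ", clients.nodes, " client node(s)"
# providers, validators and hardhat are none, so those branches are skipped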

View File

@@ -54,6 +54,8 @@ method start*(node: NodeProcess) {.base, async.} =
processOptions = poptions
try:
if node.debug:
echo "starting codex node with args: ", node.arguments.join(" ")
node.process = await startProcess(
node.executable,
node.workingDir,
@@ -149,10 +151,11 @@ proc waitUntilStarted*(node: NodeProcess) {.async.} =
let started = newFuture[void]()
try:
discard node.captureOutput(node.startedOutput, started).track(node)
await started.wait(5.seconds)
await started.wait(35.seconds) # allow enough time for proof generation
except AsyncTimeoutError as e:
# attempt graceful shutdown in case node was partially started, prevent
# zombies
# TODO: raise error here so that all nodes can be shut down gracefully
await node.stop()
raiseAssert "node did not output '" & node.startedOutput & "'"

View File

@@ -58,11 +58,11 @@ proc startNode*(args: openArray[string], debug: string | bool = false): NodeProc
node
proc dataDir(node: NodeProcess): string =
let config = CodexConf.load(cmdLine = node.arguments)
let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
config.dataDir.string
proc apiUrl(node: NodeProcess): string =
let config = CodexConf.load(cmdLine = node.arguments)
let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
"http://" & config.apiBindAddress & ":" & $config.apiPort & "/api/codex/v1"
proc client*(node: NodeProcess): CodexClient =

View File

@@ -1,4 +1,3 @@
import std/math
from std/times import inMilliseconds
import pkg/codex/logutils
import pkg/stew/byteutils
@@ -7,6 +6,7 @@ import ../contracts/deployment
import ../codex/helpers
import ../examples
import ./marketplacesuite
import ./nodeconfigs
export chronicles
@@ -18,34 +18,40 @@ marketplacesuite "Hosts submit regular proofs":
test "hosts submit periodic proofs for slots they fill", NodeConfigs(
# Uncomment to start Hardhat automatically, typically so logs can be inspected locally
# hardhat: HardhatConfig().withLogFile(),
hardhat:
HardhatConfig.none,
clients:
CodexConfig()
.nodes(1)
CodexConfigs.init(nodes=1)
# .debug() # uncomment to enable console log output
.withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
.withLogTopics("node"),
.withLogTopics("node")
.some,
providers:
CodexConfig()
.nodes(1)
CodexConfigs.init(nodes=5)
# .debug() # uncomment to enable console log output
.withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
.withLogTopics("marketplace", "sales", "reservations", "node"),
.withLogTopics("marketplace", "sales", "reservations", "node")
.some,
):
let client0 = clients()[0].client
let totalPeriods = 50
let datasetSizeInBlocks = 2
let datasetSizeInBlocks = 8
let data = await RandomChunker.example(blocks=1)
createAvailabilities(data.len, totalPeriods.periods)
let data = await RandomChunker.example(blocks=datasetSizeInBlocks)
# dataset size = 8 blocks; with 5 nodes, the slot size = 4 blocks, so give
# each node enough availability to fill one slot only
createAvailabilities((DefaultBlockSize * 4.NBytes).Natural, totalPeriods.periods)
let cid = client0.upload(data).get
let purchaseId = await client0.requestStorage(
cid,
duration=totalPeriods.periods,
expiry=30.periods,
nodes=5,
tolerance=1,
origDatasetSizeInBlocks = datasetSizeInBlocks)
check eventually client0.purchaseStateIs(purchaseId, "started")
@@ -61,240 +67,245 @@
await subscription.unsubscribe()
# marketplacesuite "Simulate invalid proofs":
marketplacesuite "Simulate invalid proofs":
# # TODO: these are very loose tests in that they are not testing EXACTLY how
# # proofs were marked as missed by the validator. These tests should be
# # tightened so that they are showing, as an integration test, that specific
# # proofs are being marked as missed by the validator.
# TODO: these are very loose tests in that they are not testing EXACTLY how
# proofs were marked as missed by the validator. These tests should be
# tightened so that they are showing, as an integration test, that specific
# proofs are being marked as missed by the validator.
# test "slot is freed after too many invalid proofs submitted", NodeConfigs(
# # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
# # hardhat: HardhatConfig().withLogFile(),
test "slot is freed after too many invalid proofs submitted", NodeConfigs(
# Uncomment to start Hardhat automatically, typically so logs can be inspected locally
hardhat:
HardhatConfig.none,
# clients:
# CodexConfig()
# .nodes(1)
# # .debug() # uncomment to enable console log output
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# .withLogTopics("node"),
clients:
CodexConfigs.init(nodes=1)
# .debug() # uncomment to enable console log output
.withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
.withLogTopics("node")
.some,
# providers:
# CodexConfig()
# .nodes(1)
# .simulateProofFailuresFor(providerIdx=0, failEveryNProofs=1)
# # .debug() # uncomment to enable console log output
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# .withLogTopics("marketplace", "sales", "reservations", "node"),
providers:
CodexConfigs.init(nodes=5)
.withSimulateProofFailures(idx=0, failEveryNProofs=1)
# .debug() # uncomment to enable console log output
.withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
.withLogTopics("marketplace", "sales", "reservations", "node")
.some,
# validators:
# CodexConfig()
# .nodes(1)
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# # .debug() # uncomment to enable console log output
# .withLogTopics("validator", "onchain", "ethers")
# ):
# let client0 = clients()[0].client
# let totalPeriods = 50
validators:
CodexConfigs.init(nodes=1)
.withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# .debug() # uncomment to enable console log output
.withLogTopics("validator", "onchain", "ethers")
.some
):
let client0 = clients()[0].client
let totalPeriods = 50
# let datasetSizeInBlocks = 2
# let data = await RandomChunker.example(blocks=datasetSizeInBlocks)
# createAvailabilities(data.len, totalPeriods.periods)
let datasetSizeInBlocks = 8
let data = await RandomChunker.example(blocks=datasetSizeInBlocks)
# dataset size = 8 blocks; with 5 nodes, the slot size = 4 blocks, so give
# each node enough availability to fill one slot only
createAvailabilities((DefaultBlockSize * 4.NBytes).Natural, totalPeriods.periods)
# let cid = client0.upload(data).get
let cid = client0.upload(data).get
# let purchaseId = await client0.requestStorage(
# cid,
# duration=totalPeriods.periods,
# origDatasetSizeInBlocks=datasetSizeInBlocks)
# let requestId = client0.requestId(purchaseId).get
let purchaseId = await client0.requestStorage(
cid,
duration=totalPeriods.periods,
expiry=30.periods,
nodes=5,
tolerance=1,
origDatasetSizeInBlocks=datasetSizeInBlocks)
let requestId = client0.requestId(purchaseId).get
# check eventually client0.purchaseStateIs(purchaseId, "started")
check eventually client0.purchaseStateIs(purchaseId, "started")
# var slotWasFreed = false
# proc onSlotFreed(event: SlotFreed) =
# if event.requestId == requestId and
# event.slotIndex == 0.u256: # assume only one slot, so index 0
# slotWasFreed = true
var slotWasFreed = false
proc onSlotFreed(event: SlotFreed) =
if event.requestId == requestId and
event.slotIndex == 0.u256: # assume only one slot, so index 0
slotWasFreed = true
# let subscription = await marketplace.subscribe(SlotFreed, onSlotFreed)
let subscription = await marketplace.subscribe(SlotFreed, onSlotFreed)
# let currentPeriod = await getCurrentPeriod()
# check eventuallyP(slotWasFreed, currentPeriod + totalPeriods.u256 + 1)
let currentPeriod = await getCurrentPeriod()
check eventuallyP(slotWasFreed, currentPeriod + totalPeriods.u256 + 1)
# await subscription.unsubscribe()
await subscription.unsubscribe()
# test "slot is not freed when not enough invalid proofs submitted", NodeConfigs(
# # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
# # hardhat: HardhatConfig().withLogFile(),
test "slot is not freed when not enough invalid proofs submitted", NodeConfigs(
# Uncomment to start Hardhat automatically, typically so logs can be inspected locally
hardhat: HardhatConfig.none,
# clients:
# CodexConfig()
# .nodes(1)
# .debug() # uncomment to enable console log output
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# .withLogTopics("node"),
clients:
CodexConfigs.init(nodes=1)
# .debug() # uncomment to enable console log output
.withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
.withLogTopics("node")
.some,
# providers:
# CodexConfig()
# .nodes(1)
# .simulateProofFailuresFor(providerIdx=0, failEveryNProofs=3)
# .debug() # uncomment to enable console log output
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# .withLogTopics("marketplace", "sales", "reservations", "node"),
providers:
CodexConfigs.init(nodes=5)
.withSimulateProofFailures(idx=0, failEveryNProofs=3)
# .debug() # uncomment to enable console log output
.withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
.withLogTopics("marketplace", "sales", "reservations", "node")
.some,
# validators:
# CodexConfig()
# .nodes(1)
# .debug()
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# .withLogTopics("validator", "onchain", "ethers")
# ):
# let client0 = clients()[0].client
# let totalPeriods = 25
validators:
CodexConfigs.init(nodes=1)
# .debug()
.withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
.withLogTopics("validator", "onchain", "ethers")
.some
):
let client0 = clients()[0].client
let totalPeriods = 25
# let datasetSizeInBlocks = 2
# let data = await RandomChunker.example(blocks=datasetSizeInBlocks)
# createAvailabilities(data.len, totalPeriods.periods)
let datasetSizeInBlocks = 8
let data = await RandomChunker.example(blocks=datasetSizeInBlocks)
# dataset size = 8 blocks; with 5 nodes, the slot size = 4 blocks, so give
# each node enough availability to fill one slot only
createAvailabilities((DefaultBlockSize * 4.NBytes).Natural, totalPeriods.periods)
# let cid = client0.upload(data).get
let cid = client0.upload(data).get
# let purchaseId = await client0.requestStorage(
# cid,
# duration=totalPeriods.periods,
# origDatasetSizeInBlocks=datasetSizeInBlocks)
# let requestId = client0.requestId(purchaseId).get
let purchaseId = await client0.requestStorage(
cid,
duration=totalPeriods.periods,
expiry=30.periods,
nodes=5,
tolerance=1,
origDatasetSizeInBlocks=datasetSizeInBlocks)
let requestId = client0.requestId(purchaseId).get
# check eventually client0.purchaseStateIs(purchaseId, "started")
check eventually client0.purchaseStateIs(purchaseId, "started")
# var slotWasFreed = false
# proc onSlotFreed(event: SlotFreed) =
# if event.requestId == requestId and
# event.slotIndex == 0.u256:
# slotWasFreed = true
var slotWasFreed = false
proc onSlotFreed(event: SlotFreed) =
if event.requestId == requestId and
event.slotIndex == 0.u256:
slotWasFreed = true
# let subscription = await marketplace.subscribe(SlotFreed, onSlotFreed)
let subscription = await marketplace.subscribe(SlotFreed, onSlotFreed)
# # check not freed
# let currentPeriod = await getCurrentPeriod()
# check not eventuallyP(slotWasFreed, currentPeriod + totalPeriods.u256 + 1)
# check not freed
let currentPeriod = await getCurrentPeriod()
check not eventuallyP(slotWasFreed, currentPeriod + totalPeriods.u256 + 1)
# await subscription.unsubscribe()
await subscription.unsubscribe()
# TODO: uncomment once fixed
# test "host that submits invalid proofs is paid out less", NodeConfigs(
# # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
# # hardhat: HardhatConfig().withLogFile(),
test "host that submits invalid proofs is paid out less", NodeConfigs(
# Uncomment to start Hardhat automatically, typically so logs can be inspected locally
# hardhat: HardhatConfig().withLogFile(),
hardhat: HardhatConfig.none,
clients:
CodexConfigs.init(nodes=1)
.debug() # uncomment to enable console log output
.withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
.withLogTopics("node", "erasure", "clock", "purchases", "slotsbuilder")
.some,
# clients:
# CodexConfig()
# .nodes(1)
# # .debug() # uncomment to enable console log output.debug()
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# .withLogTopics("node", "erasure", "clock", "purchases"),
providers:
CodexConfigs.init(nodes=5)
.withSimulateProofFailures(idx=0, failEveryNProofs=2)
.debug(idx=0) # uncomment to enable console log output
.withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
.withLogTopics("marketplace", "sales", "reservations", "node", "slotsbuilder")
.some,
# providers:
# CodexConfig()
# .nodes(3)
# .simulateProofFailuresFor(providerIdx=0, failEveryNProofs=2)
# # .debug() # uncomment to enable console log output
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# .withLogTopics("marketplace", "sales", "reservations", "node"),
validators:
CodexConfigs.init(nodes=1)
# .debug()
.withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
.withLogTopics("validator")
.some
):
let client0 = clients()[0].client
let providers = providers()
let totalPeriods = 25
# validators:
# CodexConfig()
# .nodes(1)
# # .debug()
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# .withLogTopics("validator")
# ):
# let client0 = clients()[0].client
# let provider0 = providers()[0]
# let provider1 = providers()[1]
# let provider2 = providers()[2]
# let totalPeriods = 25
let datasetSizeInBlocks = 8
let data = await RandomChunker.example(blocks=datasetSizeInBlocks)
# dataset size = 8 blocks; with 5 nodes, the slot size will be 4 blocks
let slotSize = (DefaultBlockSize * 4.NBytes).Natural.u256
# let datasetSizeInBlocks = 3
# let data = await RandomChunker.example(blocks=datasetSizeInBlocks)
# # original data = 3 blocks so slot size will be 4 blocks
# let slotSize = (DefaultBlockSize * 4.NBytes).Natural.u256
discard providers[0].client.postAvailability(
size=slotSize, # should match 1 slot only
duration=totalPeriods.periods.u256,
minPrice=300.u256,
maxCollateral=200.u256
)
# discard provider0.client.postAvailability(
# size=slotSize, # should match 1 slot only
# duration=totalPeriods.periods.u256,
# minPrice=300.u256,
# maxCollateral=200.u256
# )
let cid = client0.upload(data).get
# let cid = client0.upload(data).get
let purchaseId = await client0.requestStorage(
cid,
duration=totalPeriods.periods,
expiry=10.periods,
nodes=5,
tolerance=1,
origDatasetSizeInBlocks=datasetSizeInBlocks
)
# let purchaseId = await client0.requestStorage(
# cid,
# duration=totalPeriods.periods,
# expiry=10.periods,
# nodes=3,
# tolerance=1,
# origDatasetSizeInBlocks=datasetSizeInBlocks
# )
without requestId =? client0.requestId(purchaseId):
fail()
# without requestId =? client0.requestId(purchaseId):
# fail()
var filledSlotIds: seq[SlotId] = @[]
proc onSlotFilled(event: SlotFilled) =
let slotId = slotId(event.requestId, event.slotIndex)
filledSlotIds.add slotId
# var filledSlotIds: seq[SlotId] = @[]
# proc onSlotFilled(event: SlotFilled) =
# let slotId = slotId(event.requestId, event.slotIndex)
# filledSlotIds.add slotId
let subscription = await marketplace.subscribe(SlotFilled, onSlotFilled)
# let subscription = await marketplace.subscribe(SlotFilled, onSlotFilled)
# wait til first slot is filled
check eventually filledSlotIds.len > 0
# # wait til first slot is filled
# check eventually filledSlotIds.len > 0
template waitForSlotFilled(provider: CodexProcess, idx: int) =
discard provider.client.postAvailability(
size=slotSize, # should match 1 slot only
duration=totalPeriods.periods.u256,
minPrice=300.u256,
maxCollateral=200.u256
)
# # now add availability for providers 1 and 2, which should allow them to to
# # put the remaining slots in their queues
# discard provider1.client.postAvailability(
# size=slotSize, # should match 1 slot only
# duration=totalPeriods.periods.u256,
# minPrice=300.u256,
# maxCollateral=200.u256
# )
check eventually filledSlotIds.len > idx
# check eventually filledSlotIds.len > 1
# TODO: because we now have 5+ slots to fill plus proof generation, this
# may take way too long. Another idea is to update the SlotFilled contract
# event to include the host that filled the slot. With that, we can use
# `onSlotFilled` to build a provider > slotIdx table in memory and use that
# to check sale states
for i in 1..<providers.len:
# now add availability for the remaining providers, which should allow them
# to put the remaining slots in their queues. They need to fill slots
# one-by-one so we can track their slot idx/ids
let provider = providers[i]
provider.waitForSlotFilled(i)
# discard provider2.client.postAvailability(
# size=slotSize, # should match 1 slot only
# duration=totalPeriods.periods.u256,
# minPrice=300.u256,
# maxCollateral=200.u256
# )
# check eventually filledSlotIds.len > 2
# Wait til remaining providers are in the Proving state.
for i in 1..<providers.len:
check eventually providers[i].client.saleStateIs(filledSlotIds[i], "SaleProving")
# # Wait til second slot is filled. SaleFilled happens too quickly, check SaleProving instead.
# check eventually provider1.client.saleStateIs(filledSlotIds[1], "SaleProving")
# check eventually provider2.client.saleStateIs(filledSlotIds[2], "SaleProving")
# contract should now be started
check eventually client0.purchaseStateIs(purchaseId, "started")
# check eventually client0.purchaseStateIs(purchaseId, "started")
# all providers should now be able to reach the SalePayout state once the
# contract has finished
let currentPeriod = await getCurrentPeriod()
for i in 0..<providers.len:
check eventuallyP(
# SaleFinished happens too quickly, check SalePayout instead
providers[i].client.saleStateIs(filledSlotIds[i], "SalePayout"),
currentPeriod + totalPeriods.u256 + 1)
# let currentPeriod = await getCurrentPeriod()
# check eventuallyP(
# # SaleFinished happens too quickly, check SalePayout instead
# provider0.client.saleStateIs(filledSlotIds[0], "SalePayout"),
# currentPeriod + totalPeriods.u256 + 1)
check eventually(
(await token.balanceOf(providers[1].ethAccount)) >
(await token.balanceOf(providers[0].ethAccount))
)
# check eventuallyP(
# # SaleFinished happens too quickly, check SalePayout instead
# provider1.client.saleStateIs(filledSlotIds[1], "SalePayout"),
# currentPeriod + totalPeriods.u256 + 1)
# check eventuallyP(
# # SaleFinished happens too quickly, check SalePayout instead
# provider2.client.saleStateIs(filledSlotIds[2], "SalePayout"),
# currentPeriod + totalPeriods.u256 + 1)
# check eventually(
# (await token.balanceOf(provider1.ethAccount)) >
# (await token.balanceOf(provider0.ethAccount))
# )
# await subscription.unsubscribe()
await subscription.unsubscribe()