Prover CLI updates (#735)

* rework cli to accept circuit params

* check circom file extensions

* adding new required cli changes

* don't use ufcs

* persistence is a command now

* use `nimOldCaseObjects` switch for nim confutils compat

* misc

* Update cli integration tests

* Fix: simulateProofFailures option is not for validator

* moving circom params under `prover` command

* update tests

* Use circuit assets from codex-contract-eth in tests

* Add "prover" cli command to tests

* use correct stores

* make `verifier` a cmd option

* update circuit artifacts path

* fix cli tests

* Update integration tests to use cli commands

Integration tests have been updated to use the new cli commands. The API used by the integration tests has also changed slightly.

Proofs tests have been updated to use 5 nodes and 8 blocks of data. The remaining integration tests also need to be updated.

* remove parsedCli from CodexConfig

Instead, parse the cli args on the fly when needed
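A rough sketch of that pattern (the helper name `logFileOf` is invented here for illustration; the `CodexConf.load(cmdLine = ..., quitOnFailure = false)` call is the same one the updated test helpers use):

    import std/options
    import pkg/confutils
    import pkg/codex/conf

    proc logFileOf(arguments: seq[string]): Option[string] =
      ## Hypothetical helper: keep only the raw cli args and parse them on
      ## demand. `quitOnFailure = false` makes confutils raise instead of
      ## quitting, so the test suite can handle bad args itself.
      let config = CodexConf.load(cmdLine = arguments, quitOnFailure = false)
      config.logFile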

* remove unneeded gcsafes

* graceful shutdowns

Where possible, do not use `raiseAssert`, as other nodes in the test may already be running. Instead, raise exceptions, catch them in multinodes.nim, and attempt a teardown before failing the test.

`abortOnError` is set to true so that `fail()` quits immediately once teardown has run.
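The flow is roughly the following (condensed from the `failAndTeardownOnError` helper added to multinodes.nim in this change):

    template failAndTeardownOnError(message: string, tryBody: untyped) =
      try:
        tryBody
      except CatchableError as er:
        fatal message, error = er.msg
        await teardownImpl()   # stop any nodes that did start
        when declared(teardownAllIMPL):
          teardownAllIMPL()
        fail()                 # with abortOnError = true this aborts the run
        quit(1)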

* update testmarketplace to new api, with valid EC params
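For reference, a test's node setup now looks roughly like this (node counts are illustrative only; each suite picks its own values, and the import paths assume the integration-test tree):

    import pkg/questionable
    import ./codexconfig
    import ./hardhatconfig
    import ./nodeconfigs

    let nodeConfigs = NodeConfigs(
      hardhat: HardhatConfig.none,   # assume hardhat is started manually
      clients: CodexConfigs.init(nodes = 1).some,
      providers: CodexConfigs.init(nodes = 5)
        .withSimulateProofFailures(idx = 0, failEveryNProofs = 1)
        .some,
      validators: CodexConfigs.init(nodes = 1).some
    )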

---------

Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
markspanbroek 2024-03-12 10:57:13 +01:00 committed by GitHub
parent 8589e63d34
commit 293c676f22
18 changed files with 940 additions and 522 deletions


@@ -23,6 +23,7 @@ import ./codex/codex
 import ./codex/logutils
 import ./codex/units
 import ./codex/utils/keyutils
+import ./codex/codextypes

 export codex, conf, libp2p, chronos, logutils
@@ -54,9 +55,6 @@ when isMainModule:
   config.setupLogging()
   config.setupMetrics()

-  case config.cmd:
-  of StartUpCommand.noCommand:
   if config.nat == ValidIpAddress.init(IPv4_any()):
     error "`--nat` cannot be set to the any (`0.0.0.0`) address"
     quit QuitFailure
@@ -90,7 +88,11 @@ when isMainModule:
       config.dataDir / config.netPrivKeyFile
     privateKey = setupKey(keyPath).expect("Should setup private key!")
-    server = CodexServer.new(config, privateKey)
+    server = try:
+      CodexServer.new(config, privateKey)
+    except Exception as exc:
+      error "Failed to start Codex", msg = exc.msg
+      quit QuitFailure

   ## Ctrl+C handling
   proc doShutdown() =
@@ -135,8 +137,12 @@ when isMainModule:
   state = CodexStatus.Running
   while state == CodexStatus.Running:
+    try:
       # poll chronos
       chronos.poll()
+    except Exception as exc:
+      error "Unhandled exception in async proc, aborting", msg = exc.msg
+      quit QuitFailure

   try:
     # signal handlers guarantee that the shutdown Future will
@@ -147,6 +153,3 @@ when isMainModule:
     quit QuitFailure

   notice "Exited codex"
-
-  of StartUpCommand.initNode:
-    discard


@@ -22,12 +22,14 @@ import pkg/stew/io2
 import pkg/stew/shims/net as stewnet
 import pkg/datastore
 import pkg/ethers except Rng
+import pkg/stew/io2

 import ./node
 import ./conf
 import ./rng
 import ./rest/api
 import ./stores
+import ./slots
 import ./blockexchange
 import ./utils/fileutils
 import ./erasure
@@ -72,17 +74,9 @@ proc bootstrapInteractions(
     config = s.config
     repo = s.repoStore

-  if not config.persistence and not config.validator:
-    if config.ethAccount.isSome or config.ethPrivateKey.isSome:
-      warn "Ethereum account was set, but neither persistence nor validator is enabled"
-    return
-
-  if not config.ethAccount.isSome and not config.ethPrivateKey.isSome:
   if config.persistence:
+    if not config.ethAccount.isSome and not config.ethPrivateKey.isSome:
       error "Persistence enabled, but no Ethereum account was set"
-    if config.validator:
-      error "Validator enabled, but no Ethereum account was set"
       quit QuitFailure

   let provider = JsonRpcProvider.new(config.ethProvider)
@@ -139,6 +133,7 @@ proc bootstrapInteractions(
     let sales = Sales.new(market, clock, repo, proofFailures)
     client = some ClientInteractions.new(clock, purchasing)
     host = some HostInteractions.new(clock, sales)
+
   if config.validator:
     let validation = Validation.new(clock, market, config.validatorMaxSlots)
     validator = some ValidatorInteractions.new(clock, validation)
@@ -265,11 +260,41 @@ proc new*(
     blockDiscovery = DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
     engine = BlockExcEngine.new(repoStore, wallet, network, blockDiscovery, peerStore, pendingBlocks)
     store = NetworkStore.new(engine, repoStore)
+
+    prover = if config.prover:
+      if not fileAccessible($config.circomR1cs, {AccessFlags.Read}) and
+        endsWith($config.circomR1cs, ".r1cs"):
+        error "Circom R1CS file not accessible"
+        raise (ref Defect)(
+          msg: "r1cs file not readable, doesn't exist or wrong extension (.r1cs)")
+
+      if not fileAccessible($config.circomWasm, {AccessFlags.Read}) and
+        endsWith($config.circomWasm, ".wasm"):
+        error "Circom wasm file not accessible"
+        raise (ref Defect)(
+          msg: "wasm file not readable, doesn't exist or wrong extension (.wasm)")
+
+      let zkey = if not config.circomNoZkey:
+        if not fileAccessible($config.circomZkey, {AccessFlags.Read}) and
+          endsWith($config.circomZkey, ".zkey"):
+          error "Circom zkey file not accessible"
+          raise (ref Defect)(
+            msg: "zkey file not readable, doesn't exist or wrong extension (.zkey)")
+
+        $config.circomZkey
+      else: ""
+
+      some Prover.new(
+        store,
+        CircomCompat.init($config.circomR1cs, $config.circomWasm, zkey),
+        config.numProofSamples)
+    else:
+      none Prover
+
     codexNode = CodexNodeRef.new(
       switch = switch,
       networkStore = store,
       engine = engine,
+      prover = prover,
       discovery = discovery)

     restServer = RestServerRef.new(


@@ -7,9 +7,7 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.

-import pkg/upraises
-
-push: {.upraises: [].}
+{.push raises: [].}

 import std/os
 import std/terminal
@@ -33,30 +31,47 @@ import pkg/ethers
 import pkg/questionable
 import pkg/questionable/results

+import ./codextypes
 import ./discovery
 import ./logutils
 import ./stores
 import ./units
 import ./utils

-export units
-export net
+export units, net, codextypes, logutils
 export
   DefaultQuotaBytes,
   DefaultBlockTtl,
   DefaultBlockMaintenanceInterval,
   DefaultNumberOfBlocksToMaintainPerInterval

+proc defaultDataDir*(): string =
+  let dataDir = when defined(windows):
+      "AppData" / "Roaming" / "Codex"
+    elif defined(macosx):
+      "Library" / "Application Support" / "Codex"
+    else:
+      ".cache" / "codex"
+
+  getHomeDir() / dataDir
+
 const
   codex_enable_api_debug_peers* {.booldefine.} = false
   codex_enable_proof_failures* {.booldefine.} = false
   codex_use_hardhat* {.booldefine.} = false
   codex_enable_log_counter* {.booldefine.} = false

+  DefaultDataDir* = defaultDataDir()
+
 type
-  StartUpCommand* {.pure.} = enum
-    noCommand,
-    initNode
+  StartUpCmd* {.pure.} = enum
+    noCmd
+    persistence
+
+  PersistenceCmd* {.pure.} = enum
+    noCmd
+    prover

   LogKind* {.pure.} = enum
     Auto = "auto"
@@ -106,17 +121,11 @@ type
   dataDir* {.
     desc: "The directory where codex will store configuration and data"
-    defaultValue: defaultDataDir()
-    defaultValueDesc: ""
+    defaultValue: DefaultDataDir
+    defaultValueDesc: $DefaultDataDir
     abbr: "d"
     name: "data-dir" }: OutDir

-  case cmd* {.
-    command
-    defaultValue: noCommand }: StartUpCommand
-
-  of noCommand:
   listenAddrs* {.
     desc: "Multi Addresses to listen on"
     defaultValue: @[
@@ -220,12 +229,17 @@ type
     name: "cache-size"
     abbr: "c" }: NBytes

-  persistence* {.
-    desc: "Enables persistence mechanism, requires an Ethereum node"
-    defaultValue: false
-    name: "persistence"
-  .}: bool
+  logFile* {.
+    desc: "Logs to file"
+    defaultValue: string.none
+    name: "log-file"
+    hidden
+  .}: Option[string]
+
+  case cmd* {.
+    defaultValue: noCmd
+    command }: StartUpCmd
+  of persistence:
     ethProvider* {.
       desc: "The URL of the JSON-RPC API of the Ethereum node"
       defaultValue: "ws://localhost:8545"
@@ -235,21 +249,32 @@ type
     ethAccount* {.
       desc: "The Ethereum account that is used for storage contracts"
       defaultValue: EthAddress.none
+      defaultValueDesc: ""
       name: "eth-account"
     .}: Option[EthAddress]

     ethPrivateKey* {.
       desc: "File containing Ethereum private key for storage contracts"
       defaultValue: string.none
+      defaultValueDesc: ""
       name: "eth-private-key"
     .}: Option[string]

     marketplaceAddress* {.
       desc: "Address of deployed Marketplace contract"
       defaultValue: EthAddress.none
+      defaultValueDesc: ""
       name: "marketplace-address"
     .}: Option[EthAddress]

+    # TODO: should go behind a feature flag
+    simulateProofFailures* {.
+      desc: "Simulates proof failures once every N proofs. 0 = disabled."
+      defaultValue: 0
+      name: "simulate-proof-failures"
+      hidden
+    .}: int
+
     validator* {.
       desc: "Enables validator, requires an Ethereum node"
       defaultValue: false
@@ -262,28 +287,85 @@ type
       name: "validator-max-slots"
     .}: int

-  simulateProofFailures* {.
-    desc: "Simulates proof failures once every N proofs. 0 = disabled."
-    defaultValue: 0
-    name: "simulate-proof-failures"
-    hidden
-  .}: int
-
-  logFile* {.
-    desc: "Logs to file"
-    defaultValue: string.none
-    name: "log-file"
-    hidden
-  .}: Option[string]
-
-  of initNode:
-    discard
+    case persistenceCmd* {.
+      defaultValue: noCmd
+      command }: PersistenceCmd
+
+    of PersistenceCmd.prover:
+      circomR1cs* {.
+        desc: "The r1cs file for the storage circuit"
+        defaultValue: $DefaultDataDir / "circuits" / "proof_main.r1cs"
+        defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.r1cs"
+        name: "circom-r1cs"
+      .}: InputFile
+
+      circomWasm* {.
+        desc: "The wasm file for the storage circuit"
+        defaultValue: $DefaultDataDir / "circuits" / "proof_main.wasm"
+        defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.wasm"
+        name: "circom-wasm"
+      .}: InputFile
+
+      circomZkey* {.
+        desc: "The zkey file for the storage circuit"
+        defaultValue: $DefaultDataDir / "circuits" / "proof_main.zkey"
+        defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.zkey"
+        name: "circom-zkey"
+      .}: InputFile
+
+      # TODO: should probably be hidden and behind a feature flag
+      circomNoZkey* {.
+        desc: "Ignore the zkey file - use only for testing!"
+        defaultValue: false
+        name: "circom-no-zkey"
+      .}: bool
+
+      numProofSamples* {.
+        desc: "Number of samples to prove"
+        defaultValue: DefaultSamplesNum
+        defaultValueDesc: $DefaultSamplesNum
+        name: "proof-samples" }: int
+
+      maxSlotDepth* {.
+        desc: "The maximum depth of the slot tree"
+        defaultValue: DefaultMaxSlotDepth
+        defaultValueDesc: $DefaultMaxSlotDepth
+        name: "max-slot-depth" }: int
+
+      maxDatasetDepth* {.
+        desc: "The maximum depth of the dataset tree"
+        defaultValue: DefaultMaxDatasetDepth
+        defaultValueDesc: $DefaultMaxDatasetDepth
+        name: "max-dataset-depth" }: int
+
+      maxBlockDepth* {.
+        desc: "The maximum depth of the network block merkle tree"
+        defaultValue: DefaultBlockDepth
+        defaultValueDesc: $DefaultBlockDepth
+        name: "max-block-depth" }: int
+
+      maxCellElms* {.
+        desc: "The maximum number of elements in a cell"
+        defaultValue: DefaultCellElms
+        defaultValueDesc: $DefaultCellElms
+        name: "max-cell-elements" }: int
+
+    of PersistenceCmd.noCmd:
+      discard
+
+  of StartUpCmd.noCmd:
+    discard # end of persistence

   EthAddress* = ethers.Address

 logutils.formatIt(LogFormat.textLines, EthAddress): it.short0xHexLog
 logutils.formatIt(LogFormat.json, EthAddress): %it

+func persistence*(self: CodexConf): bool =
+  self.cmd == StartUpCmd.persistence
+
+func prover*(self: CodexConf): bool =
+  self.persistence and self.persistenceCmd == PersistenceCmd.prover
+
 proc getCodexVersion(): string =
   let tag = strip(staticExec("git tag"))
   if tag.isEmptyOrWhitespace:
@@ -308,16 +390,6 @@ const
     "Codex revision: " & codexRevision & "\p" &
     nimBanner

-proc defaultDataDir*(): string =
-  let dataDir = when defined(windows):
-      "AppData" / "Roaming" / "Codex"
-    elif defined(macosx):
-      "Library" / "Application Support" / "Codex"
-    else:
-      ".cache" / "codex"
-
-  getHomeDir() / dataDir
-
 proc parseCmdArg*(T: typedesc[MultiAddress],
                   input: string): MultiAddress
                   {.upraises: [ValueError, LPError].} =
@@ -356,7 +428,7 @@ proc parseCmdArg*(T: type Duration, val: string): T =
   var dur: Duration
   let count = parseDuration(val, dur)
   if count == 0:
-    warn "Invalid duration parse", dur=dur
+    warn "Cannot parse duration", dur = dur
     quit QuitFailure
   dur


@@ -116,6 +116,9 @@ switch("define", "chronicles_sinks=textlines[dynamic],json[dynamic],textlines[dy
 switch("define", "use_asm_syntax_intel=false")
 switch("define", "ctt_asm=false")

+# Allow the use of old-style case objects for nim config compatibility
+switch("define", "nimOldCaseObjects")
+
 # begin Nimble config (version 1)
 when system.fileExists("nimble.paths"):
   include "nimble.paths"


@@ -20,4 +20,3 @@ proc address*(_: type Marketplace, dummyVerifier = false): Address =
     hardhatMarketWithDummyVerifier
   else:
     hardhatMarketAddress
-


@@ -13,10 +13,19 @@ method getChainId*(provider: MockProvider): Future[UInt256] {.async.} =
   return provider.chainId

 proc configFactory(): CodexConf =
-  CodexConf(cmd: noCommand, nat: ValidIpAddress.init("127.0.0.1"), discoveryIp: ValidIpAddress.init(IPv4_any()), metricsAddress: ValidIpAddress.init("127.0.0.1"))
+  CodexConf(
+    cmd: StartUpCmd.persistence,
+    nat: ValidIpAddress.init("127.0.0.1"),
+    discoveryIp: ValidIpAddress.init(IPv4_any()),
+    metricsAddress: ValidIpAddress.init("127.0.0.1"))

 proc configFactory(marketplace: Option[EthAddress]): CodexConf =
-  CodexConf(cmd: noCommand, nat: ValidIpAddress.init("127.0.0.1"), discoveryIp: ValidIpAddress.init(IPv4_any()), metricsAddress: ValidIpAddress.init("127.0.0.1"), marketplaceAddress: marketplace)
+  CodexConf(
+    cmd: StartUpCmd.persistence,
+    nat: ValidIpAddress.init("127.0.0.1"),
+    discoveryIp: ValidIpAddress.init(IPv4_any()),
+    metricsAddress: ValidIpAddress.init("127.0.0.1"),
+    marketplaceAddress: marketplace)

 asyncchecksuite "Deployment":
   let provider = MockProvider()


@@ -1,10 +1,7 @@
-import pkg/questionable
-
 type
-  CliOption* = object of RootObj
-    nodeIdx*: ?int
-    key*: string
-    value*: string
+  CliOption* = object
+    key*: string    # option key, including `--`
+    value*: string  # option value

 proc `$`*(option: CliOption): string =
   var res = option.key


@@ -1,61 +1,295 @@
import std/options import std/options
import std/os
import std/sequtils import std/sequtils
import std/strutils
import std/sugar
import std/tables
from pkg/chronicles import LogLevel
import pkg/codex/conf
import pkg/codex/units import pkg/codex/units
import pkg/confutils
import pkg/confutils/defs
import libp2p except setup
import pkg/questionable
import ./clioption import ./clioption
import ./nodeconfig
export nodeconfig
export clioption export clioption
export confutils
type type
CodexConfig* = ref object of NodeConfig CodexConfigs* = object
numNodes*: int configs*: seq[CodexConfig]
cliOptions*: seq[CliOption] CodexConfig* = object
logTopics*: seq[string] cliOptions: Table[StartUpCmd, Table[string, CliOption]]
cliPersistenceOptions: Table[PersistenceCmd, Table[string, CliOption]]
debugEnabled*: bool
CodexConfigError* = object of CatchableError
proc nodes*(config: CodexConfig, numNodes: int): CodexConfig = proc cliArgs*(config: CodexConfig): seq[string] {.gcsafe, raises: [CodexConfigError].}
if numNodes < 0:
raise newException(ValueError, "numNodes must be >= 0")
var startConfig = config proc raiseCodexConfigError(msg: string) {.raises: [CodexConfigError].} =
startConfig.numNodes = numNodes raise newException(CodexConfigError, msg)
return startConfig
proc simulateProofFailuresFor*( template convertError(body) =
try:
body
except CatchableError as e:
raiseCodexConfigError e.msg
proc init*(_: type CodexConfigs, nodes = 1): CodexConfigs {.raises: [].} =
CodexConfigs(configs: newSeq[CodexConfig](nodes))
func nodes*(self: CodexConfigs): int =
self.configs.len
proc checkBounds(self: CodexConfigs, idx: int) {.raises: [CodexConfigError].} =
if idx notin 0..<self.configs.len:
raiseCodexConfigError "index must be in bounds of the number of nodes"
proc buildConfig(
config: CodexConfig, config: CodexConfig,
providerIdx: int, msg: string): CodexConf {.raises: [CodexConfigError].} =
failEveryNProofs: int
): CodexConfig =
if providerIdx > config.numNodes - 1: proc postFix(msg: string): string =
raise newException(ValueError, "provider index out of bounds") if msg.len > 0:
": " & msg
else: ""
var startConfig = config try:
startConfig.cliOptions.add( return CodexConf.load(cmdLine = config.cliArgs, quitOnFailure = false)
CliOption( except ConfigurationError as e:
nodeIdx: some providerIdx, raiseCodexConfigError msg & e.msg.postFix
key: "--simulate-proof-failures", except Exception as e:
value: $failEveryNProofs ## TODO: remove once proper exception handling added to nim-confutils
) raiseCodexConfigError msg & e.msg.postFix
)
proc addCliOption*(
config: var CodexConfig,
group = PersistenceCmd.noCmd,
cliOption: CliOption) {.raises: [CodexConfigError].} =
var options = config.cliPersistenceOptions.getOrDefault(group)
options[cliOption.key] = cliOption # overwrite if already exists
config.cliPersistenceOptions[group] = options
discard config.buildConfig("Invalid cli arg " & $cliOption)
proc addCliOption*(
config: var CodexConfig,
group = PersistenceCmd.noCmd,
key: string, value = "") {.raises: [CodexConfigError].} =
config.addCliOption(group, CliOption(key: key, value: value))
proc addCliOption*(
config: var CodexConfig,
group = StartUpCmd.noCmd,
cliOption: CliOption) {.raises: [CodexConfigError].} =
var options = config.cliOptions.getOrDefault(group)
options[cliOption.key] = cliOption # overwrite if already exists
config.cliOptions[group] = options
discard config.buildConfig("Invalid cli arg " & $cliOption)
proc addCliOption*(
config: var CodexConfig,
group = StartUpCmd.noCmd,
key: string, value = "") {.raises: [CodexConfigError].} =
config.addCliOption(group, CliOption(key: key, value: value))
proc addCliOption*(
config: var CodexConfig,
cliOption: CliOption) {.raises: [CodexConfigError].} =
config.addCliOption(StartUpCmd.noCmd, cliOption)
proc addCliOption*(
config: var CodexConfig,
key: string, value = "") {.raises: [CodexConfigError].} =
config.addCliOption(StartUpCmd.noCmd, CliOption(key: key, value: value))
proc cliArgs*(
config: CodexConfig): seq[string] {.gcsafe, raises: [CodexConfigError].} =
## converts CodexConfig cli options and command groups in a sequence of args
## and filters out cli options by node index if provided in the CliOption
var args: seq[string] = @[]
convertError:
for cmd in StartUpCmd:
if config.cliOptions.hasKey(cmd):
if cmd != StartUpCmd.noCmd:
args.add $cmd
var opts = config.cliOptions[cmd].values.toSeq
args = args.concat( opts.map(o => $o) )
for cmd in PersistenceCmd:
if config.cliPersistenceOptions.hasKey(cmd):
if cmd != PersistenceCmd.noCmd:
args.add $cmd
var opts = config.cliPersistenceOptions[cmd].values.toSeq
args = args.concat( opts.map(o => $o) )
return args
proc logFile*(config: CodexConfig): ?string {.raises: [CodexConfigError].} =
let built = config.buildConfig("Invalid codex config cli params")
built.logFile
proc logLevel*(config: CodexConfig): LogLevel {.raises: [CodexConfigError].} =
convertError:
let built = config.buildConfig("Invalid codex config cli params")
return parseEnum[LogLevel](built.logLevel.toUpperAscii)
proc debug*(
self: CodexConfigs,
idx: int,
enabled = true): CodexConfigs {.raises: [CodexConfigError].} =
## output log in stdout for a specific node in the group
self.checkBounds idx
var startConfig = self
startConfig.configs[idx].debugEnabled = enabled
return startConfig return startConfig
proc debug*(self: CodexConfigs, enabled = true): CodexConfigs {.raises: [].} =
## output log in stdout for all nodes in group
var startConfig = self
for config in startConfig.configs.mitems:
config.debugEnabled = enabled
return startConfig
proc withLogFile*(
self: CodexConfigs,
idx: int): CodexConfigs {.raises: [CodexConfigError].} =
self.checkBounds idx
var startConfig = self
startConfig.configs[idx].addCliOption("--log-file", "<updated_in_test>")
return startConfig
proc withLogFile*(
self: CodexConfigs): CodexConfigs {.raises: [CodexConfigError].} =
## typically called from test, sets config such that a log file should be
## created
var startConfig = self
for config in startConfig.configs.mitems:
config.addCliOption("--log-file", "<updated_in_test>")
return startConfig
proc withLogFile*(
self: var CodexConfig,
logFile: string) {.raises: [CodexConfigError].} = #: CodexConfigs =
## typically called internally from the test suite, sets a log file path to
## be created during the test run, for a specified node in the group
# var config = self
self.addCliOption("--log-file", logFile)
# return startConfig
proc withLogLevel*(
self: CodexConfig,
level: LogLevel | string): CodexConfig {.raises: [CodexConfigError].} =
var config = self
config.addCliOption("--log-level", $level)
return config
proc withLogLevel*(
self: CodexConfigs,
idx: int,
level: LogLevel | string): CodexConfigs {.raises: [CodexConfigError].} =
self.checkBounds idx
var startConfig = self
startConfig.configs[idx].addCliOption("--log-level", $level)
return startConfig
proc withLogLevel*(
self: CodexConfigs,
level: LogLevel | string): CodexConfigs {.raises: [CodexConfigError].} =
var startConfig = self
for config in startConfig.configs.mitems:
config.addCliOption("--log-level", $level)
return startConfig
proc withSimulateProofFailures*(
self: CodexConfigs,
idx: int,
failEveryNProofs: int
): CodexConfigs {.raises: [CodexConfigError].} =
self.checkBounds idx
var startConfig = self
startConfig.configs[idx].addCliOption(
StartUpCmd.persistence, "--simulate-proof-failures", $failEveryNProofs)
return startConfig
proc withSimulateProofFailures*(
self: CodexConfigs,
failEveryNProofs: int): CodexConfigs {.raises: [CodexConfigError].} =
var startConfig = self
for config in startConfig.configs.mitems:
config.addCliOption(
StartUpCmd.persistence, "--simulate-proof-failures", $failEveryNProofs)
return startConfig
proc logLevelWithTopics(
config: CodexConfig,
topics: varargs[string]): string {.raises: [CodexConfigError].} =
convertError:
var logLevel = LogLevel.INFO
let built = config.buildConfig("Invalid codex config cli params")
logLevel = parseEnum[LogLevel](built.logLevel.toUpperAscii)
let level = $logLevel & ";TRACE: " & topics.join(",")
return level
proc withLogTopics*( proc withLogTopics*(
config: CodexConfig, self: CodexConfigs,
topics: varargs[string] idx: int,
): CodexConfig = topics: varargs[string]): CodexConfigs {.raises: [CodexConfigError].} =
var startConfig = config self.checkBounds idx
startConfig.logTopics = startConfig.logTopics.concat(@topics)
convertError:
let config = self.configs[idx]
let level = config.logLevelWithTopics(topics)
var startConfig = self
return startConfig.withLogLevel(idx, level)
proc withLogTopics*(
self: CodexConfigs,
topics: varargs[string]
): CodexConfigs {.raises: [CodexConfigError].} =
var startConfig = self
for config in startConfig.configs.mitems:
let level = config.logLevelWithTopics(topics)
config = config.withLogLevel(level)
return startConfig return startConfig
proc withStorageQuota*( proc withStorageQuota*(
config: CodexConfig, self: CodexConfigs,
quota: NBytes idx: int,
): CodexConfig = quota: NBytes): CodexConfigs {.raises: [CodexConfigError].} =
var startConfig = config self.checkBounds idx
startConfig.cliOptions.add(
CliOption(key: "--storage-quota", value: $quota) var startConfig = self
) startConfig.configs[idx].addCliOption("--storage-quota", $quota)
return startConfig
proc withStorageQuota*(
self: CodexConfigs,
quota: NBytes): CodexConfigs {.raises: [CodexConfigError].} =
var startConfig = self
for config in startConfig.configs.mitems:
config.addCliOption("--storage-quota", $quota)
return startConfig return startConfig


@@ -40,17 +40,17 @@ method onOutputLineCaptured(node: CodexProcess, line: string) =
   discard

 proc dataDir(node: CodexProcess): string =
-  let config = CodexConf.load(cmdLine = node.arguments)
+  let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
   return config.dataDir.string

 proc ethAccount*(node: CodexProcess): Address =
-  let config = CodexConf.load(cmdLine = node.arguments)
+  let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
   without ethAccount =? config.ethAccount:
     raiseAssert "eth account not set"
   return Address(ethAccount)

 proc apiUrl*(node: CodexProcess): string =
-  let config = CodexConf.load(cmdLine = node.arguments)
+  let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
   return "http://" & config.apiBindAddress & ":" & $config.apiPort & "/api/codex/v1"

 proc client*(node: CodexProcess): CodexClient =


@@ -1,6 +1,15 @@
-import ./nodeconfig
-
-export nodeconfig
-
 type
-  HardhatConfig* = ref object of NodeConfig
+  HardhatConfig* = object
+    logFile*: bool
+    debugEnabled*: bool
+
+proc debug*(self: HardhatConfig, enabled = true): HardhatConfig =
+  ## output log in stdout
+  var config = self
+  config.debugEnabled = enabled
+  return config
+
+proc withLogFile*(self: HardhatConfig, logToFile: bool = true): HardhatConfig =
+  var config = self
+  config.logFile = logToFile
+  return config


@@ -3,13 +3,16 @@ import std/sequtils
import std/strutils import std/strutils
import std/sugar import std/sugar
import std/times import std/times
import pkg/codex/conf
import pkg/codex/logutils import pkg/codex/logutils
import pkg/chronos/transports/stream import pkg/chronos/transports/stream
import pkg/ethers import pkg/ethers
import ./hardhatprocess import pkg/questionable
import ./codexconfig
import ./codexprocess import ./codexprocess
import ./hardhatconfig import ./hardhatconfig
import ./codexconfig import ./hardhatprocess
import ./nodeconfigs
import ../asynctest import ../asynctest
import ../checktest import ../checktest
@@ -24,16 +27,15 @@ type
RunningNode* = ref object RunningNode* = ref object
role*: Role role*: Role
node*: NodeProcess node*: NodeProcess
NodeConfigs* = object
clients*: CodexConfig
providers*: CodexConfig
validators*: CodexConfig
hardhat*: HardhatConfig
Role* {.pure.} = enum Role* {.pure.} = enum
Client, Client,
Provider, Provider,
Validator, Validator,
Hardhat Hardhat
MultiNodeSuiteError = object of CatchableError
proc raiseMultiNodeSuiteError(msg: string) =
raise newException(MultiNodeSuiteError, msg)
proc nextFreePort(startPort: int): Future[int] {.async.} = proc nextFreePort(startPort: int): Future[int] {.async.} =
@@ -79,6 +81,7 @@ template multinodesuite*(name: string, body: untyped) =
var sanitized = pathSegment var sanitized = pathSegment
for invalid in invalidFilenameChars.items: for invalid in invalidFilenameChars.items:
sanitized = sanitized.replace(invalid, '_') sanitized = sanitized.replace(invalid, '_')
.replace(' ', '_')
sanitized sanitized
proc getLogFile(role: Role, index: ?int): string = proc getLogFile(role: Role, index: ?int): string =
@@ -87,7 +90,7 @@ template multinodesuite*(name: string, body: untyped) =
var logDir = currentSourcePath.parentDir() / var logDir = currentSourcePath.parentDir() /
"logs" / "logs" /
sanitize($starttime & " " & name) / sanitize($starttime & "__" & name) /
sanitize($currentTestName) sanitize($currentTestName)
createDir(logDir) createDir(logDir)
@@ -110,53 +113,56 @@ template multinodesuite*(name: string, body: untyped) =
args.add "--log-file=" & updatedLogFile args.add "--log-file=" & updatedLogFile
let node = await HardhatProcess.startNode(args, config.debugEnabled, "hardhat") let node = await HardhatProcess.startNode(args, config.debugEnabled, "hardhat")
try:
await node.waitUntilStarted() await node.waitUntilStarted()
except NodeProcessError as e:
raiseMultiNodeSuiteError "hardhat node not started: " & e.msg
trace "hardhat node started" trace "hardhat node started"
return node return node
proc newCodexProcess(roleIdx: int, proc newCodexProcess(roleIdx: int,
config: CodexConfig, conf: CodexConfig,
role: Role role: Role
): Future[NodeProcess] {.async.} = ): Future[NodeProcess] {.async.} =
let nodeIdx = running.len let nodeIdx = running.len
var conf = config var config = conf
if nodeIdx > accounts.len - 1: if nodeIdx > accounts.len - 1:
raiseAssert("Cannot start node at nodeIdx " & $nodeIdx & raiseMultiNodeSuiteError "Cannot start node at nodeIdx " & $nodeIdx &
", not enough eth accounts.") ", not enough eth accounts."
let datadir = getTempDir() / "Codex" / let datadir = getTempDir() / "Codex" /
sanitize($starttime) / sanitize($starttime) /
sanitize($role & "_" & $roleIdx) sanitize($role & "_" & $roleIdx)
if conf.logFile: try:
if config.logFile.isSome:
let updatedLogFile = getLogFile(role, some roleIdx) let updatedLogFile = getLogFile(role, some roleIdx)
conf.cliOptions.add CliOption(key: "--log-file", value: updatedLogFile) config.withLogFile(updatedLogFile)
let logLevel = conf.logLevel |? LogLevel.INFO config.addCliOption("--api-port", $ await nextFreePort(8080 + nodeIdx))
if conf.logTopics.len > 0: config.addCliOption("--data-dir", datadir)
conf.cliOptions.add CliOption( config.addCliOption("--nat", "127.0.0.1")
key: "--log-level", config.addCliOption("--listen-addrs", "/ip4/127.0.0.1/tcp/0")
value: $logLevel & ";TRACE: " & conf.logTopics.join(",") config.addCliOption("--disc-ip", "127.0.0.1")
config.addCliOption("--disc-port", $ await nextFreePort(8090 + nodeIdx))
except CodexConfigError as e:
raiseMultiNodeSuiteError "invalid cli option, error: " & e.msg
let node = await CodexProcess.startNode(
config.cliArgs,
config.debugEnabled,
$role & $roleIdx
) )
else:
conf.cliOptions.add CliOption(key: "--log-level", value: $logLevel)
var args = conf.cliOptions.map(o => $o) try:
.concat(@[
"--api-port=" & $ await nextFreePort(8080 + nodeIdx),
"--data-dir=" & datadir,
"--nat=127.0.0.1",
"--listen-addrs=/ip4/127.0.0.1/tcp/0",
"--disc-ip=127.0.0.1",
"--disc-port=" & $ await nextFreePort(8090 + nodeIdx),
"--eth-account=" & $accounts[nodeIdx]])
let node = await CodexProcess.startNode(args, conf.debugEnabled, $role & $roleIdx)
await node.waitUntilStarted() await node.waitUntilStarted()
trace "node started", nodeName = $role & $roleIdx trace "node started", nodeName = $role & $roleIdx
except NodeProcessError as e:
raiseMultiNodeSuiteError "node not started, error: " & e.msg
return node return node
@@ -184,85 +190,36 @@ template multinodesuite*(name: string, body: untyped) =
if r.role == Role.Validator: if r.role == Role.Validator:
CodexProcess(r.node) CodexProcess(r.node)
proc startHardhatNode(): Future[NodeProcess] {.async.} = proc startHardhatNode(config: HardhatConfig): Future[NodeProcess] {.async.} =
var config = nodeConfigs.hardhat
return await newHardhatProcess(config, Role.Hardhat) return await newHardhatProcess(config, Role.Hardhat)
proc startClientNode(): Future[NodeProcess] {.async.} = proc startClientNode(conf: CodexConfig): Future[NodeProcess] {.async.} =
let clientIdx = clients().len let clientIdx = clients().len
var config = nodeConfigs.clients var config = conf
config.cliOptions.add CliOption(key: "--persistence") config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len])
return await newCodexProcess(clientIdx, config, Role.Client) return await newCodexProcess(clientIdx, config, Role.Client)
proc startProviderNode(): Future[NodeProcess] {.async.} = proc startProviderNode(conf: CodexConfig): Future[NodeProcess] {.async.} =
let providerIdx = providers().len let providerIdx = providers().len
var config = nodeConfigs.providers var config = conf
config.cliOptions.add CliOption(key: "--bootstrap-node", value: bootstrap) config.addCliOption("--bootstrap-node", bootstrap)
config.cliOptions.add CliOption(key: "--persistence") config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len])
config.addCliOption(PersistenceCmd.prover, "--circom-r1cs", "tests/circuits/fixtures/proof_main.r1cs")
# filter out provider options by provided index config.addCliOption(PersistenceCmd.prover, "--circom-wasm", "tests/circuits/fixtures/proof_main.wasm")
config.cliOptions = config.cliOptions.filter( config.addCliOption(PersistenceCmd.prover, "--circom-zkey", "tests/circuits/fixtures/proof_main.zkey")
o => (let idx = o.nodeIdx |? providerIdx; idx == providerIdx)
)
return await newCodexProcess(providerIdx, config, Role.Provider) return await newCodexProcess(providerIdx, config, Role.Provider)
proc startValidatorNode(): Future[NodeProcess] {.async.} = proc startValidatorNode(conf: CodexConfig): Future[NodeProcess] {.async.} =
let validatorIdx = validators().len let validatorIdx = validators().len
var config = nodeConfigs.validators var config = conf
config.cliOptions.add CliOption(key: "--bootstrap-node", value: bootstrap) config.addCliOption("--bootstrap-node", bootstrap)
config.cliOptions.add CliOption(key: "--validator") config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len])
config.addCliOption(StartUpCmd.persistence, "--validator")
return await newCodexProcess(validatorIdx, config, Role.Validator) return await newCodexProcess(validatorIdx, config, Role.Validator)
setup: proc teardownImpl() {.async.} =
if not nodeConfigs.hardhat.isNil:
let node = await startHardhatNode()
running.add RunningNode(role: Role.Hardhat, node: node)
try:
# Workaround for https://github.com/NomicFoundation/hardhat/issues/2053
# Do not use websockets, but use http and polling to stop subscriptions
# from being removed after 5 minutes
ethProvider = JsonRpcProvider.new("http://localhost:8545")
# if hardhat was NOT started by the test, take a snapshot so it can be
# reverted in the test teardown
if nodeConfigs.hardhat.isNil:
snapshot = await send(ethProvider, "evm_snapshot")
# ensure that we have a recent block with a fresh timestamp
discard await send(ethProvider, "evm_mine")
accounts = await ethProvider.listAccounts()
except CatchableError as e:
fatal "failed to connect to hardhat", error = e.msg
raiseAssert "Hardhat not running. Run hardhat manually before executing tests, or include a HardhatConfig in the test setup."
if not nodeConfigs.clients.isNil:
for i in 0..<nodeConfigs.clients.numNodes:
let node = await startClientNode()
running.add RunningNode(
role: Role.Client,
node: node
)
if i == 0:
bootstrap = CodexProcess(node).client.info()["spr"].getStr()
if not nodeConfigs.providers.isNil:
for i in 0..<nodeConfigs.providers.numNodes:
let node = await startProviderNode()
running.add RunningNode(
role: Role.Provider,
node: node
)
if not nodeConfigs.validators.isNil:
for i in 0..<nodeConfigs.validators.numNodes:
let node = await startValidatorNode()
running.add RunningNode(
role: Role.Validator,
node: node
)
teardown:
for nodes in @[validators(), clients(), providers()]: for nodes in @[validators(), clients(), providers()]:
for node in nodes: for node in nodes:
await node.stop() # also stops rest client await node.stop() # also stops rest client
@@ -278,4 +235,77 @@ template multinodesuite*(name: string, body: untyped) =
running = @[] running = @[]
template failAndTeardownOnError(message: string, tryBody: untyped) =
try:
tryBody
except CatchableError as er:
fatal message, error=er.msg
echo "[FATAL] ", message, ": ", er.msg
await teardownImpl()
when declared(teardownAllIMPL):
teardownAllIMPL()
fail()
quit(1)
setup:
if var conf =? nodeConfigs.hardhat:
try:
let node = await startHardhatNode(conf)
running.add RunningNode(role: Role.Hardhat, node: node)
except CatchableError as e:
echo "failed to start hardhat node"
fail()
quit(1)
try:
# Workaround for https://github.com/NomicFoundation/hardhat/issues/2053
# Do not use websockets, but use http and polling to stop subscriptions
# from being removed after 5 minutes
ethProvider = JsonRpcProvider.new("http://localhost:8545")
# if hardhat was NOT started by the test, take a snapshot so it can be
# reverted in the test teardown
if nodeConfigs.hardhat.isNone:
snapshot = await send(ethProvider, "evm_snapshot")
# ensure that we have a recent block with a fresh timestamp
discard await send(ethProvider, "evm_mine")
accounts = await ethProvider.listAccounts()
except CatchableError as e:
echo "Hardhat not running. Run hardhat manually " &
"before executing tests, or include a " &
"HardhatConfig in the test setup."
fail()
quit(1)
if var clients =? nodeConfigs.clients:
failAndTeardownOnError "failed to start client nodes":
for config in clients.configs:
let node = await startClientNode(config)
running.add RunningNode(
role: Role.Client,
node: node
)
if clients().len == 1:
bootstrap = CodexProcess(node).client.info()["spr"].getStr()
if var providers =? nodeConfigs.providers:
failAndTeardownOnError "failed to start provider nodes":
for config in providers.configs.mitems:
let node = await startProviderNode(config)
running.add RunningNode(
role: Role.Provider,
node: node
)
if var validators =? nodeConfigs.validators:
failAndTeardownOnError "failed to start validator nodes":
for config in validators.configs.mitems:
let node = await startValidatorNode(config)
running.add RunningNode(
role: Role.Validator,
node: node
)
teardown:
await teardownImpl()
body body


@@ -0,0 +1,11 @@
+import pkg/questionable
+
+import ./codexconfig
+import ./hardhatconfig
+
+type
+  NodeConfigs* = object
+    clients*: ?CodexConfigs
+    providers*: ?CodexConfigs
+    validators*: ?CodexConfigs
+    hardhat*: ?HardhatConfig


@@ -23,6 +23,7 @@ type
     debug: bool
     trackedFutures*: TrackedFutures
     name*: string
+  NodeProcessError* = object of CatchableError

 method workingDir(node: NodeProcess): string {.base.} =
   raiseAssert "not implemented"
@@ -54,6 +55,8 @@ method start*(node: NodeProcess) {.base, async.} =
     processOptions = poptions

   try:
+    if node.debug:
+      echo "starting codex node with args: ", node.arguments.join(" ")
     node.process = await startProcess(
       node.executable,
       node.workingDir,
@@ -149,12 +152,15 @@ proc waitUntilStarted*(node: NodeProcess) {.async.} =
   let started = newFuture[void]()
   try:
     discard node.captureOutput(node.startedOutput, started).track(node)
-    await started.wait(5.seconds)
-  except AsyncTimeoutError as e:
+    await started.wait(35.seconds) # allow enough time for proof generation
+  except AsyncTimeoutError:
     # attempt graceful shutdown in case node was partially started, prevent
     # zombies
     await node.stop()
-    raiseAssert "node did not output '" & node.startedOutput & "'"
+    # raise error here so that all nodes (not just this one) can be
+    # shutdown gracefully
+    raise newException(NodeProcessError, "node did not output '" &
+      node.startedOutput & "'")

 proc restart*(node: NodeProcess) {.async.} =
   await node.stop()


@@ -58,11 +58,11 @@ proc startNode*(args: openArray[string], debug: string | bool = false): NodeProc
   node

 proc dataDir(node: NodeProcess): string =
-  let config = CodexConf.load(cmdLine = node.arguments)
+  let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
   config.dataDir.string

 proc apiUrl(node: NodeProcess): string =
-  let config = CodexConf.load(cmdLine = node.arguments)
+  let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
   "http://" & config.apiBindAddress & ":" & $config.apiPort & "/api/codex/v1"

 proc client*(node: NodeProcess): CodexClient =


@@ -1,40 +1,50 @@
 import std/unittest
 import std/tempfiles
+import codex/conf
 import codex/utils/fileutils
 import ./nodes

 suite "Command line interface":

-  let account = "4242424242424242424242424242424242424242"
   let key = "4242424242424242424242424242424242424242424242424242424242424242"

   test "complains when persistence is enabled without ethereum account":
-    let node = startNode(@["--persistence"])
+    let node = startNode(@[
+      "persistence"
+    ])
     node.waitUntilOutput("Persistence enabled, but no Ethereum account was set")
     node.stop()

-  test "complains when validator is enabled without ethereum account":
-    let node = startNode(@["--validator"])
-    node.waitUntilOutput("Validator enabled, but no Ethereum account was set")
-    node.stop()
-
-  test "complains when ethereum account is set when not needed":
-    let node = startNode(@["--eth-account=" & account])
-    node.waitUntilOutput("Ethereum account was set, but neither persistence nor validator is enabled")
-    node.stop()
-
-  test "complains when ethereum private key is set when not needed":
-    let keyFile = genTempPath("", "")
-    discard secureWriteFile(keyFile, key)
-    let node = startNode(@["--eth-private-key=" & keyFile])
-    node.waitUntilOutput("Ethereum account was set, but neither persistence nor validator is enabled")
-    node.stop()
-    discard removeFile(keyFile)
-
   test "complains when ethereum private key file has wrong permissions":
     let unsafeKeyFile = genTempPath("", "")
     discard unsafeKeyFile.writeFile(key, 0o666)
-    let node = startNode(@["--persistence", "--eth-private-key=" & unsafeKeyFile])
+    let node = startNode(@[
+      "persistence",
+      "--eth-private-key=" & unsafeKeyFile])
     node.waitUntilOutput("Ethereum private key file does not have safe file permissions")
     node.stop()
     discard removeFile(unsafeKeyFile)
+
+  test "complains when persistence is enabled without accessible r1cs file":
+    let node = startNode(@["persistence", "prover"])
+    node.waitUntilOutput("r1cs file not readable, doesn't exist or wrong extension (.r1cs)")
+    node.stop()
+
+  test "complains when persistence is enabled without accessible wasm file":
+    let node = startNode(@[
+      "persistence",
+      "prover",
+      "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs"
+    ])
+    node.waitUntilOutput("wasm file not readable, doesn't exist or wrong extension (.wasm)")
+    node.stop()
+
+  test "complains when persistence is enabled without accessible zkey file":
+    let node = startNode(@[
+      "persistence",
+      "prover",
+      "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs",
+      "--circom-wasm=tests/circuits/fixtures/proof_main.wasm"
+    ])
+    node.waitUntilOutput("zkey file not readable, doesn't exist or wrong extension (.zkey)")
+    node.stop()


@@ -1,7 +1,7 @@
-import std/math
 import pkg/stew/byteutils
 import pkg/codex/units
 import ./marketplacesuite
+import ./nodeconfigs
 import ../examples

 marketplacesuite "Marketplace payouts":
@@ -9,21 +9,21 @@ marketplacesuite "Marketplace payouts":
test "expired request partially pays out for stored time", test "expired request partially pays out for stored time",
NodeConfigs( NodeConfigs(
# Uncomment to start Hardhat automatically, typically so logs can be inspected locally # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
# hardhat: HardhatConfig().withLogFile() hardhat: HardhatConfig.none,
clients: clients:
CodexConfig() CodexConfigs.init(nodes=1)
.nodes(1),
# .debug() # uncomment to enable console log output.debug() # .debug() # uncomment to enable console log output.debug()
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# .withLogTopics("node", "erasure"), # .withLogTopics("node", "erasure")
.some,
providers: providers:
CodexConfig() CodexConfigs.init(nodes=1)
.nodes(1)
# .debug() # uncomment to enable console log output # .debug() # uncomment to enable console log output
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock"), # .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock")
.some,
): ):
let reward = 400.u256 let reward = 400.u256
let duration = 10.periods let duration = 10.periods


@@ -1,4 +1,3 @@
-import std/math
 from std/times import inMilliseconds
 import pkg/codex/logutils
 import pkg/stew/byteutils
@@ -7,8 +6,9 @@ import ../contracts/deployment
 import ../codex/helpers
 import ../examples
 import ./marketplacesuite
+import ./nodeconfigs

-export chronicles
+export logutils

 logScope:
   topics = "integration test proofs"
@@ -18,21 +18,22 @@ marketplacesuite "Hosts submit regular proofs":
test "hosts submit periodic proofs for slots they fill", NodeConfigs( test "hosts submit periodic proofs for slots they fill", NodeConfigs(
# Uncomment to start Hardhat automatically, typically so logs can be inspected locally # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
# hardhat: HardhatConfig().withLogFile(), hardhat:
HardhatConfig.none,
clients: clients:
CodexConfig() CodexConfigs.init(nodes=1)
.nodes(1),
# .debug() # uncomment to enable console log output # .debug() # uncomment to enable console log output
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# .withLogTopics("node"), # .withLogTopics("node")
.some,
providers: providers:
CodexConfig() CodexConfigs.init(nodes=1)
.nodes(1)
# .debug() # uncomment to enable console log output # .debug() # uncomment to enable console log output
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# .withLogTopics("marketplace", "sales", "reservations", "node"), # .withLogTopics("marketplace", "sales", "reservations", "node")
.some,
): ):
let client0 = clients()[0].client let client0 = clients()[0].client
let expiry = 5.periods let expiry = 5.periods
@@ -72,29 +73,30 @@ marketplacesuite "Simulate invalid proofs":
test "slot is freed after too many invalid proofs submitted", NodeConfigs( test "slot is freed after too many invalid proofs submitted", NodeConfigs(
# Uncomment to start Hardhat automatically, typically so logs can be inspected locally # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
# hardhat: HardhatConfig().withLogFile(), hardhat:
HardhatConfig.none,
clients: clients:
CodexConfig() CodexConfigs.init(nodes=1)
.nodes(1),
# .debug() # uncomment to enable console log output # .debug() # uncomment to enable console log output
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# .withLogTopics("node", "clock"), # .withLogTopics("node", "clock")
.some,
providers: providers:
CodexConfig() CodexConfigs.init(nodes=1)
.nodes(1) .withSimulateProofFailures(idx=0, failEveryNProofs=1)
.simulateProofFailuresFor(providerIdx=0, failEveryNProofs=1),
# .debug() # uncomment to enable console log output # .debug() # uncomment to enable console log output
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# .withLogTopics("marketplace", "sales", "reservations", "node", "clock"), # .withLogTopics("marketplace", "sales", "reservations", "node", "clock")
.some,
validators: validators:
CodexConfig() CodexConfigs.init(nodes=1)
.nodes(1)
# .debug() # uncomment to enable console log output # .debug() # uncomment to enable console log output
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# .withLogTopics("validator", "onchain", "ethers", "clock") # .withLogTopics("validator", "onchain", "ethers", "clock")
.some
): ):
let client0 = clients()[0].client let client0 = clients()[0].client
let expiry = 5.periods let expiry = 5.periods
@@ -130,29 +132,29 @@ marketplacesuite "Simulate invalid proofs":
test "slot is not freed when not enough invalid proofs submitted", NodeConfigs( test "slot is not freed when not enough invalid proofs submitted", NodeConfigs(
# Uncomment to start Hardhat automatically, typically so logs can be inspected locally # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
# hardhat: HardhatConfig().withLogFile(), hardhat: HardhatConfig.none,
clients: clients:
CodexConfig() CodexConfigs.init(nodes=1)
.nodes(1),
# .debug() # uncomment to enable console log output # .debug() # uncomment to enable console log output
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# .withLogTopics("marketplace", "sales", "reservations", "node", "clock"), # .withLogTopics("marketplace", "sales", "reservations", "node", "clock")
.some,
providers: providers:
CodexConfig() CodexConfigs.init(nodes=1)
.nodes(1) .withSimulateProofFailures(idx=0, failEveryNProofs=1)
.simulateProofFailuresFor(providerIdx=0, failEveryNProofs=1),
# .debug() # uncomment to enable console log output # .debug() # uncomment to enable console log output
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# .withLogTopics("marketplace", "sales", "reservations", "node"), # .withLogTopics("marketplace", "sales", "reservations", "node")
.some,
validators: validators:
CodexConfig() CodexConfigs.init(nodes=1)
.nodes(1)
# .debug() # .debug()
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# .withLogTopics("validator", "onchain", "ethers", "clock") # .withLogTopics("validator", "onchain", "ethers", "clock")
.some
): ):
let client0 = clients()[0].client let client0 = clients()[0].client
let expiry = 5.periods let expiry = 5.periods


@@ -38,7 +38,11 @@ template twonodessuite*(name: string, debug1, debug2: string, body) =
     "--disc-ip=127.0.0.1",
     "--disc-port=8090",
     "--listen-addrs=/ip4/127.0.0.1/tcp/0",
-    "--persistence",
+    "persistence",
+    "prover",
+    "--circom-r1cs=vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.r1cs",
+    "--circom-wasm=vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.wasm",
+    "--circom-zkey=vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.zkey",
     "--eth-account=" & $account1
   ]
@@ -58,7 +62,11 @@ template twonodessuite*(name: string, debug1, debug2: string, body) =
     "--disc-port=8091",
     "--listen-addrs=/ip4/127.0.0.1/tcp/0",
     "--bootstrap-node=" & bootstrap,
-    "--persistence",
+    "persistence",
+    "prover",
+    "--circom-r1cs=vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.r1cs",
+    "--circom-wasm=vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.wasm",
+    "--circom-zkey=vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.zkey",
     "--eth-account=" & $account2
   ]