Prover CLI updates (#735)

* rework cli to accept circuit params

* check circom file extensions

* add newly required cli changes

* don't use ufcs

* persistence is a command now

* use `nimOldCaseObjects` switch for nim confutils compat

* misc

* Update cli integration tests

* Fix: simulateProofFailures option is not for validator

* move circom params under `prover` command (see the example invocation at the end of this list)

* update tests

* Use circuit assets from codex-contract-eth in tests

* Add "prover" cli command to tests

* use correct stores

* make `verifier` a cmd option

* update circuit artifacts path

* fix cli tests

* Update integration tests to use cli commands

Integration tests have been updated to use the new cli commands. The api used by the integration tests has also changed slightly.

Proofs tests have been updated to use 5 nodes and 8 blocks of data. The remaining integration tests also need to be updated.
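A rough sketch of the updated test api (names taken from the test-helper diffs below; the node counts and option values are illustrative only):

    let nodeConfigs = NodeConfigs(
      # one client node, logging to stdout
      clients: CodexConfigs.init(nodes = 1).debug().some,
      # five providers, the first simulating a proof failure every 3rd proof
      providers: CodexConfigs.init(nodes = 5)
        .withSimulateProofFailures(idx = 0, failEveryNProofs = 3)
        .some,
      validators: CodexConfigs.init(nodes = 1).some
    )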

* remove parsedCli from CodexConfig

Instead, parse the cli args on the fly when needed.
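For example, from codexprocess.nim in the diffs below:

    proc dataDir(node: CodexProcess): string =
      let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
      return config.dataDir.string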

* remove unneeded gcsafes

* graceful shutdowns

Where possible, do not raise asserts, as other nodes in the test may already be running. Instead, raise exceptions, catch them in multinodes.nim, and attempt a teardown before failing the test.

`abortOnError` is set to true so that `fail()` will quit immediately after teardown has been run.
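A condensed sketch of this pattern, taken from the `failAndTeardownOnError` template in the multinodes.nim diff below:

    template failAndTeardownOnError(message: string, tryBody: untyped) =
      try:
        tryBody
      except CatchableError as er:
        fatal message, error = er.msg
        await teardownImpl()  # tear down any nodes that did start
        fail()                # abortOnError = true, so this quits immediately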

* update testmarketplace to new api, with valid EC params
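With `persistence` and `prover` as commands, a node that sells storage and generates proofs is started roughly like this (hypothetical invocation; the account value and circuit paths are placeholders):

    codex --data-dir=./data \
      persistence --eth-account=<ETH_ADDRESS> \
      prover --circom-r1cs=circuits/proof_main.r1cs \
             --circom-wasm=circuits/proof_main.wasm \
             --circom-zkey=circuits/proof_main.zkey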

---------

Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
Authored by markspanbroek on 2024-03-12 10:57:13 +01:00; committed by GitHub
parent 8589e63d34
commit 293c676f22
18 changed files with 940 additions and 522 deletions

codex.nim (161 changes)

@@ -23,6 +23,7 @@ import ./codex/codex
 import ./codex/logutils
 import ./codex/units
 import ./codex/utils/keyutils
+import ./codex/codextypes

 export codex, conf, libp2p, chronos, logutils
@@ -54,99 +55,101 @@ when isMainModule:
   config.setupLogging()
   config.setupMetrics()

-  case config.cmd:
-  of StartUpCommand.noCommand:
-
-    if config.nat == ValidIpAddress.init(IPv4_any()):
-      error "`--nat` cannot be set to the any (`0.0.0.0`) address"
-      quit QuitFailure
-
-    if config.nat == ValidIpAddress.init("127.0.0.1"):
-      warn "`--nat` is set to loopback, your node wont properly announce over the DHT"
-
-    if not(checkAndCreateDataDir((config.dataDir).string)):
-      # We are unable to access/create data folder or data folder's
-      # permissions are insecure.
-      quit QuitFailure
-
-    trace "Data dir initialized", dir = $config.dataDir
-
-    if not(checkAndCreateDataDir((config.dataDir / "repo"))):
-      # We are unable to access/create data folder or data folder's
-      # permissions are insecure.
-      quit QuitFailure
-
-    trace "Repo dir initialized", dir = config.dataDir / "repo"
-
-    var
-      state: CodexStatus
-      shutdown: Future[void]
-
-    let
-      keyPath =
-        if isAbsolute(config.netPrivKeyFile):
-          config.netPrivKeyFile
-        else:
-          config.dataDir / config.netPrivKeyFile
-
-      privateKey = setupKey(keyPath).expect("Should setup private key!")
-      server = CodexServer.new(config, privateKey)
-
-    ## Ctrl+C handling
-    proc doShutdown() =
-      shutdown = server.stop()
-      state = CodexStatus.Stopping
-
-      notice "Stopping Codex"
-
-    proc controlCHandler() {.noconv.} =
-      when defined(windows):
-        # workaround for https://github.com/nim-lang/Nim/issues/4057
-        try:
-          setupForeignThreadGc()
-        except Exception as exc: raiseAssert exc.msg # shouldn't happen
-
-      notice "Shutting down after having received SIGINT"
-
-      doShutdown()
-
-    try:
-      setControlCHook(controlCHandler)
-    except Exception as exc: # TODO Exception
-      warn "Cannot set ctrl-c handler", msg = exc.msg
-
-    # equivalent SIGTERM handler
-    when defined(posix):
-      proc SIGTERMHandler(signal: cint) {.noconv.} =
-        notice "Shutting down after having received SIGTERM"
-
-        doShutdown()
-
-      c_signal(ansi_c.SIGTERM, SIGTERMHandler)
-
-    try:
-      waitFor server.start()
-    except CatchableError as error:
-      error "Codex failed to start", error = error.msg
-      # XXX ideally we'd like to issue a stop instead of quitting cold turkey,
-      # but this would mean we'd have to fix the implementation of all
-      # services so they won't crash if we attempt to stop them before they
-      # had a chance to start (currently you'll get a SISGSEV if you try to).
-      quit QuitFailure
-
-    state = CodexStatus.Running
-    while state == CodexStatus.Running:
-      # poll chronos
-      chronos.poll()
-
-    try:
-      # signal handlers guarantee that the shutdown Future will
-      # be assigned before state switches to Stopping
-      waitFor shutdown
-    except CatchableError as error:
-      error "Codex didn't shutdown correctly", error = error.msg
-      quit QuitFailure
-
-    notice "Exited codex"
-
-  of StartUpCommand.initNode:
-    discard
+  if config.nat == ValidIpAddress.init(IPv4_any()):
+    error "`--nat` cannot be set to the any (`0.0.0.0`) address"
+    quit QuitFailure
+
+  if config.nat == ValidIpAddress.init("127.0.0.1"):
+    warn "`--nat` is set to loopback, your node wont properly announce over the DHT"
+
+  if not(checkAndCreateDataDir((config.dataDir).string)):
+    # We are unable to access/create data folder or data folder's
+    # permissions are insecure.
+    quit QuitFailure
+
+  trace "Data dir initialized", dir = $config.dataDir
+
+  if not(checkAndCreateDataDir((config.dataDir / "repo"))):
+    # We are unable to access/create data folder or data folder's
+    # permissions are insecure.
+    quit QuitFailure
+
+  trace "Repo dir initialized", dir = config.dataDir / "repo"
+
+  var
+    state: CodexStatus
+    shutdown: Future[void]
+
+  let
+    keyPath =
+      if isAbsolute(config.netPrivKeyFile):
+        config.netPrivKeyFile
+      else:
+        config.dataDir / config.netPrivKeyFile
+
+    privateKey = setupKey(keyPath).expect("Should setup private key!")
+    server = try:
+      CodexServer.new(config, privateKey)
+    except Exception as exc:
+      error "Failed to start Codex", msg = exc.msg
+      quit QuitFailure
+
+  ## Ctrl+C handling
+  proc doShutdown() =
+    shutdown = server.stop()
+    state = CodexStatus.Stopping
+
+    notice "Stopping Codex"
+
+  proc controlCHandler() {.noconv.} =
+    when defined(windows):
+      # workaround for https://github.com/nim-lang/Nim/issues/4057
+      try:
+        setupForeignThreadGc()
+      except Exception as exc: raiseAssert exc.msg # shouldn't happen
+
+    notice "Shutting down after having received SIGINT"
+
+    doShutdown()
+
+  try:
+    setControlCHook(controlCHandler)
+  except Exception as exc: # TODO Exception
+    warn "Cannot set ctrl-c handler", msg = exc.msg
+
+  # equivalent SIGTERM handler
+  when defined(posix):
+    proc SIGTERMHandler(signal: cint) {.noconv.} =
+      notice "Shutting down after having received SIGTERM"
+
+      doShutdown()
+
+    c_signal(ansi_c.SIGTERM, SIGTERMHandler)
+
+  try:
+    waitFor server.start()
+  except CatchableError as error:
+    error "Codex failed to start", error = error.msg
+    # XXX ideally we'd like to issue a stop instead of quitting cold turkey,
+    # but this would mean we'd have to fix the implementation of all
+    # services so they won't crash if we attempt to stop them before they
+    # had a chance to start (currently you'll get a SISGSEV if you try to).
+    quit QuitFailure
+
+  state = CodexStatus.Running
+  while state == CodexStatus.Running:
+    try:
+      # poll chronos
+      chronos.poll()
+    except Exception as exc:
+      error "Unhandled exception in async proc, aborting", msg = exc.msg
+      quit QuitFailure
+
+  try:
+    # signal handlers guarantee that the shutdown Future will
+    # be assigned before state switches to Stopping
+    waitFor shutdown
+  except CatchableError as error:
+    error "Codex didn't shutdown correctly", error = error.msg
+    quit QuitFailure
+
+  notice "Exited codex"

========================================

@@ -22,12 +22,14 @@ import pkg/stew/io2
 import pkg/stew/shims/net as stewnet
 import pkg/datastore
 import pkg/ethers except Rng
+import pkg/stew/io2

 import ./node
 import ./conf
 import ./rng
 import ./rest/api
 import ./stores
+import ./slots
 import ./blockexchange
 import ./utils/fileutils
 import ./erasure
@@ -72,78 +74,71 @@ proc bootstrapInteractions(
     config = s.config
     repo = s.repoStore

-  if not config.persistence and not config.validator:
-    if config.ethAccount.isSome or config.ethPrivateKey.isSome:
-      warn "Ethereum account was set, but neither persistence nor validator is enabled"
-    return
-
-  if not config.ethAccount.isSome and not config.ethPrivateKey.isSome:
-    if config.persistence:
-      error "Persistence enabled, but no Ethereum account was set"
-    if config.validator:
-      error "Validator enabled, but no Ethereum account was set"
-    quit QuitFailure
-
-  let provider = JsonRpcProvider.new(config.ethProvider)
-  await waitForSync(provider)
-  var signer: Signer
-  if account =? config.ethAccount:
-    signer = provider.getSigner(account)
-  elif keyFile =? config.ethPrivateKey:
-    without isSecure =? checkSecureFile(keyFile):
-      error "Could not check file permissions: does Ethereum private key file exist?"
-      quit QuitFailure
-    if not isSecure:
-      error "Ethereum private key file does not have safe file permissions"
-      quit QuitFailure
-    without key =? keyFile.readAllChars():
-      error "Unable to read Ethereum private key file"
-      quit QuitFailure
-    without wallet =? EthWallet.new(key.strip(), provider):
-      error "Invalid Ethereum private key in file"
-      quit QuitFailure
-    signer = wallet
-
-  let deploy = Deployment.new(provider, config)
-  without marketplaceAddress =? await deploy.address(Marketplace):
-    error "No Marketplace address was specified or there is no known address for the current network"
-    quit QuitFailure
-
-  let marketplace = Marketplace.new(marketplaceAddress, signer)
-  let market = OnChainMarket.new(marketplace)
-  let clock = OnChainClock.new(provider)
-
-  var client: ?ClientInteractions
-  var host: ?HostInteractions
-  var validator: ?ValidatorInteractions
-
-  if config.validator or config.persistence:
-    s.codexNode.clock = clock
-  else:
-    s.codexNode.clock = SystemClock()
-
-  if config.persistence:
-    # This is used for simulation purposes. Normal nodes won't be compiled with this flag
-    # and hence the proof failure will always be 0.
-    when codex_enable_proof_failures:
-      let proofFailures = config.simulateProofFailures
-      if proofFailures > 0:
-        warn "Enabling proof failure simulation!"
-    else:
-      let proofFailures = 0
-      if config.simulateProofFailures > 0:
-        warn "Proof failure simulation is not enabled for this build! Configuration ignored"
-
-    let purchasing = Purchasing.new(market, clock)
-    let sales = Sales.new(market, clock, repo, proofFailures)
-    client = some ClientInteractions.new(clock, purchasing)
-    host = some HostInteractions.new(clock, sales)
-
-  if config.validator:
-    let validation = Validation.new(clock, market, config.validatorMaxSlots)
-    validator = some ValidatorInteractions.new(clock, validation)
-
-  s.codexNode.contracts = (client, host, validator)
+  if config.persistence:
+    if not config.ethAccount.isSome and not config.ethPrivateKey.isSome:
+      error "Persistence enabled, but no Ethereum account was set"
+      quit QuitFailure
+
+    let provider = JsonRpcProvider.new(config.ethProvider)
+    await waitForSync(provider)
+    var signer: Signer
+    if account =? config.ethAccount:
+      signer = provider.getSigner(account)
+    elif keyFile =? config.ethPrivateKey:
+      without isSecure =? checkSecureFile(keyFile):
+        error "Could not check file permissions: does Ethereum private key file exist?"
+        quit QuitFailure
+      if not isSecure:
+        error "Ethereum private key file does not have safe file permissions"
+        quit QuitFailure
+      without key =? keyFile.readAllChars():
+        error "Unable to read Ethereum private key file"
+        quit QuitFailure
+      without wallet =? EthWallet.new(key.strip(), provider):
+        error "Invalid Ethereum private key in file"
+        quit QuitFailure
+      signer = wallet
+
+    let deploy = Deployment.new(provider, config)
+    without marketplaceAddress =? await deploy.address(Marketplace):
+      error "No Marketplace address was specified or there is no known address for the current network"
+      quit QuitFailure
+
+    let marketplace = Marketplace.new(marketplaceAddress, signer)
+    let market = OnChainMarket.new(marketplace)
+    let clock = OnChainClock.new(provider)
+
+    var client: ?ClientInteractions
+    var host: ?HostInteractions
+    var validator: ?ValidatorInteractions
+
+    if config.validator or config.persistence:
+      s.codexNode.clock = clock
+    else:
+      s.codexNode.clock = SystemClock()
+
+    if config.persistence:
+      # This is used for simulation purposes. Normal nodes won't be compiled with this flag
+      # and hence the proof failure will always be 0.
+      when codex_enable_proof_failures:
+        let proofFailures = config.simulateProofFailures
+        if proofFailures > 0:
+          warn "Enabling proof failure simulation!"
+      else:
+        let proofFailures = 0
+        if config.simulateProofFailures > 0:
+          warn "Proof failure simulation is not enabled for this build! Configuration ignored"
+
+      let purchasing = Purchasing.new(market, clock)
+      let sales = Sales.new(market, clock, repo, proofFailures)
+      client = some ClientInteractions.new(clock, purchasing)
+      host = some HostInteractions.new(clock, sales)
+
+    if config.validator:
+      let validation = Validation.new(clock, market, config.validatorMaxSlots)
+      validator = some ValidatorInteractions.new(clock, validation)
+
+    s.codexNode.contracts = (client, host, validator)

 proc start*(s: CodexServer) {.async.} =
   trace "Starting codex node", config = $s.config
@@ -265,11 +260,41 @@ proc new*(
     blockDiscovery = DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
     engine = BlockExcEngine.new(repoStore, wallet, network, blockDiscovery, peerStore, pendingBlocks)
     store = NetworkStore.new(engine, repoStore)
+    prover = if config.prover:
+      if not fileAccessible($config.circomR1cs, {AccessFlags.Read}) and
+        endsWith($config.circomR1cs, ".r1cs"):
+        error "Circom R1CS file not accessible"
+        raise (ref Defect)(
+          msg: "r1cs file not readable, doesn't exist or wrong extension (.r1cs)")
+
+      if not fileAccessible($config.circomWasm, {AccessFlags.Read}) and
+        endsWith($config.circomWasm, ".wasm"):
+        error "Circom wasm file not accessible"
+        raise (ref Defect)(
+          msg: "wasm file not readable, doesn't exist or wrong extension (.wasm)")
+
+      let zkey = if not config.circomNoZkey:
+        if not fileAccessible($config.circomZkey, {AccessFlags.Read}) and
+          endsWith($config.circomZkey, ".zkey"):
+          error "Circom zkey file not accessible"
+          raise (ref Defect)(
+            msg: "zkey file not readable, doesn't exist or wrong extension (.zkey)")
+        $config.circomZkey
+      else: ""
+
+      some Prover.new(
+        store,
+        CircomCompat.init($config.circomR1cs, $config.circomWasm, zkey),
+        config.numProofSamples)
+    else:
+      none Prover
+
     codexNode = CodexNodeRef.new(
       switch = switch,
       networkStore = store,
       engine = engine,
+      prover = prover,
       discovery = discovery)

     restServer = RestServerRef.new(

========================================

@@ -7,9 +7,7 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.

-import pkg/upraises
-
-push: {.upraises: [].}
+{.push raises: [].}

 import std/os
 import std/terminal
@@ -33,30 +31,47 @@ import pkg/ethers
 import pkg/questionable
 import pkg/questionable/results

+import ./codextypes
 import ./discovery
 import ./logutils
 import ./stores
 import ./units
 import ./utils

-export units
-export net
+export units, net, codextypes, logutils
 export
   DefaultQuotaBytes,
   DefaultBlockTtl,
   DefaultBlockMaintenanceInterval,
   DefaultNumberOfBlocksToMaintainPerInterval

+proc defaultDataDir*(): string =
+  let dataDir = when defined(windows):
+    "AppData" / "Roaming" / "Codex"
+  elif defined(macosx):
+    "Library" / "Application Support" / "Codex"
+  else:
+    ".cache" / "codex"
+
+  getHomeDir() / dataDir
+
 const
   codex_enable_api_debug_peers* {.booldefine.} = false
   codex_enable_proof_failures* {.booldefine.} = false
   codex_use_hardhat* {.booldefine.} = false
   codex_enable_log_counter* {.booldefine.} = false

+  DefaultDataDir* = defaultDataDir()
+
 type
-  StartUpCommand* {.pure.} = enum
-    noCommand,
-    initNode
+  StartUpCmd* {.pure.} = enum
+    noCmd
+    persistence
+
+  PersistenceCmd* {.pure.} = enum
+    noCmd
+    prover

   LogKind* {.pure.} = enum
     Auto = "auto"
@@ -106,126 +121,125 @@ type
   dataDir* {.
     desc: "The directory where codex will store configuration and data"
-    defaultValue: defaultDataDir()
-    defaultValueDesc: ""
+    defaultValue: DefaultDataDir
+    defaultValueDesc: $DefaultDataDir
     abbr: "d"
     name: "data-dir" }: OutDir

+  listenAddrs* {.
+    desc: "Multi Addresses to listen on"
+    defaultValue: @[
+      MultiAddress.init("/ip4/0.0.0.0/tcp/0")
+      .expect("Should init multiaddress")]
+    defaultValueDesc: "/ip4/0.0.0.0/tcp/0"
+    abbr: "i"
+    name: "listen-addrs" }: seq[MultiAddress]
+
+  # TODO: change this once we integrate nat support
+  nat* {.
+    desc: "IP Addresses to announce behind a NAT"
+    defaultValue: ValidIpAddress.init("127.0.0.1")
+    defaultValueDesc: "127.0.0.1"
+    abbr: "a"
+    name: "nat" }: ValidIpAddress
+
+  discoveryIp* {.
+    desc: "Discovery listen address"
+    defaultValue: ValidIpAddress.init(IPv4_any())
+    defaultValueDesc: "0.0.0.0"
+    abbr: "e"
+    name: "disc-ip" }: ValidIpAddress
+
+  discoveryPort* {.
+    desc: "Discovery (UDP) port"
+    defaultValue: 8090.Port
+    defaultValueDesc: "8090"
+    abbr: "u"
+    name: "disc-port" }: Port
+
+  netPrivKeyFile* {.
+    desc: "Source of network (secp256k1) private key file path or name"
+    defaultValue: "key"
+    name: "net-privkey" }: string
+
+  bootstrapNodes* {.
+    desc: "Specifies one or more bootstrap nodes to use when connecting to the network"
+    abbr: "b"
+    name: "bootstrap-node" }: seq[SignedPeerRecord]
+
+  maxPeers* {.
+    desc: "The maximum number of peers to connect to"
+    defaultValue: 160
+    name: "max-peers" }: int
+
+  agentString* {.
+    defaultValue: "Codex"
+    desc: "Node agent string which is used as identifier in network"
+    name: "agent-string" }: string
+
+  apiBindAddress* {.
+    desc: "The REST API bind address"
+    defaultValue: "127.0.0.1"
+    name: "api-bindaddr"
+  }: string
+
+  apiPort* {.
+    desc: "The REST Api port",
+    defaultValue: 8080.Port
+    defaultValueDesc: "8080"
+    name: "api-port"
+    abbr: "p" }: Port
+
+  repoKind* {.
+    desc: "Backend for main repo store (fs, sqlite)"
+    defaultValueDesc: "fs"
+    defaultValue: repoFS
+    name: "repo-kind" }: RepoKind
+
+  storageQuota* {.
+    desc: "The size of the total storage quota dedicated to the node"
+    defaultValue: DefaultQuotaBytes
+    defaultValueDesc: $DefaultQuotaBytes
+    name: "storage-quota"
+    abbr: "q" }: NBytes
+
+  blockTtl* {.
+    desc: "Default block timeout in seconds - 0 disables the ttl"
+    defaultValue: DefaultBlockTtl
+    defaultValueDesc: $DefaultBlockTtl
+    name: "block-ttl"
+    abbr: "t" }: Duration
+
+  blockMaintenanceInterval* {.
+    desc: "Time interval in seconds - determines frequency of block maintenance cycle: how often blocks are checked for expiration and cleanup"
+    defaultValue: DefaultBlockMaintenanceInterval
+    defaultValueDesc: $DefaultBlockMaintenanceInterval
+    name: "block-mi" }: Duration
+
+  blockMaintenanceNumberOfBlocks* {.
+    desc: "Number of blocks to check every maintenance cycle"
+    defaultValue: DefaultNumberOfBlocksToMaintainPerInterval
+    defaultValueDesc: $DefaultNumberOfBlocksToMaintainPerInterval
+    name: "block-mn" }: int
+
+  cacheSize* {.
+    desc: "The size of the block cache, 0 disables the cache - might help on slow hardrives"
+    defaultValue: 0
+    defaultValueDesc: "0"
+    name: "cache-size"
+    abbr: "c" }: NBytes
+
+  logFile* {.
+    desc: "Logs to file"
+    defaultValue: string.none
+    name: "log-file"
+    hidden
+  .}: Option[string]
+
   case cmd* {.
-    command
-    defaultValue: noCommand }: StartUpCommand
-
-  of noCommand:
-    listenAddrs* {.
-      desc: "Multi Addresses to listen on"
-      defaultValue: @[
-        MultiAddress.init("/ip4/0.0.0.0/tcp/0")
-        .expect("Should init multiaddress")]
-      defaultValueDesc: "/ip4/0.0.0.0/tcp/0"
-      abbr: "i"
-      name: "listen-addrs" }: seq[MultiAddress]
-
-    # TODO: change this once we integrate nat support
-    nat* {.
-      desc: "IP Addresses to announce behind a NAT"
-      defaultValue: ValidIpAddress.init("127.0.0.1")
-      defaultValueDesc: "127.0.0.1"
-      abbr: "a"
-      name: "nat" }: ValidIpAddress
-
-    discoveryIp* {.
-      desc: "Discovery listen address"
-      defaultValue: ValidIpAddress.init(IPv4_any())
-      defaultValueDesc: "0.0.0.0"
-      abbr: "e"
-      name: "disc-ip" }: ValidIpAddress
-
-    discoveryPort* {.
-      desc: "Discovery (UDP) port"
-      defaultValue: 8090.Port
-      defaultValueDesc: "8090"
-      abbr: "u"
-      name: "disc-port" }: Port
-
-    netPrivKeyFile* {.
-      desc: "Source of network (secp256k1) private key file path or name"
-      defaultValue: "key"
-      name: "net-privkey" }: string
-
-    bootstrapNodes* {.
-      desc: "Specifies one or more bootstrap nodes to use when connecting to the network"
-      abbr: "b"
-      name: "bootstrap-node" }: seq[SignedPeerRecord]
-
-    maxPeers* {.
-      desc: "The maximum number of peers to connect to"
-      defaultValue: 160
-      name: "max-peers" }: int
-
-    agentString* {.
-      defaultValue: "Codex"
-      desc: "Node agent string which is used as identifier in network"
-      name: "agent-string" }: string
-
-    apiBindAddress* {.
-      desc: "The REST API bind address"
-      defaultValue: "127.0.0.1"
-      name: "api-bindaddr"
-    }: string
-
-    apiPort* {.
-      desc: "The REST Api port",
-      defaultValue: 8080.Port
-      defaultValueDesc: "8080"
-      name: "api-port"
-      abbr: "p" }: Port
-
-    repoKind* {.
-      desc: "Backend for main repo store (fs, sqlite)"
-      defaultValueDesc: "fs"
-      defaultValue: repoFS
-      name: "repo-kind" }: RepoKind
-
-    storageQuota* {.
-      desc: "The size of the total storage quota dedicated to the node"
-      defaultValue: DefaultQuotaBytes
-      defaultValueDesc: $DefaultQuotaBytes
-      name: "storage-quota"
-      abbr: "q" }: NBytes
-
-    blockTtl* {.
-      desc: "Default block timeout in seconds - 0 disables the ttl"
-      defaultValue: DefaultBlockTtl
-      defaultValueDesc: $DefaultBlockTtl
-      name: "block-ttl"
-      abbr: "t" }: Duration
-
-    blockMaintenanceInterval* {.
-      desc: "Time interval in seconds - determines frequency of block maintenance cycle: how often blocks are checked for expiration and cleanup"
-      defaultValue: DefaultBlockMaintenanceInterval
-      defaultValueDesc: $DefaultBlockMaintenanceInterval
-      name: "block-mi" }: Duration
-
-    blockMaintenanceNumberOfBlocks* {.
-      desc: "Number of blocks to check every maintenance cycle"
-      defaultValue: DefaultNumberOfBlocksToMaintainPerInterval
-      defaultValueDesc: $DefaultNumberOfBlocksToMaintainPerInterval
-      name: "block-mn" }: int
-
-    cacheSize* {.
-      desc: "The size of the block cache, 0 disables the cache - might help on slow hardrives"
-      defaultValue: 0
-      defaultValueDesc: "0"
-      name: "cache-size"
-      abbr: "c" }: NBytes
-
-    persistence* {.
-      desc: "Enables persistence mechanism, requires an Ethereum node"
-      defaultValue: false
-      name: "persistence"
-    .}: bool
-
+    defaultValue: noCmd
+    command }: StartUpCmd
+  of persistence:
     ethProvider* {.
       desc: "The URL of the JSON-RPC API of the Ethereum node"
       defaultValue: "ws://localhost:8545"
@@ -235,21 +249,32 @@ type
     ethAccount* {.
       desc: "The Ethereum account that is used for storage contracts"
       defaultValue: EthAddress.none
+      defaultValueDesc: ""
      name: "eth-account"
    .}: Option[EthAddress]

     ethPrivateKey* {.
       desc: "File containing Ethereum private key for storage contracts"
       defaultValue: string.none
+      defaultValueDesc: ""
       name: "eth-private-key"
     .}: Option[string]

     marketplaceAddress* {.
       desc: "Address of deployed Marketplace contract"
       defaultValue: EthAddress.none
+      defaultValueDesc: ""
       name: "marketplace-address"
     .}: Option[EthAddress]

+    # TODO: should go behind a feature flag
+    simulateProofFailures* {.
+      desc: "Simulates proof failures once every N proofs. 0 = disabled."
+      defaultValue: 0
+      name: "simulate-proof-failures"
+      hidden
+    .}: int
+
     validator* {.
       desc: "Enables validator, requires an Ethereum node"
       defaultValue: false
@@ -262,28 +287,85 @@ type
       name: "validator-max-slots"
     .}: int

-    simulateProofFailures* {.
-      desc: "Simulates proof failures once every N proofs. 0 = disabled."
-      defaultValue: 0
-      name: "simulate-proof-failures"
-      hidden
-    .}: int
-
-    logFile* {.
-      desc: "Logs to file"
-      defaultValue: string.none
-      name: "log-file"
-      hidden
-    .}: Option[string]
-
-  of initNode:
-    discard
+    case persistenceCmd* {.
+      defaultValue: noCmd
+      command }: PersistenceCmd
+
+    of PersistenceCmd.prover:
+      circomR1cs* {.
+        desc: "The r1cs file for the storage circuit"
+        defaultValue: $DefaultDataDir / "circuits" / "proof_main.r1cs"
+        defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.r1cs"
+        name: "circom-r1cs"
+      .}: InputFile
+
+      circomWasm* {.
+        desc: "The wasm file for the storage circuit"
+        defaultValue: $DefaultDataDir / "circuits" / "proof_main.wasm"
+        defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.wasm"
+        name: "circom-wasm"
+      .}: InputFile
+
+      circomZkey* {.
+        desc: "The zkey file for the storage circuit"
+        defaultValue: $DefaultDataDir / "circuits" / "proof_main.zkey"
+        defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.zkey"
+        name: "circom-zkey"
+      .}: InputFile
+
+      # TODO: should probably be hidden and behind a feature flag
+      circomNoZkey* {.
+        desc: "Ignore the zkey file - use only for testing!"
+        defaultValue: false
+        name: "circom-no-zkey"
+      .}: bool
+
+      numProofSamples* {.
+        desc: "Number of samples to prove"
+        defaultValue: DefaultSamplesNum
+        defaultValueDesc: $DefaultSamplesNum
+        name: "proof-samples" }: int
+
+      maxSlotDepth* {.
+        desc: "The maximum depth of the slot tree"
+        defaultValue: DefaultMaxSlotDepth
+        defaultValueDesc: $DefaultMaxSlotDepth
+        name: "max-slot-depth" }: int
+
+      maxDatasetDepth* {.
+        desc: "The maximum depth of the dataset tree"
+        defaultValue: DefaultMaxDatasetDepth
+        defaultValueDesc: $DefaultMaxDatasetDepth
+        name: "max-dataset-depth" }: int
+
+      maxBlockDepth* {.
+        desc: "The maximum depth of the network block merkle tree"
+        defaultValue: DefaultBlockDepth
+        defaultValueDesc: $DefaultBlockDepth
+        name: "max-block-depth" }: int
+
+      maxCellElms* {.
+        desc: "The maximum number of elements in a cell"
+        defaultValue: DefaultCellElms
+        defaultValueDesc: $DefaultCellElms
+        name: "max-cell-elements" }: int
+
+    of PersistenceCmd.noCmd:
+      discard
+
+  of StartUpCmd.noCmd:
+    discard # end of persistence

   EthAddress* = ethers.Address

 logutils.formatIt(LogFormat.textLines, EthAddress): it.short0xHexLog
 logutils.formatIt(LogFormat.json, EthAddress): %it

+func persistence*(self: CodexConf): bool =
+  self.cmd == StartUpCmd.persistence
+
+func prover*(self: CodexConf): bool =
+  self.persistence and self.persistenceCmd == PersistenceCmd.prover
+
 proc getCodexVersion(): string =
   let tag = strip(staticExec("git tag"))
   if tag.isEmptyOrWhitespace:
@@ -308,16 +390,6 @@ const
     "Codex revision: " & codexRevision & "\p" &
     nimBanner

-proc defaultDataDir*(): string =
-  let dataDir = when defined(windows):
-    "AppData" / "Roaming" / "Codex"
-  elif defined(macosx):
-    "Library" / "Application Support" / "Codex"
-  else:
-    ".cache" / "codex"
-
-  getHomeDir() / dataDir
-
 proc parseCmdArg*(T: typedesc[MultiAddress],
                   input: string): MultiAddress
                   {.upraises: [ValueError, LPError].} =
@@ -326,7 +398,7 @@ proc parseCmdArg*(T: typedesc[MultiAddress],
   if res.isOk:
     ma = res.get()
   else:
-    warn "Invalid MultiAddress", input=input, error=res.error()
+    warn "Invalid MultiAddress", input=input, error = res.error()
     quit QuitFailure
   ma
@@ -334,10 +406,10 @@ proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
   var res: SignedPeerRecord
   try:
     if not res.fromURI(uri):
-      warn "Invalid SignedPeerRecord uri", uri=uri
+      warn "Invalid SignedPeerRecord uri", uri = uri
       quit QuitFailure
   except CatchableError as exc:
-    warn "Invalid SignedPeerRecord uri", uri=uri, error=exc.msg
+    warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg
     quit QuitFailure
   res
@@ -348,7 +420,7 @@ proc parseCmdArg*(T: type NBytes, val: string): T =
   var num = 0'i64
   let count = parseSize(val, num, alwaysBin = true)
   if count == 0:
-    warn "Invalid number of bytes", nbytes=val
+    warn "Invalid number of bytes", nbytes = val
     quit QuitFailure
   NBytes(num)
@@ -356,7 +428,7 @@ proc parseCmdArg*(T: type Duration, val: string): T =
   var dur: Duration
   let count = parseDuration(val, dur)
   if count == 0:
-    warn "Invalid duration parse", dur=dur
+    warn "Cannot parse duration", dur = dur
     quit QuitFailure
   dur

========================================

@@ -116,6 +116,9 @@ switch("define", "chronicles_sinks=textlines[dynamic],json[dynamic],textlines[dy
 switch("define", "use_asm_syntax_intel=false")
 switch("define", "ctt_asm=false")

+# Allow the use of old-style case objects for nim config compatibility
+switch("define", "nimOldCaseObjects")
+
 # begin Nimble config (version 1)
 when system.fileExists("nimble.paths"):
   include "nimble.paths"

========================================

@@ -20,4 +20,3 @@ proc address*(_: type Marketplace, dummyVerifier = false): Address =
     hardhatMarketWithDummyVerifier
   else:
     hardhatMarketAddress
-

========================================

@@ -13,10 +13,19 @@ method getChainId*(provider: MockProvider): Future[UInt256] {.async.} =
   return provider.chainId

 proc configFactory(): CodexConf =
-  CodexConf(cmd: noCommand, nat: ValidIpAddress.init("127.0.0.1"), discoveryIp: ValidIpAddress.init(IPv4_any()), metricsAddress: ValidIpAddress.init("127.0.0.1"))
+  CodexConf(
+    cmd: StartUpCmd.persistence,
+    nat: ValidIpAddress.init("127.0.0.1"),
+    discoveryIp: ValidIpAddress.init(IPv4_any()),
+    metricsAddress: ValidIpAddress.init("127.0.0.1"))

 proc configFactory(marketplace: Option[EthAddress]): CodexConf =
-  CodexConf(cmd: noCommand, nat: ValidIpAddress.init("127.0.0.1"), discoveryIp: ValidIpAddress.init(IPv4_any()), metricsAddress: ValidIpAddress.init("127.0.0.1"), marketplaceAddress: marketplace)
+  CodexConf(
+    cmd: StartUpCmd.persistence,
+    nat: ValidIpAddress.init("127.0.0.1"),
+    discoveryIp: ValidIpAddress.init(IPv4_any()),
+    metricsAddress: ValidIpAddress.init("127.0.0.1"),
+    marketplaceAddress: marketplace)

 asyncchecksuite "Deployment":
   let provider = MockProvider()

========================================

@@ -1,10 +1,7 @@
-import pkg/questionable
-
 type
-  CliOption* = object of RootObj
-    nodeIdx*: ?int
-    key*: string
-    value*: string
+  CliOption* = object
+    key*: string # option key, including `--`
+    value*: string # option value

 proc `$`*(option: CliOption): string =
   var res = option.key

========================================

@@ -1,61 +1,295 @@
 import std/options
+import std/os
 import std/sequtils
+import std/strutils
+import std/sugar
+import std/tables
+from pkg/chronicles import LogLevel
+import pkg/codex/conf
 import pkg/codex/units
+import pkg/confutils
+import pkg/confutils/defs
+import libp2p except setup
+import pkg/questionable
 import ./clioption
-import ./nodeconfig

-export nodeconfig
 export clioption
+export confutils

 type
-  CodexConfig* = ref object of NodeConfig
-    numNodes*: int
-    cliOptions*: seq[CliOption]
-    logTopics*: seq[string]
-
-proc nodes*(config: CodexConfig, numNodes: int): CodexConfig =
-  if numNodes < 0:
-    raise newException(ValueError, "numNodes must be >= 0")
-
-  var startConfig = config
-  startConfig.numNodes = numNodes
-  return startConfig
-
-proc simulateProofFailuresFor*(
-  config: CodexConfig,
-  providerIdx: int,
-  failEveryNProofs: int
-): CodexConfig =
-
-  if providerIdx > config.numNodes - 1:
-    raise newException(ValueError, "provider index out of bounds")
-
-  var startConfig = config
-  startConfig.cliOptions.add(
-    CliOption(
-      nodeIdx: some providerIdx,
-      key: "--simulate-proof-failures",
-      value: $failEveryNProofs
-    )
-  )
-  return startConfig
-
-proc withLogTopics*(
-  config: CodexConfig,
-  topics: varargs[string]
-): CodexConfig =
-
-  var startConfig = config
-  startConfig.logTopics = startConfig.logTopics.concat(@topics)
-  return startConfig
-
-proc withStorageQuota*(
-  config: CodexConfig,
-  quota: NBytes
-): CodexConfig =
-
-  var startConfig = config
-  startConfig.cliOptions.add(
-    CliOption(key: "--storage-quota", value: $quota)
-  )
-  return startConfig
+  CodexConfigs* = object
+    configs*: seq[CodexConfig]
+
+  CodexConfig* = object
+    cliOptions: Table[StartUpCmd, Table[string, CliOption]]
+    cliPersistenceOptions: Table[PersistenceCmd, Table[string, CliOption]]
+    debugEnabled*: bool
+
+  CodexConfigError* = object of CatchableError
+
+proc cliArgs*(config: CodexConfig): seq[string] {.gcsafe, raises: [CodexConfigError].}
+
+proc raiseCodexConfigError(msg: string) {.raises: [CodexConfigError].} =
+  raise newException(CodexConfigError, msg)
+
+template convertError(body) =
+  try:
+    body
+  except CatchableError as e:
+    raiseCodexConfigError e.msg
+
+proc init*(_: type CodexConfigs, nodes = 1): CodexConfigs {.raises: [].} =
+  CodexConfigs(configs: newSeq[CodexConfig](nodes))
+
+func nodes*(self: CodexConfigs): int =
+  self.configs.len
+
+proc checkBounds(self: CodexConfigs, idx: int) {.raises: [CodexConfigError].} =
+  if idx notin 0..<self.configs.len:
+    raiseCodexConfigError "index must be in bounds of the number of nodes"
+
+proc buildConfig(
+  config: CodexConfig,
+  msg: string): CodexConf {.raises: [CodexConfigError].} =
+
+  proc postFix(msg: string): string =
+    if msg.len > 0:
+      ": " & msg
+    else: ""
+
+  try:
+    return CodexConf.load(cmdLine = config.cliArgs, quitOnFailure = false)
+  except ConfigurationError as e:
+    raiseCodexConfigError msg & e.msg.postFix
+  except Exception as e:
+    ## TODO: remove once proper exception handling added to nim-confutils
+    raiseCodexConfigError msg & e.msg.postFix
+
+proc addCliOption*(
+  config: var CodexConfig,
+  group = PersistenceCmd.noCmd,
+  cliOption: CliOption) {.raises: [CodexConfigError].} =
+
+  var options = config.cliPersistenceOptions.getOrDefault(group)
+  options[cliOption.key] = cliOption # overwrite if already exists
+  config.cliPersistenceOptions[group] = options
+  discard config.buildConfig("Invalid cli arg " & $cliOption)
+
+proc addCliOption*(
+  config: var CodexConfig,
+  group = PersistenceCmd.noCmd,
+  key: string, value = "") {.raises: [CodexConfigError].} =
+
+  config.addCliOption(group, CliOption(key: key, value: value))
+
+proc addCliOption*(
+  config: var CodexConfig,
+  group = StartUpCmd.noCmd,
+  cliOption: CliOption) {.raises: [CodexConfigError].} =
+
+  var options = config.cliOptions.getOrDefault(group)
+  options[cliOption.key] = cliOption # overwrite if already exists
+  config.cliOptions[group] = options
+  discard config.buildConfig("Invalid cli arg " & $cliOption)
+
+proc addCliOption*(
+  config: var CodexConfig,
+  group = StartUpCmd.noCmd,
+  key: string, value = "") {.raises: [CodexConfigError].} =
+
+  config.addCliOption(group, CliOption(key: key, value: value))
+
+proc addCliOption*(
+  config: var CodexConfig,
+  cliOption: CliOption) {.raises: [CodexConfigError].} =
+
+  config.addCliOption(StartUpCmd.noCmd, cliOption)
+
+proc addCliOption*(
+  config: var CodexConfig,
+  key: string, value = "") {.raises: [CodexConfigError].} =
+
+  config.addCliOption(StartUpCmd.noCmd, CliOption(key: key, value: value))
+
+proc cliArgs*(
+  config: CodexConfig): seq[string] {.gcsafe, raises: [CodexConfigError].} =
+  ## converts CodexConfig cli options and command groups in a sequence of args
+  ## and filters out cli options by node index if provided in the CliOption
+
+  var args: seq[string] = @[]
+
+  convertError:
+    for cmd in StartUpCmd:
+      if config.cliOptions.hasKey(cmd):
+        if cmd != StartUpCmd.noCmd:
+          args.add $cmd
+        var opts = config.cliOptions[cmd].values.toSeq
+        args = args.concat( opts.map(o => $o) )
+
+    for cmd in PersistenceCmd:
+      if config.cliPersistenceOptions.hasKey(cmd):
+        if cmd != PersistenceCmd.noCmd:
+          args.add $cmd
+        var opts = config.cliPersistenceOptions[cmd].values.toSeq
+        args = args.concat( opts.map(o => $o) )
+
+    return args
+
+proc logFile*(config: CodexConfig): ?string {.raises: [CodexConfigError].} =
+  let built = config.buildConfig("Invalid codex config cli params")
+  built.logFile
+
+proc logLevel*(config: CodexConfig): LogLevel {.raises: [CodexConfigError].} =
+  convertError:
+    let built = config.buildConfig("Invalid codex config cli params")
+    return parseEnum[LogLevel](built.logLevel.toUpperAscii)
+
+proc debug*(
+  self: CodexConfigs,
+  idx: int,
+  enabled = true): CodexConfigs {.raises: [CodexConfigError].} =
+  ## output log in stdout for a specific node in the group
+
+  self.checkBounds idx
+
+  var startConfig = self
+  startConfig.configs[idx].debugEnabled = enabled
+  return startConfig
+
+proc debug*(self: CodexConfigs, enabled = true): CodexConfigs {.raises: [].} =
+  ## output log in stdout for all nodes in group
+  var startConfig = self
+  for config in startConfig.configs.mitems:
+    config.debugEnabled = enabled
+  return startConfig
+
+proc withLogFile*(
+  self: CodexConfigs,
+  idx: int): CodexConfigs {.raises: [CodexConfigError].} =
+
+  self.checkBounds idx
+
+  var startConfig = self
+  startConfig.configs[idx].addCliOption("--log-file", "<updated_in_test>")
+  return startConfig
+
+proc withLogFile*(
+  self: CodexConfigs): CodexConfigs {.raises: [CodexConfigError].} =
+  ## typically called from test, sets config such that a log file should be
+  ## created
+  var startConfig = self
+  for config in startConfig.configs.mitems:
+    config.addCliOption("--log-file", "<updated_in_test>")
+  return startConfig
+
+proc withLogFile*(
+  self: var CodexConfig,
+  logFile: string) {.raises: [CodexConfigError].} =
+  ## typically called internally from the test suite, sets a log file path to
+  ## be created during the test run, for a specified node in the group
+  self.addCliOption("--log-file", logFile)
+
+proc withLogLevel*(
+  self: CodexConfig,
+  level: LogLevel | string): CodexConfig {.raises: [CodexConfigError].} =
+
+  var config = self
+  config.addCliOption("--log-level", $level)
+  return config
+
+proc withLogLevel*(
+  self: CodexConfigs,
+  idx: int,
+  level: LogLevel | string): CodexConfigs {.raises: [CodexConfigError].} =
+
+  self.checkBounds idx
+
+  var startConfig = self
+  startConfig.configs[idx].addCliOption("--log-level", $level)
+  return startConfig
+
+proc withLogLevel*(
+  self: CodexConfigs,
+  level: LogLevel | string): CodexConfigs {.raises: [CodexConfigError].} =
+
+  var startConfig = self
+  for config in startConfig.configs.mitems:
+    config.addCliOption("--log-level", $level)
+  return startConfig
+
+proc withSimulateProofFailures*(
+  self: CodexConfigs,
+  idx: int,
+  failEveryNProofs: int
+): CodexConfigs {.raises: [CodexConfigError].} =
+
+  self.checkBounds idx
+
+  var startConfig = self
+  startConfig.configs[idx].addCliOption(
+    StartUpCmd.persistence, "--simulate-proof-failures", $failEveryNProofs)
+  return startConfig
+
+proc withSimulateProofFailures*(
+  self: CodexConfigs,
+  failEveryNProofs: int): CodexConfigs {.raises: [CodexConfigError].} =
+
+  var startConfig = self
+  for config in startConfig.configs.mitems:
+    config.addCliOption(
+      StartUpCmd.persistence, "--simulate-proof-failures", $failEveryNProofs)
+  return startConfig
+
+proc logLevelWithTopics(
+  config: CodexConfig,
+  topics: varargs[string]): string {.raises: [CodexConfigError].} =
+
+  convertError:
+    var logLevel = LogLevel.INFO
+    let built = config.buildConfig("Invalid codex config cli params")
+    logLevel = parseEnum[LogLevel](built.logLevel.toUpperAscii)
+    let level = $logLevel & ";TRACE: " & topics.join(",")
+    return level
+
+proc withLogTopics*(
+  self: CodexConfigs,
+  idx: int,
+  topics: varargs[string]): CodexConfigs {.raises: [CodexConfigError].} =
+
+  self.checkBounds idx
+
+  convertError:
+    let config = self.configs[idx]
+    let level = config.logLevelWithTopics(topics)
+    var startConfig = self
+    return startConfig.withLogLevel(idx, level)
+
+proc withLogTopics*(
+  self: CodexConfigs,
+  topics: varargs[string]
+): CodexConfigs {.raises: [CodexConfigError].} =
+
+  var startConfig = self
+  for config in startConfig.configs.mitems:
+    let level = config.logLevelWithTopics(topics)
+    config = config.withLogLevel(level)
+  return startConfig
+
+proc withStorageQuota*(
+  self: CodexConfigs,
+  idx: int,
+  quota: NBytes): CodexConfigs {.raises: [CodexConfigError].} =
+
+  self.checkBounds idx
+
+  var startConfig = self
+  startConfig.configs[idx].addCliOption("--storage-quota", $quota)
+  return startConfig
+
+proc withStorageQuota*(
+  self: CodexConfigs,
+  quota: NBytes): CodexConfigs {.raises: [CodexConfigError].} =
+
+  var startConfig = self
+  for config in startConfig.configs.mitems:
+    config.addCliOption("--storage-quota", $quota)
+  return startConfig

========================================

@@ -40,17 +40,17 @@ method onOutputLineCaptured(node: CodexProcess, line: string) =
   discard

 proc dataDir(node: CodexProcess): string =
-  let config = CodexConf.load(cmdLine = node.arguments)
+  let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
   return config.dataDir.string

 proc ethAccount*(node: CodexProcess): Address =
-  let config = CodexConf.load(cmdLine = node.arguments)
+  let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
   without ethAccount =? config.ethAccount:
     raiseAssert "eth account not set"
   return Address(ethAccount)

 proc apiUrl*(node: CodexProcess): string =
-  let config = CodexConf.load(cmdLine = node.arguments)
+  let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
   return "http://" & config.apiBindAddress & ":" & $config.apiPort & "/api/codex/v1"

 proc client*(node: CodexProcess): CodexClient =

========================================

@@ -1,6 +1,15 @@
-import ./nodeconfig
-
-export nodeconfig
-
 type
-  HardhatConfig* = ref object of NodeConfig
+  HardhatConfig* = object
+    logFile*: bool
+    debugEnabled*: bool
+
+proc debug*(self: HardhatConfig, enabled = true): HardhatConfig =
+  ## output log in stdout
+  var config = self
+  config.debugEnabled = enabled
+  return config
+
+proc withLogFile*(self: HardhatConfig, logToFile: bool = true): HardhatConfig =
+  var config = self
+  config.logFile = logToFile
+  return config

========================================

@@ -3,13 +3,16 @@ import std/sequtils
 import std/strutils
 import std/sugar
 import std/times
+import pkg/codex/conf
 import pkg/codex/logutils
 import pkg/chronos/transports/stream
 import pkg/ethers
-import ./hardhatprocess
+import pkg/questionable
+import ./codexconfig
 import ./codexprocess
 import ./hardhatconfig
-import ./codexconfig
+import ./hardhatprocess
+import ./nodeconfigs
 import ../asynctest
 import ../checktest
@@ -24,16 +27,15 @@ type
   RunningNode* = ref object
     role*: Role
     node*: NodeProcess

-  NodeConfigs* = object
-    clients*: CodexConfig
-    providers*: CodexConfig
-    validators*: CodexConfig
-    hardhat*: HardhatConfig
-
   Role* {.pure.} = enum
     Client,
     Provider,
     Validator,
     Hardhat

+  MultiNodeSuiteError = object of CatchableError
+
+proc raiseMultiNodeSuiteError(msg: string) =
+  raise newException(MultiNodeSuiteError, msg)
+
 proc nextFreePort(startPort: int): Future[int] {.async.} =
@@ -79,6 +81,7 @@ template multinodesuite*(name: string, body: untyped) =
     var sanitized = pathSegment
     for invalid in invalidFilenameChars.items:
       sanitized = sanitized.replace(invalid, '_')
+                           .replace(' ', '_')
     sanitized

   proc getLogFile(role: Role, index: ?int): string =
@@ -87,7 +90,7 @@ template multinodesuite*(name: string, body: untyped) =
     var logDir = currentSourcePath.parentDir() /
       "logs" /
-      sanitize($starttime & " " & name) /
+      sanitize($starttime & "__" & name) /
       sanitize($currentTestName)
     createDir(logDir)
@@ -110,53 +113,56 @@ template multinodesuite*(name: string, body: untyped) =
       args.add "--log-file=" & updatedLogFile

     let node = await HardhatProcess.startNode(args, config.debugEnabled, "hardhat")
-    await node.waitUntilStarted()
+    try:
+      await node.waitUntilStarted()
+    except NodeProcessError as e:
+      raiseMultiNodeSuiteError "hardhat node not started: " & e.msg
     trace "hardhat node started"
     return node

   proc newCodexProcess(roleIdx: int,
-                       config: CodexConfig,
+                       conf: CodexConfig,
                        role: Role
   ): Future[NodeProcess] {.async.} =

     let nodeIdx = running.len
-    var conf = config
+    var config = conf

     if nodeIdx > accounts.len - 1:
-      raiseAssert("Cannot start node at nodeIdx " & $nodeIdx &
-        ", not enough eth accounts.")
+      raiseMultiNodeSuiteError "Cannot start node at nodeIdx " & $nodeIdx &
+        ", not enough eth accounts."

     let datadir = getTempDir() / "Codex" /
       sanitize($starttime) /
       sanitize($role & "_" & $roleIdx)

-    if conf.logFile:
-      let updatedLogFile = getLogFile(role, some roleIdx)
-      conf.cliOptions.add CliOption(key: "--log-file", value: updatedLogFile)
-
-    let logLevel = conf.logLevel |? LogLevel.INFO
-    if conf.logTopics.len > 0:
-      conf.cliOptions.add CliOption(
-        key: "--log-level",
-        value: $logLevel & ";TRACE: " & conf.logTopics.join(",")
-      )
-    else:
-      conf.cliOptions.add CliOption(key: "--log-level", value: $logLevel)
-
-    var args = conf.cliOptions.map(o => $o)
-      .concat(@[
-        "--api-port=" & $ await nextFreePort(8080 + nodeIdx),
-        "--data-dir=" & datadir,
-        "--nat=127.0.0.1",
-        "--listen-addrs=/ip4/127.0.0.1/tcp/0",
-        "--disc-ip=127.0.0.1",
-        "--disc-port=" & $ await nextFreePort(8090 + nodeIdx),
-        "--eth-account=" & $accounts[nodeIdx]])
-
-    let node = await CodexProcess.startNode(args, conf.debugEnabled, $role & $roleIdx)
-    await node.waitUntilStarted()
-    trace "node started", nodeName = $role & $roleIdx
+    try:
+      if config.logFile.isSome:
+        let updatedLogFile = getLogFile(role, some roleIdx)
+        config.withLogFile(updatedLogFile)
+
+      config.addCliOption("--api-port", $ await nextFreePort(8080 + nodeIdx))
+      config.addCliOption("--data-dir", datadir)
+      config.addCliOption("--nat", "127.0.0.1")
+      config.addCliOption("--listen-addrs", "/ip4/127.0.0.1/tcp/0")
+      config.addCliOption("--disc-ip", "127.0.0.1")
+      config.addCliOption("--disc-port", $ await nextFreePort(8090 + nodeIdx))
+
+    except CodexConfigError as e:
+      raiseMultiNodeSuiteError "invalid cli option, error: " & e.msg
+
+    let node = await CodexProcess.startNode(
+      config.cliArgs,
+      config.debugEnabled,
+      $role & $roleIdx
+    )
+
+    try:
+      await node.waitUntilStarted()
+      trace "node started", nodeName = $role & $roleIdx
+    except NodeProcessError as e:
+      raiseMultiNodeSuiteError "node not started, error: " & e.msg

     return node
@@ -184,85 +190,36 @@ template multinodesuite*(name: string, body: untyped) =
       if r.role == Role.Validator:
         CodexProcess(r.node)

-  proc startHardhatNode(): Future[NodeProcess] {.async.} =
-    var config = nodeConfigs.hardhat
+  proc startHardhatNode(config: HardhatConfig): Future[NodeProcess] {.async.} =
     return await newHardhatProcess(config, Role.Hardhat)

-  proc startClientNode(): Future[NodeProcess] {.async.} =
+  proc startClientNode(conf: CodexConfig): Future[NodeProcess] {.async.} =
     let clientIdx = clients().len
-    var config = nodeConfigs.clients
-    config.cliOptions.add CliOption(key: "--persistence")
+    var config = conf
+    config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len])
     return await newCodexProcess(clientIdx, config, Role.Client)

-  proc startProviderNode(): Future[NodeProcess] {.async.} =
+  proc startProviderNode(conf: CodexConfig): Future[NodeProcess] {.async.} =
     let providerIdx = providers().len
-    var config = nodeConfigs.providers
-    config.cliOptions.add CliOption(key: "--bootstrap-node", value: bootstrap)
-    config.cliOptions.add CliOption(key: "--persistence")
-
-    # filter out provider options by provided index
-    config.cliOptions = config.cliOptions.filter(
-      o => (let idx = o.nodeIdx |? providerIdx; idx == providerIdx)
-    )
-
+    var config = conf
+    config.addCliOption("--bootstrap-node", bootstrap)
+    config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len])
+    config.addCliOption(PersistenceCmd.prover, "--circom-r1cs", "tests/circuits/fixtures/proof_main.r1cs")
+    config.addCliOption(PersistenceCmd.prover, "--circom-wasm", "tests/circuits/fixtures/proof_main.wasm")
+    config.addCliOption(PersistenceCmd.prover, "--circom-zkey", "tests/circuits/fixtures/proof_main.zkey")
     return await newCodexProcess(providerIdx, config, Role.Provider)

-  proc startValidatorNode(): Future[NodeProcess] {.async.} =
+  proc startValidatorNode(conf: CodexConfig): Future[NodeProcess] {.async.} =
     let validatorIdx = validators().len
-    var config = nodeConfigs.validators
-    config.cliOptions.add CliOption(key: "--bootstrap-node", value: bootstrap)
-    config.cliOptions.add CliOption(key: "--validator")
+    var config = conf
+    config.addCliOption("--bootstrap-node", bootstrap)
+    config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len])
+    config.addCliOption(StartUpCmd.persistence, "--validator")
     return await newCodexProcess(validatorIdx, config, Role.Validator)

-  setup:
-    if not nodeConfigs.hardhat.isNil:
-      let node = await startHardhatNode()
-      running.add RunningNode(role: Role.Hardhat, node: node)
-
-    try:
-      # Workaround for https://github.com/NomicFoundation/hardhat/issues/2053
-      # Do not use websockets, but use http and polling to stop subscriptions
-      # from being removed after 5 minutes
-      ethProvider = JsonRpcProvider.new("http://localhost:8545")
-      # if hardhat was NOT started by the test, take a snapshot so it can be
-      # reverted in the test teardown
-      if nodeConfigs.hardhat.isNil:
-        snapshot = await send(ethProvider, "evm_snapshot")
-      # ensure that we have a recent block with a fresh timestamp
-      discard await send(ethProvider, "evm_mine")
-      accounts = await ethProvider.listAccounts()
-    except CatchableError as e:
-      fatal "failed to connect to hardhat", error = e.msg
-      raiseAssert "Hardhat not running. Run hardhat manually before executing tests, or include a HardhatConfig in the test setup."
-
-    if not nodeConfigs.clients.isNil:
-      for i in 0..<nodeConfigs.clients.numNodes:
-        let node = await startClientNode()
-        running.add RunningNode(
-          role: Role.Client,
-          node: node
-        )
-        if i == 0:
-          bootstrap = CodexProcess(node).client.info()["spr"].getStr()
-
-    if not nodeConfigs.providers.isNil:
-      for i in 0..<nodeConfigs.providers.numNodes:
-        let node = await startProviderNode()
-        running.add RunningNode(
-          role: Role.Provider,
-          node: node
-        )
-
-    if not nodeConfigs.validators.isNil:
-      for i in 0..<nodeConfigs.validators.numNodes:
-        let node = await startValidatorNode()
-        running.add RunningNode(
-          role: Role.Validator,
-          node: node
-        )
-
-  teardown:
+  proc teardownImpl() {.async.} =
     for nodes in @[validators(), clients(), providers()]:
       for node in nodes:
         await node.stop() # also stops rest client
@@ -278,4 +235,77 @@ template multinodesuite*(name: string, body: untyped) =
     running = @[]

+  template failAndTeardownOnError(message: string, tryBody: untyped) =
+    try:
+      tryBody
+    except CatchableError as er:
+      fatal message, error=er.msg
+      echo "[FATAL] ", message, ": ", er.msg
+      await teardownImpl()
+      when declared(teardownAllIMPL):
+        teardownAllIMPL()
+      fail()
+      quit(1)
+
+  setup:
+    if var conf =? nodeConfigs.hardhat:
+      try:
+        let node = await startHardhatNode(conf)
+        running.add RunningNode(role: Role.Hardhat, node: node)
+      except CatchableError as e:
+        echo "failed to start hardhat node"
+        fail()
+        quit(1)
+
+    try:
+      # Workaround for https://github.com/NomicFoundation/hardhat/issues/2053
+      # Do not use websockets, but use http and polling to stop subscriptions
+      # from being removed after 5 minutes
+      ethProvider = JsonRpcProvider.new("http://localhost:8545")
+      # if hardhat was NOT started by the test, take a snapshot so it can be
+      # reverted in the test teardown
+      if nodeConfigs.hardhat.isNone:
+        snapshot = await send(ethProvider, "evm_snapshot")
+      # ensure that we have a recent block with a fresh timestamp
+      discard await send(ethProvider, "evm_mine")
+      accounts = await ethProvider.listAccounts()
+    except CatchableError as e:
+      echo "Hardhat not running. Run hardhat manually " &
+        "before executing tests, or include a " &
+        "HardhatConfig in the test setup."
+      fail()
+      quit(1)
+
+    if var clients =? nodeConfigs.clients:
+      failAndTeardownOnError "failed to start client nodes":
+        for config in clients.configs:
+          let node = await startClientNode(config)
+          running.add RunningNode(
+            role: Role.Client,
+            node: node
+          )
+          if clients().len == 1:
+            bootstrap = CodexProcess(node).client.info()["spr"].getStr()
+
+    if var providers =? nodeConfigs.providers:
+      failAndTeardownOnError "failed to start provider nodes":
+        for config in providers.configs.mitems:
+          let node = await startProviderNode(config)
+          running.add RunningNode(
+            role: Role.Provider,
+            node: node
+          )
+
+    if var validators =? nodeConfigs.validators:
+      failAndTeardownOnError "failed to start validator nodes":
+        for config in validators.configs.mitems:
+          let node = await startValidatorNode(config)
+          running.add RunningNode(
+            role: Role.Validator,
+            node: node
+          )
+
+  teardown:
+    await teardownImpl()
+
   body

========================================

@@ -0,0 +1,11 @@
+import pkg/questionable
+import ./codexconfig
+import ./hardhatconfig
+
+type
+  NodeConfigs* = object
+    clients*: ?CodexConfigs
+    providers*: ?CodexConfigs
+    validators*: ?CodexConfigs
+    hardhat*: ?HardhatConfig

========================================

@@ -23,6 +23,7 @@ type
     debug: bool
     trackedFutures*: TrackedFutures
     name*: string

+  NodeProcessError* = object of CatchableError

 method workingDir(node: NodeProcess): string {.base.} =
   raiseAssert "not implemented"
@@ -54,6 +55,8 @@ method start*(node: NodeProcess) {.base, async.} =
     processOptions = poptions

   try:
+    if node.debug:
+      echo "starting codex node with args: ", node.arguments.join(" ")
     node.process = await startProcess(
       node.executable,
       node.workingDir,
@ -149,12 +152,15 @@ proc waitUntilStarted*(node: NodeProcess) {.async.} =
let started = newFuture[void]() let started = newFuture[void]()
try: try:
discard node.captureOutput(node.startedOutput, started).track(node) discard node.captureOutput(node.startedOutput, started).track(node)
await started.wait(5.seconds) await started.wait(35.seconds) # allow enough time for proof generation
except AsyncTimeoutError as e: except AsyncTimeoutError:
# attempt graceful shutdown in case node was partially started, prevent # attempt graceful shutdown in case node was partially started, prevent
# zombies # zombies
await node.stop() await node.stop()
raiseAssert "node did not output '" & node.startedOutput & "'" # raise error here so that all nodes (not just this one) can be
# shutdown gracefully
raise newException(NodeProcessError, "node did not output '" &
node.startedOutput & "'")
proc restart*(node: NodeProcess) {.async.} = proc restart*(node: NodeProcess) {.async.} =
await node.stop() await node.stop()
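Because waitUntilStarted now raises NodeProcessError instead of asserting, a caller can recover and shut everything down. A minimal sketch, assuming a teardownImpl like the one in the multinodes template above:

    try:
      await node.waitUntilStarted()
    except NodeProcessError as e:
      echo "node failed to start: ", e.msg
      await teardownImpl()   # stop the nodes that did start
      fail()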


@ -58,11 +58,11 @@ proc startNode*(args: openArray[string], debug: string | bool = false): NodeProc
   node
 
 proc dataDir(node: NodeProcess): string =
-  let config = CodexConf.load(cmdLine = node.arguments)
+  let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
   config.dataDir.string
 
 proc apiUrl(node: NodeProcess): string =
-  let config = CodexConf.load(cmdLine = node.arguments)
+  let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
   "http://" & config.apiBindAddress & ":" & $config.apiPort & "/api/codex/v1"
 
 proc client*(node: NodeProcess): CodexClient =
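The `quitOnFailure = false` additions matter because these procs parse another process's argument list inside the test runner: by default a confutils load failure would quit the runner itself, whereas with `quitOnFailure = false` the failure surfaces as an exception the test can handle (a hedged reading of the change; the exact exception type is confutils-defined). For example:

    # sketch: derive a node's REST endpoint from its own CLI arguments
    let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
    let url = "http://" & config.apiBindAddress & ":" & $config.apiPort & "/api/codex/v1"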


@ -1,40 +1,50 @@
 import std/unittest
 import std/tempfiles
+import codex/conf
 import codex/utils/fileutils
 import ./nodes
 
 suite "Command line interface":
 
-  let account = "4242424242424242424242424242424242424242"
   let key = "4242424242424242424242424242424242424242424242424242424242424242"
 
   test "complains when persistence is enabled without ethereum account":
-    let node = startNode(@["--persistence"])
+    let node = startNode(@[
+      "persistence"
+    ])
     node.waitUntilOutput("Persistence enabled, but no Ethereum account was set")
     node.stop()
 
-  test "complains when validator is enabled without ethereum account":
-    let node = startNode(@["--validator"])
-    node.waitUntilOutput("Validator enabled, but no Ethereum account was set")
-    node.stop()
-
-  test "complains when ethereum account is set when not needed":
-    let node = startNode(@["--eth-account=" & account])
-    node.waitUntilOutput("Ethereum account was set, but neither persistence nor validator is enabled")
-    node.stop()
-
-  test "complains when ethereum private key is set when not needed":
-    let keyFile = genTempPath("", "")
-    discard secureWriteFile(keyFile, key)
-    let node = startNode(@["--eth-private-key=" & keyFile])
-    node.waitUntilOutput("Ethereum account was set, but neither persistence nor validator is enabled")
-    node.stop()
-    discard removeFile(keyFile)
-
   test "complains when ethereum private key file has wrong permissions":
     let unsafeKeyFile = genTempPath("", "")
     discard unsafeKeyFile.writeFile(key, 0o666)
-    let node = startNode(@["--persistence", "--eth-private-key=" & unsafeKeyFile])
+    let node = startNode(@[
+      "persistence",
+      "--eth-private-key=" & unsafeKeyFile])
     node.waitUntilOutput("Ethereum private key file does not have safe file permissions")
     node.stop()
     discard removeFile(unsafeKeyFile)
+
+  test "complains when persistence is enabled without accessible r1cs file":
+    let node = startNode(@["persistence", "prover"])
+    node.waitUntilOutput("r1cs file not readable, doesn't exist or wrong extension (.r1cs)")
+    node.stop()
+
+  test "complains when persistence is enabled without accessible wasm file":
+    let node = startNode(@[
+      "persistence",
+      "prover",
+      "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs"
+    ])
+    node.waitUntilOutput("wasm file not readable, doesn't exist or wrong extension (.wasm)")
+    node.stop()
+
+  test "complains when persistence is enabled without accessible zkey file":
+    let node = startNode(@[
+      "persistence",
+      "prover",
+      "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs",
+      "--circom-wasm=tests/circuits/fixtures/proof_main.wasm"
+    ])
+    node.waitUntilOutput("zkey file not readable, doesn't exist or wrong extension (.zkey)")
+    node.stop()
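For contrast, a sketch of a start-up that should clear all three artifact checks, using the fixture paths from the tests above (the account value is illustrative; as the first test shows, an Ethereum account is still required):

    let node = startNode(@[
      "persistence",
      "prover",
      "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs",
      "--circom-wasm=tests/circuits/fixtures/proof_main.wasm",
      "--circom-zkey=tests/circuits/fixtures/proof_main.zkey",
      "--eth-account=4242424242424242424242424242424242424242"
    ])
    node.stop()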


@ -1,7 +1,7 @@
-import std/math
 import pkg/stew/byteutils
 import pkg/codex/units
 import ./marketplacesuite
+import ./nodeconfigs
 import ../examples
 
 marketplacesuite "Marketplace payouts":
@ -9,21 +9,21 @@ marketplacesuite "Marketplace payouts":
   test "expired request partially pays out for stored time",
     NodeConfigs(
       # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
-      # hardhat: HardhatConfig().withLogFile()
+      hardhat: HardhatConfig.none,
 
       clients:
-        CodexConfig()
-          .nodes(1),
+        CodexConfigs.init(nodes=1)
         # .debug() # uncomment to enable console log output
         # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-        # .withLogTopics("node", "erasure"),
+        # .withLogTopics("node", "erasure")
+        .some,
 
       providers:
-        CodexConfig()
-          .nodes(1)
+        CodexConfigs.init(nodes=1)
         # .debug() # uncomment to enable console log output
         # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-        # .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock"),
+        # .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock")
+        .some,
   ):
     let reward = 400.u256
     let duration = 10.periods
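When a test needs diagnostics, the commented builder calls can be re-enabled as-is. A hedged sketch using only the chainable calls shown in the comments above:

    let diagnosed = NodeConfigs(
      hardhat: HardhatConfig.none,
      providers: CodexConfigs.init(nodes=1)
        .debug()         # console log output
        .withLogFile()   # log file under tests/integration/logs/<start_datetime> <suite_name>/...
        .withLogTopics("node", "marketplace", "sales")
        .some
    )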


@ -1,4 +1,3 @@
-import std/math
 from std/times import inMilliseconds
 import pkg/codex/logutils
 import pkg/stew/byteutils
@ -7,8 +6,9 @@ import ../contracts/deployment
 import ../codex/helpers
 import ../examples
 import ./marketplacesuite
+import ./nodeconfigs
 
-export chronicles
+export logutils
 
 logScope:
   topics = "integration test proofs"
@ -18,21 +18,22 @@ marketplacesuite "Hosts submit regular proofs":
   test "hosts submit periodic proofs for slots they fill", NodeConfigs(
     # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
-    # hardhat: HardhatConfig().withLogFile(),
+    hardhat:
+      HardhatConfig.none,
 
     clients:
-      CodexConfig()
-        .nodes(1),
+      CodexConfigs.init(nodes=1)
       # .debug() # uncomment to enable console log output
       # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-      # .withLogTopics("node"),
+      # .withLogTopics("node")
+      .some,
 
     providers:
-      CodexConfig()
-        .nodes(1)
+      CodexConfigs.init(nodes=1)
      # .debug() # uncomment to enable console log output
      # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-     # .withLogTopics("marketplace", "sales", "reservations", "node"),
+     # .withLogTopics("marketplace", "sales", "reservations", "node")
+     .some,
   ):
     let client0 = clients()[0].client
     let expiry = 5.periods
@ -72,29 +73,30 @@ marketplacesuite "Simulate invalid proofs":
   test "slot is freed after too many invalid proofs submitted", NodeConfigs(
     # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
-    # hardhat: HardhatConfig().withLogFile(),
+    hardhat:
+      HardhatConfig.none,
 
     clients:
-      CodexConfig()
-        .nodes(1),
+      CodexConfigs.init(nodes=1)
      # .debug() # uncomment to enable console log output
      # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-     # .withLogTopics("node", "clock"),
+     # .withLogTopics("node", "clock")
+     .some,
 
     providers:
-      CodexConfig()
-        .nodes(1)
-        .simulateProofFailuresFor(providerIdx=0, failEveryNProofs=1),
+      CodexConfigs.init(nodes=1)
+        .withSimulateProofFailures(idx=0, failEveryNProofs=1)
      # .debug() # uncomment to enable console log output
      # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-     # .withLogTopics("marketplace", "sales", "reservations", "node", "clock"),
+     # .withLogTopics("marketplace", "sales", "reservations", "node", "clock")
+     .some,
 
     validators:
-      CodexConfig()
-        .nodes(1)
+      CodexConfigs.init(nodes=1)
      # .debug() # uncomment to enable console log output
      # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
      # .withLogTopics("validator", "onchain", "ethers", "clock")
+     .some
   ):
     let client0 = clients()[0].client
     let expiry = 5.periods
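A hedged sketch of what the renamed builder might look like. Assumptions: CodexConfigs exposes its per-node configs (as `providers.configs` in the multinodes setup suggests), the addCliOption helper is hypothetical, and the option name mirrors the simulateProofFailures setting mentioned in the commit message:

    func withSimulateProofFailures(
        self: CodexConfigs, idx: int, failEveryNProofs: int): CodexConfigs =
      ## hypothetical: provider `idx` submits an invalid proof
      ## every `failEveryNProofs` proofs
      var copied = self
      copied.configs[idx].addCliOption(   # hypothetical helper
        "--simulate-proof-failures", $failEveryNProofs)
      copied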
@ -130,29 +132,29 @@ marketplacesuite "Simulate invalid proofs":
   test "slot is not freed when not enough invalid proofs submitted", NodeConfigs(
     # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
-    # hardhat: HardhatConfig().withLogFile(),
+    hardhat: HardhatConfig.none,
 
    clients:
-      CodexConfig()
-        .nodes(1),
+      CodexConfigs.init(nodes=1)
      # .debug() # uncomment to enable console log output
      # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-     # .withLogTopics("marketplace", "sales", "reservations", "node", "clock"),
+     # .withLogTopics("marketplace", "sales", "reservations", "node", "clock")
+     .some,
 
    providers:
-      CodexConfig()
-        .nodes(1)
-        .simulateProofFailuresFor(providerIdx=0, failEveryNProofs=1),
+      CodexConfigs.init(nodes=1)
+        .withSimulateProofFailures(idx=0, failEveryNProofs=1)
      # .debug() # uncomment to enable console log output
      # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-     # .withLogTopics("marketplace", "sales", "reservations", "node"),
+     # .withLogTopics("marketplace", "sales", "reservations", "node")
+     .some,
 
    validators:
-      CodexConfig()
-        .nodes(1)
+      CodexConfigs.init(nodes=1)
      # .debug()
      # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
      # .withLogTopics("validator", "onchain", "ethers", "clock")
+     .some
   ):
     let client0 = clients()[0].client
     let expiry = 5.periods


@ -38,7 +38,11 @@ template twonodessuite*(name: string, debug1, debug2: string, body) =
       "--disc-ip=127.0.0.1",
       "--disc-port=8090",
       "--listen-addrs=/ip4/127.0.0.1/tcp/0",
-      "--persistence",
+      "persistence",
+      "prover",
+      "--circom-r1cs=vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.r1cs",
+      "--circom-wasm=vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.wasm",
+      "--circom-zkey=vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.zkey",
       "--eth-account=" & $account1
     ]
@ -58,7 +62,11 @@ template twonodessuite*(name: string, debug1, debug2: string, body) =
       "--disc-port=8091",
       "--listen-addrs=/ip4/127.0.0.1/tcp/0",
       "--bootstrap-node=" & bootstrap,
-      "--persistence",
+      "persistence",
+      "prover",
+      "--circom-r1cs=vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.r1cs",
+      "--circom-wasm=vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.wasm",
+      "--circom-zkey=vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.zkey",
       "--eth-account=" & $account2
     ]
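Both nodes now point at the same codex-contracts-eth artifacts, so a possible tidy-up (not part of this diff) is to hoist the directory into a constant:

    const circuitDir = "vendor/codex-contracts-eth/verifier/networks/hardhat"
    let proverArgs = @[
      "persistence",
      "prover",
      "--circom-r1cs=" & circuitDir & "/proof_main.r1cs",
      "--circom-wasm=" & circuitDir & "/proof_main.wasm",
      "--circom-zkey=" & circuitDir & "/proof_main.zkey"
    ]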