Prover CLI updates (#735)
* rework cli to accept circuit params
* check circom files extension
* adding new required cli changes
* don't use ufcs
* persistence is a command now
* use `nimOldCaseObjects` switch for nim confutils compat
* misc
* Update cli integration tests
* Fix: simulateProofFailures option is not for validator
* moving circom params under `prover` command
* update tests
* Use circuit assets from codex-contract-eth in tests
* Add "prover" cli command to tests
* use correct stores
* make `verifier` a cmd option
* update circuit artifacts path
* fix cli tests
* Update integration tests to use cli commands

  Integration tests have been updated to use the new cli commands. The API for usage in the integration tests has also changed a bit. Proofs tests have been updated to use 5 nodes and 8 blocks of data. The remaining integration tests also need to be updated.

* remove parsedCli from CodexConfig

  Instead, parse the cli args on the fly when needed.

* remove unneeded gcsafes
* graceful shutdowns

  Where possible, do not raise assert, as other nodes in the test may already be running. Instead, raise exceptions, catch them in multinodes.nim, and attempt to do a teardown before failing the test. `abortOnError` is set to true so that `fail()` will quit immediately, after teardown has been run.

* update testmarketplace to new api, with valid EC params

---------

Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
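For orientation, a minimal sketch of how the reworked command tree is exercised: `prover` is now a subcommand of `persistence`, and `CodexConf.load` parses both levels in one pass. The flag values below are illustrative placeholders, not taken from this commit.

```nim
import pkg/confutils
import pkg/codex/conf

# Hypothetical invocation: nested commands are given in order, each
# followed by its own options (paths are placeholders).
let config = CodexConf.load(cmdLine = @[
  "persistence",
  "--eth-provider=ws://localhost:8545",
  "prover",
  "--circom-r1cs=circuits/proof_main.r1cs"
], quitOnFailure = false)

assert config.persistence   # cmd == StartUpCmd.persistence
assert config.prover        # ...and persistenceCmd == PersistenceCmd.prover
```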
Parent: 8589e63d34
Commit: 293c676f22

codex.nim (161 lines changed)
@@ -23,6 +23,7 @@ import ./codex/codex
import ./codex/logutils
import ./codex/units
import ./codex/utils/keyutils
import ./codex/codextypes

export codex, conf, libp2p, chronos, logutils
@@ -54,99 +55,101 @@ when isMainModule:
config.setupLogging()
config.setupMetrics()

case config.cmd:
of StartUpCommand.noCommand:
if config.nat == ValidIpAddress.init(IPv4_any()):
error "`--nat` cannot be set to the any (`0.0.0.0`) address"
quit QuitFailure

if config.nat == ValidIpAddress.init(IPv4_any()):
error "`--nat` cannot be set to the any (`0.0.0.0`) address"
if config.nat == ValidIpAddress.init("127.0.0.1"):
warn "`--nat` is set to loopback, your node wont properly announce over the DHT"

if not(checkAndCreateDataDir((config.dataDir).string)):
# We are unable to access/create data folder or data folder's
# permissions are insecure.
quit QuitFailure

trace "Data dir initialized", dir = $config.dataDir

if not(checkAndCreateDataDir((config.dataDir / "repo"))):
# We are unable to access/create data folder or data folder's
# permissions are insecure.
quit QuitFailure

trace "Repo dir initialized", dir = config.dataDir / "repo"

var
state: CodexStatus
shutdown: Future[void]

let
keyPath =
if isAbsolute(config.netPrivKeyFile):
config.netPrivKeyFile
else:
config.dataDir / config.netPrivKeyFile

privateKey = setupKey(keyPath).expect("Should setup private key!")
server = try:
CodexServer.new(config, privateKey)
except Exception as exc:
error "Failed to start Codex", msg = exc.msg
quit QuitFailure

if config.nat == ValidIpAddress.init("127.0.0.1"):
warn "`--nat` is set to loopback, your node wont properly announce over the DHT"
## Ctrl+C handling
proc doShutdown() =
shutdown = server.stop()
state = CodexStatus.Stopping

if not(checkAndCreateDataDir((config.dataDir).string)):
# We are unable to access/create data folder or data folder's
# permissions are insecure.
quit QuitFailure
notice "Stopping Codex"

trace "Data dir initialized", dir = $config.dataDir
proc controlCHandler() {.noconv.} =
when defined(windows):
# workaround for https://github.com/nim-lang/Nim/issues/4057
try:
setupForeignThreadGc()
except Exception as exc: raiseAssert exc.msg # shouldn't happen
notice "Shutting down after having received SIGINT"

if not(checkAndCreateDataDir((config.dataDir / "repo"))):
# We are unable to access/create data folder or data folder's
# permissions are insecure.
quit QuitFailure
doShutdown()

trace "Repo dir initialized", dir = config.dataDir / "repo"
try:
setControlCHook(controlCHandler)
except Exception as exc: # TODO Exception
warn "Cannot set ctrl-c handler", msg = exc.msg

var
state: CodexStatus
shutdown: Future[void]

let
keyPath =
if isAbsolute(config.netPrivKeyFile):
config.netPrivKeyFile
else:
config.dataDir / config.netPrivKeyFile

privateKey = setupKey(keyPath).expect("Should setup private key!")
server = CodexServer.new(config, privateKey)

## Ctrl+C handling
proc doShutdown() =
shutdown = server.stop()
state = CodexStatus.Stopping

notice "Stopping Codex"

proc controlCHandler() {.noconv.} =
when defined(windows):
# workaround for https://github.com/nim-lang/Nim/issues/4057
try:
setupForeignThreadGc()
except Exception as exc: raiseAssert exc.msg # shouldn't happen
notice "Shutting down after having received SIGINT"
# equivalent SIGTERM handler
when defined(posix):
proc SIGTERMHandler(signal: cint) {.noconv.} =
notice "Shutting down after having received SIGTERM"

doShutdown()

c_signal(ansi_c.SIGTERM, SIGTERMHandler)

try:
waitFor server.start()
except CatchableError as error:
error "Codex failed to start", error = error.msg
# XXX ideally we'd like to issue a stop instead of quitting cold turkey,
# but this would mean we'd have to fix the implementation of all
# services so they won't crash if we attempt to stop them before they
# had a chance to start (currently you'll get a SISGSEV if you try to).
quit QuitFailure

state = CodexStatus.Running
while state == CodexStatus.Running:
try:
setControlCHook(controlCHandler)
except Exception as exc: # TODO Exception
warn "Cannot set ctrl-c handler", msg = exc.msg

# equivalent SIGTERM handler
when defined(posix):
proc SIGTERMHandler(signal: cint) {.noconv.} =
notice "Shutting down after having received SIGTERM"

doShutdown()

c_signal(ansi_c.SIGTERM, SIGTERMHandler)

try:
waitFor server.start()
except CatchableError as error:
error "Codex failed to start", error = error.msg
# XXX ideally we'd like to issue a stop instead of quitting cold turkey,
# but this would mean we'd have to fix the implementation of all
# services so they won't crash if we attempt to stop them before they
# had a chance to start (currently you'll get a SISGSEV if you try to).
quit QuitFailure

state = CodexStatus.Running
while state == CodexStatus.Running:
# poll chronos
chronos.poll()

try:
# signal handlers guarantee that the shutdown Future will
# be assigned before state switches to Stopping
waitFor shutdown
except CatchableError as error:
error "Codex didn't shutdown correctly", error = error.msg
except Exception as exc:
error "Unhandled exception in async proc, aborting", msg = exc.msg
quit QuitFailure

notice "Exited codex"
try:
# signal handlers guarantee that the shutdown Future will
# be assigned before state switches to Stopping
waitFor shutdown
except CatchableError as error:
error "Codex didn't shutdown correctly", error = error.msg
quit QuitFailure

of StartUpCommand.initNode:
discard
notice "Exited codex"
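The hunk above centers on one pattern worth isolating: the main thread polls chronos in a loop while the signal handlers only flip a status flag, and `doShutdown` assigns the `shutdown` Future before switching state, so the final `waitFor` never races. A stripped-down sketch of that ordering — the type and the stop routine are stand-ins, not the real Codex ones:

```nim
import pkg/chronos

type Status = enum Running, Stopping

var
  state = Status.Running
  shutdown: Future[void]

proc stopServices(): Future[void] {.async.} =
  discard # stand-in for server.stop()

proc doShutdown() =
  # assign the Future *before* flipping state, so the waitFor below
  # can never observe Stopping while the Future is still nil
  shutdown = stopServices()
  state = Status.Stopping

doShutdown() # a SIGINT/SIGTERM handler would call this
while state == Status.Running:
  chronos.poll() # drive the event loop one tick at a time

waitFor shutdown
```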
codex/codex.nim (161 lines changed)
@@ -22,12 +22,14 @@ import pkg/stew/io2
import pkg/stew/shims/net as stewnet
import pkg/datastore
import pkg/ethers except Rng
import pkg/stew/io2

import ./node
import ./conf
import ./rng
import ./rest/api
import ./stores
import ./slots
import ./blockexchange
import ./utils/fileutils
import ./erasure
@@ -72,78 +74,71 @@ proc bootstrapInteractions(
config = s.config
repo = s.repoStore

if not config.persistence and not config.validator:
if config.ethAccount.isSome or config.ethPrivateKey.isSome:
warn "Ethereum account was set, but neither persistence nor validator is enabled"
return

if not config.ethAccount.isSome and not config.ethPrivateKey.isSome:
if config.persistence:
error "Persistence enabled, but no Ethereum account was set"
if config.validator:
error "Validator enabled, but no Ethereum account was set"
quit QuitFailure

let provider = JsonRpcProvider.new(config.ethProvider)
await waitForSync(provider)
var signer: Signer
if account =? config.ethAccount:
signer = provider.getSigner(account)
elif keyFile =? config.ethPrivateKey:
without isSecure =? checkSecureFile(keyFile):
error "Could not check file permissions: does Ethereum private key file exist?"
quit QuitFailure
if not isSecure:
error "Ethereum private key file does not have safe file permissions"
quit QuitFailure
without key =? keyFile.readAllChars():
error "Unable to read Ethereum private key file"
quit QuitFailure
without wallet =? EthWallet.new(key.strip(), provider):
error "Invalid Ethereum private key in file"
quit QuitFailure
signer = wallet

let deploy = Deployment.new(provider, config)
without marketplaceAddress =? await deploy.address(Marketplace):
error "No Marketplace address was specified or there is no known address for the current network"
quit QuitFailure

let marketplace = Marketplace.new(marketplaceAddress, signer)
let market = OnChainMarket.new(marketplace)
let clock = OnChainClock.new(provider)

var client: ?ClientInteractions
var host: ?HostInteractions
var validator: ?ValidatorInteractions

if config.validator or config.persistence:
s.codexNode.clock = clock
else:
s.codexNode.clock = SystemClock()

if config.persistence:
# This is used for simulation purposes. Normal nodes won't be compiled with this flag
# and hence the proof failure will always be 0.
when codex_enable_proof_failures:
let proofFailures = config.simulateProofFailures
if proofFailures > 0:
warn "Enabling proof failure simulation!"
if not config.ethAccount.isSome and not config.ethPrivateKey.isSome:
error "Persistence enabled, but no Ethereum account was set"
quit QuitFailure

let provider = JsonRpcProvider.new(config.ethProvider)
await waitForSync(provider)
var signer: Signer
if account =? config.ethAccount:
signer = provider.getSigner(account)
elif keyFile =? config.ethPrivateKey:
without isSecure =? checkSecureFile(keyFile):
error "Could not check file permissions: does Ethereum private key file exist?"
quit QuitFailure
if not isSecure:
error "Ethereum private key file does not have safe file permissions"
quit QuitFailure
without key =? keyFile.readAllChars():
error "Unable to read Ethereum private key file"
quit QuitFailure
without wallet =? EthWallet.new(key.strip(), provider):
error "Invalid Ethereum private key in file"
quit QuitFailure
signer = wallet

let deploy = Deployment.new(provider, config)
without marketplaceAddress =? await deploy.address(Marketplace):
error "No Marketplace address was specified or there is no known address for the current network"
quit QuitFailure

let marketplace = Marketplace.new(marketplaceAddress, signer)
let market = OnChainMarket.new(marketplace)
let clock = OnChainClock.new(provider)

var client: ?ClientInteractions
var host: ?HostInteractions
var validator: ?ValidatorInteractions

if config.validator or config.persistence:
s.codexNode.clock = clock
else:
let proofFailures = 0
if config.simulateProofFailures > 0:
warn "Proof failure simulation is not enabled for this build! Configuration ignored"
s.codexNode.clock = SystemClock()

let purchasing = Purchasing.new(market, clock)
let sales = Sales.new(market, clock, repo, proofFailures)
client = some ClientInteractions.new(clock, purchasing)
host = some HostInteractions.new(clock, sales)
if config.validator:
let validation = Validation.new(clock, market, config.validatorMaxSlots)
validator = some ValidatorInteractions.new(clock, validation)
if config.persistence:
# This is used for simulation purposes. Normal nodes won't be compiled with this flag
# and hence the proof failure will always be 0.
when codex_enable_proof_failures:
let proofFailures = config.simulateProofFailures
if proofFailures > 0:
warn "Enabling proof failure simulation!"
else:
let proofFailures = 0
if config.simulateProofFailures > 0:
warn "Proof failure simulation is not enabled for this build! Configuration ignored"

s.codexNode.contracts = (client, host, validator)
let purchasing = Purchasing.new(market, clock)
let sales = Sales.new(market, clock, repo, proofFailures)
client = some ClientInteractions.new(clock, purchasing)
host = some HostInteractions.new(clock, sales)

if config.validator:
let validation = Validation.new(clock, market, config.validatorMaxSlots)
validator = some ValidatorInteractions.new(clock, validation)

s.codexNode.contracts = (client, host, validator)

proc start*(s: CodexServer) {.async.} =
trace "Starting codex node", config = $s.config
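The signer setup above leans on nim-questionable's unwrapping operators; as a reminder of their semantics (the values here are illustrative, not from the commit):

```nim
import pkg/questionable

let account: ?string = some "0xa1b2"   # ?T is questionable's Option[T]

if acc =? account:
  echo "signer for ", acc              # body runs only when a value is present

without key =? account:
  quit QuitFailure                     # else-branch runs when the option is empty
echo key.len                           # key is unwrapped for the rest of the scope
```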
@@ -265,11 +260,41 @@ proc new*(
blockDiscovery = DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
engine = BlockExcEngine.new(repoStore, wallet, network, blockDiscovery, peerStore, pendingBlocks)
store = NetworkStore.new(engine, repoStore)
prover = if config.prover:
if not fileAccessible($config.circomR1cs, {AccessFlags.Read}) and
endsWith($config.circomR1cs, ".r1cs"):
error "Circom R1CS file not accessible"
raise (ref Defect)(
msg: "r1cs file not readable, doesn't exist or wrong extension (.r1cs)")

if not fileAccessible($config.circomWasm, {AccessFlags.Read}) and
endsWith($config.circomWasm, ".wasm"):
error "Circom wasm file not accessible"
raise (ref Defect)(
msg: "wasm file not readable, doesn't exist or wrong extension (.wasm)")

let zkey = if not config.circomNoZkey:
if not fileAccessible($config.circomZkey, {AccessFlags.Read}) and
endsWith($config.circomZkey, ".zkey"):
error "Circom zkey file not accessible"
raise (ref Defect)(
msg: "zkey file not readable, doesn't exist or wrong extension (.zkey)")

$config.circomZkey
else: ""

some Prover.new(
store,
CircomCompat.init($config.circomR1cs, $config.circomWasm, zkey),
config.numProofSamples)
else:
none Prover

codexNode = CodexNodeRef.new(
switch = switch,
networkStore = store,
engine = engine,
prover = prover,
discovery = discovery)

restServer = RestServerRef.new(
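The three artifact checks above share one shape: the error paths say the file must be readable and carry the expected extension, otherwise node construction is aborted with a Defect. A condensed sketch of that intent with a hypothetical helper (`checkArtifact` is not in the commit; note the commit's own conditions combine the two tests slightly differently):

```nim
import std/strutils
import pkg/stew/io2   # fileAccessible, AccessFlags

proc checkArtifact(path, ext: string) =
  # readable AND named with the expected extension, else bail out
  if not (fileAccessible(path, {AccessFlags.Read}) and path.endsWith(ext)):
    raise (ref Defect)(msg: path & " not readable, doesn't exist " &
                            "or wrong extension (" & ext & ")")

checkArtifact("circuits/proof_main.r1cs", ".r1cs")
```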
codex/conf.nim (378 lines changed)
@@ -7,9 +7,7 @@
## This file may not be copied, modified, or distributed except according to
## those terms.

import pkg/upraises

push: {.upraises: [].}
{.push raises: [].}

import std/os
import std/terminal
@@ -33,30 +31,47 @@ import pkg/ethers
import pkg/questionable
import pkg/questionable/results

import ./codextypes
import ./discovery
import ./logutils
import ./stores
import ./units
import ./utils

export units
export net
export units, net, codextypes, logutils

export
DefaultQuotaBytes,
DefaultBlockTtl,
DefaultBlockMaintenanceInterval,
DefaultNumberOfBlocksToMaintainPerInterval

proc defaultDataDir*(): string =
let dataDir = when defined(windows):
"AppData" / "Roaming" / "Codex"
elif defined(macosx):
"Library" / "Application Support" / "Codex"
else:
".cache" / "codex"

getHomeDir() / dataDir

const
codex_enable_api_debug_peers* {.booldefine.} = false
codex_enable_proof_failures* {.booldefine.} = false
codex_use_hardhat* {.booldefine.} = false
codex_enable_log_counter* {.booldefine.} = false

DefaultDataDir* = defaultDataDir()

type
StartUpCommand* {.pure.} = enum
noCommand,
initNode
StartUpCmd* {.pure.} = enum
noCmd
persistence

PersistenceCmd* {.pure.} = enum
noCmd
prover

LogKind* {.pure.} = enum
Auto = "auto"
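The two new enums become discriminators of nested case branches inside `CodexConf`, which is what makes `prover` a subcommand of `persistence`; confutils re-assigns those discriminators while parsing, which is why the build adds the `nimOldCaseObjects` define. A minimal sketch of that shape (type and field names illustrative, not the real config object):

```nim
# Compile with --define:nimOldCaseObjects so discriminator fields may be
# re-assigned after construction, the way nim-confutils does while parsing.
type
  TopCmd {.pure.} = enum noCmd, persistence
  SubCmd {.pure.} = enum noCmd, prover

  Conf = object
    case cmd: TopCmd
    of TopCmd.persistence:
      ethProvider: string
      case persistenceCmd: SubCmd     # nested variant = nested subcommand
      of SubCmd.prover:
        circomR1cs: string
      of SubCmd.noCmd: discard
    of TopCmd.noCmd: discard

let c = Conf(cmd: TopCmd.persistence, persistenceCmd: SubCmd.prover)
echo c.cmd, " / ", c.persistenceCmd
```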
@ -106,126 +121,125 @@ type
|
|||
|
||||
dataDir* {.
|
||||
desc: "The directory where codex will store configuration and data"
|
||||
defaultValue: defaultDataDir()
|
||||
defaultValueDesc: ""
|
||||
defaultValue: DefaultDataDir
|
||||
defaultValueDesc: $DefaultDataDir
|
||||
abbr: "d"
|
||||
name: "data-dir" }: OutDir
|
||||
|
||||
listenAddrs* {.
|
||||
desc: "Multi Addresses to listen on"
|
||||
defaultValue: @[
|
||||
MultiAddress.init("/ip4/0.0.0.0/tcp/0")
|
||||
.expect("Should init multiaddress")]
|
||||
defaultValueDesc: "/ip4/0.0.0.0/tcp/0"
|
||||
abbr: "i"
|
||||
name: "listen-addrs" }: seq[MultiAddress]
|
||||
|
||||
# TODO: change this once we integrate nat support
|
||||
nat* {.
|
||||
desc: "IP Addresses to announce behind a NAT"
|
||||
defaultValue: ValidIpAddress.init("127.0.0.1")
|
||||
defaultValueDesc: "127.0.0.1"
|
||||
abbr: "a"
|
||||
name: "nat" }: ValidIpAddress
|
||||
|
||||
discoveryIp* {.
|
||||
desc: "Discovery listen address"
|
||||
defaultValue: ValidIpAddress.init(IPv4_any())
|
||||
defaultValueDesc: "0.0.0.0"
|
||||
abbr: "e"
|
||||
name: "disc-ip" }: ValidIpAddress
|
||||
|
||||
discoveryPort* {.
|
||||
desc: "Discovery (UDP) port"
|
||||
defaultValue: 8090.Port
|
||||
defaultValueDesc: "8090"
|
||||
abbr: "u"
|
||||
name: "disc-port" }: Port
|
||||
|
||||
netPrivKeyFile* {.
|
||||
desc: "Source of network (secp256k1) private key file path or name"
|
||||
defaultValue: "key"
|
||||
name: "net-privkey" }: string
|
||||
|
||||
bootstrapNodes* {.
|
||||
desc: "Specifies one or more bootstrap nodes to use when connecting to the network"
|
||||
abbr: "b"
|
||||
name: "bootstrap-node" }: seq[SignedPeerRecord]
|
||||
|
||||
maxPeers* {.
|
||||
desc: "The maximum number of peers to connect to"
|
||||
defaultValue: 160
|
||||
name: "max-peers" }: int
|
||||
|
||||
agentString* {.
|
||||
defaultValue: "Codex"
|
||||
desc: "Node agent string which is used as identifier in network"
|
||||
name: "agent-string" }: string
|
||||
|
||||
apiBindAddress* {.
|
||||
desc: "The REST API bind address"
|
||||
defaultValue: "127.0.0.1"
|
||||
name: "api-bindaddr"
|
||||
}: string
|
||||
|
||||
apiPort* {.
|
||||
desc: "The REST Api port",
|
||||
defaultValue: 8080.Port
|
||||
defaultValueDesc: "8080"
|
||||
name: "api-port"
|
||||
abbr: "p" }: Port
|
||||
|
||||
repoKind* {.
|
||||
desc: "Backend for main repo store (fs, sqlite)"
|
||||
defaultValueDesc: "fs"
|
||||
defaultValue: repoFS
|
||||
name: "repo-kind" }: RepoKind
|
||||
|
||||
storageQuota* {.
|
||||
desc: "The size of the total storage quota dedicated to the node"
|
||||
defaultValue: DefaultQuotaBytes
|
||||
defaultValueDesc: $DefaultQuotaBytes
|
||||
name: "storage-quota"
|
||||
abbr: "q" }: NBytes
|
||||
|
||||
blockTtl* {.
|
||||
desc: "Default block timeout in seconds - 0 disables the ttl"
|
||||
defaultValue: DefaultBlockTtl
|
||||
defaultValueDesc: $DefaultBlockTtl
|
||||
name: "block-ttl"
|
||||
abbr: "t" }: Duration
|
||||
|
||||
blockMaintenanceInterval* {.
|
||||
desc: "Time interval in seconds - determines frequency of block maintenance cycle: how often blocks are checked for expiration and cleanup"
|
||||
defaultValue: DefaultBlockMaintenanceInterval
|
||||
defaultValueDesc: $DefaultBlockMaintenanceInterval
|
||||
name: "block-mi" }: Duration
|
||||
|
||||
blockMaintenanceNumberOfBlocks* {.
|
||||
desc: "Number of blocks to check every maintenance cycle"
|
||||
defaultValue: DefaultNumberOfBlocksToMaintainPerInterval
|
||||
defaultValueDesc: $DefaultNumberOfBlocksToMaintainPerInterval
|
||||
name: "block-mn" }: int
|
||||
|
||||
cacheSize* {.
|
||||
desc: "The size of the block cache, 0 disables the cache - might help on slow hardrives"
|
||||
defaultValue: 0
|
||||
defaultValueDesc: "0"
|
||||
name: "cache-size"
|
||||
abbr: "c" }: NBytes
|
||||
|
||||
logFile* {.
|
||||
desc: "Logs to file"
|
||||
defaultValue: string.none
|
||||
name: "log-file"
|
||||
hidden
|
||||
.}: Option[string]
|
||||
|
||||
case cmd* {.
|
||||
command
|
||||
defaultValue: noCommand }: StartUpCommand
|
||||
|
||||
of noCommand:
|
||||
|
||||
listenAddrs* {.
|
||||
desc: "Multi Addresses to listen on"
|
||||
defaultValue: @[
|
||||
MultiAddress.init("/ip4/0.0.0.0/tcp/0")
|
||||
.expect("Should init multiaddress")]
|
||||
defaultValueDesc: "/ip4/0.0.0.0/tcp/0"
|
||||
abbr: "i"
|
||||
name: "listen-addrs" }: seq[MultiAddress]
|
||||
|
||||
# TODO: change this once we integrate nat support
|
||||
nat* {.
|
||||
desc: "IP Addresses to announce behind a NAT"
|
||||
defaultValue: ValidIpAddress.init("127.0.0.1")
|
||||
defaultValueDesc: "127.0.0.1"
|
||||
abbr: "a"
|
||||
name: "nat" }: ValidIpAddress
|
||||
|
||||
discoveryIp* {.
|
||||
desc: "Discovery listen address"
|
||||
defaultValue: ValidIpAddress.init(IPv4_any())
|
||||
defaultValueDesc: "0.0.0.0"
|
||||
abbr: "e"
|
||||
name: "disc-ip" }: ValidIpAddress
|
||||
|
||||
discoveryPort* {.
|
||||
desc: "Discovery (UDP) port"
|
||||
defaultValue: 8090.Port
|
||||
defaultValueDesc: "8090"
|
||||
abbr: "u"
|
||||
name: "disc-port" }: Port
|
||||
|
||||
netPrivKeyFile* {.
|
||||
desc: "Source of network (secp256k1) private key file path or name"
|
||||
defaultValue: "key"
|
||||
name: "net-privkey" }: string
|
||||
|
||||
bootstrapNodes* {.
|
||||
desc: "Specifies one or more bootstrap nodes to use when connecting to the network"
|
||||
abbr: "b"
|
||||
name: "bootstrap-node" }: seq[SignedPeerRecord]
|
||||
|
||||
maxPeers* {.
|
||||
desc: "The maximum number of peers to connect to"
|
||||
defaultValue: 160
|
||||
name: "max-peers" }: int
|
||||
|
||||
agentString* {.
|
||||
defaultValue: "Codex"
|
||||
desc: "Node agent string which is used as identifier in network"
|
||||
name: "agent-string" }: string
|
||||
|
||||
apiBindAddress* {.
|
||||
desc: "The REST API bind address"
|
||||
defaultValue: "127.0.0.1"
|
||||
name: "api-bindaddr"
|
||||
}: string
|
||||
|
||||
apiPort* {.
|
||||
desc: "The REST Api port",
|
||||
defaultValue: 8080.Port
|
||||
defaultValueDesc: "8080"
|
||||
name: "api-port"
|
||||
abbr: "p" }: Port
|
||||
|
||||
repoKind* {.
|
||||
desc: "Backend for main repo store (fs, sqlite)"
|
||||
defaultValueDesc: "fs"
|
||||
defaultValue: repoFS
|
||||
name: "repo-kind" }: RepoKind
|
||||
|
||||
storageQuota* {.
|
||||
desc: "The size of the total storage quota dedicated to the node"
|
||||
defaultValue: DefaultQuotaBytes
|
||||
defaultValueDesc: $DefaultQuotaBytes
|
||||
name: "storage-quota"
|
||||
abbr: "q" }: NBytes
|
||||
|
||||
blockTtl* {.
|
||||
desc: "Default block timeout in seconds - 0 disables the ttl"
|
||||
defaultValue: DefaultBlockTtl
|
||||
defaultValueDesc: $DefaultBlockTtl
|
||||
name: "block-ttl"
|
||||
abbr: "t" }: Duration
|
||||
|
||||
blockMaintenanceInterval* {.
|
||||
desc: "Time interval in seconds - determines frequency of block maintenance cycle: how often blocks are checked for expiration and cleanup"
|
||||
defaultValue: DefaultBlockMaintenanceInterval
|
||||
defaultValueDesc: $DefaultBlockMaintenanceInterval
|
||||
name: "block-mi" }: Duration
|
||||
|
||||
blockMaintenanceNumberOfBlocks* {.
|
||||
desc: "Number of blocks to check every maintenance cycle"
|
||||
defaultValue: DefaultNumberOfBlocksToMaintainPerInterval
|
||||
defaultValueDesc: $DefaultNumberOfBlocksToMaintainPerInterval
|
||||
name: "block-mn" }: int
|
||||
|
||||
cacheSize* {.
|
||||
desc: "The size of the block cache, 0 disables the cache - might help on slow hardrives"
|
||||
defaultValue: 0
|
||||
defaultValueDesc: "0"
|
||||
name: "cache-size"
|
||||
abbr: "c" }: NBytes
|
||||
|
||||
persistence* {.
|
||||
desc: "Enables persistence mechanism, requires an Ethereum node"
|
||||
defaultValue: false
|
||||
name: "persistence"
|
||||
.}: bool
|
||||
|
||||
defaultValue: noCmd
|
||||
command }: StartUpCmd
|
||||
of persistence:
|
||||
ethProvider* {.
|
||||
desc: "The URL of the JSON-RPC API of the Ethereum node"
|
||||
defaultValue: "ws://localhost:8545"
|
||||
|
@@ -235,21 +249,32 @@ type
ethAccount* {.
desc: "The Ethereum account that is used for storage contracts"
defaultValue: EthAddress.none
defaultValueDesc: ""
name: "eth-account"
.}: Option[EthAddress]

ethPrivateKey* {.
desc: "File containing Ethereum private key for storage contracts"
defaultValue: string.none
defaultValueDesc: ""
name: "eth-private-key"
.}: Option[string]

marketplaceAddress* {.
desc: "Address of deployed Marketplace contract"
defaultValue: EthAddress.none
defaultValueDesc: ""
name: "marketplace-address"
.}: Option[EthAddress]

# TODO: should go behind a feature flag
simulateProofFailures* {.
desc: "Simulates proof failures once every N proofs. 0 = disabled."
defaultValue: 0
name: "simulate-proof-failures"
hidden
.}: int

validator* {.
desc: "Enables validator, requires an Ethereum node"
defaultValue: false
@@ -262,28 +287,85 @@ type
name: "validator-max-slots"
.}: int

simulateProofFailures* {.
desc: "Simulates proof failures once every N proofs. 0 = disabled."
defaultValue: 0
name: "simulate-proof-failures"
hidden
.}: int
case persistenceCmd* {.
defaultValue: noCmd
command }: PersistenceCmd

logFile* {.
desc: "Logs to file"
defaultValue: string.none
name: "log-file"
hidden
.}: Option[string]
of PersistenceCmd.prover:
circomR1cs* {.
desc: "The r1cs file for the storage circuit"
defaultValue: $DefaultDataDir / "circuits" / "proof_main.r1cs"
defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.r1cs"
name: "circom-r1cs"
.}: InputFile

of initNode:
discard
circomWasm* {.
desc: "The wasm file for the storage circuit"
defaultValue: $DefaultDataDir / "circuits" / "proof_main.wasm"
defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.wasm"
name: "circom-wasm"
.}: InputFile

circomZkey* {.
desc: "The zkey file for the storage circuit"
defaultValue: $DefaultDataDir / "circuits" / "proof_main.zkey"
defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.zkey"
name: "circom-zkey"
.}: InputFile

# TODO: should probably be hidden and behind a feature flag
circomNoZkey* {.
desc: "Ignore the zkey file - use only for testing!"
defaultValue: false
name: "circom-no-zkey"
.}: bool

numProofSamples* {.
desc: "Number of samples to prove"
defaultValue: DefaultSamplesNum
defaultValueDesc: $DefaultSamplesNum
name: "proof-samples" }: int

maxSlotDepth* {.
desc: "The maximum depth of the slot tree"
defaultValue: DefaultMaxSlotDepth
defaultValueDesc: $DefaultMaxSlotDepth
name: "max-slot-depth" }: int

maxDatasetDepth* {.
desc: "The maximum depth of the dataset tree"
defaultValue: DefaultMaxDatasetDepth
defaultValueDesc: $DefaultMaxDatasetDepth
name: "max-dataset-depth" }: int

maxBlockDepth* {.
desc: "The maximum depth of the network block merkle tree"
defaultValue: DefaultBlockDepth
defaultValueDesc: $DefaultBlockDepth
name: "max-block-depth" }: int

maxCellElms* {.
desc: "The maximum number of elements in a cell"
defaultValue: DefaultCellElms
defaultValueDesc: $DefaultCellElms
name: "max-cell-elements" }: int
of PersistenceCmd.noCmd:
discard

of StartUpCmd.noCmd:
discard # end of persistence

EthAddress* = ethers.Address

logutils.formatIt(LogFormat.textLines, EthAddress): it.short0xHexLog
logutils.formatIt(LogFormat.json, EthAddress): %it

func persistence*(self: CodexConf): bool =
self.cmd == StartUpCmd.persistence

func prover*(self: CodexConf): bool =
self.persistence and self.persistenceCmd == PersistenceCmd.prover

proc getCodexVersion(): string =
let tag = strip(staticExec("git tag"))
if tag.isEmptyOrWhitespace:
@@ -308,16 +390,6 @@
"Codex revision: " & codexRevision & "\p" &
nimBanner

proc defaultDataDir*(): string =
let dataDir = when defined(windows):
"AppData" / "Roaming" / "Codex"
elif defined(macosx):
"Library" / "Application Support" / "Codex"
else:
".cache" / "codex"

getHomeDir() / dataDir

proc parseCmdArg*(T: typedesc[MultiAddress],
input: string): MultiAddress
{.upraises: [ValueError, LPError].} =
@@ -326,7 +398,7 @@ proc parseCmdArg*(T: typedesc[MultiAddress],
if res.isOk:
ma = res.get()
else:
warn "Invalid MultiAddress", input=input, error=res.error()
warn "Invalid MultiAddress", input=input, error = res.error()
quit QuitFailure
ma
@@ -334,10 +406,10 @@ proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
var res: SignedPeerRecord
try:
if not res.fromURI(uri):
warn "Invalid SignedPeerRecord uri", uri=uri
warn "Invalid SignedPeerRecord uri", uri = uri
quit QuitFailure
except CatchableError as exc:
warn "Invalid SignedPeerRecord uri", uri=uri, error=exc.msg
warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg
quit QuitFailure
res
@@ -348,7 +420,7 @@ proc parseCmdArg*(T: type NBytes, val: string): T =
var num = 0'i64
let count = parseSize(val, num, alwaysBin = true)
if count == 0:
warn "Invalid number of bytes", nbytes=val
warn "Invalid number of bytes", nbytes = val
quit QuitFailure
NBytes(num)
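For reference, the NBytes parser above delegates to `parseSize` with `alwaysBin = true`, so suffixed values are read as binary multiples; a quick sketch of the expected behavior (the example value and printed result are assumptions, not output from this commit):

```nim
import pkg/codex/conf

# "1mb" should parse as 1 MiB when alwaysBin = true (assumed behavior of
# the parseCmdArg overload shown above)
let quota = NBytes.parseCmdArg("1mb")
echo quota.int   # expected: 1048576
```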
@@ -356,7 +428,7 @@ proc parseCmdArg*(T: type Duration, val: string): T =
var dur: Duration
let count = parseDuration(val, dur)
if count == 0:
warn "Invalid duration parse", dur=dur
warn "Cannot parse duration", dur = dur
quit QuitFailure
dur
@@ -116,6 +116,9 @@ switch("define", "chronicles_sinks=textlines[dynamic],json[dynamic],textlines[dy
switch("define", "use_asm_syntax_intel=false")
switch("define", "ctt_asm=false")

# Allow the use of old-style case objects for nim config compatibility
switch("define", "nimOldCaseObjects")

# begin Nimble config (version 1)
when system.fileExists("nimble.paths"):
include "nimble.paths"
@@ -20,4 +20,3 @@ proc address*(_: type Marketplace, dummyVerifier = false): Address =
hardhatMarketWithDummyVerifier
else:
hardhatMarketAddress
@@ -13,10 +13,19 @@ method getChainId*(provider: MockProvider): Future[UInt256] {.async.} =
return provider.chainId

proc configFactory(): CodexConf =
CodexConf(cmd: noCommand, nat: ValidIpAddress.init("127.0.0.1"), discoveryIp: ValidIpAddress.init(IPv4_any()), metricsAddress: ValidIpAddress.init("127.0.0.1"))
CodexConf(
cmd: StartUpCmd.persistence,
nat: ValidIpAddress.init("127.0.0.1"),
discoveryIp: ValidIpAddress.init(IPv4_any()),
metricsAddress: ValidIpAddress.init("127.0.0.1"))

proc configFactory(marketplace: Option[EthAddress]): CodexConf =
CodexConf(cmd: noCommand, nat: ValidIpAddress.init("127.0.0.1"), discoveryIp: ValidIpAddress.init(IPv4_any()), metricsAddress: ValidIpAddress.init("127.0.0.1"), marketplaceAddress: marketplace)
CodexConf(
cmd: StartUpCmd.persistence,
nat: ValidIpAddress.init("127.0.0.1"),
discoveryIp: ValidIpAddress.init(IPv4_any()),
metricsAddress: ValidIpAddress.init("127.0.0.1"),
marketplaceAddress: marketplace)

asyncchecksuite "Deployment":
let provider = MockProvider()
@@ -1,10 +1,7 @@
import pkg/questionable

type
CliOption* = object of RootObj
nodeIdx*: ?int
key*: string
value*: string
CliOption* = object
key*: string # option key, including `--`
value*: string # option value

proc `$`*(option: CliOption): string =
var res = option.key
@@ -1,61 +1,295 @@
import std/options
import std/os
import std/sequtils
import std/strutils
import std/sugar
import std/tables
from pkg/chronicles import LogLevel
import pkg/codex/conf
import pkg/codex/units
import pkg/confutils
import pkg/confutils/defs
import libp2p except setup
import pkg/questionable
import ./clioption
import ./nodeconfig

export nodeconfig
export clioption
export confutils

type
CodexConfig* = ref object of NodeConfig
numNodes*: int
cliOptions*: seq[CliOption]
logTopics*: seq[string]
CodexConfigs* = object
configs*: seq[CodexConfig]
CodexConfig* = object
cliOptions: Table[StartUpCmd, Table[string, CliOption]]
cliPersistenceOptions: Table[PersistenceCmd, Table[string, CliOption]]
debugEnabled*: bool
CodexConfigError* = object of CatchableError

proc nodes*(config: CodexConfig, numNodes: int): CodexConfig =
if numNodes < 0:
raise newException(ValueError, "numNodes must be >= 0")
proc cliArgs*(config: CodexConfig): seq[string] {.gcsafe, raises: [CodexConfigError].}

var startConfig = config
startConfig.numNodes = numNodes
return startConfig
proc raiseCodexConfigError(msg: string) {.raises: [CodexConfigError].} =
raise newException(CodexConfigError, msg)

proc simulateProofFailuresFor*(
template convertError(body) =
try:
body
except CatchableError as e:
raiseCodexConfigError e.msg

proc init*(_: type CodexConfigs, nodes = 1): CodexConfigs {.raises: [].} =
CodexConfigs(configs: newSeq[CodexConfig](nodes))

func nodes*(self: CodexConfigs): int =
self.configs.len

proc checkBounds(self: CodexConfigs, idx: int) {.raises: [CodexConfigError].} =
if idx notin 0..<self.configs.len:
raiseCodexConfigError "index must be in bounds of the number of nodes"

proc buildConfig(
config: CodexConfig,
providerIdx: int,
failEveryNProofs: int
): CodexConfig =
msg: string): CodexConf {.raises: [CodexConfigError].} =

if providerIdx > config.numNodes - 1:
raise newException(ValueError, "provider index out of bounds")
proc postFix(msg: string): string =
if msg.len > 0:
": " & msg
else: ""

var startConfig = config
startConfig.cliOptions.add(
CliOption(
nodeIdx: some providerIdx,
key: "--simulate-proof-failures",
value: $failEveryNProofs
)
)
try:
return CodexConf.load(cmdLine = config.cliArgs, quitOnFailure = false)
except ConfigurationError as e:
raiseCodexConfigError msg & e.msg.postFix
except Exception as e:
## TODO: remove once proper exception handling added to nim-confutils
raiseCodexConfigError msg & e.msg.postFix

proc addCliOption*(
config: var CodexConfig,
group = PersistenceCmd.noCmd,
cliOption: CliOption) {.raises: [CodexConfigError].} =

var options = config.cliPersistenceOptions.getOrDefault(group)
options[cliOption.key] = cliOption # overwrite if already exists
config.cliPersistenceOptions[group] = options
discard config.buildConfig("Invalid cli arg " & $cliOption)

proc addCliOption*(
config: var CodexConfig,
group = PersistenceCmd.noCmd,
key: string, value = "") {.raises: [CodexConfigError].} =

config.addCliOption(group, CliOption(key: key, value: value))

proc addCliOption*(
config: var CodexConfig,
group = StartUpCmd.noCmd,
cliOption: CliOption) {.raises: [CodexConfigError].} =

var options = config.cliOptions.getOrDefault(group)
options[cliOption.key] = cliOption # overwrite if already exists
config.cliOptions[group] = options
discard config.buildConfig("Invalid cli arg " & $cliOption)

proc addCliOption*(
config: var CodexConfig,
group = StartUpCmd.noCmd,
key: string, value = "") {.raises: [CodexConfigError].} =

config.addCliOption(group, CliOption(key: key, value: value))

proc addCliOption*(
config: var CodexConfig,
cliOption: CliOption) {.raises: [CodexConfigError].} =

config.addCliOption(StartUpCmd.noCmd, cliOption)

proc addCliOption*(
config: var CodexConfig,
key: string, value = "") {.raises: [CodexConfigError].} =

config.addCliOption(StartUpCmd.noCmd, CliOption(key: key, value: value))

proc cliArgs*(
config: CodexConfig): seq[string] {.gcsafe, raises: [CodexConfigError].} =
## converts CodexConfig cli options and command groups in a sequence of args
## and filters out cli options by node index if provided in the CliOption
var args: seq[string] = @[]

convertError:
for cmd in StartUpCmd:
if config.cliOptions.hasKey(cmd):
if cmd != StartUpCmd.noCmd:
args.add $cmd
var opts = config.cliOptions[cmd].values.toSeq
args = args.concat( opts.map(o => $o) )

for cmd in PersistenceCmd:
if config.cliPersistenceOptions.hasKey(cmd):
if cmd != PersistenceCmd.noCmd:
args.add $cmd
var opts = config.cliPersistenceOptions[cmd].values.toSeq
args = args.concat( opts.map(o => $o) )

return args

proc logFile*(config: CodexConfig): ?string {.raises: [CodexConfigError].} =
let built = config.buildConfig("Invalid codex config cli params")
built.logFile

proc logLevel*(config: CodexConfig): LogLevel {.raises: [CodexConfigError].} =
convertError:
let built = config.buildConfig("Invalid codex config cli params")
return parseEnum[LogLevel](built.logLevel.toUpperAscii)

proc debug*(
self: CodexConfigs,
idx: int,
enabled = true): CodexConfigs {.raises: [CodexConfigError].} =
## output log in stdout for a specific node in the group

self.checkBounds idx

var startConfig = self
startConfig.configs[idx].debugEnabled = enabled
return startConfig

proc debug*(self: CodexConfigs, enabled = true): CodexConfigs {.raises: [].} =
## output log in stdout for all nodes in group
var startConfig = self
for config in startConfig.configs.mitems:
config.debugEnabled = enabled
return startConfig

proc withLogFile*(
self: CodexConfigs,
idx: int): CodexConfigs {.raises: [CodexConfigError].} =

self.checkBounds idx

var startConfig = self
startConfig.configs[idx].addCliOption("--log-file", "<updated_in_test>")
return startConfig

proc withLogFile*(
self: CodexConfigs): CodexConfigs {.raises: [CodexConfigError].} =
## typically called from test, sets config such that a log file should be
## created
var startConfig = self
for config in startConfig.configs.mitems:
config.addCliOption("--log-file", "<updated_in_test>")
return startConfig

proc withLogFile*(
self: var CodexConfig,
logFile: string) {.raises: [CodexConfigError].} = #: CodexConfigs =
## typically called internally from the test suite, sets a log file path to
## be created during the test run, for a specified node in the group
# var config = self
self.addCliOption("--log-file", logFile)
# return startConfig

proc withLogLevel*(
self: CodexConfig,
level: LogLevel | string): CodexConfig {.raises: [CodexConfigError].} =

var config = self
config.addCliOption("--log-level", $level)
return config

proc withLogLevel*(
self: CodexConfigs,
idx: int,
level: LogLevel | string): CodexConfigs {.raises: [CodexConfigError].} =

self.checkBounds idx

var startConfig = self
startConfig.configs[idx].addCliOption("--log-level", $level)
return startConfig

proc withLogLevel*(
self: CodexConfigs,
level: LogLevel | string): CodexConfigs {.raises: [CodexConfigError].} =

var startConfig = self
for config in startConfig.configs.mitems:
config.addCliOption("--log-level", $level)
return startConfig

proc withSimulateProofFailures*(
self: CodexConfigs,
idx: int,
failEveryNProofs: int
): CodexConfigs {.raises: [CodexConfigError].} =

self.checkBounds idx

var startConfig = self
startConfig.configs[idx].addCliOption(
StartUpCmd.persistence, "--simulate-proof-failures", $failEveryNProofs)
return startConfig

proc withSimulateProofFailures*(
self: CodexConfigs,
failEveryNProofs: int): CodexConfigs {.raises: [CodexConfigError].} =

var startConfig = self
for config in startConfig.configs.mitems:
config.addCliOption(
StartUpCmd.persistence, "--simulate-proof-failures", $failEveryNProofs)
return startConfig

proc logLevelWithTopics(
config: CodexConfig,
topics: varargs[string]): string {.raises: [CodexConfigError].} =

convertError:
var logLevel = LogLevel.INFO
let built = config.buildConfig("Invalid codex config cli params")
logLevel = parseEnum[LogLevel](built.logLevel.toUpperAscii)
let level = $logLevel & ";TRACE: " & topics.join(",")
return level

proc withLogTopics*(
config: CodexConfig,
topics: varargs[string]
): CodexConfig =
self: CodexConfigs,
idx: int,
topics: varargs[string]): CodexConfigs {.raises: [CodexConfigError].} =

var startConfig = config
startConfig.logTopics = startConfig.logTopics.concat(@topics)
self.checkBounds idx

convertError:
let config = self.configs[idx]
let level = config.logLevelWithTopics(topics)
var startConfig = self
return startConfig.withLogLevel(idx, level)

proc withLogTopics*(
self: CodexConfigs,
topics: varargs[string]
): CodexConfigs {.raises: [CodexConfigError].} =

var startConfig = self
for config in startConfig.configs.mitems:
let level = config.logLevelWithTopics(topics)
config = config.withLogLevel(level)
return startConfig

proc withStorageQuota*(
config: CodexConfig,
quota: NBytes
): CodexConfig =
self: CodexConfigs,
idx: int,
quota: NBytes): CodexConfigs {.raises: [CodexConfigError].} =

var startConfig = config
startConfig.cliOptions.add(
CliOption(key: "--storage-quota", value: $quota)
)
self.checkBounds idx

var startConfig = self
startConfig.configs[idx].addCliOption("--storage-quota", $quota)
return startConfig

proc withStorageQuota*(
self: CodexConfigs,
quota: NBytes): CodexConfigs {.raises: [CodexConfigError].} =

var startConfig = self
for config in startConfig.configs.mitems:
config.addCliOption("--storage-quota", $quota)
return startConfig
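Taken together, the new value-type API reads as a small builder: each `with*` call copies the group, stores the option under its command, and re-validates by round-tripping through `CodexConf.load`. A usage sketch (node counts, topics, and the quota value are illustrative):

```nim
import pkg/codex/units
import ./codexconfig

var configs = CodexConfigs.init(nodes = 3)
  .debug(idx = 0)                          # stdout logs for the first node only
  .withLogTopics("marketplace", "sales")   # raises CodexConfigError if invalid
  .withStorageQuota(idx = 2, quota = NBytes(1048576))

# every mutation was validated by parsing the assembled args:
echo configs.configs[0].cliArgs()
```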
@@ -40,17 +40,17 @@ method onOutputLineCaptured(node: CodexProcess, line: string) =
discard

proc dataDir(node: CodexProcess): string =
let config = CodexConf.load(cmdLine = node.arguments)
let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
return config.dataDir.string

proc ethAccount*(node: CodexProcess): Address =
let config = CodexConf.load(cmdLine = node.arguments)
let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
without ethAccount =? config.ethAccount:
raiseAssert "eth account not set"
return Address(ethAccount)

proc apiUrl*(node: CodexProcess): string =
let config = CodexConf.load(cmdLine = node.arguments)
let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
return "http://" & config.apiBindAddress & ":" & $config.apiPort & "/api/codex/v1"

proc client*(node: CodexProcess): CodexClient =
@@ -1,6 +1,15 @@
import ./nodeconfig

export nodeconfig

type
HardhatConfig* = ref object of NodeConfig
HardhatConfig* = object
logFile*: bool
debugEnabled*: bool

proc debug*(self: HardhatConfig, enabled = true): HardhatConfig =
## output log in stdout
var config = self
config.debugEnabled = enabled
return config

proc withLogFile*(self: HardhatConfig, logToFile: bool = true): HardhatConfig =
var config = self
config.logFile = logToFile
return config
@@ -3,13 +3,16 @@ import std/sequtils
import std/strutils
import std/sugar
import std/times
import pkg/codex/conf
import pkg/codex/logutils
import pkg/chronos/transports/stream
import pkg/ethers
import ./hardhatprocess
import pkg/questionable
import ./codexconfig
import ./codexprocess
import ./hardhatconfig
import ./codexconfig
import ./hardhatprocess
import ./nodeconfigs
import ../asynctest
import ../checktest
@@ -24,16 +27,15 @@ type
RunningNode* = ref object
role*: Role
node*: NodeProcess
NodeConfigs* = object
clients*: CodexConfig
providers*: CodexConfig
validators*: CodexConfig
hardhat*: HardhatConfig
Role* {.pure.} = enum
Client,
Provider,
Validator,
Hardhat
MultiNodeSuiteError = object of CatchableError

proc raiseMultiNodeSuiteError(msg: string) =
raise newException(MultiNodeSuiteError, msg)

proc nextFreePort(startPort: int): Future[int] {.async.} =
@@ -79,6 +81,7 @@ template multinodesuite*(name: string, body: untyped) =
var sanitized = pathSegment
for invalid in invalidFilenameChars.items:
sanitized = sanitized.replace(invalid, '_')
.replace(' ', '_')
sanitized

proc getLogFile(role: Role, index: ?int): string =
@@ -87,7 +90,7 @@

var logDir = currentSourcePath.parentDir() /
"logs" /
sanitize($starttime & " " & name) /
sanitize($starttime & "__" & name) /
sanitize($currentTestName)
createDir(logDir)
@@ -110,53 +113,56 @@ template multinodesuite*(name: string, body: untyped) =
args.add "--log-file=" & updatedLogFile

let node = await HardhatProcess.startNode(args, config.debugEnabled, "hardhat")
await node.waitUntilStarted()
try:
await node.waitUntilStarted()
except NodeProcessError as e:
raiseMultiNodeSuiteError "hardhat node not started: " & e.msg

trace "hardhat node started"
return node

proc newCodexProcess(roleIdx: int,
config: CodexConfig,
conf: CodexConfig,
role: Role
): Future[NodeProcess] {.async.} =

let nodeIdx = running.len
var conf = config
var config = conf

if nodeIdx > accounts.len - 1:
raiseAssert("Cannot start node at nodeIdx " & $nodeIdx &
", not enough eth accounts.")
raiseMultiNodeSuiteError "Cannot start node at nodeIdx " & $nodeIdx &
", not enough eth accounts."

let datadir = getTempDir() / "Codex" /
sanitize($starttime) /
sanitize($role & "_" & $roleIdx)

if conf.logFile:
let updatedLogFile = getLogFile(role, some roleIdx)
conf.cliOptions.add CliOption(key: "--log-file", value: updatedLogFile)
try:
if config.logFile.isSome:
let updatedLogFile = getLogFile(role, some roleIdx)
config.withLogFile(updatedLogFile)

let logLevel = conf.logLevel |? LogLevel.INFO
if conf.logTopics.len > 0:
conf.cliOptions.add CliOption(
key: "--log-level",
value: $logLevel & ";TRACE: " & conf.logTopics.join(",")
)
else:
conf.cliOptions.add CliOption(key: "--log-level", value: $logLevel)
config.addCliOption("--api-port", $ await nextFreePort(8080 + nodeIdx))
config.addCliOption("--data-dir", datadir)
config.addCliOption("--nat", "127.0.0.1")
config.addCliOption("--listen-addrs", "/ip4/127.0.0.1/tcp/0")
config.addCliOption("--disc-ip", "127.0.0.1")
config.addCliOption("--disc-port", $ await nextFreePort(8090 + nodeIdx))

var args = conf.cliOptions.map(o => $o)
.concat(@[
"--api-port=" & $ await nextFreePort(8080 + nodeIdx),
"--data-dir=" & datadir,
"--nat=127.0.0.1",
"--listen-addrs=/ip4/127.0.0.1/tcp/0",
"--disc-ip=127.0.0.1",
"--disc-port=" & $ await nextFreePort(8090 + nodeIdx),
"--eth-account=" & $accounts[nodeIdx]])
except CodexConfigError as e:
raiseMultiNodeSuiteError "invalid cli option, error: " & e.msg

let node = await CodexProcess.startNode(args, conf.debugEnabled, $role & $roleIdx)
await node.waitUntilStarted()
trace "node started", nodeName = $role & $roleIdx
let node = await CodexProcess.startNode(
config.cliArgs,
config.debugEnabled,
$role & $roleIdx
)

try:
await node.waitUntilStarted()
trace "node started", nodeName = $role & $roleIdx
except NodeProcessError as e:
raiseMultiNodeSuiteError "node not started, error: " & e.msg

return node
@@ -184,85 +190,36 @@ template multinodesuite*(name: string, body: untyped) =
if r.role == Role.Validator:
CodexProcess(r.node)

proc startHardhatNode(): Future[NodeProcess] {.async.} =
var config = nodeConfigs.hardhat
proc startHardhatNode(config: HardhatConfig): Future[NodeProcess] {.async.} =
return await newHardhatProcess(config, Role.Hardhat)

proc startClientNode(): Future[NodeProcess] {.async.} =
proc startClientNode(conf: CodexConfig): Future[NodeProcess] {.async.} =
let clientIdx = clients().len
var config = nodeConfigs.clients
config.cliOptions.add CliOption(key: "--persistence")
var config = conf
config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len])
return await newCodexProcess(clientIdx, config, Role.Client)

proc startProviderNode(): Future[NodeProcess] {.async.} =
proc startProviderNode(conf: CodexConfig): Future[NodeProcess] {.async.} =
let providerIdx = providers().len
var config = nodeConfigs.providers
config.cliOptions.add CliOption(key: "--bootstrap-node", value: bootstrap)
config.cliOptions.add CliOption(key: "--persistence")

# filter out provider options by provided index
config.cliOptions = config.cliOptions.filter(
o => (let idx = o.nodeIdx |? providerIdx; idx == providerIdx)
)
var config = conf
config.addCliOption("--bootstrap-node", bootstrap)
config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len])
config.addCliOption(PersistenceCmd.prover, "--circom-r1cs", "tests/circuits/fixtures/proof_main.r1cs")
config.addCliOption(PersistenceCmd.prover, "--circom-wasm", "tests/circuits/fixtures/proof_main.wasm")
config.addCliOption(PersistenceCmd.prover, "--circom-zkey", "tests/circuits/fixtures/proof_main.zkey")

return await newCodexProcess(providerIdx, config, Role.Provider)

proc startValidatorNode(): Future[NodeProcess] {.async.} =
proc startValidatorNode(conf: CodexConfig): Future[NodeProcess] {.async.} =
let validatorIdx = validators().len
var config = nodeConfigs.validators
config.cliOptions.add CliOption(key: "--bootstrap-node", value: bootstrap)
config.cliOptions.add CliOption(key: "--validator")
var config = conf
config.addCliOption("--bootstrap-node", bootstrap)
config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len])
config.addCliOption(StartUpCmd.persistence, "--validator")

return await newCodexProcess(validatorIdx, config, Role.Validator)

setup:
if not nodeConfigs.hardhat.isNil:
let node = await startHardhatNode()
running.add RunningNode(role: Role.Hardhat, node: node)

try:
# Workaround for https://github.com/NomicFoundation/hardhat/issues/2053
# Do not use websockets, but use http and polling to stop subscriptions
# from being removed after 5 minutes
ethProvider = JsonRpcProvider.new("http://localhost:8545")
# if hardhat was NOT started by the test, take a snapshot so it can be
# reverted in the test teardown
if nodeConfigs.hardhat.isNil:
snapshot = await send(ethProvider, "evm_snapshot")
# ensure that we have a recent block with a fresh timestamp
discard await send(ethProvider, "evm_mine")
accounts = await ethProvider.listAccounts()
except CatchableError as e:
fatal "failed to connect to hardhat", error = e.msg
raiseAssert "Hardhat not running. Run hardhat manually before executing tests, or include a HardhatConfig in the test setup."

if not nodeConfigs.clients.isNil:
for i in 0..<nodeConfigs.clients.numNodes:
let node = await startClientNode()
running.add RunningNode(
role: Role.Client,
node: node
)
if i == 0:
bootstrap = CodexProcess(node).client.info()["spr"].getStr()

if not nodeConfigs.providers.isNil:
for i in 0..<nodeConfigs.providers.numNodes:
let node = await startProviderNode()
running.add RunningNode(
role: Role.Provider,
node: node
)

if not nodeConfigs.validators.isNil:
for i in 0..<nodeConfigs.validators.numNodes:
let node = await startValidatorNode()
running.add RunningNode(
role: Role.Validator,
node: node
)

teardown:
proc teardownImpl() {.async.} =
for nodes in @[validators(), clients(), providers()]:
for node in nodes:
await node.stop() # also stops rest client
@@ -278,4 +235,77 @@ template multinodesuite*(name: string, body: untyped) =
+    running = @[]
+
+  template failAndTeardownOnError(message: string, tryBody: untyped) =
+    try:
+      tryBody
+    except CatchableError as er:
+      fatal message, error=er.msg
+      echo "[FATAL] ", message, ": ", er.msg
+      await teardownImpl()
+      when declared(teardownAllIMPL):
+        teardownAllIMPL()
+      fail()
+      quit(1)
+
+  setup:
+    if var conf =? nodeConfigs.hardhat:
+      try:
+        let node = await startHardhatNode(conf)
+        running.add RunningNode(role: Role.Hardhat, node: node)
+      except CatchableError as e:
+        echo "failed to start hardhat node"
+        fail()
+        quit(1)
+
+    try:
+      # Workaround for https://github.com/NomicFoundation/hardhat/issues/2053
+      # Do not use websockets, but use http and polling to stop subscriptions
+      # from being removed after 5 minutes
+      ethProvider = JsonRpcProvider.new("http://localhost:8545")
+      # if hardhat was NOT started by the test, take a snapshot so it can be
+      # reverted in the test teardown
+      if nodeConfigs.hardhat.isNone:
+        snapshot = await send(ethProvider, "evm_snapshot")
+      # ensure that we have a recent block with a fresh timestamp
+      discard await send(ethProvider, "evm_mine")
+      accounts = await ethProvider.listAccounts()
+    except CatchableError as e:
+      echo "Hardhat not running. Run hardhat manually " &
+        "before executing tests, or include a " &
+        "HardhatConfig in the test setup."
+      fail()
+      quit(1)
+
+    if var clients =? nodeConfigs.clients:
+      failAndTeardownOnError "failed to start client nodes":
+        for config in clients.configs:
+          let node = await startClientNode(config)
+          running.add RunningNode(
+            role: Role.Client,
+            node: node
+          )
+          if clients().len == 1:
+            bootstrap = CodexProcess(node).client.info()["spr"].getStr()
+
+    if var providers =? nodeConfigs.providers:
+      failAndTeardownOnError "failed to start provider nodes":
+        for config in providers.configs.mitems:
+          let node = await startProviderNode(config)
+          running.add RunningNode(
+            role: Role.Provider,
+            node: node
+          )
+
+    if var validators =? nodeConfigs.validators:
+      failAndTeardownOnError "failed to start validator nodes":
+        for config in validators.configs.mitems:
+          let node = await startValidatorNode(config)
+          running.add RunningNode(
+            role: Role.Validator,
+            node: node
+          )
+
+  teardown:
+    await teardownImpl()

   body

@@ -0,0 +1,11 @@
+import pkg/questionable
+import ./codexconfig
+import ./hardhatconfig
+
+type
+  NodeConfigs* = object
+    clients*: ?CodexConfigs
+    providers*: ?CodexConfigs
+    validators*: ?CodexConfigs
+    hardhat*: ?HardhatConfig

@@ -23,6 +23,7 @@ type
     debug: bool
     trackedFutures*: TrackedFutures
     name*: string
+  NodeProcessError* = object of CatchableError

 method workingDir(node: NodeProcess): string {.base.} =
   raiseAssert "not implemented"

@@ -54,6 +55,8 @@ method start*(node: NodeProcess) {.base, async.} =
     processOptions = poptions

+  try:
     if node.debug:
       echo "starting codex node with args: ", node.arguments.join(" ")
     node.process = await startProcess(
       node.executable,
       node.workingDir,

@@ -149,12 +152,15 @@ proc waitUntilStarted*(node: NodeProcess) {.async.} =
   let started = newFuture[void]()
   try:
     discard node.captureOutput(node.startedOutput, started).track(node)
-    await started.wait(5.seconds)
-  except AsyncTimeoutError as e:
+    await started.wait(35.seconds) # allow enough time for proof generation
+  except AsyncTimeoutError:
     # attempt graceful shutdown in case node was partially started, prevent
     # zombies
     await node.stop()
-    raiseAssert "node did not output '" & node.startedOutput & "'"
+    # raise error here so that all nodes (not just this one) can be
+    # shutdown gracefully
+    raise newException(NodeProcessError, "node did not output '" &
+      node.startedOutput & "'")

 proc restart*(node: NodeProcess) {.async.} =
   await node.stop()

@@ -58,11 +58,11 @@ proc startNode*(args: openArray[string], debug: string | bool = false): NodeProc
   node

 proc dataDir(node: NodeProcess): string =
-  let config = CodexConf.load(cmdLine = node.arguments)
+  let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
   config.dataDir.string

 proc apiUrl(node: NodeProcess): string =
-  let config = CodexConf.load(cmdLine = node.arguments)
+  let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
   "http://" & config.apiBindAddress & ":" & $config.apiPort & "/api/codex/v1"

 proc client*(node: NodeProcess): CodexClient =

@@ -1,40 +1,50 @@
 import std/unittest
 import std/tempfiles
 import codex/conf
 import codex/utils/fileutils
 import ./nodes

 suite "Command line interface":

   let account = "4242424242424242424242424242424242424242"
   let key = "4242424242424242424242424242424242424242424242424242424242424242"

   test "complains when persistence is enabled without ethereum account":
-    let node = startNode(@["--persistence"])
+    let node = startNode(@[
+      "persistence"
+    ])
     node.waitUntilOutput("Persistence enabled, but no Ethereum account was set")
     node.stop()

   test "complains when validator is enabled without ethereum account":
     let node = startNode(@["--validator"])
     node.waitUntilOutput("Validator enabled, but no Ethereum account was set")
     node.stop()

   test "complains when ethereum account is set when not needed":
     let node = startNode(@["--eth-account=" & account])
     node.waitUntilOutput("Ethereum account was set, but neither persistence nor validator is enabled")
     node.stop()

   test "complains when ethereum private key is set when not needed":
     let keyFile = genTempPath("", "")
     discard secureWriteFile(keyFile, key)
     let node = startNode(@["--eth-private-key=" & keyFile])
     node.waitUntilOutput("Ethereum account was set, but neither persistence nor validator is enabled")
     node.stop()
     discard removeFile(keyFile)

   test "complains when ethereum private key file has wrong permissions":
     let unsafeKeyFile = genTempPath("", "")
     discard unsafeKeyFile.writeFile(key, 0o666)
-    let node = startNode(@["--persistence", "--eth-private-key=" & unsafeKeyFile])
+    let node = startNode(@[
+      "persistence",
+      "--eth-private-key=" & unsafeKeyFile])
     node.waitUntilOutput("Ethereum private key file does not have safe file permissions")
     node.stop()
     discard removeFile(unsafeKeyFile)
+
+  test "complains when persistence is enabled without accessible r1cs file":
+    let node = startNode(@["persistence", "prover"])
+    node.waitUntilOutput("r1cs file not readable, doesn't exist or wrong extension (.r1cs)")
+    node.stop()
+
+  test "complains when persistence is enabled without accessible wasm file":
+    let node = startNode(@[
+      "persistence",
+      "prover",
+      "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs"
+    ])
+    node.waitUntilOutput("wasm file not readable, doesn't exist or wrong extension (.wasm)")
+    node.stop()
+
+  test "complains when persistence is enabled without accessible zkey file":
+    let node = startNode(@[
+      "persistence",
+      "prover",
+      "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs",
+      "--circom-wasm=tests/circuits/fixtures/proof_main.wasm"
+    ])
+    node.waitUntilOutput("zkey file not readable, doesn't exist or wrong extension (.zkey)")
+    node.stop()

@@ -1,7 +1,7 @@
 import std/math
 import pkg/stew/byteutils
 import pkg/codex/units
 import ./marketplacesuite
+import ./nodeconfigs
 import ../examples

 marketplacesuite "Marketplace payouts":

@@ -9,21 +9,21 @@ marketplacesuite "Marketplace payouts":

   test "expired request partially pays out for stored time",
     NodeConfigs(
       # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
       # hardhat: HardhatConfig().withLogFile()
+      hardhat: HardhatConfig.none,

       clients:
-        CodexConfig()
-          .nodes(1),
+        CodexConfigs.init(nodes=1)
         # .debug() # uncomment to enable console log output
         # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-        # .withLogTopics("node", "erasure"),
+        # .withLogTopics("node", "erasure")
+        .some,

       providers:
-        CodexConfig()
-          .nodes(1)
+        CodexConfigs.init(nodes=1)
         # .debug() # uncomment to enable console log output
         # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-        # .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock"),
+        # .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock")
+        .some,
   ):
     let reward = 400.u256
     let duration = 10.periods

@@ -1,4 +1,3 @@
-import std/math
 from std/times import inMilliseconds
 import pkg/codex/logutils
 import pkg/stew/byteutils

@@ -7,8 +6,9 @@ import ../contracts/deployment
 import ../codex/helpers
 import ../examples
 import ./marketplacesuite
+import ./nodeconfigs

 export chronicles
 export logutils

 logScope:
   topics = "integration test proofs"

@@ -18,21 +18,22 @@ marketplacesuite "Hosts submit regular proofs":

   test "hosts submit periodic proofs for slots they fill", NodeConfigs(
     # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
     # hardhat: HardhatConfig().withLogFile(),
+    hardhat:
+      HardhatConfig.none,

     clients:
-      CodexConfig()
-        .nodes(1),
+      CodexConfigs.init(nodes=1)
       # .debug() # uncomment to enable console log output
       # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-      # .withLogTopics("node"),
+      # .withLogTopics("node")
+      .some,

     providers:
-      CodexConfig()
-        .nodes(1)
+      CodexConfigs.init(nodes=1)
       # .debug() # uncomment to enable console log output
       # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-      # .withLogTopics("marketplace", "sales", "reservations", "node"),
+      # .withLogTopics("marketplace", "sales", "reservations", "node")
+      .some,
   ):
     let client0 = clients()[0].client
     let expiry = 5.periods

@@ -72,29 +73,30 @@ marketplacesuite "Simulate invalid proofs":

   test "slot is freed after too many invalid proofs submitted", NodeConfigs(
     # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
     # hardhat: HardhatConfig().withLogFile(),
+    hardhat:
+      HardhatConfig.none,

     clients:
-      CodexConfig()
-        .nodes(1),
+      CodexConfigs.init(nodes=1)
       # .debug() # uncomment to enable console log output
       # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-      # .withLogTopics("node", "clock"),
+      # .withLogTopics("node", "clock")
+      .some,

     providers:
-      CodexConfig()
-        .nodes(1)
-        .simulateProofFailuresFor(providerIdx=0, failEveryNProofs=1),
+      CodexConfigs.init(nodes=1)
+        .withSimulateProofFailures(idx=0, failEveryNProofs=1)
       # .debug() # uncomment to enable console log output
       # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-      # .withLogTopics("marketplace", "sales", "reservations", "node", "clock"),
+      # .withLogTopics("marketplace", "sales", "reservations", "node", "clock")
+      .some,

     validators:
-      CodexConfig()
-        .nodes(1)
+      CodexConfigs.init(nodes=1)
       # .debug() # uncomment to enable console log output
       # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
       # .withLogTopics("validator", "onchain", "ethers", "clock")
+      .some
   ):
     let client0 = clients()[0].client
     let expiry = 5.periods

@@ -130,29 +132,29 @@ marketplacesuite "Simulate invalid proofs":

   test "slot is not freed when not enough invalid proofs submitted", NodeConfigs(
     # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
     # hardhat: HardhatConfig().withLogFile(),
+    hardhat: HardhatConfig.none,

     clients:
-      CodexConfig()
-        .nodes(1),
+      CodexConfigs.init(nodes=1)
       # .debug() # uncomment to enable console log output
       # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-      # .withLogTopics("marketplace", "sales", "reservations", "node", "clock"),
+      # .withLogTopics("marketplace", "sales", "reservations", "node", "clock")
+      .some,

     providers:
-      CodexConfig()
-        .nodes(1)
-        .simulateProofFailuresFor(providerIdx=0, failEveryNProofs=1),
+      CodexConfigs.init(nodes=1)
+        .withSimulateProofFailures(idx=0, failEveryNProofs=1)
       # .debug() # uncomment to enable console log output
       # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-      # .withLogTopics("marketplace", "sales", "reservations", "node"),
+      # .withLogTopics("marketplace", "sales", "reservations", "node")
+      .some,

     validators:
-      CodexConfig()
-        .nodes(1)
+      CodexConfigs.init(nodes=1)
       # .debug()
       # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
       # .withLogTopics("validator", "onchain", "ethers", "clock")
+      .some
   ):
     let client0 = clients()[0].client
     let expiry = 5.periods

@@ -38,7 +38,11 @@ template twonodessuite*(name: string, debug1, debug2: string, body) =
     "--disc-ip=127.0.0.1",
     "--disc-port=8090",
     "--listen-addrs=/ip4/127.0.0.1/tcp/0",
-    "--persistence",
+    "persistence",
+    "prover",
+    "--circom-r1cs=vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.r1cs",
+    "--circom-wasm=vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.wasm",
+    "--circom-zkey=vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.zkey",
     "--eth-account=" & $account1
   ]

@@ -58,7 +62,11 @@ template twonodessuite*(name: string, debug1, debug2: string, body) =
     "--disc-port=8091",
     "--listen-addrs=/ip4/127.0.0.1/tcp/0",
     "--bootstrap-node=" & bootstrap,
-    "--persistence",
+    "persistence",
+    "prover",
+    "--circom-r1cs=vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.r1cs",
+    "--circom-wasm=vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.wasm",
+    "--circom-zkey=vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.zkey",
     "--eth-account=" & $account2
   ]