rework cli to use commands for `persistence` mode

parent 9e13d2251a
commit 0c3aa00d20
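In short: persistence stops being the boolean `--persistence` flag and becomes a `persistence` subcommand (`StartUpCmd.persistence` in `conf.nim`), with the Ethereum and Marketplace options nested under it; a small `func persistence*(self: CodexConf): bool` shim keeps existing `config.persistence` call sites compiling. Judging by the updated tests below, an invocation such as `codex --persistence --eth-account=<addr>` becomes `codex persistence --eth-account=<addr>`.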
codex.nim (141 lines changed)
@@ -23,6 +23,7 @@ import ./codex/codex
 import ./codex/logutils
 import ./codex/units
 import ./codex/utils/keyutils
+import ./codex/codextypes
 
 export codex, conf, libp2p, chronos, logutils
 
@@ -54,91 +55,93 @@ when isMainModule:
   config.setupLogging()
   config.setupMetrics()
 
-  case config.cmd:
-  of StartUpCommand.noCommand:
-
-    if config.nat == ValidIpAddress.init(IPv4_any()):
-      error "`--nat` cannot be set to the any (`0.0.0.0`) address"
-      quit QuitFailure
-
-    if config.nat == ValidIpAddress.init("127.0.0.1"):
-      warn "`--nat` is set to loopback, your node wont properly announce over the DHT"
-
-    if not(checkAndCreateDataDir((config.dataDir).string)):
-      # We are unable to access/create data folder or data folder's
-      # permissions are insecure.
-      quit QuitFailure
-
-    trace "Data dir initialized", dir = $config.dataDir
-
-    if not(checkAndCreateDataDir((config.dataDir / "repo"))):
-      # We are unable to access/create data folder or data folder's
-      # permissions are insecure.
-      quit QuitFailure
-
-    trace "Repo dir initialized", dir = config.dataDir / "repo"
-
-    var
-      state: CodexStatus
-      pendingFuts: seq[Future[void]]
-
-    let
-      keyPath =
-        if isAbsolute(config.netPrivKeyFile):
-          config.netPrivKeyFile
-        else:
-          config.dataDir / config.netPrivKeyFile
-
-      privateKey = setupKey(keyPath).expect("Should setup private key!")
-      server = CodexServer.new(config, privateKey)
-
-    ## Ctrl+C handling
-    proc controlCHandler() {.noconv.} =
-      when defined(windows):
-        # workaround for https://github.com/nim-lang/Nim/issues/4057
-        try:
-          setupForeignThreadGc()
-        except Exception as exc: raiseAssert exc.msg # shouldn't happen
-      notice "Shutting down after having received SIGINT"
-
-      pendingFuts.add(server.stop())
-      state = CodexStatus.Stopping
-
-      notice "Stopping Codex"
-
-    try:
-      setControlCHook(controlCHandler)
-    except Exception as exc: # TODO Exception
-      warn "Cannot set ctrl-c handler", msg = exc.msg
-
-    # equivalent SIGTERM handler
-    when defined(posix):
-      proc SIGTERMHandler(signal: cint) {.noconv.} =
-        notice "Shutting down after having received SIGTERM"
-
-        pendingFuts.add(server.stop())
-        state = CodexStatus.Stopping
-
-        notice "Stopping Codex"
-
-      c_signal(ansi_c.SIGTERM, SIGTERMHandler)
-
-    pendingFuts.add(server.start())
-
-    state = CodexStatus.Running
-    while state == CodexStatus.Running:
-      # poll chronos
-      chronos.poll()
-
-    # wait fot futures to finish
-    let res = waitFor allFinished(pendingFuts)
-    state = CodexStatus.Stopped
-
-    if res.anyIt( it.failed ):
-      error "Codex didn't shutdown correctly"
-      quit QuitFailure
-
-    notice "Exited codex"
-
-  of StartUpCommand.initNode:
-    discard
+  if config.nat == ValidIpAddress.init(IPv4_any()):
+    error "`--nat` cannot be set to the any (`0.0.0.0`) address"
+    quit QuitFailure
+
+  if config.nat == ValidIpAddress.init("127.0.0.1"):
+    warn "`--nat` is set to loopback, your node wont properly announce over the DHT"
+
+  if not(checkAndCreateDataDir((config.dataDir).string)):
+    # We are unable to access/create data folder or data folder's
+    # permissions are insecure.
+    quit QuitFailure
+
+  trace "Data dir initialized", dir = $config.dataDir
+
+  if not(checkAndCreateDataDir((config.dataDir / "repo"))):
+    # We are unable to access/create data folder or data folder's
+    # permissions are insecure.
+    quit QuitFailure
+
+  trace "Repo dir initialized", dir = config.dataDir / "repo"
+
+  var
+    state: CodexStatus
+    pendingFuts: seq[Future[void]]
+
+  let
+    keyPath =
+      if isAbsolute(config.netPrivKeyFile):
+        config.netPrivKeyFile
+      else:
+        config.dataDir / config.netPrivKeyFile
+
+    privateKey = setupKey(keyPath).expect("Should setup private key!")
+    server = try:
+      CodexServer.new(config, privateKey)
+    except Exception as exc:
+      error "Failed to start Codex", msg = exc.msg
+      quit QuitFailure
+
+  ## Ctrl+C handling
+  proc controlCHandler() {.noconv.} =
+    when defined(windows):
+      # workaround for https://github.com/nim-lang/Nim/issues/4057
+      try:
+        setupForeignThreadGc()
+      except Exception as exc: raiseAssert exc.msg # shouldn't happen
+    notice "Shutting down after having received SIGINT"
+
+    pendingFuts.add(server.stop())
+    state = CodexStatus.Stopping
+
+    notice "Stopping Codex"
+
+  try:
+    setControlCHook(controlCHandler)
+  except Exception as exc: # TODO Exception
+    warn "Cannot set ctrl-c handler", msg = exc.msg
+
+  # equivalent SIGTERM handler
+  when defined(posix):
+    proc SIGTERMHandler(signal: cint) {.noconv.} =
+      notice "Shutting down after having received SIGTERM"
+
+      pendingFuts.add(server.stop())
+      state = CodexStatus.Stopping
+
+      notice "Stopping Codex"
+
+    c_signal(ansi_c.SIGTERM, SIGTERMHandler)
+
+  pendingFuts.add(server.start())
+
+  state = CodexStatus.Running
+  while state == CodexStatus.Running:
+    # poll chronos
+    chronos.poll()
+
+  # wait fot futures to finish
+  let res = waitFor allFinished(pendingFuts)
+  state = CodexStatus.Stopped
+
+  if res.anyIt( it.failed ):
+    error "Codex didn't shutdown correctly"
+    quit QuitFailure
+
+  notice "Exited codex"
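The startup and shutdown flow survives the rework unchanged apart from the de-indent out of the old `case` and the new `try/except` around `CodexServer.new`. For orientation, here is a condensed, self-contained sketch of that run-loop pattern under chronos (illustrative only; `stopServer` and the timings are stand-ins, not Codex code):

  import std/sequtils
  import pkg/chronos

  type Status = enum Running, Stopping, Stopped

  var
    state = Status.Running
    pendingFuts: seq[Future[void]]

  proc stopServer(): Future[void] {.async.} =
    await sleepAsync(10.milliseconds)   # stand-in for server.stop()

  # the signal handler only enqueues the stop future and flips the flag;
  # the actual teardown runs on the chronos dispatcher below
  setControlCHook(proc() {.noconv.} =
    pendingFuts.add(stopServer())
    state = Status.Stopping
  )

  pendingFuts.add(sleepAsync(50.milliseconds))  # stand-in for server.start()

  while state == Status.Running:
    chronos.poll()                        # drive the dispatcher one step
    if pendingFuts.allIt(it.finished()):  # sketch-only exit so the example ends
      state = Status.Stopping

  # drain whatever is still in flight before declaring the node stopped
  discard waitFor allFinished(pendingFuts)
  state = Status.Stopped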
codex/codex.nim (130 lines changed)
@@ -22,12 +22,14 @@ import pkg/stew/io2
 import pkg/stew/shims/net as stewnet
 import pkg/datastore
+import pkg/ethers except Rng
 import pkg/stew/io2
 
 import ./node
 import ./conf
 import ./rng
 import ./rest/api
 import ./stores
+import ./slots
 import ./blockexchange
 import ./utils/fileutils
 import ./erasure
 
@@ -65,77 +67,70 @@ proc bootstrapInteractions(
     config = s.config
     repo = s.repoStore
 
-  if not config.persistence and not config.validator:
-    if config.ethAccount.isSome or config.ethPrivateKey.isSome:
-      warn "Ethereum account was set, but neither persistence nor validator is enabled"
-    return
-
-  if not config.ethAccount.isSome and not config.ethPrivateKey.isSome:
-    if config.persistence:
-      error "Persistence enabled, but no Ethereum account was set"
-    if config.validator:
-      error "Validator enabled, but no Ethereum account was set"
-    quit QuitFailure
-
-  let provider = JsonRpcProvider.new(config.ethProvider)
-  var signer: Signer
-  if account =? config.ethAccount:
-    signer = provider.getSigner(account)
-  elif keyFile =? config.ethPrivateKey:
-    without isSecure =? checkSecureFile(keyFile):
-      error "Could not check file permissions: does Ethereum private key file exist?"
-      quit QuitFailure
-    if not isSecure:
-      error "Ethereum private key file does not have safe file permissions"
-      quit QuitFailure
-    without key =? keyFile.readAllChars():
-      error "Unable to read Ethereum private key file"
-      quit QuitFailure
-    without wallet =? EthWallet.new(key.strip(), provider):
-      error "Invalid Ethereum private key in file"
-      quit QuitFailure
-    signer = wallet
-
-  let deploy = Deployment.new(provider, config)
-  without marketplaceAddress =? await deploy.address(Marketplace):
-    error "No Marketplace address was specified or there is no known address for the current network"
-    quit QuitFailure
-
-  let marketplace = Marketplace.new(marketplaceAddress, signer)
-  let market = OnChainMarket.new(marketplace)
-  let clock = OnChainClock.new(provider)
-
-  var client: ?ClientInteractions
-  var host: ?HostInteractions
-  var validator: ?ValidatorInteractions
-
-  if config.validator or config.persistence:
-    s.codexNode.clock = clock
-  else:
-    s.codexNode.clock = SystemClock()
-
-  if config.persistence:
-    # This is used for simulation purposes. Normal nodes won't be compiled with this flag
-    # and hence the proof failure will always be 0.
-    when codex_enable_proof_failures:
-      let proofFailures = config.simulateProofFailures
-      if proofFailures > 0:
-        warn "Enabling proof failure simulation!"
-    else:
-      let proofFailures = 0
-      if config.simulateProofFailures > 0:
-        warn "Proof failure simulation is not enabled for this build! Configuration ignored"
-
-    let purchasing = Purchasing.new(market, clock)
-    let sales = Sales.new(market, clock, repo, proofFailures)
-    client = some ClientInteractions.new(clock, purchasing)
-    host = some HostInteractions.new(clock, sales)
-  if config.validator:
-    let validation = Validation.new(clock, market, config.validatorMaxSlots)
-    validator = some ValidatorInteractions.new(clock, validation)
-
-  s.codexNode.contracts = (client, host, validator)
+  if not config.ethAccount.isSome and not config.ethPrivateKey.isSome:
+    error "Persistence enabled, but no Ethereum account was set"
+    quit QuitFailure
+
+  let provider = JsonRpcProvider.new(config.ethProvider)
+  var signer: Signer
+  if account =? config.ethAccount:
+    signer = provider.getSigner(account)
+  elif keyFile =? config.ethPrivateKey:
+    without isSecure =? checkSecureFile(keyFile):
+      error "Could not check file permissions: does Ethereum private key file exist?"
+      quit QuitFailure
+    if not isSecure:
+      error "Ethereum private key file does not have safe file permissions"
+      quit QuitFailure
+    without key =? keyFile.readAllChars():
+      error "Unable to read Ethereum private key file"
+      quit QuitFailure
+    without wallet =? EthWallet.new(key.strip(), provider):
+      error "Invalid Ethereum private key in file"
+      quit QuitFailure
+    signer = wallet
+
+  let deploy = Deployment.new(provider, config)
+  without marketplaceAddress =? await deploy.address(Marketplace):
+    error "No Marketplace address was specified or there is no known address for the current network"
+    quit QuitFailure
+
+  let marketplace = Marketplace.new(marketplaceAddress, signer)
+  let market = OnChainMarket.new(marketplace)
+  let clock = OnChainClock.new(provider)
+
+  var client: ?ClientInteractions
+  var host: ?HostInteractions
+  var validator: ?ValidatorInteractions
+
+  if config.validator or config.persistence:
+    s.codexNode.clock = clock
+  else:
+    s.codexNode.clock = SystemClock()
+
+  # This is used for simulation purposes. Normal nodes won't be compiled with this flag
+  # and hence the proof failure will always be 0.
+  when codex_enable_proof_failures:
+    let proofFailures = config.simulateProofFailures
+    if proofFailures > 0:
+      warn "Enabling proof failure simulation!"
+  else:
+    let proofFailures = 0
+    if config.simulateProofFailures > 0:
+      warn "Proof failure simulation is not enabled for this build! Configuration ignored"
+
+  let purchasing = Purchasing.new(market, clock)
+  let sales = Sales.new(market, clock, repo, proofFailures)
+  client = some ClientInteractions.new(clock, purchasing)
+  host = some HostInteractions.new(clock, sales)
+
+  if config.validator:
+    let validation = Validation.new(clock, market, config.validatorMaxSlots)
+    validator = some ValidatorInteractions.new(clock, validation)
+
+  s.codexNode.contracts = (client, host, validator)
@@ -267,6 +262,7 @@ proc new*(
         switch = switch,
         networkStore = store,
         engine = engine,
+        prover = Prover.none,
         discovery = discovery)
 
       restServer = RestServerRef.new(
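One detail worth noting in `bootstrapInteractions`: the proof-failure rate is resolved at compile time, not at runtime. A minimal sketch of the same `when`/`booldefine` pattern, using a hypothetical `enable_proof_failures` define (this is not the Codex module itself):

  # build with `nim c -d:enable_proof_failures ...` to flip the branch
  const enable_proof_failures {.booldefine.} = false

  proc effectiveProofFailures(configured: int): int =
    when enable_proof_failures:
      if configured > 0:
        echo "enabling proof failure simulation"
      configured   # simulation builds honour the configured rate
    else:
      if configured > 0:
        echo "proof failure simulation not compiled in; ignoring"
      0            # release builds compile the feature out entirely

  when isMainModule:
    echo effectiveProofFailures(3)   # prints 0 unless built with the define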
codex/conf.nim (295 lines changed)
@@ -7,9 +7,7 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.
 
-import pkg/upraises
-
-push: {.upraises: [].}
+{.push raises: [].}
 
 import std/os
 import std/terminal
@@ -33,30 +31,43 @@ import pkg/ethers
 import pkg/questionable
 import pkg/questionable/results
 
+import ./codextypes
 import ./discovery
 import ./logutils
 import ./stores
 import ./units
 import ./utils
 
-export units
-export net
+export units, net, codextypes
+
+export
+  DefaultQuotaBytes,
+  DefaultBlockTtl,
+  DefaultBlockMaintenanceInterval,
+  DefaultNumberOfBlocksToMaintainPerInterval
+
+proc defaultDataDir*(): string =
+  let dataDir = when defined(windows):
+      "AppData" / "Roaming" / "Codex"
+    elif defined(macosx):
+      "Library" / "Application Support" / "Codex"
+    else:
+      ".cache" / "codex"
+
+  getHomeDir() / dataDir
 
 const
   codex_enable_api_debug_peers* {.booldefine.} = false
   codex_enable_proof_failures* {.booldefine.} = false
   codex_use_hardhat* {.booldefine.} = false
   codex_enable_log_counter* {.booldefine.} = false
 
+  DefaultDataDir* = defaultDataDir()
+
 type
-  StartUpCommand* {.pure.} = enum
-    noCommand,
-    initNode
+  StartUpCmd* {.pure.} = enum
+    noCmd
+    persistence
 
   LogKind* {.pure.} = enum
     Auto = "auto"
@@ -106,125 +117,125 @@ type
 
     dataDir* {.
      desc: "The directory where codex will store configuration and data"
-      defaultValue: defaultDataDir()
-      defaultValueDesc: ""
+      defaultValue: DefaultDataDir
+      defaultValueDesc: $DefaultDataDir
      abbr: "d"
      name: "data-dir" }: OutDir
 
-    case cmd* {.
-      command
-      defaultValue: noCommand }: StartUpCommand
-
-    of noCommand:
-
-      listenAddrs* {.
-        desc: "Multi Addresses to listen on"
-        defaultValue: @[
-          MultiAddress.init("/ip4/0.0.0.0/tcp/0")
-          .expect("Should init multiaddress")]
-        defaultValueDesc: "/ip4/0.0.0.0/tcp/0"
-        abbr: "i"
-        name: "listen-addrs" }: seq[MultiAddress]
-
-      # TODO: change this once we integrate nat support
-      nat* {.
-        desc: "IP Addresses to announce behind a NAT"
-        defaultValue: ValidIpAddress.init("127.0.0.1")
-        defaultValueDesc: "127.0.0.1"
-        abbr: "a"
-        name: "nat" }: ValidIpAddress
-
-      discoveryIp* {.
-        desc: "Discovery listen address"
-        defaultValue: ValidIpAddress.init(IPv4_any())
-        defaultValueDesc: "0.0.0.0"
-        abbr: "e"
-        name: "disc-ip" }: ValidIpAddress
-
-      discoveryPort* {.
-        desc: "Discovery (UDP) port"
-        defaultValue: 8090.Port
-        defaultValueDesc: "8090"
-        abbr: "u"
-        name: "disc-port" }: Port
-
-      netPrivKeyFile* {.
-        desc: "Source of network (secp256k1) private key file path or name"
-        defaultValue: "key"
-        name: "net-privkey" }: string
-
-      bootstrapNodes* {.
-        desc: "Specifies one or more bootstrap nodes to use when connecting to the network"
-        abbr: "b"
-        name: "bootstrap-node" }: seq[SignedPeerRecord]
-
-      maxPeers* {.
-        desc: "The maximum number of peers to connect to"
-        defaultValue: 160
-        name: "max-peers" }: int
-
-      agentString* {.
-        defaultValue: "Codex"
-        desc: "Node agent string which is used as identifier in network"
-        name: "agent-string" }: string
-
-      apiBindAddress* {.
-        desc: "The REST API bind address"
-        defaultValue: "127.0.0.1"
-        name: "api-bindaddr"
-      }: string
-
-      apiPort* {.
-        desc: "The REST Api port",
-        defaultValue: 8080.Port
-        defaultValueDesc: "8080"
-        name: "api-port"
-        abbr: "p" }: Port
-
-      repoKind* {.
-        desc: "Backend for main repo store (fs, sqlite)"
-        defaultValueDesc: "fs"
-        defaultValue: repoFS
-        name: "repo-kind" }: RepoKind
-
-      storageQuota* {.
-        desc: "The size of the total storage quota dedicated to the node"
-        defaultValue: DefaultQuotaBytes
-        defaultValueDesc: $DefaultQuotaBytes
-        name: "storage-quota"
-        abbr: "q" }: NBytes
-
-      blockTtl* {.
-        desc: "Default block timeout in seconds - 0 disables the ttl"
-        defaultValue: DefaultBlockTtl
-        defaultValueDesc: $DefaultBlockTtl
-        name: "block-ttl"
-        abbr: "t" }: Duration
-
-      blockMaintenanceInterval* {.
-        desc: "Time interval in seconds - determines frequency of block maintenance cycle: how often blocks are checked for expiration and cleanup"
-        defaultValue: DefaultBlockMaintenanceInterval
-        defaultValueDesc: $DefaultBlockMaintenanceInterval
-        name: "block-mi" }: Duration
-
-      blockMaintenanceNumberOfBlocks* {.
-        desc: "Number of blocks to check every maintenance cycle"
-        defaultValue: DefaultNumberOfBlocksToMaintainPerInterval
-        defaultValueDesc: $DefaultNumberOfBlocksToMaintainPerInterval
-        name: "block-mn" }: int
-
-      cacheSize* {.
-        desc: "The size of the block cache, 0 disables the cache - might help on slow hardrives"
-        defaultValue: 0
-        defaultValueDesc: "0"
-        name: "cache-size"
-        abbr: "c" }: NBytes
-
-      persistence* {.
-        desc: "Enables persistence mechanism, requires an Ethereum node"
-        defaultValue: false
-        name: "persistence"
-      .}: bool
+    listenAddrs* {.
+      desc: "Multi Addresses to listen on"
+      defaultValue: @[
+        MultiAddress.init("/ip4/0.0.0.0/tcp/0")
+        .expect("Should init multiaddress")]
+      defaultValueDesc: "/ip4/0.0.0.0/tcp/0"
+      abbr: "i"
+      name: "listen-addrs" }: seq[MultiAddress]
+
+    # TODO: change this once we integrate nat support
+    nat* {.
+      desc: "IP Addresses to announce behind a NAT"
+      defaultValue: ValidIpAddress.init("127.0.0.1")
+      defaultValueDesc: "127.0.0.1"
+      abbr: "a"
+      name: "nat" }: ValidIpAddress
+
+    discoveryIp* {.
+      desc: "Discovery listen address"
+      defaultValue: ValidIpAddress.init(IPv4_any())
+      defaultValueDesc: "0.0.0.0"
+      abbr: "e"
+      name: "disc-ip" }: ValidIpAddress
+
+    discoveryPort* {.
+      desc: "Discovery (UDP) port"
+      defaultValue: 8090.Port
+      defaultValueDesc: "8090"
+      abbr: "u"
+      name: "disc-port" }: Port
+
+    netPrivKeyFile* {.
+      desc: "Source of network (secp256k1) private key file path or name"
+      defaultValue: "key"
+      name: "net-privkey" }: string
+
+    bootstrapNodes* {.
+      desc: "Specifies one or more bootstrap nodes to use when connecting to the network"
+      abbr: "b"
+      name: "bootstrap-node" }: seq[SignedPeerRecord]
+
+    maxPeers* {.
+      desc: "The maximum number of peers to connect to"
+      defaultValue: 160
+      name: "max-peers" }: int
+
+    agentString* {.
+      defaultValue: "Codex"
+      desc: "Node agent string which is used as identifier in network"
+      name: "agent-string" }: string
+
+    apiBindAddress* {.
+      desc: "The REST API bind address"
+      defaultValue: "127.0.0.1"
+      name: "api-bindaddr"
+    }: string
+
+    apiPort* {.
+      desc: "The REST Api port",
+      defaultValue: 8080.Port
+      defaultValueDesc: "8080"
+      name: "api-port"
+      abbr: "p" }: Port
+
+    repoKind* {.
+      desc: "Backend for main repo store (fs, sqlite)"
+      defaultValueDesc: "fs"
+      defaultValue: repoFS
+      name: "repo-kind" }: RepoKind
+
+    storageQuota* {.
+      desc: "The size of the total storage quota dedicated to the node"
+      defaultValue: DefaultQuotaBytes
+      defaultValueDesc: $DefaultQuotaBytes
+      name: "storage-quota"
+      abbr: "q" }: NBytes
+
+    blockTtl* {.
+      desc: "Default block timeout in seconds - 0 disables the ttl"
+      defaultValue: DefaultBlockTtl
+      defaultValueDesc: $DefaultBlockTtl
+      name: "block-ttl"
+      abbr: "t" }: Duration
+
+    blockMaintenanceInterval* {.
+      desc: "Time interval in seconds - determines frequency of block maintenance cycle: how often blocks are checked for expiration and cleanup"
+      defaultValue: DefaultBlockMaintenanceInterval
+      defaultValueDesc: $DefaultBlockMaintenanceInterval
+      name: "block-mi" }: Duration
+
+    blockMaintenanceNumberOfBlocks* {.
+      desc: "Number of blocks to check every maintenance cycle"
+      defaultValue: DefaultNumberOfBlocksToMaintainPerInterval
+      defaultValueDesc: $DefaultNumberOfBlocksToMaintainPerInterval
+      name: "block-mn" }: int
+
+    cacheSize* {.
+      desc: "The size of the block cache, 0 disables the cache - might help on slow hardrives"
+      defaultValue: 0
+      defaultValueDesc: "0"
+      name: "cache-size"
+      abbr: "c" }: NBytes
+
+    logFile* {.
+      desc: "Logs to file"
+      defaultValue: string.none
+      name: "log-file"
+      hidden
+    .}: Option[string]
+
+    case cmd* {.
+      defaultValue: noCmd
+      command }: StartUpCmd
+    of persistence:
 
       ethProvider* {.
         desc: "The URL of the JSON-RPC API of the Ethereum node"
@@ -235,18 +246,21 @@ type
       ethAccount* {.
        desc: "The Ethereum account that is used for storage contracts"
         defaultValue: EthAddress.none
+        defaultValueDesc: ""
         name: "eth-account"
       .}: Option[EthAddress]
 
       ethPrivateKey* {.
         desc: "File containing Ethereum private key for storage contracts"
         defaultValue: string.none
+        defaultValueDesc: ""
         name: "eth-private-key"
       .}: Option[string]
 
       marketplaceAddress* {.
         desc: "Address of deployed Marketplace contract"
         defaultValue: EthAddress.none
+        defaultValueDesc: ""
         name: "marketplace-address"
       .}: Option[EthAddress]
 
@@ -262,6 +276,7 @@ type
         name: "validator-max-slots"
       .}: int
 
+      # TODO: should go behind a feature flag
       simulateProofFailures* {.
         desc: "Simulates proof failures once every N proofs. 0 = disabled."
         defaultValue: 0
@@ -269,21 +284,17 @@
         hidden
       .}: int
 
-      logFile* {.
-        desc: "Logs to file"
-        defaultValue: string.none
-        name: "log-file"
-        hidden
-      .}: Option[string]
-
-    of initNode:
-      discard
+    of StartUpCmd.noCmd:
+      discard # end of persistence
 
   EthAddress* = ethers.Address
 
 logutils.formatIt(LogFormat.textLines, EthAddress): it.short0xHexLog
 logutils.formatIt(LogFormat.json, EthAddress): %it
 
+func persistence*(self: CodexConf): bool =
+  self.cmd == StartUpCmd.persistence
+
 proc getCodexVersion(): string =
   let tag = strip(staticExec("git tag"))
   if tag.isEmptyOrWhitespace:
@@ -308,16 +319,6 @@ const
     "Codex revision: " & codexRevision & "\p" &
     nimBanner
 
-proc defaultDataDir*(): string =
-  let dataDir = when defined(windows):
-      "AppData" / "Roaming" / "Codex"
-    elif defined(macosx):
-      "Library" / "Application Support" / "Codex"
-    else:
-      ".cache" / "codex"
-
-  getHomeDir() / dataDir
-
 proc parseCmdArg*(T: typedesc[MultiAddress],
                   input: string): MultiAddress
                   {.upraises: [ValueError, LPError].} =
@@ -326,7 +327,7 @@ proc parseCmdArg*(T: typedesc[MultiAddress],
   if res.isOk:
     ma = res.get()
   else:
-    warn "Invalid MultiAddress", input=input, error=res.error()
+    warn "Invalid MultiAddress", input=input, error = res.error()
     quit QuitFailure
   ma
 
@@ -334,10 +335,10 @@ proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
   var res: SignedPeerRecord
   try:
     if not res.fromURI(uri):
-      warn "Invalid SignedPeerRecord uri", uri=uri
+      warn "Invalid SignedPeerRecord uri", uri = uri
       quit QuitFailure
   except CatchableError as exc:
-    warn "Invalid SignedPeerRecord uri", uri=uri, error=exc.msg
+    warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg
     quit QuitFailure
   res
 
@@ -348,7 +349,7 @@ proc parseCmdArg*(T: type NBytes, val: string): T =
   var num = 0'i64
   let count = parseSize(val, num, alwaysBin = true)
   if count == 0:
-    warn "Invalid number of bytes", nbytes=val
+    warn "Invalid number of bytes", nbytes = val
     quit QuitFailure
   NBytes(num)
 
@@ -356,7 +357,7 @@ proc parseCmdArg*(T: type Duration, val: string): T =
   var dur: Duration
   let count = parseDuration(val, dur)
   if count == 0:
-    warn "Invalid duration parse", dur=dur
+    warn "Cannot parse duration", dur = dur
     quit QuitFailure
   dur
 
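The heart of the rework is the confutils case-object pattern above: a pure command enum discriminates the config object, and a helper func keeps old `config.persistence` call sites compiling unchanged. A minimal, self-contained sketch of that pattern (`ExampleConf` and its options are invented for illustration; this is not the Codex config):

  import pkg/confutils

  type
    Cmd {.pure.} = enum
      noCmd
      persistence

    ExampleConf = object
      dataDir {.defaultValue: "data", name: "data-dir".}: string
      case cmd {.command, defaultValue: noCmd.}: Cmd
      of persistence:
        # only parsed (and only accessible) when the subcommand is given
        ethProvider {.defaultValue: "ws://localhost:8545", name: "eth-provider".}: string
      of noCmd:
        discard

  # compatibility shim, mirroring `func persistence*(self: CodexConf)` above
  func persistence(conf: ExampleConf): bool =
    conf.cmd == Cmd.persistence

  when isMainModule:
    let conf = ExampleConf.load()
    echo "persistence mode: ", conf.persistence

Run as `example persistence --eth-provider=...` to select the branch; with no subcommand, `conf.persistence` is false and the branch fields do not exist.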
@@ -74,6 +74,8 @@ else:
   --styleCheck:usages
   --styleCheck:error
   --maxLoopIterationsVM:1000000000
+  --fieldChecks:on
+  --warningAsError:"ProveField:on"
 
 when (NimMajor, NimMinor) >= (1, 4):
   --warning:"ObservableStores:off"
@@ -116,6 +118,9 @@ switch("define", "chronicles_sinks=textlines[dynamic],json[dynamic],textlines[dy
 switch("define", "use_asm_syntax_intel=false")
 switch("define", "ctt_asm=false")
 
+# Allow the use of old-style case objects for nim config compatibility
+switch("define", "nimOldCaseObjects")
+
 # begin Nimble config (version 1)
 when system.fileExists("nimble.paths"):
   include "nimble.paths"
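The new build flags are not incidental: once options live in case-object branches, reading a field of the inactive branch is a runtime `FieldDefect` when field checks are on, and `--warningAsError:"ProveField:on"` turns the compiler's ProveField warning about such accesses into an error, while `nimOldCaseObjects` relaxes the discriminant-assignment rules that confutils relies on. A toy illustration of the runtime check (assumes the default `--panics:off`, so the defect is catchable):

  {.push fieldChecks: on.}   # same effect as the new --fieldChecks:on switch

  type
    Kind {.pure.} = enum
      plain
      fancy

    Obj = object
      case kind: Kind
      of Kind.fancy:
        extra: string
      of Kind.plain:
        discard

  let o = Obj(kind: Kind.plain)
  try:
    echo o.extra        # inactive branch: raises FieldDefect with field checks on
  except FieldDefect:
    echo "caught invalid field access"

  {.pop.}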
@@ -191,14 +191,14 @@ template multinodesuite*(name: string, body: untyped) =
   proc startClientNode(): Future[NodeProcess] {.async.} =
     let clientIdx = clients().len
     var config = nodeConfigs.clients
-    config.cliOptions.add CliOption(key: "--persistence")
+    config.cliOptions.add CliOption(key: "persistence")
     return await newCodexProcess(clientIdx, config, Role.Client)
 
   proc startProviderNode(): Future[NodeProcess] {.async.} =
     let providerIdx = providers().len
     var config = nodeConfigs.providers
     config.cliOptions.add CliOption(key: "--bootstrap-node", value: bootstrap)
-    config.cliOptions.add CliOption(key: "--persistence")
+    config.cliOptions.add CliOption(key: "persistence")
 
     # filter out provider options by provided index
     config.cliOptions = config.cliOptions.filter(
@@ -9,32 +9,14 @@ suite "Command line interface":
   let key = "4242424242424242424242424242424242424242424242424242424242424242"
 
   test "complains when persistence is enabled without ethereum account":
-    let node = startNode(@["--persistence"])
+    let node = startNode(@["persistence"])
     node.waitUntilOutput("Persistence enabled, but no Ethereum account was set")
     node.stop()
 
-  test "complains when validator is enabled without ethereum account":
-    let node = startNode(@["--validator"])
-    node.waitUntilOutput("Validator enabled, but no Ethereum account was set")
-    node.stop()
-
-  test "complains when ethereum account is set when not needed":
-    let node = startNode(@["--eth-account=" & account])
-    node.waitUntilOutput("Ethereum account was set, but neither persistence nor validator is enabled")
-    node.stop()
-
-  test "complains when ethereum private key is set when not needed":
-    let keyFile = genTempPath("", "")
-    discard secureWriteFile(keyFile, key)
-    let node = startNode(@["--eth-private-key=" & keyFile])
-    node.waitUntilOutput("Ethereum account was set, but neither persistence nor validator is enabled")
-    node.stop()
-    discard removeFile(keyFile)
-
   test "complains when ethereum private key file has wrong permissions":
     let unsafeKeyFile = genTempPath("", "")
     discard unsafeKeyFile.writeFile(key, 0o666)
-    let node = startNode(@["--persistence", "--eth-private-key=" & unsafeKeyFile])
+    let node = startNode(@["persistence", "--eth-private-key=" & unsafeKeyFile])
     node.waitUntilOutput("Ethereum private key file does not have safe file permissions")
     node.stop()
     discard removeFile(unsafeKeyFile)
@@ -61,125 +61,125 @@ marketplacesuite "Hosts submit regular proofs":
   await subscription.unsubscribe()
 
 
-marketplacesuite "Simulate invalid proofs":
-
-  # TODO: these are very loose tests in that they are not testing EXACTLY how
-  # proofs were marked as missed by the validator. These tests should be
-  # tightened so that they are showing, as an integration test, that specific
-  # proofs are being marked as missed by the validator.
-
-  test "slot is freed after too many invalid proofs submitted", NodeConfigs(
-    # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
-    # hardhat: HardhatConfig().withLogFile(),
-
-    clients:
-      CodexConfig()
-        .nodes(1)
-        # .debug() # uncomment to enable console log output
-        .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-        .withLogTopics("node"),
-
-    providers:
-      CodexConfig()
-        .nodes(1)
-        .simulateProofFailuresFor(providerIdx=0, failEveryNProofs=1)
-        # .debug() # uncomment to enable console log output
-        .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-        .withLogTopics("marketplace", "sales", "reservations", "node"),
-
-    validators:
-      CodexConfig()
-        .nodes(1)
-        .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-        # .debug() # uncomment to enable console log output
-        .withLogTopics("validator", "onchain", "ethers")
-  ):
-    let client0 = clients()[0].client
-    let totalPeriods = 50
-
-    let datasetSizeInBlocks = 2
-    let data = await RandomChunker.example(blocks=datasetSizeInBlocks)
-    createAvailabilities(data.len, totalPeriods.periods)
-
-    let cid = client0.upload(data).get
-
-    let purchaseId = await client0.requestStorage(
-      cid,
-      duration=totalPeriods.periods,
-      origDatasetSizeInBlocks=datasetSizeInBlocks)
-    let requestId = client0.requestId(purchaseId).get
-
-    check eventually client0.purchaseStateIs(purchaseId, "started")
-
-    var slotWasFreed = false
-    proc onSlotFreed(event: SlotFreed) =
-      if event.requestId == requestId and
-         event.slotIndex == 0.u256: # assume only one slot, so index 0
-        slotWasFreed = true
-
-    let subscription = await marketplace.subscribe(SlotFreed, onSlotFreed)
-
-    let currentPeriod = await getCurrentPeriod()
-    check eventuallyP(slotWasFreed, currentPeriod + totalPeriods.u256 + 1)
-
-    await subscription.unsubscribe()
-
-  test "slot is not freed when not enough invalid proofs submitted", NodeConfigs(
-    # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
-    # hardhat: HardhatConfig().withLogFile(),
-
-    clients:
-      CodexConfig()
-        .nodes(1)
-        # .debug() # uncomment to enable console log output
-        .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-        .withLogTopics("node"),
-
-    providers:
-      CodexConfig()
-        .nodes(1)
-        .simulateProofFailuresFor(providerIdx=0, failEveryNProofs=3)
-        # .debug() # uncomment to enable console log output
-        .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-        .withLogTopics("marketplace", "sales", "reservations", "node"),
-
-    validators:
-      CodexConfig()
-        .nodes(1)
-        # .debug()
-        .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-        .withLogTopics("validator", "onchain", "ethers")
-  ):
-    let client0 = clients()[0].client
-    let totalPeriods = 25
-
-    let datasetSizeInBlocks = 2
-    let data = await RandomChunker.example(blocks=datasetSizeInBlocks)
-    createAvailabilities(data.len, totalPeriods.periods)
-
-    let cid = client0.upload(data).get
-
-    let purchaseId = await client0.requestStorage(
-      cid,
-      duration=totalPeriods.periods,
-      origDatasetSizeInBlocks=datasetSizeInBlocks)
-    let requestId = client0.requestId(purchaseId).get
-
-    check eventually client0.purchaseStateIs(purchaseId, "started")
-
-    var slotWasFreed = false
-    proc onSlotFreed(event: SlotFreed) =
-      if event.requestId == requestId and
-         event.slotIndex == 0.u256:
-        slotWasFreed = true
-
-    let subscription = await marketplace.subscribe(SlotFreed, onSlotFreed)
-
-    # check not freed
-    let currentPeriod = await getCurrentPeriod()
-    check not eventuallyP(slotWasFreed, currentPeriod + totalPeriods.u256 + 1)
-
-    await subscription.unsubscribe()
+# marketplacesuite "Simulate invalid proofs":
+
+#   # TODO: these are very loose tests in that they are not testing EXACTLY how
+#   # proofs were marked as missed by the validator. These tests should be
+#   # tightened so that they are showing, as an integration test, that specific
+#   # proofs are being marked as missed by the validator.
+
+#   test "slot is freed after too many invalid proofs submitted", NodeConfigs(
+#     # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
+#     # hardhat: HardhatConfig().withLogFile(),
+
+#     clients:
+#       CodexConfig()
+#         .nodes(1)
+#         # .debug() # uncomment to enable console log output
+#         .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
+#         .withLogTopics("node"),
+
+#     providers:
+#       CodexConfig()
+#         .nodes(1)
+#         .simulateProofFailuresFor(providerIdx=0, failEveryNProofs=1)
+#         # .debug() # uncomment to enable console log output
+#         .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
+#         .withLogTopics("marketplace", "sales", "reservations", "node"),
+
+#     validators:
+#       CodexConfig()
+#         .nodes(1)
+#         .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
+#         # .debug() # uncomment to enable console log output
+#         .withLogTopics("validator", "onchain", "ethers")
+#   ):
+#     let client0 = clients()[0].client
+#     let totalPeriods = 50
+
+#     let datasetSizeInBlocks = 2
+#     let data = await RandomChunker.example(blocks=datasetSizeInBlocks)
+#     createAvailabilities(data.len, totalPeriods.periods)
+
+#     let cid = client0.upload(data).get
+
+#     let purchaseId = await client0.requestStorage(
+#       cid,
+#       duration=totalPeriods.periods,
+#       origDatasetSizeInBlocks=datasetSizeInBlocks)
+#     let requestId = client0.requestId(purchaseId).get
+
+#     check eventually client0.purchaseStateIs(purchaseId, "started")
+
+#     var slotWasFreed = false
+#     proc onSlotFreed(event: SlotFreed) =
+#       if event.requestId == requestId and
+#          event.slotIndex == 0.u256: # assume only one slot, so index 0
+#         slotWasFreed = true
+
+#     let subscription = await marketplace.subscribe(SlotFreed, onSlotFreed)
+
+#     let currentPeriod = await getCurrentPeriod()
+#     check eventuallyP(slotWasFreed, currentPeriod + totalPeriods.u256 + 1)
+
+#     await subscription.unsubscribe()
+
+#   test "slot is not freed when not enough invalid proofs submitted", NodeConfigs(
+#     # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
+#     # hardhat: HardhatConfig().withLogFile(),
+
+#     clients:
+#       CodexConfig()
+#         .nodes(1)
+#         # .debug() # uncomment to enable console log output
+#         .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
+#         .withLogTopics("node"),
+
+#     providers:
+#       CodexConfig()
+#         .nodes(1)
+#         .simulateProofFailuresFor(providerIdx=0, failEveryNProofs=3)
+#         # .debug() # uncomment to enable console log output
+#         .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
+#         .withLogTopics("marketplace", "sales", "reservations", "node"),
+
+#     validators:
+#       CodexConfig()
+#         .nodes(1)
+#         # .debug()
+#         .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
+#         .withLogTopics("validator", "onchain", "ethers")
+#   ):
+#     let client0 = clients()[0].client
+#     let totalPeriods = 25
+
+#     let datasetSizeInBlocks = 2
+#     let data = await RandomChunker.example(blocks=datasetSizeInBlocks)
+#     createAvailabilities(data.len, totalPeriods.periods)
+
+#     let cid = client0.upload(data).get
+
+#     let purchaseId = await client0.requestStorage(
+#       cid,
+#       duration=totalPeriods.periods,
+#       origDatasetSizeInBlocks=datasetSizeInBlocks)
+#     let requestId = client0.requestId(purchaseId).get
+
+#     check eventually client0.purchaseStateIs(purchaseId, "started")
+
+#     var slotWasFreed = false
+#     proc onSlotFreed(event: SlotFreed) =
+#       if event.requestId == requestId and
+#          event.slotIndex == 0.u256:
+#         slotWasFreed = true
+
+#     let subscription = await marketplace.subscribe(SlotFreed, onSlotFreed)
+
+#     # check not freed
+#     let currentPeriod = await getCurrentPeriod()
+#     check not eventuallyP(slotWasFreed, currentPeriod + totalPeriods.u256 + 1)
+
+#     await subscription.unsubscribe()
 
 # TODO: uncomment once fixed
 # test "host that submits invalid proofs is paid out less", NodeConfigs(
@@ -38,7 +38,7 @@ template twonodessuite*(name: string, debug1, debug2: string, body) =
       "--disc-ip=127.0.0.1",
       "--disc-port=8090",
       "--listen-addrs=/ip4/127.0.0.1/tcp/0",
-      "--persistence",
+      "persistence",
       "--eth-account=" & $account1
     ]
 
@@ -58,7 +58,7 @@ template twonodessuite*(name: string, debug1, debug2: string, body) =
       "--disc-port=8091",
       "--listen-addrs=/ip4/127.0.0.1/tcp/0",
       "--bootstrap-node=" & bootstrap,
-      "--persistence",
+      "persistence",
       "--eth-account=" & $account2
     ]
 