allow integration tests to start hardhat locally so logs can be captured

Eric 2023-10-24 14:00:36 +11:00
parent 40ee931d6d
commit 10decd2f28
4 changed files with 270 additions and 117 deletions
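
The change lets a test suite declare a locally managed Hardhat node whose output is captured to a log file. A minimal sketch of the new configuration surface, modeled on the testproofs changes below (suite name, node counts, and test body are illustrative):

import ./multinodes

multinodesuite "hardhat log capture",
  StartNodes(
    hardhat: StartHardhatConfig()
      .withLogFile(),        # hardhat output lands in logs/<starttime>/Hardhat.log next to the test sources
    clients: StartNodeConfig()
      .nodes(1)
      .withLogFile()):

  test "starts one client against the local hardhat node":
    check clients().len == 1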


@@ -0,0 +1,119 @@
import pkg/questionable
import pkg/questionable/results
import pkg/confutils
import pkg/chronicles
import pkg/chronos
import pkg/libp2p
import pkg/stew/io2
import std/osproc
import std/os
import std/streams
import std/strutils
import codex/conf
import ./codexclient
import ./nodes

export codexclient
export chronicles

logScope:
  topics = "integration testing nodes"

const workingDir = currentSourcePath() / ".." / ".." / ".." / "vendor" / "codex-contracts-eth"

when defined(windows):
  const executable = "npmstart.bat"
else:
  const executable = "npmstart.sh"

const startedOutput = "Started HTTP and WebSocket JSON-RPC server at"

type
  HardhatProcess* = ref object of NodeProcess
    logWrite: Future[void]
    logFile: ?IoHandle
    started: Future[void]

proc writeToLogFile*(node: HardhatProcess, logFilePath: string) {.async.} =
  let logFileHandle = openFile(
    logFilePath,
    {OpenFlags.Write, OpenFlags.Create, OpenFlags.Truncate}
  )

  without fileHandle =? logFileHandle:
    error "failed to open log file",
      path = logFilePath,
      errorCode = $logFileHandle.error
    return

  node.logFile = some fileHandle
  node.started = newFuture[void]("hardhat.started")

  try:
    for line in node.process.outputStream.lines:
      if line.contains(startedOutput):
        node.started.complete()

      if error =? fileHandle.writeFile(line & "\n").errorOption:
        error "failed to write to hardhat log file", errorCode = error
        discard fileHandle.closeFile()
        return

      await sleepAsync(1.millis)
  except CancelledError:
    discard

proc start(node: HardhatProcess) =
  node.process = osproc.startProcess(
    executable,
    workingDir,
    node.arguments)

  for arg in node.arguments:
    if arg.contains "--log-file=":
      let logFilePath = arg.split("=")[1]
      node.logWrite = node.writeToLogFile(logFilePath)
      break

proc waitUntilOutput*(node: HardhatProcess, output: string) =
  if not node.started.isNil:
    waitFor node.started.wait(5000.milliseconds)
    return
  else:
    for line in node.process.outputStream.lines:
      if line.contains(output):
        return
  raiseAssert "node did not output '" & output & "'"

proc waitUntilStarted*(node: HardhatProcess) =
  node.waitUntilOutput(startedOutput)

proc startHardhatProcess*(args: openArray[string]): HardhatProcess =
  ## Starts a Hardhat node with the specified arguments.
  let node = HardhatProcess(arguments: @args)
  node.start()
  node

proc stop*(node: HardhatProcess) =
  if node.process != nil:
    node.process.terminate()
    discard node.process.waitForExit(timeout = 5_000)
    node.process.close()
    node.process = nil

  if not node.logWrite.isNil and not node.logWrite.finished:
    waitFor node.logWrite.cancelAndWait()

  if logFile =? node.logFile:
    discard logFile.closeFile()

proc restart*(node: HardhatProcess) =
  node.stop()
  node.start()
  node.waitUntilStarted()

proc removeDataDir*(node: HardhatProcess) =
  discard
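
The HardhatProcess API above can also be driven directly, outside multinodesuite. A minimal sketch, assuming an illustrative log path (waitUntilStarted blocks until the JSON-RPC server banner appears, with a 5 s cap when a log file is captured):

import std/os
import ./hardhat

let hardhat = startHardhatProcess(@["--log-file=" & (getTempDir() / "hardhat.log")])
hardhat.waitUntilStarted()   # returns once startedOutput is seen on stdout
# ... run contract interactions against the local hardhat node ...
hardhat.stop()               # terminates the process, cancels the log writer, closes the log file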


@@ -11,6 +11,7 @@ import std/times
import pkg/chronicles
import ../ethertest
import ./codexclient
import ./hardhat
import ./nodes
export ethertest
@@ -21,23 +22,24 @@ type
  RunningNode* = ref object
    role*: Role
    node*: NodeProcess
    restClient*: CodexClient
    datadir*: string
    ethAccount*: Address
  StartNodes* = object
    clients*: StartNodeConfig
    providers*: StartNodeConfig
    validators*: StartNodeConfig
    hardhat*: StartHardhatConfig
  StartNodeConfig* = object
    numNodes*: int
    cliOptions*: seq[CliOption]
    logFile*: bool
    logTopics*: seq[string]
    debugEnabled*: bool
  StartHardhatConfig* = ref object
    logFile*: bool
  Role* {.pure.} = enum
    Client,
    Provider,
    Validator
    Validator,
    Hardhat
  CliOption* = object of RootObj
    nodeIdx*: ?int
    key*: string
@@ -51,15 +53,9 @@ proc `$`*(option: CliOption): string =
proc new*(_: type RunningNode,
          role: Role,
          node: NodeProcess,
          restClient: CodexClient,
          datadir: string,
          ethAccount: Address): RunningNode =
          node: NodeProcess): RunningNode =
  RunningNode(role: role,
              node: node,
              restClient: restClient,
              datadir: datadir,
              ethAccount: ethAccount)
              node: node)

proc nodes*(config: StartNodeConfig, numNodes: int): StartNodeConfig =
  if numNodes < 0:
@@ -126,47 +122,95 @@ proc withLogFile*(
): StartNodeConfig =
  var startConfig = config
  var logDir = currentSourcePath.parentDir() / "logs" / "{starttime}"
  createDir(logDir)
  startConfig.logFile = logToFile
  return startConfig

proc withLogFile*(
  config: StartHardhatConfig,
  logToFile: bool = true
): StartHardhatConfig =
  var startConfig = config
  startConfig.logFile = logToFile
  return startConfig

template multinodesuite*(name: string,
  startNodes: StartNodes, body: untyped) =

  ethersuite name:
  asyncchecksuite name:

    var provider {.inject, used.}: JsonRpcProvider
    var accounts {.inject, used.}: seq[Address]
    var running: seq[RunningNode]
    var bootstrap: string
    let starttime = now().format("yyyy-MM-dd'_'HH:mm:ss")

    proc newNodeProcess(index: int,
                        config: StartNodeConfig
    ): (NodeProcess, string, Address) =

    proc getLogFile(role: Role, index: ?int): string =
      var logDir = currentSourcePath.parentDir() / "logs" / $starttime
      createDir(logDir)
      var fn = $role
      if idx =? index:
        fn &= "_" & $idx
      fn &= ".log"
      let fileName = logDir / fn
      return fileName

      if index > accounts.len - 1:
        raiseAssert("Cannot start node at index " & $index &

    proc newHardhatProcess(config: StartHardhatConfig, role: Role): NodeProcess =
      var options: seq[string] = @[]
      if config.logFile:
        let updatedLogFile = getLogFile(role, none int)
        options.add "--log-file=" & updatedLogFile
      let node = startHardhatProcess(options)
      node.waitUntilStarted()
      debug "started new hardhat node"
      return node

    proc newNodeProcess(roleIdx: int,
                        config1: StartNodeConfig,
                        role: Role
    ): NodeProcess =
      let nodeIdx = running.len
      var config = config1
      if nodeIdx > accounts.len - 1:
        raiseAssert("Cannot start node at nodeIdx " & $nodeIdx &
          ", not enough eth accounts.")
      let datadir = getTempDir() / "Codex" & $index
      # let logdir = currentSourcePath.parentDir()
      let datadir = getTempDir() / "Codex" / $starttime / $role & "_" & $roleIdx
      if config.logFile:
        let updatedLogFile = getLogFile(role, some roleIdx)
        config.cliOptions.add CliOption(key: "--log-file", value: updatedLogFile)
      if config.logTopics.len > 0:
        config.cliOptions.add CliOption(key: "--log-level", value: "INFO;TRACE: " & config.logTopics.join(","))
      var options = config.cliOptions.map(o => $o)
        .concat(@[
          "--api-port=" & $(8080 + index),
          "--api-port=" & $(8080 + nodeIdx),
          "--data-dir=" & datadir,
          "--nat=127.0.0.1",
          "--listen-addrs=/ip4/127.0.0.1/tcp/0",
          "--disc-ip=127.0.0.1",
          "--disc-port=" & $(8090 + index),
          "--eth-account=" & $accounts[index]])
      # if logFile =? config.logFile:
      #   options.add "--log-file=" & logFile
      if config.logTopics.len > 0:
        options.add "--log-level=INFO;TRACE: " & config.logTopics.join(",")
          "--disc-port=" & $(8090 + nodeIdx),
          "--eth-account=" & $accounts[nodeIdx]])
      let node = startNode(options, config.debugEnabled)
      node.waitUntilStarted()
      (node, datadir, accounts[index])
      if config.debugEnabled:
        debug "started new integration testing node and codex client",
          role,
          apiUrl = node.apiUrl,
          discAddress = node.discoveryAddress,
          address = accounts[nodeIdx],
          cliOptions = config.cliOptions.join(",")
      return node

    proc clients(): seq[RunningNode] {.used.} =
      running.filter(proc(r: RunningNode): bool = r.role == Role.Client)
@@ -177,92 +221,64 @@ template multinodesuite*(name: string,
    proc validators(): seq[RunningNode] {.used.} =
      running.filter(proc(r: RunningNode): bool = r.role == Role.Validator)

    proc newCodexClient(index: int): CodexClient =
      CodexClient.new("http://localhost:" & $(8080 + index) & "/api/codex/v1")

    proc startHardhatNode(): NodeProcess =
      var config = startNodes.hardhat
      return newHardhatProcess(config, Role.Hardhat)

    proc getLogFile(role: Role, index: int): string =
      var logDir = currentSourcePath.parentDir() / "logs" / $starttime
      createDir(logDir)
      let fn = $role & "_" & $index & ".log"
      let fileName = logDir / fn
      echo ">>> replace log file name: ", fileName
      return fileName

    proc startClientNode() =
      let index = running.len
    proc startClientNode(): NodeProcess =
      let clientIdx = clients().len
      var config = startNodes.clients
      config.cliOptions.add CliOption(key: "--persistence")
      if config.logFile:
        let updatedLogFile = getLogFile(Role.Client, clientIdx)
        config.cliOptions.add CliOption(key: "--log-file", value: updatedLogFile)
      let (node, datadir, account) = newNodeProcess(index, config)
      let restClient = newCodexClient(index)
      running.add RunningNode.new(Role.Client, node, restClient, datadir,
                                  account)
      if config.debugEnabled:
        debug "started new client node and codex client",
          restApiPort = 8080 + index, discPort = 8090 + index, account
      return newNodeProcess(clientIdx, config, Role.Client)

    proc startProviderNode(cliOptions: seq[CliOption] = @[]) =
      let index = running.len
    proc startProviderNode(): NodeProcess =
      let providerIdx = providers().len
      var config = startNodes.providers
      config.cliOptions = config.cliOptions.concat(cliOptions)
      if config.logFile:
        let updatedLogFile = getLogFile(Role.Provider, providerIdx)
        config.cliOptions.add CliOption(key: "--log-file", value: updatedLogFile)
      config.cliOptions.add CliOption(key: "--bootstrap-node", value: bootstrap)
      config.cliOptions.add CliOption(key: "--persistence")
      # filter out provider options by provided index
      config.cliOptions = config.cliOptions.filter(
        o => (let idx = o.nodeIdx |? providerIdx; echo "idx: ", idx, ", index: ", index; idx == providerIdx)
        o => (let idx = o.nodeIdx |? providerIdx; idx == providerIdx)
      )
      let (node, datadir, account) = newNodeProcess(index, config)
      let restClient = newCodexClient(index)
      running.add RunningNode.new(Role.Provider, node, restClient, datadir,
                                  account)
      if config.debugEnabled:
        debug "started new provider node and codex client",
          restApiPort = 8080 + index, discPort = 8090 + index, account,
          cliOptions = config.cliOptions.join(",")
      return newNodeProcess(providerIdx, config, Role.Provider)

    proc startValidatorNode() =
      let index = running.len
      let validatorIdx = providers().len
    proc startValidatorNode(): NodeProcess =
      let validatorIdx = validators().len
      var config = startNodes.validators
      if config.logFile:
        let updatedLogFile = getLogFile(Role.Validator, validatorIdx)
        config.cliOptions.add CliOption(key: "--log-file", value: updatedLogFile)
      config.cliOptions.add CliOption(key: "--bootstrap-node", value: bootstrap)
      config.cliOptions.add CliOption(key: "--validator")
      let (node, datadir, account) = newNodeProcess(index, config)
      let restClient = newCodexClient(index)
      running.add RunningNode.new(Role.Validator, node, restClient, datadir,
                                  account)
      if config.debugEnabled:
        debug "started new validator node and codex client",
          restApiPort = 8080 + index, discPort = 8090 + index, account
      return newNodeProcess(validatorIdx, config, Role.Validator)

    setup:
      if not startNodes.hardhat.isNil:
        let node = startHardhatNode()
        running.add RunningNode(role: Role.Hardhat, node: node)

      echo "Connecting to hardhat on ws://localhost:8545..."
      provider = JsonRpcProvider.new("ws://localhost:8545")
      accounts = await provider.listAccounts()

      for i in 0..<startNodes.clients.numNodes:
        startClientNode()
        let node = startClientNode()
        running.add RunningNode(role: Role.Client, node: node)
        if i == 0:
          bootstrap = running[0].restClient.info()["spr"].getStr()
          bootstrap = node.client.info()["spr"].getStr()

      for i in 0..<startNodes.providers.numNodes:
        startProviderNode()
        let node = startProviderNode()
        running.add RunningNode(role: Role.Provider, node: node)

      for i in 0..<startNodes.validators.numNodes:
        startValidatorNode()
        let node = startValidatorNode()
        running.add RunningNode(role: Role.Validator, node: node)

    teardown:
      for r in running:
        r.restClient.close()
        r.node.stop()
        removeDir(r.datadir)
        r.node.stop() # also stops rest client
        r.node.removeDataDir()
      running = @[]

    body
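
With RunningNode reduced to a role plus a NodeProcess, suite bodies now reach the REST API through the node's client accessor instead of a stored restClient. A short sketch of a test body using the role helpers (the test itself is hypothetical):

test "first client node exposes its SPR":
  let client = clients()[0].node.client
  check client.info()["spr"].getStr() != ""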


@@ -21,9 +21,9 @@ const workingDir = currentSourcePath() / ".." / ".." / ".."
const executable = "build" / "codex"

type
  NodeProcess* = ref object
    process: Process
    arguments: seq[string]
  NodeProcess* = ref object of RootObj
    process*: Process
    arguments*: seq[string]
    debug: bool
    client: ?CodexClient
@@ -67,10 +67,14 @@ proc dataDir(node: NodeProcess): string =
  let config = CodexConf.load(cmdLine = node.arguments)
  config.dataDir.string

proc apiUrl(node: NodeProcess): string =
proc apiUrl*(node: NodeProcess): string =
  let config = CodexConf.load(cmdLine = node.arguments)
  "http://" & config.apiBindAddress & ":" & $config.apiPort & "/api/codex/v1"

proc discoveryAddress*(node: NodeProcess): string =
  let config = CodexConf.load(cmdLine = node.arguments)
  $config.discoveryIp & ":" & $config.discoveryPort

proc client*(node: NodeProcess): CodexClient =
  if client =? node.client:
    return client
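
Because the accessors re-parse the node's CLI options via CodexConf.load, callers can recover a node's endpoints from the NodeProcess alone. A rough sketch (flag values are illustrative, and startNode's second argument is assumed to be the debug flag, passed positionally as in the template above):

import std/os
import ./nodes

let node = startNode(@[
  "--api-port=8080",
  "--disc-ip=127.0.0.1",
  "--disc-port=8090",
  "--data-dir=" & (getTempDir() / "codex0")
], false)
node.waitUntilStarted()
echo node.apiUrl            # built from the parsed API bind address and --api-port
echo node.discoveryAddress  # built from --disc-ip and --disc-port, e.g. "127.0.0.1:8090"
node.stop()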


@@ -241,27 +241,30 @@ logScope:
multinodesuite "Simulate invalid proofs",
StartNodes(
hardhat: StartHardhatConfig()
.withLogFile(),
clients: StartNodeConfig()
.nodes(1)
.debug()
.withLogFile()
.withLogTopics("node"),
providers:
StartNodeConfig()
.nodes(2)
.simulateProofFailuresFor(providerIdx=0, failEveryNProofs=2)
.debug()
.withLogFile()
.withLogTopics("marketplace",
"sales",
"reservations",
"node",
"JSONRPC-HTTP-CLIENT",
"JSONRPC-WS-CLIENT",
"ethers",
"restapi"
),
providers: StartNodeConfig()
.nodes(2)
.simulateProofFailuresFor(providerIdx=0, failEveryNProofs=2)
.debug()
.withLogFile()
.withLogTopics(
"marketplace",
"sales",
"reservations",
"node",
"JSONRPC-HTTP-CLIENT",
"JSONRPC-WS-CLIENT",
"ethers",
"restapi"
),
validators: StartNodeConfig()
.nodes(1)
@@ -301,13 +304,20 @@ multinodesuite "Simulate invalid proofs",
      duration: uint64 = 12.periods,
      expiry: uint64 = 4.periods): Future[PurchaseId] {.async.} =
    if clients().len < 1 or providers().len < 1:
      raiseAssert("must start at least one client and one provider")
    if clients().len < 1 or providers().len < 2:
      raiseAssert("must start at least one client and two providers")

    let client = clients()[0].restClient
    let storageProvider = providers()[0].restClient
    let client0 = clients()[0].node.client
    let storageProvider0 = providers()[0].node.client
    let storageProvider1 = providers()[1].node.client

    discard storageProvider.postAvailability(
    discard storageProvider0.postAvailability(
      size=0xFFFFF.u256,
      duration=duration.u256,
      minPrice=300.u256,
      maxCollateral=200.u256
    )
    discard storageProvider1.postAvailability(
      size=0xFFFFF.u256,
      duration=duration.u256,
      minPrice=300.u256,
@@ -316,11 +326,11 @@ multinodesuite "Simulate invalid proofs",
    let rng = rng.Rng.instance()
    let chunker = RandomChunker.new(rng, size = DefaultBlockSize * 2, chunkSize = DefaultBlockSize * 2)
    let data = await chunker.getBytes()
    let cid = client.upload(byteutils.toHex(data)).get
    let cid = client0.upload(byteutils.toHex(data)).get
    let expiry = (await provider.currentTime()) + expiry.u256

    # avoid timing issues by filling the slot at the start of the next period
    await advanceToNextPeriod()

    let id = client.requestStorage(
    let id = client0.requestStorage(
      cid,
      expiry=expiry,
      duration=duration.u256,
@@ -329,11 +339,11 @@ multinodesuite "Simulate invalid proofs",
      reward=400.u256,
      nodes=2'u
    ).get
    check eventually client.purchaseStateIs(id, "started")
    check eventually client0.purchaseStateIs(id, "started")
    return id

  proc waitUntilPurchaseIsFinished(purchaseId: PurchaseId, duration: int) {.async.} =
    let client = clients()[0].restClient
    let client = clients()[0].node.client
    check eventually(client.purchaseStateIs(purchaseId, "finished"), duration * 1000)

  # TODO: these are very loose tests in that they are not testing EXACTLY how
@@ -342,10 +352,14 @@ multinodesuite "Simulate invalid proofs",
  # proofs are being marked as missed by the validator.
  test "provider that submits invalid proofs is paid out less":
    let totalProofs = 100
    let totalProofs = 7

    let purchaseId = await waitUntilPurchaseIsStarted(duration=totalProofs.periods)
    await waitUntilPurchaseIsFinished(purchaseId, duration=totalProofs.periods.int)
    # await waitUntilPurchaseIsFinished(purchaseId, duration=totalProofs.periods.int)
    let client = clients()[0].node.client
    let duration = totalProofs.periods.int
    check eventually(client.purchaseStateIs(purchaseId, "finished"), duration * 1000)

    # var slotWasFreed = false
    # proc onSlotFreed(event: SlotFreed) =