Add integration test that checks for "nonce too high" in logs
This commit is contained in:
parent
bef1160799
commit
1a69ded089
|
@ -5,6 +5,7 @@ import pkg/chronicles
|
|||
import pkg/chronos/asyncproc
|
||||
import pkg/ethers
|
||||
import pkg/libp2p
|
||||
import pkg/stew/io2
|
||||
import std/os
|
||||
import std/strutils
|
||||
import codex/conf
|
||||
|
@ -40,6 +41,16 @@ method outputLineEndings(node: CodexProcess): string =
|
|||
method onOutputLineCaptured(node: CodexProcess, line: string) =
  ## Codex nodes take no action on individually captured output lines;
  ## log inspection is done via logFileContains instead.
  discard
|
||||
|
||||
method logFileContains*(node: CodexProcess, text: string): bool =
  ## Returns true when this codex node's log file contains `text`.
  ##
  ## The log file path is recovered by re-parsing the node's CLI arguments;
  ## asserts when the node was not started with --log-file (use
  ## .withLogFile()) or when the log file cannot be read.
  let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)

  without logFile =? config.logFile.?string:
    # fixed message: previously read "does have", inverting the meaning
    raiseAssert "codex node does not have a --log-file option set (use .withLogFile())"

  without logContents =? logFile.readAllChars:
    raiseAssert "failed to open codex log file, aborting"

  return logContents.contains(text)
|
||||
|
||||
proc dataDir(node: CodexProcess): string =
  ## The node's data directory, as configured via its CLI arguments.
  CodexConf
    .load(cmdLine = node.arguments, quitOnFailure = false)
    .dataDir
    .string
|
||||
|
@ -73,4 +84,4 @@ method stop*(node: CodexProcess) {.async.} =
|
|||
node.client = none CodexClient
|
||||
|
||||
method removeDataDir*(node: CodexProcess) =
  ## Deletes the node's data directory and all of its contents.
  # As rendered, the body removed the directory twice (`removeDir` and
  # `os.removeDir`); a single explicitly qualified call suffices and avoids
  # ambiguity with any other removeDir in scope.
  os.removeDir(node.dataDir)
|
||||
|
|
|
@ -40,6 +40,25 @@ method processOptions(node: HardhatProcess): set[AsyncProcessOption] =
|
|||
method outputLineEndings(node: HardhatProcess): string =
  ## Hardhat terminates its output lines with a bare line feed.
  "\n"
|
||||
|
||||
method logFileContains*(hardhat: HardhatProcess, text: string): bool =
  ## Returns true when the hardhat node's log file contains `text`.
  ## Asserts when the log file handle is absent or the file cannot be read.
  without fileHandle =? hardhat.logFile:
    raiseAssert "failed to open hardhat log file, aborting"

  without fileSize =? fileHandle.getFileSize:
    raiseAssert "failed to get current hardhat log file size, aborting"

  # guard against file sizes that cannot be represented as a Nim index
  if checkFileSize(fileSize).isErr:
    raiseAssert "file size too big for nim indexing"

  # read the entire log into memory, then search it
  var data = ""
  data.setLen(fileSize)

  # NOTE(review): bytesRead is never checked against fileSize — a partial
  # read would go unnoticed; confirm readFile fills the whole buffer.
  without bytesRead =? readFile(fileHandle,
                                data.toOpenArray(0, len(data) - 1)):
    raiseAssert "unable to read hardhat log, aborting"

  return data.contains(text)
|
||||
|
||||
proc openLogFile(node: HardhatProcess, logFilePath: string): IoHandle =
|
||||
let logFileHandle = openFile(
|
||||
logFilePath,
|
||||
|
|
|
@ -245,6 +245,26 @@ template multinodesuite*(name: string, body: untyped) =
|
|||
|
||||
return await newCodexProcess(validatorIdx, config, Role.Validator)
|
||||
|
||||
|
||||
proc searchLogs(role: Role, text: string): seq[bool] =
  ## For every running node of `role`, reports whether that node's log
  ## file contains `text` — one entry per node, in node order.
  var hits: seq[bool] = @[]
  if role == Role.Hardhat:
    return @[hardhat().logFileContains(text)]
  elif role == Role.Client:
    for client in clients():
      hits.add client.logFileContains(text)
  elif role == Role.Validator:
    # previously fell through to the provider branch and searched the
    # wrong nodes' logs — search validator logs explicitly
    for validator in validators():
      hits.add validator.logFileContains(text)
  else:
    for provider in providers():
      hits.add provider.logFileContains(text)

  return hits
|
||||
|
||||
proc logsContain(role: Role, text: string): bool =
  ## True when every node of `role` has `text` in its log file.
  for hit in searchLogs(role, text):
    if not hit:
      return false
  return true
|
||||
|
||||
proc logsDoNotContain(role: Role, text: string): bool =
  ## True when no node of `role` has `text` in its log file.
  for hit in searchLogs(role, text):
    if hit:
      return false
  return true
|
||||
|
||||
proc teardownImpl() {.async.} =
|
||||
for nodes in @[validators(), clients(), providers()]:
|
||||
for node in nodes:
|
||||
|
|
|
@ -1,34 +0,0 @@
|
|||
import pkg/chronicles
|
||||
import pkg/questionable
|
||||
|
||||
export chronicles
|
||||
|
||||
type
  ## Base configuration shared by all test-node kinds; builder procs below
  ## (debug, withLogFile, withLogLevel) set its fields.
  NodeConfig* = ref object of RootObj
    logFile*: bool        # when true, node output is written to a log file
    logLevel*: ?LogLevel  # optional chronicles log level override
    debugEnabled*: bool   # when true, node output is echoed to stdout
|
||||
|
||||
proc debug*[T: NodeConfig](config: T, enabled = true): T =
  ## Enables (or disables) echoing the node's log output to stdout.
  result = config
  result.debugEnabled = enabled
|
||||
|
||||
proc withLogFile*[T: NodeConfig](
  config: T,
  logToFile: bool = true
): T =
  ## Turns writing of the node's output to a log file on or off.
  result = config
  result.logFile = logToFile
|
||||
|
||||
proc withLogLevel*[T: NodeConfig](
  config: T,
  level: LogLevel
): T =
  ## Sets the node's chronicles log level.
  # Fixed: the parameter was typed `NodeConfig` instead of `T`, which broke
  # the generic builder chain (the value flowing through was no longer T),
  # inconsistent with the sibling debug/withLogFile procs.
  var startConfig = config
  startConfig.logLevel = some level
  return startConfig
|
|
@ -44,6 +44,9 @@ method outputLineEndings(node: NodeProcess): string {.base.} =
|
|||
method onOutputLineCaptured(node: NodeProcess, line: string) {.base.} =
  ## Called once for each line captured from the node's output.
  ## Subclasses must override; the base implementation asserts.
  raiseAssert "not implemented"
|
||||
|
||||
method logFileContains*(node: NodeProcess, text: string): bool {.base.} =
  ## Returns true when the node's log file contains `text`.
  ## Subclasses override; the base implementation asserts.
  # Fixed: the base method was missing the `text: string` parameter that
  # every override (CodexProcess, HardhatProcess) takes, so those methods
  # never dispatched through this base. Also renamed the misleading
  # `hardhat` parameter — this is the generic NodeProcess base.
  raiseAssert "not implemented"
|
||||
|
||||
method start*(node: NodeProcess) {.base, async.} =
|
||||
logScope:
|
||||
nodeName = node.name
|
||||
|
|
|
@ -0,0 +1,79 @@
|
|||
import pkg/stew/byteutils
|
||||
import pkg/codex/units
|
||||
import ../examples
|
||||
import ../contracts/time
|
||||
import ../contracts/deployment
|
||||
import ./marketplacesuite
|
||||
import ./nodeconfigs
|
||||
import ./hardhatconfig
|
||||
|
||||
marketplacesuite "Slot reservations":

  test "nonce does not go too high when reserving slots",
    NodeConfigs(
      # Hardhat is started automatically so its logs can be inspected locally
      hardhat: HardhatConfig()
        .withLogFile()
        .some,

      clients:
        CodexConfigs.init(nodes=1)
          # .debug() # uncomment to enable console log output
          .withLogFile() # logs to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
          .withLogTopics("node", "erasure", "marketplace")
          .some,

      providers:
        CodexConfigs.init(nodes=6)
          # .debug() # uncomment to enable console log output
          .withLogFile() # logs to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
          .withLogTopics("node", "marketplace", "sales", "reservations", "proving", "ethers", "statemachine")
          .some,
    ):
    let reward = 400.u256
    let duration = 50.periods
    let collateral = 200.u256
    let expiry = 30.periods
    let data = await RandomChunker.example(blocks=8)
    let client = clients()[0]
    let clientApi = client.client

    # every provider advertises availability
    for i in 0..<providers().len:
      let provider = providers()[i].client
      discard provider.postAvailability(
        # make availability size small enough that each provider can only
        # fill one slot
        totalSize=(data.len div 2).u256,
        duration=duration.u256,
        minPrice=reward,
        maxCollateral=collateral)

    let cid = clientApi.upload(data).get

    # record each filled slot index as the contract emits events
    var slotIdxFilled: seq[UInt256] = @[]
    proc onSlotFilled(event: SlotFilled) =
      slotIdxFilled.add event.slotIndex
    proc onSlotReservationsFull(event: SlotReservationsFull) =
      echo "Slot reservations full for slot ", event.slotIndex

    let subscriptionFull = await marketplace.subscribe(SlotReservationsFull, onSlotReservationsFull)
    let subscription = await marketplace.subscribe(SlotFilled, onSlotFilled)

    # client requests storage but requires multiple slots to host the content
    let id = await clientApi.requestStorage(
      cid,
      duration=duration,
      reward=reward,
      expiry=expiry,
      collateral=collateral,
      nodes=5,
      tolerance=1
    )

    # wait until all slots filled
    check eventually(slotIdxFilled.len == 5, timeout=expiry.int * 1000)

    # the actual regression check: concurrent reservations must not push the
    # provider's transaction nonce too high
    check logsDoNotContain(Role.Provider, "Nonce too high")

    await subscription.unsubscribe()
    await subscriptionFull.unsubscribe()
|
|
@ -5,6 +5,7 @@ import ./integration/testsales
|
|||
import ./integration/testpurchasing
|
||||
import ./integration/testblockexpiration
|
||||
import ./integration/testmarketplace
|
||||
import ./integration/testslotreservations
|
||||
import ./integration/testproofs
|
||||
import ./integration/testvalidator
|
||||
import ./integration/testecbug
|
||||
|
|
Loading…
Reference in New Issue