fine-tune the tests

Marcin Czenko 2024-10-16 22:35:57 +02:00
parent a8234821b7
commit 012e134fdf
6 changed files with 213 additions and 31 deletions

View File

@@ -11,7 +11,6 @@ env:
   cache_nonce: 0 # Allows for easily busting actions/cache caches
   nim_version: pinned
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref || github.run_id }}
   cancel-in-progress: true
@@ -23,17 +22,17 @@ jobs:
       matrix: ${{ steps.matrix.outputs.matrix }}
       cache_nonce: ${{ env.cache_nonce }}
     steps:
       - name: Compute matrix
         id: matrix
         uses: fabiocaccamo/create-matrix-action@v4
         with:
           matrix: |
             os {linux}, cpu {amd64}, builder {ubuntu-20.04}, tests {all}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
             os {macos}, cpu {amd64}, builder {macos-13}, tests {all}, nim_version {${{ env.nim_version }}}, shell {bash --noprofile --norc -e -o pipefail}
             os {windows}, cpu {amd64}, builder {windows-latest}, tests {unittest}, nim_version {${{ env.nim_version }}}, shell {msys2}
             os {windows}, cpu {amd64}, builder {windows-latest}, tests {contract}, nim_version {${{ env.nim_version }}}, shell {msys2}
             os {windows}, cpu {amd64}, builder {windows-latest}, tests {integration}, nim_version {${{ env.nim_version }}}, shell {msys2}
             os {windows}, cpu {amd64}, builder {windows-latest}, tests {tools}, nim_version {${{ env.nim_version }}}, shell {msys2}
   build:
     needs: matrix

View File

@@ -264,7 +264,7 @@ template multinodesuite*(name: string, body: untyped) =
       # Workaround for https://github.com/NomicFoundation/hardhat/issues/2053
       # Do not use websockets, but use http and polling to stop subscriptions
       # from being removed after 5 minutes
-      ethProvider = JsonRpcProvider.new("https://localhost:8545")
+      ethProvider = JsonRpcProvider.new("http://localhost:8545")
       # if hardhat was NOT started by the test, take a snapshot so it can be
      # reverted in the test teardown
      if nodeConfigs.hardhat.isNone:
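Note: the snapshot mentioned in the comment is the usual hardhat evm_snapshot / evm_revert dance. A minimal sketch, assuming nim-ethers' `send` also accepts a seq of JSON params (only the no-argument form, `send("evm_mine")`, appears elsewhere in this commit); the real call sites live in multinodesuite's setup and teardown:

import pkg/ethers
import pkg/chronos

proc snapshotExample() {.async.} =
  let ethProvider = JsonRpcProvider.new("http://localhost:8545")
  # setup: remember the chain state when hardhat was started externally
  let snapshot = await ethProvider.send("evm_snapshot")
  # ... run the test body ...
  # teardown: roll the chain back so the next test starts from a clean state
  discard await ethProvider.send("evm_revert", @[snapshot])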

View File

@@ -156,7 +156,8 @@ proc waitUntilStarted*(node: NodeProcess) {.async.} =
   let started = newFuture[void]()
   try:
     discard node.captureOutput(node.startedOutput, started).track(node)
-    await started.wait(35.seconds) # allow enough time for proof generation
+    await started.wait(60.seconds) # allow enough time for proof generation
+    trace "node started"
   except AsyncTimeoutError:
     # attempt graceful shutdown in case node was partially started, prevent
     # zombies
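The timeout bump rides on chronos' future-with-timeout pattern; a minimal sketch of that pattern (hypothetical proc name, illustrative bodies), where `wait` raises AsyncTimeoutError once the window elapses:

import pkg/chronos

proc waitForStart(started: Future[void]) {.async.} =
  try:
    # completes when `started` fires, or raises AsyncTimeoutError after 60s;
    # the wider window leaves room for slow proof generation on CI
    await started.wait(60.seconds)
    echo "node started"
  except AsyncTimeoutError:
    # attempt a graceful shutdown here so a half-started node is not leaked
    echo "node did not start in time"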

View File

@@ -1,5 +1,3 @@
-import pkg/stew/byteutils
-import pkg/codex/units
 import ../examples
 import ../contracts/time
 import ../contracts/deployment
@@ -155,7 +153,9 @@ marketplacesuite "Marketplace payouts":
       without requestId =? clientApi.requestId(id):
         fail()
       let slotId = slotId(requestId, !slotIdxFilled)
-      check eventually(providerApi.saleStateIs(slotId, "SaleCancelled"), timeout=expiry.int * 1000)
+      check eventually(providerApi.saleStateIs(slotId, "SaleCancelled"),
+        timeout=expiry.int * 1000)
       check eventually (
         let endBalanceProvider = (await token.balanceOf(provider.ethAccount));
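The `timeout=` argument above is the polling budget of the suite's `eventually` helper. A hedged sketch of what such a check boils down to (the real template lives in the test helpers; `pollUntil` and its 10 ms poll interval are illustrative):

import pkg/chronos

proc pollUntil(condition: proc (): bool, timeout = 5000): Future[bool] {.async.} =
  # re-evaluate `condition` until it holds or `timeout` milliseconds elapse
  let deadline = Moment.now() + timeout.milliseconds
  while not condition():
    if Moment.now() > deadline:
      return false
    await sleepAsync(10.milliseconds)
  return true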

View File

@@ -25,18 +25,18 @@ marketplacesuite "Validation":
     clients:
       CodexConfigs.init(nodes=1)
         .withEthProvider("http://localhost:8545")
-        .debug() # uncomment to enable console log output
-        # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-        # .withLogTopics("node", "marketplace", "clock")
-        .withLogTopics("node", "purchases", "slotqueue", "market")
+        # .debug() # uncomment to enable console log output
+        .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
+        .withLogTopics("purchases", "onchain")
         .some,
     providers:
       CodexConfigs.init(nodes=1)
         .withSimulateProofFailures(idx=0, failEveryNProofs=1)
-        .debug() # uncomment to enable console log output
-        .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-        .withLogTopics("marketplace", "sales", "reservations", "node", "clock", "slotsbuilder")
+        .withEthProvider("http://localhost:8545")
+        # .debug() # uncomment to enable console log output
+        # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
+        # .withLogTopics("sales", "onchain")
         .some,
     validators:
@@ -44,7 +44,7 @@ marketplacesuite "Validation":
         .withValidationGroups(groups = 2)
         .withValidationGroupIndex(idx = 0, groupIndex = 0)
         .withValidationGroupIndex(idx = 1, groupIndex = 1)
-        .debug() # uncomment to enable console log output
+        # .debug() # uncomment to enable console log output
         .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
         .withLogTopics("validator") # each topic as a separate string argument
         .some
@@ -102,17 +102,18 @@ marketplacesuite "Validation":
     clients:
       CodexConfigs.init(nodes=1)
         .withEthProvider("http://localhost:8545")
-        .debug() # uncomment to enable console log output
+        # .debug() # uncomment to enable console log output
         .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-        .withLogTopics("node", "purchases", "slotqueue", "market")
+        .withLogTopics("purchases", "onchain")
         .some,
     providers:
       CodexConfigs.init(nodes=1)
         .withSimulateProofFailures(idx=0, failEveryNProofs=1)
-        .debug() # uncomment to enable console log output
-        .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
-        .withLogTopics("marketplace", "sales", "reservations", "node", "clock", "slotsbuilder")
+        .withEthProvider("http://localhost:8545")
+        # .debug() # uncomment to enable console log output
+        # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
+        # .withLogTopics("sales", "onchain")
         .some
   ):
     let client0 = clients()[0].client
@@ -153,7 +154,7 @@ marketplacesuite "Validation":
         .withValidationGroups(groups = 2)
         .withValidationGroupIndex(idx = 0, groupIndex = 0)
         .withValidationGroupIndex(idx = 1, groupIndex = 1)
-        .debug() # uncomment to enable console log output
+        # .debug() # uncomment to enable console log output
         .withLogFile() # uncomment to output log file to: # tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
         .withLogTopics("validator") # each topic as a separate string argument

View File

@@ -0,0 +1,181 @@
from std/times import inMilliseconds, initDuration, inSeconds, fromUnix
import pkg/codex/logutils
import ../contracts/time
import ../contracts/deployment
import ../codex/helpers
import ../examples
import ./marketplacesuite
import ./nodeconfigs

export logutils

logScope:
  topics = "integration test validation"

marketplacesuite "Validation":
  let nodes = 3
  let tolerance = 1
  let proofProbability = 1

  test "validator marks proofs as missing when using validation groups", NodeConfigs(
    # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
    hardhat:
      HardhatConfig.none,

    clients:
      CodexConfigs.init(nodes=1)
        # .debug() # uncomment to enable console log output
        .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
        # .withLogTopics("node", "marketplace", "clock")
        .withLogTopics("node", "purchases", "slotqueue", "market")
        .some,

    providers:
      CodexConfigs.init(nodes=1)
        .withSimulateProofFailures(idx=0, failEveryNProofs=1)
        # .debug() # uncomment to enable console log output
        # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
        # .withLogTopics("marketplace", "sales", "reservations", "node", "clock", "slotsbuilder")
        .some,

    validators:
      CodexConfigs.init(nodes=2)
        .withValidationGroups(groups = 2)
        .withValidationGroupIndex(idx = 0, groupIndex = 0)
        .withValidationGroupIndex(idx = 1, groupIndex = 1)
        # .debug() # uncomment to enable console log output
        .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
        .withLogTopics("validator") # each topic as a separate string argument
        .some
  ):
    let client0 = clients()[0].client
    let expiry = 5.periods
    let duration = expiry + 10.periods

    var currentTime = await ethProvider.currentTime()
    let requestEndTime = currentTime.truncate(uint64) + duration

    let data = await RandomChunker.example(blocks=8)

    # TODO: better value for data.len below. This TODO is also present in
    # testproofs.nim - we may want to address it or remove the comment.
    createAvailabilities(data.len * 2, duration)

    let cid = client0.upload(data).get

    let purchaseId = await client0.requestStorage(
      cid,
      expiry=expiry,
      duration=duration,
      nodes=nodes,
      tolerance=tolerance,
      proofProbability=proofProbability
    )
    let requestId = client0.requestId(purchaseId).get

    debug "validation suite", purchaseId = purchaseId.toHex, requestId = requestId

    check eventually(client0.purchaseStateIs(purchaseId, "started"),
      timeout = expiry.int * 1000)

    currentTime = await ethProvider.currentTime()
    let secondsTillRequestEnd = (requestEndTime - currentTime.truncate(uint64)).int

    debug "validation suite", secondsTillRequestEnd = secondsTillRequestEnd.seconds

    # Because of Erasure Coding, the expected number of slots being freed
    # is tolerance + 1. When more than tolerance slots are freed, the whole
    # request will fail. Thus, awaiting for a failing state should
    # be sufficient to conclude that validators did their job correctly.
    # NOTICE: We actually have to wait for the "errored" state, because
    # immediately after withdrawing the funds the purchasing state machine
    # transitions to the "errored" state.
    check eventually(client0.purchaseStateIs(purchaseId, "errored"),
      timeout = (secondsTillRequestEnd + 60) * 1000)
test "validator uses historical state to mark missing proofs", NodeConfigs(
# Uncomment to start Hardhat automatically, typically so logs can be inspected locally
hardhat:
HardhatConfig.none,
clients:
CodexConfigs.init(nodes=1)
# .debug() # uncomment to enable console log output
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# .withLogTopics("node", "marketplace", "clock")
# .withLogTopics("node", "purchases", "slotqueue", "market")
.some,
providers:
CodexConfigs.init(nodes=1)
.withSimulateProofFailures(idx=0, failEveryNProofs=1)
# .debug() # uncomment to enable console log output
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# .withLogTopics("marketplace", "sales", "reservations", "node", "clock", "slotsbuilder")
.some
):
let client0 = clients()[0].client
let expiry = 5.periods
let duration = expiry + 10.periods
var currentTime = await ethProvider.currentTime()
let requestEndTime = currentTime.truncate(uint64) + duration
let data = await RandomChunker.example(blocks=8)
# TODO: better value for data.len below. This TODO is also present in
# testproofs.nim - we may want to address it or remove the comment.
createAvailabilities(data.len * 2, duration)
let cid = client0.upload(data).get
let purchaseId = await client0.requestStorage(
cid,
expiry=expiry,
duration=duration,
nodes=nodes,
tolerance=tolerance,
proofProbability=proofProbability
)
let requestId = client0.requestId(purchaseId).get
debug "validation suite", purchaseId = purchaseId.toHex, requestId = requestId
check eventually(client0.purchaseStateIs(purchaseId, "started"),
timeout = expiry.int * 1000)
# just to make sure we have a mined block that separates us
# from the block containing the last SlotFilled event
discard await ethProvider.send("evm_mine")
var validators = CodexConfigs.init(nodes=2)
.withValidationGroups(groups = 2)
.withValidationGroupIndex(idx = 0, groupIndex = 0)
.withValidationGroupIndex(idx = 1, groupIndex = 1)
# .debug() # uncomment to enable console log output
.withLogFile() # uncomment to output log file to:
# tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
.withLogTopics("validator") # each topic as a separate string argument
failAndTeardownOnError "failed to start validator nodes":
for config in validators.configs.mitems:
let node = await startValidatorNode(config)
running.add RunningNode(
role: Role.Validator,
node: node
)
currentTime = await ethProvider.currentTime()
let secondsTillRequestEnd = (requestEndTime - currentTime.truncate(uint64)).int
debug "validation suite", secondsTillRequestEnd = secondsTillRequestEnd.seconds
# Because of Erasure Coding, the expected number of slots being freed
# is tolerance + 1. When more than tolerance slots are freed, the whole
# request will fail. Thus, awaiting for a failing state should
# be sufficient to conclude that validators did their job correctly.
# NOTICE: We actually have to wait for the "errored" state, because
# immediately after withdrawing the funds the purchasing state machine
# transitions to the "errored" state.
check eventually(client0.purchaseStateIs(purchaseId, "errored"),
timeout = (secondsTillRequestEnd + 60) * 1000)
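For the erasure-coding comment above, a worked example with this suite's own parameters (nodes = 3, tolerance = 1): the request tolerates losing up to `tolerance` slots, so validators freeing tolerance + 1 = 2 slots pushes the purchase into the failed ("errored") state that the final `check eventually` waits for. Sketch of that arithmetic only, not part of the test:

let nodes = 3
let tolerance = 1
let slotsThatMustFail = tolerance + 1 # 2 freed slots are enough to fail the request
assert slotsThatMustFail <= nodes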