Update multinode suite for better simulateFailedProofs enabling, add proofs test
parent 6ba03b7a14
commit b5c4be351d
@@ -2,6 +2,10 @@ import std/os
 import std/macros
+import std/json
+import std/httpclient
+import std/sequtils
+import std/strutils
 import std/sequtils
 import std/strutils
 import pkg/chronicles
 import ../ethertest
 import ./codexclient
@@ -55,9 +59,9 @@ proc init*(_: type DebugNodes,
     topics: topics)

 template multinodesuite*(name: string,
-  startNodes: StartNodes, debugNodes: DebugNodes, body: untyped) =
+  startNodes: StartNodes, debugConfig: DebugConfig, body: untyped) =

-  if (debugNodes.client or debugNodes.provider) and
+  if (debugConfig.client or debugConfig.provider) and
     (enabledLogLevel > LogLevel.TRACE or
      enabledLogLevel == LogLevel.NONE):
     echo ""
@@ -82,15 +86,17 @@ template multinodesuite*(name: string,
       ", not enough eth accounts.")

     let datadir = getTempDir() / "Codex" & $index
+    let logdir = currentSourcePath.parentDir()
     var options = @[
       "--api-port=" & $(8080 + index),
       "--data-dir=" & datadir,
       "--nat=127.0.0.1",
       "--disc-ip=127.0.0.1",
       "--disc-port=" & $(8090 + index),
-      "--eth-account=" & $accounts[index]]
+      "--eth-account=" & $accounts[index],
+      "--log-file=" & (logdir / "codex" & $index & ".log")]
     .concat(addlOptions)
-    if debug: options.add "--log-level=INFO;TRACE: " & debugNodes.topics
+    if debug: options.add "--log-level=INFO;TRACE: " & debugConfig.topics
     let node = startNode(options, debug = debug)
     node.waitUntilStarted()
     (node, datadir, accounts[index])
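Note: the new --log-file option writes each node's log next to the test sources. A minimal sketch of where the path resolves for node 0, reusing the diff's own expressions:

import std/os
import std/strutils

# Sketch only: evaluates the same expressions newNodeProcess uses above,
# for node index 0.
let index = 0
let logdir = currentSourcePath.parentDir()
let logFileOption = "--log-file=" & (logdir / "codex" & $index & ".log")
assert logFileOption.endsWith("codex0.log")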
@@ -101,38 +107,47 @@ template multinodesuite*(name: string,
   proc startClientNode() =
     let index = running.len
     let (node, datadir, account) = newNodeProcess(
-      index, @["--persistence"], debugNodes.client)
+      index, @["--persistence"], debugConfig.client)
     let restClient = newCodexClient(index)
     running.add RunningNode.new(Role.Client, node, restClient, datadir,
                                 account)
-    if debugNodes.client:
+    if debugConfig.client:
       debug "started new client node and codex client",
         restApiPort = 8080 + index, discPort = 8090 + index, account

-  proc startProviderNode(failEveryNProofs: uint = 0) =
+  proc startProviderNode(cliOptions: seq[CliOption]) =
     let index = running.len
-    let (node, datadir, account) = newNodeProcess(index, @[
+    var options = @[
       "--bootstrap-node=" & bootstrap,
-      "--persistence",
-      "--simulate-proof-failures=" & $failEveryNProofs],
-      debugNodes.provider)
+      "--persistence"
+    ]
+
+    for cliOption in cliOptions:
+      var option = cliOption.key
+      if cliOption.value.len > 0:
+        option &= "=" & cliOption.value
+      options.add option
+
+    let (node, datadir, account) = newNodeProcess(index, options,
+      debugConfig.provider)
     let restClient = newCodexClient(index)
     running.add RunningNode.new(Role.Provider, node, restClient, datadir,
                                 account)
-    if debugNodes.provider:
+    if debugConfig.provider:
       debug "started new provider node and codex client",
-        restApiPort = 8080 + index, discPort = 8090 + index, account
+        restApiPort = 8080 + index, discPort = 8090 + index, account,
+        cliOptions = options.join(",")

   proc startValidatorNode() =
     let index = running.len
     let (node, datadir, account) = newNodeProcess(index, @[
       "--bootstrap-node=" & bootstrap,
       "--validator"],
-      debugNodes.validator)
+      debugConfig.validator)
     let restClient = newCodexClient(index)
     running.add RunningNode.new(Role.Validator, node, restClient, datadir,
                                 account)
-    if debugNodes.validator:
+    if debugConfig.validator:
       debug "started new validator node and codex client",
         restApiPort = 8080 + index, discPort = 8090 + index, account
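Note: the CliOption type consumed by startProviderNode is not defined anywhere in this diff. A minimal sketch consistent with its usage here and in the tests below; the field types and the toArg helper are assumptions, not part of the commit:

# Sketch only: field names are taken from startProviderNode and the tests,
# the types are assumed.
type CliOption* = object
  nodeIdx*: int    # provider index the option is scoped to
  key*: string     # e.g. "--simulate-proof-failures"
  value*: string   # empty for flag-style options

# Hypothetical helper mirroring the serialization loop in startProviderNode.
proc toArg(option: CliOption): string =
  if option.value.len > 0:
    option.key & "=" & option.value
  else:
    option.key

assert CliOption(nodeIdx: 0, key: "--simulate-proof-failures",
                 value: "2").toArg == "--simulate-proof-failures=2"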
@@ -152,7 +167,10 @@ template multinodesuite*(name: string,
     bootstrap = running[0].restClient.info()["spr"].getStr()

     for i in 0..<startNodes.providers:
-      startProviderNode()
+      let cliOptions = startNodes.providerCliOptions.filter(
+        proc(o: CliOption): bool = o.nodeIdx == i
+      )
+      startProviderNode(cliOptions)

     for i in 0..<startNodes.validators:
       startValidatorNode()
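Note: the providerCliOptions filter above pairs with the builder call simulateProofFailuresFor used in the proofs test further down; neither definition appears in this diff. A plausible sketch, assuming StartNodes is an object carrying a providerCliOptions: seq[CliOption] field, as the filter implies:

# Sketch only: assumed implementation, not shown in this commit.
proc simulateProofFailuresFor*(startNodes: StartNodes,
                               providerIdx: int,
                               failEveryNProofs: int): StartNodes =
  result = startNodes
  result.providerCliOptions.add CliOption(
    nodeIdx: providerIdx,
    key: "--simulate-proof-failures",
    value: $failEveryNProofs)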
@@ -6,11 +6,18 @@ import std/osproc
 import std/os
 import std/streams
 import std/strutils
+import pkg/chronicles
+import codex/conf
 import ./codexclient

 export codexclient
+export chronicles
+
+logScope:
+  topics = "integration testing nodes"

 const workingDir = currentSourcePath() / ".." / ".." / ".."
 const executable = "build" / "codex"
@@ -9,8 +9,10 @@ import ../contracts/deployment
 import ./twonodes
+import ./multinodes

+export chronicles
+
 logScope:
-  topics = "test proofs"
+  topics = "integration test proofs"

 twonodessuite "Proving integration test", debug1=false, debug2=false:
   let validatorDir = getTempDir() / "CodexValidator"
@@ -58,17 +60,13 @@ twonodessuite "Proving integration test", debug1=false, debug2=false:
     await provider.advanceTimeTo(endOfPeriod + 1)

   proc startValidator: NodeProcess =
-    let validator = startNode(
-      [
+    startNode([
       "--data-dir=" & validatorDir,
       "--api-port=8089",
       "--disc-port=8099",
       "--validator",
       "--eth-account=" & $accounts[2]
-      ], debug = false
-    )
-    validator.waitUntilStarted()
-    validator
+    ], debug = false)

   proc stopValidator(node: NodeProcess) =
     node.stop()
@@ -108,8 +106,9 @@ twonodessuite "Proving integration test", debug1=false, debug2=false:
     stopValidator(validator)

 multinodesuite "Simulate invalid proofs",
-  StartNodes.init(clients=1'u, providers=0'u, validators=1'u),
-  DebugNodes.init(client=false, provider=false, validator=false):
+  StartNodes.init(clients=1, providers=0, validators=1),
+  DebugConfig.init(client=false, provider=false, validator=false):
+  # .simulateProofFailuresFor(providerIdx = 0, failEveryNProofs = 2),

   proc purchaseStateIs(client: CodexClient, id: PurchaseId, state: string): bool =
     client.getPurchase(id).option.?state == some state
@@ -129,10 +128,8 @@ multinodesuite "Simulate invalid proofs",
     # As we use in tests provider.currentTime() which uses block timestamp this can lead to synchronization issues.
     await provider.advanceTime(1.u256)

-  proc periods(p: Ordinal | uint): uint64 =
-    when p is uint:
-      p * period
-    else: p.uint * period
+  proc periods(p: int): uint64 =
+    p.uint64 * period

   proc advanceToNextPeriod {.async.} =
     let periodicity = Periodicity(seconds: period.u256)
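Note: the simplified periods helper is a plain multiplication by the proof period read from marketplace.config(). A standalone sketch, with an assumed 10-second period in place of the configured value:

# Sketch only: period is hard-coded here; the suite reads the real value
# from marketplace.config().
var period: uint64 = 10
proc periods(p: int): uint64 =
  p.uint64 * period
assert 3.periods == 30'u64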
@@ -178,9 +175,16 @@ multinodesuite "Simulate invalid proofs",
   # proofs are being marked as missed by the validator.

   test "slot is freed after too many invalid proofs submitted":
-    let failEveryNProofs = 2'u
-    let totalProofs = 100'u
-    startProviderNode(failEveryNProofs)
+    let failEveryNProofs = 2
+    let totalProofs = 100
+
+    startProviderNode(@[
+      CliOption(
+        nodeIdx: 0,
+        key: "--simulate-proof-failures",
+        value: $failEveryNProofs
+      )
+    ])

     await waitUntilPurchaseIsStarted(duration=totalProofs.periods)
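Note: with the CliOption above and the serialization in startProviderNode, the provider started inside this test (index 2, since the client and validator start first) receives an option list roughly like the sketch below. The account, datadir, log path and bootstrap SPR are placeholders, not values from the commit:

# Illustrative only: approximate argv assembled by newNodeProcess.
let argv = @[
  "--api-port=8082",
  "--data-dir=/tmp/Codex2",
  "--nat=127.0.0.1",
  "--disc-ip=127.0.0.1",
  "--disc-port=8092",
  "--eth-account=0x...",           # accounts[2], placeholder
  "--log-file=...",                # <testdir>/codex2.log, placeholder
  "--bootstrap-node=spr:...",      # client node's SPR, placeholder
  "--persistence",
  "--simulate-proof-failures=2"    # from the CliOption above
]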
@@ -202,9 +206,15 @@ multinodesuite "Simulate invalid proofs",
     await subscription.unsubscribe()

   test "slot is not freed when not enough invalid proofs submitted":
-    let failEveryNProofs = 3'u
-    let totalProofs = 12'u
-    startProviderNode(failEveryNProofs)
+    let failEveryNProofs = 3
+    let totalProofs = 12
+    startProviderNode(@[
+      CliOption(
+        nodeIdx: 0,
+        key: "--simulate-proof-failures",
+        value: $failEveryNProofs
+      )
+    ])

     await waitUntilPurchaseIsStarted(duration=totalProofs.periods)
@@ -224,3 +234,100 @@ multinodesuite "Simulate invalid proofs",
     check not slotWasFreed

     await subscription.unsubscribe()
+
+multinodesuite "Simulate invalid proofs",
+  StartNodes.init(clients=1, providers=2, validators=1)
+    .simulateProofFailuresFor(providerIdx = 0, failEveryNProofs = 2),
+  DebugConfig.init(client=false, provider=true, validator=false, topics="marketplace,sales,proving,reservations,node,JSONRPC-HTTP-CLIENT,JSONRPC-WS-CLIENT,ethers"):
+
+  proc purchaseStateIs(client: CodexClient, id: PurchaseId, state: string): bool =
+    client.getPurchase(id).option.?state == some state
+
+  var marketplace: Marketplace
+  var period: uint64
+  var slotId: SlotId
+
+  setup:
+    marketplace = Marketplace.new(Marketplace.address, provider)
+    let config = await marketplace.config()
+    period = config.proofs.period.truncate(uint64)
+    slotId = SlotId(array[32, byte].default) # ensure we aren't reusing from prev test
+
+    # Our Hardhat configuration does use automine, which means that time tracked by `provider.currentTime()` is not
+    # advanced until blocks are mined and that happens only when transaction is submitted.
+    # As we use in tests provider.currentTime() which uses block timestamp this can lead to synchronization issues.
+    await provider.advanceTime(1.u256)
+
+  proc periods(p: int): uint64 =
+    # when p is uint:
+    p.uint64 * period
+    # else: p.uint * period
+
+  proc advanceToNextPeriod {.async.} =
+    let periodicity = Periodicity(seconds: period.u256)
+    let currentPeriod = periodicity.periodOf(await provider.currentTime())
+    let endOfPeriod = periodicity.periodEnd(currentPeriod)
+    await provider.advanceTimeTo(endOfPeriod + 1)
+
+  proc waitUntilPurchaseIsStarted(proofProbability: uint64 = 1,
+                                  duration: uint64 = 12.periods,
+                                  expiry: uint64 = 4.periods): Future[PurchaseId] {.async.} =
+
+    if clients().len < 1 or providers().len < 1:
+      raiseAssert("must start at least one client and one provider")
+
+    let client = clients()[0].restClient
+    let storageProvider = providers()[0].restClient
+
+    discard storageProvider.postAvailability(
+      size=0xFFFFF.u256,
+      duration=duration.u256,
+      minPrice=300.u256,
+      maxCollateral=200.u256
+    )
+    let cid = client.upload("some file contents " & $ getTime().toUnix).get
+    let expiry = (await provider.currentTime()) + expiry.u256
+    # avoid timing issues by filling the slot at the start of the next period
+    await advanceToNextPeriod()
+    let id = client.requestStorage(
+      cid,
+      expiry=expiry,
+      duration=duration.u256,
+      proofProbability=proofProbability.u256,
+      collateral=100.u256,
+      reward=400.u256
+    ).get
+    check eventually client.purchaseStateIs(id, "started")
+    return id
+
+  proc waitUntilPurchaseIsFinished(purchaseId: PurchaseId, duration: int) {.async.} =
+    let client = clients()[0].restClient
+    check eventually(client.purchaseStateIs(purchaseId, "finished"), duration * 1000)
+
+  # TODO: these are very loose tests in that they are not testing EXACTLY how
+  # proofs were marked as missed by the validator. These tests should be
+  # tightened so that they are showing, as an integration test, that specific
+  # proofs are being marked as missed by the validator.
+
+  test "provider that submits invalid proofs is paid out less":
+    let totalProofs = 100
+
+    let purchaseId = await waitUntilPurchaseIsStarted(duration=totalProofs.periods)
+    await waitUntilPurchaseIsFinished(purchaseId, duration=totalProofs.periods.int)
+
+    # var slotWasFreed = false
+    # proc onSlotFreed(event: SlotFreed) =
+    #   if slotId(event.requestId, event.slotIndex) == slotId:
+    #     slotWasFreed = true
+    # let subscription = await marketplace.subscribe(SlotFreed, onSlotFreed)
+
+    # for _ in 0..<totalProofs:
+    #   if slotWasFreed:
+    #     break
+    #   else:
+    #     await advanceToNextPeriod()
+    #     await sleepAsync(1.seconds)
+
+    # check slotWasFreed
+
+    # await subscription.unsubscribe()
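Note: the new test only waits for the purchase to finish; the payout assertion its title promises is not yet implemented, and the slot-freed check is kept commented out. One way it could be tightened, assuming an ERC-20 token handle with a balanceOf view is available to the suite (not shown in this diff) and that both providers fill slots of the same request:

# Sketch only, intended as a test body inside the suite above; `token`,
# `providers()` accounts and the balance comparison are assumptions.
test "provider that submits invalid proofs is paid out less (sketch)":
  let failing = providers()[0].account   # started with --simulate-proof-failures=2
  let honest = providers()[1].account
  let failingBefore = await token.balanceOf(failing)
  let honestBefore = await token.balanceOf(honest)

  let purchaseId = await waitUntilPurchaseIsStarted(duration=100.periods)
  await waitUntilPurchaseIsFinished(purchaseId, duration=100.periods.int)

  # missed proofs should cost the failing provider part of its earnings
  check (await token.balanceOf(failing)) - failingBefore <
        (await token.balanceOf(honest)) - honestBefore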
@@ -1 +1 @@
-Subproject commit d7c9879cf8913e33d89acbcd25c3ce8a1ee5e966
+Subproject commit b1b4795a609c156685439c1d33d2e2b643e0d545