fix: unknown state goes to payout when slot state is finished (#555)

Support logging to file

Log the entire config and fix build error

Process slot queue on reservation callback onMarkUnused

Add Reservation object, rename reserve > create

refactor Reservations api to include Reservation CRUD

All tests that use the Reservation module updated

- add requestId and slotIndex to Reservation (hopefully these will prove to be useful when we persist Reservations until request are completed, to add back bytes to Availability)
- add querying of all reservations, with accompanying tests
- change from find to findAvailabilities
- move onCleanUp from SalesContext to SalesAgent as it was getting overwritten for each slot processed
- remove sales agent AFTER deleting reservation, as this was causing some SIGSEGVs
- retrofit testsales and testslotqueue to match updated Reservations module API

Add deletion of inactive reservations on sales load

clean up

add exception message detail util

Apply to onStore errors as we are seeing undetailed errors in the dist tests logs

add missing file

change slotSize to reflect current implementation

Fix slotSize to reduce by one block

Revert change to slotSize that reduces it by one block

Add additional test check for querying reservations/availabilities

filter past requests based on availability

Because availability filtering on push was removed, when availability is added and past storage request events are queried, those requests need to be filtered by availability before being added to the queue.

Revert "filter past requests based on availability"

This reverts commit 0c2362658b523e0de425794b1fa30ebd53bd30ae.

Add debugging for dist tests

Add cancel on error during filling state

When calling fillSlot, any transaction errors that occur (possibly during estimate gas) will cause that tx to be replaced with a cancellation transaction (a 0-valued tx to ourselves).

more debug logging

fix build

wait for fillSlot to be mined (so that a revert error can be extracted)

fix confirmation of fillSlot

add more debugging

moar debugging

debugging: change echo to trace

bump ethers to add chronicles

fix contracts tests

switch to earlier nim-ethers commit

bump json-rpc and nim-ethers

bump nim-ethers to prevent parsing newHeads log from crashing sales state machine

moar debugging

moar debugging

moar debugging

bump ethers to fix "key not found: data" error

ethers debug logging

bump ethers - better Transaction object deserialization

bump ethers to replay tx with past tx format

bump ethers to add serialization for PastTransaction

ethers bump: better logging, separate logic for revert reason string

fix build

ethers: update revert reason retrieval

remove unneeded confirm

ethers: try replay without decrementing blockNumber

ethers: include type and chainId in replayed txs

ethers: add gas into and remove type from replayed txs

ensure gas is being serialized in Transaction

ethers: fix build error

bump ethers: rebased on top of cancel tx due to estimateGas error PR

Fix proving with latest ethers bump (tested on master)

Update multinode suite for better simulateFailedProofs enabling, add proofs test

Improve multinode suite for better debug options, including logging to file

There is a 503 "sales unavailable" error

improve multinode test suite, add logging to file
This commit is contained in:
Adam Uhlíř 2023-09-27 15:57:41 +02:00 committed by Eric
parent 86a6ef9215
commit 3b520fc0c6
No known key found for this signature in database
14 changed files with 524 additions and 247 deletions

1
.gitignore vendored
View File

@ -39,3 +39,4 @@ docker/hostdatadir
docker/prometheus-data docker/prometheus-data
.DS_Store .DS_Store
nim.cfg nim.cfg
tests/integration/logs

View File

@ -2,12 +2,13 @@ import std/sequtils
import std/strutils import std/strutils
import std/sugar import std/sugar
import pkg/chronicles import pkg/chronicles
import pkg/ethers import pkg/ethers except `%`
import pkg/ethers/testing import pkg/ethers/testing
import pkg/upraises import pkg/upraises
import pkg/questionable import pkg/questionable
import ../utils/json
import ../market import ../market
import ./marketplace import ./marketplace except `%`
export market export market
@ -22,6 +23,7 @@ type
EventSubscription = ethers.Subscription EventSubscription = ethers.Subscription
OnChainMarketSubscription = ref object of MarketSubscription OnChainMarketSubscription = ref object of MarketSubscription
eventSubscription: EventSubscription eventSubscription: EventSubscription
OnChainMarketError = object of CatchableError
func new*(_: type OnChainMarket, contract: Marketplace): OnChainMarket = func new*(_: type OnChainMarket, contract: Marketplace): OnChainMarket =
without signer =? contract.signer: without signer =? contract.signer:
@ -31,7 +33,7 @@ func new*(_: type OnChainMarket, contract: Marketplace): OnChainMarket =
signer: signer, signer: signer,
) )
proc approveFunds(market: OnChainMarket, amount: UInt256) {.async.} = proc approveFunds(market: OnChainMarket, amount: UInt256, waitForConfirmations = 0) {.async.} =
debug "Approving tokens", amount debug "Approving tokens", amount
let tokenAddress = await market.contract.token() let tokenAddress = await market.contract.token()
let token = Erc20Token.new(tokenAddress, market.signer) let token = Erc20Token.new(tokenAddress, market.signer)
@ -110,13 +112,22 @@ method getActiveSlot*(market: OnChainMarket,
return none Slot return none Slot
raise e raise e
method fillSlot(market: OnChainMarket, proc cancelTransaction(market: OnChainMarket, nonce: UInt256) {.async.} =
let address = await market.getSigner()
let cancelTx = Transaction(to: address, value: 0.u256, nonce: some nonce)
let populated = await market.signer.populateTransaction(cancelTx)
trace "cancelling transaction to prevent stuck transactions", nonce
discard market.signer.sendTransaction(populated)
method fillSlot*(market: OnChainMarket,
requestId: RequestId, requestId: RequestId,
slotIndex: UInt256, slotIndex: UInt256,
proof: seq[byte], proof: seq[byte],
collateral: UInt256) {.async.} = collateral: UInt256) {.async.} =
await market.approveFunds(collateral)
await market.contract.fillSlot(requestId, slotIndex, proof) await market.approveFunds(collateral, 1)
trace "calling contract fillSlot", slotIndex, requestId
discard await market.contract.fillSlot(requestId, slotIndex, proof).confirm(1)
method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} = method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} =
await market.contract.freeSlot(slotId) await market.contract.freeSlot(slotId)

View File

@ -43,7 +43,7 @@ proc slashPercentage*(marketplace: Marketplace): UInt256 {.contract, view.}
proc minCollateralThreshold*(marketplace: Marketplace): UInt256 {.contract, view.} proc minCollateralThreshold*(marketplace: Marketplace): UInt256 {.contract, view.}
proc requestStorage*(marketplace: Marketplace, request: StorageRequest) {.contract.} proc requestStorage*(marketplace: Marketplace, request: StorageRequest) {.contract.}
proc fillSlot*(marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256, proof: seq[byte]) {.contract.} proc fillSlot*(marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256, proof: seq[byte]): ?TransactionResponse {.contract.}
proc withdrawFunds*(marketplace: Marketplace, requestId: RequestId) {.contract.} proc withdrawFunds*(marketplace: Marketplace, requestId: RequestId) {.contract.}
proc freeSlot*(marketplace: Marketplace, id: SlotId) {.contract.} proc freeSlot*(marketplace: Marketplace, id: SlotId) {.contract.}
proc getRequest*(marketplace: Marketplace, id: RequestId): StorageRequest {.contract, view.} proc getRequest*(marketplace: Marketplace, id: RequestId): StorageRequest {.contract, view.}

View File

@ -5,6 +5,7 @@ import pkg/ethers/erc20
import ./contracts/requests import ./contracts/requests
import ./clock import ./clock
import ./periods import ./periods
import ./utils/exceptions
export chronos export chronos
export questionable export questionable
@ -76,6 +77,19 @@ method getActiveSlot*(
raiseAssert("not implemented") raiseAssert("not implemented")
method cancelTransaction(market: Market, nonce: UInt256) {.base, async.} =
raiseAssert("not implemented")
template cancelOnError*(market: Market, body) =
try:
body
except JsonRpcProviderError as e:
trace "error encountered, cancelling tx if there's a nonce present", nonce = e.nonce, error = e.msgDetail
writeStackTrace()
if e.nonce.isSome:
# send a 0-valued transaction with the errored nonce to prevent stuck txs
await market.cancelTransaction(!e.nonce)
method fillSlot*(market: Market, method fillSlot*(market: Market,
requestId: RequestId, requestId: RequestId,
slotIndex: UInt256, slotIndex: UInt256,

View File

@ -34,3 +34,4 @@ method run(state: SaleFilling, machine: Machine): Future[?State] {.async.} =
debug "Filling slot", requestId = $data.requestId, slotIndex = $data.slotIndex debug "Filling slot", requestId = $data.requestId, slotIndex = $data.slotIndex
await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral) await market.fillSlot(data.requestId, data.slotIndex, state.proof, collateral)
debug "Waiting for slot filled event...", requestId = $data.requestId, slotIndex = $data.slotIndex

View File

@ -1,6 +1,7 @@
import std/options import std/options
import pkg/chronicles import pkg/chronicles
import ../../clock import ../../clock
import ../../utils/exceptions
import ../statemachine import ../statemachine
import ../salesagent import ../salesagent
import ../salescontext import ../salescontext
@ -29,8 +30,10 @@ method prove*(
let proof = await onProve(slot) let proof = await onProve(slot)
debug "Submitting proof", currentPeriod = currentPeriod, slotId = $slot.id debug "Submitting proof", currentPeriod = currentPeriod, slotId = $slot.id
await market.submitProof(slot.id, proof) await market.submitProof(slot.id, proof)
except CancelledError:
discard
except CatchableError as e: except CatchableError as e:
error "Submitting proof failed", msg = e.msg error "Submitting proof failed", msg = e.msgDetail
proc proveLoop( proc proveLoop(
state: SaleProving, state: SaleProving,
@ -119,12 +122,12 @@ method run*(state: SaleProving, machine: Machine): Future[?State] {.async.} =
debug "Stopping proving.", requestId = $data.requestId, slotIndex = $data.slotIndex debug "Stopping proving.", requestId = $data.requestId, slotIndex = $data.slotIndex
if not state.loop.isNil: if not state.loop.isNil:
if not state.loop.finished: if not state.loop.finished:
try: try:
await state.loop.cancelAndWait() await state.loop.cancelAndWait()
except CatchableError as e: except CatchableError as e:
error "Error during cancelation of prooving loop", msg = e.msg error "Error during cancellation of proving loop", msg = e.msg
state.loop = nil state.loop = nil
return some State(SalePayout()) return some State(SalePayout())

View File

@ -8,6 +8,7 @@ when codex_enable_proof_failures:
import ../../contracts/requests import ../../contracts/requests
import ../../market import ../../market
import ../../utils/exceptions
import ../salescontext import ../salescontext
import ./proving import ./proving
@ -20,7 +21,7 @@ when codex_enable_proof_failures:
proofCount: int proofCount: int
proc onSubmitProofError(error: ref CatchableError, period: UInt256, slotId: SlotId) = proc onSubmitProofError(error: ref CatchableError, period: UInt256, slotId: SlotId) =
error "Submitting invalid proof failed", period = period, slotId = $slotId, msg = error.msg error "Submitting invalid proof failed", period = period, slotId = $slotId, msg = error.msgDetail
method prove*(state: SaleProvingSimulated, slot: Slot, onProve: OnProve, market: Market, currentPeriod: Period) {.async.} = method prove*(state: SaleProvingSimulated, slot: Slot, onProve: OnProve, market: Market, currentPeriod: Period) {.async.} =
trace "Processing proving in simulated mode" trace "Processing proving in simulated mode"
@ -32,8 +33,8 @@ when codex_enable_proof_failures:
try: try:
warn "Submitting INVALID proof", period = currentPeriod, slotId = slot.id warn "Submitting INVALID proof", period = currentPeriod, slotId = slot.id
await market.submitProof(slot.id, newSeq[byte](0)) await market.submitProof(slot.id, newSeq[byte](0))
except ProviderError as e: except SignerError as e:
if not e.revertReason.contains("Invalid proof"): if not e.msgDetail.contains("Invalid proof"):
onSubmitProofError(e, currentPeriod, slot.id) onSubmitProofError(e, currentPeriod, slot.id)
except CatchableError as e: except CatchableError as e:
onSubmitProofError(e, currentPeriod, slot.id) onSubmitProofError(e, currentPeriod, slot.id)

View File

@ -205,6 +205,9 @@ proc emitRequestFailed*(market: MockMarket, requestId: RequestId) =
subscription.requestId.isNone: subscription.requestId.isNone:
subscription.callback(requestId) subscription.callback(requestId)
proc cancelTransaction(market: Market, nonce: UInt256) {.async.} =
discard
proc fillSlot*(market: MockMarket, proc fillSlot*(market: MockMarket,
requestId: RequestId, requestId: RequestId,
slotIndex: UInt256, slotIndex: UInt256,

View File

@ -42,7 +42,7 @@ ethersuite "Marketplace contracts":
await marketplace.requestStorage(request) await marketplace.requestStorage(request)
switchAccount(host) switchAccount(host)
discard await token.approve(marketplace.address, request.ask.collateral) discard await token.approve(marketplace.address, request.ask.collateral)
await marketplace.fillSlot(request.id, 0.u256, proof) discard await marketplace.fillSlot(request.id, 0.u256, proof)
slotId = request.slotId(0.u256) slotId = request.slotId(0.u256)
proc waitUntilProofRequired(slotId: SlotId) {.async.} = proc waitUntilProofRequired(slotId: SlotId) {.async.} =
@ -57,7 +57,7 @@ ethersuite "Marketplace contracts":
proc startContract() {.async.} = proc startContract() {.async.} =
for slotIndex in 1..<request.ask.slots: for slotIndex in 1..<request.ask.slots:
discard await token.approve(marketplace.address, request.ask.collateral) discard await token.approve(marketplace.address, request.ask.collateral)
await marketplace.fillSlot(request.id, slotIndex.u256, proof) discard await marketplace.fillSlot(request.id, slotIndex.u256, proof)
test "accept marketplace proofs": test "accept marketplace proofs":
switchAccount(host) switchAccount(host)

View File

@ -126,7 +126,7 @@ proc postAvailability*(
"maxCollateral": maxCollateral, "maxCollateral": maxCollateral,
} }
let response = client.http.post(url, $json) let response = client.http.post(url, $json)
assert response.status == "200 OK" doAssert response.status == "200 OK", "expected 200 OK, got " & response.status & ", body: " & response.body
Availability.fromJson(response.body.parseJson) Availability.fromJson(response.body.parseJson)
proc getAvailabilities*(client: CodexClient): ?!seq[Availability] = proc getAvailabilities*(client: CodexClient): ?!seq[Availability] =

View File

@ -1,7 +1,13 @@
import std/os # import std/dirs
import std/macros
import std/json
import std/httpclient import std/httpclient
import std/json
import std/macros
import std/os
import std/sequtils
import std/strformat
import std/strutils
import std/sugar
import std/times
import pkg/chronicles import pkg/chronicles
import ../ethertest import ../ethertest
import ./codexclient import ./codexclient
@ -19,18 +25,29 @@ type
datadir*: string datadir*: string
ethAccount*: Address ethAccount*: Address
StartNodes* = object StartNodes* = object
clients*: uint clients*: StartNodeConfig
providers*: uint providers*: StartNodeConfig
validators*: uint validators*: StartNodeConfig
DebugNodes* = object StartNodeConfig* = object
client*: bool numNodes*: int
provider*: bool cliOptions*: seq[CliOption]
validator*: bool logFile*: bool
topics*: string logTopics*: seq[string]
debugEnabled*: bool
Role* {.pure.} = enum Role* {.pure.} = enum
Client, Client,
Provider, Provider,
Validator Validator
CliOption* = object of RootObj
nodeIdx*: ?int
key*: string
value*: string
proc `$`*(option: CliOption): string =
var res = option.key
if option.value.len > 0:
res &= "=" & option.value
return res
proc new*(_: type RunningNode, proc new*(_: type RunningNode,
role: Role, role: Role,
@ -44,99 +61,113 @@ proc new*(_: type RunningNode,
datadir: datadir, datadir: datadir,
ethAccount: ethAccount) ethAccount: ethAccount)
proc init*(_: type StartNodes, proc nodes*(config: StartNodeConfig, numNodes: int): StartNodeConfig =
clients, providers, validators: uint): StartNodes = if numNodes < 0:
StartNodes(clients: clients, providers: providers, validators: validators) raise newException(ValueError, "numNodes must be >= 0")
proc init*(_: type DebugNodes, var startConfig = config
client, provider, validator: bool, startConfig.numNodes = numNodes
topics: string = "validator,proving,market"): DebugNodes = return startConfig
DebugNodes(client: client, provider: provider, validator: validator,
topics: topics) proc simulateProofFailuresFor*(
config: StartNodeConfig,
providerIdx: int,
failEveryNProofs: int
): StartNodeConfig =
if providerIdx > config.numNodes - 1:
raise newException(ValueError, "provider index out of bounds")
var startConfig = config
startConfig.cliOptions.add(
CliOption(
nodeIdx: some providerIdx,
key: "--simulate-proof-failures",
value: $failEveryNProofs
)
)
return startConfig
proc debug*(config: StartNodeConfig, enabled = true): StartNodeConfig =
## output log in stdout
var startConfig = config
startConfig.debugEnabled = enabled
return startConfig
# proc withLogFile*(
# config: StartNodeConfig,
# file: bool | string
# ): StartNodeConfig =
# var startConfig = config
# when file is bool:
# if not file: startConfig.logFile = none string
# else: startConfig.logFile =
# some currentSourcePath.parentDir() / "codex" & $index & ".log"
# else:
# if file.len <= 0:
# raise newException(ValueError, "file path length must be > 0")
# startConfig.logFile = some file
# return startConfig
proc withLogTopics*(
config: StartNodeConfig,
topics: varargs[string]
): StartNodeConfig =
var startConfig = config
startConfig.logTopics = startConfig.logTopics.concat(@topics)
return startConfig
proc withLogFile*(
config: StartNodeConfig,
logToFile: bool = true
): StartNodeConfig =
var startConfig = config
var logDir = currentSourcePath.parentDir() / "logs" / "{starttime}"
createDir(logDir)
startConfig.logFile = logToFile
return startConfig
template multinodesuite*(name: string, template multinodesuite*(name: string,
startNodes: StartNodes, debugNodes: DebugNodes, body: untyped) = startNodes: StartNodes, body: untyped) =
if (debugNodes.client or debugNodes.provider) and
(enabledLogLevel > LogLevel.TRACE or
enabledLogLevel == LogLevel.NONE):
echo ""
echo "More test debug logging is available by running the tests with " &
"'-d:chronicles_log_level=TRACE " &
"-d:chronicles_disabled_topics=websock " &
"-d:chronicles_default_output_device=stdout " &
"-d:chronicles_sinks=textlines'"
echo ""
ethersuite name: ethersuite name:
var running: seq[RunningNode] var running: seq[RunningNode]
var bootstrap: string var bootstrap: string
let starttime = now().format("yyyy-MM-dd'_'HH:mm:ss")
proc newNodeProcess(index: int, proc newNodeProcess(index: int,
addlOptions: seq[string], config: StartNodeConfig
debug: bool): (NodeProcess, string, Address) = ): (NodeProcess, string, Address) =
if index > accounts.len - 1: if index > accounts.len - 1:
raiseAssert("Cannot start node at index " & $index & raiseAssert("Cannot start node at index " & $index &
", not enough eth accounts.") ", not enough eth accounts.")
let datadir = getTempDir() / "Codex" & $index let datadir = getTempDir() / "Codex" & $index
var options = @[ # let logdir = currentSourcePath.parentDir()
"--api-port=" & $(8080 + index), var options = config.cliOptions.map(o => $o)
"--data-dir=" & datadir, .concat(@[
"--nat=127.0.0.1", "--api-port=" & $(8080 + index),
"--listen-addrs=/ip4/127.0.0.1/tcp/0", "--data-dir=" & datadir,
"--disc-ip=127.0.0.1", "--nat=127.0.0.1",
"--disc-port=" & $(8090 + index), "--listen-addrs=/ip4/127.0.0.1/tcp/0",
"--eth-account=" & $accounts[index]] "--disc-ip=127.0.0.1",
.concat(addlOptions) "--disc-port=" & $(8090 + index),
if debug: options.add "--log-level=INFO;TRACE: " & debugNodes.topics "--eth-account=" & $accounts[index]])
let node = startNode(options, debug = debug) # if logFile =? config.logFile:
# options.add "--log-file=" & logFile
if config.logTopics.len > 0:
options.add "--log-level=INFO;TRACE: " & config.logTopics.join(",")
let node = startNode(options, config.debugEnabled)
node.waitUntilStarted() node.waitUntilStarted()
(node, datadir, accounts[index]) (node, datadir, accounts[index])
proc newCodexClient(index: int): CodexClient =
CodexClient.new("http://localhost:" & $(8080 + index) & "/api/codex/v1")
proc startClientNode() =
let index = running.len
let (node, datadir, account) = newNodeProcess(
index, @["--persistence"], debugNodes.client)
let restClient = newCodexClient(index)
running.add RunningNode.new(Role.Client, node, restClient, datadir,
account)
if debugNodes.client:
debug "started new client node and codex client",
restApiPort = 8080 + index, discPort = 8090 + index, account
proc startProviderNode(failEveryNProofs: uint = 0) =
let index = running.len
let (node, datadir, account) = newNodeProcess(index, @[
"--bootstrap-node=" & bootstrap,
"--persistence",
"--simulate-proof-failures=" & $failEveryNProofs],
debugNodes.provider)
let restClient = newCodexClient(index)
running.add RunningNode.new(Role.Provider, node, restClient, datadir,
account)
if debugNodes.provider:
debug "started new provider node and codex client",
restApiPort = 8080 + index, discPort = 8090 + index, account
proc startValidatorNode() =
let index = running.len
let (node, datadir, account) = newNodeProcess(index, @[
"--bootstrap-node=" & bootstrap,
"--validator"],
debugNodes.validator)
let restClient = newCodexClient(index)
running.add RunningNode.new(Role.Validator, node, restClient, datadir,
account)
if debugNodes.validator:
debug "started new validator node and codex client",
restApiPort = 8080 + index, discPort = 8090 + index, account
proc clients(): seq[RunningNode] {.used.} = proc clients(): seq[RunningNode] {.used.} =
running.filter(proc(r: RunningNode): bool = r.role == Role.Client) running.filter(proc(r: RunningNode): bool = r.role == Role.Client)
@ -146,16 +177,85 @@ template multinodesuite*(name: string,
proc validators(): seq[RunningNode] {.used.} = proc validators(): seq[RunningNode] {.used.} =
running.filter(proc(r: RunningNode): bool = r.role == Role.Validator) running.filter(proc(r: RunningNode): bool = r.role == Role.Validator)
proc newCodexClient(index: int): CodexClient =
CodexClient.new("http://localhost:" & $(8080 + index) & "/api/codex/v1")
proc getLogFile(role: Role, index: int): string =
var logDir = currentSourcePath.parentDir() / "logs" / $starttime
createDir(logDir)
let fn = $role & "_" & $index & ".log"
let fileName = logDir / fn
echo ">>> replace log file name: ", fileName
return fileName
proc startClientNode() =
let index = running.len
let clientIdx = clients().len
var config = startNodes.clients
config.cliOptions.add CliOption(key: "--persistence")
if config.logFile:
let updatedLogFile = getLogFile(Role.Client, clientIdx)
config.cliOptions.add CliOption(key: "--log-file", value: updatedLogFile)
let (node, datadir, account) = newNodeProcess(index, config)
let restClient = newCodexClient(index)
running.add RunningNode.new(Role.Client, node, restClient, datadir,
account)
if config.debugEnabled:
debug "started new client node and codex client",
restApiPort = 8080 + index, discPort = 8090 + index, account
proc startProviderNode(cliOptions: seq[CliOption] = @[]) =
let index = running.len
let providerIdx = providers().len
var config = startNodes.providers
config.cliOptions = config.cliOptions.concat(cliOptions)
if config.logFile:
let updatedLogFile = getLogFile(Role.Provider, providerIdx)
config.cliOptions.add CliOption(key: "--log-file", value: updatedLogFile)
config.cliOptions.add CliOption(key: "--bootstrap-node", value: bootstrap)
config.cliOptions.add CliOption(key: "--persistence")
config.cliOptions = config.cliOptions.filter(
o => (let idx = o.nodeIdx |? providerIdx; echo "idx: ", idx, ", index: ", index; idx == providerIdx)
)
let (node, datadir, account) = newNodeProcess(index, config)
let restClient = newCodexClient(index)
running.add RunningNode.new(Role.Provider, node, restClient, datadir,
account)
if config.debugEnabled:
debug "started new provider node and codex client",
restApiPort = 8080 + index, discPort = 8090 + index, account,
cliOptions = config.cliOptions.join(",")
proc startValidatorNode() =
let index = running.len
let validatorIdx = providers().len
var config = startNodes.validators
if config.logFile:
let updatedLogFile = getLogFile(Role.Validator, validatorIdx)
config.cliOptions.add CliOption(key: "--log-file", value: updatedLogFile)
config.cliOptions.add CliOption(key: "--bootstrap-node", value: bootstrap)
config.cliOptions.add CliOption(key: "--validator")
let (node, datadir, account) = newNodeProcess(index, config)
let restClient = newCodexClient(index)
running.add RunningNode.new(Role.Validator, node, restClient, datadir,
account)
if config.debugEnabled:
debug "started new validator node and codex client",
restApiPort = 8080 + index, discPort = 8090 + index, account
setup: setup:
for i in 0..<startNodes.clients: for i in 0..<startNodes.clients.numNodes:
startClientNode() startClientNode()
if i == 0: if i == 0:
bootstrap = running[0].restClient.info()["spr"].getStr() bootstrap = running[0].restClient.info()["spr"].getStr()
for i in 0..<startNodes.providers: for i in 0..<startNodes.providers.numNodes:
startProviderNode() startProviderNode()
for i in 0..<startNodes.validators: for i in 0..<startNodes.validators.numNodes:
startValidatorNode() startValidatorNode()
teardown: teardown:

View File

@ -11,6 +11,12 @@ import ./codexclient
export codexclient export codexclient
export codexclient
export chronicles
logScope:
topics = "integration testing nodes"
const workingDir = currentSourcePath() / ".." / ".." / ".." const workingDir = currentSourcePath() / ".." / ".." / ".."
const executable = "build" / "codex" const executable = "build" / "codex"

View File

@ -2,138 +2,293 @@ import std/sequtils
import std/os import std/os
from std/times import getTime, toUnix from std/times import getTime, toUnix
import pkg/chronicles import pkg/chronicles
import codex/contracts import pkg/stew/byteutils
import codex/periods import pkg/codex/contracts
import pkg/codex/periods
import pkg/codex/rng
import ../contracts/time import ../contracts/time
import ../contracts/deployment import ../contracts/deployment
import ../codex/helpers
import ./twonodes import ./twonodes
import ./multinodes import ./multinodes
export chronicles
logScope: logScope:
topics = "test proofs" topics = "integration test proofs"
twonodessuite "Proving integration test", debug1=false, debug2=false: # twonodessuite "Proving integration test", debug1=false, debug2=false:
let validatorDir = getTempDir() / "CodexValidator" # let validatorDir = getTempDir() / "CodexValidator"
var marketplace: Marketplace # var marketplace: Marketplace
var period: uint64 # var period: uint64
proc purchaseStateIs(client: CodexClient, id: PurchaseId, state: string): bool = # proc purchaseStateIs(client: CodexClient, id: PurchaseId, state: string): bool =
client.getPurchase(id).option.?state == some state # client.getPurchase(id).option.?state == some state
setup: # setup:
marketplace = Marketplace.new(Marketplace.address, provider) # marketplace = Marketplace.new(Marketplace.address, provider)
period = (await marketplace.config()).proofs.period.truncate(uint64) # period = (await marketplace.config()).proofs.period.truncate(uint64)
# Our Hardhat configuration does use automine, which means that time tracked by `provider.currentTime()` is not # # Our Hardhat configuration does use automine, which means that time tracked by `provider.currentTime()` is not
# advanced until blocks are mined and that happens only when transaction is submitted. # # advanced until blocks are mined and that happens only when transaction is submitted.
# As we use in tests provider.currentTime() which uses block timestamp this can lead to synchronization issues. # # As we use in tests provider.currentTime() which uses block timestamp this can lead to synchronization issues.
await provider.advanceTime(1.u256) # await provider.advanceTime(1.u256)
proc waitUntilPurchaseIsStarted(proofProbability: uint64 = 3, # proc waitUntilPurchaseIsStarted(proofProbability: uint64 = 3,
duration: uint64 = 100 * period, # duration: uint64 = 100 * period,
expiry: uint64 = 30) {.async.} = # expiry: uint64 = 30) {.async.} =
discard client2.postAvailability( # discard client2.postAvailability(
size=0xFFFFF.u256, # size=0xFFFFF.u256,
duration=duration.u256, # duration=duration.u256,
minPrice=300.u256, # minPrice=300.u256,
maxCollateral=200.u256 # maxCollateral=200.u256
) # )
let cid = client1.upload("some file contents").get # let cid = client1.upload("some file contents").get
let expiry = (await provider.currentTime()) + expiry.u256 # let expiry = (await provider.currentTime()) + expiry.u256
let id = client1.requestStorage( # let id = client1.requestStorage(
cid, # cid,
expiry=expiry, # expiry=expiry,
duration=duration.u256, # duration=duration.u256,
proofProbability=proofProbability.u256, # proofProbability=proofProbability.u256,
collateral=100.u256, # collateral=100.u256,
reward=400.u256 # reward=400.u256
).get # ).get
check eventually client1.purchaseStateIs(id, "started") # check eventually client1.purchaseStateIs(id, "started")
proc advanceToNextPeriod {.async.} = # proc advanceToNextPeriod {.async.} =
let periodicity = Periodicity(seconds: period.u256) # let periodicity = Periodicity(seconds: period.u256)
let currentPeriod = periodicity.periodOf(await provider.currentTime()) # let currentPeriod = periodicity.periodOf(await provider.currentTime())
let endOfPeriod = periodicity.periodEnd(currentPeriod) # let endOfPeriod = periodicity.periodEnd(currentPeriod)
await provider.advanceTimeTo(endOfPeriod + 1) # await provider.advanceTimeTo(endOfPeriod + 1)
proc startValidator: NodeProcess = # proc startValidator: NodeProcess =
let validator = startNode( # startNode([
[ # "--data-dir=" & validatorDir,
"--data-dir=" & validatorDir, # "--api-port=8089",
"--api-port=8089", # "--disc-port=8099",
"--disc-port=8099", # "--listen-addrs=/ip4/127.0.0.1/tcp/0",
"--listen-addrs=/ip4/127.0.0.1/tcp/0", # "--validator",
"--validator", # "--eth-account=" & $accounts[2]
"--eth-account=" & $accounts[2] # ], debug = false)
], debug = false
)
validator.waitUntilStarted()
validator
proc stopValidator(node: NodeProcess) = # proc stopValidator(node: NodeProcess) =
node.stop() # node.stop()
removeDir(validatorDir) # removeDir(validatorDir)
test "hosts submit periodic proofs for slots they fill": # test "hosts submit periodic proofs for slots they fill":
await waitUntilPurchaseIsStarted(proofProbability=1) # await waitUntilPurchaseIsStarted(proofProbability=1)
var proofWasSubmitted = false # var proofWasSubmitted = false
proc onProofSubmitted(event: ProofSubmitted) = # proc onProofSubmitted(event: ProofSubmitted) =
proofWasSubmitted = true # proofWasSubmitted = true
let subscription = await marketplace.subscribe(ProofSubmitted, onProofSubmitted) # let subscription = await marketplace.subscribe(ProofSubmitted, onProofSubmitted)
await provider.advanceTime(period.u256) # await provider.advanceTime(period.u256)
check eventually proofWasSubmitted # check eventually proofWasSubmitted
await subscription.unsubscribe() # await subscription.unsubscribe()
test "validator will mark proofs as missing": # test "validator will mark proofs as missing":
let validator = startValidator() # let validator = startValidator()
await waitUntilPurchaseIsStarted(proofProbability=1) # await waitUntilPurchaseIsStarted(proofProbability=1)
node2.stop() # node2.stop()
var slotWasFreed = false # var slotWasFreed = false
proc onSlotFreed(event: SlotFreed) = # proc onSlotFreed(event: SlotFreed) =
slotWasFreed = true # slotWasFreed = true
let subscription = await marketplace.subscribe(SlotFreed, onSlotFreed) # let subscription = await marketplace.subscribe(SlotFreed, onSlotFreed)
for _ in 0..<100: # for _ in 0..<100:
if slotWasFreed: # if slotWasFreed:
break # break
else: # else:
await advanceToNextPeriod() # await advanceToNextPeriod()
await sleepAsync(1.seconds) # await sleepAsync(1.seconds)
check slotWasFreed # check slotWasFreed
await subscription.unsubscribe() # await subscription.unsubscribe()
stopValidator(validator) # stopValidator(validator)
# multinodesuite "Simulate invalid proofs",
# StartNodes.init(clients=1, providers=0, validators=1),
# DebugConfig.init(client=false, provider=false, validator=false):
# # .simulateProofFailuresFor(providerIdx = 0, failEveryNProofs = 2),
# proc purchaseStateIs(client: CodexClient, id: PurchaseId, state: string): bool =
# client.getPurchase(id).option.?state == some state
# var marketplace: Marketplace
# var period: uint64
# var slotId: SlotId
# setup:
# marketplace = Marketplace.new(Marketplace.address, provider)
# let config = await marketplace.config()
# period = config.proofs.period.truncate(uint64)
# slotId = SlotId(array[32, byte].default) # ensure we aren't reusing from prev test
# # Our Hardhat configuration does use automine, which means that time tracked by `provider.currentTime()` is not
# # advanced until blocks are mined and that happens only when transaction is submitted.
# # As we use in tests provider.currentTime() which uses block timestamp this can lead to synchronization issues.
# await provider.advanceTime(1.u256)
# proc periods(p: int): uint64 =
# p.uint64 * period
# proc advanceToNextPeriod {.async.} =
# let periodicity = Periodicity(seconds: period.u256)
# let currentPeriod = periodicity.periodOf(await provider.currentTime())
# let endOfPeriod = periodicity.periodEnd(currentPeriod)
# await provider.advanceTimeTo(endOfPeriod + 1)
# proc waitUntilPurchaseIsStarted(proofProbability: uint64 = 1,
# duration: uint64 = 12.periods,
# expiry: uint64 = 4.periods) {.async.} =
# if clients().len < 1 or providers().len < 1:
# raiseAssert("must start at least one client and one provider")
# let client = clients()[0].restClient
# let storageProvider = providers()[0].restClient
# discard storageProvider.postAvailability(
# size=0xFFFFF.u256,
# duration=duration.u256,
# minPrice=300.u256,
# maxCollateral=200.u256
# )
# let cid = client.upload("some file contents " & $ getTime().toUnix).get
# let expiry = (await provider.currentTime()) + expiry.u256
# # avoid timing issues by filling the slot at the start of the next period
# await advanceToNextPeriod()
# let id = client.requestStorage(
# cid,
# expiry=expiry,
# duration=duration.u256,
# proofProbability=proofProbability.u256,
# collateral=100.u256,
# reward=400.u256
# ).get
# check eventually client.purchaseStateIs(id, "started")
# let purchase = client.getPurchase(id).get
# slotId = slotId(purchase.requestId, 0.u256)
# # TODO: these are very loose tests in that they are not testing EXACTLY how
# # proofs were marked as missed by the validator. These tests should be
# # tightened so that they are showing, as an integration test, that specific
# # proofs are being marked as missed by the validator.
# test "slot is freed after too many invalid proofs submitted":
# let failEveryNProofs = 2
# let totalProofs = 100
# startProviderNode(@[
# CliOption(
# nodeIdx: 0,
# key: "--simulate-proof-failures",
# value: $failEveryNProofs
# )
# ])
# await waitUntilPurchaseIsStarted(duration=totalProofs.periods)
# var slotWasFreed = false
# proc onSlotFreed(event: SlotFreed) =
# if slotId(event.requestId, event.slotIndex) == slotId:
# slotWasFreed = true
# let subscription = await marketplace.subscribe(SlotFreed, onSlotFreed)
# for _ in 0..<totalProofs:
# if slotWasFreed:
# break
# else:
# await advanceToNextPeriod()
# await sleepAsync(1.seconds)
# check slotWasFreed
# await subscription.unsubscribe()
# test "slot is not freed when not enough invalid proofs submitted":
# let failEveryNProofs = 3
# let totalProofs = 12
# startProviderNode(@[
# CliOption(
# nodeIdx: 0,
# key: "--simulate-proof-failures",
# value: $failEveryNProofs
# )
# ])
# await waitUntilPurchaseIsStarted(duration=totalProofs.periods)
# var slotWasFreed = false
# proc onSlotFreed(event: SlotFreed) =
# if slotId(event.requestId, event.slotIndex) == slotId:
# slotWasFreed = true
# let subscription = await marketplace.subscribe(SlotFreed, onSlotFreed)
# for _ in 0..<totalProofs:
# if slotWasFreed:
# break
# else:
# await advanceToNextPeriod()
# await sleepAsync(1.seconds)
# check not slotWasFreed
# await subscription.unsubscribe()
multinodesuite "Simulate invalid proofs", multinodesuite "Simulate invalid proofs",
StartNodes.init(clients=1'u, providers=0'u, validators=1'u), StartNodes(
DebugNodes.init(client=false, provider=false, validator=false): clients: StartNodeConfig()
.nodes(1)
.debug()
.withLogFile(),
providers:
StartNodeConfig()
.nodes(2)
.simulateProofFailuresFor(providerIdx=0, failEveryNProofs=2)
.debug()
.withLogFile()
.withLogTopics("marketplace",
"sales",
"proving",
"reservations",
"node",
"JSONRPC-HTTP-CLIENT",
"JSONRPC-WS-CLIENT",
"ethers",
"restapi"
),
validators: StartNodeConfig()
.nodes(1)
.withLogFile()
):
# .simulateProofFailuresFor(providerIdx = 0, failEveryNProofs = 2),
# DebugConfig.init(client=false, provider=true, validator=false, topics="marketplace,sales,proving,reservations,node,JSONRPC-HTTP-CLIENT,JSONRPC-WS-CLIENT,ethers"):
proc purchaseStateIs(client: CodexClient, id: PurchaseId, state: string): bool = proc purchaseStateIs(client: CodexClient, id: PurchaseId, state: string): bool =
client.getPurchase(id).option.?state == some state client.getPurchase(id).option.?state == some state
var marketplace: Marketplace var marketplace: Marketplace
var period: uint64 var period: uint64
var slotId: SlotId
setup: setup:
marketplace = Marketplace.new(Marketplace.address, provider) marketplace = Marketplace.new(Marketplace.address, provider)
let config = await marketplace.config() let config = await marketplace.config()
period = config.proofs.period.truncate(uint64) period = config.proofs.period.truncate(uint64)
slotId = SlotId(array[32, byte].default) # ensure we aren't reusing from prev test
# Our Hardhat configuration does use automine, which means that time tracked by `provider.currentTime()` is not # Our Hardhat configuration does use automine, which means that time tracked by `provider.currentTime()` is not
# advanced until blocks are mined and that happens only when transaction is submitted. # advanced until blocks are mined and that happens only when transaction is submitted.
# As we use in tests provider.currentTime() which uses block timestamp this can lead to synchronization issues. # As we use in tests provider.currentTime() which uses block timestamp this can lead to synchronization issues.
await provider.advanceTime(1.u256) await provider.advanceTime(1.u256)
proc periods(p: Ordinal | uint): uint64 = proc periods(p: int): uint64 =
when p is uint: p.uint64 * period
p * period
else: p.uint * period
proc advanceToNextPeriod {.async.} = proc advanceToNextPeriod {.async.} =
let periodicity = Periodicity(seconds: period.u256) let periodicity = Periodicity(seconds: period.u256)
@ -143,7 +298,7 @@ multinodesuite "Simulate invalid proofs",
proc waitUntilPurchaseIsStarted(proofProbability: uint64 = 1, proc waitUntilPurchaseIsStarted(proofProbability: uint64 = 1,
duration: uint64 = 12.periods, duration: uint64 = 12.periods,
expiry: uint64 = 4.periods) {.async.} = expiry: uint64 = 4.periods): Future[PurchaseId] {.async.} =
if clients().len < 1 or providers().len < 1: if clients().len < 1 or providers().len < 1:
raiseAssert("must start at least one client and one provider") raiseAssert("must start at least one client and one provider")
@ -157,7 +312,10 @@ multinodesuite "Simulate invalid proofs",
minPrice=300.u256, minPrice=300.u256,
maxCollateral=200.u256 maxCollateral=200.u256
) )
let cid = client.upload("some file contents " & $ getTime().toUnix).get let rng = rng.Rng.instance()
let chunker = RandomChunker.new(rng, size = DefaultBlockSize * 2, chunkSize = DefaultBlockSize * 2)
let data = await chunker.getBytes()
let cid = client.upload(byteutils.toHex(data)).get
let expiry = (await provider.currentTime()) + expiry.u256 let expiry = (await provider.currentTime()) + expiry.u256
# avoid timing issues by filling the slot at the start of the next period # avoid timing issues by filling the slot at the start of the next period
await advanceToNextPeriod() await advanceToNextPeriod()
@ -167,61 +325,40 @@ multinodesuite "Simulate invalid proofs",
duration=duration.u256, duration=duration.u256,
proofProbability=proofProbability.u256, proofProbability=proofProbability.u256,
collateral=100.u256, collateral=100.u256,
reward=400.u256 reward=400.u256,
nodes=2'u
).get ).get
check eventually client.purchaseStateIs(id, "started") check eventually client.purchaseStateIs(id, "started")
let purchase = client.getPurchase(id).get return id
slotId = slotId(purchase.requestId, 0.u256)
proc waitUntilPurchaseIsFinished(purchaseId: PurchaseId, duration: int) {.async.} =
let client = clients()[0].restClient
check eventually(client.purchaseStateIs(purchaseId, "finished"), duration * 1000)
# TODO: these are very loose tests in that they are not testing EXACTLY how # TODO: these are very loose tests in that they are not testing EXACTLY how
# proofs were marked as missed by the validator. These tests should be # proofs were marked as missed by the validator. These tests should be
# tightened so that they are showing, as an integration test, that specific # tightened so that they are showing, as an integration test, that specific
# proofs are being marked as missed by the validator. # proofs are being marked as missed by the validator.
test "slot is freed after too many invalid proofs submitted": test "provider that submits invalid proofs is paid out less":
let failEveryNProofs = 2'u let totalProofs = 100
let totalProofs = 100'u
startProviderNode(failEveryNProofs)
await waitUntilPurchaseIsStarted(duration=totalProofs.periods) let purchaseId = await waitUntilPurchaseIsStarted(duration=totalProofs.periods)
await waitUntilPurchaseIsFinished(purchaseId, duration=totalProofs.periods.int)
var slotWasFreed = false # var slotWasFreed = false
proc onSlotFreed(event: SlotFreed) = # proc onSlotFreed(event: SlotFreed) =
if slotId(event.requestId, event.slotIndex) == slotId: # if slotId(event.requestId, event.slotIndex) == slotId:
slotWasFreed = true # slotWasFreed = true
let subscription = await marketplace.subscribe(SlotFreed, onSlotFreed) # let subscription = await marketplace.subscribe(SlotFreed, onSlotFreed)
for _ in 0..<totalProofs: # for _ in 0..<totalProofs:
if slotWasFreed: # if slotWasFreed:
break # break
else: # else:
await advanceToNextPeriod() # await advanceToNextPeriod()
await sleepAsync(1.seconds) # await sleepAsync(1.seconds)
check slotWasFreed # check slotWasFreed
await subscription.unsubscribe() # await subscription.unsubscribe()
test "slot is not freed when not enough invalid proofs submitted":
let failEveryNProofs = 3'u
let totalProofs = 12'u
startProviderNode(failEveryNProofs)
await waitUntilPurchaseIsStarted(duration=totalProofs.periods)
var slotWasFreed = false
proc onSlotFreed(event: SlotFreed) =
if slotId(event.requestId, event.slotIndex) == slotId:
slotWasFreed = true
let subscription = await marketplace.subscribe(SlotFreed, onSlotFreed)
for _ in 0..<totalProofs:
if slotWasFreed:
break
else:
await advanceToNextPeriod()
await sleepAsync(1.seconds)
check not slotWasFreed
await subscription.unsubscribe()

2
vendor/nim-json-rpc vendored

@ -1 +1 @@
Subproject commit 0bf2bcbe74a18a3c7a709d57108bb7b51e748a92 Subproject commit afc958db67ec34cba4c528348d9ce712aee488d6