Merge branch 'master' into feature/blkexc-peer-selection

# Conflicts:
#	tests/integration/testsales.nim

commit 4b5c35534d
tests/integration/codexclient.nim

@@ -20,9 +20,11 @@ type CodexClient* = ref object
 
 type CodexClientError* = object of CatchableError
 
+const HttpClientTimeoutMs = 60 * 1000
+
 proc new*(_: type CodexClient, baseurl: string): CodexClient =
   CodexClient(
-    http: newHttpClient(),
+    http: newHttpClient(timeout=HttpClientTimeoutMs),
    baseurl: baseurl,
    session: HttpSessionRef.new({HttpClientFlag.Http11Pipeline})
  )
@@ -247,7 +249,7 @@ proc close*(client: CodexClient) =
 
 proc restart*(client: CodexClient) =
   client.http.close()
-  client.http = newHttpClient()
+  client.http = newHttpClient(timeout=HttpClientTimeoutMs)
 
 proc purchaseStateIs*(client: CodexClient, id: PurchaseId, state: string): bool =
   client.getPurchase(id).option.?state == some state
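Note: HttpClientTimeoutMs bounds every REST call the test client makes, so a wedged node now fails a test after 60 s instead of hanging the run indefinitely. A minimal sketch of the behaviour using only std/httpclient (the URL is illustrative, not from this diff):

import std/httpclient
from std/net import TimeoutError

const HttpClientTimeoutMs = 60 * 1000            # 60 s, expressed in milliseconds

let http = newHttpClient(timeout = HttpClientTimeoutMs)  # the default, -1, blocks forever
try:
  discard http.getContent("http://localhost:8080/api/codex/v1/debug/info")
except TimeoutError:
  echo "request aborted after 60 s instead of hanging the test run"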
tests/integration/multinodes.nim

@@ -162,6 +162,8 @@ template multinodesuite*(name: string, body: untyped) =
         let updatedLogFile = getLogFile(role, some roleIdx)
         config.withLogFile(updatedLogFile)
 
+      if bootstrap.len > 0:
+        config.addCliOption("--bootstrap-node", bootstrap)
       config.addCliOption("--api-port", $ await nextFreePort(8080 + nodeIdx))
       config.addCliOption("--data-dir", datadir)
       config.addCliOption("--nat", "127.0.0.1")
@@ -223,7 +225,6 @@ template multinodesuite*(name: string, body: untyped) =
   proc startProviderNode(conf: CodexConfig): Future[NodeProcess] {.async.} =
     let providerIdx = providers().len
     var config = conf
-    config.addCliOption("--bootstrap-node", bootstrap)
     config.addCliOption(StartUpCmd.persistence, "--eth-provider", jsonRpcProviderUrl)
     config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len])
     config.addCliOption(PersistenceCmd.prover, "--circom-r1cs",
@@ -238,7 +239,6 @@ template multinodesuite*(name: string, body: untyped) =
   proc startValidatorNode(conf: CodexConfig): Future[NodeProcess] {.async.} =
     let validatorIdx = validators().len
     var config = conf
-    config.addCliOption("--bootstrap-node", bootstrap)
     config.addCliOption(StartUpCmd.persistence, "--eth-provider", jsonRpcProviderUrl)
     config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len])
     config.addCliOption(StartUpCmd.persistence, "--validator")
@@ -311,7 +311,7 @@ template multinodesuite*(name: string, body: untyped) =
         role: Role.Client,
         node: node
       )
-      if clients().len == 1:
+      if running.len == 1:
        without ninfo =? CodexProcess(node).client.info():
          # raise CatchableError instead of Defect (with .get or !) so we
          # can gracefully shutdown and prevent zombies
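Note: the comment in the last hunk refers to questionable's `without` binding: `!` or `.get` on an empty option raises a Defect, which cannot be caught and would leave child node processes running as zombies, whereas `without` lets the template raise a CatchableError and tear everything down. A rough sketch of the pattern (the stand-in `info` proc and the error message are illustrative, not from this diff):

import pkg/questionable

proc info(): ?string = none string   # stand-in for CodexProcess(node).client.info()

without ninfo =? info():
  # CatchableError propagates through the suite's teardown, so started
  # nodes can still be shut down gracefully:
  raise newException(CatchableError, "unable to get node info")
echo ninfo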
tests/integration/nodeprocess.nim

@@ -150,17 +150,20 @@ method stop*(node: NodeProcess) {.base, async.} =
 
   trace "node stopped"
 
-proc waitUntilStarted*(node: NodeProcess) {.async.} =
+proc waitUntilOutput*(node: NodeProcess, output: string) {.async.} =
   logScope:
     nodeName = node.name
 
-  trace "waiting until node started"
+  trace "waiting until", output
 
   let started = newFuture[void]()
+  let fut = node.captureOutput(output, started).track(node)
+  asyncSpawn fut
+  await started.wait(60.seconds) # allow enough time for proof generation
+
+proc waitUntilStarted*(node: NodeProcess) {.async.} =
   try:
-    let fut = node.captureOutput(node.startedOutput, started).track(node)
-    asyncSpawn fut
-    await started.wait(60.seconds) # allow enough time for proof generation
+    await node.waitUntilOutput(node.startedOutput)
     trace "node started"
   except AsyncTimeoutError:
     # attempt graceful shutdown in case node was partially started, prevent
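Note: the hunk above factors the generic log-waiting out of waitUntilStarted so suites can block on arbitrary log lines. A sketch of the resulting call pattern (the log text is taken from the testcli.nim hunks further down):

proc example(node: NodeProcess) {.async.} =
  # wait for any specific log line, bounded by the same 60 s ceiling:
  await node.waitUntilOutput("Persistence enabled, but no Ethereum account was set")
  # waitUntilStarted is now a thin wrapper that waits for node.startedOutput:
  await node.waitUntilStarted()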
tests/integration/nodes.nim (deleted)

@@ -1,91 +0,0 @@
-import std/osproc
-import std/os
-import std/streams
-import std/strutils
-import pkg/codex/conf
-import pkg/codex/logutils
-import pkg/confutils
-import pkg/libp2p
-import pkg/questionable
-import ./codexclient
-
-export codexclient
-
-const workingDir = currentSourcePath() / ".." / ".." / ".."
-const executable = "build" / "codex"
-
-type
-  NodeProcess* = ref object
-    process: Process
-    arguments: seq[string]
-    debug: bool
-    client: ?CodexClient
-
-proc start(node: NodeProcess) =
-  if node.debug:
-    node.process = osproc.startProcess(
-      executable,
-      workingDir,
-      node.arguments,
-      options={poParentStreams}
-    )
-  else:
-    node.process = osproc.startProcess(
-      executable,
-      workingDir,
-      node.arguments
-    )
-
-proc waitUntilOutput*(node: NodeProcess, output: string) =
-  if node.debug:
-    raiseAssert "cannot read node output when in debug mode"
-  for line in node.process.outputStream.lines:
-    if line.contains(output):
-      return
-  raiseAssert "node did not output '" & output & "'"
-
-proc waitUntilStarted*(node: NodeProcess) =
-  if node.debug:
-    sleep(10_000)
-  else:
-    node.waitUntilOutput("Started codex node")
-
-proc startNode*(args: openArray[string], debug: string | bool = false): NodeProcess =
-  ## Starts a Codex Node with the specified arguments.
-  ## Set debug to 'true' to see output of the node.
-  let node = NodeProcess(arguments: @args, debug: ($debug != "false"))
-  node.start()
-  node
-
-proc dataDir(node: NodeProcess): string =
-  let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
-  config.dataDir.string
-
-proc apiUrl(node: NodeProcess): string =
-  let config = CodexConf.load(cmdLine = node.arguments, quitOnFailure = false)
-  "http://" & config.apiBindAddress & ":" & $config.apiPort & "/api/codex/v1"
-
-proc client*(node: NodeProcess): CodexClient =
-  if client =? node.client:
-    return client
-  let client = CodexClient.new(node.apiUrl)
-  node.client = some client
-  client
-
-proc stop*(node: NodeProcess) =
-  if node.process != nil:
-    node.process.terminate()
-    discard node.process.waitForExit(timeout=5_000)
-    node.process.close()
-    node.process = nil
-  if client =? node.client:
-    node.client = none CodexClient
-    client.close()
-
-proc restart*(node: NodeProcess) =
-  node.stop()
-  node.start()
-  node.waitUntilStarted()
-
-proc removeDataDir*(node: NodeProcess) =
-  removeDir(node.dataDir)
tests/integration/testblockexpiration.nim

@@ -5,10 +5,11 @@ from std/net import TimeoutError
 import pkg/chronos
 import ../ethertest
-import ./nodes
+import ./codexprocess
+import ./nodeprocess
 
 ethersuite "Node block expiration tests":
-  var node: NodeProcess
+  var node: CodexProcess
   var baseurl: string
 
   let dataDir = getTempDir() / "Codex1"
@@ -18,12 +19,12 @@ ethersuite "Node block expiration tests":
     baseurl = "http://localhost:8080/api/codex/v1"
 
   teardown:
-    node.stop()
+    await node.stop()
 
     dataDir.removeDir()
 
-  proc startTestNode(blockTtlSeconds: int) =
-    node = startNode([
+  proc startTestNode(blockTtlSeconds: int) {.async.} =
+    node = await CodexProcess.startNode(@[
       "--api-port=8080",
       "--data-dir=" & dataDir,
       "--nat=127.0.0.1",
@@ -32,9 +33,11 @@ ethersuite "Node block expiration tests":
       "--disc-port=8090",
       "--block-ttl=" & $blockTtlSeconds,
       "--block-mi=1",
-      "--block-mn=10"
-    ], debug = false)
-    node.waitUntilStarted()
+      "--block-mn=10"],
+      false,
+      "cli-test-node"
+    )
+    await node.waitUntilStarted()
 
   proc uploadTestFile(): string =
     let client = newHttpClient()
@@ -61,7 +64,7 @@ ethersuite "Node block expiration tests":
       content.code == Http200
 
   test "node retains not-expired file":
-    startTestNode(blockTtlSeconds = 10)
+    await startTestNode(blockTtlSeconds = 10)
 
     let contentId = uploadTestFile()
 
@@ -74,7 +77,7 @@ ethersuite "Node block expiration tests":
       response.body == content
 
   test "node deletes expired file":
-    startTestNode(blockTtlSeconds = 1)
+    await startTestNode(blockTtlSeconds = 1)
 
     let contentId = uploadTestFile()
 
tests/integration/testcli.nim

@@ -1,29 +1,38 @@
-import std/unittest
 import std/tempfiles
 import codex/conf
 import codex/utils/fileutils
-import ./nodes
+import ../asynctest
+import ../checktest
+import ./codexprocess
+import ./nodeprocess
 import ../examples
 
-suite "Command line interface":
+asyncchecksuite "Command line interface":
 
   let key = "4242424242424242424242424242424242424242424242424242424242424242"
 
+  proc startCodex(args: seq[string]): Future[CodexProcess] {.async.} =
+    return await CodexProcess.startNode(
+      args,
+      false,
+      "cli-test-node"
+    )
+
   test "complains when persistence is enabled without ethereum account":
-    let node = startNode(@[
+    let node = await startCodex(@[
       "persistence"
     ])
-    node.waitUntilOutput("Persistence enabled, but no Ethereum account was set")
-    node.stop()
+    await node.waitUntilOutput("Persistence enabled, but no Ethereum account was set")
+    await node.stop()
 
   test "complains when ethereum private key file has wrong permissions":
     let unsafeKeyFile = genTempPath("", "")
     discard unsafeKeyFile.writeFile(key, 0o666)
-    let node = startNode(@[
+    let node = await startCodex(@[
       "persistence",
       "--eth-private-key=" & unsafeKeyFile])
-    node.waitUntilOutput("Ethereum private key file does not have safe file permissions")
-    node.stop()
+    await node.waitUntilOutput("Ethereum private key file does not have safe file permissions")
+    await node.stop()
     discard removeFile(unsafeKeyFile)
 
   let
@@ -31,27 +40,27 @@ suite "Command line interface":
     expectedDownloadInstruction = "Proving circuit files are not found. Please run the following to download them:"
 
   test "suggests downloading of circuit files when persistence is enabled without accessible r1cs file":
-    let node = startNode(@["persistence", "prover", marketplaceArg])
-    node.waitUntilOutput(expectedDownloadInstruction)
-    node.stop()
+    let node = await startCodex(@["persistence", "prover", marketplaceArg])
+    await node.waitUntilOutput(expectedDownloadInstruction)
+    await node.stop()
 
   test "suggests downloading of circuit files when persistence is enabled without accessible wasm file":
-    let node = startNode(@[
+    let node = await startCodex(@[
       "persistence",
       "prover",
       marketplaceArg,
       "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs"
     ])
-    node.waitUntilOutput(expectedDownloadInstruction)
-    node.stop()
+    await node.waitUntilOutput(expectedDownloadInstruction)
+    await node.stop()
 
   test "suggests downloading of circuit files when persistence is enabled without accessible zkey file":
-    let node = startNode(@[
+    let node = await startCodex(@[
       "persistence",
       "prover",
       marketplaceArg,
       "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs",
       "--circom-wasm=tests/circuits/fixtures/proof_main.wasm"
     ])
-    node.waitUntilOutput(expectedDownloadInstruction)
-    node.stop()
+    await node.waitUntilOutput(expectedDownloadInstruction)
+    await node.stop()
tests/integration/testmarketplace.nim

@@ -5,22 +5,37 @@ import ./marketplacesuite
-import ./twonodes
+import ./nodeconfigs
 
-twonodessuite "Marketplace", debug1 = false, debug2 = false:
+marketplacesuite "Marketplace":
+  let marketplaceConfig = NodeConfigs(
+    clients: CodexConfigs.init(nodes=1).some,
+    providers: CodexConfigs.init(nodes=1).some,
+  )
+
+  var host: CodexClient
+  var hostAccount: Address
+  var client: CodexClient
+  var clientAccount: Address
+
   setup:
+    host = providers()[0].client
+    hostAccount = providers()[0].ethAccount
+    client = clients()[0].client
+    clientAccount = clients()[0].ethAccount
+
     # Our Hardhat configuration does use automine, which means that time tracked by `ethProvider.currentTime()` is not
     # advanced until blocks are mined and that happens only when transaction is submitted.
     # As we use in tests ethProvider.currentTime() which uses block timestamp this can lead to synchronization issues.
     await ethProvider.advanceTime(1.u256)
 
-  test "nodes negotiate contracts on the marketplace":
+  test "nodes negotiate contracts on the marketplace", marketplaceConfig:
     let size = 0xFFFFFF.u256
     let data = await RandomChunker.example(blocks=8)
-    # client 2 makes storage available
-    let availability = client2.postAvailability(totalSize=size, duration=20*60.u256, minPrice=300.u256, maxCollateral=300.u256).get
+    # host makes storage available
+    let availability = host.postAvailability(totalSize=size, duration=20*60.u256, minPrice=300.u256, maxCollateral=300.u256).get
 
-    # client 1 requests storage
-    let cid = client1.upload(data).get
-    let id = client1.requestStorage(
+    # client requests storage
+    let cid = client.upload(data).get
+    let id = client.requestStorage(
       cid,
       duration=20*60.u256,
       reward=400.u256,
@@ -30,19 +45,19 @@ twonodessuite "Marketplace", debug1 = false, debug2 = false:
       nodes = 3,
       tolerance = 1).get
 
-    check eventually(client1.purchaseStateIs(id, "started"), timeout=10*60*1000)
-    let purchase = client1.getPurchase(id).get
+    check eventually(client.purchaseStateIs(id, "started"), timeout=10*60*1000)
+    let purchase = client.getPurchase(id).get
     check purchase.error == none string
-    let availabilities = client2.getAvailabilities().get
+    let availabilities = host.getAvailabilities().get
     check availabilities.len == 1
     let newSize = availabilities[0].freeSize
     check newSize > 0 and newSize < size
 
-    let reservations = client2.getAvailabilityReservations(availability.id).get
+    let reservations = host.getAvailabilityReservations(availability.id).get
     check reservations.len == 3
     check reservations[0].requestId == purchase.requestId
 
-  test "node slots gets paid out and rest of tokens are returned to client":
+  test "node slots gets paid out and rest of tokens are returned to client", marketplaceConfig:
     let size = 0xFFFFFF.u256
     let data = await RandomChunker.example(blocks = 8)
     let marketplace = Marketplace.new(Marketplace.address, ethProvider.getSigner())
@@ -52,13 +67,13 @@ twonodessuite "Marketplace", debug1 = false, debug2 = false:
     let duration = 20*60.u256
     let nodes = 3'u
 
-    # client 2 makes storage available
-    let startBalanceHost = await token.balanceOf(account2)
-    discard client2.postAvailability(totalSize=size, duration=20*60.u256, minPrice=300.u256, maxCollateral=300.u256).get
+    # host makes storage available
+    let startBalanceHost = await token.balanceOf(hostAccount)
+    discard host.postAvailability(totalSize=size, duration=20*60.u256, minPrice=300.u256, maxCollateral=300.u256).get
 
-    # client 1 requests storage
-    let cid = client1.upload(data).get
-    let id = client1.requestStorage(
+    # client requests storage
+    let cid = client.upload(data).get
+    let id = client.requestStorage(
       cid,
       duration=duration,
       reward=reward,
@@ -68,11 +83,11 @@ twonodessuite "Marketplace", debug1 = false, debug2 = false:
       nodes = nodes,
       tolerance = 1).get
 
-    check eventually(client1.purchaseStateIs(id, "started"), timeout=10*60*1000)
-    let purchase = client1.getPurchase(id).get
+    check eventually(client.purchaseStateIs(id, "started"), timeout=10*60*1000)
+    let purchase = client.getPurchase(id).get
     check purchase.error == none string
 
-    let clientBalanceBeforeFinished = await token.balanceOf(account1)
+    let clientBalanceBeforeFinished = await token.balanceOf(clientAccount)
 
     # Proving mechanism uses blockchain clock to do proving/collect/cleanup round
     # hence we must use `advanceTime` over `sleepAsync` as Hardhat does mine new blocks
@@ -80,11 +95,11 @@ twonodessuite "Marketplace", debug1 = false, debug2 = false:
     await ethProvider.advanceTime(duration)
 
     # Checking that the hosting node received reward for at least the time between <expiry;end>
-    check eventually (await token.balanceOf(account2)) - startBalanceHost >= (duration-5*60)*reward*nodes.u256
+    check eventually (await token.balanceOf(hostAccount)) - startBalanceHost >= (duration-5*60)*reward*nodes.u256
 
     # Checking that client node receives some funds back that were not used for the host nodes
     check eventually(
-      (await token.balanceOf(account1)) - clientBalanceBeforeFinished > 0,
+      (await token.balanceOf(clientAccount)) - clientBalanceBeforeFinished > 0,
       timeout = 10*1000 # give client a bit of time to withdraw its funds
     )
 
@@ -158,6 +173,8 @@ marketplacesuite "Marketplace payouts":
       await ethProvider.advanceTime(expiry.u256)
       check eventually providerApi.saleStateIs(slotId, "SaleCancelled")
 
+      await advanceToNextPeriod()
+
       check eventually (
         let endBalanceProvider = (await token.balanceOf(provider.ethAccount));
         endBalanceProvider > startBalanceProvider and
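Note on the automine comments in the hunks above: under Hardhat automine, the chain timestamp that `ethProvider.currentTime()` reads only advances when a transaction mines a block, so the suites move the clock explicitly rather than sleeping. A sketch of the distinction (duration as in the test above):

  # mines a block whose timestamp is pushed forward, so chain time moves:
  await ethProvider.advanceTime(duration)

  # a wall-clock sleep would let real time pass while chain time stays put,
  # so proving/payout rounds that read block timestamps would never progress:
  # await sleepAsync(20.minutes)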
tests/integration/testpurchasing.nim

@@ -5,16 +5,16 @@ import ./twonodes
 import ../contracts/time
 import ../examples
 
-twonodessuite "Purchasing", debug1 = false, debug2 = false:
+twonodessuite "Purchasing":
 
-  test "node handles storage request":
+  test "node handles storage request", twoNodesConfig:
     let data = await RandomChunker.example(blocks=2)
     let cid = client1.upload(data).get
     let id1 = client1.requestStorage(cid, duration=100.u256, reward=2.u256, proofProbability=3.u256, expiry=10, collateral=200.u256).get
     let id2 = client1.requestStorage(cid, duration=400.u256, reward=5.u256, proofProbability=6.u256, expiry=10, collateral=201.u256).get
     check id1 != id2
 
-  test "node retrieves purchase status":
+  test "node retrieves purchase status", twoNodesConfig:
     # get one contiguous chunk
     let rng = rng.Rng.instance()
     let chunker = RandomChunker.new(rng, size = DefaultBlockSize * 2, chunkSize = DefaultBlockSize * 2)
@@ -40,7 +40,7 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
     check request.ask.maxSlotLoss == 1'u64
 
   # TODO: We currently do not support encoding single chunks
-  # test "node retrieves purchase status with 1 chunk":
+  # test "node retrieves purchase status with 1 chunk", twoNodesConfig:
   #   let cid = client1.upload("some file contents").get
   #   let id = client1.requestStorage(cid, duration=1.u256, reward=2.u256, proofProbability=3.u256, expiry=30, collateral=200.u256, nodes=2, tolerance=1).get
   #   let request = client1.getPurchase(id).get.request.get
@@ -52,7 +52,7 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
   #   check request.ask.slots == 3'u64
   #   check request.ask.maxSlotLoss == 1'u64
 
-  test "node remembers purchase status after restart":
+  test "node remembers purchase status after restart", twoNodesConfig:
     let data = await RandomChunker.example(blocks=2)
     let cid = client1.upload(data).get
     let id = client1.requestStorage(cid,
@@ -65,7 +65,7 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
       tolerance=1.uint).get
     check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3*60*1000)
 
-    node1.restart()
+    await node1.restart()
     client1.restart()
 
     check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3*60*1000)
@@ -78,7 +78,7 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
     check request.ask.slots == 3'u64
     check request.ask.maxSlotLoss == 1'u64
 
-  test "node requires expiry and its value to be in future":
+  test "node requires expiry and its value to be in future", twoNodesConfig:
     let data = await RandomChunker.example(blocks=2)
     let cid = client1.upload(data).get
 
tests/integration/testrestapi.nim

@@ -6,20 +6,20 @@ import ./twonodes
 import ../examples
 import json
 
-twonodessuite "REST API", debug1 = false, debug2 = false:
-  test "nodes can print their peer information":
+twonodessuite "REST API":
+  test "nodes can print their peer information", twoNodesConfig:
     check !client1.info() != !client2.info()
 
-  test "nodes can set chronicles log level":
+  test "nodes can set chronicles log level", twoNodesConfig:
     client1.setLogLevel("DEBUG;TRACE:codex")
 
-  test "node accepts file uploads":
+  test "node accepts file uploads", twoNodesConfig:
     let cid1 = client1.upload("some file contents").get
     let cid2 = client1.upload("some other contents").get
 
     check cid1 != cid2
 
-  test "node shows used and available space":
+  test "node shows used and available space", twoNodesConfig:
     discard client1.upload("some file contents").get
     discard client1.postAvailability(totalSize=12.u256, duration=2.u256, minPrice=3.u256, maxCollateral=4.u256).get
     let space = client1.space().tryGet()
@@ -29,7 +29,7 @@ twonodessuite "REST API", debug1 = false, debug2 = false:
     space.quotaUsedBytes == 65598.NBytes
     space.quotaReservedBytes == 12.NBytes
 
-  test "node lists local files":
+  test "node lists local files", twoNodesConfig:
     let content1 = "some file contents"
     let content2 = "some other contents"
 
@@ -40,7 +40,7 @@ twonodessuite "REST API", debug1 = false, debug2 = false:
     check:
       [cid1, cid2].allIt(it in list.content.mapIt(it.cid))
 
-  test "request storage fails for datasets that are too small":
+  test "request storage fails for datasets that are too small", twoNodesConfig:
     let cid = client1.upload("some file contents").get
     let response = client1.requestStorageRaw(cid, duration=10.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256, expiry=9)
 
@@ -48,7 +48,7 @@ twonodessuite "REST API", debug1 = false, debug2 = false:
     response.status == "400 Bad Request"
     response.body == "Dataset too small for erasure parameters, need at least " & $(2*DefaultBlockSize.int) & " bytes"
 
-  test "request storage succeeds for sufficiently sized datasets":
+  test "request storage succeeds for sufficiently sized datasets", twoNodesConfig:
     let data = await RandomChunker.example(blocks=2)
     let cid = client1.upload(data).get
     let response = client1.requestStorageRaw(cid, duration=10.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256, expiry=9)
@@ -56,7 +56,7 @@ twonodessuite "REST API", debug1 = false, debug2 = false:
     check:
       response.status == "200 OK"
 
-  test "request storage fails if tolerance is zero":
+  test "request storage fails if tolerance is zero", twoNodesConfig:
     let data = await RandomChunker.example(blocks=2)
     let cid = client1.upload(data).get
     let duration = 100.u256
@@ -79,7 +79,7 @@ twonodessuite "REST API", debug1 = false, debug2 = false:
     check responseBefore.status == "400 Bad Request"
     check responseBefore.body == "Tolerance needs to be bigger then zero"
 
-  test "request storage fails if nodes and tolerance aren't correct":
+  test "request storage fails if nodes and tolerance aren't correct", twoNodesConfig:
     let data = await RandomChunker.example(blocks=2)
     let cid = client1.upload(data).get
     let duration = 100.u256
@@ -104,7 +104,7 @@ twonodessuite "REST API", debug1 = false, debug2 = false:
     check responseBefore.status == "400 Bad Request"
     check responseBefore.body == "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`"
 
-  test "request storage fails if tolerance > nodes (underflow protection)":
+  test "request storage fails if tolerance > nodes (underflow protection)", twoNodesConfig:
     let data = await RandomChunker.example(blocks=2)
     let cid = client1.upload(data).get
     let duration = 100.u256
@@ -129,7 +129,7 @@ twonodessuite "REST API", debug1 = false, debug2 = false:
     check responseBefore.status == "400 Bad Request"
     check responseBefore.body == "Invalid parameters: `tolerance` cannot be greater than `nodes`"
 
-  test "request storage succeeds if nodes and tolerance within range":
+  test "request storage succeeds if nodes and tolerance within range", twoNodesConfig:
     let data = await RandomChunker.example(blocks=2)
     let cid = client1.upload(data).get
     let duration = 100.u256
@@ -153,42 +153,42 @@ twonodessuite "REST API", debug1 = false, debug2 = false:
 
     check responseBefore.status == "200 OK"
 
-  test "node accepts file uploads with content type":
+  test "node accepts file uploads with content type", twoNodesConfig:
     let headers = newHttpHeaders({"Content-Type": "text/plain"})
     let response = client1.uploadRaw("some file contents", headers)
 
     check response.status == "200 OK"
     check response.body != ""
 
-  test "node accepts file uploads with content disposition":
+  test "node accepts file uploads with content disposition", twoNodesConfig:
     let headers = newHttpHeaders({"Content-Disposition": "attachment; filename=\"example.txt\""})
     let response = client1.uploadRaw("some file contents", headers)
 
     check response.status == "200 OK"
     check response.body != ""
 
-  test "node accepts file uploads with content disposition without filename":
+  test "node accepts file uploads with content disposition without filename", twoNodesConfig:
     let headers = newHttpHeaders({"Content-Disposition": "attachment"})
     let response = client1.uploadRaw("some file contents", headers)
 
     check response.status == "200 OK"
     check response.body != ""
 
-  test "upload fails if content disposition contains bad filename":
+  test "upload fails if content disposition contains bad filename", twoNodesConfig:
     let headers = newHttpHeaders({"Content-Disposition": "attachment; filename=\"exam*ple.txt\""})
     let response = client1.uploadRaw("some file contents", headers)
 
     check response.status == "422 Unprocessable Entity"
     check response.body == "The filename is not valid."
 
-  test "upload fails if content type is invalid":
+  test "upload fails if content type is invalid", twoNodesConfig:
     let headers = newHttpHeaders({"Content-Type": "hello/world"})
     let response = client1.uploadRaw("some file contents", headers)
 
     check response.status == "422 Unprocessable Entity"
     check response.body == "The MIME type is not valid."
 
-  test "node retrieve the metadata":
+  test "node retrieve the metadata", twoNodesConfig:
     let headers = newHttpHeaders({"Content-Type": "text/plain", "Content-Disposition": "attachment; filename=\"example.txt\""})
     let uploadResponse = client1.uploadRaw("some file contents", headers)
     let cid = uploadResponse.body
@@ -211,7 +211,7 @@ twonodessuite "REST API", debug1 = false, debug2 = false:
     check manifest.hasKey("uploadedAt") == true
     check manifest["uploadedAt"].getInt() > 0
 
-  test "node set the headers when for download":
+  test "node set the headers when for download", twoNodesConfig:
     let headers = newHttpHeaders({
       "Content-Disposition": "attachment; filename=\"example.txt\"",
       "Content-Type": "text/plain"
tests/integration/testsales.nim

@@ -3,6 +3,9 @@ import pkg/codex/contracts
 import ./twonodes
 import ../codex/examples
 import ../contracts/time
+import ./codexconfig
+import ./codexclient
+import ./nodeconfigs
 
 proc findItem[T](items: seq[T], item: T): ?!T =
   for tmp in items:
@@ -11,54 +14,65 @@ proc findItem[T](items: seq[T], item: T): ?!T =
 
   return failure("Not found")
 
-twonodessuite "Sales", debug1 = "TRACE", debug2 = "TRACE":
+multinodesuite "Sales":
+  let salesConfig = NodeConfigs(
+    clients: CodexConfigs.init(nodes=1).some,
+    providers: CodexConfigs.init(nodes=1).some,
+  )
 
-  test "node handles new storage availability":
-    let availability1 = client1.postAvailability(totalSize=1.u256, duration=2.u256, minPrice=3.u256, maxCollateral=4.u256).get
-    let availability2 = client1.postAvailability(totalSize=4.u256, duration=5.u256, minPrice=6.u256, maxCollateral=7.u256).get
+  var host: CodexClient
+  var client: CodexClient
+
+  setup:
+    host = providers()[0].client
+    client = clients()[0].client
+
+  test "node handles new storage availability", salesConfig:
+    let availability1 = host.postAvailability(totalSize=1.u256, duration=2.u256, minPrice=3.u256, maxCollateral=4.u256).get
+    let availability2 = host.postAvailability(totalSize=4.u256, duration=5.u256, minPrice=6.u256, maxCollateral=7.u256).get
     check availability1 != availability2
 
-  test "node lists storage that is for sale":
-    let availability = client1.postAvailability(totalSize=1.u256, duration=2.u256, minPrice=3.u256, maxCollateral=4.u256).get
-    check availability in client1.getAvailabilities().get
+  test "node lists storage that is for sale", salesConfig:
+    let availability = host.postAvailability(totalSize=1.u256, duration=2.u256, minPrice=3.u256, maxCollateral=4.u256).get
+    check availability in host.getAvailabilities().get
 
-  test "updating non-existing availability":
-    let nonExistingResponse = client1.patchAvailabilityRaw(AvailabilityId.example, duration=100.u256.some, minPrice=200.u256.some, maxCollateral=200.u256.some)
+  test "updating non-existing availability", salesConfig:
+    let nonExistingResponse = host.patchAvailabilityRaw(AvailabilityId.example, duration=100.u256.some, minPrice=200.u256.some, maxCollateral=200.u256.some)
     check nonExistingResponse.status == "404 Not Found"
 
-  test "updating availability":
-    let availability = client1.postAvailability(totalSize=140000.u256, duration=200.u256, minPrice=300.u256, maxCollateral=300.u256).get
+  test "updating availability", salesConfig:
+    let availability = host.postAvailability(totalSize=140000.u256, duration=200.u256, minPrice=300.u256, maxCollateral=300.u256).get
 
-    client1.patchAvailability(availability.id, duration=100.u256.some, minPrice=200.u256.some, maxCollateral=200.u256.some)
+    host.patchAvailability(availability.id, duration=100.u256.some, minPrice=200.u256.some, maxCollateral=200.u256.some)
 
-    let updatedAvailability = (client1.getAvailabilities().get).findItem(availability).get
+    let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get
     check updatedAvailability.duration == 100
     check updatedAvailability.minPrice == 200
     check updatedAvailability.maxCollateral == 200
     check updatedAvailability.totalSize == 140000
     check updatedAvailability.freeSize == 140000
 
-  test "updating availability - freeSize is not allowed to be changed":
-    let availability = client1.postAvailability(totalSize=140000.u256, duration=200.u256, minPrice=300.u256, maxCollateral=300.u256).get
-    let freeSizeResponse = client1.patchAvailabilityRaw(availability.id, freeSize=110000.u256.some)
+  test "updating availability - freeSize is not allowed to be changed", salesConfig:
+    let availability = host.postAvailability(totalSize=140000.u256, duration=200.u256, minPrice=300.u256, maxCollateral=300.u256).get
+    let freeSizeResponse = host.patchAvailabilityRaw(availability.id, freeSize=110000.u256.some)
     check freeSizeResponse.status == "400 Bad Request"
     check "not allowed" in freeSizeResponse.body
 
-  test "updating availability - updating totalSize":
-    let availability = client1.postAvailability(totalSize=140000.u256, duration=200.u256, minPrice=300.u256, maxCollateral=300.u256).get
-    client1.patchAvailability(availability.id, totalSize=100000.u256.some)
-    let updatedAvailability = (client1.getAvailabilities().get).findItem(availability).get
+  test "updating availability - updating totalSize", salesConfig:
+    let availability = host.postAvailability(totalSize=140000.u256, duration=200.u256, minPrice=300.u256, maxCollateral=300.u256).get
+    host.patchAvailability(availability.id, totalSize=100000.u256.some)
+    let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get
     check updatedAvailability.totalSize == 100000
     check updatedAvailability.freeSize == 100000
 
-  test "updating availability - updating totalSize does not allow bellow utilized":
+  test "updating availability - updating totalSize does not allow bellow utilized", salesConfig:
     let originalSize = 0xFFFFFF.u256
     let data = await RandomChunker.example(blocks=8)
-    let availability = client1.postAvailability(totalSize=originalSize, duration=20*60.u256, minPrice=300.u256, maxCollateral=300.u256).get
+    let availability = host.postAvailability(totalSize=originalSize, duration=20*60.u256, minPrice=300.u256, maxCollateral=300.u256).get
 
     # Lets create storage request that will utilize some of the availability's space
-    let cid = client2.upload(data).get
-    let id = client2.requestStorage(
+    let cid = client.upload(data).get
+    let id = client.requestStorage(
       cid,
       duration=20*60.u256,
       reward=400.u256,
@@ -68,16 +82,16 @@ twonodessuite "Sales", debug1 = "TRACE", debug2 = "TRACE":
       nodes = 3,
       tolerance = 1).get
 
-    check eventually(client2.purchaseStateIs(id, "started"), timeout=10*60*1000)
-    let updatedAvailability = (client1.getAvailabilities().get).findItem(availability).get
+    check eventually(client.purchaseStateIs(id, "started"), timeout=10*60*1000)
+    let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get
     check updatedAvailability.totalSize != updatedAvailability.freeSize
 
     let utilizedSize = updatedAvailability.totalSize - updatedAvailability.freeSize
-    let totalSizeResponse = client1.patchAvailabilityRaw(availability.id, totalSize=(utilizedSize-1.u256).some)
+    let totalSizeResponse = host.patchAvailabilityRaw(availability.id, totalSize=(utilizedSize-1.u256).some)
     check totalSizeResponse.status == "400 Bad Request"
     check "totalSize must be larger then current totalSize" in totalSizeResponse.body
 
-    client1.patchAvailability(availability.id, totalSize=(originalSize + 20000).some)
-    let newUpdatedAvailability = (client1.getAvailabilities().get).findItem(availability).get
+    host.patchAvailability(availability.id, totalSize=(originalSize + 20000).some)
+    let newUpdatedAvailability = (host.getAvailabilities().get).findItem(availability).get
     check newUpdatedAvailability.totalSize == originalSize + 20000
     check newUpdatedAvailability.freeSize - updatedAvailability.freeSize == 20000
tests/integration/testupdownload.nim

@@ -1,11 +1,11 @@
 import pkg/codex/rest/json
 import ./twonodes
 import ../codex/examples
 import json
 from pkg/libp2p import Cid, `$`
 
-twonodessuite "Uploads and downloads", debug1 = false, debug2 = false:
-
-  test "node allows local file downloads":
+twonodessuite "Uploads and downloads":
+  test "node allows local file downloads", twoNodesConfig:
     let content1 = "some file contents"
     let content2 = "some other contents"
 
@@ -19,7 +19,7 @@ twonodessuite "Uploads and downloads", debug1 = false, debug2 = false:
       content1 == resp1
       content2 == resp2
 
-  test "node allows remote file downloads":
+  test "node allows remote file downloads", twoNodesConfig:
     let content1 = "some file contents"
     let content2 = "some other contents"
 
@@ -33,7 +33,7 @@ twonodessuite "Uploads and downloads", debug1 = false, debug2 = false:
       content1 == resp1
      content2 == resp2
 
-  test "node fails retrieving non-existing local file":
+  test "node fails retrieving non-existing local file", twoNodesConfig:
     let content1 = "some file contents"
     let cid1 = client1.upload(content1).get # upload to first node
     let resp2 = client2.download(cid1, local = true) # try retrieving from second node
@@ -64,14 +64,14 @@ twonodessuite "Uploads and downloads", debug1 = false, debug2 = false:
     check manifest.hasKey("protected") == true
     check manifest["protected"].getBool() == false
 
-  test "node allows downloading only manifest":
+  test "node allows downloading only manifest", twoNodesConfig:
     let content1 = "some file contents"
     let cid1 = client1.upload(content1).get
 
     let resp2 = client1.downloadManifestOnly(cid1)
     checkRestContent(cid1, resp2)
 
-  test "node allows downloading content without stream":
+  test "node allows downloading content without stream", twoNodesConfig:
     let content1 = "some file contents"
     let cid1 = client1.upload(content1).get
 
@@ -80,3 +80,15 @@ twonodessuite "Uploads and downloads", debug1 = false, debug2 = false:
     let resp2 = client2.download(cid1, local = true).get
     check:
       content1 == resp2
+
+  test "reliable transfer test", twoNodesConfig:
+    proc transferTest(a: CodexClient, b: CodexClient) {.async.} =
+      let data = await RandomChunker.example(blocks=8)
+      let cid = a.upload(data).get
+      let response = b.download(cid).get
+      check:
+        response == data
+
+    for run in 0..10:
+      await transferTest(client1, client2)
+      await transferTest(client2, client1)
tests/integration/twonodes.nim

@@ -1,94 +1,34 @@
 import std/os
 import std/macros
 import std/httpclient
 import ../ethertest
+import pkg/questionable
+import ./multinodes
+import ./codexconfig
+import ./codexprocess
 import ./codexclient
-import ./nodes
+import ./nodeconfigs
 
 export ethertest
 export codexclient
-export nodes
+export multinodes
 
-template twonodessuite*(name: string, debug1, debug2: bool | string, body) =
-  twonodessuite(name, $debug1, $debug2, body)
+template twonodessuite*(name: string, body: untyped) =
+  multinodesuite name:
+    let twoNodesConfig {.inject, used.} = NodeConfigs(clients: CodexConfigs.init(nodes=2).some)
 
-template twonodessuite*(name: string, debug1, debug2: string, body) =
-  ethersuite name:
-
-    var node1 {.inject, used.}: NodeProcess
-    var node2 {.inject, used.}: NodeProcess
+    var node1 {.inject, used.}: CodexProcess
+    var node2 {.inject, used.}: CodexProcess
     var client1 {.inject, used.}: CodexClient
     var client2 {.inject, used.}: CodexClient
     var account1 {.inject, used.}: Address
     var account2 {.inject, used.}: Address
 
-    let dataDir1 = getTempDir() / "Codex1"
-    let dataDir2 = getTempDir() / "Codex2"
-
     setup:
-      client1 = CodexClient.new("http://localhost:8080/api/codex/v1")
-      client2 = CodexClient.new("http://localhost:8081/api/codex/v1")
       account1 = accounts[0]
       account2 = accounts[1]
 
-      var node1Args = @[
-        "--api-port=8080",
-        "--data-dir=" & dataDir1,
-        "--nat=127.0.0.1",
-        "--disc-ip=127.0.0.1",
-        "--disc-port=8090",
-        "--listen-addrs=/ip4/127.0.0.1/tcp/0",
-        "persistence",
-        "prover",
-        "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs",
-        "--circom-wasm=tests/circuits/fixtures/proof_main.wasm",
-        "--circom-zkey=tests/circuits/fixtures/proof_main.zkey",
-        "--eth-provider=http://127.0.0.1:8545",
-        "--eth-account=" & $account1
-      ]
+      node1 = clients()[0]
+      node2 = clients()[1]
 
-      if debug1 != "true" and debug1 != "false":
-        node1Args.add("--log-level=" & debug1)
-
-      node1 = startNode(node1Args, debug = debug1)
-      node1.waitUntilStarted()
-
-      let bootstrap = (!client1.info()["spr"]).getStr()
-
-      var node2Args = @[
-        "--api-port=8081",
-        "--data-dir=" & dataDir2,
-        "--nat=127.0.0.1",
-        "--disc-ip=127.0.0.1",
-        "--disc-port=8091",
-        "--listen-addrs=/ip4/127.0.0.1/tcp/0",
-        "--bootstrap-node=" & bootstrap,
-        "persistence",
-        "prover",
-        "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs",
-        "--circom-wasm=tests/circuits/fixtures/proof_main.wasm",
-        "--circom-zkey=tests/circuits/fixtures/proof_main.zkey",
-        "--eth-provider=http://127.0.0.1:8545",
-        "--eth-account=" & $account2
-      ]
-
-      if debug2 != "true" and debug2 != "false":
-        node2Args.add("--log-level=" & debug2)
-
-      node2 = startNode(node2Args, debug = debug2)
-      node2.waitUntilStarted()
-
-      # ensure that we have a recent block with a fresh timestamp
-      discard await send(ethProvider, "evm_mine")
-
-    teardown:
-      client1.close()
-      client2.close()
-
-      node1.stop()
-      node2.stop()
-
-      removeDir(dataDir1)
-      removeDir(dataDir2)
+      client1 = node1.client
+      client2 = node2.client
 
     body
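Note: after this rewrite twonodessuite is a thin layer over multinodesuite: the debug1/debug2 parameters are gone, node startup and teardown are handled by the shared template, and each test opts in through the injected twoNodesConfig, matching the updated suites above. A minimal usage sketch (the suite and test names here are illustrative):

twonodessuite "Example":
  test "two nodes can exchange a file", twoNodesConfig:
    let content = "some file contents"
    let cid = client1.upload(content).get   # client1/client2 are injected by the template
    check client2.download(cid).get == content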