2022-04-05 19:12:59 +00:00
|
|
|
import std/sequtils
|
2023-11-14 16:53:06 +00:00
|
|
|
import std/sugar
|
2024-03-23 09:56:35 +00:00
|
|
|
import std/cpuinfo
|
2022-04-05 19:12:59 +00:00
|
|
|
|
|
|
|
import pkg/chronos
|
2023-08-21 02:51:04 +00:00
|
|
|
import pkg/datastore
|
2022-04-05 19:12:59 +00:00
|
|
|
import pkg/questionable/results
|
|
|
|
|
2022-05-19 19:56:03 +00:00
|
|
|
import pkg/codex/erasure
|
|
|
|
import pkg/codex/manifest
|
|
|
|
import pkg/codex/stores
|
|
|
|
import pkg/codex/blocktype as bt
|
|
|
|
import pkg/codex/rng
|
2023-11-14 16:53:06 +00:00
|
|
|
import pkg/codex/utils
|
2024-02-07 20:54:57 +00:00
|
|
|
import pkg/codex/indexingstrategy
|
2024-03-23 09:56:35 +00:00
|
|
|
import pkg/taskpools
|
2022-04-05 19:12:59 +00:00
|
|
|
|
2024-01-29 20:03:51 +00:00
|
|
|
import ../asynctest
|
2022-04-05 19:12:59 +00:00
|
|
|
import ./helpers
|
2024-06-27 05:51:50 +00:00
|
|
|
import ./examples
|
2022-04-05 19:12:59 +00:00
|
|
|
|
2024-01-08 22:52:46 +00:00
|
|
|
suite "Erasure encode/decode":
  # Exercises the leopard-backed erasure coder end-to-end against a
  # RepoStore-backed dataset with deliberately awkward geometry.
  const BlockSize = 1024'nb
  const dataSetSize = BlockSize * 123 # weird geometry

  # Shared per-test state; (re)initialized in `setup` below.
  var rng: Rng # deterministic source for column/block sampling
  var chunker: Chunker # produces the random dataset that is stored
  var manifest: Manifest # manifest of the un-protected dataset
  var store: BlockStore # RepoStore over the temp LevelDBs below
  var erasure: Erasure # coder under test
  var taskpool: Taskpool # worker pool handed to the coder
  # Temp databases live for the whole suite; `teardown` destroys their
  # contents after each test.
  let repoTmp = TempLevelDb.new()
  let metaTmp = TempLevelDb.new()
|
2022-05-10 12:10:17 +00:00
|
|
|
|
|
|
|
  setup:
    # Fresh datastores, coder and dataset for every test.
    let
      repoDs = repoTmp.newDb()
      metaDs = metaTmp.newDb()
    rng = Rng.instance()
    chunker = RandomChunker.new(rng, size = dataSetSize, chunkSize = BlockSize)
    store = RepoStore.new(repoDs, metaDs)
    # one worker thread per CPU core
    taskpool = Taskpool.new(num_threads = countProcessors())
    erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, taskpool)
    # store the random dataset and keep its manifest for later comparisons
    manifest = await storeDataGetManifest(store, chunker)
|
2022-04-05 19:12:59 +00:00
|
|
|
|
2024-05-30 06:57:10 +00:00
|
|
|
  teardown:
    # Wipe both temp databases so tests cannot leak state into each other.
    await repoTmp.destroyDb()
    await metaTmp.destroyDb()
|
|
|
|
|
2022-05-10 12:10:17 +00:00
|
|
|
proc encode(buffers, parity: int): Future[Manifest] {.async.} =
|
2022-04-05 19:12:59 +00:00
|
|
|
let
|
|
|
|
encoded = (await erasure.encode(
|
|
|
|
manifest,
|
2024-01-11 16:45:23 +00:00
|
|
|
buffers.Natural,
|
|
|
|
parity.Natural)).tryGet()
|
2022-04-05 19:12:59 +00:00
|
|
|
|
|
|
|
check:
|
2023-11-14 12:02:17 +00:00
|
|
|
encoded.blocksCount mod (buffers + parity) == 0
|
2023-11-14 16:53:06 +00:00
|
|
|
encoded.rounded == roundUp(manifest.blocksCount, buffers)
|
2022-04-05 19:12:59 +00:00
|
|
|
encoded.steps == encoded.rounded div buffers
|
|
|
|
|
2022-05-10 12:10:17 +00:00
|
|
|
return encoded
|
|
|
|
|
2022-11-14 13:50:00 +00:00
|
|
|
test "Should tolerate losing M data blocks in a single random column":
|
2022-05-10 12:10:17 +00:00
|
|
|
const
|
|
|
|
buffers = 20
|
|
|
|
parity = 10
|
|
|
|
|
|
|
|
let encoded = await encode(buffers, parity)
|
|
|
|
|
2022-04-05 19:12:59 +00:00
|
|
|
var
|
2023-11-14 16:53:06 +00:00
|
|
|
column = rng.rand((encoded.blocksCount div encoded.steps) - 1) # random column
|
2023-11-14 12:02:17 +00:00
|
|
|
dropped: seq[int]
|
2022-04-05 19:12:59 +00:00
|
|
|
|
2023-03-10 07:02:54 +00:00
|
|
|
for _ in 0..<encoded.ecM:
|
2023-11-14 12:02:17 +00:00
|
|
|
dropped.add(column)
|
|
|
|
(await store.delBlock(encoded.treeCid, column)).tryGet()
|
|
|
|
(await store.delBlock(manifest.treeCid, column)).tryGet()
|
2023-11-14 16:53:06 +00:00
|
|
|
column = (column + encoded.steps) mod encoded.blocksCount # wrap around
|
2022-04-05 19:12:59 +00:00
|
|
|
|
|
|
|
var
|
|
|
|
decoded = (await erasure.decode(encoded)).tryGet()
|
|
|
|
|
|
|
|
check:
|
2023-11-14 12:02:17 +00:00
|
|
|
decoded.treeCid == manifest.treeCid
|
|
|
|
decoded.treeCid == encoded.originalTreeCid
|
|
|
|
decoded.blocksCount == encoded.originalBlocksCount
|
2022-04-05 19:12:59 +00:00
|
|
|
|
|
|
|
for d in dropped:
|
2023-11-14 16:53:06 +00:00
|
|
|
if d < manifest.blocksCount: # we don't support returning parity blocks yet
|
|
|
|
let present = await store.hasBlock(manifest.treeCid, d)
|
|
|
|
check present.tryGet()
|
2022-04-05 19:12:59 +00:00
|
|
|
|
2022-11-14 13:50:00 +00:00
|
|
|
test "Should not tolerate losing more than M data blocks in a single random column":
|
2022-04-05 19:12:59 +00:00
|
|
|
const
|
|
|
|
buffers = 20
|
|
|
|
parity = 10
|
|
|
|
|
2022-05-10 12:10:17 +00:00
|
|
|
let encoded = await encode(buffers, parity)
|
2022-04-05 19:12:59 +00:00
|
|
|
|
|
|
|
var
|
2023-11-14 16:53:06 +00:00
|
|
|
column = rng.rand((encoded.blocksCount div encoded.steps) - 1) # random column
|
2023-11-14 12:02:17 +00:00
|
|
|
dropped: seq[int]
|
2022-04-05 19:12:59 +00:00
|
|
|
|
2023-03-10 07:02:54 +00:00
|
|
|
for _ in 0..<encoded.ecM + 1:
|
2023-11-14 12:02:17 +00:00
|
|
|
dropped.add(column)
|
|
|
|
(await store.delBlock(encoded.treeCid, column)).tryGet()
|
|
|
|
(await store.delBlock(manifest.treeCid, column)).tryGet()
|
2023-11-14 16:53:06 +00:00
|
|
|
column = (column + encoded.steps) mod encoded.blocksCount # wrap around
|
2022-04-05 19:12:59 +00:00
|
|
|
|
|
|
|
var
|
|
|
|
decoded: Manifest
|
|
|
|
|
|
|
|
expect ResultFailure:
|
|
|
|
decoded = (await erasure.decode(encoded)).tryGet()
|
|
|
|
|
|
|
|
for d in dropped:
|
2023-11-14 12:02:17 +00:00
|
|
|
let present = await store.hasBlock(manifest.treeCid, d)
|
2022-07-28 00:39:17 +00:00
|
|
|
check not present.tryGet()
|
2022-04-05 19:12:59 +00:00
|
|
|
|
2022-11-14 13:50:00 +00:00
|
|
|
test "Should tolerate losing M data blocks in M random columns":
|
2022-04-05 19:12:59 +00:00
|
|
|
const
|
|
|
|
buffers = 20
|
|
|
|
parity = 10
|
|
|
|
|
2022-05-10 12:10:17 +00:00
|
|
|
let encoded = await encode(buffers, parity)
|
2022-04-05 19:12:59 +00:00
|
|
|
|
|
|
|
var
|
|
|
|
blocks: seq[int]
|
|
|
|
offset = 0
|
|
|
|
|
|
|
|
while offset < encoded.steps - 1:
|
|
|
|
let
|
2023-11-14 12:02:17 +00:00
|
|
|
blockIdx = toSeq(countup(offset, encoded.blocksCount - 1, encoded.steps))
|
2022-04-05 19:12:59 +00:00
|
|
|
|
2023-03-10 07:02:54 +00:00
|
|
|
for _ in 0..<encoded.ecM:
|
2022-04-05 19:12:59 +00:00
|
|
|
blocks.add(rng.sample(blockIdx, blocks))
|
|
|
|
offset.inc
|
|
|
|
|
|
|
|
for idx in blocks:
|
2023-11-14 12:02:17 +00:00
|
|
|
(await store.delBlock(encoded.treeCid, idx)).tryGet()
|
|
|
|
(await store.delBlock(manifest.treeCid, idx)).tryGet()
|
|
|
|
discard
|
2022-04-05 19:12:59 +00:00
|
|
|
|
2022-05-10 12:10:17 +00:00
|
|
|
discard (await erasure.decode(encoded)).tryGet()
|
2022-04-05 19:12:59 +00:00
|
|
|
|
2023-11-14 12:02:17 +00:00
|
|
|
for d in 0..<manifest.blocksCount:
|
|
|
|
let present = await store.hasBlock(manifest.treeCid, d)
|
2022-07-28 00:39:17 +00:00
|
|
|
check present.tryGet()
|
2022-04-05 19:12:59 +00:00
|
|
|
|
2022-11-14 13:50:00 +00:00
|
|
|
test "Should not tolerate losing more than M data blocks in M random columns":
|
2022-04-05 19:12:59 +00:00
|
|
|
const
|
|
|
|
buffers = 20
|
|
|
|
parity = 10
|
|
|
|
|
2022-05-10 12:10:17 +00:00
|
|
|
let encoded = await encode(buffers, parity)
|
2022-04-05 19:12:59 +00:00
|
|
|
|
|
|
|
var
|
|
|
|
blocks: seq[int]
|
|
|
|
offset = 0
|
|
|
|
|
2023-11-14 16:53:06 +00:00
|
|
|
while offset < encoded.steps:
|
2022-04-05 19:12:59 +00:00
|
|
|
let
|
2023-11-14 12:02:17 +00:00
|
|
|
blockIdx = toSeq(countup(offset, encoded.blocksCount - 1, encoded.steps))
|
2022-04-05 19:12:59 +00:00
|
|
|
|
2023-03-10 07:02:54 +00:00
|
|
|
for _ in 0..<encoded.ecM + 1: # NOTE: the +1
|
2022-04-07 23:08:43 +00:00
|
|
|
var idx: int
|
|
|
|
while true:
|
|
|
|
idx = rng.sample(blockIdx, blocks)
|
2023-11-14 12:02:17 +00:00
|
|
|
let blk = (await store.getBlock(encoded.treeCid, idx)).tryGet()
|
|
|
|
if not blk.isEmpty:
|
2022-04-07 23:08:43 +00:00
|
|
|
break
|
|
|
|
|
|
|
|
blocks.add(idx)
|
2022-04-05 19:12:59 +00:00
|
|
|
offset.inc
|
|
|
|
|
|
|
|
for idx in blocks:
|
2023-11-14 12:02:17 +00:00
|
|
|
(await store.delBlock(encoded.treeCid, idx)).tryGet()
|
|
|
|
(await store.delBlock(manifest.treeCid, idx)).tryGet()
|
|
|
|
discard
|
2022-04-05 19:12:59 +00:00
|
|
|
|
|
|
|
var
|
|
|
|
decoded: Manifest
|
|
|
|
|
|
|
|
expect ResultFailure:
|
|
|
|
decoded = (await erasure.decode(encoded)).tryGet()
|
|
|
|
|
2022-11-14 13:50:00 +00:00
|
|
|
test "Should tolerate losing M (a.k.a row) contiguous data blocks":
|
2022-04-05 19:12:59 +00:00
|
|
|
const
|
|
|
|
buffers = 20
|
|
|
|
parity = 10
|
|
|
|
|
2022-05-10 12:10:17 +00:00
|
|
|
let encoded = await encode(buffers, parity)
|
2022-04-05 19:12:59 +00:00
|
|
|
|
2023-11-14 16:53:06 +00:00
|
|
|
# loose M original (systematic) symbols/blocks
|
|
|
|
for b in 0..<(encoded.steps * encoded.ecM):
|
2023-11-14 12:02:17 +00:00
|
|
|
(await store.delBlock(encoded.treeCid, b)).tryGet()
|
|
|
|
(await store.delBlock(manifest.treeCid, b)).tryGet()
|
2022-04-05 19:12:59 +00:00
|
|
|
|
2022-05-10 12:10:17 +00:00
|
|
|
discard (await erasure.decode(encoded)).tryGet()
|
2022-04-05 19:12:59 +00:00
|
|
|
|
2023-11-14 12:02:17 +00:00
|
|
|
for d in 0..<manifest.blocksCount:
|
|
|
|
let present = await store.hasBlock(manifest.treeCid, d)
|
2022-07-28 00:39:17 +00:00
|
|
|
check present.tryGet()
|
2022-04-05 19:12:59 +00:00
|
|
|
|
2022-11-14 13:50:00 +00:00
|
|
|
test "Should tolerate losing M (a.k.a row) contiguous parity blocks":
|
2022-04-05 19:12:59 +00:00
|
|
|
const
|
|
|
|
buffers = 20
|
|
|
|
parity = 10
|
|
|
|
|
2023-11-14 16:53:06 +00:00
|
|
|
let
|
|
|
|
encoded = await encode(buffers, parity)
|
|
|
|
blocks = collect:
|
|
|
|
for i in 0..encoded.blocksCount:
|
|
|
|
i
|
2022-04-05 19:12:59 +00:00
|
|
|
|
2023-11-14 16:53:06 +00:00
|
|
|
# loose M parity (all!) symbols/blocks from the dataset
|
|
|
|
for b in blocks[^(encoded.steps * encoded.ecM)..^1]:
|
2023-11-14 12:02:17 +00:00
|
|
|
(await store.delBlock(encoded.treeCid, b)).tryGet()
|
|
|
|
(await store.delBlock(manifest.treeCid, b)).tryGet()
|
2022-04-05 19:12:59 +00:00
|
|
|
|
2022-05-10 12:10:17 +00:00
|
|
|
discard (await erasure.decode(encoded)).tryGet()
|
2022-04-05 19:12:59 +00:00
|
|
|
|
2023-11-14 12:02:17 +00:00
|
|
|
for d in 0..<manifest.blocksCount:
|
|
|
|
let present = await store.hasBlock(manifest.treeCid, d)
|
2022-07-28 00:39:17 +00:00
|
|
|
check present.tryGet()
|
2022-05-10 11:50:22 +00:00
|
|
|
|
|
|
|
test "handles edge case of 0 parity blocks":
|
|
|
|
const
|
|
|
|
buffers = 20
|
|
|
|
parity = 0
|
|
|
|
|
2022-05-10 12:10:17 +00:00
|
|
|
let encoded = await encode(buffers, parity)
|
2022-05-10 11:50:22 +00:00
|
|
|
|
2022-05-10 12:10:17 +00:00
|
|
|
discard (await erasure.decode(encoded)).tryGet()
|
2024-06-27 05:51:50 +00:00
|
|
|
|
|
|
|
test "Should handle verifiable manifests":
|
|
|
|
const
|
|
|
|
buffers = 20
|
|
|
|
parity = 10
|
|
|
|
|
|
|
|
let
|
|
|
|
encoded = await encode(buffers, parity)
|
|
|
|
slotCids = collect(newSeq):
|
|
|
|
for i in 0..<encoded.numSlots: Cid.example
|
|
|
|
|
|
|
|
verifiable = Manifest.new(encoded, Cid.example, slotCids).tryGet()
|
|
|
|
|
|
|
|
decoded = (await erasure.decode(verifiable)).tryGet()
|
|
|
|
|
|
|
|
check:
|
|
|
|
decoded.treeCid == manifest.treeCid
|
|
|
|
decoded.treeCid == verifiable.originalTreeCid
|
|
|
|
decoded.blocksCount == verifiable.originalBlocksCount
|
2024-07-03 17:20:53 +00:00
|
|
|
|
|
|
|
for i in 1..5:
|
|
|
|
test "Should encode/decode using various parameters " & $i & "/5":
|
|
|
|
let
|
|
|
|
blockSize = rng.sample(@[1, 2, 4, 8, 16, 32, 64].mapIt(it.KiBs))
|
|
|
|
datasetSize = 1.MiBs
|
|
|
|
ecK = 10.Natural
|
|
|
|
ecM = 10.Natural
|
|
|
|
|
|
|
|
let
|
|
|
|
chunker = RandomChunker.new(rng, size = datasetSize, chunkSize = blockSize)
|
|
|
|
manifest = await storeDataGetManifest(store, chunker)
|
|
|
|
encoded = (await erasure.encode(manifest, ecK, ecM)).tryGet()
|
|
|
|
decoded = (await erasure.decode(encoded)).tryGet()
|
|
|
|
|
|
|
|
check:
|
|
|
|
decoded.treeCid == manifest.treeCid
|
|
|
|
decoded.treeCid == encoded.originalTreeCid
|
Release v0.1.4 (#912)
* fix: createReservation lock (#825)
* fix: createReservation lock
* fix: additional locking places
* fix: acquire lock
* chore: feedback
Co-authored-by: markspanbroek <mark@spanbroek.net>
Signed-off-by: Adam Uhlíř <adam@uhlir.dev>
* feat: withLock template and fixed tests
* fix: use proc for MockReservations constructor
* chore: feedback
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
Signed-off-by: Adam Uhlíř <adam@uhlir.dev>
* chore: feedback implementation
---------
Signed-off-by: Adam Uhlíř <adam@uhlir.dev>
Co-authored-by: markspanbroek <mark@spanbroek.net>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
* Block deletion with ref count & repostore refactor (#631)
* Fix StoreStream so it doesn't return parity bytes (#838)
* fix storestream so it doesn\'t return parity bits for protected/verifiable manifests
* use Cid.example instead of creating a mock manually
* Fix verifiable manifest initialization (#839)
* fix verifiable manifest initialization
* fix linearstrategy, use verifiableStrategy to select blocks for slots
* check for both strategies in attribute inheritance test
* ci: add verify_circuit=true to the releases (#840)
* provisional fix so EC errors do not crash the node on download (#841)
* prevent node crashing with `not val.isNil` (#843)
* bump nim-leopard to handle no parity data (#845)
* Fix verifiable manifest constructor (#844)
* Fix verifiable manifest constructor
* Add integration test for verifiable manifest download
Add integration test for testing download of verifiable dataset after creating request for storage
* add missing import
* add testecbug to integration suite
* Remove hardhat instance from integration test
* change description, drop echo
---------
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
Co-authored-by: gmega <giuliano.mega@gmail.com>
* Bump Nim to 1.6.21 (#851)
* bump Nim to 1.6.21 (range type reset fixes)
* remove incompatible versions from compiler matrix
* feat(rest): adds erasure coding constraints when requesting storage (#848)
* Rest API: add erasure coding constraints when requesting storage
* clean up
* Make error message for "dataset too small" more informative.
* fix API integration test
---------
Co-authored-by: gmega <giuliano.mega@gmail.com>
* Prover workshop band-aid (#853)
* add prover bandaid
* Improve error message text
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
Signed-off-by: Giuliano Mega <giuliano.mega@gmail.com>
---------
Signed-off-by: Giuliano Mega <giuliano.mega@gmail.com>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
* Bandaid for failing erasure coding (#855)
* Update Release workflow (#858)
Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
* Fixes prover behavior with singleton proof trees (#859)
* add logs and test
* add Merkle proof checks
* factor out Circom input normalization, fix proof input serialization
* add test and update existing ones
* update circuit assets
* add back trace message
* switch contracts to fix branch
* update codex-contracts-eth to latest
* do not expose prove with prenormalized inputs
* Chronos v4 Update (v3 Compat Mode) (#814)
* add changes to use chronos v4 in compat mode
* switch chronos to compat fix branch
* use nimbus-build-system with configurable Nim repo
* add missing imports
* add missing await
* bump compat
* pin nim version in Makefile
* add await instead of asyncSpawn to advertisement queue loop
* bump DHT to v0.5.0
* allow error state of `onBatch` to propagate upwards in test code
* pin Nim compiler commit to avoid fetching stale branch
* make CI build against branch head instead of merge
* fix handling of return values in testslotqueue
* Downgrade to gcc 13 on Windows (#874)
* Downgrade to gcc 13 on Windows
Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
* Increase build job timeout to 90 minutes
Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
---------
Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
* Add MIT/Apache licenses (#861)
* Add MIT/Apache licenses
* Center "Apache License"
Signed-off-by: Giuliano Mega <giuliano.mega@gmail.com>
* remove wrong legal entity; rename apache license file
---------
Signed-off-by: Giuliano Mega <giuliano.mega@gmail.com>
* Add OPTIONS endpoint to allow the content-type header for the upload endpoint (#869)
* Add OPTIONS endpoint to allow the content-type header
exec git commit --amend --no-edit -S
* Remove useless header "Access-Control-Headers" and add cache
Signed-off-by: Arnaud <arnaud@status.im>
---------
Signed-off-by: Arnaud <arnaud@status.im>
Co-authored-by: Giuliano Mega <giuliano.mega@gmail.com>
* chore: add `downtimeProduct` config parameter (#867)
* chore: add `downtimeProduct` config parameter
* bump codex-contracts-eth to master
* Support CORS preflight requests when the storage request api returns an error (#878)
* Add CORS headers when the REST API is returning an error
* Use the allowedOrigin instead of the wilcard when setting the origin
Signed-off-by: Arnaud <arnaud@status.im>
---------
Signed-off-by: Arnaud <arnaud@status.im>
* refactor(marketplace): generic querying of historical marketplace events (#872)
* refactor(marketplace): move marketplace events to the Market abstraction
Move marketplace contract events to the Market abstraction so the types can be shared across all modules that call the Market abstraction.
* Remove unneeded conversion
* Switch to generic implementation of event querying
* change parent type to MarketplaceEvent
* Remove extra license file (#876)
* remove extra license
* center "apache license"
* Update advertising (#862)
* Setting up advertiser
* Wires up advertiser
* cleanup
* test compiles
* tests pass
* setting up test for advertiser
* Finishes advertiser tests
* fixes commonstore tests
* Review comments by Giuliano
* Race condition found by Giuliano
* Review comment by Dmitriy
Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
Signed-off-by: Ben Bierens <39762930+benbierens@users.noreply.github.com>
* fixes tests
---------
Signed-off-by: Ben Bierens <39762930+benbierens@users.noreply.github.com>
Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
* feat: add `--payout-address` (#870)
* feat: add `--payout-address`
Allows SPs to be paid out to a separate address, keeping their profits secure.
Supports https://github.com/codex-storage/codex-contracts-eth/pull/144 in the nim-codex client.
* Remove optional payoutAddress
Change --payout-address so that it is no longer optional. There is no longer an overload in `Marketplace.sol` for `fillSlot` accepting no `payoutAddress`.
* Update integration tests to include --payout-address
* move payoutAddress from fillSlot to freeSlot
* Update integration tests to use required payoutAddress
- to make payoutAddress required, the integration tests needed to avoid building the cli params until just before starting the node, otherwise if cli params were added ad-hoc, there would be an error after a non-required parameter was added before a required parameter.
* support client payout address
- withdrawFunds requires a withdrawAddress parameter, directs payouts for withdrawing of client funds (for a cancelled request) to go to that address.
* fix integration test
adds --payout-address to validators
* refactor: support withdrawFunds and freeSlot optional parameters
- withdrawFunds has an optional parameter for withdrawRecipient
- freeSlot has optional parameters for rewardRecipient and collateralRecipient
- change --payout-address to --reward-recipient to match contract signature naming
* Revert "Update integration tests to include --payout-address"
This reverts commit 8f9535cf35b0f2b183ac4013a7ed11b246486964.
There are some valid improvements to the integration tests, but they can be handled in a separate PR.
* small fix
* bump contracts to fix marketplace spec
* bump codex-contracts-eth, now rebased on master
* bump codex-contracts-eth
now that feat/reward-address has been merged to master
* clean up, comments
* Rework circuit downloader (#882)
* Introduces a start method to prover
* Moves backend creation into start method
* sets up three paths for backend initialization
* Extracts backend initialization to backend-factory
* Implements loading backend from cli files or previously downloaded local files
* Wires up downloading and unzipping
* functional implementation
* Fixes testprover.nim
* Sets up tests for backendfactory
* includes libzip-dev
* pulls in updated contracts
* removes integration cli tests for r1cs, wasm, and zkey file arguments.
* Fixes issue where inner-scope values are lost before returning
* sets local proof verification for dist-test images
* Adds two traces and bumps nim-ethers
* Adds separate path for circuit files
* Create circuit dir if not exists
* fix: make sure requestStorage is mined
* fix: correct place to plug confirm
* test: fixing contracts tests
* Restores gitmodules
* restores nim-datastore reference
* Sets up downloader exe
* sets up tool skeleton
* implements getting of circuit hash
* Implements downloader tool
* sets up test skeleton
* Implements test for cirdl
* includes testTools in testAll
* Cleanup building.md
* cleans up previous downloader implementation
* cleans up testbackendfactory
* moves start of prover into node.nim
* Fills in arguments in example command
* Initializes backend in prover constructor
* Restores tests
* Restores tests for cli instructions
* Review comments by Dmitriy, part 1
* Quotes path in download instruction.
* replaces curl with chronos http session
* Moves cirdl build output to 'build' folder.
* Fixes chronicles log output
* Add cirdl support to the codex Dockerfile
Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
* Add cirdl support to the docker entrypoint
Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
* Add cirdl support to the release workflow
Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
* Disable verify_circuit flag for releases
Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
* Removes backendFactory placeholder type
* wip
* Replaces zip library with status-im/zippy library (which supports zip and tar)
* Updates cirdl to not change circuitdir folder
* Switches from zip to tar.gz
* Review comments by Dmitriy
* updates codex-contracts-eth
* Adds testTools to CI
* Adds check for access to config.circuitdir
* Update fixture circuit zkey
* Update matrix to run tools tests on Windows
* Adds 'deps' dependency for cirdl
* Adjust docker-entrypoint.sh to use CODEX_CIRCUIT_DIR env var
* Review comments by Giuliano
---------
Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
Co-authored-by: Adam Uhlíř <adam@uhlir.dev>
Co-authored-by: Veaceslav Doina <20563034+veaceslavdoina@users.noreply.github.com>
* Support CORS for POST and PATCH availability endpoints (#897)
* Adds testnet marketplace address to known deployments (#911)
* API tweaks for OpenAPI, errors and endpoints (#886)
* All sort of tweaks
* docs: availability's minPrice doc
* Revert changes to the two node test example
* Change default EC params in REST API
Change default EC params in REST API to 3 nodes and 1 tolerance.
Adjust integration tests to honour these settings.
---------
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
---------
Signed-off-by: Adam Uhlíř <adam@uhlir.dev>
Signed-off-by: Giuliano Mega <giuliano.mega@gmail.com>
Signed-off-by: Slava <20563034+veaceslavdoina@users.noreply.github.com>
Signed-off-by: Arnaud <arnaud@status.im>
Signed-off-by: Ben Bierens <39762930+benbierens@users.noreply.github.com>
Co-authored-by: Adam Uhlíř <adam@uhlir.dev>
Co-authored-by: markspanbroek <mark@spanbroek.net>
Co-authored-by: Eric <5089238+emizzle@users.noreply.github.com>
Co-authored-by: Tomasz Bekas <tomasz.bekas@gmail.com>
Co-authored-by: Giuliano Mega <giuliano.mega@gmail.com>
Co-authored-by: Arnaud <arno.deville@gmail.com>
Co-authored-by: Ben Bierens <39762930+benbierens@users.noreply.github.com>
Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
Co-authored-by: Arnaud <arnaud@status.im>
2024-09-24 10:19:58 +00:00
|
|
|
decoded.blocksCount == encoded.originalBlocksCount
|