diff --git a/.github/workflows/nim-matrix.yml b/.github/workflows/nim-matrix.yml
index 0b57c8bf..579ee6a5 100644
--- a/.github/workflows/nim-matrix.yml
+++ b/.github/workflows/nim-matrix.yml
@@ -6,7 +6,7 @@ on:
 
 env:
   cache_nonce: 0 # Allows for easily busting actions/cache caches
-  nim_version: pinned, v1.6.16, v1.6.18
+  nim_version: pinned
 
 jobs:
   matrix:
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 936f568e..a9ba1fcc 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -8,10 +8,12 @@ on:
 
 env:
   cache_nonce: 0 # Allows for easily busting actions/cache caches
-  nim_version: v1.6.14
+  nim_version: pinned
   rust_version: 1.78.0
   binary_base: codex
-  upload_to_codex: false
+  build_dir: build
+  nim_flags: '-d:verify_circuit=true'
+  windows_libs: 'libstdc++-6.dll libgomp-1.dll libgcc_s_seh-1.dll libwinpthread-1.dll'
 
 jobs:
   # Matrix
@@ -74,13 +76,21 @@ jobs:
 
       - name: Release - Build
         run: |
-          make NIMFLAGS="--out:${{ env.binary }}"
-
-      - name: Release - Upload binaries
+          make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.binary }} ${{ env.nim_flags }}"
+
+      - name: Release - Libraries
+        run: |
+          if [[ "${{ matrix.os }}" == "windows" ]]; then
+            for lib in ${{ env.windows_libs }}; do
+              cp -v "${MINGW_PREFIX}/bin/${lib}" "${{ env.build_dir }}"
+            done
+          fi
+
+      - name: Release - Upload build artifacts
         uses: actions/upload-artifact@v4
         with:
           name: release-${{ env.binary }}
-          path: ${{ env.binary }}
+          path: ${{ env.build_dir }}/
           retention-days: 1
 
   # Release
@@ -99,46 +109,49 @@ jobs:
       - name: Release - Compress and checksum
         run: |
           cd /tmp/release
-          prepare() {
-            # Checksum
+          checksum() {
             arc="${1}"
             sha256sum "${arc}" >"${arc}.sha256"
-
-            # Upload to Codex
-            if [[ "${{ env.upload_to_codex }}" == "true" ]]; then
-              codex_endpoints="${{ secrets.CODEX_ENDPOINTS }}"
-              codex_username="${{ secrets.CODEX_USERNAME }}"
-              codex_password="${{ secrets.CODEX_PASSWORD }}"
-
-              for endpoint in ${codex_endpoints}; do
-                echo "::add-mask::${endpoint}"
-                cid=$(curl -X POST \
-                  "${endpoint}/api/codex/v1/data" \
-                  -u "${codex_username}":"${codex_password}" \
-                  -H "content-type: application/octet-stream" \
-                  -T "${arc}")
-
-                echo "${cid}" >"${arc}.cid"
-              done
-            fi
           }
+
+          # Compress and prepare
           for file in *; do
-            if [[ "${file}" == *".exe"* ]]; then
-              arc="${file%.*}.zip"
-              zip "${arc}" "${file}"
-              rm -f "${file}"
-              prepare "${arc}"
-            else
-              arc="${file}.tar.gz"
-              tar cfz "${arc}" "${file}"
-              rm -f "${file}"
-              prepare "${arc}"
+            # Exclude libraries
+            if [[ "${file}" != *".dll"* ]]; then
+              if [[ "${file}" == *".exe"* ]]; then
+
+                # Windows - binary only
+                arc="${file%.*}.zip"
+                zip "${arc}" "${file}"
+                checksum "${arc}"
+
+                # Windows - binary and libs
+                arc="${file%.*}-libs.zip"
+                zip "${arc}" "${file}" ${{ env.windows_libs }}
+                rm -f "${file}" ${{ env.windows_libs }}
+                checksum "${arc}"
+              else
+
+                # Linux/macOS
+                arc="${file}.tar.gz"
+                chmod 755 "${file}"
+                tar cfz "${arc}" "${file}"
+                rm -f "${file}"
+                checksum "${arc}"
+              fi
             fi
           done
 
+      - name: Release - Upload compressed artifacts and checksums
+        uses: actions/upload-artifact@v4
+        with:
+          name: archives-and-checksums
+          path: /tmp/release/
+          retention-days: 1
+
       - name: Release
         uses: softprops/action-gh-release@v2
+        if: startsWith(github.ref, 'refs/tags/')
         with:
           files: |
             /tmp/release/*
diff --git a/Makefile b/Makefile
index 2e1f97ad..54ac04d2 100644
--- a/Makefile
+++ b/Makefile
@@ -15,7 +15,7 @@
 #
 # If NIM_COMMIT is set to "nimbusbuild", this will use the
 # version pinned by nimbus-build-system.
-PINNED_NIM_VERSION := v1.6.14
+PINNED_NIM_VERSION := 38640664088251bbc88917b4bacfd86ec53014b8 # 1.6.21
 
 ifeq ($(NIM_COMMIT),)
 NIM_COMMIT := $(PINNED_NIM_VERSION)
diff --git a/codex/blockexchange/engine/discovery.nim b/codex/blockexchange/engine/discovery.nim
index 09085fcb..857a60cd 100644
--- a/codex/blockexchange/engine/discovery.nim
+++ b/codex/blockexchange/engine/discovery.nim
@@ -105,8 +105,7 @@ proc advertiseQueueLoop(b: DiscoveryEngine) {.async.} =
       trace "Begin iterating blocks..."
       for c in cids:
         if cid =? await c:
-          b.advertiseBlock(cid)
-          await sleepAsync(100.millis)
+          await b.advertiseBlock(cid)
       trace "Iterating blocks finished."
 
     await sleepAsync(b.advertiseLoopSleep)
diff --git a/codex/blocktype.nim b/codex/blocktype.nim
index dd3b70e0..c44e4fd8 100644
--- a/codex/blocktype.nim
+++ b/codex/blocktype.nim
@@ -31,7 +31,7 @@ import ./codextypes
 export errors, logutils, units, codextypes
 
 type
-  Block* = object of RootObj
+  Block* = ref object of RootObj
     cid*: Cid
     data*: seq[byte]
diff --git a/codex/erasure/asyncbackend.nim b/codex/erasure/asyncbackend.nim
index f3bca851..4827806a 100644
--- a/codex/erasure/asyncbackend.nim
+++ b/codex/erasure/asyncbackend.nim
@@ -114,7 +114,14 @@ proc proxySpawnEncodeTask(
     args: EncodeTaskArgs,
     data: ref seq[seq[byte]]
 ): Flowvar[EncodeTaskResult] =
-  tp.spawn encodeTask(args, data[])
+  # FIXME Uncomment the code below after addressing an issue:
+  # https://github.com/codex-storage/nim-codex/issues/854
+
+  # tp.spawn encodeTask(args, data[])
+
+  let fv = EncodeTaskResult.newFlowVar
+  fv.readyWith(encodeTask(args, data[]))
+  return fv
 
 proc proxySpawnDecodeTask(
     tp: Taskpool,
@@ -122,7 +129,14 @@ proc proxySpawnDecodeTask(
     data: ref seq[seq[byte]],
     parity: ref seq[seq[byte]]
 ): Flowvar[DecodeTaskResult] =
-  tp.spawn decodeTask(args, data[], parity[])
+  # FIXME Uncomment the code below after addressing an issue:
+  # https://github.com/codex-storage/nim-codex/issues/854
+
+  # tp.spawn decodeTask(args, data[], parity[])
+
+  let fv = DecodeTaskResult.newFlowVar
+  fv.readyWith(decodeTask(args, data[], parity[]))
+  return fv
 
 proc awaitResult[T](signal: ThreadSignalPtr, handle: Flowvar[T]): Future[?!T] {.async.} =
   await wait(signal)
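For reference, the workaround pattern in the two hunks above can be exercised in isolation: the task is run inline on the calling thread, and its result is handed back through an already-completed Flowvar so the caller's API is unchanged. A minimal sketch, assuming the `newFlowVar`/`readyWith` helpers used by the patch are reachable through `pkg/taskpools` (the proc names here are hypothetical):

  import pkg/taskpools

  proc work(x: int): int =
    x * 2 # stand-in for encodeTask/decodeTask

  proc proxySpawnWork(tp: Taskpool, x: int): Flowvar[int] =
    # tp.spawn work(x)       # disabled until the taskpool issue is fixed
    let fv = int.newFlowVar  # create an unpopulated Flowvar
    fv.readyWith(work(x))    # complete it synchronously, on this thread
    fv                       # caller still consumes a Flowvar, as before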
diff --git a/codex/erasure/erasure.nim b/codex/erasure/erasure.nim
index 0c921776..56e3e1cf 100644
--- a/codex/erasure/erasure.nim
+++ b/codex/erasure/erasure.nim
@@ -27,6 +27,7 @@ import ../blocktype as bt
 import ../utils
 import ../utils/asynciter
 import ../indexingstrategy
+import ../errors
 
 import pkg/stew/byteutils
 
@@ -82,6 +83,13 @@ type
     blocksCount: Natural
     strategy: StrategyType
 
+  ErasureError* = object of CodexError
+  InsufficientBlocksError* = object of ErasureError
+    # Minimum size, in bytes, that the dataset must have had
+    # for the encoding request to have succeeded with the parameters
+    # provided.
+    minSize*: NBytes
+
 func indexToPos(steps, idx, step: int): int {.inline.} =
   ## Convert an index to a position in the encoded
   ## dataset
@@ -236,11 +244,13 @@ proc init*(
   ecK: Natural, ecM: Natural,
   strategy: StrategyType): ?!EncodingParams =
   if ecK > manifest.blocksCount:
-    return failure(
-      "Unable to encode manifest, not enough blocks, ecK = " &
+    let exc = (ref InsufficientBlocksError)(
+      msg: "Unable to encode manifest, not enough blocks, ecK = " &
       $ecK &
       ", blocksCount = " &
-      $manifest.blocksCount)
+      $manifest.blocksCount,
+      minSize: ecK.NBytes * manifest.blockSize)
+    return failure(exc)
 
   let
     rounded = roundUp(manifest.blocksCount, ecK)
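A quick sanity check on the new `minSize` field: it is the smallest dataset, in bytes, that the given `ecK` could have encoded. A sketch with assumed numbers (64 KiB is assumed here as the default block size; the REST handler further down surfaces the same figure in its 400 response):

  let
    nodes = 3
    tolerance = 1
    ecK = nodes - tolerance    # at least 2 blocks required
    blockSize = 64 * 1024      # assumed DefaultBlockSize, in bytes
  doAssert ecK * blockSize == 131072 # "need at least 131072 bytes"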
diff --git a/codex/manifest/manifest.nim b/codex/manifest/manifest.nim
index abfd5b4c..93aa5ba5 100644
--- a/codex/manifest/manifest.nim
+++ b/codex/manifest/manifest.nim
@@ -29,7 +29,7 @@ import ../logutils
 # TODO: Manifest should be reworked to more concrete types,
 # perhaps using inheritance
 type
-  Manifest* = object of RootObj
+  Manifest* = ref object of RootObj
     treeCid {.serialize.}: Cid        # Root of the merkle tree
     datasetSize {.serialize.}: NBytes # Total size of all blocks
     blockSize {.serialize.}: NBytes   # Size of each contained block (might not be needed if blocks are len-prefixed)
@@ -322,7 +322,7 @@ func new*(
     protected: true,
     ecK: manifest.ecK,
     ecM: manifest.ecM,
-    originalTreeCid: manifest.treeCid,
+    originalTreeCid: manifest.originalTreeCid,
     originalDatasetSize: manifest.originalDatasetSize,
     protectedStrategy: manifest.protectedStrategy,
     verifiable: true,
diff --git a/codex/node.nim b/codex/node.nim
index f1fad5d8..75a3450b 100644
--- a/codex/node.nim
+++ b/codex/node.nim
@@ -240,14 +240,14 @@ proc streamEntireDataset(
     self: CodexNodeRef,
     manifest: Manifest,
     manifestCid: Cid,
-): ?!LPStream =
+): Future[?!LPStream] {.async.} =
   ## Streams the contents of the entire dataset described by the manifest.
   ##
   trace "Retrieving blocks from manifest", manifestCid
 
   if manifest.protected:
     # Retrieve, decode and save to the local store all EC groups
-    proc erasureJob(): Future[void] {.async.} =
+    proc erasureJob(): Future[?!void] {.async.} =
       try:
         # Spawn an erasure decoding job
         let
           erasure = Erasure.new(
             self.networkStore,
             leoEncoderProvider,
             leoDecoderProvider,
             self.taskpool)
         without _ =? (await erasure.decode(manifest)), error:
-          trace "Unable to erasure decode manifest", manifestCid, exc = error.msg
-      except CatchableError as exc:
-        trace "Exception decoding manifest", manifestCid, exc = exc.msg
+          error "Unable to erasure decode manifest", manifestCid, exc = error.msg
+          return failure(error)
 
-    asyncSpawn erasureJob()
+        return success()
+      # --------------------------------------------------------------------------
+      # FIXME this is a HACK so that the node does not crash during the workshop.
+      # We should NOT catch Defect.
+      except Exception as exc:
+        trace "Exception decoding manifest", manifestCid, exc = exc.msg
+        return failure(exc.msg)
+      # --------------------------------------------------------------------------
+
+    if err =? (await erasureJob()).errorOption:
+      return failure(err)
 
   # Retrieve all blocks of the dataset sequentially from the local store or network
   trace "Creating store stream for manifest", manifestCid
@@ -283,7 +292,7 @@ proc retrieve*(
 
       return await self.streamSingleBlock(cid)
 
-  self.streamEntireDataset(manifest, cid)
+  await self.streamEntireDataset(manifest, cid)
 
 proc store*(
   self: CodexNodeRef,
@@ -414,6 +423,15 @@ proc setupRequest(
       trace "Unable to fetch manifest for cid"
       return failure error
 
+  # ----------------------------------------------------------------------------
+  # FIXME this is a BAND-AID to address
+  # https://github.com/codex-storage/nim-codex/issues/852 temporarily for the
+  # workshop. Remove this once we get that fixed.
+  if manifest.blocksCount.uint == ecK:
+    return failure("Cannot set up slots for a dataset with ecK == numBlocks. Please use a larger file or a different combination of `nodes` and `tolerance`.")
+  # ----------------------------------------------------------------------------
+
   # Erasure code the dataset according to provided parameters
   let
     erasure = Erasure.new(
diff --git a/codex/rest/api.nim b/codex/rest/api.nim
index a453985b..ba98668b 100644
--- a/codex/rest/api.nim
+++ b/codex/rest/api.nim
@@ -32,6 +32,7 @@ import ../node
 import ../blocktype
 import ../conf
 import ../contracts
+import ../erasure/erasure
 import ../manifest
 import ../streams/asyncstreamwrapper
 import ../stores
@@ -432,8 +433,16 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
       let nodes = params.nodes |? 1
       let tolerance = params.tolerance |? 0
 
-      if (nodes - tolerance) < 1:
-        return RestApiResponse.error(Http400, "Tolerance cannot be greater or equal than nodes (nodes - tolerance)")
+      # prevent underflow
+      if tolerance > nodes:
+        return RestApiResponse.error(Http400, "Invalid parameters: `tolerance` cannot be greater than `nodes`")
+
+      let ecK = nodes - tolerance
+      let ecM = tolerance # for readability
+
+      # ensure Leopard constraints: 1 < K and K ≥ M
+      if ecK <= 1 or ecK < ecM:
+        return RestApiResponse.error(Http400, "Invalid parameters: parameters must satisfy `1 < (nodes - tolerance)` and `(nodes - tolerance) ≥ tolerance`")
 
       without expiry =? params.expiry:
         return RestApiResponse.error(Http400, "Expiry required")
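The validation above reduces to a small predicate over `nodes` and `tolerance`; a self-contained sketch (names hypothetical) covering the accepted and rejected combinations that the integration tests below exercise:

  func validEcParams(nodes, tolerance: uint): bool =
    if tolerance > nodes:    # nodes - tolerance would underflow
      return false
    let
      ecK = nodes - tolerance
      ecM = tolerance
    ecK > 1 and ecK >= ecM

  doAssert validEcParams(3, 1)     # K=2, M=1 -> accepted
  doAssert validEcParams(2, 0)     # K=2, M=0 -> accepted
  doAssert not validEcParams(1, 1) # K=0      -> rejected
  doAssert not validEcParams(2, 1) # K=1      -> rejected
  doAssert not validEcParams(2, 3) # tolerance > nodes -> rejected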
@@ -451,6 +460,11 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
           params.collateral,
           expiry), error:
 
+        if error of InsufficientBlocksError:
+          return RestApiResponse.error(Http400,
+            "Dataset too small for erasure parameters, need at least " &
+              $(ref InsufficientBlocksError)(error).minSize.int & " bytes")
+
         return RestApiResponse.error(Http500, error.msg)
 
       return RestApiResponse.response(purchaseId.toHex)
diff --git a/codex/slots/proofs/backends/circomcompat.nim b/codex/slots/proofs/backends/circomcompat.nim
index 70ff4dff..8619457a 100644
--- a/codex/slots/proofs/backends/circomcompat.nim
+++ b/codex/slots/proofs/backends/circomcompat.nim
@@ -9,17 +9,14 @@
 
 {.push raises: [].}
 
-import std/sequtils
+import std/sugar
 
 import pkg/chronos
 import pkg/questionable/results
 import pkg/circomcompat
-import pkg/poseidon2/io
 
 import ../../types
 import ../../../stores
-import ../../../merkletree
-import ../../../codextypes
 import ../../../contracts
 
 import ./converters
@@ -39,6 +36,41 @@ type
     backendCfg : ptr CircomBn254Cfg
     vkp*       : ptr CircomKey
 
+  NormalizedProofInputs*[H] {.borrow: `.`.} = distinct ProofInputs[H]
+
+func normalizeInput*[H](self: CircomCompat, input: ProofInputs[H]):
+    NormalizedProofInputs[H] =
+  ## Parameters in CIRCOM circuits are statically sized and must be properly
+  ## padded before they can be passed onto the circuit. This function takes
+  ## variable length parameters and performs that padding.
+  ##
+  ## The output from this function can be JSON-serialized and used as direct
+  ## inputs to the CIRCOM circuit for testing and debugging when one wishes
+  ## to bypass the Rust FFI.
+
+  let normSamples = collect:
+    for sample in input.samples:
+      var merklePaths = sample.merklePaths
+      merklePaths.setLen(self.slotDepth)
+      Sample[H](
+        cellData: sample.cellData,
+        merklePaths: merklePaths
+      )
+
+  var normSlotProof = input.slotProof
+  normSlotProof.setLen(self.datasetDepth)
+
+  NormalizedProofInputs[H] ProofInputs[H](
+    entropy: input.entropy,
+    datasetRoot: input.datasetRoot,
+    slotIndex: input.slotIndex,
+    slotRoot: input.slotRoot,
+    nCellsPerSlot: input.nCellsPerSlot,
+    nSlotsPerDataSet: input.nSlotsPerDataSet,
+    slotProof: normSlotProof,
+    samples: normSamples
+  )
+
 proc release*(self: CircomCompat) =
   ## Release the ctx
   ##
@@ -49,27 +81,20 @@ proc release*(self: CircomCompat) =
   if not isNil(self.backendCfg):
     self.backendCfg.unsafeAddr.releaseCfg()
 
   if not isNil(self.vkp):
     self.vkp.unsafeAddr.release_key()
 
-proc prove*[H](
+proc prove[H](
   self: CircomCompat,
-  input: ProofInputs[H]): ?!CircomProof =
-  ## Encode buffers using a ctx
-  ##
+  input: NormalizedProofInputs[H]): ?!CircomProof =
 
-  # NOTE: All inputs are statically sized per circuit
-  # and adjusted accordingly right before being passed
-  # to the circom ffi - `setLen` is used to adjust the
-  # sequence length to the correct size which also 0 pads
-  # to the correct length
   doAssert input.samples.len == self.numSamples,
     "Number of samples does not match"
 
   doAssert input.slotProof.len <= self.datasetDepth,
-    "Number of slot proofs does not match"
+    "Slot proof is too deep - dataset has more slots than what we can handle?"
   doAssert input.samples.allIt(
     block:
       (it.merklePaths.len <= self.slotDepth + self.blkDepth and
-      it.cellData.len <= self.cellElms * 32)), "Merkle paths length does not match"
+      it.cellData.len == self.cellElms)), "Merkle paths too deep or cells too big for circuit"
 
   # TODO: All parameters should match circom's static parameters
   var
@@ -116,8 +141,7 @@ proc prove*[H](
   var
     slotProof = input.slotProof.mapIt( it.toBytes ).concat
-
-  slotProof.setLen(self.datasetDepth) # zero pad inputs to correct size
+  doAssert(slotProof.len == self.datasetDepth)
 
   # arrays are always flattened
   if ctx.pushInputU256Array(
     "slotProof".cstring,
     slotProof[0].addr,
     uint (slotProof[0].len * slotProof.len)) != ERR_OK:
     return failure("Failed to push slot proof")
 
   for s in input.samples:
     var
       merklePaths = s.merklePaths.mapIt( it.toBytes )
-      data = s.cellData
+      data = s.cellData.mapIt( @(it.toBytes) ).concat
 
-    merklePaths.setLen(self.slotDepth) # zero pad inputs to correct size
     if ctx.pushInputU256Array(
       "merklePaths".cstring,
       merklePaths[0].addr,
       uint (merklePaths[0].len * merklePaths.len)) != ERR_OK:
       return failure("Failed to push merkle paths")
 
-    data.setLen(self.cellElms * 32) # zero pad inputs to correct size
     if ctx.pushInputU256Array(
       "cellData".cstring,
       data[0].addr,
@@ -162,6 +184,12 @@ proc prove*[H](
 
   success proof
 
+proc prove*[H](
+  self: CircomCompat,
+  input: ProofInputs[H]): ?!CircomProof =
+
+  self.prove(self.normalizeInput(input))
+
 proc verify*[H](
   self: CircomCompat,
   proof: CircomProof,
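The padding that `normalizeInput` applies (see the hunks above) relies on Nim's `setLen` semantics: growing a seq fills the new positions with zeroed, default-initialized elements. A minimal standalone sketch, with `slotDepth = 5` as an assumed static circuit parameter:

  var merklePath = @[1, 2, 3]  # stand-ins for field elements
  let slotDepth = 5            # circuit's static depth (assumed)
  merklePath.setLen(slotDepth) # grows and zero-fills
  doAssert merklePath == @[1, 2, 3, 0, 0]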
diff --git a/codex/slots/sampler/sampler.nim b/codex/slots/sampler/sampler.nim
index d22121c2..3270d55a 100644
--- a/codex/slots/sampler/sampler.nim
+++ b/codex/slots/sampler/sampler.nim
@@ -38,7 +38,7 @@ type
 func getCell*[T, H](
   self: DataSampler[T, H],
   blkBytes: seq[byte],
-  blkCellIdx: Natural): seq[byte] =
+  blkCellIdx: Natural): seq[H] =
 
   let
     cellSize = self.builder.cellSize.uint64
@@ -47,7 +47,7 @@ func getCell*[T, H](
 
   doAssert (dataEnd - dataStart) == cellSize, "Invalid cell size"
 
-  toInputData[H](blkBytes[dataStart ..< dataEnd])
+  blkBytes[dataStart ..< dataEnd].elements(H).toSeq()
 
 proc getSample*[T, H](
   self: DataSampler[T, H],
diff --git a/codex/slots/sampler/utils.nim b/codex/slots/sampler/utils.nim
index fa68e408..998f2cdc 100644
--- a/codex/slots/sampler/utils.nim
+++ b/codex/slots/sampler/utils.nim
@@ -7,23 +7,13 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.
 
-import std/sugar
 import std/bitops
-import std/sequtils
 
 import pkg/questionable/results
-import pkg/poseidon2
-import pkg/poseidon2/io
-
 import pkg/constantine/math/arithmetic
-import pkg/constantine/math/io/io_fields
-
 import ../../merkletree
 
-func toInputData*[H](data: seq[byte]): seq[byte] =
-  return toSeq(data.elements(H)).mapIt( @(it.toBytes) ).concat
-
 func extractLowBits*[n: static int](elm: BigInt[n], k: int): uint64 =
   doAssert( k > 0 and k <= 64 )
   var r = 0'u64
@@ -39,6 +29,7 @@ func extractLowBits(fld: Poseidon2Hash, k: int): uint64 =
   return extractLowBits(elm, k);
 
 func floorLog2*(x : int) : int =
+  doAssert ( x > 0 )
   var k = -1
   var y = x
   while (y > 0):
@@ -47,10 +38,8 @@ func floorLog2*(x : int) : int =
   return k
 
 func ceilingLog2*(x : int) : int =
-  if (x == 0):
-    return -1
-  else:
-    return (floorLog2(x-1) + 1)
+  doAssert ( x > 0 )
+  return (floorLog2(x - 1) + 1)
 
 func toBlkInSlot*(cell: Natural, numCells: Natural): Natural =
   let log2 = ceilingLog2(numCells)
@@ -80,7 +69,7 @@ func cellIndices*(
     numCells: Natural,
     nSamples: Natural): seq[Natural] =
 
   var indices: seq[Natural]
-  while (indices.len < nSamples):
-    let idx = cellIndex(entropy, slotRoot, numCells, indices.len + 1)
-    indices.add(idx.Natural)
+  for i in 1..nSamples:
+    indices.add(cellIndex(entropy, slotRoot, numCells, i))
+  indices
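Worked examples for the two log helpers above, assuming the patched module is importable from the test tree as shown (both now assert on non-positive input instead of returning -1):

  import pkg/codex/slots/sampler/utils

  doAssert floorLog2(8) == 3   # 2^3 = 8
  doAssert floorLog2(9) == 3   # rounds down
  doAssert ceilingLog2(8) == 3 # exact powers are unchanged
  doAssert ceilingLog2(9) == 4 # rounds up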
diff --git a/codex/slots/types.nim b/codex/slots/types.nim
index 04690adc..8703086e 100644
--- a/codex/slots/types.nim
+++ b/codex/slots/types.nim
@@ -9,7 +9,7 @@
 
 type
   Sample*[H] = object
-    cellData*: seq[byte]
+    cellData*: seq[H]
     merklePaths*: seq[H]
 
   PublicInputs*[H] = object
@@ -24,5 +24,5 @@ type
     slotRoot*: H
     nCellsPerSlot*: Natural
     nSlotsPerDataSet*: Natural
-    slotProof*: seq[H]
-    samples*: seq[Sample[H]]
+    slotProof*: seq[H] # inclusion proof that shows that the slot root (leaf) is part of the dataset (root)
+    samples*: seq[Sample[H]] # inclusion proofs which show that the selected cells (leaves) are part of the slot (roots)
diff --git a/config.nims b/config.nims
index 45830fad..b64aacbd 100644
--- a/config.nims
+++ b/config.nims
@@ -121,6 +121,9 @@ switch("define", "ctt_asm=false")
 # Allow the use of old-style case objects for nim config compatibility
 switch("define", "nimOldCaseObjects")
 
+# Enable compat mode for Chronos V4
+switch("define", "chronosHandleException")
+
 # begin Nimble config (version 1)
 when system.fileExists("nimble.paths"):
   include "nimble.paths"
diff --git a/tests/circuits/fixtures/proof_main.r1cs b/tests/circuits/fixtures/proof_main.r1cs
index a2b9c6b7..8b58ffa8 100644
Binary files a/tests/circuits/fixtures/proof_main.r1cs and b/tests/circuits/fixtures/proof_main.r1cs differ
diff --git a/tests/circuits/fixtures/proof_main.wasm b/tests/circuits/fixtures/proof_main.wasm
index 30fe6f0e..f908d4f7 100644
Binary files a/tests/circuits/fixtures/proof_main.wasm and b/tests/circuits/fixtures/proof_main.wasm differ
diff --git a/tests/circuits/fixtures/proof_main.zkey b/tests/circuits/fixtures/proof_main.zkey
index 999a12ab..e4b889e9 100644
Binary files a/tests/circuits/fixtures/proof_main.zkey and b/tests/circuits/fixtures/proof_main.zkey differ
diff --git a/tests/codex/sales/testsales.nim b/tests/codex/sales/testsales.nim
index 543a7133..0fdf3bf9 100644
--- a/tests/codex/sales/testsales.nim
+++ b/tests/codex/sales/testsales.nim
@@ -323,8 +323,7 @@ asyncchecksuite "Sales":
       slot: UInt256,
       onBatch: BatchProc): Future[?!void] {.async.} =
       let blk = bt.Block.new( @[1.byte] ).get
-      onBatch( blk.repeat(request.ask.slotSize.truncate(int)) )
-      return success()
+      await onBatch( blk.repeat(request.ask.slotSize.truncate(int)) )
 
     createAvailability()
     await market.requestStorage(request)
@@ -337,8 +336,8 @@ asyncchecksuite "Sales":
       onBatch: BatchProc): Future[?!void] {.async.} =
       slotIndex = slot
       let blk = bt.Block.new( @[1.byte] ).get
-      onBatch(@[ blk ])
-      return success()
+      await onBatch(@[ blk ])
+
     let sold = newFuture[void]()
     sales.onSale = proc(request: StorageRequest, slotIndex: UInt256) =
       sold.complete()
diff --git a/tests/codex/sales/testslotqueue.nim b/tests/codex/sales/testslotqueue.nim
index 71ac8597..193751c8 100644
--- a/tests/codex/sales/testslotqueue.nim
+++ b/tests/codex/sales/testslotqueue.nim
@@ -524,7 +524,7 @@ suite "Slot queue":
         request.ask,
         request.expiry,
         seen = true)
-      queue.push(item)
+      check queue.push(item).isOk
 
       check eventually queue.paused
       check onProcessSlotCalledWith.len == 0
@@ -534,7 +534,7 @@ suite "Slot queue":
       let request = StorageRequest.example
       var items = SlotQueueItem.init(request)
-      queue.push(items)
+      check queue.push(items).isOk
 
       # check all items processed
       check eventually queue.len == 0
@@ -546,7 +546,7 @@ suite "Slot queue":
         request.expiry,
         seen = true)
       check queue.paused
-      queue.push(item0)
+      check queue.push(item0).isOk
       check queue.paused
 
     test "paused queue waits for unpause before continuing processing":
@@ -558,7 +558,7 @@ suite "Slot queue":
         seen = false)
       check queue.paused
       # push causes unpause
-      queue.push(item)
+      check queue.push(item).isOk
       # check all items processed
       check eventually onProcessSlotCalledWith == @[
         (item.requestId, item.slotIndex),
@@ -576,8 +576,8 @@ suite "Slot queue":
         request.ask,
         request.expiry,
         seen = true)
-      queue.push(item0)
-      queue.push(item1)
+      check queue.push(item0).isOk
+      check queue.push(item1).isOk
 
       check queue[0].seen
       check queue[1].seen
diff --git a/tests/codex/slots/backends/helpers.nim b/tests/codex/slots/backends/helpers.nim
index e81762cf..85e8ae65 100644
--- a/tests/codex/slots/backends/helpers.nim
+++ b/tests/codex/slots/backends/helpers.nim
@@ -17,21 +17,6 @@ import pkg/codex/utils/json
 
 export types
 
-func fromCircomData*(_: type Poseidon2Hash, cellData: seq[byte]): seq[Poseidon2Hash] =
-  var
-    pos = 0
-    cellElms: seq[Bn254Fr]
-  while pos < cellData.len:
-    var
-      step = 32
-      offset = min(pos + step, cellData.len)
-      data = cellData[pos..
//_.log
+      .withLogTopics("node", "erasure", "marketplace", )
+      .some,
+
+    providers:
+      CodexConfigs.init(nodes=0)
+        # .debug() # uncomment to enable console log output
+        # .withLogFile() # uncomment to output log file to tests/integration/logs/ //_.log
+        # .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock")
+        .some,
+  ):
+    let reward = 400.u256
+    let duration = 10.periods
+    let collateral = 200.u256
+    let expiry = 5.periods
+    let data = await RandomChunker.example(blocks=8)
+    let client = clients()[0]
+    let clientApi = client.client
+
+    let cid = clientApi.upload(data).get
+
+    var requestId = none RequestId
+    proc onStorageRequested(event: StorageRequested) {.raises:[].} =
+      requestId = event.requestId.some
+
+    let subscription = await marketplace.subscribe(StorageRequested, onStorageRequested)
+
+    # client requests storage but requires multiple slots to host the content
+    let id = await clientApi.requestStorage(
+      cid,
+      duration=duration,
+      reward=reward,
+      expiry=expiry,
+      collateral=collateral,
+      nodes=3,
+      tolerance=1
+    )
+
+    check eventually(requestId.isSome, timeout=expiry.int * 1000)
+
+    let request = await marketplace.getRequest(requestId.get)
+    let cidFromRequest = Cid.init(request.content.cid).get()
+    let downloaded = await clientApi.downloadBytes(cidFromRequest, local = true)
+    check downloaded.isOk
+    check downloaded.get.toHex == data.toHex
+
+    await subscription.unsubscribe()
diff --git a/tests/integration/testpurchasing.nim b/tests/integration/testpurchasing.nim
index 8f5a5bef..bc87f51b 100644
--- a/tests/integration/testpurchasing.nim
+++ b/tests/integration/testpurchasing.nim
@@ -8,7 +8,8 @@ import ../examples
 twonodessuite "Purchasing", debug1 = false, debug2 = false:
 
   test "node handles storage request":
-    let cid = client1.upload("some file contents").get
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
     let id1 = client1.requestStorage(cid, duration=100.u256, reward=2.u256, proofProbability=3.u256, expiry=10, collateral=200.u256).get
     let id2 = client1.requestStorage(cid, duration=400.u256, reward=5.u256, proofProbability=6.u256, expiry=10, collateral=201.u256).get
     check id1 != id2
@@ -26,7 +27,7 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
       proofProbability=3.u256,
       expiry=30,
       collateral=200.u256,
-      nodes=2,
+      nodes=3,
       tolerance=1).get
 
     let request = client1.getPurchase(id).get.request.get
@@ -35,7 +36,7 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
     check request.ask.proofProbability == 3.u256
     check request.expiry == 30
     check request.ask.collateral == 200.u256
-    check request.ask.slots == 2'u64
+    check request.ask.slots == 3'u64
     check request.ask.maxSlotLoss == 1'u64
 
   # TODO: We currently do not support encoding single chunks
@@ -52,7 +53,8 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
   #   check request.ask.maxSlotLoss == 1'u64
 
   test "node remembers purchase status after restart":
-    let cid = client1.upload("some file contents").get
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
     let id = client1.requestStorage(cid,
       duration=100.u256,
       reward=2.u256,
@@ -71,25 +73,12 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
       proofProbability=3.u256,
       expiry=30,
       collateral=200.u256).get
 
    check request.ask.proofProbability == 3.u256
    check request.expiry == 30
    check request.ask.collateral == 200.u256
-    check request.ask.slots == 1'u64
+    check request.ask.slots == 2'u64
    check request.ask.maxSlotLoss == 0'u64
 
-  test "request storage fails if nodes and tolerance aren't correct":
-    let cid = client1.upload("some file contents").get
-    let responseBefore = client1.requestStorageRaw(cid,
-      duration=100.u256,
-      reward=2.u256,
-      proofProbability=3.u256,
-      expiry=30,
-      collateral=200.u256,
-      nodes=1,
-      tolerance=1)
-
-    check responseBefore.status == "400 Bad Request"
-    check responseBefore.body == "Tolerance cannot be greater or equal than nodes (nodes - tolerance)"
-
   test "node requires expiry and its value to be in future":
-    let cid = client1.upload("some file contents").get
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
     let responseMissing = client1.requestStorageRaw(cid, duration=1.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256)
     check responseMissing.status == "400 Bad Request"
diff --git a/tests/integration/testrestapi.nim b/tests/integration/testrestapi.nim
index 8c2c20e4..1834dcf2 100644
--- a/tests/integration/testrestapi.nim
+++ b/tests/integration/testrestapi.nim
@@ -1,7 +1,9 @@
+import std/httpclient
 import std/sequtils
 from pkg/libp2p import `==`
 import pkg/codex/units
 import ./twonodes
+import ../examples
 
 twonodessuite "REST API", debug1 = false, debug2 = false:
 
@@ -36,3 +38,93 @@ twonodessuite "REST API", debug1 = false, debug2 = false:
 
     check:
       [cid1, cid2].allIt(it in list.content.mapIt(it.cid))
+
+  test "request storage fails for datasets that are too small":
+    let cid = client1.upload("some file contents").get
+    let response = client1.requestStorageRaw(cid, duration=10.u256, reward=2.u256, proofProbability=3.u256, nodes=2, collateral=200.u256, expiry=9)
+
+    check:
+      response.status == "400 Bad Request"
+      response.body == "Dataset too small for erasure parameters, need at least " & $(2*DefaultBlockSize.int) & " bytes"
+
+  test "request storage succeeds for sufficiently sized datasets":
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
+    let response = client1.requestStorageRaw(cid, duration=10.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256, expiry=9)
+
+    check:
+      response.status == "200 OK"
+
+  test "request storage fails if nodes and tolerance aren't correct":
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
+    let duration = 100.u256
+    let reward = 2.u256
+    let proofProbability = 3.u256
+    let expiry = 30.uint
+    let collateral = 200.u256
+    let ecParams = @[(1, 0), (1, 1), (2, 1), (3, 2), (3, 3)]
+
+    for ecParam in ecParams:
+      let (nodes, tolerance) = ecParam
+
+      var responseBefore = client1.requestStorageRaw(cid,
+        duration,
+        reward,
+        proofProbability,
+        collateral,
+        expiry,
+        nodes.uint,
+        tolerance.uint)
+
+      check responseBefore.status == "400 Bad Request"
+      check responseBefore.body == "Invalid parameters: parameters must satisfy `1 < (nodes - tolerance)` and `(nodes - tolerance) ≥ tolerance`"
+
+  test "request storage fails if tolerance > nodes (underflow protection)":
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
+    let duration = 100.u256
+    let reward = 2.u256
+    let proofProbability = 3.u256
+    let expiry = 30.uint
+    let collateral = 200.u256
+    let ecParams = @[(0, 1), (1, 2), (2, 3)]
+
+    for ecParam in ecParams:
+      let (nodes, tolerance) = ecParam
+
+      var responseBefore = client1.requestStorageRaw(cid,
+        duration,
+        reward,
+        proofProbability,
+        collateral,
+        expiry,
+        nodes.uint,
+        tolerance.uint)
+
+      check responseBefore.status == "400 Bad Request"
+      check responseBefore.body == "Invalid parameters: `tolerance` cannot be greater than `nodes`"
+
+  test "request storage succeeds if nodes and tolerance within range":
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
+    let duration = 100.u256
+    let reward = 2.u256
+    let proofProbability = 3.u256
+    let expiry = 30.uint
+    let collateral = 200.u256
+    let ecParams = @[(2, 0), (3, 1), (5, 2)]
+
+    for ecParam in ecParams:
+      let (nodes, tolerance) = ecParam
+
+      var responseBefore = client1.requestStorageRaw(cid,
+        duration,
+        reward,
+        proofProbability,
+        collateral,
+        expiry,
+        nodes.uint,
+        tolerance.uint)
+
+      check responseBefore.status == "200 OK"
diff --git a/tests/testIntegration.nim b/tests/testIntegration.nim
index de854ecb..b1f81ef4 100644
--- a/tests/testIntegration.nim
+++ b/tests/testIntegration.nim
@@ -6,5 +6,6 @@ import ./integration/testpurchasing
 import ./integration/testblockexpiration
 import ./integration/testmarketplace
 import ./integration/testproofs
+import ./integration/testecbug
 
 {.warning[UnusedImport]:off.}
diff --git a/vendor/codex-contracts-eth b/vendor/codex-contracts-eth
index 57e8cd50..7ad26688 160000
--- a/vendor/codex-contracts-eth
+++ b/vendor/codex-contracts-eth
@@ -1 +1 @@
-Subproject commit 57e8cd5013325f05e16833a5320b575d32a403f3
+Subproject commit 7ad26688a3b75b914d626e2623174a36f4425f51
diff --git a/vendor/nim-chronos b/vendor/nim-chronos
index 0277b65b..035ae11b 160000
--- a/vendor/nim-chronos
+++ b/vendor/nim-chronos
@@ -1 +1 @@
-Subproject commit 0277b65be2c7a365ac13df002fba6e172be55537
+Subproject commit 035ae11ba92369e7722e649db597e79134fd06b9
diff --git a/vendor/nim-codex-dht b/vendor/nim-codex-dht
index a7f14bc9..63822e83 160000
--- a/vendor/nim-codex-dht
+++ b/vendor/nim-codex-dht
@@ -1 +1 @@
-Subproject commit a7f14bc9b783f1b9e2d02cc85a338b1411058095
+Subproject commit 63822e83561ea1c6396d0f3eca583b038f5d44c6
diff --git a/vendor/nim-http-utils b/vendor/nim-http-utils
index 3b491a40..be57dbc9 160000
--- a/vendor/nim-http-utils
+++ b/vendor/nim-http-utils
@@ -1 +1 @@
-Subproject commit 3b491a40c60aad9e8d3407443f46f62511e63b18
+Subproject commit be57dbc902d36f37540897e98c69aa80f868cb45
diff --git a/vendor/nim-leopard b/vendor/nim-leopard
index 1a6f2ab7..895ff24c 160000
--- a/vendor/nim-leopard
+++ b/vendor/nim-leopard
@@ -1 +1 @@
-Subproject commit 1a6f2ab7252426a6ac01482a68b75d0c3b134cf0
+Subproject commit 895ff24ca6615d577acfb11811cdd5465f596c97
diff --git a/vendor/nimbus-build-system b/vendor/nimbus-build-system
index fe9bc3f3..b2e1fb02 160000
--- a/vendor/nimbus-build-system
+++ b/vendor/nimbus-build-system
@@ -1 +1 @@
-Subproject commit fe9bc3f3759ae1add6bf8c899db2e75327f03782
+Subproject commit b2e1fb022f1ee800b439648953e92cc993c1264c