Merge branch 'master' into testapi-delete
Commit cf9805e855

.github/workflows/nim-matrix.yml (vendored): 2 lines changed
@@ -6,7 +6,7 @@ on:

 env:
   cache_nonce: 0 # Allows for easily busting actions/cache caches
-  nim_version: pinned, v1.6.16, v1.6.18
+  nim_version: pinned

 jobs:
   matrix:
.github/workflows/release.yml (vendored): 85 lines changed
@@ -8,10 +8,12 @@ on:

 env:
   cache_nonce: 0 # Allows for easily busting actions/cache caches
-  nim_version: v1.6.14
+  nim_version: pinned
   rust_version: 1.78.0
   binary_base: codex
-  upload_to_codex: false
+  build_dir: build
+  nim_flags: '-d:verify_circuit=true'
+  windows_libs: 'libstdc++-6.dll libgomp-1.dll libgcc_s_seh-1.dll libwinpthread-1.dll'

 jobs:
   # Matrix
@@ -74,13 +76,21 @@ jobs:

       - name: Release - Build
        run: |
-          make NIMFLAGS="--out:${{ env.binary }}"
+          make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.binary }} ${{ env.nim_flags }}"

-      - name: Release - Upload binaries
+      - name: Release - Libraries
+        run: |
+          if [[ "${{ matrix.os }}" == "windows" ]]; then
+            for lib in ${{ env.windows_libs }}; do
+              cp -v "${MINGW_PREFIX}/bin/${lib}" "${{ env.build_dir }}"
+            done
+          fi
+
+      - name: Release - Upload build artifacts
         uses: actions/upload-artifact@v4
         with:
           name: release-${{ env.binary }}
-          path: ${{ env.binary }}
+          path: ${{ env.build_dir }}/
           retention-days: 1

       # Release
@@ -99,46 +109,49 @@ jobs:
       - name: Release - Compress and checksum
         run: |
           cd /tmp/release
-          prepare() {
-            # Checksum
+          checksum() {
             arc="${1}"
             sha256sum "${arc}" >"${arc}.sha256"
-
-            # Upload to Codex
-            if [[ "${{ env.upload_to_codex }}" == "true" ]]; then
-              codex_endpoints="${{ secrets.CODEX_ENDPOINTS }}"
-              codex_username="${{ secrets.CODEX_USERNAME }}"
-              codex_password="${{ secrets.CODEX_PASSWORD }}"
-
-              for endpoint in ${codex_endpoints}; do
-                echo "::add-mask::${endpoint}"
-                cid=$(curl -X POST \
-                  "${endpoint}/api/codex/v1/data" \
-                  -u "${codex_username}":"${codex_password}" \
-                  -H "content-type: application/octet-stream" \
-                  -T "${arc}")
-
-                echo "${cid}" >"${arc}.cid"
-              done
-            fi
           }

           # Compress and prepare
           for file in *; do
-            if [[ "${file}" == *".exe"* ]]; then
-              arc="${file%.*}.zip"
-              zip "${arc}" "${file}"
-              rm -f "${file}"
-              prepare "${arc}"
-            else
-              arc="${file}.tar.gz"
-              tar cfz "${arc}" "${file}"
-              rm -f "${file}"
-              prepare "${arc}"
+            # Exclude libraries
+            if [[ "${file}" != *".dll"* ]]; then
+              if [[ "${file}" == *".exe"* ]]; then
+
+                # Windows - binary only
+                arc="${file%.*}.zip"
+                zip "${arc}" "${file}"
+                checksum "${arc}"
+
+                # Windows - binary and libs
+                arc="${file%.*}-libs.zip"
+                zip "${arc}" "${file}" ${{ env.windows_libs }}
+                rm -f "${file}" ${{ env.windows_libs }}
+                checksum "${arc}"
+              else
+
+                # Linux/macOS
+                arc="${file}.tar.gz"
+                chmod 755 "${file}"
+                tar cfz "${arc}" "${file}"
+                rm -f "${file}"
+                checksum "${arc}"
+              fi
             fi
           done

+      - name: Release - Upload compressed artifacts and checksums
+        uses: actions/upload-artifact@v4
+        with:
+          name: archives-and-checksums
+          path: /tmp/release/
+          retention-days: 1
+
       - name: Release
         uses: softprops/action-gh-release@v2
+        if: startsWith(github.ref, 'refs/tags/')
         with:
           files: |
             /tmp/release/*
|
2
Makefile
2
Makefile
@@ -15,7 +15,7 @@
 #
 # If NIM_COMMIT is set to "nimbusbuild", this will use the
 # version pinned by nimbus-build-system.
-PINNED_NIM_VERSION := v1.6.14
+PINNED_NIM_VERSION := 38640664088251bbc88917b4bacfd86ec53014b8 # 1.6.21

 ifeq ($(NIM_COMMIT),)
 NIM_COMMIT := $(PINNED_NIM_VERSION)
@@ -105,8 +105,7 @@ proc advertiseQueueLoop(b: DiscoveryEngine) {.async.} =
       trace "Begin iterating blocks..."
       for c in cids:
         if cid =? await c:
-          b.advertiseBlock(cid)
-          await sleepAsync(100.millis)
+          await b.advertiseBlock(cid)
       trace "Iterating blocks finished."

     await sleepAsync(b.advertiseLoopSleep)
@@ -31,7 +31,7 @@ import ./codextypes
 export errors, logutils, units, codextypes

 type
-  Block* = object of RootObj
+  Block* = ref object of RootObj
     cid*: Cid
     data*: seq[byte]

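Note: `Block` here (and `Manifest` further down) changes from a value `object` to a `ref object`, so assignments share one heap instance instead of copying. A minimal, hedged illustration of that difference using stand-in types (not the real Codex types):

    # Illustration only; ValueBlock/RefBlock are stand-ins, not Codex types.
    type
      ValueBlock = object
        data: seq[byte]
      RefBlock = ref object
        data: seq[byte]

    var a = ValueBlock(data: @[1'u8])
    var b = a                  # value semantics: full copy
    b.data.add 2'u8
    assert a.data.len == 1     # original unaffected

    let x = RefBlock(data: @[1'u8])
    let y = x                  # reference semantics: same instance
    y.data.add 2'u8
    assert x.data.len == 2     # change visible through both references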
@@ -114,7 +114,14 @@ proc proxySpawnEncodeTask(
     args: EncodeTaskArgs,
     data: ref seq[seq[byte]]
 ): Flowvar[EncodeTaskResult] =
-  tp.spawn encodeTask(args, data[])
+  # FIXME Uncomment the code below after addressing an issue:
+  # https://github.com/codex-storage/nim-codex/issues/854
+
+  # tp.spawn encodeTask(args, data[])
+
+  let fv = EncodeTaskResult.newFlowVar
+  fv.readyWith(encodeTask(args, data[]))
+  return fv

 proc proxySpawnDecodeTask(
     tp: Taskpool,
@@ -122,7 +129,14 @@ proc proxySpawnDecodeTask(
     data: ref seq[seq[byte]],
     parity: ref seq[seq[byte]]
 ): Flowvar[DecodeTaskResult] =
-  tp.spawn decodeTask(args, data[], parity[])
+  # FIXME Uncomment the code below after addressing an issue:
+  # https://github.com/codex-storage/nim-codex/issues/854
+
+  # tp.spawn decodeTask(args, data[], parity[])
+
+  let fv = DecodeTaskResult.newFlowVar
+  fv.readyWith(decodeTask(args, data[], parity[]))
+  return fv

 proc awaitResult[T](signal: ThreadSignalPtr, handle: Flowvar[T]): Future[?!T] {.async.} =
   await wait(signal)
@@ -27,6 +27,7 @@ import ../blocktype as bt
 import ../utils
 import ../utils/asynciter
 import ../indexingstrategy
+import ../errors

 import pkg/stew/byteutils

@@ -82,6 +83,13 @@
     blocksCount: Natural
     strategy: StrategyType

+  ErasureError* = object of CodexError
+  InsufficientBlocksError* = object of ErasureError
+    # Minimum size, in bytes, that the dataset must have had
+    # for the encoding request to have succeeded with the parameters
+    # provided.
+    minSize*: NBytes
+
 func indexToPos(steps, idx, step: int): int {.inline.} =
   ## Convert an index to a position in the encoded
   ## dataset
@@ -236,11 +244,13 @@ proc init*(
   ecK: Natural, ecM: Natural,
   strategy: StrategyType): ?!EncodingParams =
   if ecK > manifest.blocksCount:
-    return failure(
-      "Unable to encode manifest, not enough blocks, ecK = " &
+    let exc = (ref InsufficientBlocksError)(
+      msg: "Unable to encode manifest, not enough blocks, ecK = " &
       $ecK &
       ", blocksCount = " &
-      $manifest.blocksCount)
+      $manifest.blocksCount,
+      minSize: ecK.NBytes * manifest.blockSize)
+    return failure(exc)

   let
     rounded = roundUp(manifest.blocksCount, ecK)
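Note: the new `InsufficientBlocksError` reports the smallest dataset that would have satisfied the requested parameters, computed as ecK blocks' worth of bytes. A small, self-contained sketch of that arithmetic (values assumed for illustration; the real code uses `NBytes` and the manifest's `blockSize`):

    let
      ecK = 3                    # requested data shards
      blockSize = 65_536         # bytes per block (assumed value)
      minSize = ecK * blockSize  # 196_608 bytes: smaller datasets cannot be encoded
    echo "need at least ", minSize, " bytes"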
@@ -29,7 +29,7 @@ import ../logutils
 # TODO: Manifest should be reworked to more concrete types,
 # perhaps using inheritance
 type
-  Manifest* = object of RootObj
+  Manifest* = ref object of RootObj
     treeCid {.serialize.}: Cid        # Root of the merkle tree
     datasetSize {.serialize.}: NBytes # Total size of all blocks
     blockSize {.serialize.}: NBytes   # Size of each contained block (might not be needed if blocks are len-prefixed)
@@ -322,7 +322,7 @@ func new*(
     protected: true,
     ecK: manifest.ecK,
     ecM: manifest.ecM,
-    originalTreeCid: manifest.treeCid,
+    originalTreeCid: manifest.originalTreeCid,
     originalDatasetSize: manifest.originalDatasetSize,
     protectedStrategy: manifest.protectedStrategy,
     verifiable: true,
@@ -240,14 +240,14 @@ proc streamEntireDataset(
     self: CodexNodeRef,
     manifest: Manifest,
     manifestCid: Cid,
-): ?!LPStream =
+): Future[?!LPStream] {.async.} =
   ## Streams the contents of the entire dataset described by the manifest.
   ##
   trace "Retrieving blocks from manifest", manifestCid

   if manifest.protected:
     # Retrieve, decode and save to the local store all EC groups
-    proc erasureJob(): Future[void] {.async.} =
+    proc erasureJob(): Future[?!void] {.async.} =
       try:
         # Spawn an erasure decoding job
         let
@@ -257,11 +257,20 @@ proc streamEntireDataset(
           leoDecoderProvider,
           self.taskpool)
         without _ =? (await erasure.decode(manifest)), error:
-          trace "Unable to erasure decode manifest", manifestCid, exc = error.msg
-      except CatchableError as exc:
-        trace "Exception decoding manifest", manifestCid, exc = exc.msg
-
-    asyncSpawn erasureJob()
+          error "Unable to erasure decode manifest", manifestCid, exc = error.msg
+          return failure(error)
+
+        return success()
+      # --------------------------------------------------------------------------
+      # FIXME this is a HACK so that the node does not crash during the workshop.
+      # We should NOT catch Defect.
+      except Exception as exc:
+        trace "Exception decoding manifest", manifestCid, exc = exc.msg
+        return failure(exc.msg)
+      # --------------------------------------------------------------------------
+
+    if err =? (await erasureJob()).errorOption:
+      return failure(err)

   # Retrieve all blocks of the dataset sequentially from the local store or network
   trace "Creating store stream for manifest", manifestCid
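Note: `erasureJob` now returns `Future[?!void]` and the caller awaits it and propagates failures, instead of fire-and-forget via `asyncSpawn`. A hedged, self-contained sketch of that pattern (assumes chronos and questionable are available, as in this codebase; names here are illustrative):

    import pkg/chronos
    import pkg/questionable/results

    proc job(): Future[?!void] {.async.} =
      return failure("decode failed")          # report the error instead of only logging it

    proc caller(): Future[?!string] {.async.} =
      if err =? (await job()).errorOption:     # same pattern as the diff above
        return failure(err)
      return success("stream")

    echo (waitFor caller()).isFailure          # true: the failure reached the caller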
@@ -283,7 +292,7 @@ proc retrieve*(

     return await self.streamSingleBlock(cid)

-  self.streamEntireDataset(manifest, cid)
+  await self.streamEntireDataset(manifest, cid)

 proc store*(
     self: CodexNodeRef,
|
|||||||
trace "Unable to fetch manifest for cid"
|
trace "Unable to fetch manifest for cid"
|
||||||
return failure error
|
return failure error
|
||||||
|
|
||||||
|
# ----------------------------------------------------------------------------
|
||||||
|
# FIXME this is a BAND-AID to address
|
||||||
|
# https://github.com/codex-storage/nim-codex/issues/852 temporarily for the
|
||||||
|
# workshop. Remove this once we get that fixed.
|
||||||
|
if manifest.blocksCount.uint == ecK:
|
||||||
|
return failure("Cannot setup slots for a dataset with ecK == numBlocks. Please use a larger file or a different combination of `nodes` and `tolerance`.")
|
||||||
|
# ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
# Erasure code the dataset according to provided parameters
|
# Erasure code the dataset according to provided parameters
|
||||||
let
|
let
|
||||||
erasure = Erasure.new(
|
erasure = Erasure.new(
|
||||||
|
@@ -32,6 +32,7 @@ import ../node
 import ../blocktype
 import ../conf
 import ../contracts
+import ../erasure/erasure
 import ../manifest
 import ../streams/asyncstreamwrapper
 import ../stores
|
|||||||
let nodes = params.nodes |? 1
|
let nodes = params.nodes |? 1
|
||||||
let tolerance = params.tolerance |? 0
|
let tolerance = params.tolerance |? 0
|
||||||
|
|
||||||
if (nodes - tolerance) < 1:
|
# prevent underflow
|
||||||
return RestApiResponse.error(Http400, "Tolerance cannot be greater or equal than nodes (nodes - tolerance)")
|
if tolerance > nodes:
|
||||||
|
return RestApiResponse.error(Http400, "Invalid parameters: `tolerance` cannot be greater than `nodes`")
|
||||||
|
|
||||||
|
let ecK = nodes - tolerance
|
||||||
|
let ecM = tolerance # for readability
|
||||||
|
|
||||||
|
# ensure leopard constrainst of 1 < K ≥ M
|
||||||
|
if ecK <= 1 or ecK < ecM:
|
||||||
|
return RestApiResponse.error(Http400, "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`")
|
||||||
|
|
||||||
without expiry =? params.expiry:
|
without expiry =? params.expiry:
|
||||||
return RestApiResponse.error(Http400, "Expiry required")
|
return RestApiResponse.error(Http400, "Expiry required")
|
||||||
@ -451,6 +460,11 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
|
|||||||
params.collateral,
|
params.collateral,
|
||||||
expiry), error:
|
expiry), error:
|
||||||
|
|
||||||
|
if error of InsufficientBlocksError:
|
||||||
|
return RestApiResponse.error(Http400,
|
||||||
|
"Dataset too small for erasure parameters, need at least " &
|
||||||
|
$(ref InsufficientBlocksError)(error).minSize.int & " bytes")
|
||||||
|
|
||||||
return RestApiResponse.error(Http500, error.msg)
|
return RestApiResponse.error(Http500, error.msg)
|
||||||
|
|
||||||
return RestApiResponse.response(purchaseId.toHex)
|
return RestApiResponse.response(purchaseId.toHex)
|
||||||
|
@@ -9,17 +9,14 @@

 {.push raises: [].}

-import std/sequtils
+import std/sugar

 import pkg/chronos
 import pkg/questionable/results
 import pkg/circomcompat
-import pkg/poseidon2/io

 import ../../types
 import ../../../stores
-import ../../../merkletree
-import ../../../codextypes
 import ../../../contracts

 import ./converters
@@ -39,6 +36,41 @@ type
     backendCfg : ptr CircomBn254Cfg
     vkp*       : ptr CircomKey

+  NormalizedProofInputs*[H] {.borrow: `.`.} = distinct ProofInputs[H]
+
+func normalizeInput*[H](self: CircomCompat, input: ProofInputs[H]):
+  NormalizedProofInputs[H] =
+  ## Parameters in CIRCOM circuits are statically sized and must be properly
+  ## padded before they can be passed onto the circuit. This function takes
+  ## variable length parameters and performs that padding.
+  ##
+  ## The output from this function can be JSON-serialized and used as direct
+  ## inputs to the CIRCOM circuit for testing and debugging when one wishes
+  ## to bypass the Rust FFI.
+
+  let normSamples = collect:
+    for sample in input.samples:
+      var merklePaths = sample.merklePaths
+      merklePaths.setLen(self.slotDepth)
+      Sample[H](
+        cellData: sample.cellData,
+        merklePaths: merklePaths
+      )
+
+  var normSlotProof = input.slotProof
+  normSlotProof.setLen(self.datasetDepth)
+
+  NormalizedProofInputs[H] ProofInputs[H](
+    entropy: input.entropy,
+    datasetRoot: input.datasetRoot,
+    slotIndex: input.slotIndex,
+    slotRoot: input.slotRoot,
+    nCellsPerSlot: input.nCellsPerSlot,
+    nSlotsPerDataSet: input.nSlotsPerDataSet,
+    slotProof: normSlotProof,
+    samples: normSamples
+  )
+
 proc release*(self: CircomCompat) =
   ## Release the ctx
   ##
@@ -49,27 +81,20 @@ proc release*(self: CircomCompat) =
   if not isNil(self.vkp):
     self.vkp.unsafeAddr.release_key()

-proc prove*[H](
+proc prove[H](
   self: CircomCompat,
-  input: ProofInputs[H]): ?!CircomProof =
-  ## Encode buffers using a ctx
-  ##
+  input: NormalizedProofInputs[H]): ?!CircomProof =

-  # NOTE: All inputs are statically sized per circuit
-  # and adjusted accordingly right before being passed
-  # to the circom ffi - `setLen` is used to adjust the
-  # sequence length to the correct size which also 0 pads
-  # to the correct length
   doAssert input.samples.len == self.numSamples,
     "Number of samples does not match"

   doAssert input.slotProof.len <= self.datasetDepth,
-    "Number of slot proofs does not match"
+    "Slot proof is too deep - dataset has more slots than what we can handle?"

   doAssert input.samples.allIt(
     block:
       (it.merklePaths.len <= self.slotDepth + self.blkDepth and
-        it.cellData.len <= self.cellElms * 32)), "Merkle paths length does not match"
+        it.cellData.len == self.cellElms)), "Merkle paths too deep or cells too big for circuit"

   # TODO: All parameters should match circom's static parametter
   var
@@ -116,8 +141,7 @@ proc prove*[H](
   var
     slotProof = input.slotProof.mapIt( it.toBytes ).concat

-  slotProof.setLen(self.datasetDepth) # zero pad inputs to correct size
+  doAssert(slotProof.len == self.datasetDepth)

   # arrays are always flattened
   if ctx.pushInputU256Array(
     "slotProof".cstring,
@@ -128,16 +152,14 @@ proc prove*[H](
   for s in input.samples:
     var
       merklePaths = s.merklePaths.mapIt( it.toBytes )
-      data = s.cellData
+      data = s.cellData.mapIt( @(it.toBytes) ).concat

-    merklePaths.setLen(self.slotDepth) # zero pad inputs to correct size
     if ctx.pushInputU256Array(
       "merklePaths".cstring,
       merklePaths[0].addr,
       uint (merklePaths[0].len * merklePaths.len)) != ERR_OK:
       return failure("Failed to push merkle paths")

-    data.setLen(self.cellElms * 32) # zero pad inputs to correct size
     if ctx.pushInputU256Array(
       "cellData".cstring,
       data[0].addr,
@@ -162,6 +184,12 @@ proc prove*[H](

   success proof

+proc prove*[H](
+  self: CircomCompat,
+  input: ProofInputs[H]): ?!CircomProof =
+
+  self.prove(self.normalizeInput(input))
+
 proc verify*[H](
   self: CircomCompat,
   proof: CircomProof,
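Note: the zero-padding that used to live inline in `prove` moved into `normalizeInput`, because CIRCOM parameters are statically sized; variable-length inputs are padded with `setLen` before reaching the circuit, and `prove` now only asserts the sizes. A minimal sketch of the padding idea (sizes assumed for illustration, plain ints standing in for field elements):

    var merklePaths = @[1, 2, 3]     # stand-in for field elements
    let slotDepth = 5                # static length expected by the circuit (assumed)
    merklePaths.setLen(slotDepth)    # zero-extends: becomes @[1, 2, 3, 0, 0]
    assert merklePaths.len == slotDepth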
@@ -38,7 +38,7 @@ type
 func getCell*[T, H](
   self: DataSampler[T, H],
   blkBytes: seq[byte],
-  blkCellIdx: Natural): seq[byte] =
+  blkCellIdx: Natural): seq[H] =

   let
     cellSize = self.builder.cellSize.uint64
@@ -47,7 +47,7 @@ func getCell*[T, H](

   doAssert (dataEnd - dataStart) == cellSize, "Invalid cell size"

-  toInputData[H](blkBytes[dataStart ..< dataEnd])
+  blkBytes[dataStart ..< dataEnd].elements(H).toSeq()

 proc getSample*[T, H](
   self: DataSampler[T, H],
@@ -7,23 +7,13 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.

-import std/sugar
 import std/bitops
-import std/sequtils

 import pkg/questionable/results
-import pkg/poseidon2
-import pkg/poseidon2/io

 import pkg/constantine/math/arithmetic
-
-import pkg/constantine/math/io/io_fields

 import ../../merkletree

-func toInputData*[H](data: seq[byte]): seq[byte] =
-  return toSeq(data.elements(H)).mapIt( @(it.toBytes) ).concat
-
 func extractLowBits*[n: static int](elm: BigInt[n], k: int): uint64 =
   doAssert( k > 0 and k <= 64 )
   var r = 0'u64
@@ -39,6 +29,7 @@ func extractLowBits(fld: Poseidon2Hash, k: int): uint64 =
   return extractLowBits(elm, k);

 func floorLog2*(x : int) : int =
+  doAssert ( x > 0 )
   var k = -1
   var y = x
   while (y > 0):
@@ -47,10 +38,8 @@ func floorLog2*(x : int) : int =
   return k

 func ceilingLog2*(x : int) : int =
-  if (x == 0):
-    return -1
-  else:
-    return (floorLog2(x-1) + 1)
+  doAssert ( x > 0 )
+  return (floorLog2(x - 1) + 1)

 func toBlkInSlot*(cell: Natural, numCells: Natural): Natural =
   let log2 = ceilingLog2(numCells)
@@ -80,7 +69,7 @@ func cellIndices*(
   numCells: Natural, nSamples: Natural): seq[Natural] =

   var indices: seq[Natural]
-  while (indices.len < nSamples):
-    let idx = cellIndex(entropy, slotRoot, numCells, indices.len + 1)
-    indices.add(idx.Natural)
+  for i in 1..nSamples:
+    indices.add(cellIndex(entropy, slotRoot, numCells, i))
   indices
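Note: `floorLog2` and `ceilingLog2` now assert a positive argument, and `ceilingLog2(x)` is defined as `floorLog2(x - 1) + 1`. A self-contained re-implementation with a few worked values for illustration (not the module's exported code):

    func floorLog2(x: int): int =
      doAssert x > 0
      var k = -1
      var y = x
      while y > 0:
        inc k
        y = y shr 1
      k

    func ceilingLog2(x: int): int =
      doAssert x > 0
      floorLog2(x - 1) + 1

    assert floorLog2(8) == 3
    assert ceilingLog2(8) == 3    # exact power of two stays put
    assert ceilingLog2(9) == 4    # anything above rounds up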
@@ -9,7 +9,7 @@

 type
   Sample*[H] = object
-    cellData*: seq[byte]
+    cellData*: seq[H]
     merklePaths*: seq[H]

   PublicInputs*[H] = object
@@ -24,5 +24,5 @@ type
     slotRoot*: H
     nCellsPerSlot*: Natural
     nSlotsPerDataSet*: Natural
-    slotProof*: seq[H]
-    samples*: seq[Sample[H]]
+    slotProof*: seq[H] # inclusion proof that shows that the slot root (leaf) is part of the dataset (root)
+    samples*: seq[Sample[H]] # inclusion proofs which show that the selected cells (leafs) are part of the slot (roots)
@@ -121,6 +121,9 @@ switch("define", "ctt_asm=false")
 # Allow the use of old-style case objects for nim config compatibility
 switch("define", "nimOldCaseObjects")

+# Enable compat mode for Chronos V4
+switch("define", "chronosHandleException")
+
 # begin Nimble config (version 1)
 when system.fileExists("nimble.paths"):
   include "nimble.paths"
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -323,8 +323,7 @@ asyncchecksuite "Sales":
         slot: UInt256,
         onBatch: BatchProc): Future[?!void] {.async.} =
       let blk = bt.Block.new( @[1.byte] ).get
-      onBatch( blk.repeat(request.ask.slotSize.truncate(int)) )
-      return success()
+      await onBatch( blk.repeat(request.ask.slotSize.truncate(int)) )

     createAvailability()
     await market.requestStorage(request)
@@ -337,8 +336,8 @@ asyncchecksuite "Sales":
         onBatch: BatchProc): Future[?!void] {.async.} =
       slotIndex = slot
       let blk = bt.Block.new( @[1.byte] ).get
-      onBatch(@[ blk ])
-      return success()
+      await onBatch(@[ blk ])

     let sold = newFuture[void]()
     sales.onSale = proc(request: StorageRequest, slotIndex: UInt256) =
       sold.complete()
@@ -524,7 +524,7 @@ suite "Slot queue":
       request.ask,
       request.expiry,
       seen = true)
-    queue.push(item)
+    check queue.push(item).isOk
     check eventually queue.paused
     check onProcessSlotCalledWith.len == 0

@@ -534,7 +534,7 @@ suite "Slot queue":

     let request = StorageRequest.example
     var items = SlotQueueItem.init(request)
-    queue.push(items)
+    check queue.push(items).isOk
     # check all items processed
     check eventually queue.len == 0

@@ -546,7 +546,7 @@ suite "Slot queue":
       request.expiry,
       seen = true)
     check queue.paused
-    queue.push(item0)
+    check queue.push(item0).isOk
     check queue.paused

   test "paused queue waits for unpause before continuing processing":
@@ -558,7 +558,7 @@ suite "Slot queue":
       seen = false)
     check queue.paused
     # push causes unpause
-    queue.push(item)
+    check queue.push(item).isOk
     # check all items processed
     check eventually onProcessSlotCalledWith == @[
       (item.requestId, item.slotIndex),
@@ -576,8 +576,8 @@ suite "Slot queue":
       request.ask,
       request.expiry,
       seen = true)
-    queue.push(item0)
-    queue.push(item1)
+    check queue.push(item0).isOk
+    check queue.push(item1).isOk
     check queue[0].seen
     check queue[1].seen
@@ -17,21 +17,6 @@ import pkg/codex/utils/json

 export types

-func fromCircomData*(_: type Poseidon2Hash, cellData: seq[byte]): seq[Poseidon2Hash] =
-  var
-    pos = 0
-    cellElms: seq[Bn254Fr]
-  while pos < cellData.len:
-    var
-      step = 32
-      offset = min(pos + step, cellData.len)
-      data = cellData[pos..<offset]
-    let ff = Bn254Fr.fromBytes(data.toArray32).get
-    cellElms.add(ff)
-    pos += data.len
-
-  cellElms
-
 func toJsonDecimal*(big: BigInt[254]): string =
   let s = big.toDecimal.strip( leading = true, trailing = false, chars = {'0'} )
   if s.len == 0: "0" else: s
@@ -78,13 +63,16 @@ func toJson*(input: ProofInputs[Poseidon2Hash]): JsonNode =
     "slotRoot": input.slotRoot.toDecimal,
     "slotProof": input.slotProof.mapIt( it.toBig.toJsonDecimal ),
     "cellData": input.samples.mapIt(
-      toSeq( it.cellData.elements(Poseidon2Hash) ).mapIt( it.toBig.toJsonDecimal )
+      it.cellData.mapIt( it.toBig.toJsonDecimal )
     ),
     "merklePaths": input.samples.mapIt(
       it.merklePaths.mapIt( it.toBig.toJsonDecimal )
     )
   }

+func toJson*(input: NormalizedProofInputs[Poseidon2Hash]): JsonNode =
+  toJson(ProofInputs[Poseidon2Hash](input))
+
 func jsonToProofInput*(_: type Poseidon2Hash, inputJson: JsonNode): ProofInputs[Poseidon2Hash] =
   let
     cellData =
@@ -93,10 +81,12 @@ func jsonToProofInput*(_: type Poseidon2Hash, inputJson: JsonNode): ProofInputs[
         block:
           var
             big: BigInt[256]
-            data = newSeq[byte](big.bits div 8)
+            hash: Poseidon2Hash
+            data: array[32, byte]
           assert bool(big.fromDecimal( it.str ))
-          data.marshal(big, littleEndian)
-          data
+          assert data.marshal(big, littleEndian)
+
+          Poseidon2Hash.fromBytes(data).get
       ).concat # flatten out elements
     )
@@ -58,7 +58,7 @@ suite "Test Sampler - control samples":
         proofInput.nCellsPerSlot,
         sample.merklePaths[5..<9]).tryGet

-      cellData = Poseidon2Hash.fromCircomData(sample.cellData)
+      cellData = sample.cellData
       cellLeaf = Poseidon2Hash.spongeDigest(cellData, rate = 2).tryGet
      slotLeaf = cellProof.reconstructRoot(cellLeaf).tryGet

@@ -158,7 +158,7 @@ suite "Test Sampler":
         nSlotCells,
         sample.merklePaths[5..<sample.merklePaths.len]).tryGet

-      cellData = Poseidon2Hash.fromCircomData(sample.cellData)
+      cellData = sample.cellData
       cellLeaf = Poseidon2Hash.spongeDigest(cellData, rate = 2).tryGet
       slotLeaf = cellProof.reconstructRoot(cellLeaf).tryGet
@@ -24,23 +24,19 @@ import ./backends/helpers

 suite "Test Prover":
   let
-    slotId = 1
     samples = 5
-    ecK = 3
-    ecM = 2
-    numDatasetBlocks = 8
     blockSize = DefaultBlockSize
     cellSize = DefaultCellSize
     repoTmp = TempLevelDb.new()
     metaTmp = TempLevelDb.new()
+    r1cs = "tests/circuits/fixtures/proof_main.r1cs"
+    wasm = "tests/circuits/fixtures/proof_main.wasm"
+    circomBackend = CircomCompat.init(r1cs, wasm)
+    challenge = 1234567.toF.toBytes.toArray32

   var
-    datasetBlocks: seq[bt.Block]
     store: BlockStore
-    manifest: Manifest
-    protected: Manifest
-    verifiable: Manifest
-    sampler: Poseidon2Sampler
+    prover: Prover

   setup:
     let
@@ -48,14 +44,7 @@ suite "Test Prover":
       metaDs = metaTmp.newDb()

     store = RepoStore.new(repoDs, metaDs)
-
-    (manifest, protected, verifiable) =
-      await createVerifiableManifest(
-        store,
-        numDatasetBlocks,
-        ecK, ecM,
-        blockSize,
-        cellSize)
+    prover = Prover.new(store, circomBackend, samples)

   teardown:
     await repoTmp.destroyDb()
@@ -63,13 +52,41 @@ suite "Test Prover":

   test "Should sample and prove a slot":
     let
-      r1cs = "tests/circuits/fixtures/proof_main.r1cs"
-      wasm = "tests/circuits/fixtures/proof_main.wasm"
-
-      circomBackend = CircomCompat.init(r1cs, wasm)
-      prover = Prover.new(store, circomBackend, samples)
-      challenge = 1234567.toF.toBytes.toArray32
-      (inputs, proof) = (await prover.prove(1, verifiable, challenge)).tryGet
+      (_, _, verifiable) =
+        await createVerifiableManifest(
+          store,
+          8, # number of blocks in the original dataset (before EC)
+          5, # ecK
+          3, # ecM
+          blockSize,
+          cellSize)
+
+    let
+      (inputs, proof) = (
+        await prover.prove(1, verifiable, challenge)).tryGet
+
+    check:
+      (await prover.verify(proof, inputs)).tryGet == true
+
+  test "Should generate valid proofs when slots consist of single blocks":
+
+    # To get single-block slots, we just need to set the number of blocks in
+    # the original dataset to be the same as ecK. The total number of blocks
+    # after generating random data for parity will be ecK + ecM, which will
+    # match the number of slots.
+    let
+      (_, _, verifiable) =
+        await createVerifiableManifest(
+          store,
+          2, # number of blocks in the original dataset (before EC)
+          2, # ecK
+          1, # ecM
+          blockSize,
+          cellSize)
+
+    let
+      (inputs, proof) = (
+        await prover.prove(1, verifiable, challenge)).tryGet

     check:
       (await prover.verify(proof, inputs)).tryGet == true
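Note: the new prover test relies on the comment above it: making the original block count equal to ecK yields exactly one block per slot. A quick, assumed arithmetic sketch of why that holds for the values used in the test (the slot/block accounting here is my reading of the comment, not code from the repo):

    let
      originalBlocks = 2
      ecK = 2
      ecM = 1
      groups = originalBlocks div ecK              # 1 erasure-coding group
      totalBlocks = originalBlocks + groups * ecM  # 3 blocks after adding parity
      slots = ecK + ecM                            # 3 slots
    assert totalBlocks div slots == 1              # one block per slot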
@@ -17,6 +17,7 @@ import pkg/taskpools

 import ../asynctest
 import ./helpers
+import ./examples

 suite "Erasure encode/decode":
   const BlockSize = 1024'nb
@@ -232,3 +233,41 @@ suite "Erasure encode/decode":
     let encoded = await encode(buffers, parity)

     discard (await erasure.decode(encoded)).tryGet()
+
+  test "Should handle verifiable manifests":
+    const
+      buffers = 20
+      parity = 10
+
+    let
+      encoded = await encode(buffers, parity)
+      slotCids = collect(newSeq):
+        for i in 0..<encoded.numSlots: Cid.example
+
+      verifiable = Manifest.new(encoded, Cid.example, slotCids).tryGet()
+
+      decoded = (await erasure.decode(verifiable)).tryGet()
+
+    check:
+      decoded.treeCid == manifest.treeCid
+      decoded.treeCid == verifiable.originalTreeCid
+      decoded.blocksCount == verifiable.originalBlocksCount
+
+  for i in 1..5:
+    test "Should encode/decode using various parameters " & $i & "/5":
+      let
+        blockSize = rng.sample(@[1, 2, 4, 8, 16, 32, 64].mapIt(it.KiBs))
+        datasetSize = 1.MiBs
+        ecK = 10.Natural
+        ecM = 10.Natural
+
+      let
+        chunker = RandomChunker.new(rng, size = datasetSize, chunkSize = blockSize)
+        manifest = await storeDataGetManifest(store, chunker)
+        encoded = (await erasure.encode(manifest, ecK, ecM)).tryGet()
+        decoded = (await erasure.decode(encoded)).tryGet()
+
+      check:
+        decoded.treeCid == manifest.treeCid
+        decoded.treeCid == encoded.originalTreeCid
+        decoded.blocksCount == encoded.originalBlocksCount
@@ -256,7 +256,7 @@ ethersuite "On-Chain Market":
       receivedIds.add(requestId)

     let subscription = await market.subscribeRequestCancelled(request.id, onRequestCancelled)
-    advanceToCancelledRequest(otherRequest) # shares expiry with otherRequest
+    await advanceToCancelledRequest(otherRequest) # shares expiry with otherRequest
     await market.withdrawFunds(otherRequest.id)
     check receivedIds.len == 0
     await market.withdrawFunds(request.id)
@@ -25,5 +25,5 @@ template ethersuite*(name, body) =

     body

-export unittest
+export asynctest
 export ethers except `%`
@@ -2,8 +2,9 @@ import pkg/chronos

 # Allow multiple setups and teardowns in a test suite
 template asyncmultisetup* =
-  var setups: seq[proc: Future[void] {.gcsafe.}]
-  var teardowns: seq[proc: Future[void] {.gcsafe.}]
+  var setups: seq[proc: Future[void].Raising([AsyncExceptionError]) {.gcsafe.}]
+  var teardowns: seq[
+    proc: Future[void].Raising([AsyncExceptionError]) {.gcsafe.}]

   setup:
     for setup in setups:
@@ -14,10 +15,12 @@ template asyncmultisetup* =
       await teardown()

   template setup(setupBody) {.inject, used.} =
-    setups.add(proc {.async.} = setupBody)
+    setups.add(proc {.async: (
+      handleException: true, raises: [AsyncExceptionError]).} = setupBody)

   template teardown(teardownBody) {.inject, used.} =
-    teardowns.insert(proc {.async.} = teardownBody)
+    teardowns.insert(proc {.async: (
+      handleException: true, raises: [AsyncExceptionError]).} = teardownBody)

 template multisetup* =
   var setups: seq[proc() {.gcsafe.}]
@@ -32,7 +35,8 @@ template multisetup* =
       teardown()

   template setup(setupBody) {.inject, used.} =
-    setups.add(proc = setupBody)
+    let setupProc = proc = setupBody
+    setups.add(setupProc)

   template teardown(teardownBody) {.inject, used.} =
     teardowns.insert(proc = teardownBody)
@@ -4,6 +4,7 @@ import std/strutils
 from pkg/libp2p import Cid, `$`, init
 import pkg/stint
 import pkg/questionable/results
+import pkg/chronos/apps/http/[httpserver, shttpserver, httpclient]
 import pkg/codex/logutils
 import pkg/codex/rest/json
 import pkg/codex/purchasing
@@ -15,9 +16,16 @@ export purchasing
 type CodexClient* = ref object
   http: HttpClient
   baseurl: string
+  session: HttpSessionRef
+
+type CodexClientError* = object of CatchableError

 proc new*(_: type CodexClient, baseurl: string): CodexClient =
-  CodexClient(http: newHttpClient(), baseurl: baseurl)
+  CodexClient(
+    http: newHttpClient(),
+    baseurl: baseurl,
+    session: HttpSessionRef.new({HttpClientFlag.Http11Pipeline})
+  )

 proc info*(client: CodexClient): ?!JsonNode =
   let url = client.baseurl & "/debug/info"
@@ -45,6 +53,23 @@ proc download*(client: CodexClient, cid: Cid, local = false): ?!string =

   success response.body

+proc downloadBytes*(
+  client: CodexClient,
+  cid: Cid,
+  local = false): Future[?!seq[byte]] {.async.} =
+
+  let uri = parseUri(
+    client.baseurl & "/data/" & $cid &
+    (if local: "" else: "/network")
+  )
+
+  let (status, bytes) = await client.session.fetch(uri)
+
+  if status != 200:
+    return failure("fetch failed with status " & $status)
+
+  success bytes
+
 proc list*(client: CodexClient): ?!RestContentList =
   let url = client.baseurl & "/data"
   let response = client.http.get(url)
@@ -71,7 +96,7 @@ proc requestStorageRaw*(
     proofProbability: UInt256,
     collateral: UInt256,
     expiry: uint = 0,
-    nodes: uint = 1,
+    nodes: uint = 2,
     tolerance: uint = 0
 ): Response =

@@ -100,7 +125,7 @@ proc requestStorage*(
     proofProbability: UInt256,
     expiry: uint,
     collateral: UInt256,
-    nodes: uint = 1,
+    nodes: uint = 2,
     tolerance: uint = 0
 ): ?!PurchaseId =
   ## Call request storage REST endpoint
@@ -2,6 +2,7 @@ import pkg/questionable
 import pkg/questionable/results
 import pkg/confutils
 import pkg/chronicles
+import pkg/chronos/asyncproc
 import pkg/ethers
 import pkg/libp2p
 import std/os
@@ -3,6 +3,7 @@ import pkg/questionable/results
 import pkg/confutils
 import pkg/chronicles
 import pkg/chronos
+import pkg/chronos/asyncproc
 import pkg/stew/io2
 import std/os
 import std/sets
@@ -2,6 +2,7 @@ import pkg/questionable
 import pkg/questionable/results
 import pkg/confutils
 import pkg/chronicles
+import pkg/chronos/asyncproc
 import pkg/libp2p
 import std/os
 import std/strutils
tests/integration/testecbug.nim (new file): 60 lines
@@ -0,0 +1,60 @@
+from pkg/libp2p import Cid, init
+import ../examples
+import ./marketplacesuite
+import ./nodeconfigs
+import ./hardhatconfig
+
+marketplacesuite "Bug #821 - node crashes during erasure coding":
+
+  test "should be able to create storage request and download dataset",
+    NodeConfigs(
+      clients:
+        CodexConfigs.init(nodes=1)
+          # .debug() # uncomment to enable console log output.debug()
+          .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
+          .withLogTopics("node", "erasure", "marketplace", )
+          .some,
+
+      providers:
+        CodexConfigs.init(nodes=0)
+          # .debug() # uncomment to enable console log output
+          # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
+          # .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock")
+          .some,
+  ):
+    let reward = 400.u256
+    let duration = 10.periods
+    let collateral = 200.u256
+    let expiry = 5.periods
+    let data = await RandomChunker.example(blocks=8)
+    let client = clients()[0]
+    let clientApi = client.client
+
+    let cid = clientApi.upload(data).get
+
+    var requestId = none RequestId
+    proc onStorageRequested(event: StorageRequested) {.raises:[].} =
+      requestId = event.requestId.some
+
+    let subscription = await marketplace.subscribe(StorageRequested, onStorageRequested)
+
+    # client requests storage but requires multiple slots to host the content
+    let id = await clientApi.requestStorage(
+      cid,
+      duration=duration,
+      reward=reward,
+      expiry=expiry,
+      collateral=collateral,
+      nodes=3,
+      tolerance=1
+    )
+
+    check eventually(requestId.isSome, timeout=expiry.int * 1000)
+
+    let request = await marketplace.getRequest(requestId.get)
+    let cidFromRequest = Cid.init(request.content.cid).get()
+    let downloaded = await clientApi.downloadBytes(cidFromRequest, local = true)
+    check downloaded.isOk
+    check downloaded.get.toHex == data.toHex
+
+    await subscription.unsubscribe()
@@ -8,7 +8,8 @@ import ../examples
 twonodessuite "Purchasing", debug1 = false, debug2 = false:

   test "node handles storage request":
-    let cid = client1.upload("some file contents").get
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
     let id1 = client1.requestStorage(cid, duration=100.u256, reward=2.u256, proofProbability=3.u256, expiry=10, collateral=200.u256).get
     let id2 = client1.requestStorage(cid, duration=400.u256, reward=5.u256, proofProbability=6.u256, expiry=10, collateral=201.u256).get
     check id1 != id2
@@ -26,7 +27,7 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
       proofProbability=3.u256,
       expiry=30,
       collateral=200.u256,
-      nodes=2,
+      nodes=3,
       tolerance=1).get

     let request = client1.getPurchase(id).get.request.get
@@ -35,7 +36,7 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
     check request.ask.proofProbability == 3.u256
     check request.expiry == 30
     check request.ask.collateral == 200.u256
-    check request.ask.slots == 2'u64
+    check request.ask.slots == 3'u64
     check request.ask.maxSlotLoss == 1'u64

   # TODO: We currently do not support encoding single chunks
@@ -52,7 +53,8 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
   # check request.ask.maxSlotLoss == 1'u64

   test "node remembers purchase status after restart":
-    let cid = client1.upload("some file contents").get
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
     let id = client1.requestStorage(cid,
                                     duration=100.u256,
                                     reward=2.u256,
@@ -71,25 +73,12 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
     check request.ask.proofProbability == 3.u256
     check request.expiry == 30
     check request.ask.collateral == 200.u256
-    check request.ask.slots == 1'u64
+    check request.ask.slots == 2'u64
     check request.ask.maxSlotLoss == 0'u64

-  test "request storage fails if nodes and tolerance aren't correct":
-    let cid = client1.upload("some file contents").get
-    let responseBefore = client1.requestStorageRaw(cid,
-      duration=100.u256,
-      reward=2.u256,
-      proofProbability=3.u256,
-      expiry=30,
-      collateral=200.u256,
-      nodes=1,
-      tolerance=1)
-
-    check responseBefore.status == "400 Bad Request"
-    check responseBefore.body == "Tolerance cannot be greater or equal than nodes (nodes - tolerance)"
-
   test "node requires expiry and its value to be in future":
-    let cid = client1.upload("some file contents").get
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get

     let responseMissing = client1.requestStorageRaw(cid, duration=1.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256)
     check responseMissing.status == "400 Bad Request"
@@ -1,7 +1,9 @@
+import std/httpclient
 import std/sequtils
 from pkg/libp2p import `==`
 import pkg/codex/units
 import ./twonodes
+import ../examples

 twonodessuite "REST API", debug1 = false, debug2 = false:

@@ -36,3 +38,93 @@ twonodessuite "REST API", debug1 = false, debug2 = false:

     check:
       [cid1, cid2].allIt(it in list.content.mapIt(it.cid))
+
+  test "request storage fails for datasets that are too small":
+    let cid = client1.upload("some file contents").get
+    let response = client1.requestStorageRaw(cid, duration=10.u256, reward=2.u256, proofProbability=3.u256, nodes=2, collateral=200.u256, expiry=9)
+
+    check:
+      response.status == "400 Bad Request"
+      response.body == "Dataset too small for erasure parameters, need at least " & $(2*DefaultBlockSize.int) & " bytes"
+
+  test "request storage succeeds for sufficiently sized datasets":
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
+    let response = client1.requestStorageRaw(cid, duration=10.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256, expiry=9)
+
+    check:
+      response.status == "200 OK"
+
+  test "request storage fails if nodes and tolerance aren't correct":
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
+    let duration = 100.u256
+    let reward = 2.u256
+    let proofProbability = 3.u256
+    let expiry = 30.uint
+    let collateral = 200.u256
+    let ecParams = @[(1, 0), (1, 1), (2, 1), (3, 2), (3, 3)]
+
+    for ecParam in ecParams:
+      let (nodes, tolerance) = ecParam
+
+      var responseBefore = client1.requestStorageRaw(cid,
+        duration,
+        reward,
+        proofProbability,
+        collateral,
+        expiry,
+        nodes.uint,
+        tolerance.uint)
+
+      check responseBefore.status == "400 Bad Request"
+      check responseBefore.body == "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`"
+
+  test "request storage fails if tolerance > nodes (underflow protection)":
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
+    let duration = 100.u256
+    let reward = 2.u256
+    let proofProbability = 3.u256
+    let expiry = 30.uint
+    let collateral = 200.u256
+    let ecParams = @[(0, 1), (1, 2), (2, 3)]
+
+    for ecParam in ecParams:
+      let (nodes, tolerance) = ecParam
+
+      var responseBefore = client1.requestStorageRaw(cid,
+        duration,
+        reward,
+        proofProbability,
+        collateral,
+        expiry,
+        nodes.uint,
+        tolerance.uint)
+
+      check responseBefore.status == "400 Bad Request"
+      check responseBefore.body == "Invalid parameters: `tolerance` cannot be greater than `nodes`"
+
+  test "request storage succeeds if nodes and tolerance within range":
+    let data = await RandomChunker.example(blocks=2)
+    let cid = client1.upload(data).get
+    let duration = 100.u256
+    let reward = 2.u256
+    let proofProbability = 3.u256
+    let expiry = 30.uint
+    let collateral = 200.u256
+    let ecParams = @[(2, 0), (3, 1), (5, 2)]
+
+    for ecParam in ecParams:
+      let (nodes, tolerance) = ecParam
+
+      var responseBefore = client1.requestStorageRaw(cid,
+        duration,
+        reward,
+        proofProbability,
+        collateral,
+        expiry,
+        nodes.uint,
+        tolerance.uint)
+
+      check responseBefore.status == "200 OK"
@@ -6,5 +6,6 @@ import ./integration/testpurchasing
 import ./integration/testblockexpiration
 import ./integration/testmarketplace
 import ./integration/testproofs
+import ./integration/testecbug

 {.warning[UnusedImport]:off.}
vendor/codex-contracts-eth (vendored): 2 lines changed
@@ -1 +1 @@
-Subproject commit 57e8cd5013325f05e16833a5320b575d32a403f3
+Subproject commit 7ad26688a3b75b914d626e2623174a36f4425f51

vendor/nim-chronos (vendored): 2 lines changed
@@ -1 +1 @@
-Subproject commit 0277b65be2c7a365ac13df002fba6e172be55537
+Subproject commit 035ae11ba92369e7722e649db597e79134fd06b9

vendor/nim-codex-dht (vendored): 2 lines changed
@@ -1 +1 @@
-Subproject commit a7f14bc9b783f1b9e2d02cc85a338b1411058095
+Subproject commit 63822e83561ea1c6396d0f3eca583b038f5d44c6

vendor/nim-http-utils (vendored): 2 lines changed
@@ -1 +1 @@
-Subproject commit 3b491a40c60aad9e8d3407443f46f62511e63b18
+Subproject commit be57dbc902d36f37540897e98c69aa80f868cb45

vendor/nim-leopard (vendored): 2 lines changed
@@ -1 +1 @@
-Subproject commit 1a6f2ab7252426a6ac01482a68b75d0c3b134cf0
+Subproject commit 895ff24ca6615d577acfb11811cdd5465f596c97

vendor/nimbus-build-system (vendored): 2 lines changed
@@ -1 +1 @@
-Subproject commit fe9bc3f3759ae1add6bf8c899db2e75327f03782
+Subproject commit b2e1fb022f1ee800b439648953e92cc993c1264c