Merge branch 'master' into testapi-delete

Ben 2024-07-23 08:54:02 +02:00
commit cf9805e855
No known key found for this signature in database
GPG Key ID: 0F16E812E736C24B
41 changed files with 496 additions and 189 deletions

View File

@ -6,7 +6,7 @@ on:
env:
cache_nonce: 0 # Allows for easily busting actions/cache caches
nim_version: pinned, v1.6.16, v1.6.18
nim_version: pinned
jobs:
matrix:

View File

@ -8,10 +8,12 @@ on:
env:
cache_nonce: 0 # Allows for easily busting actions/cache caches
nim_version: v1.6.14
nim_version: pinned
rust_version: 1.78.0
binary_base: codex
upload_to_codex: false
build_dir: build
nim_flags: '-d:verify_circuit=true'
windows_libs: 'libstdc++-6.dll libgomp-1.dll libgcc_s_seh-1.dll libwinpthread-1.dll'
jobs:
# Matrix
@ -74,13 +76,21 @@ jobs:
- name: Release - Build
run: |
make NIMFLAGS="--out:${{ env.binary }}"
- name: Release - Upload binaries
make NIMFLAGS="--out:${{ env.build_dir }}/${{ env.binary }} ${{ env.nim_flags }}"
- name: Release - Libraries
run: |
if [[ "${{ matrix.os }}" == "windows" ]]; then
for lib in ${{ env.windows_libs }}; do
cp -v "${MINGW_PREFIX}/bin/${lib}" "${{ env.build_dir }}"
done
fi
- name: Release - Upload build artifacts
uses: actions/upload-artifact@v4
with:
name: release-${{ env.binary }}
path: ${{ env.binary }}
path: ${{ env.build_dir }}/
retention-days: 1
# Release
@ -99,46 +109,49 @@ jobs:
- name: Release - Compress and checksum
run: |
cd /tmp/release
prepare() {
# Checksum
checksum() {
arc="${1}"
sha256sum "${arc}" >"${arc}.sha256"
# Upload to Codex
if [[ "${{ env.upload_to_codex }}" == "true" ]]; then
codex_endpoints="${{ secrets.CODEX_ENDPOINTS }}"
codex_username="${{ secrets.CODEX_USERNAME }}"
codex_password="${{ secrets.CODEX_PASSWORD }}"
for endpoint in ${codex_endpoints}; do
echo "::add-mask::${endpoint}"
cid=$(curl -X POST \
"${endpoint}/api/codex/v1/data" \
-u "${codex_username}":"${codex_password}" \
-H "content-type: application/octet-stream" \
-T "${arc}")
echo "${cid}" >"${arc}.cid"
done
fi
}
# Compress and prepare
for file in *; do
if [[ "${file}" == *".exe"* ]]; then
arc="${file%.*}.zip"
zip "${arc}" "${file}"
rm -f "${file}"
prepare "${arc}"
else
arc="${file}.tar.gz"
tar cfz "${arc}" "${file}"
rm -f "${file}"
prepare "${arc}"
# Exclude libraries
if [[ "${file}" != *".dll"* ]]; then
if [[ "${file}" == *".exe"* ]]; then
# Windows - binary only
arc="${file%.*}.zip"
zip "${arc}" "${file}"
checksum "${arc}"
# Windows - binary and libs
arc="${file%.*}-libs.zip"
zip "${arc}" "${file}" ${{ env.windows_libs }}
rm -f "${file}" ${{ env.windows_libs }}
checksum "${arc}"
else
# Linux/macOS
arc="${file}.tar.gz"
chmod 755 "${file}"
tar cfz "${arc}" "${file}"
rm -f "${file}"
checksum "${arc}"
fi
fi
done
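# Resulting layout (sketch; exact names depend on ${{ env.binary }}): Windows
# builds yield <binary>.zip, <binary>-libs.zip and matching .sha256 files,
# while Linux/macOS builds yield <binary>.tar.gz and <binary>.tar.gz.sha256.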
- name: Release - Upload compressed artifacts and checksums
uses: actions/upload-artifact@v4
with:
name: archives-and-checksums
path: /tmp/release/
retention-days: 1
- name: Release
uses: softprops/action-gh-release@v2
if: startsWith(github.ref, 'refs/tags/')
with:
files: |
/tmp/release/*

View File

@ -15,7 +15,7 @@
#
# If NIM_COMMIT is set to "nimbusbuild", this will use the
# version pinned by nimbus-build-system.
PINNED_NIM_VERSION := v1.6.14
PINNED_NIM_VERSION := 38640664088251bbc88917b4bacfd86ec53014b8 # 1.6.21
ifeq ($(NIM_COMMIT),)
NIM_COMMIT := $(PINNED_NIM_VERSION)

View File

@ -105,8 +105,7 @@ proc advertiseQueueLoop(b: DiscoveryEngine) {.async.} =
trace "Begin iterating blocks..."
for c in cids:
if cid =? await c:
b.advertiseBlock(cid)
await sleepAsync(100.millis)
await b.advertiseBlock(cid)
trace "Iterating blocks finished."
await sleepAsync(b.advertiseLoopSleep)

View File

@ -31,7 +31,7 @@ import ./codextypes
export errors, logutils, units, codextypes
type
Block* = object of RootObj
Block* = ref object of RootObj
cid*: Cid
data*: seq[byte]

View File

@ -114,7 +114,14 @@ proc proxySpawnEncodeTask(
args: EncodeTaskArgs,
data: ref seq[seq[byte]]
): Flowvar[EncodeTaskResult] =
tp.spawn encodeTask(args, data[])
# FIXME Uncomment the code below after addressing an issue:
# https://github.com/codex-storage/nim-codex/issues/854
# tp.spawn encodeTask(args, data[])
let fv = EncodeTaskResult.newFlowVar
fv.readyWith(encodeTask(args, data[]))
return fv
proc proxySpawnDecodeTask(
tp: Taskpool,
@ -122,7 +129,14 @@ proc proxySpawnDecodeTask(
data: ref seq[seq[byte]],
parity: ref seq[seq[byte]]
): Flowvar[DecodeTaskResult] =
tp.spawn decodeTask(args, data[], parity[])
# FIXME Uncomment the code below after addressing an issue:
# https://github.com/codex-storage/nim-codex/issues/854
# tp.spawn decodeTask(args, data[], parity[])
let fv = DecodeTaskResult.newFlowVar
fv.readyWith(decodeTask(args, data[], parity[]))
return fv
proc awaitResult[T](signal: ThreadSignalPtr, handle: Flowvar[T]): Future[?!T] {.async.} =
await wait(signal)

View File

@ -27,6 +27,7 @@ import ../blocktype as bt
import ../utils
import ../utils/asynciter
import ../indexingstrategy
import ../errors
import pkg/stew/byteutils
@ -82,6 +83,13 @@ type
blocksCount: Natural
strategy: StrategyType
ErasureError* = object of CodexError
InsufficientBlocksError* = object of ErasureError
# Minimum size, in bytes, that the dataset must have had
# for the encoding request to have succeeded with the parameters
# provided.
minSize*: NBytes
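# Illustration (assumed values, not part of this change): with ecK = 2 and a
# 64 KiB block size, minSize = 2 * 65536 = 131072 bytes, i.e. any dataset
# smaller than two full blocks is rejected with this error.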
func indexToPos(steps, idx, step: int): int {.inline.} =
## Convert an index to a position in the encoded
## dataset
@ -236,11 +244,13 @@ proc init*(
ecK: Natural, ecM: Natural,
strategy: StrategyType): ?!EncodingParams =
if ecK > manifest.blocksCount:
return failure(
"Unable to encode manifest, not enough blocks, ecK = " &
let exc = (ref InsufficientBlocksError)(
msg: "Unable to encode manifest, not enough blocks, ecK = " &
$ecK &
", blocksCount = " &
$manifest.blocksCount)
$manifest.blocksCount,
minSize: ecK.NBytes * manifest.blockSize)
return failure(exc)
let
rounded = roundUp(manifest.blocksCount, ecK)

View File

@ -29,7 +29,7 @@ import ../logutils
# TODO: Manifest should be reworked to more concrete types,
# perhaps using inheritance
type
Manifest* = object of RootObj
Manifest* = ref object of RootObj
treeCid {.serialize.}: Cid # Root of the merkle tree
datasetSize {.serialize.}: NBytes # Total size of all blocks
blockSize {.serialize.}: NBytes # Size of each contained block (might not be needed if blocks are len-prefixed)
@ -322,7 +322,7 @@ func new*(
protected: true,
ecK: manifest.ecK,
ecM: manifest.ecM,
originalTreeCid: manifest.treeCid,
originalTreeCid: manifest.originalTreeCid,
originalDatasetSize: manifest.originalDatasetSize,
protectedStrategy: manifest.protectedStrategy,
verifiable: true,

View File

@ -240,14 +240,14 @@ proc streamEntireDataset(
self: CodexNodeRef,
manifest: Manifest,
manifestCid: Cid,
): ?!LPStream =
): Future[?!LPStream] {.async.} =
## Streams the contents of the entire dataset described by the manifest.
##
trace "Retrieving blocks from manifest", manifestCid
if manifest.protected:
# Retrieve, decode and save to the local store all EC groups
proc erasureJob(): Future[void] {.async.} =
proc erasureJob(): Future[?!void] {.async.} =
try:
# Spawn an erasure decoding job
let
@ -257,11 +257,20 @@ proc streamEntireDataset(
leoDecoderProvider,
self.taskpool)
without _ =? (await erasure.decode(manifest)), error:
trace "Unable to erasure decode manifest", manifestCid, exc = error.msg
except CatchableError as exc:
trace "Exception decoding manifest", manifestCid, exc = exc.msg
error "Unable to erasure decode manifest", manifestCid, exc = error.msg
return failure(error)
asyncSpawn erasureJob()
return success()
# --------------------------------------------------------------------------
# FIXME this is a HACK so that the node does not crash during the workshop.
# We should NOT catch Defect.
except Exception as exc:
trace "Exception decoding manifest", manifestCid, exc = exc.msg
return failure(exc.msg)
# --------------------------------------------------------------------------
if err =? (await erasureJob()).errorOption:
return failure(err)
# Retrieve all blocks of the dataset sequentially from the local store or network
trace "Creating store stream for manifest", manifestCid
@ -283,7 +292,7 @@ proc retrieve*(
return await self.streamSingleBlock(cid)
self.streamEntireDataset(manifest, cid)
await self.streamEntireDataset(manifest, cid)
proc store*(
self: CodexNodeRef,
@ -414,6 +423,15 @@ proc setupRequest(
trace "Unable to fetch manifest for cid"
return failure error
# ----------------------------------------------------------------------------
# FIXME this is a BAND-AID to address
# https://github.com/codex-storage/nim-codex/issues/852 temporarily for the
# workshop. Remove this once we get that fixed.
if manifest.blocksCount.uint == ecK:
return failure("Cannot setup slots for a dataset with ecK == numBlocks. Please use a larger file or a different combination of `nodes` and `tolerance`.")
# ----------------------------------------------------------------------------
# Erasure code the dataset according to provided parameters
let
erasure = Erasure.new(

View File

@ -32,6 +32,7 @@ import ../node
import ../blocktype
import ../conf
import ../contracts
import ../erasure/erasure
import ../manifest
import ../streams/asyncstreamwrapper
import ../stores
@ -432,8 +433,16 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
let nodes = params.nodes |? 1
let tolerance = params.tolerance |? 0
if (nodes - tolerance) < 1:
return RestApiResponse.error(Http400, "Tolerance cannot be greater or equal than nodes (nodes - tolerance)")
# prevent underflow
if tolerance > nodes:
return RestApiResponse.error(Http400, "Invalid parameters: `tolerance` cannot be greater than `nodes`")
let ecK = nodes - tolerance
let ecM = tolerance # for readability
# ensure Leopard constraints of 1 < K ≥ M
if ecK <= 1 or ecK < ecM:
return RestApiResponse.error(Http400, "Invalid parameters: parameters must satisfy `1 < (nodes - tolerance) ≥ tolerance`")
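# For example (mirroring the integration tests in this change): nodes=2,
# tolerance=0 and nodes=3, tolerance=1 are accepted, while nodes=2,
# tolerance=1 and nodes=3, tolerance=2 are rejected because ecK would be 1.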
without expiry =? params.expiry:
return RestApiResponse.error(Http400, "Expiry required")
@ -451,6 +460,11 @@ proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
params.collateral,
expiry), error:
if error of InsufficientBlocksError:
return RestApiResponse.error(Http400,
"Dataset too small for erasure parameters, need at least " &
$(ref InsufficientBlocksError)(error).minSize.int & " bytes")
return RestApiResponse.error(Http500, error.msg)
return RestApiResponse.response(purchaseId.toHex)

View File

@ -9,17 +9,14 @@
{.push raises: [].}
import std/sequtils
import std/sugar
import pkg/chronos
import pkg/questionable/results
import pkg/circomcompat
import pkg/poseidon2/io
import ../../types
import ../../../stores
import ../../../merkletree
import ../../../codextypes
import ../../../contracts
import ./converters
@ -39,6 +36,41 @@ type
backendCfg : ptr CircomBn254Cfg
vkp* : ptr CircomKey
NormalizedProofInputs*[H] {.borrow: `.`.} = distinct ProofInputs[H]
func normalizeInput*[H](self: CircomCompat, input: ProofInputs[H]):
NormalizedProofInputs[H] =
## Parameters in CIRCOM circuits are statically sized and must be properly
## padded before they can be passed onto the circuit. This function takes
## variable length parameters and performs that padding.
##
## The output from this function can be JSON-serialized and used as direct
## inputs to the CIRCOM circuit for testing and debugging when one wishes
## to bypass the Rust FFI.
let normSamples = collect:
for sample in input.samples:
var merklePaths = sample.merklePaths
merklePaths.setLen(self.slotDepth)
Sample[H](
cellData: sample.cellData,
merklePaths: merklePaths
)
var normSlotProof = input.slotProof
normSlotProof.setLen(self.datasetDepth)
NormalizedProofInputs[H] ProofInputs[H](
entropy: input.entropy,
datasetRoot: input.datasetRoot,
slotIndex: input.slotIndex,
slotRoot: input.slotRoot,
nCellsPerSlot: input.nCellsPerSlot,
nSlotsPerDataSet: input.nSlotsPerDataSet,
slotProof: normSlotProof,
samples: normSamples
)
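# Sketch (variable names hypothetical; `toJson` for NormalizedProofInputs is
# the test helper added in this commit): normalized inputs can be dumped for
# offline circuit debugging, bypassing the Rust FFI, e.g.
# let inputsJson = toJson(circom.normalizeInput(proofInputs))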
proc release*(self: CircomCompat) =
## Release the ctx
##
@ -49,27 +81,20 @@ proc release*(self: CircomCompat) =
if not isNil(self.vkp):
self.vkp.unsafeAddr.release_key()
proc prove*[H](
proc prove[H](
self: CircomCompat,
input: ProofInputs[H]): ?!CircomProof =
## Encode buffers using a ctx
##
input: NormalizedProofInputs[H]): ?!CircomProof =
# NOTE: All inputs are statically sized per circuit
# and adjusted accordingly right before being passed
# to the circom ffi - `setLen` is used to adjust the
# sequence length to the correct size which also 0 pads
# to the correct length
doAssert input.samples.len == self.numSamples,
"Number of samples does not match"
doAssert input.slotProof.len <= self.datasetDepth,
"Number of slot proofs does not match"
"Slot proof is too deep - dataset has more slots than what we can handle?"
doAssert input.samples.allIt(
block:
(it.merklePaths.len <= self.slotDepth + self.blkDepth and
it.cellData.len <= self.cellElms * 32)), "Merkle paths length does not match"
it.cellData.len == self.cellElms)), "Merkle paths too deep or cells too big for circuit"
# TODO: All parameters should match circom's static parameters
var
@ -116,8 +141,7 @@ proc prove*[H](
var
slotProof = input.slotProof.mapIt( it.toBytes ).concat
slotProof.setLen(self.datasetDepth) # zero pad inputs to correct size
doAssert(slotProof.len == self.datasetDepth)
# arrays are always flattened
if ctx.pushInputU256Array(
"slotProof".cstring,
@ -128,16 +152,14 @@ proc prove*[H](
for s in input.samples:
var
merklePaths = s.merklePaths.mapIt( it.toBytes )
data = s.cellData
data = s.cellData.mapIt( @(it.toBytes) ).concat
merklePaths.setLen(self.slotDepth) # zero pad inputs to correct size
if ctx.pushInputU256Array(
"merklePaths".cstring,
merklePaths[0].addr,
uint (merklePaths[0].len * merklePaths.len)) != ERR_OK:
return failure("Failed to push merkle paths")
data.setLen(self.cellElms * 32) # zero pad inputs to correct size
if ctx.pushInputU256Array(
"cellData".cstring,
data[0].addr,
@ -162,6 +184,12 @@ proc prove*[H](
success proof
proc prove*[H](
self: CircomCompat,
input: ProofInputs[H]): ?!CircomProof =
self.prove(self.normalizeInput(input))
proc verify*[H](
self: CircomCompat,
proof: CircomProof,

View File

@ -38,7 +38,7 @@ type
func getCell*[T, H](
self: DataSampler[T, H],
blkBytes: seq[byte],
blkCellIdx: Natural): seq[byte] =
blkCellIdx: Natural): seq[H] =
let
cellSize = self.builder.cellSize.uint64
@ -47,7 +47,7 @@ func getCell*[T, H](
doAssert (dataEnd - dataStart) == cellSize, "Invalid cell size"
toInputData[H](blkBytes[dataStart ..< dataEnd])
blkBytes[dataStart ..< dataEnd].elements(H).toSeq()
proc getSample*[T, H](
self: DataSampler[T, H],

View File

@ -7,23 +7,13 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
import std/sugar
import std/bitops
import std/sequtils
import pkg/questionable/results
import pkg/poseidon2
import pkg/poseidon2/io
import pkg/constantine/math/arithmetic
import pkg/constantine/math/io/io_fields
import ../../merkletree
func toInputData*[H](data: seq[byte]): seq[byte] =
return toSeq(data.elements(H)).mapIt( @(it.toBytes) ).concat
func extractLowBits*[n: static int](elm: BigInt[n], k: int): uint64 =
doAssert( k > 0 and k <= 64 )
var r = 0'u64
@ -39,6 +29,7 @@ func extractLowBits(fld: Poseidon2Hash, k: int): uint64 =
return extractLowBits(elm, k);
func floorLog2*(x : int) : int =
doAssert ( x > 0 )
var k = -1
var y = x
while (y > 0):
@ -47,10 +38,8 @@ func floorLog2*(x : int) : int =
return k
func ceilingLog2*(x : int) : int =
if (x == 0):
return -1
else:
return (floorLog2(x-1) + 1)
doAssert ( x > 0 )
return (floorLog2(x - 1) + 1)
func toBlkInSlot*(cell: Natural, numCells: Natural): Natural =
let log2 = ceilingLog2(numCells)
@ -80,7 +69,7 @@ func cellIndices*(
numCells: Natural, nSamples: Natural): seq[Natural] =
var indices: seq[Natural]
while (indices.len < nSamples):
let idx = cellIndex(entropy, slotRoot, numCells, indices.len + 1)
indices.add(idx.Natural)
for i in 1..nSamples:
indices.add(cellIndex(entropy, slotRoot, numCells, i))
indices

View File

@ -9,7 +9,7 @@
type
Sample*[H] = object
cellData*: seq[byte]
cellData*: seq[H]
merklePaths*: seq[H]
PublicInputs*[H] = object
@ -24,5 +24,5 @@ type
slotRoot*: H
nCellsPerSlot*: Natural
nSlotsPerDataSet*: Natural
slotProof*: seq[H]
samples*: seq[Sample[H]]
slotProof*: seq[H] # inclusion proof that shows that the slot root (leaf) is part of the dataset (root)
samples*: seq[Sample[H]] # inclusion proofs which show that the selected cells (leafs) are part of the slot (roots)

View File

@ -121,6 +121,9 @@ switch("define", "ctt_asm=false")
# Allow the use of old-style case objects for nim config compatibility
switch("define", "nimOldCaseObjects")
# Enable compat mode for Chronos V4
switch("define", "chronosHandleException")
# begin Nimble config (version 1)
when system.fileExists("nimble.paths"):
include "nimble.paths"

View File

@ -323,8 +323,7 @@ asyncchecksuite "Sales":
slot: UInt256,
onBatch: BatchProc): Future[?!void] {.async.} =
let blk = bt.Block.new( @[1.byte] ).get
onBatch( blk.repeat(request.ask.slotSize.truncate(int)) )
return success()
await onBatch( blk.repeat(request.ask.slotSize.truncate(int)) )
createAvailability()
await market.requestStorage(request)
@ -337,8 +336,8 @@ asyncchecksuite "Sales":
onBatch: BatchProc): Future[?!void] {.async.} =
slotIndex = slot
let blk = bt.Block.new( @[1.byte] ).get
onBatch(@[ blk ])
return success()
await onBatch(@[ blk ])
let sold = newFuture[void]()
sales.onSale = proc(request: StorageRequest, slotIndex: UInt256) =
sold.complete()

View File

@ -524,7 +524,7 @@ suite "Slot queue":
request.ask,
request.expiry,
seen = true)
queue.push(item)
check queue.push(item).isOk
check eventually queue.paused
check onProcessSlotCalledWith.len == 0
@ -534,7 +534,7 @@ suite "Slot queue":
let request = StorageRequest.example
var items = SlotQueueItem.init(request)
queue.push(items)
check queue.push(items).isOk
# check all items processed
check eventually queue.len == 0
@ -546,7 +546,7 @@ suite "Slot queue":
request.expiry,
seen = true)
check queue.paused
queue.push(item0)
check queue.push(item0).isOk
check queue.paused
test "paused queue waits for unpause before continuing processing":
@ -558,7 +558,7 @@ suite "Slot queue":
seen = false)
check queue.paused
# push causes unpause
queue.push(item)
check queue.push(item).isOk
# check all items processed
check eventually onProcessSlotCalledWith == @[
(item.requestId, item.slotIndex),
@ -576,8 +576,8 @@ suite "Slot queue":
request.ask,
request.expiry,
seen = true)
queue.push(item0)
queue.push(item1)
check queue.push(item0).isOk
check queue.push(item1).isOk
check queue[0].seen
check queue[1].seen

View File

@ -17,21 +17,6 @@ import pkg/codex/utils/json
export types
func fromCircomData*(_: type Poseidon2Hash, cellData: seq[byte]): seq[Poseidon2Hash] =
var
pos = 0
cellElms: seq[Bn254Fr]
while pos < cellData.len:
var
step = 32
offset = min(pos + step, cellData.len)
data = cellData[pos..<offset]
let ff = Bn254Fr.fromBytes(data.toArray32).get
cellElms.add(ff)
pos += data.len
cellElms
func toJsonDecimal*(big: BigInt[254]): string =
let s = big.toDecimal.strip( leading = true, trailing = false, chars = {'0'} )
if s.len == 0: "0" else: s
@ -78,13 +63,16 @@ func toJson*(input: ProofInputs[Poseidon2Hash]): JsonNode =
"slotRoot": input.slotRoot.toDecimal,
"slotProof": input.slotProof.mapIt( it.toBig.toJsonDecimal ),
"cellData": input.samples.mapIt(
toSeq( it.cellData.elements(Poseidon2Hash) ).mapIt( it.toBig.toJsonDecimal )
it.cellData.mapIt( it.toBig.toJsonDecimal )
),
"merklePaths": input.samples.mapIt(
it.merklePaths.mapIt( it.toBig.toJsonDecimal )
)
}
func toJson*(input: NormalizedProofInputs[Poseidon2Hash]): JsonNode =
toJson(ProofInputs[Poseidon2Hash](input))
func jsonToProofInput*(_: type Poseidon2Hash, inputJson: JsonNode): ProofInputs[Poseidon2Hash] =
let
cellData =
@ -93,10 +81,12 @@ func jsonToProofInput*(_: type Poseidon2Hash, inputJson: JsonNode): ProofInputs[
block:
var
big: BigInt[256]
data = newSeq[byte](big.bits div 8)
hash: Poseidon2Hash
data: array[32, byte]
assert bool(big.fromDecimal( it.str ))
data.marshal(big, littleEndian)
data
assert data.marshal(big, littleEndian)
Poseidon2Hash.fromBytes(data).get
).concat # flatten out elements
)

View File

@ -58,7 +58,7 @@ suite "Test Sampler - control samples":
proofInput.nCellsPerSlot,
sample.merklePaths[5..<9]).tryGet
cellData = Poseidon2Hash.fromCircomData(sample.cellData)
cellData = sample.cellData
cellLeaf = Poseidon2Hash.spongeDigest(cellData, rate = 2).tryGet
slotLeaf = cellProof.reconstructRoot(cellLeaf).tryGet
@ -158,7 +158,7 @@ suite "Test Sampler":
nSlotCells,
sample.merklePaths[5..<sample.merklePaths.len]).tryGet
cellData = Poseidon2Hash.fromCircomData(sample.cellData)
cellData = sample.cellData
cellLeaf = Poseidon2Hash.spongeDigest(cellData, rate = 2).tryGet
slotLeaf = cellProof.reconstructRoot(cellLeaf).tryGet

View File

@ -24,23 +24,19 @@ import ./backends/helpers
suite "Test Prover":
let
slotId = 1
samples = 5
ecK = 3
ecM = 2
numDatasetBlocks = 8
blockSize = DefaultBlockSize
cellSize = DefaultCellSize
repoTmp = TempLevelDb.new()
metaTmp = TempLevelDb.new()
r1cs = "tests/circuits/fixtures/proof_main.r1cs"
wasm = "tests/circuits/fixtures/proof_main.wasm"
circomBackend = CircomCompat.init(r1cs, wasm)
challenge = 1234567.toF.toBytes.toArray32
var
datasetBlocks: seq[bt.Block]
store: BlockStore
manifest: Manifest
protected: Manifest
verifiable: Manifest
sampler: Poseidon2Sampler
prover: Prover
setup:
let
@ -48,14 +44,7 @@ suite "Test Prover":
metaDs = metaTmp.newDb()
store = RepoStore.new(repoDs, metaDs)
(manifest, protected, verifiable) =
await createVerifiableManifest(
store,
numDatasetBlocks,
ecK, ecM,
blockSize,
cellSize)
prover = Prover.new(store, circomBackend, samples)
teardown:
await repoTmp.destroyDb()
@ -63,13 +52,41 @@ suite "Test Prover":
test "Should sample and prove a slot":
let
r1cs = "tests/circuits/fixtures/proof_main.r1cs"
wasm = "tests/circuits/fixtures/proof_main.wasm"
(_, _, verifiable) =
await createVerifiableManifest(
store,
8, # number of blocks in the original dataset (before EC)
5, # ecK
3, # ecM
blockSize,
cellSize)
circomBackend = CircomCompat.init(r1cs, wasm)
prover = Prover.new(store, circomBackend, samples)
challenge = 1234567.toF.toBytes.toArray32
(inputs, proof) = (await prover.prove(1, verifiable, challenge)).tryGet
let
(inputs, proof) = (
await prover.prove(1, verifiable, challenge)).tryGet
check:
(await prover.verify(proof, inputs)).tryGet == true
test "Should generate valid proofs when slots consist of single blocks":
# To get single-block slots, we just need to set the number of blocks in
# the original dataset to be the same as ecK. The total number of blocks
# after generating random data for parity will be ecK + ecM, which will
# match the number of slots.
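# For instance, with the parameters below: 2 original blocks, ecK = 2, ecM = 1
# gives 3 blocks in total, i.e. 3 slots of one block each.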
let
(_, _, verifiable) =
await createVerifiableManifest(
store,
2, # number of blocks in the original dataset (before EC)
2, # ecK
1, # ecM
blockSize,
cellSize)
let
(inputs, proof) = (
await prover.prove(1, verifiable, challenge)).tryGet
check:
(await prover.verify(proof, inputs)).tryGet == true

View File

@ -17,6 +17,7 @@ import pkg/taskpools
import ../asynctest
import ./helpers
import ./examples
suite "Erasure encode/decode":
const BlockSize = 1024'nb
@ -232,3 +233,41 @@ suite "Erasure encode/decode":
let encoded = await encode(buffers, parity)
discard (await erasure.decode(encoded)).tryGet()
test "Should handle verifiable manifests":
const
buffers = 20
parity = 10
let
encoded = await encode(buffers, parity)
slotCids = collect(newSeq):
for i in 0..<encoded.numSlots: Cid.example
verifiable = Manifest.new(encoded, Cid.example, slotCids).tryGet()
decoded = (await erasure.decode(verifiable)).tryGet()
check:
decoded.treeCid == manifest.treeCid
decoded.treeCid == verifiable.originalTreeCid
decoded.blocksCount == verifiable.originalBlocksCount
for i in 1..5:
test "Should encode/decode using various parameters " & $i & "/5":
let
blockSize = rng.sample(@[1, 2, 4, 8, 16, 32, 64].mapIt(it.KiBs))
datasetSize = 1.MiBs
ecK = 10.Natural
ecM = 10.Natural
let
chunker = RandomChunker.new(rng, size = datasetSize, chunkSize = blockSize)
manifest = await storeDataGetManifest(store, chunker)
encoded = (await erasure.encode(manifest, ecK, ecM)).tryGet()
decoded = (await erasure.decode(encoded)).tryGet()
check:
decoded.treeCid == manifest.treeCid
decoded.treeCid == encoded.originalTreeCid
decoded.blocksCount == encoded.originalBlocksCount

View File

@ -256,7 +256,7 @@ ethersuite "On-Chain Market":
receivedIds.add(requestId)
let subscription = await market.subscribeRequestCancelled(request.id, onRequestCancelled)
advanceToCancelledRequest(otherRequest) # shares expiry with otherRequest
await advanceToCancelledRequest(otherRequest) # shares expiry with otherRequest
await market.withdrawFunds(otherRequest.id)
check receivedIds.len == 0
await market.withdrawFunds(request.id)

View File

@ -25,5 +25,5 @@ template ethersuite*(name, body) =
body
export unittest
export asynctest
export ethers except `%`

View File

@ -2,8 +2,9 @@ import pkg/chronos
# Allow multiple setups and teardowns in a test suite
template asyncmultisetup* =
var setups: seq[proc: Future[void] {.gcsafe.}]
var teardowns: seq[proc: Future[void] {.gcsafe.}]
var setups: seq[proc: Future[void].Raising([AsyncExceptionError]) {.gcsafe.}]
var teardowns: seq[
proc: Future[void].Raising([AsyncExceptionError]) {.gcsafe.}]
setup:
for setup in setups:
@ -14,10 +15,12 @@ template asyncmultisetup* =
await teardown()
template setup(setupBody) {.inject, used.} =
setups.add(proc {.async.} = setupBody)
setups.add(proc {.async: (
handleException: true, raises: [AsyncExceptionError]).} = setupBody)
template teardown(teardownBody) {.inject, used.} =
teardowns.insert(proc {.async.} = teardownBody)
teardowns.insert(proc {.async: (
handleException: true, raises: [AsyncExceptionError]).} = teardownBody)
template multisetup* =
var setups: seq[proc() {.gcsafe.}]
@ -32,7 +35,8 @@ template multisetup* =
teardown()
template setup(setupBody) {.inject, used.} =
setups.add(proc = setupBody)
let setupProc = proc = setupBody
setups.add(setupProc)
template teardown(teardownBody) {.inject, used.} =
teardowns.insert(proc = teardownBody)

View File

@ -4,6 +4,7 @@ import std/strutils
from pkg/libp2p import Cid, `$`, init
import pkg/stint
import pkg/questionable/results
import pkg/chronos/apps/http/[httpserver, shttpserver, httpclient]
import pkg/codex/logutils
import pkg/codex/rest/json
import pkg/codex/purchasing
@ -15,9 +16,16 @@ export purchasing
type CodexClient* = ref object
http: HttpClient
baseurl: string
session: HttpSessionRef
type CodexClientError* = object of CatchableError
proc new*(_: type CodexClient, baseurl: string): CodexClient =
CodexClient(http: newHttpClient(), baseurl: baseurl)
CodexClient(
http: newHttpClient(),
baseurl: baseurl,
session: HttpSessionRef.new({HttpClientFlag.Http11Pipeline})
)
proc info*(client: CodexClient): ?!JsonNode =
let url = client.baseurl & "/debug/info"
@ -45,6 +53,23 @@ proc download*(client: CodexClient, cid: Cid, local = false): ?!string =
success response.body
proc downloadBytes*(
client: CodexClient,
cid: Cid,
local = false): Future[?!seq[byte]] {.async.} =
let uri = parseUri(
client.baseurl & "/data/" & $cid &
(if local: "" else: "/network")
)
let (status, bytes) = await client.session.fetch(uri)
if status != 200:
return failure("fetch failed with status " & $status)
success bytes
proc list*(client: CodexClient): ?!RestContentList =
let url = client.baseurl & "/data"
let response = client.http.get(url)
@ -71,7 +96,7 @@ proc requestStorageRaw*(
proofProbability: UInt256,
collateral: UInt256,
expiry: uint = 0,
nodes: uint = 1,
nodes: uint = 2,
tolerance: uint = 0
): Response =
@ -100,7 +125,7 @@ proc requestStorage*(
proofProbability: UInt256,
expiry: uint,
collateral: UInt256,
nodes: uint = 1,
nodes: uint = 2,
tolerance: uint = 0
): ?!PurchaseId =
## Call request storage REST endpoint

View File

@ -2,6 +2,7 @@ import pkg/questionable
import pkg/questionable/results
import pkg/confutils
import pkg/chronicles
import pkg/chronos/asyncproc
import pkg/ethers
import pkg/libp2p
import std/os

View File

@ -3,6 +3,7 @@ import pkg/questionable/results
import pkg/confutils
import pkg/chronicles
import pkg/chronos
import pkg/chronos/asyncproc
import pkg/stew/io2
import std/os
import std/sets

View File

@ -2,6 +2,7 @@ import pkg/questionable
import pkg/questionable/results
import pkg/confutils
import pkg/chronicles
import pkg/chronos/asyncproc
import pkg/libp2p
import std/os
import std/strutils

View File

@ -0,0 +1,60 @@
from pkg/libp2p import Cid, init
import ../examples
import ./marketplacesuite
import ./nodeconfigs
import ./hardhatconfig
marketplacesuite "Bug #821 - node crashes during erasure coding":
test "should be able to create storage request and download dataset",
NodeConfigs(
clients:
CodexConfigs.init(nodes=1)
# .debug() # uncomment to enable console log output
.withLogFile() # outputs log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
.withLogTopics("node", "erasure", "marketplace")
.some,
providers:
CodexConfigs.init(nodes=0)
# .debug() # uncomment to enable console log output
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
# .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock")
.some,
):
let reward = 400.u256
let duration = 10.periods
let collateral = 200.u256
let expiry = 5.periods
let data = await RandomChunker.example(blocks=8)
let client = clients()[0]
let clientApi = client.client
let cid = clientApi.upload(data).get
var requestId = none RequestId
proc onStorageRequested(event: StorageRequested) {.raises:[].} =
requestId = event.requestId.some
let subscription = await marketplace.subscribe(StorageRequested, onStorageRequested)
# client requests storage but requires multiple slots to host the content
let id = await clientApi.requestStorage(
cid,
duration=duration,
reward=reward,
expiry=expiry,
collateral=collateral,
nodes=3,
tolerance=1
)
check eventually(requestId.isSome, timeout=expiry.int * 1000)
let request = await marketplace.getRequest(requestId.get)
let cidFromRequest = Cid.init(request.content.cid).get()
let downloaded = await clientApi.downloadBytes(cidFromRequest, local = true)
check downloaded.isOk
check downloaded.get.toHex == data.toHex
await subscription.unsubscribe()

View File

@ -8,7 +8,8 @@ import ../examples
twonodessuite "Purchasing", debug1 = false, debug2 = false:
test "node handles storage request":
let cid = client1.upload("some file contents").get
let data = await RandomChunker.example(blocks=2)
let cid = client1.upload(data).get
let id1 = client1.requestStorage(cid, duration=100.u256, reward=2.u256, proofProbability=3.u256, expiry=10, collateral=200.u256).get
let id2 = client1.requestStorage(cid, duration=400.u256, reward=5.u256, proofProbability=6.u256, expiry=10, collateral=201.u256).get
check id1 != id2
@ -26,7 +27,7 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
proofProbability=3.u256,
expiry=30,
collateral=200.u256,
nodes=2,
nodes=3,
tolerance=1).get
let request = client1.getPurchase(id).get.request.get
@ -35,7 +36,7 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
check request.ask.proofProbability == 3.u256
check request.expiry == 30
check request.ask.collateral == 200.u256
check request.ask.slots == 2'u64
check request.ask.slots == 3'u64
check request.ask.maxSlotLoss == 1'u64
# TODO: We currently do not support encoding single chunks
@ -52,7 +53,8 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
# check request.ask.maxSlotLoss == 1'u64
test "node remembers purchase status after restart":
let cid = client1.upload("some file contents").get
let data = await RandomChunker.example(blocks=2)
let cid = client1.upload(data).get
let id = client1.requestStorage(cid,
duration=100.u256,
reward=2.u256,
@ -71,25 +73,12 @@ twonodessuite "Purchasing", debug1 = false, debug2 = false:
check request.ask.proofProbability == 3.u256
check request.expiry == 30
check request.ask.collateral == 200.u256
check request.ask.slots == 1'u64
check request.ask.slots == 2'u64
check request.ask.maxSlotLoss == 0'u64
test "request storage fails if nodes and tolerance aren't correct":
let cid = client1.upload("some file contents").get
let responseBefore = client1.requestStorageRaw(cid,
duration=100.u256,
reward=2.u256,
proofProbability=3.u256,
expiry=30,
collateral=200.u256,
nodes=1,
tolerance=1)
check responseBefore.status == "400 Bad Request"
check responseBefore.body == "Tolerance cannot be greater or equal than nodes (nodes - tolerance)"
test "node requires expiry and its value to be in future":
let cid = client1.upload("some file contents").get
let data = await RandomChunker.example(blocks=2)
let cid = client1.upload(data).get
let responseMissing = client1.requestStorageRaw(cid, duration=1.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256)
check responseMissing.status == "400 Bad Request"

View File

@ -1,7 +1,9 @@
import std/httpclient
import std/sequtils
from pkg/libp2p import `==`
import pkg/codex/units
import ./twonodes
import ../examples
twonodessuite "REST API", debug1 = false, debug2 = false:
@ -36,3 +38,93 @@ twonodessuite "REST API", debug1 = false, debug2 = false:
check:
[cid1, cid2].allIt(it in list.content.mapIt(it.cid))
test "request storage fails for datasets that are too small":
let cid = client1.upload("some file contents").get
let response = client1.requestStorageRaw(cid, duration=10.u256, reward=2.u256, proofProbability=3.u256, nodes=2, collateral=200.u256, expiry=9)
check:
response.status == "400 Bad Request"
response.body == "Dataset too small for erasure parameters, need at least " & $(2*DefaultBlockSize.int) & " bytes"
test "request storage succeeds for sufficiently sized datasets":
let data = await RandomChunker.example(blocks=2)
let cid = client1.upload(data).get
let response = client1.requestStorageRaw(cid, duration=10.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256, expiry=9)
check:
response.status == "200 OK"
test "request storage fails if nodes and tolerance aren't correct":
let data = await RandomChunker.example(blocks=2)
let cid = client1.upload(data).get
let duration = 100.u256
let reward = 2.u256
let proofProbability = 3.u256
let expiry = 30.uint
let collateral = 200.u256
let ecParams = @[(1, 0), (1, 1), (2, 1), (3, 2), (3, 3)]
for ecParam in ecParams:
let (nodes, tolerance) = ecParam
var responseBefore = client1.requestStorageRaw(cid,
duration,
reward,
proofProbability,
collateral,
expiry,
nodes.uint,
tolerance.uint)
check responseBefore.status == "400 Bad Request"
check responseBefore.body == "Invalid parameters: parameters must satisfy `1 < (nodes - tolerance) ≥ tolerance`"
test "request storage fails if tolerance > nodes (underflow protection)":
let data = await RandomChunker.example(blocks=2)
let cid = client1.upload(data).get
let duration = 100.u256
let reward = 2.u256
let proofProbability = 3.u256
let expiry = 30.uint
let collateral = 200.u256
let ecParams = @[(0, 1), (1, 2), (2, 3)]
for ecParam in ecParams:
let (nodes, tolerance) = ecParam
var responseBefore = client1.requestStorageRaw(cid,
duration,
reward,
proofProbability,
collateral,
expiry,
nodes.uint,
tolerance.uint)
check responseBefore.status == "400 Bad Request"
check responseBefore.body == "Invalid parameters: `tolerance` cannot be greater than `nodes`"
test "request storage succeeds if nodes and tolerance within range":
let data = await RandomChunker.example(blocks=2)
let cid = client1.upload(data).get
let duration = 100.u256
let reward = 2.u256
let proofProbability = 3.u256
let expiry = 30.uint
let collateral = 200.u256
let ecParams = @[(2, 0), (3, 1), (5, 2)]
for ecParam in ecParams:
let (nodes, tolerance) = ecParam
var responseBefore = client1.requestStorageRaw(cid,
duration,
reward,
proofProbability,
collateral,
expiry,
nodes.uint,
tolerance.uint)
check responseBefore.status == "200 OK"

View File

@ -6,5 +6,6 @@ import ./integration/testpurchasing
import ./integration/testblockexpiration
import ./integration/testmarketplace
import ./integration/testproofs
import ./integration/testecbug
{.warning[UnusedImport]:off.}

@ -1 +1 @@
Subproject commit 57e8cd5013325f05e16833a5320b575d32a403f3
Subproject commit 7ad26688a3b75b914d626e2623174a36f4425f51

vendor/nim-chronos vendored

@ -1 +1 @@
Subproject commit 0277b65be2c7a365ac13df002fba6e172be55537
Subproject commit 035ae11ba92369e7722e649db597e79134fd06b9

@ -1 +1 @@
Subproject commit a7f14bc9b783f1b9e2d02cc85a338b1411058095
Subproject commit 63822e83561ea1c6396d0f3eca583b038f5d44c6

@ -1 +1 @@
Subproject commit 3b491a40c60aad9e8d3407443f46f62511e63b18
Subproject commit be57dbc902d36f37540897e98c69aa80f868cb45

vendor/nim-leopard vendored

@ -1 +1 @@
Subproject commit 1a6f2ab7252426a6ac01482a68b75d0c3b134cf0
Subproject commit 895ff24ca6615d577acfb11811cdd5465f596c97

@ -1 +1 @@
Subproject commit fe9bc3f3759ae1add6bf8c899db2e75327f03782
Subproject commit b2e1fb022f1ee800b439648953e92cc993c1264c