Mirror of https://github.com/status-im/nim-dagger.git
Synced 2025-02-27 05:40:35 +00:00

Merge branch 'master' into feature/ceremony-files

# Conflicts:
#	tests/codex/slots/testprover.nim

Commit 64b82ded9b
@@ -105,8 +105,7 @@ proc advertiseQueueLoop(b: DiscoveryEngine) {.async.} =
     trace "Begin iterating blocks..."
     for c in cids:
       if cid =? await c:
-        b.advertiseBlock(cid)
-        await sleepAsync(100.millis)
+        await b.advertiseBlock(cid)
     trace "Iterating blocks finished."

     await sleepAsync(b.advertiseLoopSleep)
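The hunk above replaces a fire-and-forget advertisement plus a fixed 100 ms sleep with a direct await, so failures surface in the loop and pacing follows the actual DHT call. A minimal chronos sketch of the two patterns (illustrative names, not the Codex API):

    import pkg/chronos

    proc advertise(id: int): Future[void] {.async.} =
      await sleepAsync(10.millis)      # stand-in for a DHT provide call

    proc fireAndForget(ids: seq[int]) {.async.} =
      for id in ids:
        asyncSpawn advertise(id)       # errors are lost to the spawn
        await sleepAsync(100.millis)   # pacing is guessed, not measured

    proc awaited(ids: seq[int]) {.async.} =
      for id in ids:
        await advertise(id)            # errors propagate; natural back-pressure

    when isMainModule:
      waitFor awaited(@[1, 2, 3])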
@@ -31,7 +31,7 @@ import ./codextypes
 export errors, logutils, units, codextypes

 type
-  Block* = object of RootObj
+  Block* = ref object of RootObj
     cid*: Cid
     data*: seq[byte]

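Switching `Block` (and `Manifest` below) from a value `object` to a `ref object` changes assignment from a copy of the payload to a shared reference. A small standalone sketch (simplified types, not the Codex definitions):

    type
      ValBlock = object        # value semantics: assignment copies `data`
        data: seq[byte]
      RefBlock = ref object    # reference semantics: assignment shares one object
        data: seq[byte]

    var a = ValBlock(data: @[1'u8, 2, 3])
    var b = a                  # full copy of the seq
    b.data[0] = 9
    assert a.data[0] == 1      # original untouched

    let x = RefBlock(data: @[1'u8, 2, 3])
    let y = x                  # both names point at the same object
    y.data[0] = 9
    assert x.data[0] == 9      # change visible through either name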
@@ -29,7 +29,7 @@ import ../logutils
 # TODO: Manifest should be reworked to more concrete types,
 # perhaps using inheritance
 type
-  Manifest* = object of RootObj
+  Manifest* = ref object of RootObj
     treeCid {.serialize.}: Cid # Root of the merkle tree
     datasetSize {.serialize.}: NBytes # Total size of all blocks
     blockSize {.serialize.}: NBytes # Size of each contained block (might not be needed if blocks are len-prefixed)
@@ -9,17 +9,14 @@

 {.push raises: [].}

 import std/sequtils
+import std/sugar

 import pkg/chronos
 import pkg/questionable/results
 import pkg/circomcompat
-import pkg/poseidon2/io

 import ../../types
 import ../../../stores
-import ../../../merkletree
-import ../../../codextypes
-import ../../../contracts

 import ./converters
@@ -39,6 +36,41 @@ type
     backendCfg : ptr CircomBn254Cfg
     vkp* : ptr CircomKey

+  NormalizedProofInputs*[H] {.borrow: `.`.} = distinct ProofInputs[H]
+
+func normalizeInput*[H](self: CircomCompat, input: ProofInputs[H]):
+  NormalizedProofInputs[H] =
+  ## Parameters in CIRCOM circuits are statically sized and must be properly
+  ## padded before they can be passed onto the circuit. This function takes
+  ## variable length parameters and performs that padding.
+  ##
+  ## The output from this function can be JSON-serialized and used as direct
+  ## inputs to the CIRCOM circuit for testing and debugging when one wishes
+  ## to bypass the Rust FFI.
+
+  let normSamples = collect:
+    for sample in input.samples:
+      var merklePaths = sample.merklePaths
+      merklePaths.setLen(self.slotDepth)
+      Sample[H](
+        cellData: sample.cellData,
+        merklePaths: merklePaths
+      )
+
+  var normSlotProof = input.slotProof
+  normSlotProof.setLen(self.datasetDepth)
+
+  NormalizedProofInputs[H] ProofInputs[H](
+    entropy: input.entropy,
+    datasetRoot: input.datasetRoot,
+    slotIndex: input.slotIndex,
+    slotRoot: input.slotRoot,
+    nCellsPerSlot: input.nCellsPerSlot,
+    nSlotsPerDataSet: input.nSlotsPerDataSet,
+    slotProof: normSlotProof,
+    samples: normSamples
+  )
+
 proc release*(self: CircomCompat) =
   ## Release the ctx
   ##
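The `normalizeInput` added above relies on `setLen` zero-extending sequences up to the circuit's static sizes (`slotDepth`, `datasetDepth`). A standalone sketch of that padding behaviour, with illustrative sizes:

    var merklePaths = @[11'u64, 22, 33]  # variable-length proof path
    const slotDepth = 5                  # static size the circuit expects

    merklePaths.setLen(slotDepth)        # extends with zero-initialized elements
    assert merklePaths == @[11'u64, 22, 33, 0, 0]

    merklePaths.setLen(2)                # setLen also truncates when shrinking
    assert merklePaths == @[11'u64, 22]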
@@ -49,27 +81,20 @@ proc release*(self: CircomCompat) =
   if not isNil(self.vkp):
     self.vkp.unsafeAddr.release_key()

-proc prove*[H](
+proc prove[H](
   self: CircomCompat,
-  input: ProofInputs[H]): ?!CircomProof =
-  ## Encode buffers using a ctx
-  ##
+  input: NormalizedProofInputs[H]): ?!CircomProof =

-  # NOTE: All inputs are statically sized per circuit
-  # and adjusted accordingly right before being passed
-  # to the circom ffi - `setLen` is used to adjust the
-  # sequence length to the correct size which also 0 pads
-  # to the correct length
   doAssert input.samples.len == self.numSamples,
     "Number of samples does not match"

   doAssert input.slotProof.len <= self.datasetDepth,
-    "Number of slot proofs does not match"
+    "Slot proof is too deep - dataset has more slots than what we can handle?"

   doAssert input.samples.allIt(
     block:
       (it.merklePaths.len <= self.slotDepth + self.blkDepth and
-       it.cellData.len <= self.cellElms * 32)), "Merkle paths length does not match"
+       it.cellData.len == self.cellElms)), "Merkle paths too deep or cells too big for circuit"

   # TODO: All parameters should match circom's static parametter
   var
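The reworded assertion reflects that `cellData` now holds field elements instead of raw bytes: a cell that used to be at most `cellElms * 32` bytes is now exactly `cellElms` elements, 32 bytes each for a serialized BN254 scalar. Quick arithmetic, assuming Codex's default 2048-byte cell:

    const
      bytesPerFieldElement = 32   # one BN254 scalar, serialized
      cellSizeBytes = 2048        # DefaultCellSize (assumed here)

    let cellElms = cellSizeBytes div bytesPerFieldElement
    assert cellElms == 64
    # old check: it.cellData.len <= cellElms * 32  (length in bytes)
    # new check: it.cellData.len == cellElms       (length in elements)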
@@ -116,8 +141,7 @@ proc prove*[H](
   var
     slotProof = input.slotProof.mapIt( it.toBytes ).concat

-  slotProof.setLen(self.datasetDepth) # zero pad inputs to correct size
-
+  doAssert(slotProof.len == self.datasetDepth)
   # arrays are always flattened
   if ctx.pushInputU256Array(
     "slotProof".cstring,
@@ -128,16 +152,14 @@ proc prove*[H](
   for s in input.samples:
     var
       merklePaths = s.merklePaths.mapIt( it.toBytes )
-      data = s.cellData
+      data = s.cellData.mapIt( @(it.toBytes) ).concat

-    merklePaths.setLen(self.slotDepth) # zero pad inputs to correct size
     if ctx.pushInputU256Array(
       "merklePaths".cstring,
       merklePaths[0].addr,
       uint (merklePaths[0].len * merklePaths.len)) != ERR_OK:
       return failure("Failed to push merkle paths")

-    data.setLen(self.cellElms * 32) # zero pad inputs to correct size
     if ctx.pushInputU256Array(
       "cellData".cstring,
       data[0].addr,
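With `cellData` stored as field elements, flattening to bytes now happens only here, at the FFI boundary. The `mapIt( @(it.toBytes) ).concat` idiom, sketched with 4-byte arrays standing in for the 32-byte field encodings:

    import std/sequtils

    let cellData = @[[1'u8, 0, 0, 0], [2'u8, 0, 0, 0]]  # two "elements"
    let flat = cellData.mapIt( @it ).concat             # seq[seq[byte]] -> seq[byte]
    assert flat == @[1'u8, 0, 0, 0, 2, 0, 0, 0]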
@@ -162,6 +184,12 @@ proc prove*[H](

   success proof

+proc prove*[H](
+  self: CircomCompat,
+  input: ProofInputs[H]): ?!CircomProof =
+
+  self.prove(self.normalizeInput(input))
+
 proc verify*[H](
   self: CircomCompat,
   proof: CircomProof,
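The exported `prove` is now just a normalizing wrapper over the internal overload that requires pre-padded inputs. A self-contained sketch of that wrapper/overload split (toy types standing in for ProofInputs/NormalizedProofInputs):

    import pkg/questionable/results

    type Inputs = seq[int]
    type NormInputs = distinct Inputs

    func normalize(input: Inputs): NormInputs =
      var padded = input
      padded.setLen(4)            # zero-pad to the static size
      NormInputs padded

    func prove(input: NormInputs): ?!string =
      success "proof"             # stand-in for the FFI call

    func prove(input: Inputs): ?!string =
      # public wrapper: normalize, then call the internal overload
      prove(normalize(input))

    assert prove(@[1, 2]).isOk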
@@ -38,7 +38,7 @@ type
 func getCell*[T, H](
   self: DataSampler[T, H],
   blkBytes: seq[byte],
-  blkCellIdx: Natural): seq[byte] =
+  blkCellIdx: Natural): seq[H] =

   let
     cellSize = self.builder.cellSize.uint64
@@ -47,7 +47,7 @@ func getCell*[T, H](

   doAssert (dataEnd - dataStart) == cellSize, "Invalid cell size"

-  toInputData[H](blkBytes[dataStart ..< dataEnd])
+  blkBytes[dataStart ..< dataEnd].elements(H).toSeq()

 proc getSample*[T, H](
   self: DataSampler[T, H],
@@ -7,23 +7,13 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.

-import std/sugar
 import std/bitops
-import std/sequtils

-import pkg/questionable/results
 import pkg/poseidon2
-import pkg/poseidon2/io

 import pkg/constantine/math/arithmetic
-
 import pkg/constantine/math/io/io_fields

-import ../../merkletree
-
-func toInputData*[H](data: seq[byte]): seq[byte] =
-  return toSeq(data.elements(H)).mapIt( @(it.toBytes) ).concat
-
 func extractLowBits*[n: static int](elm: BigInt[n], k: int): uint64 =
   doAssert( k > 0 and k <= 64 )
   var r = 0'u64
@@ -39,6 +29,7 @@ func extractLowBits(fld: Poseidon2Hash, k: int): uint64 =
   return extractLowBits(elm, k);

 func floorLog2*(x : int) : int =
+  doAssert ( x > 0 )
   var k = -1
   var y = x
   while (y > 0):
@@ -47,9 +38,7 @@ func floorLog2*(x : int) : int =
   return k

 func ceilingLog2*(x : int) : int =
-  if (x == 0):
-    return -1
-  else:
-    return (floorLog2(x - 1) + 1)
+  doAssert ( x > 0 )
+  return (floorLog2(x - 1) + 1)

 func toBlkInSlot*(cell: Natural, numCells: Natural): Natural =
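These helpers size the Merkle path arrays; per the hunks above, both now assert on non-positive input instead of special-casing zero. Reimplemented inline for a standalone check of the expected values (mirroring the diff's logic):

    func floorLog2(x: int): int =
      doAssert x > 0
      var k = -1
      var y = x
      while y > 0:
        inc k
        y = y shr 1
      k

    func ceilingLog2(x: int): int =
      doAssert x > 0
      floorLog2(x - 1) + 1

    assert floorLog2(8) == 3     # 2^3 = 8
    assert ceilingLog2(8) == 3   # exact powers of two are unchanged
    assert ceilingLog2(9) == 4   # next power of two above 9 is 16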
@@ -80,7 +69,7 @@ func cellIndices*(
   numCells: Natural, nSamples: Natural): seq[Natural] =

   var indices: seq[Natural]
-  while (indices.len < nSamples):
-    let idx = cellIndex(entropy, slotRoot, numCells, indices.len + 1)
-    indices.add(idx.Natural)
+  for i in 1..nSamples:
+    indices.add(cellIndex(entropy, slotRoot, numCells, i))

   indices
@@ -9,7 +9,7 @@

 type
   Sample*[H] = object
-    cellData*: seq[byte]
+    cellData*: seq[H]
     merklePaths*: seq[H]

   PublicInputs*[H] = object
@@ -24,5 +24,5 @@ type
     slotRoot*: H
     nCellsPerSlot*: Natural
     nSlotsPerDataSet*: Natural
-    slotProof*: seq[H]
-    samples*: seq[Sample[H]]
+    slotProof*: seq[H] # inclusion proof that shows that the slot root (leaf) is part of the dataset (root)
+    samples*: seq[Sample[H]] # inclusion proofs which show that the selected cells (leafs) are part of the slot (roots)
@@ -121,6 +121,9 @@ switch("define", "ctt_asm=false")
 # Allow the use of old-style case objects for nim config compatibility
 switch("define", "nimOldCaseObjects")

+# Enable compat mode for Chronos V4
+switch("define", "chronosHandleException")
+
 # begin Nimble config (version 1)
 when system.fileExists("nimble.paths"):
   include "nimble.paths"
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -323,8 +323,7 @@ asyncchecksuite "Sales":
                          slot: UInt256,
                          onBatch: BatchProc): Future[?!void] {.async.} =
       let blk = bt.Block.new( @[1.byte] ).get
-      onBatch( blk.repeat(request.ask.slotSize.truncate(int)) )
-      return success()
+      await onBatch( blk.repeat(request.ask.slotSize.truncate(int)) )

     createAvailability()
     await market.requestStorage(request)
@@ -337,8 +336,8 @@ asyncchecksuite "Sales":
                          onBatch: BatchProc): Future[?!void] {.async.} =
       slotIndex = slot
       let blk = bt.Block.new( @[1.byte] ).get
-      onBatch(@[ blk ])
+      await onBatch(@[ blk ])
       return success()

     let sold = newFuture[void]()
     sales.onSale = proc(request: StorageRequest, slotIndex: UInt256) =
       sold.complete()
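In the first hunk, dropping `return success()` works because `onBatch` now returns `Future[?!void]`: the awaited `?!void` is the callback's final expression and becomes its result directly. A minimal sketch of the pattern (chronos and questionable assumed):

    import pkg/chronos
    import pkg/questionable/results

    proc step(): Future[?!void] {.async.} =
      return success()

    proc handler(): Future[?!void] {.async.} =
      # the awaited ?!void is the last expression,
      # so it is returned as the proc's result
      await step()

    assert (waitFor handler()).isOk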
@@ -524,7 +524,7 @@ suite "Slot queue":
       request.ask,
       request.expiry,
       seen = true)
-    queue.push(item)
+    check queue.push(item).isOk
     check eventually queue.paused
     check onProcessSlotCalledWith.len == 0
@@ -534,7 +534,7 @@ suite "Slot queue":

     let request = StorageRequest.example
     var items = SlotQueueItem.init(request)
-    queue.push(items)
+    check queue.push(items).isOk
     # check all items processed
     check eventually queue.len == 0
@@ -546,7 +546,7 @@ suite "Slot queue":
       request.expiry,
       seen = true)
     check queue.paused
-    queue.push(item0)
+    check queue.push(item0).isOk
     check queue.paused

   test "paused queue waits for unpause before continuing processing":
@@ -558,7 +558,7 @@ suite "Slot queue":
       seen = false)
     check queue.paused
     # push causes unpause
-    queue.push(item)
+    check queue.push(item).isOk
     # check all items processed
     check eventually onProcessSlotCalledWith == @[
       (item.requestId, item.slotIndex),
@@ -576,8 +576,8 @@ suite "Slot queue":
       request.ask,
       request.expiry,
       seen = true)
-    queue.push(item0)
-    queue.push(item1)
+    check queue.push(item0).isOk
+    check queue.push(item1).isOk
     check queue[0].seen
     check queue[1].seen

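Across these hunks, `push` returns a `?!void`, so a bare call leaves an unused `Result` (which Nim rejects at compile time); wrapping it in `check ....isOk` both consumes the value and turns a failed push into a test failure. Sketch of the underlying pattern:

    import pkg/questionable/results

    proc push(x: int): ?!void =
      if x < 0:
        return failure("negative item")
      success()

    assert push(1).isOk
    assert push(-1).isErr
    # a bare `push(1)` would not compile: the Result must be used or discarded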
@@ -17,21 +17,6 @@ import pkg/codex/utils/json

 export types

-func fromCircomData*(_: type Poseidon2Hash, cellData: seq[byte]): seq[Poseidon2Hash] =
-  var
-    pos = 0
-    cellElms: seq[Bn254Fr]
-  while pos < cellData.len:
-    var
-      step = 32
-      offset = min(pos + step, cellData.len)
-      data = cellData[pos..<offset]
-    let ff = Bn254Fr.fromBytes(data.toArray32).get
-    cellElms.add(ff)
-    pos += data.len
-
-  cellElms
-
 func toJsonDecimal*(big: BigInt[254]): string =
   let s = big.toDecimal.strip( leading = true, trailing = false, chars = {'0'} )
   if s.len == 0: "0" else: s
@@ -78,13 +63,16 @@ func toJson*(input: ProofInputs[Poseidon2Hash]): JsonNode =
     "slotRoot": input.slotRoot.toDecimal,
     "slotProof": input.slotProof.mapIt( it.toBig.toJsonDecimal ),
     "cellData": input.samples.mapIt(
-      toSeq( it.cellData.elements(Poseidon2Hash) ).mapIt( it.toBig.toJsonDecimal )
+      it.cellData.mapIt( it.toBig.toJsonDecimal )
     ),
     "merklePaths": input.samples.mapIt(
       it.merklePaths.mapIt( it.toBig.toJsonDecimal )
     )
   }

+func toJson*(input: NormalizedProofInputs[Poseidon2Hash]): JsonNode =
+  toJson(ProofInputs[Poseidon2Hash](input))
+
 func jsonToProofInput*(_: type Poseidon2Hash, inputJson: JsonNode): ProofInputs[Poseidon2Hash] =
   let
     cellData =
@@ -93,10 +81,12 @@ func jsonToProofInput*(_: type Poseidon2Hash, inputJson: JsonNode): ProofInputs[
         block:
           var
             big: BigInt[256]
-            data = newSeq[byte](big.bits div 8)
+            hash: Poseidon2Hash
+            data: array[32, byte]
           assert bool(big.fromDecimal( it.str ))
-          data.marshal(big, littleEndian)
-          data
-      ).concat # flatten out elements
+          assert data.marshal(big, littleEndian)
+
+          Poseidon2Hash.fromBytes(data).get
+      )
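The parser change keeps one seq of decoded elements per sample instead of flattening everything with `.concat`. A structural sketch using std/json, with `parseInt` standing in for the BigInt-decimal-to-field decode:

    import std/[json, sequtils, strutils]

    let inputJson = parseJson("""{"cellData": [["1", "2"], ["3"]]}""")

    let cellData = inputJson["cellData"].mapIt(
      it.mapIt(it.str.parseInt)   # decode each element, keep the nesting
    )
    assert cellData == @[@[1, 2], @[3]]
    # the removed `.concat # flatten out elements` would have produced @[1, 2, 3]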
@@ -58,7 +58,7 @@ suite "Test Sampler - control samples":
         proofInput.nCellsPerSlot,
         sample.merklePaths[5..<9]).tryGet

-      cellData = Poseidon2Hash.fromCircomData(sample.cellData)
+      cellData = sample.cellData
       cellLeaf = Poseidon2Hash.spongeDigest(cellData, rate = 2).tryGet
       slotLeaf = cellProof.reconstructRoot(cellLeaf).tryGet
@@ -158,7 +158,7 @@ suite "Test Sampler":
         nSlotCells,
         sample.merklePaths[5..<sample.merklePaths.len]).tryGet

-      cellData = Poseidon2Hash.fromCircomData(sample.cellData)
+      cellData = sample.cellData
       cellLeaf = Poseidon2Hash.spongeDigest(cellData, rate = 2).tryGet
       slotLeaf = cellProof.reconstructRoot(cellLeaf).tryGet
@@ -15,8 +15,6 @@ import pkg/codex/chunker
 import pkg/codex/blocktype as bt
 import pkg/codex/slots
 import pkg/codex/stores
-import pkg/codex/conf
-import pkg/confutils/defs
 import pkg/poseidon2/io
 import pkg/codex/utils/poseidon2digest

@@ -26,23 +24,19 @@ import ./backends/helpers

 suite "Test Prover":
   let
-    slotId = 1
     samples = 5
-    ecK = 3
-    ecM = 2
-    numDatasetBlocks = 8
     blockSize = DefaultBlockSize
     cellSize = DefaultCellSize
     repoTmp = TempLevelDb.new()
     metaTmp = TempLevelDb.new()
+    r1cs = "tests/circuits/fixtures/proof_main.r1cs"
+    wasm = "tests/circuits/fixtures/proof_main.wasm"
+
+    circomBackend = CircomCompat.init(r1cs, wasm)
+    challenge = 1234567.toF.toBytes.toArray32

   var
-    datasetBlocks: seq[bt.Block]
     store: BlockStore
-    manifest: Manifest
-    protected: Manifest
-    verifiable: Manifest
-    sampler: Poseidon2Sampler
     prover: Prover

   setup:
@@ -50,14 +44,7 @@ suite "Test Prover":
       metaDs = metaTmp.newDb()

     store = RepoStore.new(repoDs, metaDs)
-
-    (manifest, protected, verifiable) =
-      await createVerifiableManifest(
-        store,
-        numDatasetBlocks,
-        ecK, ecM,
-        blockSize,
-        cellSize)
+    prover = Prover.new(store, circomBackend, samples)

   teardown:
     await repoTmp.destroyDb()
@@ -65,23 +52,41 @@ suite "Test Prover":

   test "Should sample and prove a slot":
     let
-      prover = Prover.new(store, samples)
-      challenge = 1234567.toF.toBytes.toArray32
-      config = CodexConf(
-        cmd: StartUpCmd.persistence,
-        nat: ValidIpAddress.init("127.0.0.1"),
-        discoveryIp: ValidIpAddress.init(IPv4_any()),
-        metricsAddress: ValidIpAddress.init("127.0.0.1"),
-        persistenceCmd: PersistenceCmd.prover,
-        circomR1cs: InputFile("tests/circuits/fixtures/proof_main.r1cs"),
-        circomWasm: InputFile("tests/circuits/fixtures/proof_main.wasm"),
-        circomZkey: InputFile("tests/circuits/fixtures/proof_main.zkey")
-      )
-      ceremonyHash = string.none
+      (_, _, verifiable) =
+        await createVerifiableManifest(
+          store,
+          8, # number of blocks in the original dataset (before EC)
+          5, # ecK
+          3, # ecM
+          blockSize,
+          cellSize)

-    (await prover.start(config, ceremonyHash)).tryGet()
-
-    let (inputs, proof) = (await prover.prove(1, verifiable, challenge)).tryGet
+    let
+      (inputs, proof) = (
+        await prover.prove(1, verifiable, challenge)).tryGet

     check:
       (await prover.verify(proof, inputs)).tryGet == true
+
+  test "Should generate valid proofs when slots consist of single blocks":
+
+    # To get single-block slots, we just need to set the number of blocks in
+    # the original dataset to be the same as ecK. The total number of blocks
+    # after generating random data for parity will be ecK + ecM, which will
+    # match the number of slots.
+    let
+      (_, _, verifiable) =
+        await createVerifiableManifest(
+          store,
+          2, # number of blocks in the original dataset (before EC)
+          2, # ecK
+          1, # ecM
+          blockSize,
+          cellSize)
+
+    let
+      (inputs, proof) = (
+        await prover.prove(1, verifiable, challenge)).tryGet
+
+    check:
+      (await prover.verify(proof, inputs)).tryGet == true
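For the comment's arithmetic in the new single-block test: with the original block count equal to ecK, erasure coding adds ecM parity blocks, so blocks and slots both total ecK + ecM. A quick check of the test's numbers (illustrative of the comment, not of the EC implementation):

    let
      nOrigBlocks = 2              # equal to ecK by construction
      ecK = 2
      ecM = 1

    let
      totalBlocks = nOrigBlocks + (nOrigBlocks div ecK) * ecM
      nSlots = ecK + ecM

    assert totalBlocks == 3
    assert nSlots == 3
    assert totalBlocks div nSlots == 1   # exactly one block per slot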
@@ -256,7 +256,7 @@ ethersuite "On-Chain Market":
       receivedIds.add(requestId)

     let subscription = await market.subscribeRequestCancelled(request.id, onRequestCancelled)
-    advanceToCancelledRequest(otherRequest) # shares expiry with otherRequest
+    await advanceToCancelledRequest(otherRequest) # shares expiry with otherRequest
     await market.withdrawFunds(otherRequest.id)
     check receivedIds.len == 0
     await market.withdrawFunds(request.id)
@@ -25,5 +25,5 @@ template ethersuite*(name, body) =

   body

-export unittest
+export asynctest
 export ethers except `%`
@@ -2,8 +2,9 @@ import pkg/chronos

 # Allow multiple setups and teardowns in a test suite
 template asyncmultisetup* =
-  var setups: seq[proc: Future[void] {.gcsafe.}]
-  var teardowns: seq[proc: Future[void] {.gcsafe.}]
+  var setups: seq[proc: Future[void].Raising([AsyncExceptionError]) {.gcsafe.}]
+  var teardowns: seq[
+    proc: Future[void].Raising([AsyncExceptionError]) {.gcsafe.}]

   setup:
     for setup in setups:
@@ -14,10 +15,12 @@ template asyncmultisetup* =
       await teardown()

   template setup(setupBody) {.inject, used.} =
-    setups.add(proc {.async.} = setupBody)
+    setups.add(proc {.async: (
+      handleException: true, raises: [AsyncExceptionError]).} = setupBody)

   template teardown(teardownBody) {.inject, used.} =
-    teardowns.insert(proc {.async.} = teardownBody)
+    teardowns.insert(proc {.async: (
+      handleException: true, raises: [AsyncExceptionError]).} = teardownBody)

 template multisetup* =
   var setups: seq[proc() {.gcsafe.}]
@@ -32,7 +35,8 @@ template multisetup* =
     teardown()

   template setup(setupBody) {.inject, used.} =
-    setups.add(proc = setupBody)
+    let setupProc = proc = setupBody
+    setups.add(setupProc)

   template teardown(teardownBody) {.inject, used.} =
     teardowns.insert(proc = teardownBody)
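The `{.async: (handleException: true, raises: [AsyncExceptionError]).}` annotation is Chronos v4's exception tracking: a plain `Exception` raised in the body is captured and the future fails with `AsyncExceptionError`, which keeps the callback assignable to `Future[void].Raising([AsyncExceptionError])`. A standalone sketch (assuming Chronos v4):

    import pkg/chronos

    proc flaky() {.async: (handleException: true,
                           raises: [AsyncExceptionError]).} =
      # any raised Exception is converted into a failed future
      # carrying AsyncExceptionError, keeping effects tracked
      await sleepAsync(1.millis)

    let fut: Future[void].Raising([AsyncExceptionError]) = flaky()
    waitFor fut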
@@ -2,6 +2,7 @@ import pkg/questionable
 import pkg/questionable/results
 import pkg/confutils
 import pkg/chronicles
+import pkg/chronos/asyncproc
 import pkg/ethers
 import pkg/libp2p
 import std/os
@@ -3,6 +3,7 @@ import pkg/questionable/results
 import pkg/confutils
 import pkg/chronicles
 import pkg/chronos
+import pkg/chronos/asyncproc
 import pkg/stew/io2
 import std/os
 import std/sets
@@ -2,6 +2,7 @@ import pkg/questionable
 import pkg/questionable/results
 import pkg/confutils
 import pkg/chronicles
+import pkg/chronos/asyncproc
 import pkg/libp2p
 import std/os
 import std/strutils
vendor/codex-contracts-eth (vendored)
@@ -1 +1 @@
-Subproject commit 57e8cd5013325f05e16833a5320b575d32a403f3
+Subproject commit 7ad26688a3b75b914d626e2623174a36f4425f51

vendor/nim-chronos (vendored)
@@ -1 +1 @@
-Subproject commit 0277b65be2c7a365ac13df002fba6e172be55537
+Subproject commit 035ae11ba92369e7722e649db597e79134fd06b9

vendor/nim-codex-dht (vendored)
@@ -1 +1 @@
-Subproject commit a7f14bc9b783f1b9e2d02cc85a338b1411058095
+Subproject commit 63822e83561ea1c6396d0f3eca583b038f5d44c6

vendor/nim-http-utils (vendored)
@@ -1 +1 @@
-Subproject commit 3b491a40c60aad9e8d3407443f46f62511e63b18
+Subproject commit be57dbc902d36f37540897e98c69aa80f868cb45