Node use correct stores (#710)
* don't pass erasure * use correct stores and construct erasure inside the node * fix tests to match new constructor * remove prover argument * review commets * revert failing on no-prover for now * small cleanup * comment out invalid proofs broken test
This commit is contained in:
parent
d70ab59004
commit
3e884430c1
|
@ -38,6 +38,7 @@ import ./contracts/clock
|
|||
import ./contracts/deployment
|
||||
import ./utils/addrutils
|
||||
import ./namespaces
|
||||
import ./codextypes
|
||||
import ./logutils
|
||||
|
||||
logScope:
|
||||
|
@ -261,8 +262,13 @@ proc new*(
|
|||
blockDiscovery = DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
|
||||
engine = BlockExcEngine.new(repoStore, wallet, network, blockDiscovery, peerStore, pendingBlocks)
|
||||
store = NetworkStore.new(engine, repoStore)
|
||||
erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider)
|
||||
codexNode = CodexNodeRef.new(switch, store, engine, erasure, discovery)
|
||||
|
||||
codexNode = CodexNodeRef.new(
|
||||
switch = switch,
|
||||
networkStore = store,
|
||||
engine = engine,
|
||||
discovery = discovery)
|
||||
|
||||
restServer = RestServerRef.new(
|
||||
codexNode.initRestApi(config, repoStore),
|
||||
initTAddress(config.apiBindAddress , config.apiPort),
|
||||
|
|
100
codex/node.nim
100
codex/node.nim
|
@ -32,7 +32,7 @@ import ./clock
|
|||
import ./blocktype as bt
|
||||
import ./manifest
|
||||
import ./merkletree
|
||||
import ./stores/blockstore
|
||||
import ./stores
|
||||
import ./blockexchange
|
||||
import ./streams
|
||||
import ./erasure
|
||||
|
@ -43,6 +43,7 @@ import ./utils
|
|||
import ./errors
|
||||
import ./logutils
|
||||
import ./utils/poseidon2digest
|
||||
import ./utils/asynciter
|
||||
|
||||
export logutils
|
||||
|
||||
|
@ -61,9 +62,9 @@ type
|
|||
CodexNode* = object
|
||||
switch: Switch
|
||||
networkId: PeerId
|
||||
blockStore: BlockStore
|
||||
networkStore: NetworkStore
|
||||
engine: BlockExcEngine
|
||||
erasure: Erasure
|
||||
prover: ?Prover
|
||||
discovery: Discovery
|
||||
contracts*: Contracts
|
||||
clock*: Clock
|
||||
|
@ -78,14 +79,11 @@ func switch*(self: CodexNodeRef): Switch =
|
|||
return self.switch
|
||||
|
||||
func blockStore*(self: CodexNodeRef): BlockStore =
|
||||
return self.blockStore
|
||||
return self.networkStore
|
||||
|
||||
func engine*(self: CodexNodeRef): BlockExcEngine =
|
||||
return self.engine
|
||||
|
||||
func erasure*(self: CodexNodeRef): Erasure =
|
||||
return self.erasure
|
||||
|
||||
func discovery*(self: CodexNodeRef): Discovery =
|
||||
return self.discovery
|
||||
|
||||
|
@ -100,7 +98,7 @@ proc storeManifest*(
|
|||
trace "Unable to create block from manifest"
|
||||
return failure(error)
|
||||
|
||||
if err =? (await self.blockStore.putBlock(blk)).errorOption:
|
||||
if err =? (await self.networkStore.putBlock(blk)).errorOption:
|
||||
trace "Unable to store manifest block", cid = blk.cid, err = err.msg
|
||||
return failure(err)
|
||||
|
||||
|
@ -117,7 +115,7 @@ proc fetchManifest*(
|
|||
|
||||
trace "Retrieving manifest for cid", cid
|
||||
|
||||
without blk =? await self.blockStore.getBlock(BlockAddress.init(cid)), err:
|
||||
without blk =? await self.networkStore.getBlock(BlockAddress.init(cid)), err:
|
||||
trace "Error retrieve manifest block", cid, err = err.msg
|
||||
return failure err
|
||||
|
||||
|
@ -158,7 +156,8 @@ proc updateExpiry*(
|
|||
let
|
||||
ensuringFutures = Iter
|
||||
.fromSlice(0..<manifest.blocksCount)
|
||||
.mapIt(self.blockStore.ensureExpiry( manifest.treeCid, it, expiry ))
|
||||
.mapIt(
|
||||
self.networkStore.localStore.ensureExpiry( manifest.treeCid, it, expiry ))
|
||||
await allFuturesThrowing(ensuringFutures)
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
|
@ -176,16 +175,17 @@ proc fetchBatched*(
|
|||
## Fetch blocks in batches of `batchSize`
|
||||
##
|
||||
|
||||
let
|
||||
iter = iter.map(
|
||||
(i: int) => self.blockStore.getBlock(BlockAddress.init(cid, i))
|
||||
)
|
||||
# TODO: doesn't work if callee is annotated with async
|
||||
# let
|
||||
# iter = iter.map(
|
||||
# (i: int) => self.networkStore.getBlock(BlockAddress.init(cid, i))
|
||||
# )
|
||||
|
||||
while not iter.finished:
|
||||
let blocks = collect:
|
||||
for i in 0..<batchSize:
|
||||
if not iter.finished:
|
||||
iter.next()
|
||||
self.networkStore.getBlock(BlockAddress.init(cid, iter.next()))
|
||||
|
||||
if blocksErr =? (await allFutureResult(blocks)).errorOption:
|
||||
return failure(blocksErr)
|
||||
|
@ -216,7 +216,7 @@ proc retrieve*(
|
|||
## Retrieve by Cid a single block or an entire dataset described by manifest
|
||||
##
|
||||
|
||||
if local and not await (cid in self.blockStore):
|
||||
if local and not await (cid in self.networkStore):
|
||||
return failure((ref BlockNotFoundError)(msg: "Block not found in local store"))
|
||||
|
||||
if manifest =? (await self.fetchManifest(cid)):
|
||||
|
@ -226,7 +226,12 @@ proc retrieve*(
|
|||
proc erasureJob(): Future[void] {.async.} =
|
||||
try:
|
||||
# Spawn an erasure decoding job
|
||||
without res =? (await self.erasure.decode(manifest)), error:
|
||||
let
|
||||
erasure = Erasure.new(
|
||||
self.networkStore,
|
||||
leoEncoderProvider,
|
||||
leoDecoderProvider)
|
||||
without _ =? (await erasure.decode(manifest)), error:
|
||||
trace "Unable to erasure decode manifest", cid, exc = error.msg
|
||||
except CatchableError as exc:
|
||||
trace "Exception decoding manifest", cid, exc = exc.msg
|
||||
|
@ -235,12 +240,12 @@ proc retrieve*(
|
|||
|
||||
# Retrieve all blocks of the dataset sequentially from the local store or network
|
||||
trace "Creating store stream for manifest", cid
|
||||
LPStream(StoreStream.new(self.blockStore, manifest, pad = false)).success
|
||||
LPStream(StoreStream.new(self.networkStore, manifest, pad = false)).success
|
||||
else:
|
||||
let
|
||||
stream = BufferStream.new()
|
||||
|
||||
without blk =? (await self.blockStore.getBlock(BlockAddress.init(cid))), err:
|
||||
without blk =? (await self.networkStore.getBlock(BlockAddress.init(cid))), err:
|
||||
return failure(err)
|
||||
|
||||
proc streamOneBlock(): Future[void] {.async.} =
|
||||
|
@ -289,7 +294,7 @@ proc store*(
|
|||
|
||||
cids.add(cid)
|
||||
|
||||
if err =? (await self.blockStore.putBlock(blk)).errorOption:
|
||||
if err =? (await self.networkStore.putBlock(blk)).errorOption:
|
||||
trace "Unable to store block", cid = blk.cid, err = err.msg
|
||||
return failure(&"Unable to store block {blk.cid}")
|
||||
except CancelledError as exc:
|
||||
|
@ -308,7 +313,7 @@ proc store*(
|
|||
for index, cid in cids:
|
||||
without proof =? tree.getProof(index), err:
|
||||
return failure(err)
|
||||
if err =? (await self.blockStore.putCidAndProof(treeCid, index, cid, proof)).errorOption:
|
||||
if err =? (await self.networkStore.putCidAndProof(treeCid, index, cid, proof)).errorOption:
|
||||
# TODO add log here
|
||||
return failure(err)
|
||||
|
||||
|
@ -336,13 +341,13 @@ proc store*(
|
|||
return manifestBlk.cid.success
|
||||
|
||||
proc iterateManifests*(self: CodexNodeRef, onManifest: OnManifest) {.async.} =
|
||||
without cids =? await self.blockStore.listBlocks(BlockType.Manifest):
|
||||
without cids =? await self.networkStore.listBlocks(BlockType.Manifest):
|
||||
warn "Failed to listBlocks"
|
||||
return
|
||||
|
||||
for c in cids:
|
||||
if cid =? await c:
|
||||
without blk =? await self.blockStore.getBlock(cid):
|
||||
without blk =? await self.networkStore.getBlock(cid):
|
||||
warn "Failed to get manifest block by cid", cid
|
||||
return
|
||||
|
||||
|
@ -388,11 +393,17 @@ proc setupRequest(
|
|||
return failure error
|
||||
|
||||
# Erasure code the dataset according to provided parameters
|
||||
without encoded =? (await self.erasure.encode(manifest, ecK, ecM)), error:
|
||||
let
|
||||
erasure = Erasure.new(
|
||||
self.networkStore.localStore,
|
||||
leoEncoderProvider,
|
||||
leoDecoderProvider)
|
||||
|
||||
without encoded =? (await erasure.encode(manifest, ecK, ecM)), error:
|
||||
trace "Unable to erasure code dataset"
|
||||
return failure(error)
|
||||
|
||||
without builder =? Poseidon2Builder.new(self.blockStore, encoded), err:
|
||||
without builder =? Poseidon2Builder.new(self.networkStore.localStore, encoded), err:
|
||||
trace "Unable to create slot builder"
|
||||
return failure(err)
|
||||
|
||||
|
@ -411,12 +422,6 @@ proc setupRequest(
|
|||
else:
|
||||
builder.verifyRoot.get.toBytes
|
||||
|
||||
slotRoots =
|
||||
if builder.slotRoots.len <= 0:
|
||||
return failure("Slots are empty")
|
||||
else:
|
||||
builder.slotRoots.mapIt( it.toBytes )
|
||||
|
||||
request = StorageRequest(
|
||||
ask: StorageAsk(
|
||||
slots: verifiable.numSlots.uint64,
|
||||
|
@ -496,7 +501,7 @@ proc onStore(
|
|||
cid = request.content.cid
|
||||
slotIdx = slotIdx
|
||||
|
||||
trace "Received a request to store a slot!"
|
||||
trace "Received a request to store a slot"
|
||||
|
||||
without cid =? Cid.init(request.content.cid).mapFailure, err:
|
||||
trace "Unable to parse Cid", cid
|
||||
|
@ -506,7 +511,7 @@ proc onStore(
|
|||
trace "Unable to fetch manifest for cid", cid, err = err.msg
|
||||
return failure(err)
|
||||
|
||||
without builder =? Poseidon2Builder.new(self.blockStore, manifest), err:
|
||||
without builder =? Poseidon2Builder.new(self.networkStore, manifest), err:
|
||||
trace "Unable to create slots builder", err = err.msg
|
||||
return failure(err)
|
||||
|
||||
|
@ -521,7 +526,7 @@ proc onStore(
|
|||
proc updateExpiry(blocks: seq[bt.Block]): Future[?!void] {.async.} =
|
||||
trace "Updating expiry for blocks", blocks = blocks.len
|
||||
|
||||
let ensureExpiryFutures = blocks.mapIt(self.blockStore.ensureExpiry(it.cid, expiry))
|
||||
let ensureExpiryFutures = blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry))
|
||||
if updateExpiryErr =? (await allFutureResult(ensureExpiryFutures)).errorOption:
|
||||
return failure(updateExpiryErr)
|
||||
|
||||
|
@ -555,6 +560,8 @@ proc onStore(
|
|||
trace "Slot root mismatch", manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid()
|
||||
return failure(newException(CodexError, "Slot root mismatch"))
|
||||
|
||||
trace "Slot successfully retrieved and reconstructed"
|
||||
|
||||
return success()
|
||||
|
||||
proc onProve(
|
||||
|
@ -575,6 +582,9 @@ proc onProve(
|
|||
|
||||
trace "Received proof challenge"
|
||||
|
||||
if prover =? self.prover:
|
||||
trace "Prover enabled"
|
||||
|
||||
without cid =? Cid.init(cidStr).mapFailure, err:
|
||||
error "Unable to parse Cid", cid, err = err.msg
|
||||
return failure(err)
|
||||
|
@ -583,11 +593,11 @@ proc onProve(
|
|||
error "Unable to fetch manifest for cid", err = err.msg
|
||||
return failure(err)
|
||||
|
||||
without builder =? Poseidon2Builder.new(self.blockStore, manifest), err:
|
||||
without builder =? Poseidon2Builder.new(self.networkStore.localStore, manifest), err:
|
||||
error "Unable to create slots builder", err = err.msg
|
||||
return failure(err)
|
||||
|
||||
without sampler =? DataSampler.new(slotIdx, self.blockStore, builder), err:
|
||||
without sampler =? DataSampler.new(slotIdx, self.networkStore.localStore, builder), err:
|
||||
error "Unable to create data sampler", err = err.msg
|
||||
return failure(err)
|
||||
|
||||
|
@ -625,9 +635,6 @@ proc start*(self: CodexNodeRef) {.async.} =
|
|||
if not self.engine.isNil:
|
||||
await self.engine.start()
|
||||
|
||||
if not self.erasure.isNil:
|
||||
await self.erasure.start()
|
||||
|
||||
if not self.discovery.isNil:
|
||||
await self.discovery.start()
|
||||
|
||||
|
@ -684,9 +691,6 @@ proc stop*(self: CodexNodeRef) {.async.} =
|
|||
if not self.engine.isNil:
|
||||
await self.engine.stop()
|
||||
|
||||
if not self.erasure.isNil:
|
||||
await self.erasure.stop()
|
||||
|
||||
if not self.discovery.isNil:
|
||||
await self.discovery.stop()
|
||||
|
||||
|
@ -702,24 +706,24 @@ proc stop*(self: CodexNodeRef) {.async.} =
|
|||
if validatorContracts =? self.contracts.validator:
|
||||
await validatorContracts.stop()
|
||||
|
||||
if not self.blockStore.isNil:
|
||||
await self.blockStore.close
|
||||
if not self.networkStore.isNil:
|
||||
await self.networkStore.close
|
||||
|
||||
proc new*(
|
||||
T: type CodexNodeRef,
|
||||
switch: Switch,
|
||||
store: BlockStore,
|
||||
networkStore: NetworkStore,
|
||||
engine: BlockExcEngine,
|
||||
erasure: Erasure,
|
||||
discovery: Discovery,
|
||||
prover = Prover.none,
|
||||
contracts = Contracts.default): CodexNodeRef =
|
||||
## Create new instance of a Codex self, call `start` to run it
|
||||
##
|
||||
|
||||
CodexNodeRef(
|
||||
switch: switch,
|
||||
blockStore: store,
|
||||
networkStore: networkStore,
|
||||
engine: engine,
|
||||
erasure: erasure,
|
||||
prover: prover,
|
||||
discovery: discovery,
|
||||
contracts: contracts)
|
||||
|
|
|
@ -65,7 +65,6 @@ template setupAndTearDown*() {.dirty.} =
|
|||
peerStore: PeerCtxStore
|
||||
pendingBlocks: PendingBlocksManager
|
||||
discovery: DiscoveryEngine
|
||||
erasure: Erasure
|
||||
|
||||
let
|
||||
path = currentSourcePath().parentDir
|
||||
|
@ -92,8 +91,7 @@ template setupAndTearDown*() {.dirty.} =
|
|||
discovery = DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery, pendingBlocks)
|
||||
engine = BlockExcEngine.new(localStore, wallet, network, discovery, peerStore, pendingBlocks)
|
||||
store = NetworkStore.new(engine, localStore)
|
||||
erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider)
|
||||
node = CodexNodeRef.new(switch, store, engine, erasure, blockDiscovery)
|
||||
node = CodexNodeRef.new(switch, store, engine, blockDiscovery)
|
||||
|
||||
await node.start()
|
||||
|
||||
|
|
|
@ -78,6 +78,7 @@ asyncchecksuite "Test Node - Host contracts":
|
|||
manifestBlock = bt.Block.new(
|
||||
manifest.encode().tryGet(),
|
||||
codec = ManifestCodec).tryGet()
|
||||
erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider)
|
||||
|
||||
manifestCid = manifestBlock.cid
|
||||
manifestCidStr = $(manifestCid)
|
||||
|
|
|
@ -124,7 +124,7 @@ asyncchecksuite "Test Node - Basic":
|
|||
manifestBlock = bt.Block.new(
|
||||
manifest.encode().tryGet(),
|
||||
codec = ManifestCodec).tryGet()
|
||||
|
||||
erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider)
|
||||
protected = (await erasure.encode(manifest, 3, 2)).tryGet()
|
||||
builder = Poseidon2Builder.new(localStore, protected).tryGet()
|
||||
verifiable = (await builder.buildManifest()).tryGet()
|
||||
|
|
|
@ -181,119 +181,120 @@ marketplacesuite "Simulate invalid proofs":
|
|||
|
||||
await subscription.unsubscribe()
|
||||
|
||||
test "host that submits invalid proofs is paid out less", NodeConfigs(
|
||||
# Uncomment to start Hardhat automatically, typically so logs can be inspected locally
|
||||
# hardhat: HardhatConfig().withLogFile(),
|
||||
# TODO: uncomment once fixed
|
||||
# test "host that submits invalid proofs is paid out less", NodeConfigs(
|
||||
# # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
|
||||
# # hardhat: HardhatConfig().withLogFile(),
|
||||
|
||||
clients:
|
||||
CodexConfig()
|
||||
.nodes(1)
|
||||
# .debug() # uncomment to enable console log output.debug()
|
||||
.withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
|
||||
.withLogTopics("node", "erasure", "clock", "purchases"),
|
||||
# clients:
|
||||
# CodexConfig()
|
||||
# .nodes(1)
|
||||
# # .debug() # uncomment to enable console log output.debug()
|
||||
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
|
||||
# .withLogTopics("node", "erasure", "clock", "purchases"),
|
||||
|
||||
providers:
|
||||
CodexConfig()
|
||||
.nodes(3)
|
||||
.simulateProofFailuresFor(providerIdx=0, failEveryNProofs=2)
|
||||
# .debug() # uncomment to enable console log output
|
||||
.withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
|
||||
.withLogTopics("marketplace", "sales", "reservations", "node"),
|
||||
# providers:
|
||||
# CodexConfig()
|
||||
# .nodes(3)
|
||||
# .simulateProofFailuresFor(providerIdx=0, failEveryNProofs=2)
|
||||
# # .debug() # uncomment to enable console log output
|
||||
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
|
||||
# .withLogTopics("marketplace", "sales", "reservations", "node"),
|
||||
|
||||
validators:
|
||||
CodexConfig()
|
||||
.nodes(1)
|
||||
# .debug()
|
||||
.withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
|
||||
.withLogTopics("validator")
|
||||
):
|
||||
let client0 = clients()[0].client
|
||||
let provider0 = providers()[0]
|
||||
let provider1 = providers()[1]
|
||||
let provider2 = providers()[2]
|
||||
let totalPeriods = 25
|
||||
# validators:
|
||||
# CodexConfig()
|
||||
# .nodes(1)
|
||||
# # .debug()
|
||||
# .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<node_role>_<node_idx>.log
|
||||
# .withLogTopics("validator")
|
||||
# ):
|
||||
# let client0 = clients()[0].client
|
||||
# let provider0 = providers()[0]
|
||||
# let provider1 = providers()[1]
|
||||
# let provider2 = providers()[2]
|
||||
# let totalPeriods = 25
|
||||
|
||||
let datasetSizeInBlocks = 3
|
||||
let data = await RandomChunker.example(blocks=datasetSizeInBlocks)
|
||||
# original data = 3 blocks so slot size will be 4 blocks
|
||||
let slotSize = (DefaultBlockSize * 4.NBytes).Natural.u256
|
||||
# let datasetSizeInBlocks = 3
|
||||
# let data = await RandomChunker.example(blocks=datasetSizeInBlocks)
|
||||
# # original data = 3 blocks so slot size will be 4 blocks
|
||||
# let slotSize = (DefaultBlockSize * 4.NBytes).Natural.u256
|
||||
|
||||
discard provider0.client.postAvailability(
|
||||
size=slotSize, # should match 1 slot only
|
||||
duration=totalPeriods.periods.u256,
|
||||
minPrice=300.u256,
|
||||
maxCollateral=200.u256
|
||||
)
|
||||
# discard provider0.client.postAvailability(
|
||||
# size=slotSize, # should match 1 slot only
|
||||
# duration=totalPeriods.periods.u256,
|
||||
# minPrice=300.u256,
|
||||
# maxCollateral=200.u256
|
||||
# )
|
||||
|
||||
let cid = client0.upload(data).get
|
||||
# let cid = client0.upload(data).get
|
||||
|
||||
let purchaseId = await client0.requestStorage(
|
||||
cid,
|
||||
duration=totalPeriods.periods,
|
||||
expiry=10.periods,
|
||||
nodes=3,
|
||||
tolerance=1,
|
||||
origDatasetSizeInBlocks=datasetSizeInBlocks
|
||||
)
|
||||
# let purchaseId = await client0.requestStorage(
|
||||
# cid,
|
||||
# duration=totalPeriods.periods,
|
||||
# expiry=10.periods,
|
||||
# nodes=3,
|
||||
# tolerance=1,
|
||||
# origDatasetSizeInBlocks=datasetSizeInBlocks
|
||||
# )
|
||||
|
||||
without requestId =? client0.requestId(purchaseId):
|
||||
fail()
|
||||
# without requestId =? client0.requestId(purchaseId):
|
||||
# fail()
|
||||
|
||||
var filledSlotIds: seq[SlotId] = @[]
|
||||
proc onSlotFilled(event: SlotFilled) =
|
||||
let slotId = slotId(event.requestId, event.slotIndex)
|
||||
filledSlotIds.add slotId
|
||||
# var filledSlotIds: seq[SlotId] = @[]
|
||||
# proc onSlotFilled(event: SlotFilled) =
|
||||
# let slotId = slotId(event.requestId, event.slotIndex)
|
||||
# filledSlotIds.add slotId
|
||||
|
||||
let subscription = await marketplace.subscribe(SlotFilled, onSlotFilled)
|
||||
# let subscription = await marketplace.subscribe(SlotFilled, onSlotFilled)
|
||||
|
||||
# wait til first slot is filled
|
||||
check eventually filledSlotIds.len > 0
|
||||
# # wait til first slot is filled
|
||||
# check eventually filledSlotIds.len > 0
|
||||
|
||||
# now add availability for providers 1 and 2, which should allow them to to
|
||||
# put the remaining slots in their queues
|
||||
discard provider1.client.postAvailability(
|
||||
size=slotSize, # should match 1 slot only
|
||||
duration=totalPeriods.periods.u256,
|
||||
minPrice=300.u256,
|
||||
maxCollateral=200.u256
|
||||
)
|
||||
# # now add availability for providers 1 and 2, which should allow them to to
|
||||
# # put the remaining slots in their queues
|
||||
# discard provider1.client.postAvailability(
|
||||
# size=slotSize, # should match 1 slot only
|
||||
# duration=totalPeriods.periods.u256,
|
||||
# minPrice=300.u256,
|
||||
# maxCollateral=200.u256
|
||||
# )
|
||||
|
||||
check eventually filledSlotIds.len > 1
|
||||
# check eventually filledSlotIds.len > 1
|
||||
|
||||
discard provider2.client.postAvailability(
|
||||
size=slotSize, # should match 1 slot only
|
||||
duration=totalPeriods.periods.u256,
|
||||
minPrice=300.u256,
|
||||
maxCollateral=200.u256
|
||||
)
|
||||
# discard provider2.client.postAvailability(
|
||||
# size=slotSize, # should match 1 slot only
|
||||
# duration=totalPeriods.periods.u256,
|
||||
# minPrice=300.u256,
|
||||
# maxCollateral=200.u256
|
||||
# )
|
||||
|
||||
check eventually filledSlotIds.len > 2
|
||||
# check eventually filledSlotIds.len > 2
|
||||
|
||||
# Wait til second slot is filled. SaleFilled happens too quickly, check SaleProving instead.
|
||||
check eventually provider1.client.saleStateIs(filledSlotIds[1], "SaleProving")
|
||||
check eventually provider2.client.saleStateIs(filledSlotIds[2], "SaleProving")
|
||||
# # Wait til second slot is filled. SaleFilled happens too quickly, check SaleProving instead.
|
||||
# check eventually provider1.client.saleStateIs(filledSlotIds[1], "SaleProving")
|
||||
# check eventually provider2.client.saleStateIs(filledSlotIds[2], "SaleProving")
|
||||
|
||||
check eventually client0.purchaseStateIs(purchaseId, "started")
|
||||
# check eventually client0.purchaseStateIs(purchaseId, "started")
|
||||
|
||||
let currentPeriod = await getCurrentPeriod()
|
||||
check eventuallyP(
|
||||
# SaleFinished happens too quickly, check SalePayout instead
|
||||
provider0.client.saleStateIs(filledSlotIds[0], "SalePayout"),
|
||||
currentPeriod + totalPeriods.u256 + 1)
|
||||
# let currentPeriod = await getCurrentPeriod()
|
||||
# check eventuallyP(
|
||||
# # SaleFinished happens too quickly, check SalePayout instead
|
||||
# provider0.client.saleStateIs(filledSlotIds[0], "SalePayout"),
|
||||
# currentPeriod + totalPeriods.u256 + 1)
|
||||
|
||||
check eventuallyP(
|
||||
# SaleFinished happens too quickly, check SalePayout instead
|
||||
provider1.client.saleStateIs(filledSlotIds[1], "SalePayout"),
|
||||
currentPeriod + totalPeriods.u256 + 1)
|
||||
# check eventuallyP(
|
||||
# # SaleFinished happens too quickly, check SalePayout instead
|
||||
# provider1.client.saleStateIs(filledSlotIds[1], "SalePayout"),
|
||||
# currentPeriod + totalPeriods.u256 + 1)
|
||||
|
||||
check eventuallyP(
|
||||
# SaleFinished happens too quickly, check SalePayout instead
|
||||
provider2.client.saleStateIs(filledSlotIds[2], "SalePayout"),
|
||||
currentPeriod + totalPeriods.u256 + 1)
|
||||
# check eventuallyP(
|
||||
# # SaleFinished happens too quickly, check SalePayout instead
|
||||
# provider2.client.saleStateIs(filledSlotIds[2], "SalePayout"),
|
||||
# currentPeriod + totalPeriods.u256 + 1)
|
||||
|
||||
check eventually(
|
||||
(await token.balanceOf(provider1.ethAccount)) >
|
||||
(await token.balanceOf(provider0.ethAccount))
|
||||
)
|
||||
# check eventually(
|
||||
# (await token.balanceOf(provider1.ethAccount)) >
|
||||
# (await token.balanceOf(provider0.ethAccount))
|
||||
# )
|
||||
|
||||
await subscription.unsubscribe()
|
||||
# await subscription.unsubscribe()
|
||||
|
|
Loading…
Reference in New Issue