import std/sequtils
import std/sugar

import pkg/questionable
import pkg/questionable/results
import pkg/stint
import pkg/datastore

import ./market
import ./clock
import ./stores
import ./contracts/requests
import ./contracts/marketplace
import ./logutils
import ./sales/reservations
import ./sales/salescontext
import ./sales/salesagent
import ./sales/statemachine
import ./sales/slotqueue
import ./sales/states/preparing
import ./sales/states/unknown
import ./utils/then
import ./utils/trackedfutures

## Sales holds a list of available storage that it may sell.
##
## When storage is requested on the market that matches availability, the Sales
## object will instruct the Codex node to persist the requested data. Once the
## data has been persisted, it uploads a proof of storage to the market in an
## attempt to win a storage contract.
##
##    Node                        Sales                   Market
##     |                            |                       |
##     | -- add availability -->    |                       |
##     |                            | <-- storage request --|
##     | <------ store data ------  |                       |
##     | ------------------------>  |                       |
##     |                            |                       |
##     | <------ prove data ------  |                       |
##     | ------------------------>  |                       |
##     |                            | ---- storage proof -->|
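##
## A minimal usage sketch. This is illustrative only: `market`, `clock` and
## `repo` are assumed to be constructed by the node elsewhere, and the calls
## must run inside an async proc.
##
## .. code-block:: nim
##   let sales = Sales.new(market, clock, repo)
##   sales.onSale = proc(request: StorageRequest, slotIndex: UInt256) =
##     echo "filled slot ", slotIndex, " of request ", request.id
##   await sales.start()
##   # ... later, on shutdown:
##   await sales.stop()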

export stint
export reservations
export salesagent
export salescontext

logScope:
  topics = "sales marketplace"

type
  Sales* = ref object
    context*: SalesContext
    agents*: seq[SalesAgent]
    running: bool
    subscriptions: seq[market.Subscription]
    trackedFutures: TrackedFutures
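
# Callback hooks set by the node: `onStore` persists the requested data,
# `onProve` produces the storage proof, and `onSale` fires once a slot has
# been filled (see `filled` below); `onClear` and `onExpiryUpdate` cover
# cleanup and request-expiry updates.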
proc `onStore=`*(sales: Sales, onStore: OnStore) =
  sales.context.onStore = some onStore

proc `onClear=`*(sales: Sales, onClear: OnClear) =
  sales.context.onClear = some onClear

proc `onSale=`*(sales: Sales, callback: OnSale) =
  sales.context.onSale = some callback

proc `onProve=`*(sales: Sales, callback: OnProve) =
  sales.context.onProve = some callback

proc `onExpiryUpdate=`*(sales: Sales, callback: OnExpiryUpdate) =
  sales.context.onExpiryUpdate = some callback

proc onStore*(sales: Sales): ?OnStore = sales.context.onStore

proc onClear*(sales: Sales): ?OnClear = sales.context.onClear

proc onSale*(sales: Sales): ?OnSale = sales.context.onSale

proc onProve*(sales: Sales): ?OnProve = sales.context.onProve

proc onExpiryUpdate*(sales: Sales): ?OnExpiryUpdate = sales.context.onExpiryUpdate

proc new*(_: type Sales,
          market: Market,
          clock: Clock,
          repo: RepoStore): Sales =
  Sales.new(market, clock, repo, 0)

proc new*(_: type Sales,
          market: Market,
          clock: Clock,
          repo: RepoStore,
          simulateProofFailures: int): Sales =

  let reservations = Reservations.new(repo)
  Sales(
    context: SalesContext(
      market: market,
      clock: clock,
      reservations: reservations,
      slotQueue: SlotQueue.new(),
      simulateProofFailures: simulateProofFailures
    ),
    trackedFutures: TrackedFutures.new(),
    subscriptions: @[]
  )
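
# Note: the 3-argument `new` overload above simply forwards to the
# 4-argument one with `simulateProofFailures = 0`, disabling simulated
# proof failures.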

proc remove(sales: Sales, agent: SalesAgent) {.async.} =
  await agent.stop()
  if sales.running:
    sales.agents.keepItIf(it != agent)
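
# cleanUp tears down a sales agent once its sale is over: it returns any
# reserved bytes to the availability (when requested), deletes the
# reservation, optionally re-queues the slot marked as seen, removes the
# agent, and signals the slot queue to cycle a worker.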
proc cleanUp(sales: Sales,
             agent: SalesAgent,
             returnBytes: bool,
             reprocessSlot: bool,
             processing: Future[void]) {.async.} =

  let data = agent.data

  logScope:
    topics = "sales cleanUp"
    requestId = data.requestId
    slotIndex = data.slotIndex
    reservationId = data.reservation.?id |? ReservationId.default
    availabilityId = data.reservation.?availabilityId |? AvailabilityId.default

  trace "cleaning up sales agent"

  # If the reservation for the SalesAgent was never created, cleanUp was
  # called before the sales process really started, so there are no bytes
  # to return.
  if returnBytes and request =? data.request and reservation =? data.reservation:
    if returnErr =? (await sales.context.reservations.returnBytesToAvailability(
      reservation.availabilityId,
      reservation.id,
      request.ask.slotSize
    )).errorOption:
      error "failure returning bytes",
        error = returnErr.msg,
        bytes = request.ask.slotSize

  # delete the reservation and return its bytes back to the availability
  if reservation =? data.reservation and
     deleteErr =? (await sales.context.reservations.deleteReservation(
       reservation.id,
       reservation.availabilityId
     )).errorOption:
    error "failure deleting reservation", error = deleteErr.msg

  # Re-add the item to the queue to prevent small availabilities from
  # draining it. Seen items are ordered last.
  if reprocessSlot and request =? data.request:
    let queue = sales.context.slotQueue
    var seenItem = SlotQueueItem.init(data.requestId,
                                      data.slotIndex.truncate(uint16),
                                      data.ask,
                                      request.expiry,
                                      seen = true)
    trace "pushing ignored item to queue, marked as seen"
    if err =? queue.push(seenItem).errorOption:
      error "failed to re-add slot to queue",
        errorType = $(type err), error = err.msg

  await sales.remove(agent)

  # signal back to the slot queue to cycle a worker
  if not processing.isNil and not processing.finished():
    processing.complete()

proc filled(
  sales: Sales,
  request: StorageRequest,
  slotIndex: UInt256,
  processing: Future[void]) =

  if onSale =? sales.context.onSale:
    onSale(request, slotIndex)

  # signal back to the slot queue to cycle a worker
  if not processing.isNil and not processing.finished():
    processing.complete()

proc processSlot(sales: Sales, item: SlotQueueItem, done: Future[void]) =
  debug "Processing slot from queue", requestId = item.requestId,
    slot = item.slotIndex

  let agent = newSalesAgent(
    sales.context,
    item.requestId,
    item.slotIndex.u256,
    none StorageRequest
  )

  agent.onCleanUp = proc (returnBytes = false, reprocessSlot = false) {.async.} =
    await sales.cleanUp(agent, returnBytes, reprocessSlot, done)

  agent.onFilled = some proc(request: StorageRequest, slotIndex: UInt256) =
    sales.filled(request, slotIndex, done)

  agent.start(SalePreparing())
  sales.agents.add agent
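
# Reservations whose slot is no longer active are leftovers, e.g. from a
# previous run of the node; deleting them returns their bytes to the
# availability they were carved out of.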
proc deleteInactiveReservations(sales: Sales, activeSlots: seq[Slot]) {.async.} =
  let reservations = sales.context.reservations
  without reservs =? await reservations.all(Reservation):
    return

  let unused = reservs.filter(r => (
    let slotId = slotId(r.requestId, r.slotIndex)
    not activeSlots.any(slot => slot.id == slotId)
  ))

  if unused.len == 0:
    return

  info "Found unused reservations for deletion", unused = unused.len

  for reservation in unused:
    logScope:
      reservationId = reservation.id
      availabilityId = reservation.availabilityId

    if err =? (await reservations.deleteReservation(
      reservation.id, reservation.availabilityId
    )).errorOption:
      error "Failed to delete unused reservation", error = err.msg
    else:
      trace "Deleted unused reservation"

proc mySlots*(sales: Sales): Future[seq[Slot]] {.async.} =
  let market = sales.context.market
  let slotIds = await market.mySlots()
  var slots: seq[Slot] = @[]

  info "Loading active slots", slotsCount = len(slotIds)
  for slotId in slotIds:
    if slot =? (await market.getActiveSlot(slotId)):
      slots.add slot

  return slots

proc activeSale*(sales: Sales, slotId: SlotId): Future[?SalesAgent] {.async.} =
  for agent in sales.agents:
    if slotId(agent.data.requestId, agent.data.slotIndex) == slotId:
      return some agent

  return none SalesAgent
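
# load restores sales state on startup: it fetches the slots this host is
# currently filling, prunes reservations that no longer back an active slot,
# and restarts a SalesAgent for each active slot.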
proc load*(sales: Sales) {.async.} =
  let activeSlots = await sales.mySlots()

  await sales.deleteInactiveReservations(activeSlots)

  for slot in activeSlots:
    let agent = newSalesAgent(
      sales.context,
      slot.request.id,
      slot.slotIndex,
      some slot.request)

    agent.onCleanUp = proc(returnBytes = false, reprocessSlot = false) {.async.} =
      # since workers are not being dispatched, this future has not been created
      # by a worker. Create a dummy one here so we can call sales.cleanUp
      let done: Future[void] = nil
      await sales.cleanUp(agent, returnBytes, reprocessSlot, done)

    # There is no need to assign agent.onFilled as slots loaded from `mySlots`
    # are inherently already filled and so assigning agent.onFilled would be
    # superfluous.

    agent.start(SaleUnknown())
    sales.agents.add agent

proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} =
  ## When availabilities are modified or added, the queue should be unpaused if
  ## it was paused and any slots in the queue should have their `seen` flag
  ## cleared.
  let queue = sales.context.slotQueue

  queue.clearSeenFlags()
  if queue.paused:
    trace "unpausing queue after new availability added"
    queue.unpause()

proc onStorageRequested(sales: Sales,
                        requestId: RequestId,
                        ask: StorageAsk,
                        expiry: UInt256) =

  logScope:
    topics = "marketplace sales onStorageRequested"
    requestId
    slots = ask.slots
    expiry

  let slotQueue = sales.context.slotQueue

  trace "storage requested, adding slots to queue"

  without items =? SlotQueueItem.init(requestId, ask, expiry).catch, err:
    if err of SlotsOutOfRangeError:
      warn "Too many slots, cannot add to queue"
    else:
      warn "Failed to create slot queue items from request", error = err.msg
    return

  for item in items:
    # continue on failure
    if err =? slotQueue.push(item).errorOption:
      if err of SlotQueueItemExistsError:
        error "Failed to push item to queue because it already exists"
      elif err of QueueNotRunningError:
        warn "Failed to push item to queue because queue is not running"
      else:
        warn "Error adding request to SlotQueue", error = err.msg

proc onSlotFreed(sales: Sales,
                 requestId: RequestId,
                 slotIndex: UInt256) =

  logScope:
    topics = "marketplace sales onSlotFreed"
    requestId
    slotIndex

  trace "slot freed, adding to queue"

  proc addSlotToQueue() {.async.} =
    let context = sales.context
    let market = context.market
    let queue = context.slotQueue

    # first attempt to populate the request using existing slot metadata in
    # the queue
    without var found =? queue.populateItem(requestId,
                                            slotIndex.truncate(uint16)):
      trace "no existing request metadata, getting request info from contract"
      # if there's no existing slot for that request, retrieve the request
      # from the contract
      without request =? await market.getRequest(requestId):
        error "unknown request in contract"
        return

      found = SlotQueueItem.init(request, slotIndex.truncate(uint16))

    if err =? queue.push(found).errorOption:
      raise err

  addSlotToQueue()
    .track(sales)
    .catch(proc(err: ref CatchableError) =
      if err of SlotQueueItemExistsError:
        error "Failed to push item to queue because it already exists"
      elif err of QueueNotRunningError:
        warn "Failed to push item to queue because queue is not running"
      else:
        warn "Error adding request to SlotQueue", error = err.msg
    )
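
# The subscribe* procs below register handlers for marketplace contract
# events. A failed subscription is logged but intentionally does not abort
# startup; cancellation is always re-raised.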
proc subscribeRequested(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market

  proc onStorageRequested(requestId: RequestId,
                          ask: StorageAsk,
                          expiry: UInt256) =
    sales.onStorageRequested(requestId, ask, expiry)

  try:
    let sub = await market.subscribeRequests(onStorageRequested)
    sales.subscriptions.add(sub)
  except CancelledError as error:
    raise error
  except CatchableError as e:
    error "Unable to subscribe to storage request events", msg = e.msg

proc subscribeCancellation(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market
  let queue = context.slotQueue

  proc onCancelled(requestId: RequestId) =
    trace "request cancelled (via contract RequestCancelled event), removing all request slots from queue"
    queue.delete(requestId)

  try:
    let sub = await market.subscribeRequestCancelled(onCancelled)
    sales.subscriptions.add(sub)
  except CancelledError as error:
    raise error
  except CatchableError as e:
    error "Unable to subscribe to cancellation events", msg = e.msg

proc subscribeFulfilled*(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market
  let queue = context.slotQueue

  proc onFulfilled(requestId: RequestId) =
    trace "request fulfilled, removing all request slots from queue"
    queue.delete(requestId)

    for agent in sales.agents:
      agent.onFulfilled(requestId)

  try:
    let sub = await market.subscribeFulfillment(onFulfilled)
    sales.subscriptions.add(sub)
  except CancelledError as error:
    raise error
  except CatchableError as e:
    error "Unable to subscribe to storage fulfilled events", msg = e.msg

proc subscribeFailure(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market
  let queue = context.slotQueue

  proc onFailed(requestId: RequestId) =
    trace "request failed, removing all request slots from queue"
    queue.delete(requestId)

    for agent in sales.agents:
      agent.onFailed(requestId)

  try:
    let sub = await market.subscribeRequestFailed(onFailed)
    sales.subscriptions.add(sub)
  except CancelledError as error:
    raise error
  except CatchableError as e:
    error "Unable to subscribe to storage failure events", msg = e.msg

proc subscribeSlotFilled(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market
  let queue = context.slotQueue

  proc onSlotFilled(requestId: RequestId, slotIndex: UInt256) =
    trace "slot filled, removing from slot queue", requestId, slotIndex
    queue.delete(requestId, slotIndex.truncate(uint16))

    for agent in sales.agents:
      agent.onSlotFilled(requestId, slotIndex)

  try:
    let sub = await market.subscribeSlotFilled(onSlotFilled)
    sales.subscriptions.add(sub)
  except CancelledError as error:
    raise error
  except CatchableError as e:
    error "Unable to subscribe to slot filled events", msg = e.msg

proc subscribeSlotFreed(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market

  proc onSlotFreed(requestId: RequestId, slotIndex: UInt256) =
    sales.onSlotFreed(requestId, slotIndex)

  try:
    let sub = await market.subscribeSlotFreed(onSlotFreed)
    sales.subscriptions.add(sub)
  except CancelledError as error:
    raise error
  except CatchableError as e:
    error "Unable to subscribe to slot freed events", msg = e.msg

proc subscribeSlotReservationsFull(sales: Sales) {.async.} =
  let context = sales.context
  let market = context.market
  let queue = context.slotQueue

  proc onSlotReservationsFull(requestId: RequestId, slotIndex: UInt256) =
    trace "reservations for slot full, removing from slot queue", requestId, slotIndex
    queue.delete(requestId, slotIndex.truncate(uint16))

  try:
    let sub = await market.subscribeSlotReservationsFull(onSlotReservationsFull)
    sales.subscriptions.add(sub)
  except CancelledError as error:
    raise error
  except CatchableError as e:
    error "Unable to subscribe to slot reservations full events", msg = e.msg
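
# startSlotQueue wires the slot queue's processing callback to processSlot,
# spawns the queue, and hooks onAvailabilityAdded so that new availabilities
# unpause the queue and clear `seen` flags.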
proc startSlotQueue(sales: Sales) {.async.} =
  let slotQueue = sales.context.slotQueue
  let reservations = sales.context.reservations

  slotQueue.onProcessSlot =
    proc(item: SlotQueueItem, done: Future[void]) {.async.} =
      trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex
      sales.processSlot(item, done)

  asyncSpawn slotQueue.start()

  proc onAvailabilityAdded(availability: Availability) {.async.} =
    await sales.onAvailabilityAdded(availability)

  reservations.onAvailabilityAdded = onAvailabilityAdded

proc subscribe(sales: Sales) {.async.} =
  await sales.subscribeRequested()
  await sales.subscribeFulfilled()
  await sales.subscribeFailure()
  await sales.subscribeSlotFilled()
  await sales.subscribeSlotFreed()
  await sales.subscribeCancellation()
  await sales.subscribeSlotReservationsFull()

proc unsubscribe(sales: Sales) {.async.} =
  for sub in sales.subscriptions:
    try:
      await sub.unsubscribe()
    except CancelledError as error:
      raise error
    except CatchableError as e:
      error "Unable to unsubscribe from subscription", error = e.msg

proc start*(sales: Sales) {.async.} =
  await sales.load()
  await sales.startSlotQueue()
  await sales.subscribe()
  sales.running = true

proc stop*(sales: Sales) {.async.} =
  trace "stopping sales"
  sales.running = false
  await sales.context.slotQueue.stop()
  await sales.unsubscribe()
  await sales.trackedFutures.cancelTracked()

  for agent in sales.agents:
    await agent.stop()

  sales.agents = @[]