Mirror of https://github.com/status-im/nim-codex.git
Commit e6a387e8e8
* Add seen flag
* Add MockSlotQueueItem and better prioritisation tests
* Update seen priority, and include it in SlotQueueItem.init
* Re-add processed slots to the queue if the sale was ignored or errored
* Add pausing of the queue:
  - when processing slots in the queue, pause the queue if the item was marked seen
  - if an availability's size is increased, trigger the onAvailabilityAdded callback
  - in sales, when an availability is added, clear the 'seen' flags, then unpause the queue
  - when items are pushed to the queue, unpause the queue
* Remove unused NoMatchingAvailabilityError from slotqueue; the slot queue should have nothing to do with availabilities
* When all availabilities are empty, pause the queue. An empty availability is defined as size < DefaultBlockSize, as this means even the smallest possible request could not be served. However, this is up for discussion.
* Remove availability from the onAvailabilitiesEmptied callback
* Refactor onAvailabilityAdded and onAvailabilitiesEmptied: they are now only called from reservations.update (and eventually reservations.delete once implemented)
  - add an empty routine for Availability and Reservation
  - add an allEmpty routine for Availability and Reservation, which returns true when all Availability or Reservation objects in the datastore are empty
* SlotQueue test support updates
* Sales module test support updates
* Reservations module tests for queue pausing
* Sales module tests for queue pausing, including tests for the sales states cancelled, errored, and ignored, to ensure onCleanUp is called with the correct parameters
* SlotQueue module tests for queue pausing
* Fix existing sales test
* PR feedback:
  - indent `self.unpause`
  - update the comment for `clearSeenFlags`
* reprocessSlot in SaleErrored only when coming from downloading
* Remove pausing of the queue when availabilities are "emptied". This is not necessary, given that the node would not be able to service slots once all its availabilities' freeSize values are too small for the slots in the queue, and the queue would then be paused anyway. Add a test that asserts the queue is paused once the freeSize of the availabilities drops too low to fill the slots in the queue.
* Update clearing of seen flags. The asyncheapqueue update overload would need to check index bounds, and ultimately a different solution was found using the mitems iterator.
* Fix test: request.id was different before updating request.ask.slots, and that id was used to set the state in mockmarket
* Change the filled/cleanup future to nil, so no await is needed
* Add a wait to allow items to be added to the queue
* Do not unpause the queue when seen items are pushed
* Re-add a seen item back to the queue once paused. Previously, when a seen item was processed, it was first popped off the queue, and the queue was then paused, waiting to process that item once unpaused. Now, when a seen item is processed, it is popped off the queue, the queue is paused, and the item is re-added to the queue; the queue then waits until unpaused before it continues popping items. If the item were not re-added, it would be processed immediately once the queue was unpaused, even though other, higher-priority items may have been pushed to the queue in the meantime. The queue would not be unpaused if those added items were already seen. In particular, this may happen when items ignored due to lack of availability are re-added to a paused queue. Those ignored items will likely have a higher priority than the item that was just seen (because the seen item was processed first), causing the queue to be paused.
* Address PR comments
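
The pause-on-seen behaviour described above can be summarised with a minimal sketch. This is illustrative only: the SlotItem and PausableQueue types and the push/processNext procs are assumptions made for this example; the real SlotQueue is an async priority queue with its own item type.

# Illustrative sketch only; the names and types here are not nim-codex APIs.
type
  SlotItem = object
    id: int
    seen: bool      # set once the item has been processed and ignored/errored

  PausableQueue = ref object
    items: seq[SlotItem]
    paused: bool

proc push(queue: PausableQueue, item: SlotItem) =
  queue.items.add item
  if not item.seen:
    # unseen work arrived, so the queue may be able to make progress again
    queue.paused = false

proc processNext(queue: PausableQueue) =
  if queue.paused or queue.items.len == 0:
    return
  let item = queue.items.pop()
  if item.seen:
    # everything left has already been tried: pause, and re-add the item so
    # that higher-priority items pushed in the meantime are considered before
    # it once the queue is unpaused
    queue.paused = true
    queue.items.add item
  else:
    # process the slot; if the sale is ignored or errored, the item is
    # re-added with seen = true
    discard

Clearing the seen flags when an availability is added, together with unpausing when unseen items are pushed, then lets processing resume as described in the bullets above.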
import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import pkg/stint
import pkg/upraises
import ../contracts/requests
import ../errors
import ../logutils
import ./statemachine
import ./salescontext
import ./salesdata
import ./reservations

export reservations

logScope:
  topics = "marketplace sales"

type
  SalesAgent* = ref object of Machine
    context*: SalesContext
    data*: SalesData
    subscribed: bool
    # Slot-level callbacks.
    onCleanUp*: OnCleanUp
    onFilled*: ?OnFilled

  OnCleanUp* = proc (returnBytes = false, reprocessSlot = false): Future[void] {.gcsafe, upraises: [].}
  OnFilled* = proc(request: StorageRequest,
                   slotIndex: UInt256) {.gcsafe, upraises: [].}

  SalesAgentError = object of CodexError
  AllSlotsFilledError* = object of SalesAgentError
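
For illustration, handlers matching the two callback types above could be wired in as follows. This is a hypothetical sketch: the handler bodies are placeholders, `agent` is assumed to exist, and in nim-codex the sales module supplies the real handlers.

# Hypothetical wiring of the slot-level callbacks.
let cleanUp: OnCleanUp = proc(returnBytes = false, reprocessSlot = false) {.async.} =
  discard # e.g. release the reservation and optionally re-add the slot to the queue

let filled: OnFilled = proc(request: StorageRequest, slotIndex: UInt256) =
  discard # e.g. record that this slot of the request is now filled

agent.onCleanUp = cleanUp
agent.onFilled = some filled
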
func `==`*(a, b: SalesAgent): bool =
  a.data.requestId == b.data.requestId and
  a.data.slotIndex == b.data.slotIndex

proc newSalesAgent*(context: SalesContext,
                    requestId: RequestId,
                    slotIndex: UInt256,
                    request: ?StorageRequest): SalesAgent =
  var agent = SalesAgent.new()
  agent.context = context
  agent.data = SalesData(
    requestId: requestId,
    slotIndex: slotIndex,
    request: request)
  return agent

proc retrieveRequest*(agent: SalesAgent) {.async.} =
  ## Fetches the request from the market if it was not provided at construction.
  let data = agent.data
  let market = agent.context.market
  if data.request.isNone:
    data.request = await market.getRequest(data.requestId)

proc retrieveRequestState*(agent: SalesAgent): Future[?RequestState] {.async.} =
  ## Retrieves the request's current state from the market, if available.
  let data = agent.data
  let market = agent.context.market
  return await market.requestState(data.requestId)

func state*(agent: SalesAgent): ?string =
  ## Returns the name of the agent's current state machine state, if any.
  proc description(state: State): string =
    $state
  agent.query(description)

proc subscribeCancellation(agent: SalesAgent) {.async.} =
  let data = agent.data
  let clock = agent.context.clock

  proc onCancelled() {.async.} =
    without request =? data.request:
      return

    let market = agent.context.market
    let expiry = await market.requestExpiresAt(data.requestId)

    while true:
      # Wait until just after the request's expiry (or one second from now,
      # if the expiry has already passed).
      let deadline = max(clock.now, expiry) + 1
      trace "Waiting for request to be cancelled", now=clock.now, expiry=expiry, deadline=deadline
      await clock.waitUntil(deadline)

      without state =? await agent.retrieveRequestState():
        error "Unknown request", requestId = data.requestId
        return

      case state
      of RequestState.New:
        discard
      of RequestState.Cancelled:
        agent.schedule(cancelledEvent(request))
        break
      of RequestState.Started, RequestState.Finished, RequestState.Failed:
        break

      debug "The request is not yet cancelled, even though it should be. Waiting for some more time.", currentState = state, now=clock.now

  data.cancelled = onCancelled()

method onFulfilled*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, upraises: [].} =
  if agent.data.requestId == requestId and
     not agent.data.cancelled.isNil:
    # The request was fulfilled, so stop waiting for it to be cancelled.
    agent.data.cancelled.cancel()

method onFailed*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, upraises: [].} =
  without request =? agent.data.request:
    return
  if agent.data.requestId == requestId:
    agent.schedule(failedEvent(request))

method onSlotFilled*(agent: SalesAgent,
                     requestId: RequestId,
                     slotIndex: UInt256) {.base, gcsafe, upraises: [].} =

  if agent.data.requestId == requestId and
     agent.data.slotIndex == slotIndex:
    agent.schedule(slotFilledEvent(requestId, slotIndex))

proc subscribe*(agent: SalesAgent) {.async.} =
  if agent.subscribed:
    return

  await agent.subscribeCancellation()
  agent.subscribed = true

proc unsubscribe*(agent: SalesAgent) {.async.} =
  if not agent.subscribed:
    return

  let data = agent.data
  if not data.cancelled.isNil and not data.cancelled.finished:
    await data.cancelled.cancelAndWait()
    data.cancelled = nil

  agent.subscribed = false

proc stop*(agent: SalesAgent) {.async.} =
  await Machine(agent).stop()
  await agent.unsubscribe()
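
As a rough lifecycle sketch (illustrative only, not taken from the codebase): the hypothetical runAgent proc below shows the order in which the procs defined in this file are typically used; in practice the sales module also starts the agent's state machine between subscribing and stopping.

# Hypothetical driver; context, requestId and slotIndex are assumed to come
# from the surrounding sales module and slot queue.
proc runAgent(context: SalesContext,
              requestId: RequestId,
              slotIndex: UInt256) {.async.} =
  let agent = newSalesAgent(context, requestId, slotIndex, none(StorageRequest))
  await agent.retrieveRequest()   # fetch the request, since none was passed in
  await agent.subscribe()         # start the background wait for cancellation/expiry
  # ... the sale's state machine runs here ...
  await agent.stop()              # stops the state machine and unsubscribes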