2023-09-08 15:09:15 -06:00
|
|
|
|
2023-09-11 14:48:53 -06:00
|
|
|
# This module spawns backend work onto OS threads; refuse to compile
# without thread support rather than failing obscurely at runtime.
when not compileOption("threads"):
  {.error: "This module requires --threads:on compilation flag".}
|
|
|
|
|
|
2023-09-08 15:09:15 -06:00
|
|
|
import pkg/upraises
|
|
|
|
|
|
|
|
|
|
push: {.upraises: [].}
|
|
|
|
|
|
2023-09-15 13:08:38 -06:00
|
|
|
import std/tables
|
2023-09-25 22:35:34 -07:00
|
|
|
import std/locks
|
2023-09-26 17:23:42 -07:00
|
|
|
import std/sugar
|
|
|
|
|
|
2023-09-08 15:09:15 -06:00
|
|
|
|
|
|
|
|
import pkg/chronos
|
|
|
|
|
import pkg/chronos/threadsync
|
|
|
|
|
import pkg/questionable
|
|
|
|
|
import pkg/questionable/results
|
|
|
|
|
import pkg/taskpools
|
2023-09-28 17:36:05 -07:00
|
|
|
import std/isolation
|
2023-09-15 13:08:38 -06:00
|
|
|
import pkg/chronicles
|
2023-09-26 14:53:12 -07:00
|
|
|
import pkg/threading/smartptrs
|
2023-09-08 15:09:15 -06:00
|
|
|
|
|
|
|
|
import ../key
|
|
|
|
|
import ../query
|
2023-09-28 17:14:27 -07:00
|
|
|
import ./backend
|
2023-09-28 17:47:13 -07:00
|
|
|
# import ./fsbackend
|
|
|
|
|
# import ./sqlbackend
|
2023-09-08 15:09:15 -06:00
|
|
|
|
2023-09-15 13:08:38 -06:00
|
|
|
import ./asyncsemaphore
|
2023-09-12 13:51:01 -06:00
|
|
|
import ./databuffer
|
2023-09-19 18:43:00 -06:00
|
|
|
import ./threadresult
|
2023-09-08 15:09:15 -06:00
|
|
|
|
2023-09-28 17:36:05 -07:00
|
|
|
# Re-export the types callers need to interact with thread results
# (smart pointers and isolation are required to move values across threads).
export threadresult, smartptrs, isolation, chronicles

logScope:
  topics = "datastore threadproxy"
|
2023-09-08 15:09:15 -06:00
|
|
|
|
2023-09-19 18:43:00 -06:00
|
|
|
type
  TaskCtxObj*[T: ThreadTypes] = object
    ## State shared between the async frontend and a task worker thread.
    res*: ThreadResult[T]        ## result slot filled in by the worker
    signal: ThreadSignalPtr      ## fired by the worker when `res` is ready
    running*: bool               ## used to mark when a task worker is running
    cancelled*: bool             ## used to cancel a task before it's started
    nextSignal: ThreadSignalPtr  ## fired by the async side to resume the worker
                                 ## (only used by streaming tasks, e.g. query)

  TaskCtx*[T] = SharedPtr[TaskCtxObj[T]]
    ## Task context object.
    ## This is a SharedPtr to make the query iter simpler

  ThreadProxy*[BT] = object
    ## Async facade that dispatches datastore calls for backend `BT`
    ## onto a taskpool worker thread.
    tp: Taskpool
    backend*: BT
    semaphore: AsyncSemaphore # semaphore is used for backpressure \
      # to avoid exhausting file descriptors
|
2023-09-15 16:40:46 -06:00
|
|
|
|
2023-09-27 16:16:18 -07:00
|
|
|
proc newTaskCtx*[T](tp: typedesc[T],
                    signal: ThreadSignalPtr,
                    nextSignal: ThreadSignalPtr = nil): TaskCtx[T] =
  ## Allocate a shared task context for a `T`-typed result.
  ## `nextSignal` is only needed by streaming tasks (query); it defaults
  ## to nil for one-shot tasks.
  newSharedPtr(TaskCtxObj[T](signal: signal, nextSignal: nextSignal))
|
2023-09-27 11:51:55 -07:00
|
|
|
|
2023-09-26 15:29:35 -07:00
|
|
|
proc setCancelled[T](ctx: TaskCtx[T]) =
  ## Flag the task as cancelled; a worker that has not yet started
  ## (see `setRunning`) will bail out without doing any work.
  ctx[].cancelled = true
|
2023-09-25 22:35:34 -07:00
|
|
|
|
2023-09-26 14:53:12 -07:00
|
|
|
proc setRunning[T](ctx: TaskCtx[T]): bool =
  ## Attempt to transition the task into the running state.
  ## Returns false when the task was cancelled before it started,
  ## in which case the worker must not run the task body.
  if ctx[].cancelled:
    return false
  ctx[].running = true
  true
|
2023-09-26 14:53:12 -07:00
|
|
|
proc setDone[T](ctx: TaskCtx[T]) =
  ## Mark the task as no longer running (worker finished or aborted).
  ctx[].running = false
|
2023-09-26 13:25:15 -07:00
|
|
|
|
2023-09-25 22:51:34 -07:00
|
|
|
proc acquireSignal(): ?!ThreadSignalPtr =
  ## Allocate a new ThreadSignalPtr, wrapping the allocation error into a
  ## `?!` failure instead of raising.
  ## NOTE(review): each signal consumes a file descriptor — callers must
  ## close it when done (the semaphore in ThreadProxy bounds how many are
  ## live at once).
  let signal = ThreadSignalPtr.new()
  if signal.isErr():
    # fix: "aquire" -> "acquire" in the error message
    failure (ref CatchableError)(msg: "failed to acquire ThreadSignalPtr: " &
      signal.error())
  else:
    success signal.get()
|
|
|
|
|
|
2023-09-28 13:47:48 -07:00
|
|
|
template executeTask*[T](ctx: TaskCtx[T], blk: untyped) =
  ## Runs `blk` (a backend command returning a Result) on a worker thread,
  ## handling cleanup after cancels/errors.
  ##
  ## The outcome is stored into `ctx[].res`; the completion signal is always
  ## fired from the `finally` clause so the async side never hangs.
  try:
    # bail out early if the async side cancelled us before we started
    if not ctx.setRunning():
      return

    # run the backend command and translate its Result into the shared slot
    let res = `blk`
    if res.isOk():
      when T is void:
        ctx[].res.ok()
      else:
        ctx[].res.ok(res.get())
    else:
      ctx[].res.err res.error().toThreadErr()

  except CatchableError as exc:
    trace "Unexpected exception thrown in async task", exc = exc.msg
    ctx[].res.err exc.toThreadErr()
  finally:
    # always clear the running flag and wake the awaiting async proc
    ctx.setDone()
    discard ctx[].signal.fireSync()
|
|
|
|
|
|
2023-09-28 18:19:05 -07:00
|
|
|
template dispatchTaskWrap[BT](self: ThreadProxy[BT],
                              signal: ThreadSignalPtr,
                              blk: untyped
                             ): auto =
  ## Spawn helper: exposes the backend as `ds` to `blk`, runs `blk`
  ## (which is expected to spawn a task using a `ctx` from the caller's
  ## scope), then awaits the task's completion signal.
  var ds {.used, inject.} = self.backend

  proc runSpawn() =
    `blk`
  runSpawn()

  # block until the worker fires ctx[].signal
  await wait(ctx[].signal)
|
|
|
|
|
|
2023-09-28 18:19:05 -07:00
|
|
|
template dispatchTask*[BT](self: ThreadProxy[BT],
                           signal: ThreadSignalPtr,
                           blk: untyped
                          ): auto =
  ## Handles dispatching a task from an async context.
  ##
  ## `blk` is the actions; it has `ctx` and `ds` variables in scope
  ## (note that `ds` is a generic). On cancellation or error the task is
  ## flagged cancelled and the exception re-raised; the signal is always
  ## closed and the backpressure semaphore released.
  try:
    dispatchTaskWrap[BT](self, signal, blk)
  except CancelledError as exc:
    trace "Cancelling thread future!", exc = exc.msg
    ctx.setCancelled()
    raise exc
  except CatchableError as exc:
    ctx.setCancelled()
    raise exc
  finally:
    # reclaim the file descriptor and the semaphore slot in all cases
    discard ctx[].signal.close()
    self.semaphore.release()
|
|
|
|
|
|
2023-09-26 14:53:12 -07:00
|
|
|
proc hasTask[T, DB](ctx: TaskCtx[T], ds: DB, key: KeyId) {.gcsafe.} =
  ## Worker-thread body: run the backend `has` command for `key`.
  mixin has
  executeTask(ctx):
    has(ds, key)
|
2023-09-15 13:08:38 -06:00
|
|
|
|
2023-09-28 18:19:05 -07:00
|
|
|
proc has*[BT](self: ThreadProxy[BT],
              key: Key): Future[?!bool] {.async.} =
  ## Check whether `key` exists, running the backend call on a worker thread.
  await self.semaphore.acquire()
  # fix: previous `acquireSignal().get()` raised on failure and leaked the
  # semaphore slot; propagate the failure and release the slot instead
  without signal =? acquireSignal(), err:
    self.semaphore.release()
    return failure err

  let ctx = newTaskCtx(bool, signal = signal)
  dispatchTask(self, signal):
    let key = KeyId.new key.id()
    self.tp.spawn hasTask(ctx, ds, key)
  return ctx[].res.toRes(v => v)
|
2023-09-25 21:44:26 -07:00
|
|
|
|
2023-09-27 21:07:31 -07:00
|
|
|
proc deleteTask[T, DB](ctx: TaskCtx[T], ds: DB;
                       key: KeyId) {.gcsafe.} =
  ## Worker-thread body: run the backend `delete` command for `key`.
  mixin delete
  executeTask(ctx):
    delete(ds, key)
|
2023-09-25 21:44:26 -07:00
|
|
|
|
2023-09-28 18:19:05 -07:00
|
|
|
proc delete*[BT](self: ThreadProxy[BT],
                 key: Key): Future[?!void] {.async.} =
  ## Delete `key`, running the backend call on a worker thread.
  await self.semaphore.acquire()
  # fix: previous `acquireSignal().get()` raised on failure and leaked the
  # semaphore slot; propagate the failure and release the slot instead
  without signal =? acquireSignal(), err:
    self.semaphore.release()
    return failure err

  let ctx = newTaskCtx(void, signal = signal)
  dispatchTask(self, signal):
    let key = KeyId.new key.id()
    self.tp.spawn deleteTask(ctx, ds, key)

  return ctx[].res.toRes()
|
2023-09-25 21:44:26 -07:00
|
|
|
|
2023-09-28 18:19:05 -07:00
|
|
|
proc delete*[BT](self: ThreadProxy[BT],
                 keys: seq[Key]): Future[?!void] {.async.} =
  ## Delete a batch of keys sequentially; stops at the first failure
  ## and propagates it.
  for item in keys:
    if err =? (await self.delete(item)).errorOption:
      return failure err

  return success()
|
2023-09-25 21:44:26 -07:00
|
|
|
|
2023-09-26 16:38:46 -07:00
|
|
|
|
2023-09-26 14:53:12 -07:00
|
|
|
proc putTask[T, DB](ctx: TaskCtx[T], ds: DB;
                    key: KeyId,
                    data: DataBuffer) {.gcsafe, nimcall.} =
  ## Worker-thread body: run the backend `put` command for `key`/`data`.
  mixin put
  executeTask(ctx):
    put(ds, key, data)
|
2023-09-25 21:44:26 -07:00
|
|
|
|
2023-09-28 18:19:05 -07:00
|
|
|
proc put*[BT](self: ThreadProxy[BT],
              key: Key,
              data: seq[byte]): Future[?!void] {.async.} =
  ## Store `data` under `key`, running the backend call on a worker thread.
  ## `data` is copied into a thread-safe DataBuffer before crossing threads.
  await self.semaphore.acquire()
  # fix: previous `acquireSignal().get()` raised on failure and leaked the
  # semaphore slot; propagate the failure and release the slot instead
  without signal =? acquireSignal(), err:
    self.semaphore.release()
    return failure err

  let ctx = newTaskCtx(void, signal = signal)
  dispatchTask(self, signal):
    let key = KeyId.new key.id()
    let data = DataBuffer.new data
    self.tp.spawn putTask(ctx, ds, key, data)

  return ctx[].res.toRes()
|
2023-09-28 17:14:27 -07:00
|
|
|
|
2023-09-28 18:19:05 -07:00
|
|
|
proc put*[E, DB](self: ThreadProxy[DB],
                 batch: seq[E]): Future[?!void] {.async.} =
  ## Store a batch of (key, data) entries sequentially; stops at the
  ## first failure and propagates it.
  for entry in batch:
    if err =? (await self.put(entry.key, entry.data)).errorOption:
      return failure err

  return success()
|
|
|
|
|
|
2023-09-25 21:44:26 -07:00
|
|
|
|
2023-09-27 21:07:31 -07:00
|
|
|
proc getTask[DB](ctx: TaskCtx[DataBuffer], ds: DB;
                 key: KeyId) {.gcsafe, nimcall.} =
  ## Worker-thread body: run the backend `get` command for `key`.
  mixin get
  executeTask(ctx):
    get(ds, key)
|
2023-09-25 21:44:26 -07:00
|
|
|
|
2023-09-28 18:19:05 -07:00
|
|
|
proc get*[BT](self: ThreadProxy[BT],
              key: Key,
              ): Future[?!seq[byte]] {.async.} =
  ## Fetch the value for `key`, running the backend call on a worker thread.
  ## The worker's DataBuffer result is copied back into a seq[byte].
  await self.semaphore.acquire()
  # fix: previous `acquireSignal().get()` raised on failure and leaked the
  # semaphore slot; propagate the failure and release the slot instead
  without signal =? acquireSignal(), err:
    self.semaphore.release()
    return failure err

  let ctx = newTaskCtx(DataBuffer, signal = signal)
  dispatchTask(self, signal):
    let key = KeyId.new key.id()
    self.tp.spawn getTask(ctx, ds, key)

  return ctx[].res.toRes(v => v.toSequence())
|
2023-09-25 21:44:26 -07:00
|
|
|
|
2023-09-28 18:19:05 -07:00
|
|
|
proc close*[BT](self: ThreadProxy[BT]): Future[?!void] {.async.} =
  ## Close the proxy: wake/close all semaphore waiters, then close the
  ## backend, returning the backend's result.
  await self.semaphore.closeAll()
  return self.backend.close()
|
2023-09-25 21:44:26 -07:00
|
|
|
|
2023-09-26 13:25:15 -07:00
|
|
|
type
  QResult = DbQueryResponse[KeyId, DataBuffer]
    ## shorthand for a single query-response row crossing the thread boundary
|
|
|
|
|
|
2023-09-27 21:07:31 -07:00
|
|
|
proc queryTask[DB](
    ctx: TaskCtx[QResult],
    ds: DB,
    query: DbQuery[KeyId],
) =
  ## Worker-thread body for a streaming query.
  ##
  ## Protocol with the async side: the worker fires `signal` after each
  ## result is placed in `ctx[].res`, then blocks on `nextSignal` until
  ## the async iterator requests the next row. The final result is an
  ## empty ok row produced by `executeTask` when the loop completes.
  mixin queryIter
  executeTask(ctx):
    # we execute this all inside `executeTask`
    # so we need to return a final result
    let handleRes = query(ds, query)
    if handleRes.isErr():
      # set error and exit executeTask, which will fire final signal
      (?!QResult).err(handleRes.error())
    else:
      # otherwise manually set an empty ok result for the initial handshake
      ctx[].res.ok (KeyId.none, DataBuffer(), )
      discard ctx[].signal.fireSync()
      if not ctx[].nextSignal.waitSync(10.seconds).get():
        raise newException(DeadThreadDefect, "queryTask timed out")

      var handle = handleRes.get()
      for item in handle.queryIter():
        # wait for next request from async thread
        if ctx[].cancelled:
          # cancel iter, then run next cycle so it'll finish and close
          handle.cancel = true
          continue
        else:
          ctx[].res = item.mapErr() do(exc: ref CatchableError) -> ThreadResErr:
            exc

        discard ctx[].signal.fireSync()
        if not ctx[].nextSignal.waitSync(10.seconds).get():
          raise newException(DeadThreadDefect, "queryTask timed out")

      # set final result
      (?!QResult).ok((KeyId.none, DataBuffer()))
|
2023-09-25 21:44:26 -07:00
|
|
|
|
2023-09-28 18:19:05 -07:00
|
|
|
proc query*[BT](self: ThreadProxy[BT],
                q: Query
                ): Future[?!QueryIter] {.async.} =
  ## Performs an async query.
  ## Keeps one thread running queryTask until finished: each `iter.next`
  ## awaits the worker's result signal, then fires `nextSignal` to let the
  ## worker produce the following row.
  ##
  await self.semaphore.acquire()
  # fix: previous `acquireSignal().get()` raised on failure and leaked the
  # semaphore slot (and, for the second signal, the first signal's fd);
  # propagate failures and clean up instead
  without signal =? acquireSignal(), err:
    self.semaphore.release()
    return failure err
  without nextSignal =? acquireSignal(), err:
    discard signal.close()
    self.semaphore.release()
    return failure err

  let ctx = newTaskCtx(QResult, signal = signal, nextSignal = nextSignal)

  proc iterDispose() {.async.} =
    ## cancel the worker, wake it one last time, and reclaim resources
    ctx.setCancelled()
    await ctx[].nextSignal.fire()
    discard ctx[].signal.close()
    discard ctx[].nextSignal.close()
    self.semaphore.release()

  try:
    let query = dbQuery(
      key = KeyId.new q.key.id(),
      value = q.value, limit = q.limit, offset = q.offset, sort = q.sort)

    # setup initial queryTask
    dispatchTaskWrap(self, signal):
      self.tp.spawn queryTask(ctx, ds, query)
    await ctx[].nextSignal.fire()

    var lock = newAsyncLock() # serialize querying under threads
    var iter = QueryIter.new()
    iter.dispose = proc (): Future[?!void] {.async.} =
      # fix: iterDispose() was called without await, discarding the Future
      # so cleanup could still be pending when success() was returned
      await iterDispose()
      success()

    iter.next = proc(): Future[?!QueryResponse] {.async.} =
      let ctx = ctx
      try:
        trace "About to query"
        if lock.locked:
          return failure (ref DatastoreError)(
            msg: "Should always await query features")
        if iter.finished == true:
          return failure (ref QueryEndedError)(
            msg: "Calling next on a finished query!")

        # wait for the worker to publish the next row
        await wait(ctx[].signal)
        if not ctx[].running:
          iter.finished = true

        defer:
          # always let the worker proceed to the next row (or finish)
          await ctx[].nextSignal.fire()

        if ctx[].res.isErr():
          return err(ctx[].res.error())
        else:
          let qres = ctx[].res.get()
          let key = qres.key.map(proc (k: KeyId): Key = k.toKey())
          let data = qres.data.toSequence()
          return (?!QueryResponse).ok((key: key, data: data))
      except CancelledError as exc:
        trace "Cancelling thread future!", exc = exc.msg
        ctx.setCancelled()
        await iterDispose() # todo: is this valid?
        raise exc

    return success iter
  except CancelledError as exc:
    trace "Cancelling thread future!", exc = exc.msg
    ctx.setCancelled()
    await iterDispose()
    raise exc
|
2023-09-13 14:41:16 -06:00
|
|
|
|
2023-09-28 18:19:05 -07:00
|
|
|
proc new*[DB](self: type ThreadProxy,
              db: DB,
              withLocks = static false,
              tp: Taskpool
             ): ?!ThreadProxy[DB] =
  ## Create a ThreadProxy wrapping backend `db` on taskpool `tp`.
  ##
  ## The semaphore is sized to numThreads - 1 so one pool thread always
  ## remains free (a streaming query task can occupy a thread long-term).
  ## NOTE(review): `withLocks` is accepted but unused here — presumably kept
  ## for interface compatibility; verify against callers.
  doAssert tp.numThreads > 1, "ThreadProxy requires at least 2 threads"

  success ThreadProxy[DB](
    tp: tp,
    backend: db,
    semaphore: AsyncSemaphore.new(tp.numThreads - 1)
  )
|