import pkg/chronos
import pkg/chronos/threadsync
import pkg/questionable
import pkg/questionable/results
import stew/results
import pkg/upraises
import pkg/taskpools

import ./sharedptr
import ../key
import ../query
import ./datastore
import ./databuffer
import ./threadresults

# import pretty

export key, query, sharedptr, databuffer
export threadresults

push: {.upraises: [].}

## Design Notes
## ============
##
## This is the threaded backend for `threadproxyds.nim`. It requires
## a `TResult[T]` to already be allocated and uses it to "return" the
## data. The `taskpools` worker uses `TResult[T]` to signal Chronos
## that the associated future is ready. The future on the
## `threadproxyds` frontend can then read the results from `TResult[T]`.
##
## `TResult[T]` handles the shared-memory aspect so each threaded task
## here can rely on having the memory until it finishes its work. Even
## if the future exits early, the thread workers won't need to worry
## about using freed memory.
##
## The `FlowVar[T]` in `taskpools` isn't really suitable because we
## want to use Chronos's `ThreadSignalPtr` notification mechanism.
## Likewise, the signaling mechanism in `taskpools` isn't suitable for
## the same reason: we need to notify Chronos when our work is done.
##
## Potential Issues
## ================
##
## One outstanding issue with this setup and a ThreadSignalPtr pool:
## if the `threadproxyds` frontend calls `tresult.release()` early due
## to a `myFuture.cancel()` scenario, the task here would still fire
## `tresult[].signal.fireAsync()`. If another `threadproxyds` call had
## acquired that same ThreadSignalPtr, it could receive the stray
## signal while its `TResult` is still empty. This shouldn't corrupt
## memory, but that `TResult` would return "empty".
##
## Note: it's unclear whether using ThreadSignalPtr's directly and
## closing them would work, as file descriptors are just ints on
## Linux/macOS and can be racy. If both sides don't `close` the
## AsyncFDs that are used, you could get events from another
## pipe/socketpair that shares the same AsyncFD ints. There's probably
## a solution to this, but it needs more consideration.
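##
## Each task below follows the same basic pattern: run the blocking
## datastore operation, store either a success or a failure into the
## pre-allocated `TResult`, then fire the signal so the frontend's
## Chronos future wakes up. A minimal sketch of that pattern (not an
## actual task from this module):
##
## .. code-block:: nim
##   proc someTask(sig: SharedSignal, ret: TResult[bool],
##                 tds: ThreadDatastorePtr, kb: KeyBuffer) =
##     try:
##       let res = waitFor tds[].ds.has(kb.toKey())
##       if res.isErr:
##         ret.failure(res.error())
##       else:
##         ret.success(res.get())
##     except CatchableError as err:
##       ret.failure(err)
##     # wake the Chronos future waiting on this signal
##     discard sig.fireSync()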

type
  ThreadDatastore* = object
    tp*: Taskpool
    ds*: Datastore

  ThreadDatastorePtr* = SharedPtr[ThreadDatastore]

  QueryIterStore* = object
    it*: QueryIter

  QueryIterPtr* = SharedPtr[QueryIterStore]
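
# A minimal sketch of wiring these types together, assuming ./sharedptr
# exposes a `newSharedPtr` constructor (not verified here) and using an
# arbitrary thread count:
#
#   var tds = newSharedPtr(ThreadDatastore)
#   tds[].tp = Taskpool.new(numThreads = 4)
#   tds[].ds = someDatastore   # any concrete Datastore implementation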

proc hasTask*(
    sig: SharedSignal,
    ret: TResult[bool],
    tds: ThreadDatastorePtr,
    kb: KeyBuffer,
) =
  let key = kb.toKey()

  try:
    let res = waitFor tds[].ds.has(key)
    if res.isErr:
      ret.failure(res.error())
    else:
      ret.success(res.get())
  except CatchableError as err:
    ret.failure(err)
  # fire the signal unconditionally so the frontend future wakes up
  # even when the task failed with an exception
  discard sig.fireSync()

proc deleteTask*(
    sig: SharedSignal,
    ret: TResult[void],
    tds: ThreadDatastorePtr,
    kb: KeyBuffer,
) =
  let key = kb.toKey()

  let res = (waitFor tds[].ds.delete(key)).catch
  if res.isErr:
    ret.failure(res.error())
  else:
    ret.success()

  discard sig.fireSync()

proc getTask*(
    sig: SharedSignal,
    ret: TResult[DataBuffer],
    tds: ThreadDatastorePtr,
    kb: KeyBuffer,
) =
  echoed "getTask: ", $getThreadId(), " kb: ", kb.repr
  let key = kb.toKey()
  echoed "getTask: key: ", $key
  try:
    let res = waitFor tds[].ds.get(key)
    if res.isErr:
      ret.failure(res.error())
    else:
      let db = DataBuffer.new res.get()
      ret.success(db)
  except CatchableError as err:
    ret.failure(err)
  # fire the signal unconditionally, matching `hasTask` above
  discard sig.fireSync()

import std/os  # only needed for the commented-out os.sleep below

proc putTask*(
    sig: SharedSignal,
    ret: TResult[void],
    tds: ThreadDatastorePtr,
    kb: KeyBuffer,
    db: DataBuffer,
) =
  # os.sleep(1_000)
  let key = kb.toKey()
  let data = db.toSeq(byte)

  let res = (waitFor tds[].ds.put(key, data)).catch
  if res.isErr:
    ret.failure(res.error())
  else:
    ret.success()

  discard sig.fireSync()
  sig.decr()
  echoed "putTask: FINISH\n"

proc queryTask*(
    sig: SharedSignal,
    ret: TResult[QueryResponseBuffer],
    tds: ThreadDatastorePtr,
    qiter: QueryIterPtr,
) =
  try:
    # os.sleep(100)
    without res =? waitFor(qiter[].it.next()), err:
      ret.failure(err)
      # fire the signal before the early exit so the frontend
      # still wakes up
      discard sig.fireSync()
      return

    let qrb = res.toBuffer()
    ret.success(qrb)
  except Exception as exc:
    ret.failure(exc)

  discard sig.fireSync()

proc query*(
    sig: SharedSignal,
    ret: TResult[QueryResponseBuffer],
    tds: ThreadDatastorePtr,
    qiter: QueryIterPtr,
) =
  ## Spawns one `queryTask` iterator step on the taskpool.
  tds[].tp.spawn queryTask(sig, ret, tds, qiter)
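
# A rough sketch of how a frontend might drain a query by repeatedly
# spawning steps via `query`. `sig.wait()` and `ret.read()` are
# illustrative names for the signal-wait and result-read operations,
# not verified APIs from this repo:
#
#   while not qiter[].it.finished:
#     query(sig, ret, tds, qiter)       # one iterator step per task
#     await sig.wait()                  # hypothetical: wait for fireSync()
#     without qrb =? ret.read(), err:   # hypothetical TResult read
#       raise err
#     # ... consume qrb ...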