Mirror of https://github.com/logos-storage/nim-datastore.git (synced 2026-01-03 22:23:10 +00:00)
query fixes
This commit is contained in:
parent 62c9e7c583
commit ebed992f5a
@@ -1,22 +1,23 @@
import pkg/questionable/results
import pkg/upraises

import std/algorithm
import std/options
import ./threads/databuffer
import ./threads/threadresult
import ./threads/semaphore
import ./key
import ./types

export databuffer, threadresult, semaphore, types
export upraises, results, SortOrder
import pkg/questionable/results

import ./threads/databuffer

export databuffer
export SortOrder

type

  DbSortOrder* {.pure.} = enum
    Ascending,
    Descending
  DbQueryResponse*[K, V] = tuple[key: Option[K], data: V]

  DbQuery*[K] = object
    key*: K           # Key to be queried
    value*: bool      # Flag to indicate if data should be returned
    limit*: int       # Max items to return - not available in all backends
    offset*: int      # Offset from which to start querying - not available in all backends
    sort*: SortOrder  # Sort order - not available in all backends

  KeyId* = object
    ## serialized Key ID, equivalent to `key.id()`

@@ -27,20 +28,27 @@ type
  DbBatchEntry*[K, V] = tuple[key: K, data: V]

  DbQuery*[K] = object
    key*: K             # Key to be queried
    value*: bool        # Flag to indicate if data should be returned
    limit*: int         # Max items to return - not available in all backends
    offset*: int        # Offset from which to start querying - not available in all backends
    sort*: DbSortOrder  # Sort order - not available in all backends

  DbQueryHandle*[K, V, T] = object
    query*: DbQuery[K]
    cancel*: bool
    closed*: bool
    env*: T

  DbQueryResponse*[K, V] = tuple[key: Option[K], data: V]

proc dbQuery*[K](
  key: K,
  value = true,
  sort = SortOrder.Ascending,
  offset = 0,
  limit = -1
): DbQuery[K] =

  DbQuery[K](
    key: key,
    value: value,
    sort: sort,
    offset: offset,
    limit: limit)


proc `$`*(id: KeyId): string = $(id.data)

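For context, a minimal self-contained sketch of how the new `dbQuery` constructor is meant to be called. The types here are trimmed to the fields shown in the hunks above, and `K` is instantiated as a plain string instead of `KeyId`, so the snippet stands alone:

import std/algorithm   # provides SortOrder

type
  DbQuery[K] = object
    key: K          # key to be queried
    value: bool     # whether the value should be returned
    limit: int      # max items to return
    offset: int     # offset to start querying from
    sort: SortOrder # sort order

proc dbQuery[K](key: K, value = true, sort = SortOrder.Ascending,
                offset = 0, limit = -1): DbQuery[K] =
  DbQuery[K](key: key, value: value, sort: sort, offset: offset, limit: limit)

when isMainModule:
  # Only the key is required; everything else falls back to the defaults.
  let q = dbQuery(key = "/a", value = false, limit = 10)
  assert q.sort == SortOrder.Ascending
  assert q.limit == 10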
@@ -56,8 +64,5 @@ proc new*(tp: typedesc[KeyId], id: cstring): KeyId =

proc new*(tp: typedesc[KeyId], id: string): KeyId =
  KeyId(data: DataBuffer.new(id))

proc toKey*(key: KeyId): Key {.inline, raises: [].} =
  Key.init(key.data).expect("expected valid key here for but got `" & $key.data & "`")

template toOpenArray*(x: DbKey): openArray[char] =
  x.data.toOpenArray(char)

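The `typedesc`-based constructors above follow a common Nim pattern. A tiny stand-alone sketch, with `DataBuffer` stood in by a plain string wrapper (the real type lives in `./threads/databuffer`):

type
  DataBuffer = object
    buf: string        # stand-in for the real shared buffer
  KeyId = object
    data: DataBuffer

proc new(tp: typedesc[DataBuffer], s: string): DataBuffer =
  DataBuffer(buf: s)

proc new(tp: typedesc[KeyId], id: string): KeyId =
  # Mirrors the constructor in the hunk above: wrap the serialized key id.
  KeyId(data: DataBuffer.new(id))

proc `$`(id: KeyId): string = id.data.buf

when isMainModule:
  let id = KeyId.new("/a/b")
  assert $id == "/a/b"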
@@ -6,19 +6,17 @@ import pkg/questionable/results

import ./key
import ./types
import ./backend

export types
export options, SortOrder

type
  Query* = object
    key*: Key         # Key to be queried
    value*: bool      # Flag to indicate if data should be returned
    limit*: int       # Max items to return - not available in all backends
    offset*: int      # Offset from which to start querying - not available in all backends
    sort*: SortOrder  # Sort order - not available in all backends

  QueryResponse* = tuple[key: ?Key, data: seq[byte]]
  ## Front end types
  Query* = DbQuery[Key]

  QueryResponse* = DbQueryResponse[Key, seq[byte]]

  GetNext* = proc(): Future[?!QueryResponse] {.upraises: [], gcsafe.}
  IterDispose* = proc(): Future[?!void] {.upraises: [], gcsafe.}

@@ -37,17 +35,13 @@ proc defaultDispose(): Future[?!void] {.upraises: [], gcsafe, async.} =
proc new*(T: type QueryIter, dispose = defaultDispose): T =
  QueryIter(dispose: dispose)

proc init*(
  T: type Query,
  key: Key,
  value = true,
  sort = SortOrder.Ascending,
  offset = 0,
  limit = -1): T =
proc init*(T: type Query,
           key: Key,
           value = true,
           sort = SortOrder.Ascending,
           offset = 0,
           limit = -1): Query =
  dbQuery[Key](key, value, sort, offset, limit)

  T(
    key: key,
    value: value,
    sort: sort,
    offset: offset,
    limit: limit)
proc toKey*(key: KeyId): Key {.inline, raises: [].} =
  Key.init(key.data).expect("expected valid key here for but got `" & $key.data & "`")

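The shape of this change, reduced to a stand-alone sketch: the front-end `Query` becomes an alias of the generic backend type, and `Query.init` just delegates to `dbQuery`. `Key` is stood in by a plain string here; the real `Key`, `?`/`?!` and `Future` types come from the datastore, questionable and chronos packages:

import std/algorithm

type
  Key = string                 # stand-in for datastore's Key

  DbQuery[K] = object          # backend type (see the first hunks above)
    key: K
    value: bool
    limit: int
    offset: int
    sort: SortOrder

  Query = DbQuery[Key]         # front-end alias over the generic type

proc dbQuery[K](key: K, value = true, sort = SortOrder.Ascending,
                offset = 0, limit = -1): DbQuery[K] =
  DbQuery[K](key: key, value: value, sort: sort, offset: offset, limit: limit)

proc init(T: type Query, key: Key, value = true, sort = SortOrder.Ascending,
          offset = 0, limit = -1): Query =
  # The front-end constructor only forwards to the backend constructor.
  dbQuery[Key](key, value, sort, offset, limit)

when isMainModule:
  let q = Query.init("/a", value = false)
  assert q.sort == SortOrder.Ascending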
@@ -105,7 +105,7 @@ proc close*[K,V](self: SQLiteBackend[K,V]): ?!void =

proc query*[K,V](
  self: SQLiteBackend[K,V],
  query: DbQuery
  query: DbQuery[K]
): Result[DbQueryHandle[K,V,RawStmtPtr], ref CatchableError] =

  var

@@ -114,9 +114,10 @@ proc query*[K,V](
    else:
      QueryStmtIdStr

  if query.sort == DbSortOrder.Descending:
  case query.sort:
  of Descending:
    queryStr &= QueryStmtOrderDescending
  else:
  of Ascending:
    queryStr &= QueryStmtOrderAscending

  if query.limit != 0:

@@ -125,9 +126,9 @@ proc query*[K,V](
  if query.offset != 0:
    queryStr &= QueryStmtOffset

  echo "QUERY_STR: ", queryStr
  let
    queryStmt = QueryStmt.prepare(
      self.db.env, queryStr).expect("should not fail")
    queryStmt = ? QueryStmt.prepare(self.db.env, queryStr)

    s = RawStmtPtr(queryStmt)
    queryKey = $query.key & "*"

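To see what the `case` on the sort order changes, here is a stand-alone sketch of the statement-string assembly. The fragment names follow the hunk where they are visible; the actual SQL text, and the `QueryStmtDataIdStr` and `QueryStmtLimit` fragments, are made-up stand-ins:

import std/algorithm

const
  QueryStmtDataIdStr = "SELECT id, data FROM Store WHERE id GLOB ?"  # assumed name and text
  QueryStmtIdStr = "SELECT id FROM Store WHERE id GLOB ?"            # assumed text
  QueryStmtOrderAscending = " ORDER BY id ASC"                       # assumed text
  QueryStmtOrderDescending = " ORDER BY id DESC"                     # assumed text
  QueryStmtLimit = " LIMIT ?"                                        # assumed name and text
  QueryStmtOffset = " OFFSET ?"                                      # assumed text

proc buildQueryStr(value: bool, sort: SortOrder, limit, offset: int): string =
  result =
    if value: QueryStmtDataIdStr
    else: QueryStmtIdStr

  # Exhaustive case instead of if/else, matching the change in the hunk above.
  case sort
  of SortOrder.Descending: result &= QueryStmtOrderDescending
  of SortOrder.Ascending: result &= QueryStmtOrderAscending

  if limit != 0: result &= QueryStmtLimit
  if offset != 0: result &= QueryStmtOffset

when isMainModule:
  echo buildQueryStr(value = false, sort = SortOrder.Descending, limit = 10, offset = 0)
  # SELECT id FROM Store WHERE id GLOB ? ORDER BY id DESC LIMIT ?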
@@ -12,7 +12,6 @@ import pkg/datastore/sql/sqliteds
import pkg/datastore/key

import ../dscommontests
import ../querycommontests

proc testBasic[K, V](
  ds: SQLiteBackend[K,V],

@@ -127,7 +126,7 @@ suite "queryTests":

  test "Key should query all keys and all it's children":
    let
      q = DbQuery[KeyId](key: key1, value: true)
      q = dbQuery(key: key1, value: true)

    ds.put(key1, val1).tryGet
    ds.put(key2, val2).tryGet

@@ -150,7 +149,7 @@ suite "queryTests":

  test "query should cancel":
    let
      q = DbQuery[KeyId](key: key1, value: true)
      q = dbQuery(key: key1, value: true)

    ds.put(key1, val1).tryGet
    ds.put(key2, val2).tryGet

@@ -180,7 +179,7 @@ suite "queryTests":

  test "Key should query all keys without values":
    let
      q = DbQuery[KeyId](key: key1, value: false)
      q = dbQuery(key: key1, value: false)

    ds.put(key1, val1).tryGet
    ds.put(key2, val2).tryGet

@@ -205,7 +204,7 @@ suite "queryTests":

  test "Key should not query parent":
    let
      q = DbQuery[KeyId](key: key2, value: true)
      q = dbQuery(key: key2, value: true)

    ds.put(key1, val1).tryGet
    ds.put(key2, val2).tryGet

@@ -227,7 +226,7 @@ suite "queryTests":
  test "Key should all list all keys at the same level":
    let
      queryKey = Key.init("/a").tryGet
      q = DbQuery[KeyId](key: key1, value: true)
      q = dbQuery(key: key1, value: true)

    ds.put(key1, val1).tryGet
    ds.put(key2, val2).tryGet

@@ -254,7 +253,7 @@ suite "queryTests":
  test "Should apply limit":
    let
      key = Key.init("/a").tryGet
      q = DbQuery[KeyId](key: key1, limit: 10, value: false)
      q = dbQuery(key: key1, limit: 10, value: false)

    for i in 0..<100:
      let

@@ -268,29 +267,33 @@ suite "queryTests":
    let
      res = handle.iter().toSeq().mapIt(it.tryGet())

    echo "RES: ", res.mapIt(it.key)
    check:
      res.len == 10

  # test "Should not apply offset":
  #   let
  #     key = Key.init("/a").tryGet
  #     keyId = KeyId.new $key
  #     q = DbQuery(key: KeyId.new $key, offset: 90)
  test "Should not apply offset":
    let
      key = Key.init("/a").tryGet
      keyId = KeyId.new $key
      q = dbQuery(key: keyId, offset: 90)

  #   for i in 0..<100:
  #     let
  #       key = KeyId.new $Key.init(key, Key.init("/" & $i).tryGet).tryGet
  #       val = DataBuffer.new("val " & $i)
    for i in 0..<100:
      let
        key = KeyId.new $Key.init(key, Key.init("/" & $i).tryGet).tryGet
        val = DataBuffer.new("val " & $i)

  #   ds.put(keyId, val).tryGet
      ds.put(key, val).tryGet

  #   let
  #     (handle, iter) = ds.query(q).tryGet
  #     res = iter.mapIt(it.tryGet())
    var
      qr = ds.query(q)
    echo "RES: ", qr.repr

  #   check:
  #     res.len == 10
    var
      handle = ds.query(q).tryGet
    let
      res = handle.iter().toSeq().mapIt(it.tryGet())

    check:
      res.len == 10


  # test "Should not apply offset and limit":

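These tests drive the backend through the `DbQueryHandle`/`iter` pair. A toy, self-contained sketch of that shape, with the handle's `env` replaced by a seq of pre-computed responses instead of a live SQLite statement (the real `iter` is provided by the sqlite backend; the tests collect it with `toSeq`/`mapIt`):

import std/options

type
  DbQueryResponse[K, V] = tuple[key: Option[K], data: V]
  DbQueryHandle[K, V, T] = object
    cancel: bool
    closed: bool
    env: T            # backend-specific state; a plain seq in this sketch

iterator iter[K, V](
    handle: var DbQueryHandle[K, V, seq[DbQueryResponse[K, V]]]
): DbQueryResponse[K, V] =
  # Yield results until the caller flags cancellation, then mark the handle closed.
  for item in handle.env:
    if handle.cancel:
      break
    yield item
  handle.closed = true

when isMainModule:
  var handle = DbQueryHandle[string, string, seq[DbQueryResponse[string, string]]](
    env: @[
      (key: some("/a/1"), data: "val 1"),
      (key: some("/a/2"), data: "val 2")])
  var res: seq[DbQueryResponse[string, string]]
  for item in handle.iter():
    res.add item
  assert res.len == 2
  assert handle.closed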