Mirror of https://github.com/logos-storage/nim-datastore.git (synced 2026-01-05 23:23:10 +00:00)
refactor - tests
commit ae311856a8 (parent 16bd98d326)
@@ -232,6 +232,7 @@ iterator iter*[K, V](handle: var DbQueryHandle[K, V, FsQueryEnv[K,V]]
     if flres.isErr():
       # echo "FS:ERROR: ", flres.error()
       yield DbQueryResponse[K,V].failure flres.error()
+      continue

     let
       key = K.toKey($Key.init(keyPath).expect("valid key"))
@@ -241,8 +242,8 @@ iterator iter*[K, V](handle: var DbQueryHandle[K, V, FsQueryEnv[K,V]]
         if res.isErr():
           # echo "FS:ERROR: ", res.error()
           yield DbQueryResponse[K,V].failure res.error()
-        else:
-          res.get()
+          continue
+        res.get()
       else:
         V.new()

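Not part of the diff itself: a minimal, standard-library-only Nim sketch of the control-flow pattern the two hunks above introduce. The names iterEntries and Response are hypothetical stand-ins for the real DbQueryHandle/DbQueryResponse machinery; the point is only that, after yielding a failure for one entry, continue skips the rest of the loop body so the iterator moves on to the next entry instead of falling through and building a success response from a bad value.

import std/options

type
  Response = tuple[key: Option[string], err: string]

iterator iterEntries(paths: seq[string]): Response =
  for p in paths:
    if p.len == 0:
      yield (key: none(string), err: "empty path")  # report the failure ...
      continue                                      # ... then skip to the next entry
    yield (key: some(p), err: "")                   # normal success path

when isMainModule:
  for r in iterEntries(@["a", "", "b"]):
    if r.key.isNone: echo "error: ", r.err
    else: echo "ok: ", r.key.get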
@@ -197,109 +197,110 @@ template queryTests*(
       res[2].key.get == key3
       res[2].data == val3

-  test "Should apply limit":
-    let
-      key = Key.init("/a").tryGet
-      q = dbQuery(key= key1, limit= 10, value= false)
-
-    for i in 0..<100:
-      let
-        key = KeyId.new $Key.init(key, Key.init("/" & $i).tryGet).tryGet
-        val = DataBuffer.new("val " & $i)
-
-      ds.put(key, val).tryGet
-
-    var
-      handle = ds.query(q).tryGet
-    let
-      res = handle.iter().toSeq().mapIt(it.tryGet())
-
-    check:
-      res.len == 10
-
-  test "Should not apply offset":
-    let
-      key = Key.init("/a").tryGet
-      keyId = KeyId.new $key
-      q = dbQuery(key= keyId, offset= 90)
-
-    for i in 0..<100:
-      let
-        key = KeyId.new $Key.init(key, Key.init("/" & $i).tryGet).tryGet
-        val = DataBuffer.new("val " & $i)
-
-      ds.put(key, val).tryGet
-
-    var
-      qr = ds.query(q)
-    # echo "RES: ", qr.repr
-
-    var
-      handle = ds.query(q).tryGet
-    let
-      res = handle.iter().toSeq().mapIt(it.tryGet())
-
-    # echo "RES: ", res.mapIt(it.key)
-    check:
-      res.len == 10
-
-  test "Should not apply offset and limit":
-    let
-      key = Key.init("/a").tryGet
-      keyId = KeyId.new $key
-      q = dbQuery(key= keyId, offset= 95, limit= 5)
-
-    for i in 0..<100:
-      let
-        key = KeyId.new $Key.init(key, Key.init("/" & $i).tryGet).tryGet
-        val = DataBuffer.new("val " & $i)
-
-      ds.put(key, val).tryGet
-
-    var
-      handle = ds.query(q).tryGet
-      res = handle.iter().toSeq().mapIt(it.tryGet())
-
-    check:
-      res.len == 5
-
-    for i in 0..<res.high:
-      let
-        val = DataBuffer.new("val " & $(i + 95))
-        key = KeyId.new $Key.init(key, Key.init("/" & $(i + 95)).tryGet).tryGet
-
-      check:
-        res[i].key.get == key
-        # res[i].data == val
-
-  test "Should apply sort order - descending":
-    let
-      key = Key.init("/a").tryGet
-      keyId = KeyId.new $key
-      q = dbQuery(key= keyId, value=true, sort= SortOrder.Descending)
-
-    var kvs: seq[DbQueryResponse[KeyId, DataBuffer]]
-    for i in 0..<100:
-      let
-        k = KeyId.new $Key.init(key, Key.init("/" & $i).tryGet).tryGet
-        val = DataBuffer.new ("val " & $i)
-
-      kvs.add((k.some, val))
-      ds.put(k, val).tryGet
-
-    # lexicographic sort, as it comes from the backend
-    kvs.sort do (a, b: DbQueryResponse[KeyId, DataBuffer]) -> int:
-      cmp($a.key.get, $b.key.get)
-
-    kvs = kvs.reversed
-    var
-      handle = ds.query(q).tryGet
-      res = handle.iter().toSeq().mapIt(it.tryGet())
-
-    check:
-      res.len == 100
-
-    for i, r in res[1..^1]:
-      check:
-        res[i].key.get == kvs[i].key.get
-        res[i].data == kvs[i].data
+  if extended:
+    test "Should apply limit":
+      let
+        key = Key.init("/a").tryGet
+        q = dbQuery(key= key1, limit= 10, value= false)
+
+      for i in 0..<100:
+        let
+          key = KeyId.new $Key.init(key, Key.init("/" & $i).tryGet).tryGet
+          val = DataBuffer.new("val " & $i)
+
+        ds.put(key, val).tryGet
+
+      var
+        handle = ds.query(q).tryGet
+      let
+        res = handle.iter().toSeq().mapIt(it.tryGet())
+
+      check:
+        res.len == 10
+
+    test "Should not apply offset":
+      let
+        key = Key.init("/a").tryGet
+        keyId = KeyId.new $key
+        q = dbQuery(key= keyId, offset= 90)
+
+      for i in 0..<100:
+        let
+          key = KeyId.new $Key.init(key, Key.init("/" & $i).tryGet).tryGet
+          val = DataBuffer.new("val " & $i)
+
+        ds.put(key, val).tryGet
+
+      var
+        qr = ds.query(q)
+      # echo "RES: ", qr.repr
+
+      var
+        handle = ds.query(q).tryGet
+      let
+        res = handle.iter().toSeq().mapIt(it.tryGet())
+
+      # echo "RES: ", res.mapIt(it.key)
+      check:
+        res.len == 10
+
+    test "Should not apply offset and limit":
+      let
+        key = Key.init("/a").tryGet
+        keyId = KeyId.new $key
+        q = dbQuery(key= keyId, offset= 95, limit= 5)
+
+      for i in 0..<100:
+        let
+          key = KeyId.new $Key.init(key, Key.init("/" & $i).tryGet).tryGet
+          val = DataBuffer.new("val " & $i)
+
+        ds.put(key, val).tryGet
+
+      var
+        handle = ds.query(q).tryGet
+        res = handle.iter().toSeq().mapIt(it.tryGet())
+
+      check:
+        res.len == 5
+
+      for i in 0..<res.high:
+        let
+          val = DataBuffer.new("val " & $(i + 95))
+          key = KeyId.new $Key.init(key, Key.init("/" & $(i + 95)).tryGet).tryGet
+
+        check:
+          res[i].key.get == key
+          # res[i].data == val
+
+    test "Should apply sort order - descending":
+      let
+        key = Key.init("/a").tryGet
+        keyId = KeyId.new $key
+        q = dbQuery(key= keyId, value=true, sort= SortOrder.Descending)
+
+      var kvs: seq[DbQueryResponse[KeyId, DataBuffer]]
+      for i in 0..<100:
+        let
+          k = KeyId.new $Key.init(key, Key.init("/" & $i).tryGet).tryGet
+          val = DataBuffer.new ("val " & $i)
+
+        kvs.add((k.some, val))
+        ds.put(k, val).tryGet
+
+      # lexicographic sort, as it comes from the backend
+      kvs.sort do (a, b: DbQueryResponse[KeyId, DataBuffer]) -> int:
+        cmp($a.key.get, $b.key.get)
+
+      kvs = kvs.reversed
+      var
+        handle = ds.query(q).tryGet
+        res = handle.iter().toSeq().mapIt(it.tryGet())
+
+      check:
+        res.len == 100
+
+      for i, r in res[1..^1]:
+        check:
+          res[i].key.get == kvs[i].key.get
+          res[i].data == kvs[i].data
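A side note on the descending-order test above, which sorts its expected list and then reverses it: keys shaped like /a/0 .. /a/99 come back from the backend in lexicographic order, not numeric order, so the expected sequence has to be sorted the same way before reversing. A small standard-library illustration of that ordering (the key strings are made up to mirror what the test generates):

import std/[algorithm, sequtils]

# 100 keys in the same shape the test generates: /a/0 .. /a/99
let keys = toSeq(0 ..< 100).mapIt("/a/" & $it)

# lexicographic ascending, then reversed for a descending query
let expected = keys.sorted().reversed()

assert expected[0] == "/a/99"   # lexicographically largest key
assert expected[^1] == "/a/0"   # lexicographically smallest key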
@@ -188,7 +188,7 @@ suite "queryTests":
     val2 = DataBuffer.new "value for 2"
     val3 = DataBuffer.new "value for 3"

-  queryTests(fsNew, key1, key2, key3, val1, val2, val3, extended=true)
+  queryTests(fsNew, key1, key2, key3, val1, val2, val3, extended=false)

   removeDir(basePathAbs)
   require(not dirExists(basePathAbs))
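For orientation, a simplified and hypothetical sketch of the shape this refactor gives the test template: the heavier query tests sit behind an extended flag inside the template, and a suite opts out by passing extended=false, as the last hunk does for the filesystem-backed suite. Only std/unittest's suite, test, and check are real APIs here; the queryTests below is a reduced stand-in, not the template from this repository.

import std/unittest

template queryTests(name: string, extended: bool) =
  suite name:
    test "basic query":
      # always-on case
      check true

    if extended:
      test "Should apply limit":
        # heavier case, only run when extended is true
        check true

# a backend suite that skips the extended cases
queryTests("filesystem datastore queries", false)  # extended = false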