Merge pull request #29 from status-im/refactor

Refactor datastore to prepare for proper query support
Dmitriy Ryajov 2022-09-20 14:12:08 -04:00 committed by GitHub
commit 8775b2d49a
27 changed files with 1531 additions and 1733 deletions


@ -11,35 +11,29 @@ export key, query
push: {.upraises: [].}
type
DatastoreError* = object of CatchableError
DatastoreKeyNotFound* = object of DatastoreError
CodexResult*[T] = Result[T, ref DatastoreError]
Datastore* = ref object of RootObj
method contains*(
self: Datastore,
key: Key): Future[?!bool] {.async, base, locks: "unknown".} =
method contains*(self: Datastore, key: Key): Future[?!bool] {.base, locks: "unknown".} =
raiseAssert("Not implemented!")
method delete*(
self: Datastore,
key: Key): Future[?!void] {.async, base, locks: "unknown".} =
method delete*(self: Datastore, key: Key): Future[?!void] {.base, locks: "unknown".} =
raiseAssert("Not implemented!")
method get*(
self: Datastore,
key: Key): Future[?!(?seq[byte])] {.async, base, locks: "unknown".} =
method get*(self: Datastore, key: Key): Future[?!seq[byte]] {.base, locks: "unknown".} =
raiseAssert("Not implemented!")
method put*(
self: Datastore,
key: Key,
data: seq[byte]): Future[?!void] {.async, base, locks: "unknown".} =
method put*(self: Datastore, key: Key, data: seq[byte]): Future[?!void] {.base, locks: "unknown".} =
raiseAssert("Not implemented!")
iterator query*(
method close*(self: Datastore): Future[?!void] {.base, async, locks: "unknown".} =
return success()
method query*(
self: Datastore,
query: Query): Future[QueryResponse] =
query: Query): Future[QueryIter] {.gcsafe.} =
raiseAssert("Not implemented!")


@ -1,162 +0,0 @@
import std/os
import pkg/chronos
import pkg/questionable
import pkg/questionable/results
from pkg/stew/results as stewResults import get, isErr
import pkg/upraises
import ./datastore
export datastore
push: {.upraises: [].}
type
FileSystemDatastore* = ref object of Datastore
root: string
const
objExt* = ".dsobject"
proc new*(
T: type FileSystemDatastore,
root: string): ?!T =
try:
let
root = if root.isAbsolute: root
else: getCurrentDir() / root
if not dirExists(root):
failure "directory does not exist: " & root
else:
success T(root: root)
except OSError as e:
failure e
proc root*(self: FileSystemDatastore): string =
self.root
proc path*(
self: FileSystemDatastore,
key: Key): string =
var
segments: seq[string]
for ns in key:
without field =? ns.field:
segments.add ns.value
continue
segments.add(field / ns.value)
# is it problematic that per this logic Key(/a:b) evaluates to the same path
# as Key(/a/b)? may need to check if/how other Datastore implementations
# distinguish them
self.root / joinPath(segments) & objExt
method contains*(
self: FileSystemDatastore,
key: Key): Future[?!bool] {.async, locks: "unknown".} =
return success fileExists(self.path(key))
method delete*(
self: FileSystemDatastore,
key: Key): Future[?!void] {.async, locks: "unknown".} =
let
path = self.path(key)
try:
removeFile(path)
return success()
# removing an empty directory might lead to surprising behavior depending
# on what the user specified as the `root` of the FileSystemDatastore, so
# until further consideration, empty directories will be left in place
except OSError as e:
return failure e
method get*(
self: FileSystemDatastore,
key: Key): Future[?!(?seq[byte])] {.async, locks: "unknown".} =
# to support finer control of memory allocation, maybe could/should change
# the signature of `get` so that it has a 3rd parameter
# `bytes: var openArray[byte]` and return type `?!bool`; this variant with
# return type `?!(?seq[byte])` would be a special case (convenience method)
# calling the former after allocating a seq with size automatically
# determined via `getFileSize`
let
path = self.path(key)
containsRes = await self.contains(key)
if containsRes.isErr: return failure containsRes.error.msg
if containsRes.get:
var
file: File
if not file.open(path):
return failure "unable to open file: " & path
else:
try:
let
size = file.getFileSize
var
bytes: seq[byte]
if size > 0:
newSeq(bytes, size)
let
bytesRead = file.readBytes(bytes, 0, size)
if bytesRead < size:
return failure $bytesRead & " bytes were read from " & path &
" but " & $size & " bytes were expected"
return success bytes.some
except IOError as e:
return failure e
finally:
file.close
else:
return success seq[byte].none
method put*(
self: FileSystemDatastore,
key: Key,
data: seq[byte]): Future[?!void] {.async, locks: "unknown".} =
let
path = self.path(key)
try:
createDir(parentDir(path))
if data.len > 0: writeFile(path, data)
else: writeFile(path, "")
return success()
except IOError as e:
return failure e
except OSError as e:
return failure e
# method query*(
# self: FileSystemDatastore,
# query: ...): Future[?!(?...)] {.async, locks: "unknown".} =
#
# return success ....some

datastore/fsds.nim (new file, 158 lines)

@ -0,0 +1,158 @@
import std/os
import std/options
import pkg/chronos
import pkg/questionable
import pkg/questionable/results
from pkg/stew/results as stewResults import get, isErr
import pkg/upraises
import ./datastore
export datastore
push: {.upraises: [].}
type
FSDatastore* = ref object of Datastore
root*: string
ignoreProtected: bool
depth: int
template path*(self: FSDatastore, key: Key): string =
var
segments: seq[string]
for ns in key:
if ns.field == "":
segments.add ns.value
continue
# `:` are replaced with `/`
segments.add(ns.field / ns.value)
self.root / segments.joinPath()
template validDepth*(self: FSDatastore, key: Key): bool =
key.len <= self.depth
method contains*(self: FSDatastore, key: Key): Future[?!bool] {.async.} =
if not self.validDepth(key):
return failure "Path has invalid depth!"
let
path = self.path(key)
return success fileExists(path)
method delete*(self: FSDatastore, key: Key): Future[?!void] {.async.} =
if not self.validDepth(key):
return failure "Path has invalid depth!"
let
path = self.path(key)
try:
removeFile(path)
return success()
# removing an empty directory might lead to surprising behavior depending
# on what the user specified as the `root` of the FSDatastore, so
# until further consideration, empty directories will be left in place
except OSError as e:
return failure e
method get*(self: FSDatastore, key: Key): Future[?!seq[byte]] {.async.} =
# to support finer control of memory allocation, maybe could/should change
# the signature of `get` so that it has a 3rd parameter
# `bytes: var openArray[byte]` and return type `?!bool`; this variant with
# return type `?!(?seq[byte])` would be a special case (convenience method)
# calling the former after allocating a seq with size automatically
# determined via `getFileSize`
if not self.validDepth(key):
return failure "Path has invalid depth!"
let
path = self.path(key)
if not fileExists(path):
return failure(newException(DatastoreKeyNotFound, "Key doesn't exist"))
var
file: File
defer:
file.close
if not file.open(path):
return failure "unable to open file: " & path
try:
let
size = file.getFileSize
var
bytes = newSeq[byte](size)
read = 0
while read < size:
read += file.readBytes(bytes, read, size)
if read < size:
return failure $read & " bytes were read from " & path &
" but " & $size & " bytes were expected"
return success bytes
except CatchableError as e:
return failure e
method put*(
self: FSDatastore,
key: Key,
data: seq[byte]): Future[?!void] {.async, locks: "unknown".} =
if not self.validDepth(key):
return failure "Path has invalid depth!"
let
path = self.path(key)
try:
createDir(parentDir(path))
writeFile(path, data)
except CatchableError as e:
return failure e
return success()
# method query*(
# self: FSDatastore,
# query: ...): Future[?!(?...)] {.async, locks: "unknown".} =
#
# return success ....some
proc new*(
T: type FSDatastore,
root: string,
depth = 2,
caseSensitive = true,
ignoreProtected = false): ?!T =
let root = ? (
block:
if root.isAbsolute: root
else: getCurrentDir() / root).catch
if not dirExists(root):
return failure "directory does not exist: " & root
success T(
root: root,
ignoreProtected: ignoreProtected,
depth: depth)
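A minimal usage sketch for FSDatastore, assuming the module is importable as pkg/datastore/fsds (mirroring the pkg/datastore/sql/sqliteds path used by the tests below) and that the root directory already exists:

import pkg/chronos
import pkg/stew/results
import pkg/datastore/fsds            # assumed module path; re-exports datastore/key

proc example() {.async.} =
  let
    fs = FSDatastore.new(root = "data", depth = 3).tryGet()  # "data" is a hypothetical existing dir
    key = Key.init("a:b/c").tryGet()                          # stored at <root>/a/b/c
  (await fs.put(key, @[1.byte, 2.byte, 3.byte])).tryGet()
  assert (await fs.contains(key)).tryGet()
  assert (await fs.get(key)).tryGet() == @[1.byte, 2.byte, 3.byte]
  (await fs.delete(key)).tryGet()

waitFor example()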


@ -22,59 +22,59 @@ type
namespaces*: seq[Namespace]
const
delimiter = ":"
separator = "/"
Delimiter* = ":"
Separator* = "/"
# TODO: operator/s for combining string|Namespace,string|Namespace
# TODO: lifting from ?![Namespace|Key] for various ops
proc init*(
func init*(
T: type Namespace,
field, value: string): ?!T =
if value.strip == "":
return failure "value string must not be all whitespace or empty"
if value.contains(delimiter):
return failure "value string must not contain delimiter \"" &
delimiter & "\""
if value.contains(Delimiter):
return failure "value string must not contain Delimiter \"" &
Delimiter & "\""
if value.contains(separator):
return failure "value string must not contain separator \"" &
separator & "\""
if value.contains(Separator):
return failure "value string must not contain Separator \"" &
Separator & "\""
if field != "":
if field.strip == "":
return failure "field string must not be all whitespace"
if field.contains(delimiter):
return failure "field string must not contain delimiter \"" &
delimiter & "\""
if field.contains(Delimiter):
return failure "field string must not contain Delimiter \"" &
Delimiter & "\""
if field.contains(separator):
return failure "field string must not contain separator \"" &
separator & "\""
if field.contains(Separator):
return failure "field string must not contain Separator \"" &
Separator & "\""
success T(field: field, value: value)
proc init*(T: type Namespace, id: string): ?!T =
func init*(T: type Namespace, id: string): ?!T =
if id.strip == "":
return failure "id string must not be all whitespace or empty"
if id.contains(separator):
return failure "id string must not contain separator \"" & separator & "\""
if id.contains(Separator):
return failure "id string must not contain Separator \"" & Separator & "\""
if id == delimiter:
return failure "value in id string \"[field]" & delimiter &
if id == Delimiter:
return failure "value in id string \"[field]" & Delimiter &
"[value]\" must not be empty"
if id.count(delimiter) > 1:
return failure "id string must not contain more than one delimiter \"" &
delimiter & "\""
if id.count(Delimiter) > 1:
return failure "id string must not contain more than one Delimiter \"" &
Delimiter & "\""
let
(field, value) = block:
let parts = id.split(delimiter)
let parts = id.split(Delimiter)
if parts.len > 1:
(parts[0], parts[^1])
else:
@ -82,22 +82,25 @@ proc init*(T: type Namespace, id: string): ?!T =
T.init(field, value)
proc id*(self: Namespace): string =
func id*(self: Namespace): string =
if self.field.len > 0:
self.field & delimiter & self.value
self.field & Delimiter & self.value
else:
self.value
proc `$`*(namespace: Namespace): string =
func hash*(namespace: Namespace): Hash =
hash(namespace.id)
func `$`*(namespace: Namespace): string =
"Namespace(" & namespace.id & ")"
proc init*(T: type Key, namespaces: varargs[Namespace]): ?!T =
func init*(T: type Key, namespaces: varargs[Namespace]): ?!T =
if namespaces.len == 0:
failure "namespaces must contain at least one Namespace"
else:
success T(namespaces: @namespaces)
proc init*(T: type Key, namespaces: varargs[string]): ?!T =
func init*(T: type Key, namespaces: varargs[string]): ?!T =
if namespaces.len == 0:
failure "namespaces must contain at least one Namespace id string"
else:
@ -106,7 +109,7 @@ proc init*(T: type Key, namespaces: varargs[string]): ?!T =
?Namespace.init(it)
))
proc init*(T: type Key, id: string): ?!T =
func init*(T: type Key, id: string): ?!T =
if id == "":
return failure "id string must contain at least one Namespace"
@ -114,15 +117,15 @@ proc init*(T: type Key, id: string): ?!T =
return failure "id string must not be all whitespace"
let
nsStrs = id.split(separator).filterIt(it != "")
nsStrs = id.split(Separator).filterIt(it != "")
if nsStrs.len == 0:
return failure "id string must not contain only one or more separator " &
"\"" & separator & "\""
return failure "id string must not contain more than one Separator " &
"\"" & Separator & "\""
Key.init(nsStrs)
proc list*(self: Key): seq[Namespace] =
func list*(self: Key): seq[Namespace] =
self.namespaces
proc random*(T: type Key): string =
@ -131,78 +134,91 @@ proc random*(T: type Key): string =
template `[]`*(key: Key, x: auto): auto =
key.namespaces[x]
proc len*(self: Key): int =
func len*(self: Key): int =
self.namespaces.len
iterator items*(key: Key): Namespace =
for k in key.namespaces:
yield k
proc reversed*(self: Key): Key =
func reversed*(self: Key): Key =
Key(namespaces: self.namespaces.reversed)
proc reverse*(self: Key): Key =
func reverse*(self: Key): Key =
self.reversed
proc name*(self: Key): string =
func name*(self: Key): string =
if self.len > 0:
return self[^1].value
proc `type`*(self: Key): string =
func `type`*(self: Key): string =
if self.len > 0:
return self[^1].field
proc id*(self: Key): string =
separator & self.namespaces.mapIt(it.id).join(separator)
func id*(self: Key): string =
Separator & self.namespaces.mapIt(it.id).join(Separator)
proc isTopLevel*(self: Key): bool =
func root*(self: Key): bool =
self.len == 1
proc parent*(self: Key): ?!Key =
if self.isTopLevel:
func parent*(self: Key): ?!Key =
if self.root:
failure "key has no parent"
else:
success Key(namespaces: self.namespaces[0..^2])
proc path*(self: Key): ?!Key =
func path*(self: Key): ?!Key =
let
parent = ? self.parent
parent = ?self.parent
if self[^1].field == "":
return success parent
success Key(namespaces: parent.namespaces & @[Namespace(value: self[^1].field)])
let ns = parent.namespaces & @[Namespace(value: self[^1].field)]
success Key(namespaces: ns)
proc child*(self: Key, ns: Namespace): Key =
func child*(self: Key, ns: Namespace): Key =
Key(namespaces: self.namespaces & @[ns])
proc `/`*(self: Key, ns: Namespace): Key =
func `/`*(self: Key, ns: Namespace): Key =
self.child(ns)
proc child*(self: Key, namespaces: varargs[Namespace]): Key =
func child*(self: Key, namespaces: varargs[Namespace]): Key =
Key(namespaces: self.namespaces & @namespaces)
proc child*(self, key: Key): Key =
func child*(self, key: Key): Key =
Key(namespaces: self.namespaces & key.namespaces)
proc `/`*(self, key: Key): Key =
func `/`*(self, key: Key): Key =
self.child(key)
proc child*(self: Key, keys: varargs[Key]): Key =
func child*(self: Key, keys: varargs[Key]): Key =
Key(namespaces: self.namespaces & concat(keys.mapIt(it.namespaces)))
proc child*(self: Key, ids: varargs[string]): ?!Key =
func child*(self: Key, ids: varargs[string]): ?!Key =
success self.child(ids.filterIt(it != "").mapIt( ?Key.init(it) ))
proc `/`*(self: Key, id: string): ?!Key =
func relative*(self: Key, parent: Key): ?!Key =
## Get a key relative to parent from current key
##
if self.len < parent.len:
return failure "Not a parent of this key!"
Key.init(self.namespaces[parent.namespaces.high..self.namespaces.high])
func `/`*(self: Key, id: string): ?!Key =
self.child(id)
proc isAncestorOf*(self, other: Key): bool =
func ancestor*(self, other: Key): bool =
if other.len <= self.len: false
else: other.namespaces[0..<self.len] == self.namespaces
proc isDescendantOf*(self, other: Key): bool =
other.isAncestorOf(self)
func descendant*(self, other: Key): bool =
other.ancestor(self)
proc `$`*(key: Key): string =
func hash*(key: Key): Hash {.inline.} =
hash(key.id)
func `$`*(key: Key): string =
"Key(" & key.id & ")"


@ -1,47 +0,0 @@
import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import ./datastore
export datastore
push: {.upraises: [].}
type
NullDatastore* = ref object of Datastore
proc new*(T: type NullDatastore): T =
T()
method contains*(
self: NullDatastore,
key: Key): Future[?!bool] {.async, locks: "unknown".} =
return success false
method delete*(
self: NullDatastore,
key: Key): Future[?!void] {.async, locks: "unknown".} =
return success()
method get*(
self: NullDatastore,
key: Key): Future[?!(?seq[byte])] {.async, locks: "unknown".} =
return success seq[byte].none
method put*(
self: NullDatastore,
key: Key,
data: seq[byte]): Future[?!void] {.async, locks: "unknown".} =
return success()
iterator query*(
self: NullDatastore,
query: Query): Future[QueryResponse] =
discard


@ -1,18 +1,42 @@
import pkg/upraises
import pkg/chronos
import ./key
type
Query* = object
key: QueryKey
SortOrder* {.pure.} = enum
Ascending,
Descending
QueryKey* = Key
Query* = object
key*: Key
value*: bool
limit*: int
skip*: int
sort*: SortOrder
QueryResponse* = tuple[key: Key, data: seq[byte]]
GetNext* = proc(): Future[QueryResponse] {.upraises: [], gcsafe, closure.}
QueryIter* = object
finished: bool
next*: GetNext
iterator items*(q: QueryIter): Future[QueryResponse] =
while not q.finished:
yield q.next()
proc init*(
T: type Query,
key: QueryKey): T =
key: Key,
value = false,
sort = SortOrder.Descending,
skip = 0,
limit = 0): T =
T(key: key)
proc key*(self: Query): QueryKey =
self.key
T(
key: key,
value: value,
sort: sort,
skip: skip,
limit: limit)
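No store in this commit implements query yet (the SQLite variant further down is still commented out), so the following is only a sketch of the intended consumer-side shape of the Query/QueryIter API, assuming pkg/datastore re-exports these types:

import pkg/chronos
import pkg/datastore                  # assumed to re-export Datastore, Key, Query, QueryIter

proc dumpAll(store: Datastore, root: Key) {.async.} =
  let iter = await store.query(Query.init(root))
  for fut in iter:                    # `items` yields Future[QueryResponse]
    let (key, data) = await fut
    echo key.id, ": ", data.len, " bytes"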

datastore/sql.nim (new file, 3 lines)

@ -0,0 +1,3 @@
import ./sql/sqliteds
export sqliteds

datastore/sql/sqliteds.nim (new file, 155 lines)

@ -0,0 +1,155 @@
import std/times
import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import pkg/sqlite3_abi
from pkg/stew/results as stewResults import isErr
import pkg/upraises
import ./datastore
import ./sqlitedsdb
export datastore, sqlitedsdb
push: {.upraises: [].}
type
SQLiteDatastore* = ref object of Datastore
readOnly: bool
db: SQLiteDsDb
proc path*(self: SQLiteDatastore): string =
self.db.dbPath
proc `readOnly=`*(self: SQLiteDatastore): bool
{.error: "readOnly should not be assigned".}
proc timestamp*(t = epochTime()): int64 =
(t * 1_000_000).int64
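For reference, these timestamps are microseconds since the Unix epoch, e.g.:

assert timestamp(10.123_456) == 10_123_456   # 10.123456 s -> 10_123_456 us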
method contains*(self: SQLiteDatastore, key: Key): Future[?!bool] {.async.} =
var
exists = false
proc onData(s: RawStmtPtr) =
exists = sqlite3_column_int64(s, ContainsStmtExistsCol.cint).bool
if (
let res = self.db.containsStmt.query((key.id), onData);
res.isErr):
return failure res.error.msg
return success exists
method delete*(self: SQLiteDatastore, key: Key): Future[?!void] {.async.} =
return self.db.deleteStmt.exec((key.id))
method get*(self: SQLiteDatastore, key: Key): Future[?!seq[byte]] {.async.} =
# see comment in ./filesystem_datastore re: finer control of memory
# allocation in `method get`, could apply here as well if bytes were read
# incrementally with `sqlite3_blob_read`
var
bytes: seq[byte]
proc onData(s: RawStmtPtr) =
bytes = self.db.getDataCol()
if (
let res = self.db.getStmt.query((key.id), onData);
res.isErr):
return failure res.error.msg
return success bytes
method put*(self: SQLiteDatastore, key: Key, data: seq[byte]): Future[?!void] {.async.} =
return self.db.putStmt.exec((key.id, @data, timestamp()))
method close*(self: SQLiteDatastore): Future[?!void] {.async.} =
self.db.close()
return success()
# iterator query*(
# self: SQLiteDatastore,
# query: Query): Future[QueryResponse] =
# let
# queryStmt = QueryStmt.prepare(
# self.db.env, QueryStmtStr).expect("should not fail")
# s = RawStmtPtr(queryStmt)
# defer:
# discard sqlite3_reset(s)
# discard sqlite3_clear_bindings(s)
# s.dispose
# let
# v = sqlite3_bind_text(s, 1.cint, query.key.id.cstring, -1.cint,
# SQLITE_TRANSIENT_GCSAFE)
# if not (v == SQLITE_OK):
# raise (ref Defect)(msg: $sqlite3_errstr(v))
# while true:
# let
# v = sqlite3_step(s)
# case v
# of SQLITE_ROW:
# let
# key = Key.init($sqlite3_column_text_not_null(
# s, QueryStmtIdCol)).expect("should not fail")
# blob = sqlite3_column_blob(s, QueryStmtDataCol)
# # detect out-of-memory error
# # see the conversion table and final paragraph of:
# # https://www.sqlite.org/c3ref/column_blob.html
# # see also https://www.sqlite.org/rescode.html
# # the "data" column can be NULL so in order to detect an out-of-memory
# # error it is necessary to check that the result is a null pointer and
# # that the result code is an error code
# if blob.isNil:
# let
# v = sqlite3_errcode(sqlite3_db_handle(s))
# if not (v in [SQLITE_OK, SQLITE_ROW, SQLITE_DONE]):
# raise (ref Defect)(msg: $sqlite3_errstr(v))
# let
# dataLen = sqlite3_column_bytes(s, QueryStmtDataCol)
# dataBytes = cast[ptr UncheckedArray[byte]](blob)
# data = @(toOpenArray(dataBytes, 0, dataLen - 1))
# fut = newFuture[QueryResponse]()
# fut.complete((key, data))
# yield fut
# of SQLITE_DONE:
# break
# else:
# raise (ref Defect)(msg: $sqlite3_errstr(v))
proc new*(
T: type SQLiteDatastore,
path: string,
readOnly = false): ?!T =
let
flags =
if readOnly: SQLITE_OPEN_READONLY
else: SQLITE_OPEN_READWRITE or SQLITE_OPEN_CREATE
success T(
db: ? SQLiteDsDb.open(path, flags),
readOnly: readOnly)
proc new*(
T: type SQLiteDatastore,
db: SQLiteDsDb): ?!T =
success T(
db: db,
readOnly: db.readOnly)
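A usage sketch for the new SQLiteDatastore wrapper; Memory (":memory:") and Key come from the re-exported sqlitedsdb and datastore modules:

import pkg/chronos
import pkg/stew/results
import pkg/stew/byteutils
import pkg/datastore/sql/sqliteds

proc example() {.async.} =
  let
    ds = SQLiteDatastore.new(Memory).tryGet()   # in-memory database
    key = Key.init("a:b/c/d:e").tryGet()
  (await ds.put(key, "some bytes".toBytes)).tryGet()
  assert (await ds.contains(key)).tryGet()
  assert (await ds.get(key)).tryGet() == "some bytes".toBytes
  (await ds.delete(key)).tryGet()
  (await ds.close()).tryGet()

waitFor example()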


@ -0,0 +1,264 @@
import std/os
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import ./sqliteutils
export sqliteutils
type
BoundIdCol* = proc (): string {.closure, gcsafe, upraises: [].}
BoundDataCol* = proc (): seq[byte] {.closure, gcsafe, upraises: [].}
BoundTimestampCol* = proc (): int64 {.closure, gcsafe, upraises: [].}
# feels odd to use `void` for prepared statements corresponding to SELECT
# queries but it fits with the rest of the SQLite wrapper adapted from
# status-im/nwaku, at least in its current form in ./sqlite
ContainsStmt* = SQLiteStmt[(string), void]
DeleteStmt* = SQLiteStmt[(string), void]
GetStmt* = SQLiteStmt[(string), void]
PutStmt* = SQLiteStmt[(string, seq[byte], int64), void]
QueryStmt* = SQLiteStmt[(string), void]
SQLiteDsDb* = object
readOnly*: bool
dbPath*: string
containsStmt*: ContainsStmt
deleteStmt*: DeleteStmt
env*: SQLite
getDataCol*: BoundDataCol
getStmt*: GetStmt
putStmt*: PutStmt
const
DbExt* = ".sqlite3"
TableName* = "Store"
IdColName* = "id"
DataColName* = "data"
TimestampColName* = "timestamp"
IdColType = "TEXT"
DataColType = "BLOB"
TimestampColType = "INTEGER"
Memory* = ":memory:"
# https://stackoverflow.com/a/9756276
# EXISTS returns a boolean value represented by an integer:
# https://sqlite.org/datatype3.html#boolean_datatype
# https://sqlite.org/lang_expr.html#the_exists_operator
ContainsStmtStr* = """
SELECT EXISTS(
SELECT 1 FROM """ & TableName & """
WHERE """ & IdColName & """ = ?
);
"""
ContainsStmtExistsCol* = 0
CreateStmtStr* = """
CREATE TABLE IF NOT EXISTS """ & TableName & """ (
""" & IdColName & """ """ & IdColType & """ NOT NULL PRIMARY KEY,
""" & DataColName & """ """ & DataColType & """,
""" & TimestampColName & """ """ & TimestampColType & """ NOT NULL
) WITHOUT ROWID;
"""
DeleteStmtStr* = """
DELETE FROM """ & TableName & """
WHERE """ & IdColName & """ = ?;
"""
GetStmtStr* = """
SELECT """ & DataColName & """ FROM """ & TableName & """
WHERE """ & IdColName & """ = ?;
"""
GetStmtDataCol* = 0
PutStmtStr* = """
REPLACE INTO """ & TableName & """ (
""" & IdColName & """,
""" & DataColName & """,
""" & TimestampColName & """
) VALUES (?, ?, ?);
"""
QueryStmtStr* = """
SELECT """ & IdColName & """, """ & DataColName & """ FROM """ & TableName &
""" WHERE """ & IdColName & """ GLOB ?;
"""
QueryStmtIdCol* = 0
QueryStmtDataCol* = 1
proc checkColMetadata(s: RawStmtPtr, i: int, expectedName: string) =
let
colName = sqlite3_column_origin_name(s, i.cint)
if colName.isNil:
raise (ref Defect)(msg: "no column exists for index " & $i & " in `" &
$sqlite3_sql(s) & "`")
if $colName != expectedName:
raise (ref Defect)(msg: "original column name for index " & $i & " was \"" &
$colName & "\" in `" & $sqlite3_sql(s) & "` but callee expected \"" &
expectedName & "\"")
proc idCol*(
s: RawStmtPtr,
index: int): BoundIdCol =
checkColMetadata(s, index, IdColName)
return proc (): string =
$sqlite3_column_text_not_null(s, index.cint)
proc dataCol*(
s: RawStmtPtr,
index: int): BoundDataCol =
checkColMetadata(s, index, DataColName)
return proc (): seq[byte] =
let
i = index.cint
blob = sqlite3_column_blob(s, i)
# detect out-of-memory error
# see the conversion table and final paragraph of:
# https://www.sqlite.org/c3ref/column_blob.html
# see also https://www.sqlite.org/rescode.html
# the "data" column can be NULL so in order to detect an out-of-memory error
# it is necessary to check that the result is a null pointer and that the
# result code is an error code
if blob.isNil:
let
v = sqlite3_errcode(sqlite3_db_handle(s))
if not (v in [SQLITE_OK, SQLITE_ROW, SQLITE_DONE]):
raise (ref Defect)(msg: $sqlite3_errstr(v))
let
dataLen = sqlite3_column_bytes(s, i)
dataBytes = cast[ptr UncheckedArray[byte]](blob)
@(toOpenArray(dataBytes, 0, dataLen - 1))
proc timestampCol*(
s: RawStmtPtr,
index: int): BoundTimestampCol =
checkColMetadata(s, index, TimestampColName)
return proc (): int64 =
sqlite3_column_int64(s, index.cint)
proc getDBFilePath*(path: string): ?!string =
try:
let
(parent, name, ext) = path.normalizePathEnd.splitFile
dbExt = if ext == "": DbExt else: ext
absPath =
if parent.isAbsolute: parent
else: getCurrentDir() / parent
dbPath = absPath / name & dbExt
return success dbPath
except CatchableError as exc:
return failure(exc.msg)
proc close*(self: SQLiteDsDb) =
self.containsStmt.dispose
self.getStmt.dispose
if not RawStmtPtr(self.deleteStmt).isNil:
self.deleteStmt.dispose
if not RawStmtPtr(self.putStmt).isNil:
self.putStmt.dispose
self.env.dispose
proc open*(
T: type SQLiteDsDb,
path = Memory,
flags = SQLITE_OPEN_READONLY): ?!SQLiteDsDb =
# make it optional to enable WAL with it enabled being the default?
# make it possible to specify a custom page size?
# https://www.sqlite.org/pragma.html#pragma_page_size
# https://www.sqlite.org/intern-v-extern-blob.html
var
env: AutoDisposed[SQLite]
defer:
disposeIfUnreleased(env)
let
isMemory = path == Memory
absPath = if isMemory: Memory else: ?path.getDBFilePath
readOnly = (SQLITE_OPEN_READONLY and flags).bool
if not isMemory:
if readOnly and not fileExists(absPath):
return failure "read-only database does not exist: " & absPath
elif not dirExists(absPath.parentDir):
return failure "directory does not exist: " & absPath
open(absPath, env.val, flags)
let
pragmaStmt = journalModePragmaStmt(env.val)
checkExec(pragmaStmt)
var
containsStmt: ContainsStmt
deleteStmt: DeleteStmt
getStmt: GetStmt
putStmt: PutStmt
if not readOnly:
checkExec(env.val, CreateStmtStr)
deleteStmt = ? DeleteStmt.prepare(
env.val, DeleteStmtStr, SQLITE_PREPARE_PERSISTENT)
putStmt = ? PutStmt.prepare(
env.val, PutStmtStr, SQLITE_PREPARE_PERSISTENT)
containsStmt = ? ContainsStmt.prepare(
env.val, ContainsStmtStr, SQLITE_PREPARE_PERSISTENT)
getStmt = ? GetStmt.prepare(
env.val, GetStmtStr, SQLITE_PREPARE_PERSISTENT)
# if a readOnly/existing database does not satisfy the expected schema
# `prepare()` will fail and `new` will return an error with message
# "SQL logic error"
let
getDataCol = dataCol(RawStmtPtr(getStmt), GetStmtDataCol)
success T(
readOnly: readOnly,
dbPath: path,
containsStmt: containsStmt,
deleteStmt: deleteStmt,
env: env.release,
getStmt: getStmt,
getDataCol: getDataCol,
putStmt: putStmt)


@ -3,6 +3,8 @@ import pkg/questionable/results
import pkg/sqlite3_abi
import pkg/upraises
export sqlite3_abi
# Adapted from:
# https://github.com/status-im/nwaku/blob/master/waku/v2/node/storage/sqlite.nim
@ -74,9 +76,7 @@ template bindParams(
when params is tuple:
when params isnot NoParams:
var
i = 1
var i = 1
for param in fields(params):
checkErr bindParam(s, i, param)
inc i


@ -1,413 +0,0 @@
import std/os
import std/times
import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import pkg/sqlite3_abi
import pkg/stew/byteutils
from pkg/stew/results as stewResults import isErr
import pkg/upraises
import ./datastore
import ./sqlite
export datastore, sqlite
push: {.upraises: [].}
type
BoundIdCol = proc (): string {.closure, gcsafe, upraises: [].}
BoundDataCol = proc (): seq[byte] {.closure, gcsafe, upraises: [].}
BoundTimestampCol = proc (): int64 {.closure, gcsafe, upraises: [].}
# feels odd to use `void` for prepared statements corresponding to SELECT
# queries but it fits with the rest of the SQLite wrapper adapted from
# status-im/nwaku, at least in its current form in ./sqlite
ContainsStmt = SQLiteStmt[(string), void]
DeleteStmt = SQLiteStmt[(string), void]
GetStmt = SQLiteStmt[(string), void]
PutStmt = SQLiteStmt[(string, seq[byte], int64), void]
QueryStmt = SQLiteStmt[(string), void]
SQLiteDatastore* = ref object of Datastore
dbPath: string
containsStmt: ContainsStmt
deleteStmt: DeleteStmt
env: SQLite
getDataCol: BoundDataCol
getStmt: GetStmt
putStmt: PutStmt
readOnly: bool
const
dbExt* = ".sqlite3"
tableName* = "Store"
idColName* = "id"
dataColName* = "data"
timestampColName* = "timestamp"
idColType = "TEXT"
dataColType = "BLOB"
timestampColType = "INTEGER"
memory* = ":memory:"
# https://stackoverflow.com/a/9756276
# EXISTS returns a boolean value represented by an integer:
# https://sqlite.org/datatype3.html#boolean_datatype
# https://sqlite.org/lang_expr.html#the_exists_operator
containsStmtStr = """
SELECT EXISTS(
SELECT 1 FROM """ & tableName & """
WHERE """ & idColName & """ = ?
);
"""
containsStmtExistsCol = 0
createStmtStr = """
CREATE TABLE IF NOT EXISTS """ & tableName & """ (
""" & idColName & """ """ & idColType & """ NOT NULL PRIMARY KEY,
""" & dataColName & """ """ & dataColType & """,
""" & timestampColName & """ """ & timestampColType & """ NOT NULL
) WITHOUT ROWID;
"""
deleteStmtStr = """
DELETE FROM """ & tableName & """
WHERE """ & idColName & """ = ?;
"""
getStmtStr = """
SELECT """ & dataColName & """ FROM """ & tableName & """
WHERE """ & idColName & """ = ?;
"""
getStmtDataCol = 0
putStmtStr = """
REPLACE INTO """ & tableName & """ (
""" & idColName & """,
""" & dataColName & """,
""" & timestampColName & """
) VALUES (?, ?, ?);
"""
queryStmtStr = """
SELECT """ & idColName & """, """ & dataColName & """ FROM """ & tableName &
""" WHERE """ & idColName & """ GLOB ?;
"""
queryStmtIdCol = 0
queryStmtDataCol = 1
proc checkColMetadata(s: RawStmtPtr, i: int, expectedName: string) =
let
colName = sqlite3_column_origin_name(s, i.cint)
if colName.isNil:
raise (ref Defect)(msg: "no column exists for index " & $i & " in `" &
$sqlite3_sql(s) & "`")
if $colName != expectedName:
raise (ref Defect)(msg: "original column name for index " & $i & " was \"" &
$colName & "\" in `" & $sqlite3_sql(s) & "` but callee expected \"" &
expectedName & "\"")
proc idCol*(
s: RawStmtPtr,
index: int): BoundIdCol =
checkColMetadata(s, index, idColName)
return proc (): string =
$sqlite3_column_text_not_null(s, index.cint)
proc dataCol*(
s: RawStmtPtr,
index: int): BoundDataCol =
checkColMetadata(s, index, dataColName)
return proc (): seq[byte] =
let
i = index.cint
blob = sqlite3_column_blob(s, i)
# detect out-of-memory error
# see the conversion table and final paragraph of:
# https://www.sqlite.org/c3ref/column_blob.html
# see also https://www.sqlite.org/rescode.html
# the "data" column can be NULL so in order to detect an out-of-memory error
# it is necessary to check that the result is a null pointer and that the
# result code is an error code
if blob.isNil:
let
v = sqlite3_errcode(sqlite3_db_handle(s))
if not (v in [SQLITE_OK, SQLITE_ROW, SQLITE_DONE]):
raise (ref Defect)(msg: $sqlite3_errstr(v))
let
dataLen = sqlite3_column_bytes(s, i)
dataBytes = cast[ptr UncheckedArray[byte]](blob)
@(toOpenArray(dataBytes, 0, dataLen - 1))
proc timestampCol*(
s: RawStmtPtr,
index: int): BoundTimestampCol =
checkColMetadata(s, index, timestampColName)
return proc (): int64 =
sqlite3_column_int64(s, index.cint)
proc new*(
T: type SQLiteDatastore,
basePath: string,
filename = "store" & dbExt,
readOnly = false): ?!T =
# make it optional to enable WAL with it enabled being the default?
# make it possible to specify a custom page size?
# https://www.sqlite.org/pragma.html#pragma_page_size
# https://www.sqlite.org/intern-v-extern-blob.html
var
env: AutoDisposed[SQLite]
defer: disposeIfUnreleased(env)
var
basep, fname, dbPath: string
if basePath == memory:
if readOnly:
return failure "SQLiteDatastore cannot be read-only and in-memory"
else:
dbPath = memory
else:
try:
basep = normalizePathEnd(
if basePath.isAbsolute: basePath
else: getCurrentDir() / basePath)
fname = filename.normalizePathEnd
dbPath = basep / fname
if readOnly and not fileExists(dbPath):
return failure "read-only database does not exist: " & dbPath
elif not dirExists(basep):
return failure "directory does not exist: " & basep
except IOError as e:
return failure e
except OSError as e:
return failure e
let
flags =
if readOnly: SQLITE_OPEN_READONLY
else: SQLITE_OPEN_READWRITE or SQLITE_OPEN_CREATE
open(dbPath, env.val, flags)
let
pragmaStmt = journalModePragmaStmt(env.val)
checkExec(pragmaStmt)
var
containsStmt: ContainsStmt
deleteStmt: DeleteStmt
getStmt: GetStmt
putStmt: PutStmt
if not readOnly:
checkExec(env.val, createStmtStr)
deleteStmt = ? DeleteStmt.prepare(
env.val, deleteStmtStr, SQLITE_PREPARE_PERSISTENT)
putStmt = ? PutStmt.prepare(
env.val, putStmtStr, SQLITE_PREPARE_PERSISTENT)
containsStmt = ? ContainsStmt.prepare(
env.val, containsStmtStr, SQLITE_PREPARE_PERSISTENT)
getStmt = ? GetStmt.prepare(
env.val, getStmtStr, SQLITE_PREPARE_PERSISTENT)
# if a readOnly/existing database does not satisfy the expected schema
# `prepare()` will fail and `new` will return an error with message
# "SQL logic error"
let
getDataCol = dataCol(RawStmtPtr(getStmt), getStmtDataCol)
success T(dbPath: dbPath, containsStmt: containsStmt, deleteStmt: deleteStmt,
env: env.release, getStmt: getStmt, getDataCol: getDataCol,
putStmt: putStmt, readOnly: readOnly)
proc dbPath*(self: SQLiteDatastore): string =
self.dbPath
proc env*(self: SQLiteDatastore): SQLite =
self.env
proc close*(self: SQLiteDatastore) =
self.containsStmt.dispose
self.getStmt.dispose
if not self.readOnly:
self.deleteStmt.dispose
self.putStmt.dispose
self.env.dispose
self[] = SQLiteDatastore()[]
proc timestamp*(t = epochTime()): int64 =
(t * 1_000_000).int64
method contains*(
self: SQLiteDatastore,
key: Key): Future[?!bool] {.async, locks: "unknown".} =
var
exists = false
proc onData(s: RawStmtPtr) =
exists = sqlite3_column_int64(s, containsStmtExistsCol.cint).bool
let
queryRes = self.containsStmt.query((key.id), onData)
if queryRes.isErr: return queryRes
return success exists
method delete*(
self: SQLiteDatastore,
key: Key): Future[?!void] {.async, locks: "unknown".} =
if self.readOnly:
return failure "database is read-only":
else:
return self.deleteStmt.exec((key.id))
method get*(
self: SQLiteDatastore,
key: Key): Future[?!(?seq[byte])] {.async, locks: "unknown".} =
# see comment in ./filesystem_datastore re: finer control of memory
# allocation in `method get`, could apply here as well if bytes were read
# incrementally with `sqlite3_blob_read`
var
bytes: ?seq[byte]
let
dataCol = self.getDataCol
proc onData(s: RawStmtPtr) =
bytes = dataCol().some
let
queryRes = self.getStmt.query((key.id), onData)
if queryRes.isErr:
return failure queryRes.error.msg
else:
return success bytes
proc put*(
self: SQLiteDatastore,
key: Key,
data: seq[byte],
timestamp: int64): Future[?!void] {.async.} =
if self.readOnly:
return failure "database is read-only"
else:
return self.putStmt.exec((key.id, @data, timestamp))
method put*(
self: SQLiteDatastore,
key: Key,
data: seq[byte]): Future[?!void] {.async, locks: "unknown".} =
return await self.put(key, data, timestamp())
iterator query*(
self: SQLiteDatastore,
query: Query): Future[QueryResponse] =
let
queryStmt = QueryStmt.prepare(
self.env, queryStmtStr).expect("should not fail")
s = RawStmtPtr(queryStmt)
defer:
discard sqlite3_reset(s)
discard sqlite3_clear_bindings(s)
s.dispose
let
v = sqlite3_bind_text(s, 1.cint, query.key.id.cstring, -1.cint,
SQLITE_TRANSIENT_GCSAFE)
if not (v == SQLITE_OK):
raise (ref Defect)(msg: $sqlite3_errstr(v))
while true:
let
v = sqlite3_step(s)
case v
of SQLITE_ROW:
let
key = Key.init($sqlite3_column_text_not_null(
s, queryStmtIdCol)).expect("should not fail")
blob = sqlite3_column_blob(s, queryStmtDataCol)
# detect out-of-memory error
# see the conversion table and final paragraph of:
# https://www.sqlite.org/c3ref/column_blob.html
# see also https://www.sqlite.org/rescode.html
# the "data" column can be NULL so in order to detect an out-of-memory
# error it is necessary to check that the result is a null pointer and
# that the result code is an error code
if blob.isNil:
let
v = sqlite3_errcode(sqlite3_db_handle(s))
if not (v in [SQLITE_OK, SQLITE_ROW, SQLITE_DONE]):
raise (ref Defect)(msg: $sqlite3_errstr(v))
let
dataLen = sqlite3_column_bytes(s, queryStmtDataCol)
dataBytes = cast[ptr UncheckedArray[byte]](blob)
data = @(toOpenArray(dataBytes, 0, dataLen - 1))
fut = newFuture[QueryResponse]()
fut.complete((key, data))
yield fut
of SQLITE_DONE:
break
else:
raise (ref Defect)(msg: $sqlite3_errstr(v))


@ -55,31 +55,27 @@ method delete*(
method get*(
self: TieredDatastore,
key: Key): Future[?!(?seq[byte])] {.async, locks: "unknown".} =
key: Key): Future[?!seq[byte]] {.async, locks: "unknown".} =
var
bytesOpt: ?seq[byte]
bytes: seq[byte]
for store in self.stores:
let
getRes = await store.get(key)
without bytes =? (await store.get(key)):
continue
if getRes.isErr: return getRes
bytesOpt = getRes.get
if bytes.len <= 0:
continue
# put found data into stores logically in front of the current store
if bytes =? bytesOpt:
for s in self.stores:
if s == store: break
let
putRes = await s.put(key, bytes)
for s in self.stores:
if s == store: break
if(
let res = (await s.put(key, bytes));
res.isErr):
return failure res.error
if putRes.isErr: return failure putRes.error.msg
break
return success bytesOpt
return success bytes
method put*(
self: TieredDatastore,


@ -0,0 +1,34 @@
import std/options
import pkg/asynctest
import pkg/chronos
import pkg/stew/results
import pkg/datastore
proc basicStoreTests*(
ds: Datastore,
key: Key,
bytes: seq[byte],
otherBytes: seq[byte]) =
test "put":
(await ds.put(key, bytes)).tryGet()
test "get":
check:
(await ds.get(key)).tryGet() == bytes
test "put update":
(await ds.put(key, otherBytes)).tryGet()
test "get updated":
check:
(await ds.get(key)).tryGet() == otherBytes
test "delete":
(await ds.delete(key)).tryGet()
test "contains":
check:
not (await ds.contains(key)).tryGet()


@ -0,0 +1,350 @@
import std/options
import std/os
import pkg/asynctest/unittest2
import pkg/chronos
import pkg/stew/results
import pkg/stew/byteutils
import pkg/datastore/sql/sqliteds
import ../basictests
suite "Test Basic SQLiteDatastore":
let
(path, _, _) = instantiationInfo(-1, fullPaths = true) # get this file's name
basePath = "tests_data"
basePathAbs = path.parentDir / basePath
filename = "test_store" & DbExt
dbPathAbs = basePathAbs / filename
key = Key.init("a:b/c/d:e").tryGet()
bytes = "some bytes".toBytes
otherBytes = "some other bytes".toBytes
var
dsDb: SQLiteDatastore
setupAll:
removeDir(basePathAbs)
require(not dirExists(basePathAbs))
createDir(basePathAbs)
dsDb = SQLiteDatastore.new(path = dbPathAbs).tryGet()
teardownAll:
(await dsDb.close()).tryGet()
removeDir(basePathAbs)
require(not dirExists(basePathAbs))
basicStoreTests(dsDb, key, bytes, otherBytes)
suite "Test Read Only SQLiteDatastore":
let
(path, _, _) = instantiationInfo(-1, fullPaths = true) # get this file's name
basePath = "tests_data"
basePathAbs = path.parentDir / basePath
filename = "test_store" & DbExt
dbPathAbs = basePathAbs / filename
key = Key.init("a:b/c/d:e").tryGet()
bytes = "some bytes".toBytes
var
dsDb: SQLiteDatastore
readOnlyDb: SQLiteDatastore
setupAll:
removeDir(basePathAbs)
require(not dirExists(basePathAbs))
createDir(basePathAbs)
dsDb = SQLiteDatastore.new(path = dbPathAbs).tryGet()
readOnlyDb = SQLiteDatastore.new(path = dbPathAbs, readOnly = true).tryGet()
teardownAll:
(await dsDb.close()).tryGet()
(await readOnlyDb.close()).tryGet()
removeDir(basePathAbs)
require(not dirExists(basePathAbs))
test "put":
check:
(await readOnlyDb.put(key, bytes)).isErr
(await dsDb.put(key, bytes)).tryGet()
test "get":
check:
(await readOnlyDb.get(key)).tryGet() == bytes
(await dsDb.get(key)).tryGet() == bytes
test "delete":
check:
(await readOnlyDb.delete(key)).isErr
(await dsDb.delete(key)).tryGet()
test "contains":
check:
not (await readOnlyDb.contains(key)).tryGet()
not (await dsDb.contains(key)).tryGet()
# test "query":
# ds = SQLiteDatastore.new(basePathAbs, filename).get
# var
# key1 = Key.init("a").get
# key2 = Key.init("a/b").get
# key3 = Key.init("a/b:c").get
# key4 = Key.init("a:b").get
# key5 = Key.init("a:b/c").get
# key6 = Key.init("a:b/c:d").get
# key7 = Key.init("A").get
# key8 = Key.init("A/B").get
# key9 = Key.init("A/B:C").get
# key10 = Key.init("A:B").get
# key11 = Key.init("A:B/C").get
# key12 = Key.init("A:B/C:D").get
# bytes1 = @[1.byte, 2.byte, 3.byte]
# bytes2 = @[4.byte, 5.byte, 6.byte]
# bytes3: seq[byte] = @[]
# bytes4 = bytes1
# bytes5 = bytes2
# bytes6 = bytes3
# bytes7 = bytes1
# bytes8 = bytes2
# bytes9 = bytes3
# bytes10 = bytes1
# bytes11 = bytes2
# bytes12 = bytes3
# queryKey = Key.init("*").get
# var
# putRes = await ds.put(key1, bytes1)
# assert putRes.isOk
# putRes = await ds.put(key2, bytes2)
# assert putRes.isOk
# putRes = await ds.put(key3, bytes3)
# assert putRes.isOk
# putRes = await ds.put(key4, bytes4)
# assert putRes.isOk
# putRes = await ds.put(key5, bytes5)
# assert putRes.isOk
# putRes = await ds.put(key6, bytes6)
# assert putRes.isOk
# putRes = await ds.put(key7, bytes7)
# assert putRes.isOk
# putRes = await ds.put(key8, bytes8)
# assert putRes.isOk
# putRes = await ds.put(key9, bytes9)
# assert putRes.isOk
# putRes = await ds.put(key10, bytes10)
# assert putRes.isOk
# putRes = await ds.put(key11, bytes11)
# assert putRes.isOk
# putRes = await ds.put(key12, bytes12)
# assert putRes.isOk
# var
# kds: seq[QueryResponse]
# for kd in ds.query(Query.init(queryKey)):
# let
# (key, data) = await kd
# kds.add (key, data)
# # see https://sqlite.org/lang_select.html#the_order_by_clause
# # If a SELECT statement that returns more than one row does not have an
# # ORDER BY clause, the order in which the rows are returned is undefined.
# check: kds.sortedByIt(it.key.id) == @[
# (key: key1, data: bytes1),
# (key: key2, data: bytes2),
# (key: key3, data: bytes3),
# (key: key4, data: bytes4),
# (key: key5, data: bytes5),
# (key: key6, data: bytes6),
# (key: key7, data: bytes7),
# (key: key8, data: bytes8),
# (key: key9, data: bytes9),
# (key: key10, data: bytes10),
# (key: key11, data: bytes11),
# (key: key12, data: bytes12)
# ].sortedByIt(it.key.id)
# kds = @[]
# queryKey = Key.init("a*").get
# for kd in ds.query(Query.init(queryKey)):
# let
# (key, data) = await kd
# kds.add (key, data)
# check: kds.sortedByIt(it.key.id) == @[
# (key: key1, data: bytes1),
# (key: key2, data: bytes2),
# (key: key3, data: bytes3),
# (key: key4, data: bytes4),
# (key: key5, data: bytes5),
# (key: key6, data: bytes6)
# ].sortedByIt(it.key.id)
# kds = @[]
# queryKey = Key.init("A*").get
# for kd in ds.query(Query.init(queryKey)):
# let
# (key, data) = await kd
# kds.add (key, data)
# check: kds.sortedByIt(it.key.id) == @[
# (key: key7, data: bytes7),
# (key: key8, data: bytes8),
# (key: key9, data: bytes9),
# (key: key10, data: bytes10),
# (key: key11, data: bytes11),
# (key: key12, data: bytes12)
# ].sortedByIt(it.key.id)
# kds = @[]
# queryKey = Key.init("a/?").get
# for kd in ds.query(Query.init(queryKey)):
# let
# (key, data) = await kd
# kds.add (key, data)
# check: kds.sortedByIt(it.key.id) == @[
# (key: key2, data: bytes2)
# ].sortedByIt(it.key.id)
# kds = @[]
# queryKey = Key.init("A/?").get
# for kd in ds.query(Query.init(queryKey)):
# let
# (key, data) = await kd
# kds.add (key, data)
# check: kds.sortedByIt(it.key.id) == @[
# (key: key8, data: bytes8)
# ].sortedByIt(it.key.id)
# kds = @[]
# queryKey = Key.init("*/?").get
# for kd in ds.query(Query.init(queryKey)):
# let
# (key, data) = await kd
# kds.add (key, data)
# check: kds.sortedByIt(it.key.id) == @[
# (key: key2, data: bytes2),
# (key: key5, data: bytes5),
# (key: key8, data: bytes8),
# (key: key11, data: bytes11)
# ].sortedByIt(it.key.id)
# kds = @[]
# queryKey = Key.init("[Aa]/?").get
# for kd in ds.query(Query.init(queryKey)):
# let
# (key, data) = await kd
# kds.add (key, data)
# check: kds.sortedByIt(it.key.id) == @[
# (key: key2, data: bytes2),
# (key: key8, data: bytes8)
# ].sortedByIt(it.key.id)
# kds = @[]
# # SQLite's GLOB operator, akin to Unix file globbing syntax, is greedy re:
# # wildcard "*". So a pattern such as "a:*[^/]" will not restrict results to
# # "/a:b", i.e. it will match on "/a:b", "/a:b/c" and "/a:b/c:d".
# queryKey = Key.init("a:*[^/]").get
# for kd in ds.query(Query.init(queryKey)):
# let
# (key, data) = await kd
# kds.add (key, data)
# check: kds.sortedByIt(it.key.id) == @[
# (key: key4, data: bytes4),
# (key: key5, data: bytes5),
# (key: key6, data: bytes6)
# ].sortedByIt(it.key.id)
# kds = @[]
# queryKey = Key.init("a:*[Bb]").get
# for kd in ds.query(Query.init(queryKey)):
# let
# (key, data) = await kd
# kds.add (key, data)
# check: kds.sortedByIt(it.key.id) == @[
# (key: key4, data: bytes4)
# ].sortedByIt(it.key.id)
# kds = @[]
# var
# deleteRes = await ds.delete(key1)
# assert deleteRes.isOk
# deleteRes = await ds.delete(key2)
# assert deleteRes.isOk
# deleteRes = await ds.delete(key3)
# assert deleteRes.isOk
# deleteRes = await ds.delete(key4)
# assert deleteRes.isOk
# deleteRes = await ds.delete(key5)
# assert deleteRes.isOk
# deleteRes = await ds.delete(key6)
# assert deleteRes.isOk
# deleteRes = await ds.delete(key7)
# assert deleteRes.isOk
# deleteRes = await ds.delete(key8)
# assert deleteRes.isOk
# deleteRes = await ds.delete(key9)
# assert deleteRes.isOk
# deleteRes = await ds.delete(key10)
# assert deleteRes.isOk
# deleteRes = await ds.delete(key11)
# assert deleteRes.isOk
# deleteRes = await ds.delete(key12)
# assert deleteRes.isOk
# let
# emptyKds: seq[QueryResponse] = @[]
# for kd in ds.query(Query.init(queryKey)):
# let
# (key, data) = await kd
# kds.add (key, data)
# check: kds == emptyKds


@ -0,0 +1,161 @@
import std/os
import pkg/chronos
import pkg/asynctest
import pkg/stew/byteutils
import pkg/sqlite3_abi
import pkg/datastore/key
import pkg/datastore/sql/sqlitedsdb
import pkg/datastore/sql/sqliteds
suite "Test Open SQLite Datastore DB":
let
(path, _, _) = instantiationInfo(-1, fullPaths = true) # get this file's name
basePath = "tests_data"
basePathAbs = path.parentDir / basePath
filename = "test_store" & DbExt
dbPathAbs = basePathAbs / filename
setupAll:
removeDir(basePathAbs)
require(not dirExists(basePathAbs))
createDir(basePathAbs)
teardownAll:
removeDir(basePathAbs)
require(not dirExists(basePathAbs))
test "Should create and open datastore DB":
let
dsDb = SQLiteDsDb.open(
path = dbPathAbs,
flags = SQLITE_OPEN_READWRITE or SQLITE_OPEN_CREATE).tryGet()
defer:
dsDb.close()
check:
fileExists(dbPathAbs)
test "Should open existing DB":
let
dsDb = SQLiteDsDb.open(
path = dbPathAbs,
flags = SQLITE_OPEN_READWRITE or SQLITE_OPEN_CREATE).tryGet()
defer:
dsDb.close()
check:
fileExists(dbPathAbs)
test "Should open existing DB in read only mode":
check:
fileExists(dbPathAbs)
let
dsDb = SQLiteDsDb.open(
path = dbPathAbs,
flags = SQLITE_OPEN_READWRITE or SQLITE_OPEN_CREATE).tryGet()
defer:
dsDb.close()
check:
fileExists(dbPathAbs)
test "Should fail open existing DB in read only mode":
removeDir(basePathAbs)
check:
not fileExists(dbPathAbs)
SQLiteDsDb.open(path = dbPathAbs).isErr
suite "Test SQLite Datastore DB operations":
let
(path, _, _) = instantiationInfo(-1, fullPaths = true) # get this file's name
basePath = "tests_data"
basePathAbs = path.parentDir / basePath
filename = "test_store" & DbExt
dbPathAbs = basePathAbs / filename
key = Key.init("test/key").tryGet()
data = "some data".toBytes
otherData = "some other data".toBytes
var
dsDb: SQLiteDsDb
readOnlyDb: SQLiteDsDb
setupAll:
removeDir(basePathAbs)
require(not dirExists(basePathAbs))
createDir(basePathAbs)
dsDb = SQLiteDsDb.open(
path = dbPathAbs,
flags = SQLITE_OPEN_READWRITE or SQLITE_OPEN_CREATE).tryGet()
readOnlyDb = SQLiteDsDb.open(
path = dbPathAbs,
flags = SQLITE_OPEN_READONLY).tryGet()
teardownAll:
dsDb.close()
readOnlyDb.close()
removeDir(basePathAbs)
require(not dirExists(basePathAbs))
test "Should insert key":
check:
readOnlyDb.putStmt.exec((key.id, data, timestamp())).isErr()
dsDb.putStmt.exec((key.id, data, timestamp())).tryGet()
test "Should select key":
let
dataCol = dsDb.getDataCol
var bytes: seq[byte]
proc onData(s: RawStmtPtr) =
bytes = dataCol()
check:
dsDb.getStmt.query((key.id), onData).tryGet()
bytes == data
test "Should update key":
check:
readOnlyDb.putStmt.exec((key.id, otherData, timestamp())).isErr()
dsDb.putStmt.exec((key.id, otherData, timestamp())).tryGet()
test "Should select updated key":
let
dataCol = dsDb.getDataCol
var bytes: seq[byte]
proc onData(s: RawStmtPtr) =
bytes = dataCol()
check:
dsDb.getStmt.query((key.id), onData).tryGet()
bytes == otherData
test "Should delete key":
check:
readOnlyDb.deleteStmt.exec((key.id)).isErr()
dsDb.deleteStmt.exec((key.id)).tryGet()
test "Should not contain key":
var
exists = false
proc onData(s: RawStmtPtr) =
exists = sqlite3_column_int64(s, ContainsStmtExistsCol.cint).bool
check:
dsDb.containsStmt.query((key.id), onData).tryGet()
not exists


@ -1 +0,0 @@
template asyncTest*(name, body: untyped) = test(name, body)


@ -1,196 +0,0 @@
import std/options
import std/os
import pkg/asynctest/unittest2
import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import pkg/stew/byteutils
from pkg/stew/results as stewResults import get, isOk
import ../../datastore/filesystem_datastore
import ./templates
suite "FileSystemDatastore":
# assumes tests/test_all is run from project root, e.g. with `nimble test`
let
root = "tests" / "test_data"
rootAbs = getCurrentDir() / root
setup:
removeDir(rootAbs)
require(not dirExists(rootAbs))
createDir(rootAbs)
teardown:
removeDir(rootAbs)
require(not dirExists(rootAbs))
asyncTest "new":
var
dsRes: ?!FileSystemDatastore
dsRes = FileSystemDatastore.new(rootAbs / "missing")
check: dsRes.isErr
dsRes = FileSystemDatastore.new(rootAbs)
check: dsRes.isOk
dsRes = FileSystemDatastore.new(root)
check: dsRes.isOk
asyncTest "accessors":
let
ds = FileSystemDatastore.new(root).get
check: ds.root == rootAbs
asyncTest "helpers":
let
ds = FileSystemDatastore.new(root).get
check:
# see comment in ../../datastore/filesystem_datastore re: whether path
# equivalence of e.g. Key(/a:b) and Key(/a/b) is problematic
ds.path(Key.init("a").get) == rootAbs / "a" & objExt
ds.path(Key.init("a:b").get) == rootAbs / "a" / "b" & objExt
ds.path(Key.init("a/b").get) == rootAbs / "a" / "b" & objExt
ds.path(Key.init("a:b/c").get) == rootAbs / "a" / "b" / "c" & objExt
ds.path(Key.init("a/b/c").get) == rootAbs / "a" / "b" / "c" & objExt
ds.path(Key.init("a:b/c:d").get) == rootAbs / "a" / "b" / "c" / "d" & objExt
ds.path(Key.init("a/b/c:d").get) == rootAbs / "a" / "b" / "c" / "d" & objExt
ds.path(Key.init("a/b/c/d").get) == rootAbs / "a" / "b" / "c" / "d" & objExt
asyncTest "put":
let
ds = FileSystemDatastore.new(root).get
key = Key.init("a:b/c/d:e").get
path = ds.path(key)
var
bytes: seq[byte]
putRes = await ds.put(key, bytes)
check:
putRes.isOk
readFile(path).toBytes == bytes
bytes = @[1.byte, 2.byte, 3.byte]
putRes = await ds.put(key, bytes)
check:
putRes.isOk
readFile(path).toBytes == bytes
bytes = @[4.byte, 5.byte, 6.byte]
putRes = await ds.put(key, bytes)
check:
putRes.isOk
readFile(path).toBytes == bytes
asyncTest "delete":
let
bytes = @[1.byte, 2.byte, 3.byte]
ds = FileSystemDatastore.new(root).get
var
key = Key.init("a:b/c/d:e").get
path = ds.path(key)
let
putRes = await ds.put(key, bytes)
assert putRes.isOk
var
delRes = await ds.delete(key)
check:
delRes.isOk
not fileExists(path)
dirExists(parentDir(path))
key = Key.init("X/Y/Z").get
path = ds.path(key)
assert not fileExists(path)
delRes = await ds.delete(key)
check: delRes.isOk
asyncTest "contains":
let
bytes = @[1.byte, 2.byte, 3.byte]
ds = FileSystemDatastore.new(root).get
var
key = Key.init("a:b/c/d:e").get
path = ds.path(key)
putRes = await ds.put(key, bytes)
assert putRes.isOk
var
containsRes = await ds.contains(key)
check:
containsRes.isOk
containsRes.get == true
key = Key.init("X/Y/Z").get
path = ds.path(key)
assert not fileExists(path)
containsRes = await ds.contains(key)
check:
containsRes.isOk
containsRes.get == false
asyncTest "get":
let
ds = FileSystemDatastore.new(root).get
var
bytes: seq[byte]
key = Key.init("a:b/c/d:e").get
path = ds.path(key)
putRes = await ds.put(key, bytes)
assert putRes.isOk
var
getRes = await ds.get(key)
getOpt = getRes.get
check: getOpt.isSome and getOpt.get == bytes
bytes = @[1.byte, 2.byte, 3.byte]
putRes = await ds.put(key, bytes)
assert putRes.isOk
getRes = await ds.get(key)
getOpt = getRes.get
check: getOpt.isSome and getOpt.get == bytes
key = Key.init("X/Y/Z").get
path = ds.path(key)
assert not fileExists(path)
getRes = await ds.get(key)
getOpt = getRes.get
check: getOpt.isNone
# asyncTest "query":
# check:
# true


@ -1,44 +0,0 @@
import std/options
import pkg/asynctest/unittest2
import pkg/chronos
import pkg/stew/results
import ../../datastore/null_datastore
import ./templates
suite "NullDatastore":
let
key = Key.init("a").get
ds = NullDatastore.new()
asyncTest "new":
check: not ds.isNil
asyncTest "put":
check: (await ds.put(key, @[1.byte])).isOk
asyncTest "delete":
check: (await ds.delete(key)).isOk
asyncTest "contains":
check:
(await ds.contains(key)).isOk
(await ds.contains(key)).get == false
asyncTest "get":
check:
(await ds.get(key)).isOk
(await ds.get(key)).get.isNone
asyncTest "query":
var
x = true
for n in ds.query(Query.init(key)):
# `iterator query` for NullDatastore never yields so the following lines
# are not run (else the test would hang)
x = false
discard (await n)
check: x


@ -1,586 +0,0 @@
import std/algorithm
import std/options
import std/os
import pkg/asynctest/unittest2
import pkg/chronos
import pkg/stew/results
import ../../datastore/sqlite_datastore
import ./templates
suite "SQLiteDatastore":
var
ds: SQLiteDatastore
# assumes tests/test_all is run from project root, e.g. with `nimble test`
let
basePath = "tests" / "test_data"
basePathAbs = getCurrentDir() / basePath
filename = "test_store" & dbExt
dbPathAbs = basePathAbs / filename
setup:
removeDir(basePathAbs)
require(not dirExists(basePathAbs))
createDir(basePathAbs)
teardown:
if not ds.isNil: ds.close
ds = nil
removeDir(basePathAbs)
require(not dirExists(basePathAbs))
asyncTest "new":
var
dsRes = SQLiteDatastore.new(basePathAbs, filename, readOnly = true)
# for `readOnly = true` to succeed the database file must already exist
check: dsRes.isErr
dsRes = SQLiteDatastore.new(basePathAbs / "missing", filename)
check: dsRes.isErr
dsRes = SQLiteDatastore.new(basePathAbs, filename)
check:
dsRes.isOk
fileExists(dbPathAbs)
dsRes.get.close
removeDir(basePathAbs)
assert not dirExists(basePathAbs)
createDir(basePathAbs)
dsRes = SQLiteDatastore.new(basePath, filename)
check:
dsRes.isOk
fileExists(dbPathAbs)
dsRes.get.close
# for `readOnly = true` to succeed the database file must already exist, so
# the existing file (per previous step) is not deleted prior to the next
# invocation of `SQLiteDatastore.new`
dsRes = SQLiteDatastore.new(basePath, filename, readOnly = true)
check: dsRes.isOk
dsRes.get.close
removeDir(basePathAbs)
assert not dirExists(basePathAbs)
createDir(basePathAbs)
dsRes = SQLiteDatastore.new(memory)
check: dsRes.isOk
dsRes.get.close
dsRes = SQLiteDatastore.new(memory, readOnly = true)
check: dsRes.isErr
asyncTest "accessors":
ds = SQLiteDatastore.new(basePath).get
check:
parentDir(ds.dbPath) == basePathAbs
not ds.env.isNil
asyncTest "helpers":
ds = SQLiteDatastore.new(basePath).get
ds.close
check:
ds.env.isNil
timestamp(10.123_456) == 10_123_456.int64
asyncTest "put":
let
key = Key.init("a:b/c/d:e").get
# for `readOnly = true` to succeed the database file must already exist
ds = SQLiteDatastore.new(basePathAbs, filename).get
ds.close
ds = SQLiteDatastore.new(basePathAbs, filename, readOnly = true).get
var
bytes: seq[byte]
timestamp = timestamp()
putRes = await ds.put(key, bytes, timestamp)
check: putRes.isErr
ds.close
removeDir(basePathAbs)
assert not dirExists(basePathAbs)
createDir(basePathAbs)
ds = SQLiteDatastore.new(basePathAbs, filename).get
timestamp = timestamp()
putRes = await ds.put(key, bytes, timestamp)
check: putRes.isOk
let
prequeryRes = NoParamsStmt.prepare(
ds.env, "SELECT timestamp AS foo, id AS baz, data AS bar FROM " &
tableName & ";")
assert prequeryRes.isOk
let
prequery = prequeryRes.get
idCol = idCol(RawStmtPtr(prequery), 1)
dataCol = dataCol(RawStmtPtr(prequery), 2)
timestampCol = timestampCol(RawStmtPtr(prequery), 0)
var
qId: string
qData: seq[byte]
qTimestamp: int64
rowCount = 0
proc onData(s: RawStmtPtr) {.closure.} =
qId = idCol()
qData = dataCol()
qTimestamp = timestampCol()
inc rowCount
var
qRes = prequery.query((), onData)
assert qRes.isOk
check:
qRes.get
qId == key.id
qData == bytes
qTimestamp == timestamp
rowCount == 1
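# putting again under the same key should update the existing row in place
# rather than insert a new one, so rowCount stays at 1 in the checks below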
bytes = @[1.byte, 2.byte, 3.byte]
timestamp = timestamp()
putRes = await ds.put(key, bytes, timestamp)
check: putRes.isOk
rowCount = 0
qRes = prequery.query((), onData)
assert qRes.isOk
check:
qRes.get
qId == key.id
qData == bytes
qTimestamp == timestamp
rowCount == 1
bytes = @[4.byte, 5.byte, 6.byte]
timestamp = timestamp()
putRes = await ds.put(key, bytes, timestamp)
check: putRes.isOk
rowCount = 0
qRes = prequery.query((), onData)
assert qRes.isOk
check:
qRes.get
qId == key.id
qData == bytes
qTimestamp == timestamp
rowCount == 1
prequery.dispose
asyncTest "delete":
let
bytes = @[1.byte, 2.byte, 3.byte]
var
key = Key.init("a:b/c/d:e").get
# for `readOnly = true` to succeed the database file must already exist
ds = SQLiteDatastore.new(basePathAbs, filename).get
ds.close
ds = SQLiteDatastore.new(basePathAbs, filename, readOnly = true).get
var
delRes = await ds.delete(key)
check: delRes.isErr
ds.close
removeDir(basePathAbs)
assert not dirExists(basePathAbs)
createDir(basePathAbs)
ds = SQLiteDatastore.new(basePathAbs, filename).get
let
putRes = await ds.put(key, bytes)
assert putRes.isOk
let
query = "SELECT * FROM " & tableName & ";"
var
rowCount = 0
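# count rows via a raw SELECT to confirm the put landed and that delete
# removes the row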
proc onData(s: RawStmtPtr) {.closure.} =
inc rowCount
var
qRes = ds.env.query(query, onData)
assert qRes.isOk
check: rowCount == 1
delRes = await ds.delete(key)
check: delRes.isOk
rowCount = 0
qRes = ds.env.query(query, onData)
assert qRes.isOk
check:
delRes.isOk
rowCount == 0
key = Key.init("X/Y/Z").get
delRes = await ds.delete(key)
check: delRes.isOk
asyncTest "contains":
let
bytes = @[1.byte, 2.byte, 3.byte]
var
key = Key.init("a:b/c/d:e").get
ds = SQLiteDatastore.new(basePathAbs, filename).get
let
putRes = await ds.put(key, bytes)
assert putRes.isOk
var
containsRes = await ds.contains(key)
check:
containsRes.isOk
containsRes.get == true
key = Key.init("X/Y/Z").get
containsRes = await ds.contains(key)
check:
containsRes.isOk
containsRes.get == false
asyncTest "get":
ds = SQLiteDatastore.new(basePathAbs, filename).get
var
bytes: seq[byte]
key = Key.init("a:b/c/d:e").get
putRes = await ds.put(key, bytes)
assert putRes.isOk
var
getRes = await ds.get(key)
getOpt = getRes.get
check: getOpt.isSome and getOpt.get == bytes
bytes = @[1.byte, 2.byte, 3.byte]
putRes = await ds.put(key, bytes)
assert putRes.isOk
getRes = await ds.get(key)
getOpt = getRes.get
check: getOpt.isSome and getOpt.get == bytes
key = Key.init("X/Y/Z").get
assert not (await ds.contains(key)).get
getRes = await ds.get(key)
getOpt = getRes.get
check: getOpt.isNone
asyncTest "query":
ds = SQLiteDatastore.new(basePathAbs, filename).get
var
key1 = Key.init("a").get
key2 = Key.init("a/b").get
key3 = Key.init("a/b:c").get
key4 = Key.init("a:b").get
key5 = Key.init("a:b/c").get
key6 = Key.init("a:b/c:d").get
key7 = Key.init("A").get
key8 = Key.init("A/B").get
key9 = Key.init("A/B:C").get
key10 = Key.init("A:B").get
key11 = Key.init("A:B/C").get
key12 = Key.init("A:B/C:D").get
bytes1 = @[1.byte, 2.byte, 3.byte]
bytes2 = @[4.byte, 5.byte, 6.byte]
bytes3: seq[byte] = @[]
bytes4 = bytes1
bytes5 = bytes2
bytes6 = bytes3
bytes7 = bytes1
bytes8 = bytes2
bytes9 = bytes3
bytes10 = bytes1
bytes11 = bytes2
bytes12 = bytes3
queryKey = Key.init("*").get
var
putRes = await ds.put(key1, bytes1)
assert putRes.isOk
putRes = await ds.put(key2, bytes2)
assert putRes.isOk
putRes = await ds.put(key3, bytes3)
assert putRes.isOk
putRes = await ds.put(key4, bytes4)
assert putRes.isOk
putRes = await ds.put(key5, bytes5)
assert putRes.isOk
putRes = await ds.put(key6, bytes6)
assert putRes.isOk
putRes = await ds.put(key7, bytes7)
assert putRes.isOk
putRes = await ds.put(key8, bytes8)
assert putRes.isOk
putRes = await ds.put(key9, bytes9)
assert putRes.isOk
putRes = await ds.put(key10, bytes10)
assert putRes.isOk
putRes = await ds.put(key11, bytes11)
assert putRes.isOk
putRes = await ds.put(key12, bytes12)
assert putRes.isOk
var
kds: seq[QueryResponse]
for kd in ds.query(Query.init(queryKey)):
let
(key, data) = await kd
kds.add (key, data)
# see https://sqlite.org/lang_select.html#the_order_by_clause
# If a SELECT statement that returns more than one row does not have an
# ORDER BY clause, the order in which the rows are returned is undefined.
check: kds.sortedByIt(it.key.id) == @[
(key: key1, data: bytes1),
(key: key2, data: bytes2),
(key: key3, data: bytes3),
(key: key4, data: bytes4),
(key: key5, data: bytes5),
(key: key6, data: bytes6),
(key: key7, data: bytes7),
(key: key8, data: bytes8),
(key: key9, data: bytes9),
(key: key10, data: bytes10),
(key: key11, data: bytes11),
(key: key12, data: bytes12)
].sortedByIt(it.key.id)
kds = @[]
queryKey = Key.init("a*").get
for kd in ds.query(Query.init(queryKey)):
let
(key, data) = await kd
kds.add (key, data)
check: kds.sortedByIt(it.key.id) == @[
(key: key1, data: bytes1),
(key: key2, data: bytes2),
(key: key3, data: bytes3),
(key: key4, data: bytes4),
(key: key5, data: bytes5),
(key: key6, data: bytes6)
].sortedByIt(it.key.id)
kds = @[]
queryKey = Key.init("A*").get
for kd in ds.query(Query.init(queryKey)):
let
(key, data) = await kd
kds.add (key, data)
check: kds.sortedByIt(it.key.id) == @[
(key: key7, data: bytes7),
(key: key8, data: bytes8),
(key: key9, data: bytes9),
(key: key10, data: bytes10),
(key: key11, data: bytes11),
(key: key12, data: bytes12)
].sortedByIt(it.key.id)
kds = @[]
queryKey = Key.init("a/?").get
for kd in ds.query(Query.init(queryKey)):
let
(key, data) = await kd
kds.add (key, data)
check: kds.sortedByIt(it.key.id) == @[
(key: key2, data: bytes2)
].sortedByIt(it.key.id)
kds = @[]
queryKey = Key.init("A/?").get
for kd in ds.query(Query.init(queryKey)):
let
(key, data) = await kd
kds.add (key, data)
check: kds.sortedByIt(it.key.id) == @[
(key: key8, data: bytes8)
].sortedByIt(it.key.id)
kds = @[]
queryKey = Key.init("*/?").get
for kd in ds.query(Query.init(queryKey)):
let
(key, data) = await kd
kds.add (key, data)
check: kds.sortedByIt(it.key.id) == @[
(key: key2, data: bytes2),
(key: key5, data: bytes5),
(key: key8, data: bytes8),
(key: key11, data: bytes11)
].sortedByIt(it.key.id)
kds = @[]
queryKey = Key.init("[Aa]/?").get
for kd in ds.query(Query.init(queryKey)):
let
(key, data) = await kd
kds.add (key, data)
check: kds.sortedByIt(it.key.id) == @[
(key: key2, data: bytes2),
(key: key8, data: bytes8)
].sortedByIt(it.key.id)
kds = @[]
# SQLite's GLOB operator, akin to Unix file globbing syntax, is greedy with
# respect to the "*" wildcard. So a pattern such as "a:*[^/]" does not
# restrict results to "/a:b": it matches "/a:b", "/a:b/c" and "/a:b/c:d".
queryKey = Key.init("a:*[^/]").get
for kd in ds.query(Query.init(queryKey)):
let
(key, data) = await kd
kds.add (key, data)
check: kds.sortedByIt(it.key.id) == @[
(key: key4, data: bytes4),
(key: key5, data: bytes5),
(key: key6, data: bytes6)
].sortedByIt(it.key.id)
kds = @[]
queryKey = Key.init("a:*[Bb]").get
for kd in ds.query(Query.init(queryKey)):
let
(key, data) = await kd
kds.add (key, data)
check: kds.sortedByIt(it.key.id) == @[
(key: key4, data: bytes4)
].sortedByIt(it.key.id)
kds = @[]
var
deleteRes = await ds.delete(key1)
assert deleteRes.isOk
deleteRes = await ds.delete(key2)
assert deleteRes.isOk
deleteRes = await ds.delete(key3)
assert deleteRes.isOk
deleteRes = await ds.delete(key4)
assert deleteRes.isOk
deleteRes = await ds.delete(key5)
assert deleteRes.isOk
deleteRes = await ds.delete(key6)
assert deleteRes.isOk
deleteRes = await ds.delete(key7)
assert deleteRes.isOk
deleteRes = await ds.delete(key8)
assert deleteRes.isOk
deleteRes = await ds.delete(key9)
assert deleteRes.isOk
deleteRes = await ds.delete(key10)
assert deleteRes.isOk
deleteRes = await ds.delete(key11)
assert deleteRes.isOk
deleteRes = await ds.delete(key12)
assert deleteRes.isOk
let
emptyKds: seq[QueryResponse] = @[]
for kd in ds.query(Query.init(queryKey)):
let
(key, data) = await kd
kds.add (key, data)
check: kds == emptyKds


@ -1,154 +0,0 @@
import std/options
import std/os
import pkg/asynctest/unittest2
import pkg/chronos
import pkg/stew/results
import ../../datastore/filesystem_datastore
import ../../datastore/sqlite_datastore
import ../../datastore/tiered_datastore
import ./templates
suite "TieredDatastore":
# assumes tests/test_all is run from project root, e.g. with `nimble test`
let
bytes = @[1.byte, 2.byte, 3.byte]
key = Key.init("a:b/c/d:e").get
root = "tests" / "test_data"
rootAbs = getCurrentDir() / root
var
ds1: SQLiteDatastore
ds2: FileSystemDatastore
setup:
removeDir(rootAbs)
require(not dirExists(rootAbs))
createDir(rootAbs)
ds1 = SQLiteDatastore.new(memory).get
ds2 = FileSystemDatastore.new(rootAbs).get
teardown:
if not ds1.isNil: ds1.close
ds1 = nil
removeDir(rootAbs)
require(not dirExists(rootAbs))
asyncTest "new":
check:
TieredDatastore.new().isErr
TieredDatastore.new([]).isErr
TieredDatastore.new(@[]).isErr
TieredDatastore.new(ds1, ds2).isOk
TieredDatastore.new([ds1, ds2]).isOk
TieredDatastore.new(@[ds1, ds2]).isOk
asyncTest "accessors":
let
stores = @[ds1, ds2]
check:
TieredDatastore.new(ds1, ds2).get.stores == stores
TieredDatastore.new([ds1, ds2]).get.stores == stores
TieredDatastore.new(@[ds1, ds2]).get.stores == stores
asyncTest "put":
let
ds = TieredDatastore.new(ds1, ds2).get
assert (await ds1.get(key)).get.isNone
assert (await ds2.get(key)).get.isNone
let
putRes = await ds.put(key, bytes)
check:
putRes.isOk
(await ds1.get(key)).get.get == bytes
(await ds2.get(key)).get.get == bytes
asyncTest "delete":
let
ds = TieredDatastore.new(ds1, ds2).get
putRes = await ds.put(key, bytes)
assert putRes.isOk
assert (await ds1.get(key)).get.get == bytes
assert (await ds2.get(key)).get.get == bytes
let
delRes = await ds.delete(key)
check:
delRes.isOk
(await ds1.get(key)).get.isNone
(await ds2.get(key)).get.isNone
asyncTest "contains":
let
ds = TieredDatastore.new(ds1, ds2).get
assert not (await ds1.contains(key)).get
assert not (await ds2.contains(key)).get
let
putRes = await ds.put(key, bytes)
assert putRes.isOk
let
containsRes = await ds.contains(key)
check:
containsRes.isOk
containsRes.get
(await ds1.contains(key)).get
(await ds2.contains(key)).get
asyncTest "get":
var
ds = TieredDatastore.new(ds1, ds2).get
assert (await ds1.get(key)).get.isNone
assert (await ds2.get(key)).get.isNone
check: (await ds.get(key)).get.isNone
let
putRes = await ds.put(key, bytes)
assert putRes.isOk
var
getRes = await ds.get(key)
check:
getRes.isOk
getRes.get.isSome
getRes.get.get == bytes
(await ds1.get(key)).get.isSome
(await ds2.get(key)).get.isSome
(await ds1.get(key)).get.get == bytes
(await ds2.get(key)).get.get == bytes
ds1.close
ds1 = SQLiteDatastore.new(memory).get
ds = TieredDatastore.new(ds1, ds2).get
assert (await ds1.get(key)).get.isNone
assert (await ds2.get(key)).get.isSome
assert (await ds2.get(key)).get.get == bytes
getRes = await ds.get(key)
check:
getRes.isOk
getRes.get.isSome
getRes.get.get == bytes
(await ds1.get(key)).get.isSome
(await ds1.get(key)).get.get == bytes
# asyncTest "query":
# check:
# true


@ -5,25 +5,25 @@ import pkg/chronos
import pkg/stew/results
import ../../datastore
import ./templates
suite "Datastore (base)":
let
key = Key.init("a").get
ds = Datastore()
asyncTest "put":
test "put":
expect Defect: discard ds.put(key, @[1.byte])
asyncTest "delete":
test "delete":
expect Defect: discard ds.delete(key)
asyncTest "contains":
test "contains":
expect Defect: discard ds.contains(key)
asyncTest "get":
test "get":
expect Defect: discard ds.get(key)
asyncTest "query":
test "query":
expect Defect:
for n in ds.query(Query.init(key)): discard
let iter = await ds.query(Query.init(key))
for n in iter: discard


@ -0,0 +1,84 @@
import std/options
import std/os
import pkg/asynctest/unittest2
import pkg/chronos
import pkg/stew/results
import pkg/stew/byteutils
import pkg/datastore/fsds
import ./basictests
suite "Test Basic FSDatastore":
let
(path, _, _) = instantiationInfo(-1, fullPaths = true) # get this file's name
basePath = "tests_data"
basePathAbs = path.parentDir / basePath
key = Key.init("/a/b").tryGet()
bytes = "some bytes".toBytes
otherBytes = "some other bytes".toBytes
var
fsStore: FSDatastore
setupAll:
removeDir(basePathAbs)
require(not dirExists(basePathAbs))
createDir(basePathAbs)
fsStore = FSDatastore.new(root = basePathAbs).tryGet()
teardownAll:
removeDir(basePathAbs)
require(not dirExists(basePathAbs))
basicStoreTests(fsStore, key, bytes, otherBytes)
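# An illustrative sketch only (not the contents of basicStoreTests, which
# lives in ./basictests and is not shown in this diff) of the kind of round
# trip such shared tests presumably cover, using only the put/get/contains/
# delete surface exercised elsewhere in this changeset:
asyncTest "round trip (illustrative sketch)":
(await fsStore.put(key, bytes)).tryGet
check: (await fsStore.get(key)).tryGet == bytes
check: (await fsStore.contains(key)).tryGet
(await fsStore.delete(key)).tryGet
check: not (await fsStore.contains(key)).tryGet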
suite "Test Misc FSDatastore":
let
(path, _, _) = instantiationInfo(-1, fullPaths = true) # get this file's name
basePath = "tests_data"
basePathAbs = path.parentDir / basePath
bytes = "some bytes".toBytes
setupAll:
removeDir(basePathAbs)
require(not dirExists(basePathAbs))
createDir(basePathAbs)
teardownAll:
removeDir(basePathAbs)
require(not dirExists(basePathAbs))
test "Test validDepth()":
let
fs = FSDatastore.new(root = "/", depth = 3).tryGet()
invalid = Key.init("/a/b/c/d").tryGet()
valid = Key.init("/a/b/c").tryGet()
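# depth = 3 appears to cap keys at three path segments below the root:
# "/a/b/c/d" is rejected while "/a/b/c" passes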
check:
not fs.validDepth(invalid)
fs.validDepth(valid)
test "Test invalid key (path) depth":
let
fs = FSDatastore.new(root = basePathAbs, depth = 3).tryGet()
key = Key.init("/a/b/c/d").tryGet()
check:
(await fs.put(key, bytes)).isErr
(await fs.get(key)).isErr
(await fs.delete(key)).isErr
(await fs.contains(key)).isErr
test "Test valid key (path) depth":
let
fs = FSDatastore.new(root = basePathAbs, depth = 3).tryGet()
key = Key.init("/a/b/c").tryGet()
check:
(await fs.put(key, bytes)).isOk
(await fs.get(key)).isOk
(await fs.delete(key)).isOk
(await fs.contains(key)).isOk


@ -202,12 +202,12 @@ suite "Key":
check: key.name == "e"
check:
Key.init(":b").tryGet().isTopLevel
not Key.init(":b/c").tryGet().isTopLevel
Key.init(":b").tryGet().root
not Key.init(":b/c").tryGet().root
check:
Key.init(":b").tryGet().parent.isFailure
Key.init(":b").tryGet().parent.isFailure
Key.init(":b").?parent.isFailure
Key.init(":b").?parent.isFailure
key.parent.tryGet() == Key.init("a:b/c").tryGet()
key.parent.?parent.tryGet() == Key.init("a:b").tryGet()
key.parent.?parent.?parent.isFailure
@ -253,12 +253,12 @@ suite "Key":
(key / "f:g").tryGet() == Key.init("a:b/c/d:e/f:g").tryGet()
check:
not key.isAncestorOf(Key.init("f:g").tryGet())
key.isAncestorOf(key / Key.init("f:g").tryGet())
not key.ancestor(Key.init("f:g").tryGet())
key.ancestor(key / Key.init("f:g").tryGet())
check:
key.isDescendantOf(key.parent.tryGet())
not Key.init("f:g").tryGet().isDescendantOf(key.parent.tryGet())
key.descendant(key.parent.tryGet())
not Key.init("f:g").tryGet().descendant(key.parent.tryGet())
test "serialization":
let


@ -0,0 +1,4 @@
import ./sql/testsqlitedsdb
import ./sql/testsqliteds
{.warning[UnusedImport]: off.}


@ -0,0 +1,159 @@
import std/options
import std/os
import pkg/asynctest
import pkg/chronos
import pkg/stew/results
import pkg/stew/byteutils
import pkg/datastore/fsds
import pkg/datastore/sql
import pkg/datastore/tieredds
import ./basictests
suite "Test Basic FSDatastore":
let
bytes = "some bytes".toBytes
otherBytes = "some other bytes".toBytes
key = Key.init("a:b/c/d:e").get
root = "tests" / "test_data"
(path, _, _) = instantiationInfo(-1, fullPaths = true) # get this file's name
rootAbs = path.parentDir / root
var
ds1: SQLiteDatastore
ds2: FSDatastore
tieredDs: TieredDatastore
setupAll:
removeDir(rootAbs)
require(not dirExists(rootAbs))
createDir(rootAbs)
ds1 = SQLiteDatastore.new(Memory).tryGet
ds2 = FSDatastore.new(rootAbs, depth = 5).tryGet
tieredDs = TieredDatastore.new(@[ds1, ds2]).tryGet
teardownAll:
removeDir(rootAbs)
require(not dirExists(rootAbs))
basicStoreTests(tieredDs, key, bytes, otherBytes)
suite "TieredDatastore":
# assumes tests/test_all is run from project root, e.g. with `nimble test`
let
bytes = @[1.byte, 2.byte, 3.byte]
key = Key.init("a:b/c/d:e").get
root = "tests" / "test_data"
(path, _, _) = instantiationInfo(-1, fullPaths = true) # get this file's name
rootAbs = path.parentDir / root
var
ds1: SQLiteDatastore
ds2: FSDatastore
setup:
removeDir(rootAbs)
require(not dirExists(rootAbs))
createDir(rootAbs)
ds1 = SQLiteDatastore.new(Memory).get
ds2 = FSDatastore.new(rootAbs, depth = 5).get
teardown:
if not ds1.isNil:
discard await ds1.close
ds1 = nil
removeDir(rootAbs)
require(not dirExists(rootAbs))
test "new":
check:
TieredDatastore.new().isErr
TieredDatastore.new([]).isErr
TieredDatastore.new(@[]).isErr
TieredDatastore.new(ds1, ds2).isOk
TieredDatastore.new([ds1, ds2]).isOk
TieredDatastore.new(@[ds1, ds2]).isOk
test "accessors":
let
stores = @[ds1, ds2]
check:
TieredDatastore.new(ds1, ds2).tryGet.stores == stores
TieredDatastore.new([ds1, ds2]).tryGet.stores == stores
TieredDatastore.new(@[ds1, ds2]).tryGet.stores == stores
test "put":
let
ds = TieredDatastore.new(ds1, ds2).get
putRes = await ds.put(key, bytes)
check:
putRes.isOk
(await ds1.get(key)).tryGet == bytes
(await ds2.get(key)).tryGet == bytes
test "delete":
let
ds = TieredDatastore.new(ds1, ds2).get
(await ds.put(key, bytes)).tryGet
(await ds.delete(key)).tryGet
check:
(await ds1.get(key)).tryGet.len == 0
expect DatastoreKeyNotFound:
discard (await ds2.get(key)).tryGet
test "contains":
let
ds = TieredDatastore.new(ds1, ds2).tryGet
check:
not (await ds1.contains(key)).tryGet
not (await ds2.contains(key)).tryGet
(await ds.put(key, bytes)).tryGet
check:
(await ds.contains(key)).tryGet
(await ds1.contains(key)).tryGet
(await ds2.contains(key)).tryGet
test "get":
var
ds = TieredDatastore.new(ds1, ds2).tryGet
check:
not (await ds1.contains(key)).tryGet
not (await ds2.contains(key)).tryGet
not (await ds.contains(key)).tryGet
(await ds.put(key, bytes)).tryGet
check:
(await ds.get(key)).tryGet == bytes
(await ds1.get(key)).tryGet == bytes
(await ds2.get(key)).tryGet == bytes
(await ds1.close()).tryGet
ds1 = nil
ds1 = SQLiteDatastore.new(Memory).tryGet
ds = TieredDatastore.new(ds1, ds2).tryGet
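# with a fresh, empty ds1 in front, a get through the tiered store should fall
# back to ds2 and then backfill ds1, hence the final ds1.get check below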
check:
not (await ds1.contains(key)).tryGet
(await ds2.get(key)).tryGet == bytes
(await ds.get(key)).tryGet == bytes
(await ds1.get(key)).tryGet == bytes
# # test "query":
# # check:
# # true


@ -1,9 +0,0 @@
import
./datastore/test_key,
./datastore/test_datastore,
./datastore/test_null_datastore,
./datastore/test_filesystem_datastore,
./datastore/test_sqlite_datastore,
./datastore/test_tiered_datastore
{.warning[UnusedImport]: off.}

tests/testall.nim Normal file

@ -0,0 +1,8 @@
import
./datastore/testkey,
./datastore/testdatastore,
./datastore/testfsds,
./datastore/testsql,
./datastore/testtieredds
{.warning[UnusedImport]: off.}