From 78653d9dd474a868521e3b981c13e7ae7574b95b Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 12 Sep 2022 12:30:52 -0600 Subject: [PATCH 01/41] wip --- datastore/datastore.nim | 8 +-- datastore/filesystem_datastore.nim | 41 ++++++------- datastore/query.nim | 59 ++++++++++++++++--- tests/datastore/test_filesystem_datastore.nim | 18 +++--- 4 files changed, 83 insertions(+), 43 deletions(-) diff --git a/datastore/datastore.nim b/datastore/datastore.nim index dd04725..7535d80 100644 --- a/datastore/datastore.nim +++ b/datastore/datastore.nim @@ -15,26 +15,26 @@ type method contains*( self: Datastore, - key: Key): Future[?!bool] {.async, base, locks: "unknown".} = + key: Key): Future[?!bool] {.base, locks: "unknown".} = raiseAssert("Not implemented!") method delete*( self: Datastore, - key: Key): Future[?!void] {.async, base, locks: "unknown".} = + key: Key): Future[?!void] {.base, locks: "unknown".} = raiseAssert("Not implemented!") method get*( self: Datastore, - key: Key): Future[?!(?seq[byte])] {.async, base, locks: "unknown".} = + key: Key): Future[?!(?seq[byte])] {.base, locks: "unknown".} = raiseAssert("Not implemented!") method put*( self: Datastore, key: Key, - data: seq[byte]): Future[?!void] {.async, base, locks: "unknown".} = + data: seq[byte]): Future[?!void] {.base, locks: "unknown".} = raiseAssert("Not implemented!") diff --git a/datastore/filesystem_datastore.nim b/datastore/filesystem_datastore.nim index 51c50a0..045df35 100644 --- a/datastore/filesystem_datastore.nim +++ b/datastore/filesystem_datastore.nim @@ -1,4 +1,6 @@ import std/os +import std/sequtils +import std/options import pkg/chronos import pkg/questionable @@ -14,30 +16,10 @@ push: {.upraises: [].} type FileSystemDatastore* = ref object of Datastore - root: string + root*: string const - objExt* = ".dsobject" - -proc new*( - T: type FileSystemDatastore, - root: string): ?!T = - - try: - let - root = if root.isAbsolute: root - else: getCurrentDir() / root - - if not dirExists(root): - failure "directory does not exist: " & root - else: - success T(root: root) - - except OSError as e: - failure e - -proc root*(self: FileSystemDatastore): string = - self.root + objExt* = ".obj" proc path*( self: FileSystemDatastore, @@ -160,3 +142,18 @@ method put*( # query: ...): Future[?!(?...)] {.async, locks: "unknown".} = # # return success ....some + +proc new*( + T: type FileSystemDatastore, + root: string, + caseSensitive = true): ?!T = + + let root = ? 
( + block: + if root.isAbsolute: root + else: getCurrentDir() / root).catch + + if not dirExists(root): + failure "directory does not exist: " & root + else: + success T(root: root) diff --git a/datastore/query.nim b/datastore/query.nim index b4aa8f1..decfcba 100644 --- a/datastore/query.nim +++ b/datastore/query.nim @@ -1,18 +1,61 @@ import ./key type - Query* = object - key: QueryKey + Node* = object of RootObj + next*: Node + prev*: Node - QueryKey* = Key + Filter* = object of Node + field*: string + value*: string + + FilterBool* = object of Filter + a*, b*: Filter + + FilterAnd = object of FilterBool + FilterOr = object of FilterBool + + Eq = object of Filter + Lt = object of Filter + Gt = object of Filter + Not = object of Filter + + SortOrder* {.pure.} = enum + Assending, + Descensing + + Order* = object + field*: string + sort*: SortOrder + + Query* = object + key*: Key + limit*: int + skip*: int + orders*: seq[Order] + filters*: seq[Filter] QueryResponse* = tuple[key: Key, data: seq[byte]] +proc `==`*(a, b: Filter): Filter = discard + +proc `!=`*(a, b: Filter): Filter = discard +proc `>`*(a, b: Filter): Filter = discard +proc `>=`*(a, b: Filter): Filter = discard +proc `<`*(a, b: Filter): Filter = discard +proc `<=`*(a, b: Filter): Filter = discard + proc init*( T: type Query, - key: QueryKey): T = + key: Key, + orders: openArray[Order] = [], + filters: openArray[Filter] = [], + skip = 0, + limit = 0): T = - T(key: key) - -proc key*(self: Query): QueryKey = - self.key + T( + key: key, + filters: @filters, + orders: @orders, + skip: skip, + limit: limit) diff --git a/tests/datastore/test_filesystem_datastore.nim b/tests/datastore/test_filesystem_datastore.nim index 3c93244..8852c96 100644 --- a/tests/datastore/test_filesystem_datastore.nim +++ b/tests/datastore/test_filesystem_datastore.nim @@ -50,19 +50,19 @@ suite "FileSystemDatastore": asyncTest "helpers": let - ds = FileSystemDatastore.new(root).get + ds = FileSystemDatastore.new(root).tryGet() check: # see comment in ../../datastore/filesystem_datastore re: whether path # equivalence of e.g. 
Key(/a:b) and Key(/a/b) is problematic - ds.path(Key.init("a").get) == rootAbs / "a" & objExt - ds.path(Key.init("a:b").get) == rootAbs / "a" / "b" & objExt - ds.path(Key.init("a/b").get) == rootAbs / "a" / "b" & objExt - ds.path(Key.init("a:b/c").get) == rootAbs / "a" / "b" / "c" & objExt - ds.path(Key.init("a/b/c").get) == rootAbs / "a" / "b" / "c" & objExt - ds.path(Key.init("a:b/c:d").get) == rootAbs / "a" / "b" / "c" / "d" & objExt - ds.path(Key.init("a/b/c:d").get) == rootAbs / "a" / "b" / "c" / "d" & objExt - ds.path(Key.init("a/b/c/d").get) == rootAbs / "a" / "b" / "c" / "d" & objExt + ds.path(Key.init("a").tryGet()) == rootAbs / "a" & objExt + ds.path(Key.init("a:b").tryGet()) == rootAbs / "a" / "b" & objExt + ds.path(Key.init("a/b").tryGet()) == rootAbs / "a" / "b" & objExt + ds.path(Key.init("a:b/c").tryGet()) == rootAbs / "a" / "b" / "c" & objExt + ds.path(Key.init("a/b/c").tryGet()) == rootAbs / "a" / "b" / "c" & objExt + ds.path(Key.init("a:b/c:d").tryGet()) == rootAbs / "a" / "b" / "c" / "d" & objExt + ds.path(Key.init("a/b/c:d").tryGet()) == rootAbs / "a" / "b" / "c" / "d" & objExt + ds.path(Key.init("a/b/c/d").tryGet()) == rootAbs / "a" / "b" / "c" / "d" & objExt asyncTest "put": let From b150b2f922764dbcb6dd5c685f4c0eab60797fa5 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Fri, 16 Sep 2022 21:12:49 -0600 Subject: [PATCH 02/41] cleanup key --- datastore/key.nim | 80 ++++++++++++++++++++++++++++------------------- 1 file changed, 48 insertions(+), 32 deletions(-) diff --git a/datastore/key.nim b/datastore/key.nim index 44d6480..516ccbf 100644 --- a/datastore/key.nim +++ b/datastore/key.nim @@ -28,7 +28,7 @@ const # TODO: operator/s for combining string|Namespace,string|Namespace # TODO: lifting from ?![Namespace|Key] for various ops -proc init*( +func init*( T: type Namespace, field, value: string): ?!T = @@ -57,7 +57,7 @@ proc init*( success T(field: field, value: value) -proc init*(T: type Namespace, id: string): ?!T = +func init*(T: type Namespace, id: string): ?!T = if id.strip == "": return failure "id string must not be all whitespace or empty" @@ -82,22 +82,25 @@ proc init*(T: type Namespace, id: string): ?!T = T.init(field, value) -proc id*(self: Namespace): string = +func id*(self: Namespace): string = if self.field.len > 0: self.field & delimiter & self.value else: self.value -proc `$`*(namespace: Namespace): string = +func hash*(namespace: Namespace): Hash = + hash(namespace.id) + +func `$`*(namespace: Namespace): string = "Namespace(" & namespace.id & ")" -proc init*(T: type Key, namespaces: varargs[Namespace]): ?!T = +func init*(T: type Key, namespaces: varargs[Namespace]): ?!T = if namespaces.len == 0: failure "namespaces must contain at least one Namespace" else: success T(namespaces: @namespaces) -proc init*(T: type Key, namespaces: varargs[string]): ?!T = +func init*(T: type Key, namespaces: varargs[string]): ?!T = if namespaces.len == 0: failure "namespaces must contain at least one Namespace id string" else: @@ -106,7 +109,7 @@ proc init*(T: type Key, namespaces: varargs[string]): ?!T = ?Namespace.init(it) )) -proc init*(T: type Key, id: string): ?!T = +func init*(T: type Key, id: string): ?!T = if id == "": return failure "id string must contain at least one Namespace" @@ -122,7 +125,7 @@ proc init*(T: type Key, id: string): ?!T = Key.init(nsStrs) -proc list*(self: Key): seq[Namespace] = +func list*(self: Key): seq[Namespace] = self.namespaces proc random*(T: type Key): string = @@ -131,78 +134,91 @@ proc random*(T: type Key): string = template 
`[]`*(key: Key, x: auto): auto = key.namespaces[x] -proc len*(self: Key): int = +func len*(self: Key): int = self.namespaces.len iterator items*(key: Key): Namespace = for k in key.namespaces: yield k -proc reversed*(self: Key): Key = +func reversed*(self: Key): Key = Key(namespaces: self.namespaces.reversed) -proc reverse*(self: Key): Key = +func reverse*(self: Key): Key = self.reversed -proc name*(self: Key): string = +func name*(self: Key): string = if self.len > 0: return self[^1].value -proc `type`*(self: Key): string = +func `type`*(self: Key): string = if self.len > 0: return self[^1].field -proc id*(self: Key): string = +func id*(self: Key): string = separator & self.namespaces.mapIt(it.id).join(separator) -proc isTopLevel*(self: Key): bool = +func root*(self: Key): bool = self.len == 1 -proc parent*(self: Key): ?!Key = - if self.isTopLevel: +func parent*(self: Key): ?!Key = + if self.root: failure "key has no parent" else: success Key(namespaces: self.namespaces[0..^2]) -proc path*(self: Key): ?!Key = +func path*(self: Key): ?!Key = let - parent = ? self.parent + parent = ?self.parent if self[^1].field == "": return success parent - success Key(namespaces: parent.namespaces & @[Namespace(value: self[^1].field)]) + let ns = parent.namespaces & @[Namespace(value: self[^1].field)] + success Key(namespaces: ns) -proc child*(self: Key, ns: Namespace): Key = +func child*(self: Key, ns: Namespace): Key = Key(namespaces: self.namespaces & @[ns]) -proc `/`*(self: Key, ns: Namespace): Key = +func `/`*(self: Key, ns: Namespace): Key = self.child(ns) -proc child*(self: Key, namespaces: varargs[Namespace]): Key = +func child*(self: Key, namespaces: varargs[Namespace]): Key = Key(namespaces: self.namespaces & @namespaces) -proc child*(self, key: Key): Key = +func child*(self, key: Key): Key = Key(namespaces: self.namespaces & key.namespaces) -proc `/`*(self, key: Key): Key = +func `/`*(self, key: Key): Key = self.child(key) -proc child*(self: Key, keys: varargs[Key]): Key = +func child*(self: Key, keys: varargs[Key]): Key = Key(namespaces: self.namespaces & concat(keys.mapIt(it.namespaces))) -proc child*(self: Key, ids: varargs[string]): ?!Key = +func child*(self: Key, ids: varargs[string]): ?!Key = success self.child(ids.filterIt(it != "").mapIt( ?Key.init(it) )) -proc `/`*(self: Key, id: string): ?!Key = +func relative*(self: Key, parent: Key): ?!Key = + ## Get a key relative to parent from current key + ## + + if self.len < parent.len: + return failure "Not a parent of this key!" + + Key.init(self.namespaces[parent.namespaces.high..self.namespaces.high]) + +func `/`*(self: Key, id: string): ?!Key = self.child(id) -proc isAncestorOf*(self, other: Key): bool = +func ancestor*(self, other: Key): bool = if other.len <= self.len: false else: other.namespaces[0.. 
Date: Fri, 16 Sep 2022 21:13:03 -0600 Subject: [PATCH 03/41] cleanup key tests --- tests/datastore/test_key.nim | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/datastore/test_key.nim b/tests/datastore/test_key.nim index fc7233c..9223b7b 100644 --- a/tests/datastore/test_key.nim +++ b/tests/datastore/test_key.nim @@ -202,12 +202,12 @@ suite "Key": check: key.name == "e" check: - Key.init(":b").tryGet().isTopLevel - not Key.init(":b/c").tryGet().isTopLevel + Key.init(":b").tryGet().root + not Key.init(":b/c").tryGet().root check: - Key.init(":b").tryGet().parent.isFailure - Key.init(":b").tryGet().parent.isFailure + Key.init(":b").?parent.isFailure + Key.init(":b").?parent.isFailure key.parent.tryGet() == Key.init("a:b/c").tryGet() key.parent.?parent.tryGet() == Key.init("a:b").tryGet() key.parent.?parent.?parent.isFailure @@ -253,12 +253,12 @@ suite "Key": (key / "f:g").tryGet() == Key.init("a:b/c/d:e/f:g").tryGet() check: - not key.isAncestorOf(Key.init("f:g").tryGet()) - key.isAncestorOf(key / Key.init("f:g").tryGet()) + not key.ancestor(Key.init("f:g").tryGet()) + key.ancestor(key / Key.init("f:g").tryGet()) check: - key.isDescendantOf(key.parent.tryGet()) - not Key.init("f:g").tryGet().isDescendantOf(key.parent.tryGet()) + key.descendant(key.parent.tryGet()) + not Key.init("f:g").tryGet().descendant(key.parent.tryGet()) test "serialization": let From 2390839406196448977752509de5a67c883c0535 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Fri, 16 Sep 2022 21:13:25 -0600 Subject: [PATCH 04/41] rename and cleanup fs store --- datastore/filesystem_datastore.nim | 159 --------------------------- datastore/fsstore.nim | 168 +++++++++++++++++++++++++++++ 2 files changed, 168 insertions(+), 159 deletions(-) delete mode 100644 datastore/filesystem_datastore.nim create mode 100644 datastore/fsstore.nim diff --git a/datastore/filesystem_datastore.nim b/datastore/filesystem_datastore.nim deleted file mode 100644 index 045df35..0000000 --- a/datastore/filesystem_datastore.nim +++ /dev/null @@ -1,159 +0,0 @@ -import std/os -import std/sequtils -import std/options - -import pkg/chronos -import pkg/questionable -import pkg/questionable/results -from pkg/stew/results as stewResults import get, isErr -import pkg/upraises - -import ./datastore - -export datastore - -push: {.upraises: [].} - -type - FileSystemDatastore* = ref object of Datastore - root*: string - -const - objExt* = ".obj" - -proc path*( - self: FileSystemDatastore, - key: Key): string = - - var - segments: seq[string] - - for ns in key: - without field =? ns.field: - segments.add ns.value - continue - - segments.add(field / ns.value) - - # is it problematic that per this logic Key(/a:b) evaluates to the same path - # as Key(/a/b)? 
may need to check if/how other Datastore implementations - # distinguish them - - self.root / joinPath(segments) & objExt - -method contains*( - self: FileSystemDatastore, - key: Key): Future[?!bool] {.async, locks: "unknown".} = - - return success fileExists(self.path(key)) - -method delete*( - self: FileSystemDatastore, - key: Key): Future[?!void] {.async, locks: "unknown".} = - - let - path = self.path(key) - - try: - removeFile(path) - return success() - - # removing an empty directory might lead to surprising behavior depending - # on what the user specified as the `root` of the FileSystemDatastore, so - # until further consideration, empty directories will be left in place - - except OSError as e: - return failure e - -method get*( - self: FileSystemDatastore, - key: Key): Future[?!(?seq[byte])] {.async, locks: "unknown".} = - - # to support finer control of memory allocation, maybe could/should change - # the signature of `get` so that it has a 3rd parameter - # `bytes: var openArray[byte]` and return type `?!bool`; this variant with - # return type `?!(?seq[byte])` would be a special case (convenience method) - # calling the former after allocating a seq with size automatically - # determined via `getFileSize` - - let - path = self.path(key) - containsRes = await self.contains(key) - - if containsRes.isErr: return failure containsRes.error.msg - - if containsRes.get: - var - file: File - - if not file.open(path): - return failure "unable to open file: " & path - else: - try: - let - size = file.getFileSize - - var - bytes: seq[byte] - - if size > 0: - newSeq(bytes, size) - - let - bytesRead = file.readBytes(bytes, 0, size) - - if bytesRead < size: - return failure $bytesRead & " bytes were read from " & path & - " but " & $size & " bytes were expected" - - return success bytes.some - - except IOError as e: - return failure e - - finally: - file.close - - else: - return success seq[byte].none - -method put*( - self: FileSystemDatastore, - key: Key, - data: seq[byte]): Future[?!void] {.async, locks: "unknown".} = - - let - path = self.path(key) - - try: - createDir(parentDir(path)) - if data.len > 0: writeFile(path, data) - else: writeFile(path, "") - return success() - - except IOError as e: - return failure e - - except OSError as e: - return failure e - -# method query*( -# self: FileSystemDatastore, -# query: ...): Future[?!(?...)] {.async, locks: "unknown".} = -# -# return success ....some - -proc new*( - T: type FileSystemDatastore, - root: string, - caseSensitive = true): ?!T = - - let root = ? ( - block: - if root.isAbsolute: root - else: getCurrentDir() / root).catch - - if not dirExists(root): - failure "directory does not exist: " & root - else: - success T(root: root) diff --git a/datastore/fsstore.nim b/datastore/fsstore.nim new file mode 100644 index 0000000..7254549 --- /dev/null +++ b/datastore/fsstore.nim @@ -0,0 +1,168 @@ +import std/os +import std/sequtils +import std/options + +import pkg/chronos +import pkg/questionable +import pkg/questionable/results +from pkg/stew/results as stewResults import get, isErr +import pkg/upraises + +import ./datastore + +export datastore + +push: {.upraises: [].} + +const + # TODO: Add more dirs from relevant OSs + + # Paths should be matched exactly, i.e. 
+ # we're forbidding this dirs from being + # touched directly, but subdirectories + # can still be touched + ProtectedPaths = [ + "/", + "/usr", + "/etc", + "/home", + "/Users"] + +type + FSDatastore* = ref object of Datastore + root*: string + ignoreProtected: bool + +func checkProtected(dir: string): bool = + dir in ProtectedPaths + +proc path*(self: FSDatastore, key: Key): string = + var + segments: seq[string] + + for ns in key: + if ns.field == "": + segments.add ns.value + continue + + segments.add(ns.field / ns.value) + + # is it problematic that per this logic Key(/a:b) evaluates to the same path + # as Key(/a/b)? may need to check if/how other Datastore implementations + # distinguish them + + self.root / joinPath(segments) + +method contains*(self: FSDatastore, key: Key): Future[?!bool] {.async.} = + return success fileExists(self.path(key)) + +method delete*(self: FSDatastore, key: Key): Future[?!void] {.async.} = + + let + path = self.path(key) + + if checkProtected(path): + return failure "Path is protected!" + + try: + removeFile(path) + return success() + + # removing an empty directory might lead to surprising behavior depending + # on what the user specified as the `root` of the FSDatastore, so + # until further consideration, empty directories will be left in place + + except OSError as e: + return failure e + +method get*(self: FSDatastore, key: Key): Future[?!seq[byte]] {.async.} = + + # to support finer control of memory allocation, maybe could/should change + # the signature of `get` so that it has a 3rd parameter + # `bytes: var openArray[byte]` and return type `?!bool`; this variant with + # return type `?!(?seq[byte])` would be a special case (convenience method) + # calling the former after allocating a seq with size automatically + # determined via `getFileSize` + + let + path = self.path(key) + + if checkProtected(path): + return failure "Path is protected!" + + if not fileExists(path): + return success(newSeq[byte]()) + + var + file: File + + defer: + file.close + + if not file.open(path): + return failure "unable to open file: " & path + + try: + let + size = file.getFileSize + + var + bytes = newSeq[byte](size) + read = 0 + + while read < size: + read += file.readBytes(bytes, read, size) + + if read < size: + return failure $read & " bytes were read from " & path & + " but " & $size & " bytes were expected" + + return success bytes + + except IOError as e: + return failure e + +method put*( + self: FSDatastore, + key: Key, + data: seq[byte]): Future[?!void] {.async, locks: "unknown".} = + + let + path = self.path(key) + + if checkProtected(path): + return failure "Path is protected!" + + try: + createDir(parentDir(path)) + writeFile(path, data) + except IOError as e: + return failure e + except OSError as e: + return failure e + + return success() + +# method query*( +# self: FSDatastore, +# query: ...): Future[?!(?...)] {.async, locks: "unknown".} = +# +# return success ....some + +proc new*( + T: type FSDatastore, + root: string, + caseSensitive = true, + ignoreProtected = false): ?!T = + + let root = ? 
( + block: + if root.isAbsolute: root + else: getCurrentDir() / root).catch + + if not dirExists(root): + return failure "directory does not exist: " & root + + success T( + root: root, + ignoreProtected: ignoreProtected) From acd77c5385b80dceaa9984ac4534eb7a0fa76513 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Fri, 16 Sep 2022 21:14:02 -0600 Subject: [PATCH 05/41] rename and cleanup fs store tests --- ...ilesystem_datastore.nim => testfstore.nim} | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) rename tests/datastore/{test_filesystem_datastore.nim => testfstore.nim} (83%) diff --git a/tests/datastore/test_filesystem_datastore.nim b/tests/datastore/testfstore.nim similarity index 83% rename from tests/datastore/test_filesystem_datastore.nim rename to tests/datastore/testfstore.nim index 8852c96..9463616 100644 --- a/tests/datastore/test_filesystem_datastore.nim +++ b/tests/datastore/testfstore.nim @@ -8,10 +8,10 @@ import pkg/questionable/results import pkg/stew/byteutils from pkg/stew/results as stewResults import get, isOk -import ../../datastore/filesystem_datastore +import ../../datastore/fsstore import ./templates -suite "FileSystemDatastore": +suite "FSDatastore": # assumes tests/test_all is run from project root, e.g. with `nimble test` let root = "tests" / "test_data" @@ -28,45 +28,45 @@ suite "FileSystemDatastore": asyncTest "new": var - dsRes: ?!FileSystemDatastore + dsRes: ?!FSDatastore - dsRes = FileSystemDatastore.new(rootAbs / "missing") + dsRes = FSDatastore.new(rootAbs / "missing") check: dsRes.isErr - dsRes = FileSystemDatastore.new(rootAbs) + dsRes = FSDatastore.new(rootAbs) check: dsRes.isOk - dsRes = FileSystemDatastore.new(root) + dsRes = FSDatastore.new(root) check: dsRes.isOk asyncTest "accessors": let - ds = FileSystemDatastore.new(root).get + ds = FSDatastore.new(root).get check: ds.root == rootAbs asyncTest "helpers": let - ds = FileSystemDatastore.new(root).tryGet() + ds = FSDatastore.new(root).tryGet() check: # see comment in ../../datastore/filesystem_datastore re: whether path # equivalence of e.g. 
Key(/a:b) and Key(/a/b) is problematic - ds.path(Key.init("a").tryGet()) == rootAbs / "a" & objExt - ds.path(Key.init("a:b").tryGet()) == rootAbs / "a" / "b" & objExt - ds.path(Key.init("a/b").tryGet()) == rootAbs / "a" / "b" & objExt - ds.path(Key.init("a:b/c").tryGet()) == rootAbs / "a" / "b" / "c" & objExt - ds.path(Key.init("a/b/c").tryGet()) == rootAbs / "a" / "b" / "c" & objExt - ds.path(Key.init("a:b/c:d").tryGet()) == rootAbs / "a" / "b" / "c" / "d" & objExt - ds.path(Key.init("a/b/c:d").tryGet()) == rootAbs / "a" / "b" / "c" / "d" & objExt - ds.path(Key.init("a/b/c/d").tryGet()) == rootAbs / "a" / "b" / "c" / "d" & objExt + ds.path(Key.init("a").tryGet()) == rootAbs / "a" + ds.path(Key.init("a:b").tryGet()) == rootAbs / "a" / "b" + ds.path(Key.init("a/b").tryGet()) == rootAbs / "a" / "b" + ds.path(Key.init("a:b/c").tryGet()) == rootAbs / "a" / "b" / "c" + ds.path(Key.init("a/b/c").tryGet()) == rootAbs / "a" / "b" / "c" + ds.path(Key.init("a:b/c:d").tryGet()) == rootAbs / "a" / "b" / "c" / "d" + ds.path(Key.init("a/b/c:d").tryGet()) == rootAbs / "a" / "b" / "c" / "d" + ds.path(Key.init("a/b/c/d").tryGet()) == rootAbs / "a" / "b" / "c" / "d" asyncTest "put": let - ds = FileSystemDatastore.new(root).get + ds = FSDatastore.new(root).get key = Key.init("a:b/c/d:e").get path = ds.path(key) @@ -97,7 +97,7 @@ suite "FileSystemDatastore": asyncTest "delete": let bytes = @[1.byte, 2.byte, 3.byte] - ds = FileSystemDatastore.new(root).get + ds = FSDatastore.new(root).get var key = Key.init("a:b/c/d:e").get @@ -127,7 +127,7 @@ suite "FileSystemDatastore": asyncTest "contains": let bytes = @[1.byte, 2.byte, 3.byte] - ds = FileSystemDatastore.new(root).get + ds = FSDatastore.new(root).get var key = Key.init("a:b/c/d:e").get @@ -155,7 +155,7 @@ suite "FileSystemDatastore": asyncTest "get": let - ds = FileSystemDatastore.new(root).get + ds = FSDatastore.new(root).get var bytes: seq[byte] From 13bc02f595136a25148866385d916b85ffc09b0d Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Fri, 16 Sep 2022 21:14:31 -0600 Subject: [PATCH 06/41] split out and cleanup sql lite store --- datastore/sql/sqliteds.nim | 156 +++++++ datastore/sql/sqlitedsdb.nim | 264 +++++++++++ datastore/{sqlite.nim => sql/sqliteutils.nim} | 6 +- datastore/sqlite_datastore.nim | 413 ------------------ 4 files changed, 423 insertions(+), 416 deletions(-) create mode 100644 datastore/sql/sqliteds.nim create mode 100644 datastore/sql/sqlitedsdb.nim rename datastore/{sqlite.nim => sql/sqliteutils.nim} (99%) delete mode 100644 datastore/sqlite_datastore.nim diff --git a/datastore/sql/sqliteds.nim b/datastore/sql/sqliteds.nim new file mode 100644 index 0000000..521e5d1 --- /dev/null +++ b/datastore/sql/sqliteds.nim @@ -0,0 +1,156 @@ +import std/os +import std/times + +import pkg/chronos +import pkg/questionable +import pkg/questionable/results +import pkg/sqlite3_abi +import pkg/stew/byteutils +from pkg/stew/results as stewResults import isErr +import pkg/upraises + +import ./datastore +import ./sqlitedsdb + +export datastore, sqlitedsdb + +push: {.upraises: [].} + +type + SQLiteDatastore* = ref object of Datastore + readOnly: bool + db: SQLiteDsDB + +proc path*(self: SQLiteDatastore): string = + self.db.dbPath + +proc `readOnly=`*(self: SQLiteDatastore): bool + {.error: "readOnly should not be assigned".} + +proc timestamp*(t = epochTime()): int64 = + (t * 1_000_000).int64 + +method contains*(self: SQLiteDatastore, key: Key): Future[?!bool] {.async.} = + var + exists = false + + proc onData(s: RawStmtPtr) = + exists = 
sqlite3_column_int64(s, ContainsStmtExistsCol.cint).bool + + let + queryRes = self.db.containsStmt.query((key.id), onData) + + if queryRes.isErr: return queryRes + return success exists + +method delete*(self: SQLiteDatastore, key: Key): Future[?!void] {.async.} = + return self.db.deleteStmt.exec((key.id)) + +method get*(self: SQLiteDatastore, key: Key): Future[?!seq[byte]] {.async.} = + # see comment in ./filesystem_datastore re: finer control of memory + # allocation in `method get`, could apply here as well if bytes were read + # incrementally with `sqlite3_blob_read` + + var + bytes: seq[byte] + + let + dataCol = self.db.getDataCol + + proc onData(s: RawStmtPtr) = + bytes = dataCol() + + let + queryRes = self.db.getStmt.query((key.id), onData) + + if queryRes.isErr: + return failure queryRes.error.msg + + return success bytes + +method put*(self: SQLiteDatastore, key: Key, data: seq[byte]): Future[?!void] {.async.} = + return self.db.putStmt.exec((key.id, @data, timestamp())) + +iterator query*( + self: SQLiteDatastore, + query: Query): Future[QueryResponse] = + + let + queryStmt = QueryStmt.prepare( + self.db.env, QueryStmtStr).expect("should not fail") + + s = RawStmtPtr(queryStmt) + + defer: + discard sqlite3_reset(s) + discard sqlite3_clear_bindings(s) + s.dispose + + let + v = sqlite3_bind_text(s, 1.cint, query.key.id.cstring, -1.cint, + SQLITE_TRANSIENT_GCSAFE) + + if not (v == SQLITE_OK): + raise (ref Defect)(msg: $sqlite3_errstr(v)) + + while true: + let + v = sqlite3_step(s) + + case v + of SQLITE_ROW: + let + key = Key.init($sqlite3_column_text_not_null( + s, QueryStmtIdCol)).expect("should not fail") + + blob = sqlite3_column_blob(s, QueryStmtDataCol) + + # detect out-of-memory error + # see the conversion table and final paragraph of: + # https://www.sqlite.org/c3ref/column_blob.html + # see also https://www.sqlite.org/rescode.html + + # the "data" column can be NULL so in order to detect an out-of-memory + # error it is necessary to check that the result is a null pointer and + # that the result code is an error code + if blob.isNil: + let + v = sqlite3_errcode(sqlite3_db_handle(s)) + + if not (v in [SQLITE_OK, SQLITE_ROW, SQLITE_DONE]): + raise (ref Defect)(msg: $sqlite3_errstr(v)) + + let + dataLen = sqlite3_column_bytes(s, QueryStmtDataCol) + dataBytes = cast[ptr UncheckedArray[byte]](blob) + data = @(toOpenArray(dataBytes, 0, dataLen - 1)) + fut = newFuture[QueryResponse]() + + fut.complete((key, data)) + yield fut + of SQLITE_DONE: + break + else: + raise (ref Defect)(msg: $sqlite3_errstr(v)) + +proc new*( + T: type SQLiteDatastore, + path: string, + readOnly = false): ?!T = + + let + flags = + if readOnly: SQLITE_OPEN_READONLY + else: SQLITE_OPEN_READWRITE or SQLITE_OPEN_CREATE + + success T( + db: ? 
SQLIteDsDb.open(path, flags), + readOnly: readOnly) + +proc new*( + T: type SQLiteDatastore, + db: SQLIteDsDb): ?!T = + + success T( + db: db, + readOnly: db.readOnly) diff --git a/datastore/sql/sqlitedsdb.nim b/datastore/sql/sqlitedsdb.nim new file mode 100644 index 0000000..d487215 --- /dev/null +++ b/datastore/sql/sqlitedsdb.nim @@ -0,0 +1,264 @@ +import std/os + +import pkg/questionable +import pkg/questionable/results +import pkg/upraises + +import ./sqliteutils + +export sqliteutils + +type + BoundIdCol* = proc (): string {.closure, gcsafe, upraises: [].} + + BoundDataCol* = proc (): seq[byte] {.closure, gcsafe, upraises: [].} + + BoundTimestampCol* = proc (): int64 {.closure, gcsafe, upraises: [].} + + # feels odd to use `void` for prepared statements corresponding to SELECT + # queries but it fits with the rest of the SQLite wrapper adapted from + # status-im/nwaku, at least in its current form in ./sqlite + ContainsStmt* = SQLiteStmt[(string), void] + + DeleteStmt* = SQLiteStmt[(string), void] + + GetStmt* = SQLiteStmt[(string), void] + + PutStmt* = SQLiteStmt[(string, seq[byte], int64), void] + + QueryStmt* = SQLiteStmt[(string), void] + + SQLiteDsDb* = object + readOnly*: bool + dbPath*: string + containsStmt*: ContainsStmt + deleteStmt*: DeleteStmt + env*: SQLite + getDataCol*: BoundDataCol + getStmt*: GetStmt + putStmt*: PutStmt + +const + DbExt* = ".sqlite3" + TableName* = "Store" + + IdColName* = "id" + DataColName* = "data" + TimestampColName* = "timestamp" + + IdColType = "TEXT" + DataColType = "BLOB" + TimestampColType = "INTEGER" + + Memory* = ":memory:" + + # https://stackoverflow.com/a/9756276 + # EXISTS returns a boolean value represented by an integer: + # https://sqlite.org/datatype3.html#boolean_datatype + # https://sqlite.org/lang_expr.html#the_exists_operator + ContainsStmtStr* = """ + SELECT EXISTS( + SELECT 1 FROM """ & TableName & """ + WHERE """ & IdColName & """ = ? 
+ ); + """ + + ContainsStmtExistsCol* = 0 + + CreateStmtStr* = """ + CREATE TABLE IF NOT EXISTS """ & TableName & """ ( + """ & IdColName & """ """ & IdColType & """ NOT NULL PRIMARY KEY, + """ & DataColName & """ """ & DataColType & """, + """ & TimestampColName & """ """ & TimestampColType & """ NOT NULL + ) WITHOUT ROWID; + """ + + DeleteStmtStr* = """ + DELETE FROM """ & TableName & """ + WHERE """ & IdColName & """ = ?; + """ + + GetStmtStr* = """ + SELECT """ & DataColName & """ FROM """ & TableName & """ + WHERE """ & IdColName & """ = ?; + """ + + GetStmtDataCol* = 0 + + PutStmtStr* = """ + REPLACE INTO """ & TableName & """ ( + """ & IdColName & """, + """ & DataColName & """, + """ & TimestampColName & """ + ) VALUES (?, ?, ?); + """ + + QueryStmtStr* = """ + SELECT """ & IdColName & """, """ & DataColName & """ FROM """ & TableName & + """ WHERE """ & IdColName & """ GLOB ?; + """ + + QueryStmtIdCol* = 0 + QueryStmtDataCol* = 1 + +proc checkColMetadata(s: RawStmtPtr, i: int, expectedName: string) = + let + colName = sqlite3_column_origin_name(s, i.cint) + + if colName.isNil: + raise (ref Defect)(msg: "no column exists for index " & $i & " in `" & + $sqlite3_sql(s) & "`") + + if $colName != expectedName: + raise (ref Defect)(msg: "original column name for index " & $i & " was \"" & + $colName & "\" in `" & $sqlite3_sql(s) & "` but callee expected \"" & + expectedName & "\"") + +proc idCol*( + s: RawStmtPtr, + index: int): BoundIdCol = + + checkColMetadata(s, index, IdColName) + + return proc (): string = + $sqlite3_column_text_not_null(s, index.cint) + +proc dataCol*( + s: RawStmtPtr, + index: int): BoundDataCol = + + checkColMetadata(s, index, DataColName) + + return proc (): seq[byte] = + let + i = index.cint + blob = sqlite3_column_blob(s, i) + + # detect out-of-memory error + # see the conversion table and final paragraph of: + # https://www.sqlite.org/c3ref/column_blob.html + # see also https://www.sqlite.org/rescode.html + + # the "data" column can be NULL so in order to detect an out-of-memory error + # it is necessary to check that the result is a null pointer and that the + # result code is an error code + if blob.isNil: + let + v = sqlite3_errcode(sqlite3_db_handle(s)) + + if not (v in [SQLITE_OK, SQLITE_ROW, SQLITE_DONE]): + raise (ref Defect)(msg: $sqlite3_errstr(v)) + + let + dataLen = sqlite3_column_bytes(s, i) + dataBytes = cast[ptr UncheckedArray[byte]](blob) + + @(toOpenArray(dataBytes, 0, dataLen - 1)) + +proc timestampCol*( + s: RawStmtPtr, + index: int): BoundTimestampCol = + + checkColMetadata(s, index, TimestampColName) + + return proc (): int64 = + sqlite3_column_int64(s, index.cint) + +proc getDBFilePath*(path: string): ?!string = + try: + let + (parent, name, ext) = path.normalizePathEnd.splitFile + dbExt = if ext == "": DbExt else: ext + absPath = + if parent.isAbsolute: parent + else: getCurrentDir() / parent + dbPath = absPath / name & dbExt + + return success dbPath + except CatchableError as exc: + return failure(exc.msg) + +proc close*(self: SQLiteDsDb) = + self.containsStmt.dispose + self.getStmt.dispose + + if not RawStmtPtr(self.deleteStmt).isNil: + self.deleteStmt.dispose + + if not RawStmtPtr(self.putStmt).isNil: + self.putStmt.dispose + + self.env.dispose + +proc open*( + T: type SQLiteDsDb, + path = Memory, + flags = SQLITE_OPEN_READONLY): ?!SQLiteDsDb = + + # make it optional to enable WAL with it enabled being the default? + + # make it possible to specify a custom page size? 
+ # https://www.sqlite.org/pragma.html#pragma_page_size + # https://www.sqlite.org/intern-v-extern-blob.html + + var + env: AutoDisposed[SQLite] + + defer: + disposeIfUnreleased(env) + + let + isMemory = path == Memory + absPath = if isMemory: Memory else: ?path.getDBFilePath + readOnly = (SQLITE_OPEN_READONLY and flags).bool + + if not isMemory: + if readOnly and not fileExists(absPath): + return failure "read-only database does not exist: " & absPath + elif not dirExists(absPath.parentDir): + return failure "directory does not exist: " & absPath + + open(absPath, env.val, flags) + + let + pragmaStmt = journalModePragmaStmt(env.val) + + checkExec(pragmaStmt) + + var + containsStmt: ContainsStmt + deleteStmt: DeleteStmt + getStmt: GetStmt + putStmt: PutStmt + + if not readOnly: + checkExec(env.val, CreateStmtStr) + + deleteStmt = ? DeleteStmt.prepare( + env.val, DeleteStmtStr, SQLITE_PREPARE_PERSISTENT) + + putStmt = ? PutStmt.prepare( + env.val, PutStmtStr, SQLITE_PREPARE_PERSISTENT) + + containsStmt = ? ContainsStmt.prepare( + env.val, ContainsStmtStr, SQLITE_PREPARE_PERSISTENT) + + getStmt = ? GetStmt.prepare( + env.val, GetStmtStr, SQLITE_PREPARE_PERSISTENT) + + # if a readOnly/existing database does not satisfy the expected schema + # `pepare()` will fail and `new` will return an error with message + # "SQL logic error" + + let + getDataCol = dataCol(RawStmtPtr(getStmt), GetStmtDataCol) + + success T( + readOnly: readOnly, + dbPath: path, + containsStmt: containsStmt, + deleteStmt: deleteStmt, + env: env.release, + getStmt: getStmt, + getDataCol: getDataCol, + putStmt: putStmt) diff --git a/datastore/sqlite.nim b/datastore/sql/sqliteutils.nim similarity index 99% rename from datastore/sqlite.nim rename to datastore/sql/sqliteutils.nim index a8b136e..93cc81f 100644 --- a/datastore/sqlite.nim +++ b/datastore/sql/sqliteutils.nim @@ -3,6 +3,8 @@ import pkg/questionable/results import pkg/sqlite3_abi import pkg/upraises +export sqlite3_abi + # Adapted from: # https://github.com/status-im/nwaku/blob/master/waku/v2/node/storage/sqlite.nim @@ -74,9 +76,7 @@ template bindParams( when params is tuple: when params isnot NoParams: - var - i = 1 - + var i = 1 for param in fields(params): checkErr bindParam(s, i, param) inc i diff --git a/datastore/sqlite_datastore.nim b/datastore/sqlite_datastore.nim deleted file mode 100644 index 0a4d321..0000000 --- a/datastore/sqlite_datastore.nim +++ /dev/null @@ -1,413 +0,0 @@ -import std/os -import std/times - -import pkg/chronos -import pkg/questionable -import pkg/questionable/results -import pkg/sqlite3_abi -import pkg/stew/byteutils -from pkg/stew/results as stewResults import isErr -import pkg/upraises - -import ./datastore -import ./sqlite - -export datastore, sqlite - -push: {.upraises: [].} - -type - BoundIdCol = proc (): string {.closure, gcsafe, upraises: [].} - - BoundDataCol = proc (): seq[byte] {.closure, gcsafe, upraises: [].} - - BoundTimestampCol = proc (): int64 {.closure, gcsafe, upraises: [].} - - # feels odd to use `void` for prepared statements corresponding to SELECT - # queries but it fits with the rest of the SQLite wrapper adapted from - # status-im/nwaku, at least in its current form in ./sqlite - ContainsStmt = SQLiteStmt[(string), void] - - DeleteStmt = SQLiteStmt[(string), void] - - GetStmt = SQLiteStmt[(string), void] - - PutStmt = SQLiteStmt[(string, seq[byte], int64), void] - - QueryStmt = SQLiteStmt[(string), void] - - SQLiteDatastore* = ref object of Datastore - dbPath: string - containsStmt: ContainsStmt - deleteStmt: 
DeleteStmt - env: SQLite - getDataCol: BoundDataCol - getStmt: GetStmt - putStmt: PutStmt - readOnly: bool - -const - dbExt* = ".sqlite3" - tableName* = "Store" - - idColName* = "id" - dataColName* = "data" - timestampColName* = "timestamp" - - idColType = "TEXT" - dataColType = "BLOB" - timestampColType = "INTEGER" - - memory* = ":memory:" - - # https://stackoverflow.com/a/9756276 - # EXISTS returns a boolean value represented by an integer: - # https://sqlite.org/datatype3.html#boolean_datatype - # https://sqlite.org/lang_expr.html#the_exists_operator - containsStmtStr = """ - SELECT EXISTS( - SELECT 1 FROM """ & tableName & """ - WHERE """ & idColName & """ = ? - ); - """ - - containsStmtExistsCol = 0 - - createStmtStr = """ - CREATE TABLE IF NOT EXISTS """ & tableName & """ ( - """ & idColName & """ """ & idColType & """ NOT NULL PRIMARY KEY, - """ & dataColName & """ """ & dataColType & """, - """ & timestampColName & """ """ & timestampColType & """ NOT NULL - ) WITHOUT ROWID; - """ - - deleteStmtStr = """ - DELETE FROM """ & tableName & """ - WHERE """ & idColName & """ = ?; - """ - - getStmtStr = """ - SELECT """ & dataColName & """ FROM """ & tableName & """ - WHERE """ & idColName & """ = ?; - """ - - getStmtDataCol = 0 - - putStmtStr = """ - REPLACE INTO """ & tableName & """ ( - """ & idColName & """, - """ & dataColName & """, - """ & timestampColName & """ - ) VALUES (?, ?, ?); - """ - - queryStmtStr = """ - SELECT """ & idColName & """, """ & dataColName & """ FROM """ & tableName & - """ WHERE """ & idColName & """ GLOB ?; - """ - - queryStmtIdCol = 0 - queryStmtDataCol = 1 - -proc checkColMetadata(s: RawStmtPtr, i: int, expectedName: string) = - let - colName = sqlite3_column_origin_name(s, i.cint) - - if colName.isNil: - raise (ref Defect)(msg: "no column exists for index " & $i & " in `" & - $sqlite3_sql(s) & "`") - - if $colName != expectedName: - raise (ref Defect)(msg: "original column name for index " & $i & " was \"" & - $colName & "\" in `" & $sqlite3_sql(s) & "` but callee expected \"" & - expectedName & "\"") - -proc idCol*( - s: RawStmtPtr, - index: int): BoundIdCol = - - checkColMetadata(s, index, idColName) - - return proc (): string = - $sqlite3_column_text_not_null(s, index.cint) - -proc dataCol*( - s: RawStmtPtr, - index: int): BoundDataCol = - - checkColMetadata(s, index, dataColName) - - return proc (): seq[byte] = - let - i = index.cint - blob = sqlite3_column_blob(s, i) - - # detect out-of-memory error - # see the conversion table and final paragraph of: - # https://www.sqlite.org/c3ref/column_blob.html - # see also https://www.sqlite.org/rescode.html - - # the "data" column can be NULL so in order to detect an out-of-memory error - # it is necessary to check that the result is a null pointer and that the - # result code is an error code - if blob.isNil: - let - v = sqlite3_errcode(sqlite3_db_handle(s)) - - if not (v in [SQLITE_OK, SQLITE_ROW, SQLITE_DONE]): - raise (ref Defect)(msg: $sqlite3_errstr(v)) - - let - dataLen = sqlite3_column_bytes(s, i) - dataBytes = cast[ptr UncheckedArray[byte]](blob) - - @(toOpenArray(dataBytes, 0, dataLen - 1)) - -proc timestampCol*( - s: RawStmtPtr, - index: int): BoundTimestampCol = - - checkColMetadata(s, index, timestampColName) - - return proc (): int64 = - sqlite3_column_int64(s, index.cint) - -proc new*( - T: type SQLiteDatastore, - basePath: string, - filename = "store" & dbExt, - readOnly = false): ?!T = - - # make it optional to enable WAL with it enabled being the default? 
- - # make it possible to specify a custom page size? - # https://www.sqlite.org/pragma.html#pragma_page_size - # https://www.sqlite.org/intern-v-extern-blob.html - - var - env: AutoDisposed[SQLite] - - defer: disposeIfUnreleased(env) - - var - basep, fname, dbPath: string - - if basePath == memory: - if readOnly: - return failure "SQLiteDatastore cannot be read-only and in-memory" - else: - dbPath = memory - else: - try: - basep = normalizePathEnd( - if basePath.isAbsolute: basePath - else: getCurrentDir() / basePath) - - fname = filename.normalizePathEnd - dbPath = basep / fname - - if readOnly and not fileExists(dbPath): - return failure "read-only database does not exist: " & dbPath - elif not dirExists(basep): - return failure "directory does not exist: " & basep - - except IOError as e: - return failure e - - except OSError as e: - return failure e - - let - flags = - if readOnly: SQLITE_OPEN_READONLY - else: SQLITE_OPEN_READWRITE or SQLITE_OPEN_CREATE - - open(dbPath, env.val, flags) - - let - pragmaStmt = journalModePragmaStmt(env.val) - - checkExec(pragmaStmt) - - var - containsStmt: ContainsStmt - deleteStmt: DeleteStmt - getStmt: GetStmt - putStmt: PutStmt - - if not readOnly: - checkExec(env.val, createStmtStr) - - deleteStmt = ? DeleteStmt.prepare( - env.val, deleteStmtStr, SQLITE_PREPARE_PERSISTENT) - - putStmt = ? PutStmt.prepare( - env.val, putStmtStr, SQLITE_PREPARE_PERSISTENT) - - containsStmt = ? ContainsStmt.prepare( - env.val, containsStmtStr, SQLITE_PREPARE_PERSISTENT) - - getStmt = ? GetStmt.prepare( - env.val, getStmtStr, SQLITE_PREPARE_PERSISTENT) - - # if a readOnly/existing database does not satisfy the expected schema - # `pepare()` will fail and `new` will return an error with message - # "SQL logic error" - - let - getDataCol = dataCol(RawStmtPtr(getStmt), getStmtDataCol) - - success T(dbPath: dbPath, containsStmt: containsStmt, deleteStmt: deleteStmt, - env: env.release, getStmt: getStmt, getDataCol: getDataCol, - putStmt: putStmt, readOnly: readOnly) - -proc dbPath*(self: SQLiteDatastore): string = - self.dbPath - -proc env*(self: SQLiteDatastore): SQLite = - self.env - -proc close*(self: SQLiteDatastore) = - self.containsStmt.dispose - self.getStmt.dispose - - if not self.readOnly: - self.deleteStmt.dispose - self.putStmt.dispose - - self.env.dispose - self[] = SQLiteDatastore()[] - -proc timestamp*(t = epochTime()): int64 = - (t * 1_000_000).int64 - -method contains*( - self: SQLiteDatastore, - key: Key): Future[?!bool] {.async, locks: "unknown".} = - - var - exists = false - - proc onData(s: RawStmtPtr) = - exists = sqlite3_column_int64(s, containsStmtExistsCol.cint).bool - - let - queryRes = self.containsStmt.query((key.id), onData) - - if queryRes.isErr: return queryRes - - return success exists - -method delete*( - self: SQLiteDatastore, - key: Key): Future[?!void] {.async, locks: "unknown".} = - - if self.readOnly: - return failure "database is read-only": - else: - return self.deleteStmt.exec((key.id)) - -method get*( - self: SQLiteDatastore, - key: Key): Future[?!(?seq[byte])] {.async, locks: "unknown".} = - - # see comment in ./filesystem_datastore re: finer control of memory - # allocation in `method get`, could apply here as well if bytes were read - # incrementally with `sqlite3_blob_read` - - var - bytes: ?seq[byte] - - let - dataCol = self.getDataCol - - proc onData(s: RawStmtPtr) = - bytes = dataCol().some - - let - queryRes = self.getStmt.query((key.id), onData) - - if queryRes.isErr: - return failure queryRes.error.msg - else: - return 
success bytes - -proc put*( - self: SQLiteDatastore, - key: Key, - data: seq[byte], - timestamp: int64): Future[?!void] {.async.} = - - if self.readOnly: - return failure "database is read-only" - else: - return self.putStmt.exec((key.id, @data, timestamp)) - -method put*( - self: SQLiteDatastore, - key: Key, - data: seq[byte]): Future[?!void] {.async, locks: "unknown".} = - - return await self.put(key, data, timestamp()) - -iterator query*( - self: SQLiteDatastore, - query: Query): Future[QueryResponse] = - - let - queryStmt = QueryStmt.prepare( - self.env, queryStmtStr).expect("should not fail") - - s = RawStmtPtr(queryStmt) - - defer: - discard sqlite3_reset(s) - discard sqlite3_clear_bindings(s) - s.dispose - - let - v = sqlite3_bind_text(s, 1.cint, query.key.id.cstring, -1.cint, - SQLITE_TRANSIENT_GCSAFE) - - if not (v == SQLITE_OK): - raise (ref Defect)(msg: $sqlite3_errstr(v)) - - while true: - let - v = sqlite3_step(s) - - case v - of SQLITE_ROW: - let - key = Key.init($sqlite3_column_text_not_null( - s, queryStmtIdCol)).expect("should not fail") - - blob = sqlite3_column_blob(s, queryStmtDataCol) - - # detect out-of-memory error - # see the conversion table and final paragraph of: - # https://www.sqlite.org/c3ref/column_blob.html - # see also https://www.sqlite.org/rescode.html - - # the "data" column can be NULL so in order to detect an out-of-memory - # error it is necessary to check that the result is a null pointer and - # that the result code is an error code - if blob.isNil: - let - v = sqlite3_errcode(sqlite3_db_handle(s)) - - if not (v in [SQLITE_OK, SQLITE_ROW, SQLITE_DONE]): - raise (ref Defect)(msg: $sqlite3_errstr(v)) - - let - dataLen = sqlite3_column_bytes(s, queryStmtDataCol) - dataBytes = cast[ptr UncheckedArray[byte]](blob) - data = @(toOpenArray(dataBytes, 0, dataLen - 1)) - fut = newFuture[QueryResponse]() - - fut.complete((key, data)) - yield fut - of SQLITE_DONE: - break - else: - raise (ref Defect)(msg: $sqlite3_errstr(v)) From 77807d89487a1044d3f19df2e61399213c4aa51a Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Fri, 16 Sep 2022 21:15:08 -0600 Subject: [PATCH 07/41] wip: rework sql store tests --- tests/datastore/sql/testsqliteds.nim | 329 +++++++++++++++++++++++++++ 1 file changed, 329 insertions(+) create mode 100644 tests/datastore/sql/testsqliteds.nim diff --git a/tests/datastore/sql/testsqliteds.nim b/tests/datastore/sql/testsqliteds.nim new file mode 100644 index 0000000..181e0a1 --- /dev/null +++ b/tests/datastore/sql/testsqliteds.nim @@ -0,0 +1,329 @@ +import std/algorithm +import std/options +import std/os + +import pkg/asynctest/unittest2 +import pkg/chronos +import pkg/stew/results +import pkg/stew/byteutils + +import pkg/datastore/sql/sqliteds + +suite "SQLiteDatastore": + let + (path, _, _) = instantiationInfo(-1, fullPaths = true) # get this file's name + basePath = "tests_data" + basePathAbs = path.parentDir / basePath + filename = "test_store" & DbExt + dbPathAbs = basePathAbs / filename + key = Key.init("a:b/c/d:e").tryGet() + bytes = "some bytes".toBytes + otherBytes = "some other bytes".toBytes + + var + dsDb: SQLiteDatastore + readOnlyDb: SQLiteDatastore + + setupAll: + removeDir(basePathAbs) + require(not dirExists(basePathAbs)) + createDir(basePathAbs) + + dsDb = SQLiteDatastore.new(path = dbPathAbs).tryGet() + readOnlyDb = SQLiteDatastore.new(path = dbPathAbs, readOnly = true).tryGet() + + teardownAll: + removeDir(basePathAbs) + require(not dirExists(basePathAbs)) + + test "put": + check: + (await readOnlyDb.put(key, 
bytes)).isErr + + (await dsDb.put(key, bytes)).tryGet() + + test "get": + check: + (await readOnlyDb.get(key)).tryGet() == bytes + (await dsDb.get(key)).tryGet() == bytes + + test "put update": + check: + (await readOnlyDb.put(key, otherBytes)).isErr + + (await dsDb.put(key, otherBytes)).tryGet() + + test "get updated": + check: + (await readOnlyDb.get(key)).tryGet() == otherBytes + (await dsDb.get(key)).tryGet() == otherBytes + + test "delete": + check: + (await readOnlyDb.delete(key)).isErr + + (await dsDb.delete(key)).tryGet() + + test "contains": + check: + not (await readOnlyDb.contains(key)).tryGet() + not (await dsDb.contains(key)).tryGet() + + # test "query": + # ds = SQLiteDatastore.new(basePathAbs, filename).get + + # var + # key1 = Key.init("a").get + # key2 = Key.init("a/b").get + # key3 = Key.init("a/b:c").get + # key4 = Key.init("a:b").get + # key5 = Key.init("a:b/c").get + # key6 = Key.init("a:b/c:d").get + # key7 = Key.init("A").get + # key8 = Key.init("A/B").get + # key9 = Key.init("A/B:C").get + # key10 = Key.init("A:B").get + # key11 = Key.init("A:B/C").get + # key12 = Key.init("A:B/C:D").get + + # bytes1 = @[1.byte, 2.byte, 3.byte] + # bytes2 = @[4.byte, 5.byte, 6.byte] + # bytes3: seq[byte] = @[] + # bytes4 = bytes1 + # bytes5 = bytes2 + # bytes6 = bytes3 + # bytes7 = bytes1 + # bytes8 = bytes2 + # bytes9 = bytes3 + # bytes10 = bytes1 + # bytes11 = bytes2 + # bytes12 = bytes3 + + # queryKey = Key.init("*").get + + # var + # putRes = await ds.put(key1, bytes1) + + # assert putRes.isOk + # putRes = await ds.put(key2, bytes2) + # assert putRes.isOk + # putRes = await ds.put(key3, bytes3) + # assert putRes.isOk + # putRes = await ds.put(key4, bytes4) + # assert putRes.isOk + # putRes = await ds.put(key5, bytes5) + # assert putRes.isOk + # putRes = await ds.put(key6, bytes6) + # assert putRes.isOk + # putRes = await ds.put(key7, bytes7) + # assert putRes.isOk + # putRes = await ds.put(key8, bytes8) + # assert putRes.isOk + # putRes = await ds.put(key9, bytes9) + # assert putRes.isOk + # putRes = await ds.put(key10, bytes10) + # assert putRes.isOk + # putRes = await ds.put(key11, bytes11) + # assert putRes.isOk + # putRes = await ds.put(key12, bytes12) + # assert putRes.isOk + + # var + # kds: seq[QueryResponse] + + # for kd in ds.query(Query.init(queryKey)): + # let + # (key, data) = await kd + + # kds.add (key, data) + + # # see https://sqlite.org/lang_select.html#the_order_by_clause + # # If a SELECT statement that returns more than one row does not have an + # # ORDER BY clause, the order in which the rows are returned is undefined. 
+ + # check: kds.sortedByIt(it.key.id) == @[ + # (key: key1, data: bytes1), + # (key: key2, data: bytes2), + # (key: key3, data: bytes3), + # (key: key4, data: bytes4), + # (key: key5, data: bytes5), + # (key: key6, data: bytes6), + # (key: key7, data: bytes7), + # (key: key8, data: bytes8), + # (key: key9, data: bytes9), + # (key: key10, data: bytes10), + # (key: key11, data: bytes11), + # (key: key12, data: bytes12) + # ].sortedByIt(it.key.id) + + # kds = @[] + + # queryKey = Key.init("a*").get + + # for kd in ds.query(Query.init(queryKey)): + # let + # (key, data) = await kd + + # kds.add (key, data) + + # check: kds.sortedByIt(it.key.id) == @[ + # (key: key1, data: bytes1), + # (key: key2, data: bytes2), + # (key: key3, data: bytes3), + # (key: key4, data: bytes4), + # (key: key5, data: bytes5), + # (key: key6, data: bytes6) + # ].sortedByIt(it.key.id) + + # kds = @[] + + # queryKey = Key.init("A*").get + + # for kd in ds.query(Query.init(queryKey)): + # let + # (key, data) = await kd + + # kds.add (key, data) + + # check: kds.sortedByIt(it.key.id) == @[ + # (key: key7, data: bytes7), + # (key: key8, data: bytes8), + # (key: key9, data: bytes9), + # (key: key10, data: bytes10), + # (key: key11, data: bytes11), + # (key: key12, data: bytes12) + # ].sortedByIt(it.key.id) + + # kds = @[] + + # queryKey = Key.init("a/?").get + + # for kd in ds.query(Query.init(queryKey)): + # let + # (key, data) = await kd + + # kds.add (key, data) + + # check: kds.sortedByIt(it.key.id) == @[ + # (key: key2, data: bytes2) + # ].sortedByIt(it.key.id) + + # kds = @[] + + # queryKey = Key.init("A/?").get + + # for kd in ds.query(Query.init(queryKey)): + # let + # (key, data) = await kd + + # kds.add (key, data) + + # check: kds.sortedByIt(it.key.id) == @[ + # (key: key8, data: bytes8) + # ].sortedByIt(it.key.id) + + # kds = @[] + + # queryKey = Key.init("*/?").get + + # for kd in ds.query(Query.init(queryKey)): + # let + # (key, data) = await kd + + # kds.add (key, data) + + # check: kds.sortedByIt(it.key.id) == @[ + # (key: key2, data: bytes2), + # (key: key5, data: bytes5), + # (key: key8, data: bytes8), + # (key: key11, data: bytes11) + # ].sortedByIt(it.key.id) + + # kds = @[] + + # queryKey = Key.init("[Aa]/?").get + + # for kd in ds.query(Query.init(queryKey)): + # let + # (key, data) = await kd + + # kds.add (key, data) + + # check: kds.sortedByIt(it.key.id) == @[ + # (key: key2, data: bytes2), + # (key: key8, data: bytes8) + # ].sortedByIt(it.key.id) + + # kds = @[] + + # # SQLite's GLOB operator, akin to Unix file globbing syntax, is greedy re: + # # wildcard "*". So a pattern such as "a:*[^/]" will not restrict results to + # # "/a:b", i.e. it will match on "/a:b", "/a:b/c" and "/a:b/c:d". 
+ + # queryKey = Key.init("a:*[^/]").get + + # for kd in ds.query(Query.init(queryKey)): + # let + # (key, data) = await kd + + # kds.add (key, data) + + # check: kds.sortedByIt(it.key.id) == @[ + # (key: key4, data: bytes4), + # (key: key5, data: bytes5), + # (key: key6, data: bytes6) + # ].sortedByIt(it.key.id) + + # kds = @[] + + # queryKey = Key.init("a:*[Bb]").get + + # for kd in ds.query(Query.init(queryKey)): + # let + # (key, data) = await kd + + # kds.add (key, data) + + # check: kds.sortedByIt(it.key.id) == @[ + # (key: key4, data: bytes4) + # ].sortedByIt(it.key.id) + + # kds = @[] + + # var + # deleteRes = await ds.delete(key1) + + # assert deleteRes.isOk + # deleteRes = await ds.delete(key2) + # assert deleteRes.isOk + # deleteRes = await ds.delete(key3) + # assert deleteRes.isOk + # deleteRes = await ds.delete(key4) + # assert deleteRes.isOk + # deleteRes = await ds.delete(key5) + # assert deleteRes.isOk + # deleteRes = await ds.delete(key6) + # assert deleteRes.isOk + # deleteRes = await ds.delete(key7) + # assert deleteRes.isOk + # deleteRes = await ds.delete(key8) + # assert deleteRes.isOk + # deleteRes = await ds.delete(key9) + # assert deleteRes.isOk + # deleteRes = await ds.delete(key10) + # assert deleteRes.isOk + # deleteRes = await ds.delete(key11) + # assert deleteRes.isOk + # deleteRes = await ds.delete(key12) + # assert deleteRes.isOk + + # let + # emptyKds: seq[QueryResponse] = @[] + + # for kd in ds.query(Query.init(queryKey)): + # let + # (key, data) = await kd + + # kds.add (key, data) + + # check: kds == emptyKds From 5d29ad905dce22cabac092f97d1c25d10f647073 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Fri, 16 Sep 2022 21:15:28 -0600 Subject: [PATCH 08/41] add sql backend tests --- tests/datastore/sql/testsqlitedsdb.nim | 158 +++++++++++++++++++++++++ 1 file changed, 158 insertions(+) create mode 100644 tests/datastore/sql/testsqlitedsdb.nim diff --git a/tests/datastore/sql/testsqlitedsdb.nim b/tests/datastore/sql/testsqlitedsdb.nim new file mode 100644 index 0000000..68cbfbd --- /dev/null +++ b/tests/datastore/sql/testsqlitedsdb.nim @@ -0,0 +1,158 @@ +import std/os + +import pkg/chronos +import pkg/asynctest +import pkg/stew/byteutils + +import pkg/sqlite3_abi +import pkg/datastore/key +import pkg/datastore/sql/sqlitedsdb +import pkg/datastore/sql/sqliteds + +suite "Test Open SQLite Datastore DB": + let + (path, _, _) = instantiationInfo(-1, fullPaths = true) # get this file's name + basePath = "tests_data" + basePathAbs = path.parentDir / basePath + filename = "test_store" & DbExt + dbPathAbs = basePathAbs / filename + + setupAll: + removeDir(basePathAbs) + require(not dirExists(basePathAbs)) + createDir(basePathAbs) + + teardownAll: + removeDir(basePathAbs) + require(not dirExists(basePathAbs)) + + test "Should create and open datastore DB": + let + dsDb = SQLiteDsDb.open( + path = dbPathAbs, + flags = SQLITE_OPEN_READWRITE or SQLITE_OPEN_CREATE).tryGet() + + defer: + dsDb.close() + + check: + fileExists(dbPathAbs) + + test "Should open existing DB": + let + dsDb = SQLiteDsDb.open( + path = dbPathAbs, + flags = SQLITE_OPEN_READWRITE or SQLITE_OPEN_CREATE).tryGet() + + defer: + dsDb.close() + + check: + fileExists(dbPathAbs) + + test "Should open existing DB in read only mode": + check: + fileExists(dbPathAbs) + + let + dsDb = SQLiteDsDb.open( + path = dbPathAbs, + flags = SQLITE_OPEN_READWRITE or SQLITE_OPEN_CREATE).tryGet() + + defer: + dsDb.close() + + check: + fileExists(dbPathAbs) + + test "Should fail open existing DB in read only mode": + 
removeDir(basePathAbs) + check: + not fileExists(dbPathAbs) + SQLiteDsDb.open(path = dbPathAbs).isErr + +suite "Test SQLite Datastore DB operations": + let + (path, _, _) = instantiationInfo(-1, fullPaths = true) # get this file's name + basePath = "tests_data" + basePathAbs = path.parentDir / basePath + filename = "test_store" & DbExt + dbPathAbs = basePathAbs / filename + + key = Key.init("test/key").tryGet() + data = "some data".toBytes + otherData = "some other data".toBytes + + var + dsDb: SQLiteDsDb + readOnlyDb: SQLiteDsDb + + setupAll: + removeDir(basePathAbs) + require(not dirExists(basePathAbs)) + createDir(basePathAbs) + + dsDb = SQLiteDsDb.open( + path = dbPathAbs, + flags = SQLITE_OPEN_READWRITE or SQLITE_OPEN_CREATE).tryGet() + + readOnlyDb = SQLiteDsDb.open( + path = dbPathAbs, + flags = SQLITE_OPEN_READONLY).tryGet() + + teardownAll: + removeDir(basePathAbs) + require(not dirExists(basePathAbs)) + + test "Should insert key": + check: + readOnlyDb.putStmt.exec((key.id, data, timestamp())).isErr() + + dsDb.putStmt.exec((key.id, data, timestamp())).tryGet() + + test "Should select key": + let + dataCol = dsDb.getDataCol + + var bytes: seq[byte] + proc onData(s: RawStmtPtr) = + bytes = dataCol() + + check: + dsDb.getStmt.query((key.id), onData).tryGet() + bytes == data + + test "Should update key": + check: + readOnlyDb.putStmt.exec((key.id, otherData, timestamp())).isErr() + + dsDb.putStmt.exec((key.id, otherData, timestamp())).tryGet() + + test "Should select updated key": + let + dataCol = dsDb.getDataCol + + var bytes: seq[byte] + proc onData(s: RawStmtPtr) = + bytes = dataCol() + + check: + dsDb.getStmt.query((key.id), onData).tryGet() + bytes == otherData + + test "Should delete key": + check: + readOnlyDb.deleteStmt.exec((key.id)).isErr() + + dsDb.deleteStmt.exec((key.id)).tryGet() + + test "Should not contain key": + var + exists = false + + proc onData(s: RawStmtPtr) = + exists = sqlite3_column_int64(s, ContainsStmtExistsCol.cint).bool + + check: + dsDb.containsStmt.query((key.id), onData).tryGet() + not exists From c7df334836fe80426f4cc6c294fdc17ed5bc11e0 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Fri, 16 Sep 2022 21:15:43 -0600 Subject: [PATCH 09/41] rename sql tests --- tests/datastore/test_sqlite_datastore.nim | 586 ---------------------- 1 file changed, 586 deletions(-) delete mode 100644 tests/datastore/test_sqlite_datastore.nim diff --git a/tests/datastore/test_sqlite_datastore.nim b/tests/datastore/test_sqlite_datastore.nim deleted file mode 100644 index e09767a..0000000 --- a/tests/datastore/test_sqlite_datastore.nim +++ /dev/null @@ -1,586 +0,0 @@ -import std/algorithm -import std/options -import std/os - -import pkg/asynctest/unittest2 -import pkg/chronos -import pkg/stew/results - -import ../../datastore/sqlite_datastore -import ./templates - -suite "SQLiteDatastore": - var - ds: SQLiteDatastore - - # assumes tests/test_all is run from project root, e.g. 
with `nimble test` - let - basePath = "tests" / "test_data" - basePathAbs = getCurrentDir() / basePath - filename = "test_store" & dbExt - dbPathAbs = basePathAbs / filename - - setup: - removeDir(basePathAbs) - require(not dirExists(basePathAbs)) - createDir(basePathAbs) - - teardown: - if not ds.isNil: ds.close - ds = nil - removeDir(basePathAbs) - require(not dirExists(basePathAbs)) - - asyncTest "new": - var - dsRes = SQLiteDatastore.new(basePathAbs, filename, readOnly = true) - - # for `readOnly = true` to succeed the database file must already exist - check: dsRes.isErr - - dsRes = SQLiteDatastore.new(basePathAbs / "missing", filename) - - check: dsRes.isErr - - dsRes = SQLiteDatastore.new(basePathAbs, filename) - - check: - dsRes.isOk - fileExists(dbPathAbs) - - dsRes.get.close - removeDir(basePathAbs) - assert not dirExists(basePathAbs) - createDir(basePathAbs) - - dsRes = SQLiteDatastore.new(basePath, filename) - - check: - dsRes.isOk - fileExists(dbPathAbs) - - dsRes.get.close - - # for `readOnly = true` to succeed the database file must already exist, so - # the existing file (per previous step) is not deleted prior to the next - # invocation of `SQLiteDatastore.new` - - dsRes = SQLiteDatastore.new(basePath, filename, readOnly = true) - - check: dsRes.isOk - - dsRes.get.close - removeDir(basePathAbs) - assert not dirExists(basePathAbs) - createDir(basePathAbs) - - dsRes = SQLiteDatastore.new(memory) - - check: dsRes.isOk - - dsRes.get.close - - dsRes = SQLiteDatastore.new(memory, readOnly = true) - - check: dsRes.isErr - - asyncTest "accessors": - ds = SQLiteDatastore.new(basePath).get - - check: - parentDir(ds.dbPath) == basePathAbs - not ds.env.isNil - - asyncTest "helpers": - ds = SQLiteDatastore.new(basePath).get - - ds.close - - check: - ds.env.isNil - timestamp(10.123_456) == 10_123_456.int64 - - asyncTest "put": - let - key = Key.init("a:b/c/d:e").get - - # for `readOnly = true` to succeed the database file must already exist - ds = SQLiteDatastore.new(basePathAbs, filename).get - ds.close - ds = SQLiteDatastore.new(basePathAbs, filename, readOnly = true).get - - var - bytes: seq[byte] - timestamp = timestamp() - putRes = await ds.put(key, bytes, timestamp) - - check: putRes.isErr - - ds.close - removeDir(basePathAbs) - assert not dirExists(basePathAbs) - createDir(basePathAbs) - - ds = SQLiteDatastore.new(basePathAbs, filename).get - - timestamp = timestamp() - putRes = await ds.put(key, bytes, timestamp) - - check: putRes.isOk - - let - prequeryRes = NoParamsStmt.prepare( - ds.env, "SELECT timestamp AS foo, id AS baz, data AS bar FROM " & - tableName & ";") - - assert prequeryRes.isOk - - let - prequery = prequeryRes.get - idCol = idCol(RawStmtPtr(prequery), 1) - dataCol = dataCol(RawStmtPtr(prequery), 2) - timestampCol = timestampCol(RawStmtPtr(prequery), 0) - - var - qId: string - qData: seq[byte] - qTimestamp: int64 - rowCount = 0 - - proc onData(s: RawStmtPtr) {.closure.} = - qId = idCol() - qData = dataCol() - qTimestamp = timestampCol() - inc rowCount - - var - qRes = prequery.query((), onData) - - assert qRes.isOk - - check: - qRes.get - qId == key.id - qData == bytes - qTimestamp == timestamp - rowCount == 1 - - bytes = @[1.byte, 2.byte, 3.byte] - timestamp = timestamp() - putRes = await ds.put(key, bytes, timestamp) - - check: putRes.isOk - - rowCount = 0 - qRes = prequery.query((), onData) - assert qRes.isOk - - check: - qRes.get - qId == key.id - qData == bytes - qTimestamp == timestamp - rowCount == 1 - - bytes = @[4.byte, 5.byte, 6.byte] - timestamp = 
timestamp() - putRes = await ds.put(key, bytes, timestamp) - - check: putRes.isOk - - rowCount = 0 - qRes = prequery.query((), onData) - assert qRes.isOk - - check: - qRes.get - qId == key.id - qData == bytes - qTimestamp == timestamp - rowCount == 1 - - prequery.dispose - - asyncTest "delete": - let - bytes = @[1.byte, 2.byte, 3.byte] - - var - key = Key.init("a:b/c/d:e").get - - # for `readOnly = true` to succeed the database file must already exist - ds = SQLiteDatastore.new(basePathAbs, filename).get - ds.close - ds = SQLiteDatastore.new(basePathAbs, filename, readOnly = true).get - - var - delRes = await ds.delete(key) - - check: delRes.isErr - - ds.close - removeDir(basePathAbs) - assert not dirExists(basePathAbs) - createDir(basePathAbs) - - ds = SQLiteDatastore.new(basePathAbs, filename).get - - let - putRes = await ds.put(key, bytes) - - assert putRes.isOk - - let - query = "SELECT * FROM " & tableName & ";" - - var - rowCount = 0 - - proc onData(s: RawStmtPtr) {.closure.} = - inc rowCount - - var - qRes = ds.env.query(query, onData) - - assert qRes.isOk - check: rowCount == 1 - delRes = await ds.delete(key) - - check: delRes.isOk - - rowCount = 0 - qRes = ds.env.query(query, onData) - assert qRes.isOk - - check: - delRes.isOk - rowCount == 0 - - key = Key.init("X/Y/Z").get - - delRes = await ds.delete(key) - - check: delRes.isOk - - asyncTest "contains": - let - bytes = @[1.byte, 2.byte, 3.byte] - - var - key = Key.init("a:b/c/d:e").get - - ds = SQLiteDatastore.new(basePathAbs, filename).get - - let - putRes = await ds.put(key, bytes) - - assert putRes.isOk - - var - containsRes = await ds.contains(key) - - check: - containsRes.isOk - containsRes.get == true - - key = Key.init("X/Y/Z").get - - containsRes = await ds.contains(key) - - check: - containsRes.isOk - containsRes.get == false - - asyncTest "get": - ds = SQLiteDatastore.new(basePathAbs, filename).get - - var - bytes: seq[byte] - key = Key.init("a:b/c/d:e").get - putRes = await ds.put(key, bytes) - - assert putRes.isOk - - var - getRes = await ds.get(key) - getOpt = getRes.get - - check: getOpt.isSome and getOpt.get == bytes - - bytes = @[1.byte, 2.byte, 3.byte] - putRes = await ds.put(key, bytes) - - assert putRes.isOk - - getRes = await ds.get(key) - getOpt = getRes.get - - check: getOpt.isSome and getOpt.get == bytes - - key = Key.init("X/Y/Z").get - - assert not (await ds.contains(key)).get - - getRes = await ds.get(key) - getOpt = getRes.get - - check: getOpt.isNone - - asyncTest "query": - ds = SQLiteDatastore.new(basePathAbs, filename).get - - var - key1 = Key.init("a").get - key2 = Key.init("a/b").get - key3 = Key.init("a/b:c").get - key4 = Key.init("a:b").get - key5 = Key.init("a:b/c").get - key6 = Key.init("a:b/c:d").get - key7 = Key.init("A").get - key8 = Key.init("A/B").get - key9 = Key.init("A/B:C").get - key10 = Key.init("A:B").get - key11 = Key.init("A:B/C").get - key12 = Key.init("A:B/C:D").get - - bytes1 = @[1.byte, 2.byte, 3.byte] - bytes2 = @[4.byte, 5.byte, 6.byte] - bytes3: seq[byte] = @[] - bytes4 = bytes1 - bytes5 = bytes2 - bytes6 = bytes3 - bytes7 = bytes1 - bytes8 = bytes2 - bytes9 = bytes3 - bytes10 = bytes1 - bytes11 = bytes2 - bytes12 = bytes3 - - queryKey = Key.init("*").get - - var - putRes = await ds.put(key1, bytes1) - - assert putRes.isOk - putRes = await ds.put(key2, bytes2) - assert putRes.isOk - putRes = await ds.put(key3, bytes3) - assert putRes.isOk - putRes = await ds.put(key4, bytes4) - assert putRes.isOk - putRes = await ds.put(key5, bytes5) - assert putRes.isOk - putRes = await 
ds.put(key6, bytes6) - assert putRes.isOk - putRes = await ds.put(key7, bytes7) - assert putRes.isOk - putRes = await ds.put(key8, bytes8) - assert putRes.isOk - putRes = await ds.put(key9, bytes9) - assert putRes.isOk - putRes = await ds.put(key10, bytes10) - assert putRes.isOk - putRes = await ds.put(key11, bytes11) - assert putRes.isOk - putRes = await ds.put(key12, bytes12) - assert putRes.isOk - - var - kds: seq[QueryResponse] - - for kd in ds.query(Query.init(queryKey)): - let - (key, data) = await kd - - kds.add (key, data) - - # see https://sqlite.org/lang_select.html#the_order_by_clause - # If a SELECT statement that returns more than one row does not have an - # ORDER BY clause, the order in which the rows are returned is undefined. - - check: kds.sortedByIt(it.key.id) == @[ - (key: key1, data: bytes1), - (key: key2, data: bytes2), - (key: key3, data: bytes3), - (key: key4, data: bytes4), - (key: key5, data: bytes5), - (key: key6, data: bytes6), - (key: key7, data: bytes7), - (key: key8, data: bytes8), - (key: key9, data: bytes9), - (key: key10, data: bytes10), - (key: key11, data: bytes11), - (key: key12, data: bytes12) - ].sortedByIt(it.key.id) - - kds = @[] - - queryKey = Key.init("a*").get - - for kd in ds.query(Query.init(queryKey)): - let - (key, data) = await kd - - kds.add (key, data) - - check: kds.sortedByIt(it.key.id) == @[ - (key: key1, data: bytes1), - (key: key2, data: bytes2), - (key: key3, data: bytes3), - (key: key4, data: bytes4), - (key: key5, data: bytes5), - (key: key6, data: bytes6) - ].sortedByIt(it.key.id) - - kds = @[] - - queryKey = Key.init("A*").get - - for kd in ds.query(Query.init(queryKey)): - let - (key, data) = await kd - - kds.add (key, data) - - check: kds.sortedByIt(it.key.id) == @[ - (key: key7, data: bytes7), - (key: key8, data: bytes8), - (key: key9, data: bytes9), - (key: key10, data: bytes10), - (key: key11, data: bytes11), - (key: key12, data: bytes12) - ].sortedByIt(it.key.id) - - kds = @[] - - queryKey = Key.init("a/?").get - - for kd in ds.query(Query.init(queryKey)): - let - (key, data) = await kd - - kds.add (key, data) - - check: kds.sortedByIt(it.key.id) == @[ - (key: key2, data: bytes2) - ].sortedByIt(it.key.id) - - kds = @[] - - queryKey = Key.init("A/?").get - - for kd in ds.query(Query.init(queryKey)): - let - (key, data) = await kd - - kds.add (key, data) - - check: kds.sortedByIt(it.key.id) == @[ - (key: key8, data: bytes8) - ].sortedByIt(it.key.id) - - kds = @[] - - queryKey = Key.init("*/?").get - - for kd in ds.query(Query.init(queryKey)): - let - (key, data) = await kd - - kds.add (key, data) - - check: kds.sortedByIt(it.key.id) == @[ - (key: key2, data: bytes2), - (key: key5, data: bytes5), - (key: key8, data: bytes8), - (key: key11, data: bytes11) - ].sortedByIt(it.key.id) - - kds = @[] - - queryKey = Key.init("[Aa]/?").get - - for kd in ds.query(Query.init(queryKey)): - let - (key, data) = await kd - - kds.add (key, data) - - check: kds.sortedByIt(it.key.id) == @[ - (key: key2, data: bytes2), - (key: key8, data: bytes8) - ].sortedByIt(it.key.id) - - kds = @[] - - # SQLite's GLOB operator, akin to Unix file globbing syntax, is greedy re: - # wildcard "*". So a pattern such as "a:*[^/]" will not restrict results to - # "/a:b", i.e. it will match on "/a:b", "/a:b/c" and "/a:b/c:d". 
- - queryKey = Key.init("a:*[^/]").get - - for kd in ds.query(Query.init(queryKey)): - let - (key, data) = await kd - - kds.add (key, data) - - check: kds.sortedByIt(it.key.id) == @[ - (key: key4, data: bytes4), - (key: key5, data: bytes5), - (key: key6, data: bytes6) - ].sortedByIt(it.key.id) - - kds = @[] - - queryKey = Key.init("a:*[Bb]").get - - for kd in ds.query(Query.init(queryKey)): - let - (key, data) = await kd - - kds.add (key, data) - - check: kds.sortedByIt(it.key.id) == @[ - (key: key4, data: bytes4) - ].sortedByIt(it.key.id) - - kds = @[] - - var - deleteRes = await ds.delete(key1) - - assert deleteRes.isOk - deleteRes = await ds.delete(key2) - assert deleteRes.isOk - deleteRes = await ds.delete(key3) - assert deleteRes.isOk - deleteRes = await ds.delete(key4) - assert deleteRes.isOk - deleteRes = await ds.delete(key5) - assert deleteRes.isOk - deleteRes = await ds.delete(key6) - assert deleteRes.isOk - deleteRes = await ds.delete(key7) - assert deleteRes.isOk - deleteRes = await ds.delete(key8) - assert deleteRes.isOk - deleteRes = await ds.delete(key9) - assert deleteRes.isOk - deleteRes = await ds.delete(key10) - assert deleteRes.isOk - deleteRes = await ds.delete(key11) - assert deleteRes.isOk - deleteRes = await ds.delete(key12) - assert deleteRes.isOk - - let - emptyKds: seq[QueryResponse] = @[] - - for kd in ds.query(Query.init(queryKey)): - let - (key, data) = await kd - - kds.add (key, data) - - check: kds == emptyKds From b380d09d6b1b5c4a2681181194101eb429c11bbb Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 15:52:00 -0600 Subject: [PATCH 10/41] rename fsstore to fsds --- datastore/{fsstore.nim => fsds.nim} | 82 +++++++++++++++++++++++------ 1 file changed, 65 insertions(+), 17 deletions(-) rename datastore/{fsstore.nim => fsds.nim} (67%) diff --git a/datastore/fsstore.nim b/datastore/fsds.nim similarity index 67% rename from datastore/fsstore.nim rename to datastore/fsds.nim index 7254549..233bce0 100644 --- a/datastore/fsstore.nim +++ b/datastore/fsds.nim @@ -1,5 +1,6 @@ import std/os import std/sequtils +import std/strutils import std/options import pkg/chronos @@ -20,23 +21,27 @@ const # Paths should be matched exactly, i.e. # we're forbidding this dirs from being # touched directly, but subdirectories - # can still be touched - ProtectedPaths = [ + # can still be touched/created + ProtectedPaths* = [ "/", "/usr", "/etc", "/home", "/Users"] + Allowed* = + toSeq('A'..'Z') & + toSeq('a'..'z') & + toSeq('0'..'9') & + toSeq(['/', '_', '-']) + type FSDatastore* = ref object of Datastore root*: string ignoreProtected: bool + depth: int -func checkProtected(dir: string): bool = - dir in ProtectedPaths - -proc path*(self: FSDatastore, key: Key): string = +template path*(self: FSDatastore, key: Key): string = var segments: seq[string] @@ -45,22 +50,53 @@ proc path*(self: FSDatastore, key: Key): string = segments.add ns.value continue + # `:` are replaced with `/` segments.add(ns.field / ns.value) - # is it problematic that per this logic Key(/a:b) evaluates to the same path - # as Key(/a/b)? 
may need to check if/how other Datastore implementations - # distinguish them + self.root / segments.joinPath() - self.root / joinPath(segments) +template checkProtected*(path: string): bool = + path in ProtectedPaths + +template allowed*(path: string): bool = + var notfound = true + for s in path: + if s.char notin Allowed: + notfound = false + break + + notfound + +template validDepth*(self: FSDatastore, key: Key): bool = + key.len <= self.depth method contains*(self: FSDatastore, key: Key): Future[?!bool] {.async.} = - return success fileExists(self.path(key)) -method delete*(self: FSDatastore, key: Key): Future[?!void] {.async.} = + if not self.validDepth(key): + return failure "Path has invalid depth!" let path = self.path(key) + if not path.allowed: + return failure "Path is contains invalid characters!" + + if checkProtected(path): + return failure "Path is protected!" + + return success fileExists(path) + +method delete*(self: FSDatastore, key: Key): Future[?!void] {.async.} = + + if not self.validDepth(key): + return failure "Path has invalid depth!" + + let + path = self.path(key) + + if not path.allowed: + return failure "Path is contains invalid characters!" + if checkProtected(path): return failure "Path is protected!" @@ -84,9 +120,15 @@ method get*(self: FSDatastore, key: Key): Future[?!seq[byte]] {.async.} = # calling the former after allocating a seq with size automatically # determined via `getFileSize` + if not self.validDepth(key): + return failure "Path has invalid depth!" + let path = self.path(key) + if not path.allowed: + return failure "Path is contains invalid characters!" + if checkProtected(path): return failure "Path is protected!" @@ -119,7 +161,7 @@ method get*(self: FSDatastore, key: Key): Future[?!seq[byte]] {.async.} = return success bytes - except IOError as e: + except CatchableError as e: return failure e method put*( @@ -127,18 +169,22 @@ method put*( key: Key, data: seq[byte]): Future[?!void] {.async, locks: "unknown".} = + if not self.validDepth(key): + return failure "Path has invalid depth!" + let path = self.path(key) + if not path.allowed: + return failure "Path is contains invalid characters!" + if checkProtected(path): return failure "Path is protected!" 
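  # Note: the parent directory is created on demand just below, so nested keys
  # such as /a/b:c materialize as nested folders under the datastore root.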
try: createDir(parentDir(path)) writeFile(path, data) - except IOError as e: - return failure e - except OSError as e: + except CatchableError as e: return failure e return success() @@ -152,6 +198,7 @@ method put*( proc new*( T: type FSDatastore, root: string, + depth = 2, caseSensitive = true, ignoreProtected = false): ?!T = @@ -165,4 +212,5 @@ proc new*( success T( root: root, - ignoreProtected: ignoreProtected) + ignoreProtected: ignoreProtected, + depth: depth) From 437ae2e035ec0c3530f6e21ff1434232135af10c Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 15:52:34 -0600 Subject: [PATCH 11/41] add proper query interface --- datastore/datastore.nim | 32 ++++++++++++------------------ datastore/query.nim | 44 +++++++---------------------------------- 2 files changed, 20 insertions(+), 56 deletions(-) diff --git a/datastore/datastore.nim b/datastore/datastore.nim index 7535d80..f441a4c 100644 --- a/datastore/datastore.nim +++ b/datastore/datastore.nim @@ -11,35 +11,29 @@ export key, query push: {.upraises: [].} type + DatastoreError* = object of CatchableError + DatastoreKeyNotFound* = object of DatastoreError + + CodexResult*[T] = Result[T, ref DatastoreError] Datastore* = ref object of RootObj -method contains*( - self: Datastore, - key: Key): Future[?!bool] {.base, locks: "unknown".} = - +method contains*(self: Datastore, key: Key): Future[?!bool] {.base, locks: "unknown".} = raiseAssert("Not implemented!") -method delete*( - self: Datastore, - key: Key): Future[?!void] {.base, locks: "unknown".} = - +method delete*(self: Datastore, key: Key): Future[?!void] {.base, locks: "unknown".} = raiseAssert("Not implemented!") -method get*( - self: Datastore, - key: Key): Future[?!(?seq[byte])] {.base, locks: "unknown".} = - +method get*(self: Datastore, key: Key): Future[?!seq[byte]] {.base, locks: "unknown".} = raiseAssert("Not implemented!") -method put*( - self: Datastore, - key: Key, - data: seq[byte]): Future[?!void] {.base, locks: "unknown".} = - +method put*(self: Datastore, key: Key, data: seq[byte]): Future[?!void] {.base, locks: "unknown".} = raiseAssert("Not implemented!") -iterator query*( +method close*(self: Datastore): Future[?!void] {.base, locks: "unknown".} = + raiseAssert("Not implemented!") + +method query*( self: Datastore, - query: Query): Future[QueryResponse] = + query: Query): Future[QueryIter] = raiseAssert("Not implemented!") diff --git a/datastore/query.nim b/datastore/query.nim index decfcba..d89c3f0 100644 --- a/datastore/query.nim +++ b/datastore/query.nim @@ -1,61 +1,31 @@ import ./key type - Node* = object of RootObj - next*: Node - prev*: Node - - Filter* = object of Node - field*: string - value*: string - - FilterBool* = object of Filter - a*, b*: Filter - - FilterAnd = object of FilterBool - FilterOr = object of FilterBool - - Eq = object of Filter - Lt = object of Filter - Gt = object of Filter - Not = object of Filter - SortOrder* {.pure.} = enum Assending, Descensing - Order* = object - field*: string - sort*: SortOrder - Query* = object key*: Key + value*: bool limit*: int skip*: int - orders*: seq[Order] - filters*: seq[Filter] + sort*: SortOrder QueryResponse* = tuple[key: Key, data: seq[byte]] - -proc `==`*(a, b: Filter): Filter = discard - -proc `!=`*(a, b: Filter): Filter = discard -proc `>`*(a, b: Filter): Filter = discard -proc `>=`*(a, b: Filter): Filter = discard -proc `<`*(a, b: Filter): Filter = discard -proc `<=`*(a, b: Filter): Filter = discard + QueryIter* = iterator(): QueryResponse {.closure.} proc init*( T: type 
Query, key: Key, - orders: openArray[Order] = [], - filters: openArray[Filter] = [], + value = false, + sort = SortOrder.Descensing, skip = 0, limit = 0): T = T( key: key, - filters: @filters, - orders: @orders, + value: value, + sort: sort, skip: skip, limit: limit) From 24d161d4b449ecab317e7bfb9e03bd49af6cc5b7 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 15:52:46 -0600 Subject: [PATCH 12/41] more cleanup --- datastore/key.nim | 54 +++++++++++++++++++++++------------------------ 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/datastore/key.nim b/datastore/key.nim index 516ccbf..ee0ab1d 100644 --- a/datastore/key.nim +++ b/datastore/key.nim @@ -22,8 +22,8 @@ type namespaces*: seq[Namespace] const - delimiter = ":" - separator = "/" + Delimiter* = ":" + Separator* = "/" # TODO: operator/s for combining string|Namespace,string|Namespace # TODO: lifting from ?![Namespace|Key] for various ops @@ -35,25 +35,25 @@ func init*( if value.strip == "": return failure "value string must not be all whitespace or empty" - if value.contains(delimiter): - return failure "value string must not contain delimiter \"" & - delimiter & "\"" + if value.contains(Delimiter): + return failure "value string must not contain Delimiter \"" & + Delimiter & "\"" - if value.contains(separator): - return failure "value string must not contain separator \"" & - separator & "\"" + if value.contains(Separator): + return failure "value string must not contain Separator \"" & + Separator & "\"" if field != "": if field.strip == "": return failure "field string must not be all whitespace" - if field.contains(delimiter): - return failure "field string must not contain delimiter \"" & - delimiter & "\"" + if field.contains(Delimiter): + return failure "field string must not contain Delimiter \"" & + Delimiter & "\"" - if field.contains(separator): - return failure "field string must not contain separator \"" & - separator & "\"" + if field.contains(Separator): + return failure "field string must not contain Separator \"" & + Separator & "\"" success T(field: field, value: value) @@ -61,20 +61,20 @@ func init*(T: type Namespace, id: string): ?!T = if id.strip == "": return failure "id string must not be all whitespace or empty" - if id.contains(separator): - return failure "id string must not contain separator \"" & separator & "\"" + if id.contains(Separator): + return failure "id string must not contain Separator \"" & Separator & "\"" - if id == delimiter: - return failure "value in id string \"[field]" & delimiter & + if id == Delimiter: + return failure "value in id string \"[field]" & Delimiter & "[value]\" must not be empty" - if id.count(delimiter) > 1: - return failure "id string must not contain more than one delimiter \"" & - delimiter & "\"" + if id.count(Delimiter) > 1: + return failure "id string must not contain more than one Delimiter \"" & + Delimiter & "\"" let (field, value) = block: - let parts = id.split(delimiter) + let parts = id.split(Delimiter) if parts.len > 1: (parts[0], parts[^1]) else: @@ -84,7 +84,7 @@ func init*(T: type Namespace, id: string): ?!T = func id*(self: Namespace): string = if self.field.len > 0: - self.field & delimiter & self.value + self.field & Delimiter & self.value else: self.value @@ -117,11 +117,11 @@ func init*(T: type Key, id: string): ?!T = return failure "id string must not be all whitespace" let - nsStrs = id.split(separator).filterIt(it != "") + nsStrs = id.split(Separator).filterIt(it != "") if nsStrs.len == 0: - return failure "id 
string must not contain only one or more separator " & - "\"" & separator & "\"" + return failure "id string must not contain more than one Separator " & + "\"" & Separator & "\"" Key.init(nsStrs) @@ -156,7 +156,7 @@ func `type`*(self: Key): string = return self[^1].field func id*(self: Key): string = - separator & self.namespaces.mapIt(it.id).join(separator) + Separator & self.namespaces.mapIt(it.id).join(Separator) func root*(self: Key): bool = self.len == 1 From 9fcf719d0f0b2bf54ee3aa90a1425ea640615bc8 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 15:53:00 -0600 Subject: [PATCH 13/41] rename null datastore --- datastore/{null_datastore.nim => nullds.nim} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename datastore/{null_datastore.nim => nullds.nim} (100%) diff --git a/datastore/null_datastore.nim b/datastore/nullds.nim similarity index 100% rename from datastore/null_datastore.nim rename to datastore/nullds.nim From bdc10c27fdf561c7d2414a3354aa7b7373887e9a Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 15:53:13 -0600 Subject: [PATCH 14/41] rename tired datastore --- datastore/{tiered_datastore.nim => tieredds.nim} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename datastore/{tiered_datastore.nim => tieredds.nim} (100%) diff --git a/datastore/tiered_datastore.nim b/datastore/tieredds.nim similarity index 100% rename from datastore/tiered_datastore.nim rename to datastore/tieredds.nim From 4d43447ef2d9452a6e485aed2a003dd9486362e9 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 15:53:24 -0600 Subject: [PATCH 15/41] add sql top level export --- datastore/sql.nim | 1 + 1 file changed, 1 insertion(+) create mode 100644 datastore/sql.nim diff --git a/datastore/sql.nim b/datastore/sql.nim new file mode 100644 index 0000000..e207daf --- /dev/null +++ b/datastore/sql.nim @@ -0,0 +1 @@ +import ./sql/sqliteds From 36655e318a026d77e7b1b6e491c7d5c972f5c4e1 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 15:53:38 -0600 Subject: [PATCH 16/41] cleanup tests --- tests/datastore/sql/testsqliteds.nim | 41 ++++++++++++++++++++-------- 1 file changed, 29 insertions(+), 12 deletions(-) diff --git a/tests/datastore/sql/testsqliteds.nim b/tests/datastore/sql/testsqliteds.nim index 181e0a1..a953611 100644 --- a/tests/datastore/sql/testsqliteds.nim +++ b/tests/datastore/sql/testsqliteds.nim @@ -9,7 +9,9 @@ import pkg/stew/byteutils import pkg/datastore/sql/sqliteds -suite "SQLiteDatastore": +import ../basictests + +suite "Test Basic SQLiteDatastore": let (path, _, _) = instantiationInfo(-1, fullPaths = true) # get this file's name basePath = "tests_data" @@ -20,6 +22,32 @@ suite "SQLiteDatastore": bytes = "some bytes".toBytes otherBytes = "some other bytes".toBytes + var + dsDb: SQLiteDatastore + + setupAll: + removeDir(basePathAbs) + require(not dirExists(basePathAbs)) + createDir(basePathAbs) + + dsDb = SQLiteDatastore.new(path = dbPathAbs).tryGet() + + teardownAll: + removeDir(basePathAbs) + require(not dirExists(basePathAbs)) + + basicStoreTests(dsDb, key, bytes, otherBytes) + +suite "Test Read Only SQLiteDatastore": + let + (path, _, _) = instantiationInfo(-1, fullPaths = true) # get this file's name + basePath = "tests_data" + basePathAbs = path.parentDir / basePath + filename = "test_store" & DbExt + dbPathAbs = basePathAbs / filename + key = Key.init("a:b/c/d:e").tryGet() + bytes = "some bytes".toBytes + var dsDb: SQLiteDatastore readOnlyDb: SQLiteDatastore @@ -47,17 +75,6 @@ suite "SQLiteDatastore": 
(await readOnlyDb.get(key)).tryGet() == bytes (await dsDb.get(key)).tryGet() == bytes - test "put update": - check: - (await readOnlyDb.put(key, otherBytes)).isErr - - (await dsDb.put(key, otherBytes)).tryGet() - - test "get updated": - check: - (await readOnlyDb.get(key)).tryGet() == otherBytes - (await dsDb.get(key)).tryGet() == otherBytes - test "delete": check: (await readOnlyDb.delete(key)).isErr From 86b13495f1ccc1cf2471dfeb8551dce296a5385c Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 15:54:19 -0600 Subject: [PATCH 17/41] rename tests --- tests/datastore/templates.nim | 1 - .../{test_datastore.nim => testdatastore.nim} | 11 +- tests/datastore/testfsds.nim | 141 +++++++++++++ tests/datastore/testfstore.nim | 196 ------------------ tests/datastore/{test_key.nim => testkey.nim} | 0 ...test_null_datastore.nim => testnullds.nim} | 15 +- tests/datastore/testsql.nim | 4 + ..._tiered_datastore.nim => testtieredds.nim} | 21 +- tests/test_all.nim | 9 - tests/testall.nim | 9 + 10 files changed, 176 insertions(+), 231 deletions(-) delete mode 100644 tests/datastore/templates.nim rename tests/datastore/{test_datastore.nim => testdatastore.nim} (78%) create mode 100644 tests/datastore/testfsds.nim delete mode 100644 tests/datastore/testfstore.nim rename tests/datastore/{test_key.nim => testkey.nim} (100%) rename tests/datastore/{test_null_datastore.nim => testnullds.nim} (79%) create mode 100644 tests/datastore/testsql.nim rename tests/datastore/{test_tiered_datastore.nim => testtieredds.nim} (91%) delete mode 100644 tests/test_all.nim create mode 100644 tests/testall.nim diff --git a/tests/datastore/templates.nim b/tests/datastore/templates.nim deleted file mode 100644 index 9847e1f..0000000 --- a/tests/datastore/templates.nim +++ /dev/null @@ -1 +0,0 @@ -template asyncTest*(name, body: untyped) = test(name, body) diff --git a/tests/datastore/test_datastore.nim b/tests/datastore/testdatastore.nim similarity index 78% rename from tests/datastore/test_datastore.nim rename to tests/datastore/testdatastore.nim index ccf4d75..b7f3f83 100644 --- a/tests/datastore/test_datastore.nim +++ b/tests/datastore/testdatastore.nim @@ -5,25 +5,24 @@ import pkg/chronos import pkg/stew/results import ../../datastore -import ./templates suite "Datastore (base)": let key = Key.init("a").get ds = Datastore() - asyncTest "put": + test "put": expect Defect: discard ds.put(key, @[1.byte]) - asyncTest "delete": + test "delete": expect Defect: discard ds.delete(key) - asyncTest "contains": + test "contains": expect Defect: discard ds.contains(key) - asyncTest "get": + test "get": expect Defect: discard ds.get(key) - asyncTest "query": + test "query": expect Defect: for n in ds.query(Query.init(key)): discard diff --git a/tests/datastore/testfsds.nim b/tests/datastore/testfsds.nim new file mode 100644 index 0000000..f9b3e94 --- /dev/null +++ b/tests/datastore/testfsds.nim @@ -0,0 +1,141 @@ +import std/algorithm +import std/options +import std/os + +import pkg/asynctest/unittest2 +import pkg/chronos +import pkg/stew/results +import pkg/stew/byteutils + +import pkg/datastore/fsds + +import ./basictests + +suite "Test Basic FSDatastore": + let + (path, _, _) = instantiationInfo(-1, fullPaths = true) # get this file's name + basePath = "tests_data" + basePathAbs = path.parentDir / basePath + key = Key.init("/a/b").tryGet() + bytes = "some bytes".toBytes + otherBytes = "some other bytes".toBytes + + var + fsStore: FSDatastore + + setupAll: + removeDir(basePathAbs) + require(not dirExists(basePathAbs)) + 
createDir(basePathAbs) + + fsStore = FSDatastore.new(root = basePathAbs).tryGet() + + teardownAll: + removeDir(basePathAbs) + require(not dirExists(basePathAbs)) + + basicStoreTests(fsStore, key, bytes, otherBytes) + +suite "Test Misc FSDatastore": + let + (path, _, _) = instantiationInfo(-1, fullPaths = true) # get this file's name + basePath = "tests_data" + basePathAbs = path.parentDir / basePath + bytes = "some bytes".toBytes + + setupAll: + removeDir(basePathAbs) + require(not dirExists(basePathAbs)) + createDir(basePathAbs) + + teardownAll: + removeDir(basePathAbs) + require(not dirExists(basePathAbs)) + + test "Test checkProtected()": + let + fs = FSDatastore.new(root = "/").tryGet() + + for p in ProtectedPaths: + if p == "/": continue + let + key = Key.init(p).tryGet() + + check: + fs.path(key).checkProtected() + + test "Test protected paths": + let + fs = FSDatastore.new(root = "/").tryGet() + + for p in ProtectedPaths: + if p == "/": continue + let + key = Key.init(p).tryGet() + + check: + (await fs.put(key, bytes)).isErr + (await fs.get(key)).isErr + (await fs.delete(key)).isErr + (await fs.contains(key)).isErr + + test "Test allowed()": + let + chars = [ + "/a*", "/a/b*", "/a/b$", "/a/b()", + "/a/b+", "/a/b$", "/d%", "/A/b@", + "/A!", "/b#/##"] + + for c in chars: + check not c.allowed + + test "Test valid key (path) names": + let + fs = FSDatastore.new(root = basePathAbs).tryGet() + bytes = "some bytes".toBytes + chars = + ["/a*", "/a/b*", "/a/b$", "/a/b()", + "/a/b+", "/a/b$", "/d%", "/A/b@", + "/A!", "/b#/##"] + + for c in chars: + let + key = Key.init(c).tryGet() + + check: + (await fs.put(key, bytes)).isErr + (await fs.get(key)).isErr + (await fs.delete(key)).isErr + (await fs.contains(key)).isErr + + test "Test validDepth()": + let + fs = FSDatastore.new(root = "/", depth = 3).tryGet() + invalid = Key.init("/a/b/c/d").tryGet() + valid = Key.init("/a/b/c").tryGet() + + check: + not fs.validDepth(invalid) + fs.validDepth(valid) + + test "Test invalid key (path) depth": + let + fs = FSDatastore.new(root = basePathAbs, depth = 3).tryGet() + key = Key.init("/a/b/c/d").tryGet() + + check: + (await fs.put(key, bytes)).isErr + (await fs.get(key)).isErr + (await fs.delete(key)).isErr + (await fs.contains(key)).isErr + + test "Test valid key (path) depth": + let + fs = FSDatastore.new(root = basePathAbs, depth = 3).tryGet() + key = Key.init("/a/b/c").tryGet() + + check: + (await fs.put(key, bytes)).isOk + (await fs.get(key)).isOk + (await fs.delete(key)).isOk + (await fs.contains(key)).isOk diff --git a/tests/datastore/testfstore.nim b/tests/datastore/testfstore.nim deleted file mode 100644 index 9463616..0000000 --- a/tests/datastore/testfstore.nim +++ /dev/null @@ -1,196 +0,0 @@ -import std/options -import std/os - -import pkg/asynctest/unittest2 -import pkg/chronos -import pkg/questionable -import pkg/questionable/results -import pkg/stew/byteutils -from pkg/stew/results as stewResults import get, isOk - -import ../../datastore/fsstore -import ./templates - -suite "FSDatastore": - # assumes tests/test_all is run from project root, e.g. 
with `nimble test` - let - root = "tests" / "test_data" - rootAbs = getCurrentDir() / root - - setup: - removeDir(rootAbs) - require(not dirExists(rootAbs)) - createDir(rootAbs) - - teardown: - removeDir(rootAbs) - require(not dirExists(rootAbs)) - - asyncTest "new": - var - dsRes: ?!FSDatastore - - dsRes = FSDatastore.new(rootAbs / "missing") - - check: dsRes.isErr - - dsRes = FSDatastore.new(rootAbs) - - check: dsRes.isOk - - dsRes = FSDatastore.new(root) - - check: dsRes.isOk - - asyncTest "accessors": - let - ds = FSDatastore.new(root).get - - check: ds.root == rootAbs - - asyncTest "helpers": - let - ds = FSDatastore.new(root).tryGet() - - check: - # see comment in ../../datastore/filesystem_datastore re: whether path - # equivalence of e.g. Key(/a:b) and Key(/a/b) is problematic - ds.path(Key.init("a").tryGet()) == rootAbs / "a" - ds.path(Key.init("a:b").tryGet()) == rootAbs / "a" / "b" - ds.path(Key.init("a/b").tryGet()) == rootAbs / "a" / "b" - ds.path(Key.init("a:b/c").tryGet()) == rootAbs / "a" / "b" / "c" - ds.path(Key.init("a/b/c").tryGet()) == rootAbs / "a" / "b" / "c" - ds.path(Key.init("a:b/c:d").tryGet()) == rootAbs / "a" / "b" / "c" / "d" - ds.path(Key.init("a/b/c:d").tryGet()) == rootAbs / "a" / "b" / "c" / "d" - ds.path(Key.init("a/b/c/d").tryGet()) == rootAbs / "a" / "b" / "c" / "d" - - asyncTest "put": - let - ds = FSDatastore.new(root).get - key = Key.init("a:b/c/d:e").get - path = ds.path(key) - - var - bytes: seq[byte] - putRes = await ds.put(key, bytes) - - check: - putRes.isOk - readFile(path).toBytes == bytes - - bytes = @[1.byte, 2.byte, 3.byte] - - putRes = await ds.put(key, bytes) - - check: - putRes.isOk - readFile(path).toBytes == bytes - - bytes = @[4.byte, 5.byte, 6.byte] - - putRes = await ds.put(key, bytes) - - check: - putRes.isOk - readFile(path).toBytes == bytes - - asyncTest "delete": - let - bytes = @[1.byte, 2.byte, 3.byte] - ds = FSDatastore.new(root).get - - var - key = Key.init("a:b/c/d:e").get - path = ds.path(key) - - let - putRes = await ds.put(key, bytes) - - assert putRes.isOk - - var - delRes = await ds.delete(key) - - check: - delRes.isOk - not fileExists(path) - dirExists(parentDir(path)) - - key = Key.init("X/Y/Z").get - path = ds.path(key) - assert not fileExists(path) - - delRes = await ds.delete(key) - - check: delRes.isOk - - asyncTest "contains": - let - bytes = @[1.byte, 2.byte, 3.byte] - ds = FSDatastore.new(root).get - - var - key = Key.init("a:b/c/d:e").get - path = ds.path(key) - putRes = await ds.put(key, bytes) - - assert putRes.isOk - - var - containsRes = await ds.contains(key) - - check: - containsRes.isOk - containsRes.get == true - - key = Key.init("X/Y/Z").get - path = ds.path(key) - assert not fileExists(path) - - containsRes = await ds.contains(key) - - check: - containsRes.isOk - containsRes.get == false - - asyncTest "get": - let - ds = FSDatastore.new(root).get - - var - bytes: seq[byte] - key = Key.init("a:b/c/d:e").get - path = ds.path(key) - putRes = await ds.put(key, bytes) - - assert putRes.isOk - - var - getRes = await ds.get(key) - getOpt = getRes.get - - check: getOpt.isSome and getOpt.get == bytes - - bytes = @[1.byte, 2.byte, 3.byte] - putRes = await ds.put(key, bytes) - - assert putRes.isOk - - getRes = await ds.get(key) - getOpt = getRes.get - - check: getOpt.isSome and getOpt.get == bytes - - key = Key.init("X/Y/Z").get - path = ds.path(key) - - assert not fileExists(path) - - getRes = await ds.get(key) - getOpt = getRes.get - - check: getOpt.isNone - - # asyncTest "query": - # check: - # true diff 
--git a/tests/datastore/test_key.nim b/tests/datastore/testkey.nim similarity index 100% rename from tests/datastore/test_key.nim rename to tests/datastore/testkey.nim diff --git a/tests/datastore/test_null_datastore.nim b/tests/datastore/testnullds.nim similarity index 79% rename from tests/datastore/test_null_datastore.nim rename to tests/datastore/testnullds.nim index c37d4f3..0af7a89 100644 --- a/tests/datastore/test_null_datastore.nim +++ b/tests/datastore/testnullds.nim @@ -4,34 +4,33 @@ import pkg/asynctest/unittest2 import pkg/chronos import pkg/stew/results -import ../../datastore/null_datastore -import ./templates +import pkg/datastore/nullds suite "NullDatastore": let key = Key.init("a").get ds = NullDatastore.new() - asyncTest "new": + test "new": check: not ds.isNil - asyncTest "put": + test "put": check: (await ds.put(key, @[1.byte])).isOk - asyncTest "delete": + test "delete": check: (await ds.delete(key)).isOk - asyncTest "contains": + test "contains": check: (await ds.contains(key)).isOk (await ds.contains(key)).get == false - asyncTest "get": + test "get": check: (await ds.get(key)).isOk (await ds.get(key)).get.isNone - asyncTest "query": + test "query": var x = true diff --git a/tests/datastore/testsql.nim b/tests/datastore/testsql.nim new file mode 100644 index 0000000..dcf6259 --- /dev/null +++ b/tests/datastore/testsql.nim @@ -0,0 +1,4 @@ +import ./sql/testsqlitedsdb +import ./sql/testsqliteds + +{.warning[UnusedImport]: off.} diff --git a/tests/datastore/test_tiered_datastore.nim b/tests/datastore/testtieredds.nim similarity index 91% rename from tests/datastore/test_tiered_datastore.nim rename to tests/datastore/testtieredds.nim index 5dfa68c..46a87b2 100644 --- a/tests/datastore/test_tiered_datastore.nim +++ b/tests/datastore/testtieredds.nim @@ -5,10 +5,9 @@ import pkg/asynctest/unittest2 import pkg/chronos import pkg/stew/results -import ../../datastore/filesystem_datastore -import ../../datastore/sqlite_datastore -import ../../datastore/tiered_datastore -import ./templates +import ../../datastore/fsds +import ../../datastore/sql +import ../../datastore/tieredds suite "TieredDatastore": # assumes tests/test_all is run from project root, e.g. 
with `nimble test` @@ -35,7 +34,7 @@ suite "TieredDatastore": removeDir(rootAbs) require(not dirExists(rootAbs)) - asyncTest "new": + test "new": check: TieredDatastore.new().isErr TieredDatastore.new([]).isErr @@ -44,7 +43,7 @@ suite "TieredDatastore": TieredDatastore.new([ds1, ds2]).isOk TieredDatastore.new(@[ds1, ds2]).isOk - asyncTest "accessors": + test "accessors": let stores = @[ds1, ds2] @@ -53,7 +52,7 @@ suite "TieredDatastore": TieredDatastore.new([ds1, ds2]).get.stores == stores TieredDatastore.new(@[ds1, ds2]).get.stores == stores - asyncTest "put": + test "put": let ds = TieredDatastore.new(ds1, ds2).get @@ -68,7 +67,7 @@ suite "TieredDatastore": (await ds1.get(key)).get.get == bytes (await ds2.get(key)).get.get == bytes - asyncTest "delete": + test "delete": let ds = TieredDatastore.new(ds1, ds2).get putRes = await ds.put(key, bytes) @@ -85,7 +84,7 @@ suite "TieredDatastore": (await ds1.get(key)).get.isNone (await ds2.get(key)).get.isNone - asyncTest "contains": + test "contains": let ds = TieredDatastore.new(ds1, ds2).get @@ -106,7 +105,7 @@ suite "TieredDatastore": (await ds1.contains(key)).get (await ds2.contains(key)).get - asyncTest "get": + test "get": var ds = TieredDatastore.new(ds1, ds2).get @@ -149,6 +148,6 @@ suite "TieredDatastore": (await ds1.get(key)).get.isSome (await ds1.get(key)).get.get == bytes - # asyncTest "query": + # test "query": # check: # true diff --git a/tests/test_all.nim b/tests/test_all.nim deleted file mode 100644 index 82a46fa..0000000 --- a/tests/test_all.nim +++ /dev/null @@ -1,9 +0,0 @@ -import - ./datastore/test_key, - ./datastore/test_datastore, - ./datastore/test_null_datastore, - ./datastore/test_filesystem_datastore, - ./datastore/test_sqlite_datastore, - ./datastore/test_tiered_datastore - -{.warning[UnusedImport]: off.} diff --git a/tests/testall.nim b/tests/testall.nim new file mode 100644 index 0000000..8a6b3ad --- /dev/null +++ b/tests/testall.nim @@ -0,0 +1,9 @@ +import + ./datastore/testkey, + ./datastore/testdatastore, + ./datastore/testnullds, + ./datastore/testfsds, + ./datastore/testsql, + ./datastore/testtieredds + +{.warning[UnusedImport]: off.} From f5c7a3c02b341dec878bc4fa35499a8441dda2d3 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 15:54:29 -0600 Subject: [PATCH 18/41] add common basic tests --- tests/datastore/basictests.nim | 36 ++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 tests/datastore/basictests.nim diff --git a/tests/datastore/basictests.nim b/tests/datastore/basictests.nim new file mode 100644 index 0000000..ae11e93 --- /dev/null +++ b/tests/datastore/basictests.nim @@ -0,0 +1,36 @@ +import std/options +import std/os + +import pkg/asynctest +import pkg/chronos +import pkg/stew/results +import pkg/stew/byteutils + +import pkg/datastore + +proc basicStoreTests*( + ds: Datastore, + key: Key, + bytes: seq[byte], + otherBytes: seq[byte]) = + + test "put": + (await ds.put(key, bytes)).tryGet() + + test "get": + check: + (await ds.get(key)).tryGet() == bytes + + test "put update": + (await ds.put(key, otherBytes)).tryGet() + + test "get updated": + check: + (await ds.get(key)).tryGet() == otherBytes + + test "delete": + (await ds.delete(key)).tryGet() + + test "contains": + check: + not (await ds.contains(key)).tryGet() From 5e30230a9f20e26f66d228388406c248bf33b563 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 17:12:16 -0600 Subject: [PATCH 19/41] default implementation for close --- datastore/datastore.nim | 6 +++--- 1 file 
changed, 3 insertions(+), 3 deletions(-) diff --git a/datastore/datastore.nim b/datastore/datastore.nim index f441a4c..139ca29 100644 --- a/datastore/datastore.nim +++ b/datastore/datastore.nim @@ -29,11 +29,11 @@ method get*(self: Datastore, key: Key): Future[?!seq[byte]] {.base, locks: "unkn method put*(self: Datastore, key: Key, data: seq[byte]): Future[?!void] {.base, locks: "unknown".} = raiseAssert("Not implemented!") -method close*(self: Datastore): Future[?!void] {.base, locks: "unknown".} = - raiseAssert("Not implemented!") +method close*(self: Datastore): Future[?!void] {.base, async, locks: "unknown".} = + return success() method query*( self: Datastore, - query: Query): Future[QueryIter] = + query: Query): Future[QueryIter] {.gcsafe.} = raiseAssert("Not implemented!") From 2680789884d7caaaca71b2eacb9605240af08935 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 17:12:34 -0600 Subject: [PATCH 20/41] missing key should return appropriate error --- datastore/fsds.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/datastore/fsds.nim b/datastore/fsds.nim index 233bce0..f82b7b3 100644 --- a/datastore/fsds.nim +++ b/datastore/fsds.nim @@ -133,7 +133,7 @@ method get*(self: FSDatastore, key: Key): Future[?!seq[byte]] {.async.} = return failure "Path is protected!" if not fileExists(path): - return success(newSeq[byte]()) + return failure(newException(DatastoreKeyNotFound, "Key doesn't exist")) var file: File From 60e0ea5573341769b5b3ade650afff0832b1dc16 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 17:12:52 -0600 Subject: [PATCH 21/41] get rid of null datastore --- datastore/nullds.nim | 47 ---------------------------------- tests/datastore/testnullds.nim | 43 ------------------------------- 2 files changed, 90 deletions(-) delete mode 100644 datastore/nullds.nim delete mode 100644 tests/datastore/testnullds.nim diff --git a/datastore/nullds.nim b/datastore/nullds.nim deleted file mode 100644 index cdb7444..0000000 --- a/datastore/nullds.nim +++ /dev/null @@ -1,47 +0,0 @@ -import pkg/chronos -import pkg/questionable -import pkg/questionable/results -import pkg/upraises - -import ./datastore - -export datastore - -push: {.upraises: [].} - -type - NullDatastore* = ref object of Datastore - -proc new*(T: type NullDatastore): T = - T() - -method contains*( - self: NullDatastore, - key: Key): Future[?!bool] {.async, locks: "unknown".} = - - return success false - -method delete*( - self: NullDatastore, - key: Key): Future[?!void] {.async, locks: "unknown".} = - - return success() - -method get*( - self: NullDatastore, - key: Key): Future[?!(?seq[byte])] {.async, locks: "unknown".} = - - return success seq[byte].none - -method put*( - self: NullDatastore, - key: Key, - data: seq[byte]): Future[?!void] {.async, locks: "unknown".} = - - return success() - -iterator query*( - self: NullDatastore, - query: Query): Future[QueryResponse] = - - discard diff --git a/tests/datastore/testnullds.nim b/tests/datastore/testnullds.nim deleted file mode 100644 index 0af7a89..0000000 --- a/tests/datastore/testnullds.nim +++ /dev/null @@ -1,43 +0,0 @@ -import std/options - -import pkg/asynctest/unittest2 -import pkg/chronos -import pkg/stew/results - -import pkg/datastore/nullds - -suite "NullDatastore": - let - key = Key.init("a").get - ds = NullDatastore.new() - - test "new": - check: not ds.isNil - - test "put": - check: (await ds.put(key, @[1.byte])).isOk - - test "delete": - check: (await ds.delete(key)).isOk - - test "contains": - check: - 
(await ds.contains(key)).isOk - (await ds.contains(key)).get == false - - test "get": - check: - (await ds.get(key)).isOk - (await ds.get(key)).get.isNone - - test "query": - var - x = true - - for n in ds.query(Query.init(key)): - # `iterator query` for NullDatastore never yields so the following lines - # are not run (else the test would hang) - x = false - discard (await n) - - check: x From ec4015a404cdeaec56a104e686ce76e8bc59a291 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 17:13:11 -0600 Subject: [PATCH 22/41] proper query iterator interface --- datastore/query.nim | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/datastore/query.nim b/datastore/query.nim index d89c3f0..68e60a7 100644 --- a/datastore/query.nim +++ b/datastore/query.nim @@ -1,3 +1,6 @@ +import pkg/upraises +import pkg/chronos + import ./key type @@ -13,7 +16,15 @@ type sort*: SortOrder QueryResponse* = tuple[key: Key, data: seq[byte]] - QueryIter* = iterator(): QueryResponse {.closure.} + + GetNext* = proc(): Future[QueryResponse] {.upraises: [], gcsafe, closure.} + QueryIter* = object + finished: bool + next*: GetNext + +iterator items*(q: QueryIter): Future[QueryResponse] = + while not q.finished: + yield q.next() proc init*( T: type Query, From 7f9c319b98149ffcb3dc3c739565199446f69c23 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 17:13:20 -0600 Subject: [PATCH 23/41] export sqlds --- datastore/sql.nim | 2 ++ 1 file changed, 2 insertions(+) diff --git a/datastore/sql.nim b/datastore/sql.nim index e207daf..9c30c9e 100644 --- a/datastore/sql.nim +++ b/datastore/sql.nim @@ -1 +1,3 @@ import ./sql/sqliteds + +export sqliteds From c0f6e38fda64379180d80964b9e1ee220fa43bcb Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 17:13:34 -0600 Subject: [PATCH 24/41] cleanup tiered ds --- datastore/tieredds.nim | 29 ++++++++++++----------------- 1 file changed, 12 insertions(+), 17 deletions(-) diff --git a/datastore/tieredds.nim b/datastore/tieredds.nim index 6b2b50a..35ddad9 100644 --- a/datastore/tieredds.nim +++ b/datastore/tieredds.nim @@ -55,31 +55,26 @@ method delete*( method get*( self: TieredDatastore, - key: Key): Future[?!(?seq[byte])] {.async, locks: "unknown".} = + key: Key): Future[?!seq[byte]] {.async, locks: "unknown".} = var - bytesOpt: ?seq[byte] + bytes: seq[byte] for store in self.stores: - let - getRes = await store.get(key) + without bytes =? (await store.get(key)): + continue - if getRes.isErr: return getRes - - bytesOpt = getRes.get + if bytes.len <= 0: + continue # put found data into stores logically in front of the current store - if bytes =? 
bytesOpt: - for s in self.stores: - if s == store: break - let - putRes = await s.put(key, bytes) + for s in self.stores: + if s == store: break + if( + let res = await s.put(key, bytes); res.isErr): + return failure res.error - if putRes.isErr: return failure putRes.error.msg - - break - - return success bytesOpt + return success bytes method put*( self: TieredDatastore, From 1f97a923fa6b4eef10fe4b53119617b627606cfe Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 17:13:46 -0600 Subject: [PATCH 25/41] fix query basic test --- tests/datastore/testdatastore.nim | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/datastore/testdatastore.nim b/tests/datastore/testdatastore.nim index b7f3f83..7b83f7b 100644 --- a/tests/datastore/testdatastore.nim +++ b/tests/datastore/testdatastore.nim @@ -25,4 +25,5 @@ suite "Datastore (base)": test "query": expect Defect: - for n in ds.query(Query.init(key)): discard + let iter = await ds.query(Query.init(key)) + for n in iter: discard From c60ff35bdd00bbd38da63ba541dd49545d682171 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 17:14:06 -0600 Subject: [PATCH 26/41] cleanup tired db tests --- tests/datastore/testtieredds.nim | 140 +++++++++++++------------------ 1 file changed, 57 insertions(+), 83 deletions(-) diff --git a/tests/datastore/testtieredds.nim b/tests/datastore/testtieredds.nim index 46a87b2..d25c954 100644 --- a/tests/datastore/testtieredds.nim +++ b/tests/datastore/testtieredds.nim @@ -1,13 +1,13 @@ import std/options import std/os -import pkg/asynctest/unittest2 +import pkg/asynctest import pkg/chronos import pkg/stew/results -import ../../datastore/fsds -import ../../datastore/sql -import ../../datastore/tieredds +import pkg/datastore/fsds +import pkg/datastore/sql +import pkg/datastore/tieredds suite "TieredDatastore": # assumes tests/test_all is run from project root, e.g. 
with `nimble test` @@ -15,22 +15,26 @@ suite "TieredDatastore": bytes = @[1.byte, 2.byte, 3.byte] key = Key.init("a:b/c/d:e").get root = "tests" / "test_data" - rootAbs = getCurrentDir() / root + (path, _, _) = instantiationInfo(-1, fullPaths = true) # get this file's name + rootAbs = path.parentDir / root var ds1: SQLiteDatastore - ds2: FileSystemDatastore + ds2: FSDatastore setup: removeDir(rootAbs) require(not dirExists(rootAbs)) createDir(rootAbs) - ds1 = SQLiteDatastore.new(memory).get - ds2 = FileSystemDatastore.new(rootAbs).get + ds1 = SQLiteDatastore.new(Memory).get + ds2 = FSDatastore.new(rootAbs, depth = 5).get teardown: - if not ds1.isNil: ds1.close + if not ds1.isNil: + discard await ds1.close + ds1 = nil + removeDir(rootAbs) require(not dirExists(rootAbs)) @@ -48,106 +52,76 @@ suite "TieredDatastore": stores = @[ds1, ds2] check: - TieredDatastore.new(ds1, ds2).get.stores == stores - TieredDatastore.new([ds1, ds2]).get.stores == stores - TieredDatastore.new(@[ds1, ds2]).get.stores == stores + TieredDatastore.new(ds1, ds2).tryGet.stores == stores + TieredDatastore.new([ds1, ds2]).tryGet.stores == stores + TieredDatastore.new(@[ds1, ds2]).tryGet.stores == stores test "put": let ds = TieredDatastore.new(ds1, ds2).get - - assert (await ds1.get(key)).get.isNone - assert (await ds2.get(key)).get.isNone - - let putRes = await ds.put(key, bytes) check: putRes.isOk - (await ds1.get(key)).get.get == bytes - (await ds2.get(key)).get.get == bytes + (await ds1.get(key)).tryGet == bytes + (await ds2.get(key)).tryGet == bytes test "delete": let ds = TieredDatastore.new(ds1, ds2).get - putRes = await ds.put(key, bytes) - assert putRes.isOk - assert (await ds1.get(key)).get.get == bytes - assert (await ds2.get(key)).get.get == bytes - - let - delRes = await ds.delete(key) + (await ds.put(key, bytes)).tryGet + (await ds.delete(key)).tryGet check: - delRes.isOk - (await ds1.get(key)).get.isNone - (await ds2.get(key)).get.isNone + (await ds1.get(key)).tryGet.len == 0 + + expect DatastoreKeyNotFound: + discard (await ds2.get(key)).tryGet test "contains": let - ds = TieredDatastore.new(ds1, ds2).get - - assert not (await ds1.contains(key)).get - assert not (await ds2.contains(key)).get - - let - putRes = await ds.put(key, bytes) - - assert putRes.isOk - - let - containsRes = await ds.contains(key) + ds = TieredDatastore.new(ds1, ds2).tryGet check: - containsRes.isOk - containsRes.get - (await ds1.contains(key)).get - (await ds2.contains(key)).get + not (await ds1.contains(key)).tryGet + not (await ds2.contains(key)).tryGet + + (await ds.put(key, bytes)).tryGet + + check: + (await ds.contains(key)).tryGet + (await ds1.contains(key)).tryGet + (await ds2.contains(key)).tryGet test "get": var - ds = TieredDatastore.new(ds1, ds2).get - - assert (await ds1.get(key)).get.isNone - assert (await ds2.get(key)).get.isNone - - check: (await ds.get(key)).get.isNone - - let - putRes = await ds.put(key, bytes) - - assert putRes.isOk - - var - getRes = await ds.get(key) + ds = TieredDatastore.new(ds1, ds2).tryGet check: - getRes.isOk - getRes.get.isSome - getRes.get.get == bytes - (await ds1.get(key)).get.isSome - (await ds2.get(key)).get.isSome - (await ds1.get(key)).get.get == bytes - (await ds2.get(key)).get.get == bytes + not (await ds1.contains(key)).tryGet + not (await ds2.contains(key)).tryGet + not (await ds.contains(key)).tryGet - ds1.close - ds1 = SQLiteDatastore.new(memory).get - ds = TieredDatastore.new(ds1, ds2).get - - assert (await ds1.get(key)).get.isNone - assert (await ds2.get(key)).get.isSome - 
assert (await ds2.get(key)).get.get == bytes - - getRes = await ds.get(key) + (await ds.put(key, bytes)).tryGet check: - getRes.isOk - getRes.get.isSome - getRes.get.get == bytes - (await ds1.get(key)).get.isSome - (await ds1.get(key)).get.get == bytes + (await ds.get(key)).tryGet == bytes + (await ds1.get(key)).tryGet == bytes + (await ds2.get(key)).tryGet == bytes - # test "query": - # check: - # true + (await ds1.close()).tryGet + ds1 = nil + + ds1 = SQLiteDatastore.new(Memory).tryGet + ds = TieredDatastore.new(ds1, ds2).tryGet + + check: + not (await ds1.contains(key)).tryGet + (await ds2.get(key)).tryGet == bytes + (await ds.get(key)).tryGet == bytes + (await ds1.get(key)).tryGet == bytes + + # # test "query": + # # check: + # # true From 39258e5f732bfd55ce5a00021af80ce8c73aba35 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 17:14:15 -0600 Subject: [PATCH 27/41] remove null ds --- tests/testall.nim | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/testall.nim b/tests/testall.nim index 8a6b3ad..80f20b2 100644 --- a/tests/testall.nim +++ b/tests/testall.nim @@ -1,7 +1,6 @@ import ./datastore/testkey, ./datastore/testdatastore, - ./datastore/testnullds, ./datastore/testfsds, ./datastore/testsql, ./datastore/testtieredds From 9901e71a88ef82dde10e34a1741df60fceb38d70 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 17:23:19 -0600 Subject: [PATCH 28/41] add basic tests to tired ds --- tests/datastore/testtieredds.nim | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/tests/datastore/testtieredds.nim b/tests/datastore/testtieredds.nim index d25c954..ec67629 100644 --- a/tests/datastore/testtieredds.nim +++ b/tests/datastore/testtieredds.nim @@ -4,11 +4,43 @@ import std/os import pkg/asynctest import pkg/chronos import pkg/stew/results +import pkg/stew/byteutils import pkg/datastore/fsds import pkg/datastore/sql import pkg/datastore/tieredds +import ./basictests + +suite "Test Basic FSDatastore": + let + bytes = "some bytes".toBytes + otherBytes = "some other bytes".toBytes + key = Key.init("a:b/c/d:e").get + root = "tests" / "test_data" + (path, _, _) = instantiationInfo(-1, fullPaths = true) # get this file's name + rootAbs = path.parentDir / root + + var + ds1: SQLiteDatastore + ds2: FSDatastore + tiredDs: TieredDatastore + + setupAll: + removeDir(rootAbs) + require(not dirExists(rootAbs)) + createDir(rootAbs) + + ds1 = SQLiteDatastore.new(Memory).tryGet + ds2 = FSDatastore.new(rootAbs, depth = 5).tryGet + tiredDs = TieredDatastore.new(@[ds1, ds2]).tryGet + + teardownAll: + removeDir(rootAbs) + require(not dirExists(rootAbs)) + + basicStoreTests(tiredDs, key, bytes, otherBytes) + suite "TieredDatastore": # assumes tests/test_all is run from project root, e.g. 
with `nimble test` let From f9cfab5cbcc185819e7625b938d3b5e63dbac78e Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 17:29:32 -0600 Subject: [PATCH 29/41] commend out query for now --- datastore/sql/sqliteds.nim | 117 ++++++++++++++++++------------------- 1 file changed, 57 insertions(+), 60 deletions(-) diff --git a/datastore/sql/sqliteds.nim b/datastore/sql/sqliteds.nim index 521e5d1..4b79391 100644 --- a/datastore/sql/sqliteds.nim +++ b/datastore/sql/sqliteds.nim @@ -37,10 +37,11 @@ method contains*(self: SQLiteDatastore, key: Key): Future[?!bool] {.async.} = proc onData(s: RawStmtPtr) = exists = sqlite3_column_int64(s, ContainsStmtExistsCol.cint).bool - let - queryRes = self.db.containsStmt.query((key.id), onData) + if ( + let res = self.db.containsStmt.query((key.id), onData); + res.isErr): + return failure res.error.msg - if queryRes.isErr: return queryRes return success exists method delete*(self: SQLiteDatastore, key: Key): Future[?!void] {.async.} = @@ -54,84 +55,80 @@ method get*(self: SQLiteDatastore, key: Key): Future[?!seq[byte]] {.async.} = var bytes: seq[byte] - let - dataCol = self.db.getDataCol - proc onData(s: RawStmtPtr) = - bytes = dataCol() + bytes = self.db.getDataCol() - let - queryRes = self.db.getStmt.query((key.id), onData) - - if queryRes.isErr: - return failure queryRes.error.msg + if ( + let res = self.db.getStmt.query((key.id), onData); + res.isErr): + return failure res.error.msg return success bytes method put*(self: SQLiteDatastore, key: Key, data: seq[byte]): Future[?!void] {.async.} = return self.db.putStmt.exec((key.id, @data, timestamp())) -iterator query*( - self: SQLiteDatastore, - query: Query): Future[QueryResponse] = +# iterator query*( +# self: SQLiteDatastore, +# query: Query): Future[QueryResponse] = - let - queryStmt = QueryStmt.prepare( - self.db.env, QueryStmtStr).expect("should not fail") +# let +# queryStmt = QueryStmt.prepare( +# self.db.env, QueryStmtStr).expect("should not fail") - s = RawStmtPtr(queryStmt) +# s = RawStmtPtr(queryStmt) - defer: - discard sqlite3_reset(s) - discard sqlite3_clear_bindings(s) - s.dispose +# defer: +# discard sqlite3_reset(s) +# discard sqlite3_clear_bindings(s) +# s.dispose - let - v = sqlite3_bind_text(s, 1.cint, query.key.id.cstring, -1.cint, - SQLITE_TRANSIENT_GCSAFE) +# let +# v = sqlite3_bind_text(s, 1.cint, query.key.id.cstring, -1.cint, +# SQLITE_TRANSIENT_GCSAFE) - if not (v == SQLITE_OK): - raise (ref Defect)(msg: $sqlite3_errstr(v)) +# if not (v == SQLITE_OK): +# raise (ref Defect)(msg: $sqlite3_errstr(v)) - while true: - let - v = sqlite3_step(s) +# while true: +# let +# v = sqlite3_step(s) - case v - of SQLITE_ROW: - let - key = Key.init($sqlite3_column_text_not_null( - s, QueryStmtIdCol)).expect("should not fail") +# case v +# of SQLITE_ROW: +# let +# key = Key.init($sqlite3_column_text_not_null( +# s, QueryStmtIdCol)).expect("should not fail") - blob = sqlite3_column_blob(s, QueryStmtDataCol) +# blob = sqlite3_column_blob(s, QueryStmtDataCol) - # detect out-of-memory error - # see the conversion table and final paragraph of: - # https://www.sqlite.org/c3ref/column_blob.html - # see also https://www.sqlite.org/rescode.html +# # detect out-of-memory error +# # see the conversion table and final paragraph of: +# # https://www.sqlite.org/c3ref/column_blob.html +# # see also https://www.sqlite.org/rescode.html - # the "data" column can be NULL so in order to detect an out-of-memory - # error it is necessary to check that the result is a null pointer and - # that the result code 
is an error code - if blob.isNil: - let - v = sqlite3_errcode(sqlite3_db_handle(s)) +# # the "data" column can be NULL so in order to detect an out-of-memory +# # error it is necessary to check that the result is a null pointer and +# # that the result code is an error code +# if blob.isNil: +# let +# v = sqlite3_errcode(sqlite3_db_handle(s)) - if not (v in [SQLITE_OK, SQLITE_ROW, SQLITE_DONE]): - raise (ref Defect)(msg: $sqlite3_errstr(v)) +# if not (v in [SQLITE_OK, SQLITE_ROW, SQLITE_DONE]): +# raise (ref Defect)(msg: $sqlite3_errstr(v)) - let - dataLen = sqlite3_column_bytes(s, QueryStmtDataCol) - dataBytes = cast[ptr UncheckedArray[byte]](blob) - data = @(toOpenArray(dataBytes, 0, dataLen - 1)) - fut = newFuture[QueryResponse]() +# let +# dataLen = sqlite3_column_bytes(s, QueryStmtDataCol) +# dataBytes = cast[ptr UncheckedArray[byte]](blob) +# data = @(toOpenArray(dataBytes, 0, dataLen - 1)) +# fut = newFuture[QueryResponse]() - fut.complete((key, data)) - yield fut - of SQLITE_DONE: - break - else: - raise (ref Defect)(msg: $sqlite3_errstr(v)) +# fut.complete((key, data)) +# yield fut +# of SQLITE_DONE: +# break +# else: +# raise (ref Defect)(msg: $sqlite3_errstr(v)) proc new*( T: type SQLiteDatastore, From 17c4ff92a417a5867a1b9f72bfa4e205eca0e7d5 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 22:21:10 -0600 Subject: [PATCH 30/41] use proper os separator depending on OS --- datastore/fsds.nim | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/datastore/fsds.nim b/datastore/fsds.nim index f82b7b3..ef93b5f 100644 --- a/datastore/fsds.nim +++ b/datastore/fsds.nim @@ -29,11 +29,11 @@ const "/home", "/Users"] - Allowed* = - toSeq('A'..'Z') & - toSeq('a'..'z') & - toSeq('0'..'9') & - toSeq(['/', '_', '-']) + Allowed* = { + 'a'..'z', + 'A'..'Z', + DirSep, AltSep, + '_', '-'} type FSDatastore* = ref object of Datastore From 5f6acbd88d1414283b53fe7a8a571a946445c4f2 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 22:27:08 -0600 Subject: [PATCH 31/41] add numbers as allowed chars --- datastore/fsds.nim | 1 + 1 file changed, 1 insertion(+) diff --git a/datastore/fsds.nim b/datastore/fsds.nim index ef93b5f..455d7fa 100644 --- a/datastore/fsds.nim +++ b/datastore/fsds.nim @@ -32,6 +32,7 @@ const Allowed* = { 'a'..'z', 'A'..'Z', + '0'..'9', DirSep, AltSep, '_', '-'} From 1dfc30f64a06cfc110b7384e292fc3f11857333f Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 22:28:58 -0600 Subject: [PATCH 32/41] more valid chars --- datastore/fsds.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/datastore/fsds.nim b/datastore/fsds.nim index 455d7fa..a3d7d0d 100644 --- a/datastore/fsds.nim +++ b/datastore/fsds.nim @@ -34,7 +34,7 @@ const 'A'..'Z', '0'..'9', DirSep, AltSep, - '_', '-'} + '_', '-', '.'} type FSDatastore* = ref object of Datastore From 68f3f86cd9088d44a9561c5da0f6c1388951f8f1 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 22:31:24 -0600 Subject: [PATCH 33/41] fix invalid identation on nim 1.6.6 --- datastore/tieredds.nim | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/datastore/tieredds.nim b/datastore/tieredds.nim index 35ddad9..9bdb69f 100644 --- a/datastore/tieredds.nim +++ b/datastore/tieredds.nim @@ -71,7 +71,8 @@ method get*( for s in self.stores: if s == store: break if( - let res = await s.put(key, bytes); res.isErr): + let res = (await s.put(key, bytes)); + res.isErr): return failure res.error return success bytes From 
fc842663bb253ba12243ad80031e34070a911bef Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 22:40:01 -0600 Subject: [PATCH 34/41] implement close method for sqlite --- datastore/sql/sqliteds.nim | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/datastore/sql/sqliteds.nim b/datastore/sql/sqliteds.nim index 4b79391..24d23c6 100644 --- a/datastore/sql/sqliteds.nim +++ b/datastore/sql/sqliteds.nim @@ -68,6 +68,10 @@ method get*(self: SQLiteDatastore, key: Key): Future[?!seq[byte]] {.async.} = method put*(self: SQLiteDatastore, key: Key, data: seq[byte]): Future[?!void] {.async.} = return self.db.putStmt.exec((key.id, @data, timestamp())) +method close*(self: SQLiteDatastore): Future[?!void] {.async.} = + self.db.close() + return success() + # iterator query*( # self: SQLiteDatastore, # query: Query): Future[QueryResponse] = From 309ac94f21a8c836ed87a96a9d95d0ce2f05b2b2 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 22:40:46 -0600 Subject: [PATCH 35/41] test invalid char on windows --- datastore/fsds.nim | 1 + 1 file changed, 1 insertion(+) diff --git a/datastore/fsds.nim b/datastore/fsds.nim index a3d7d0d..22c73a2 100644 --- a/datastore/fsds.nim +++ b/datastore/fsds.nim @@ -63,6 +63,7 @@ template allowed*(path: string): bool = var notfound = true for s in path: if s.char notin Allowed: + echo "INVALID CHAR ", s notfound = false break From 29577b6dd7f94d28e89960a55ff72c1a4c86b9b4 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 23:01:10 -0600 Subject: [PATCH 36/41] paths are already sanitized, removing `allowed` --- datastore/fsds.nim | 29 ----------------------------- tests/datastore/testfsds.nim | 29 ----------------------------- 2 files changed, 58 deletions(-) diff --git a/datastore/fsds.nim b/datastore/fsds.nim index 22c73a2..aa91e9f 100644 --- a/datastore/fsds.nim +++ b/datastore/fsds.nim @@ -29,13 +29,6 @@ const "/home", "/Users"] - Allowed* = { - 'a'..'z', - 'A'..'Z', - '0'..'9', - DirSep, AltSep, - '_', '-', '.'} - type FSDatastore* = ref object of Datastore root*: string @@ -59,16 +52,6 @@ template path*(self: FSDatastore, key: Key): string = template checkProtected*(path: string): bool = path in ProtectedPaths -template allowed*(path: string): bool = - var notfound = true - for s in path: - if s.char notin Allowed: - echo "INVALID CHAR ", s - notfound = false - break - - notfound - template validDepth*(self: FSDatastore, key: Key): bool = key.len <= self.depth @@ -80,9 +63,6 @@ method contains*(self: FSDatastore, key: Key): Future[?!bool] {.async.} = let path = self.path(key) - if not path.allowed: - return failure "Path is contains invalid characters!" - if checkProtected(path): return failure "Path is protected!" @@ -96,9 +76,6 @@ method delete*(self: FSDatastore, key: Key): Future[?!void] {.async.} = let path = self.path(key) - if not path.allowed: - return failure "Path is contains invalid characters!" - if checkProtected(path): return failure "Path is protected!" @@ -128,9 +105,6 @@ method get*(self: FSDatastore, key: Key): Future[?!seq[byte]] {.async.} = let path = self.path(key) - if not path.allowed: - return failure "Path is contains invalid characters!" - if checkProtected(path): return failure "Path is protected!" @@ -177,9 +151,6 @@ method put*( let path = self.path(key) - if not path.allowed: - return failure "Path is contains invalid characters!" - if checkProtected(path): return failure "Path is protected!" 
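With the character allow-list removed, the guards left in FSDatastore at this point in
the series are the protected-path check and the depth limit shown above. Below is a
small sketch of the depth limit only (illustrative, not part of the patch); it reuses
the constructor arguments from the tests that follow and assumes Key.len counts a
key's namespaces, as the validDepth template implies:

  import pkg/stew/results
  import pkg/datastore/fsds

  let
    fs = FSDatastore.new(root = "/", depth = 3).tryGet()
    shallowKey = Key.init("/a/b").tryGet()      # two namespaces, within the limit
    deepKey = Key.init("/a/b/c/d").tryGet()     # four namespaces, over the limit

  doAssert fs.validDepth(shallowKey)
  doAssert not fs.validDepth(deepKey)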
diff --git a/tests/datastore/testfsds.nim b/tests/datastore/testfsds.nim index f9b3e94..78907e9 100644 --- a/tests/datastore/testfsds.nim +++ b/tests/datastore/testfsds.nim @@ -79,35 +79,6 @@ suite "Test Misc FSDatastore": (await fs.delete(key)).isErr (await fs.contains(key)).isErr - test "Test allowed()": - let - chars = [ - "/a*", "/a/b*", "/a/b$", "/a/b()", - "/a/b+", "/a/b$", "/d%", "/A/b@", - "/A!", "/b#/##"] - - for c in chars: - check not c.allowed - - test "Test valid key (path) names": - let - fs = FSDatastore.new(root = basePathAbs).tryGet() - bytes = "some bytes".toBytes - chars = - ["/a*", "/a/b*", "/a/b$", "/a/b()", - "/a/b+", "/a/b$", "/d%", "/A/b@", - "/A!", "/b#/##"] - - for c in chars: - let - key = Key.init(c).tryGet() - - check: - (await fs.put(key, bytes)).isErr - (await fs.get(key)).isErr - (await fs.delete(key)).isErr - (await fs.contains(key)).isErr - test "Test validDepth()": let fs = FSDatastore.new(root = "/", depth = 3).tryGet() From b4b90485ef0c94df2b86ee8fd17ddbbed1dbe316 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 23:11:47 -0600 Subject: [PATCH 37/41] wip --- datastore/fsds.nim | 1 + 1 file changed, 1 insertion(+) diff --git a/datastore/fsds.nim b/datastore/fsds.nim index aa91e9f..d1d9c4b 100644 --- a/datastore/fsds.nim +++ b/datastore/fsds.nim @@ -50,6 +50,7 @@ template path*(self: FSDatastore, key: Key): string = self.root / segments.joinPath() template checkProtected*(path: string): bool = + echo "PATH ", path path in ProtectedPaths template validDepth*(self: FSDatastore, key: Key): bool = From a045aa4ef1daa7c6e12b7f055e6d8172806ead10 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 23:11:57 -0600 Subject: [PATCH 38/41] close db --- tests/datastore/sql/testsqliteds.nim | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/datastore/sql/testsqliteds.nim b/tests/datastore/sql/testsqliteds.nim index a953611..6bf98f4 100644 --- a/tests/datastore/sql/testsqliteds.nim +++ b/tests/datastore/sql/testsqliteds.nim @@ -35,6 +35,7 @@ suite "Test Basic SQLiteDatastore": teardownAll: removeDir(basePathAbs) require(not dirExists(basePathAbs)) + (await dsDb.close()).tryGet() basicStoreTests(dsDb, key, bytes, otherBytes) @@ -63,6 +64,7 @@ suite "Test Read Only SQLiteDatastore": teardownAll: removeDir(basePathAbs) require(not dirExists(basePathAbs)) + (await dsDb.close()).tryGet() test "put": check: From bb387650abd6b31f52f5761792d987c3eb977dc1 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Mon, 19 Sep 2022 23:19:38 -0600 Subject: [PATCH 39/41] exclude windows from protected dirs checking --- datastore/fsds.nim | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/datastore/fsds.nim b/datastore/fsds.nim index d1d9c4b..0324ad9 100644 --- a/datastore/fsds.nim +++ b/datastore/fsds.nim @@ -22,12 +22,16 @@ const # we're forbidding this dirs from being # touched directly, but subdirectories # can still be touched/created - ProtectedPaths* = [ - "/", - "/usr", - "/etc", - "/home", - "/Users"] + ProtectedPaths* = + when doslikeFileSystem: + [] + else: + [ + "/", + "/usr", + "/etc", + "/home", + "/Users"] type FSDatastore* = ref object of Datastore @@ -50,7 +54,6 @@ template path*(self: FSDatastore, key: Key): string = self.root / segments.joinPath() template checkProtected*(path: string): bool = - echo "PATH ", path path in ProtectedPaths template validDepth*(self: FSDatastore, key: Key): bool = From a3781edc6a6fbb66269100150196f9c54f95db0d Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Tue, 20 
Sep 2022 09:16:56 -0600 Subject: [PATCH 40/41] remove path sanitization for now, address in upcoming pr --- datastore/fsds.nim | 32 ++++++++++---------------------- tests/datastore/testfsds.nim | 27 --------------------------- 2 files changed, 10 insertions(+), 49 deletions(-) diff --git a/datastore/fsds.nim b/datastore/fsds.nim index 0324ad9..5a17833 100644 --- a/datastore/fsds.nim +++ b/datastore/fsds.nim @@ -24,14 +24,17 @@ const # can still be touched/created ProtectedPaths* = when doslikeFileSystem: - [] + [ + "\\System32", + "\\System", + "\\Start Menu\\Programs"] else: - [ - "/", - "/usr", - "/etc", - "/home", - "/Users"] + [ + "/", + "/usr", + "/etc", + "/home", + "/Users"] type FSDatastore* = ref object of Datastore @@ -53,9 +56,6 @@ template path*(self: FSDatastore, key: Key): string = self.root / segments.joinPath() -template checkProtected*(path: string): bool = - path in ProtectedPaths - template validDepth*(self: FSDatastore, key: Key): bool = key.len <= self.depth @@ -67,9 +67,6 @@ method contains*(self: FSDatastore, key: Key): Future[?!bool] {.async.} = let path = self.path(key) - if checkProtected(path): - return failure "Path is protected!" - return success fileExists(path) method delete*(self: FSDatastore, key: Key): Future[?!void] {.async.} = @@ -80,9 +77,6 @@ method delete*(self: FSDatastore, key: Key): Future[?!void] {.async.} = let path = self.path(key) - if checkProtected(path): - return failure "Path is protected!" - try: removeFile(path) return success() @@ -109,9 +103,6 @@ method get*(self: FSDatastore, key: Key): Future[?!seq[byte]] {.async.} = let path = self.path(key) - if checkProtected(path): - return failure "Path is protected!" - if not fileExists(path): return failure(newException(DatastoreKeyNotFound, "Key doesn't exist")) @@ -155,9 +146,6 @@ method put*( let path = self.path(key) - if checkProtected(path): - return failure "Path is protected!" 
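# --- illustrative sketch, not part of this diff ---
# After this commit the write path reduces to: resolve the key to a path, create any
# missing parent directories, write the file, and surface OS errors as a Result
# failure (the try/except kept below). Roughly the same logic as a standalone,
# hypothetical writeValue helper, assuming the success/failure helpers from
# questionable/results used throughout this repo:
#
#   proc writeValue(path: string, data: seq[byte]): ?!void =
#     try:
#       createDir(parentDir(path))    # nested keys get their intermediate directories
#       writeFile(path, data)
#       success()
#     except CatchableError as e:
#       failure e
# ---------------------------------------------------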
- try: createDir(parentDir(path)) writeFile(path, data) diff --git a/tests/datastore/testfsds.nim b/tests/datastore/testfsds.nim index 78907e9..bf70b4e 100644 --- a/tests/datastore/testfsds.nim +++ b/tests/datastore/testfsds.nim @@ -52,33 +52,6 @@ suite "Test Misc FSDatastore": removeDir(basePathAbs) require(not dirExists(basePathAbs)) - test "Test checkProtected()": - let - fs = FSDatastore.new(root = "/").tryGet() - - for p in ProtectedPaths: - if p == "/": continue - let - key = Key.init(p).tryGet() - - check: - fs.path(key).checkProtected() - - test "Test protected paths": - let - fs = FSDatastore.new(root = "/").tryGet() - - for p in ProtectedPaths: - if p == "/": continue - let - key = Key.init(p).tryGet() - - check: - (await fs.put(key, bytes)).isErr - (await fs.get(key)).isErr - (await fs.delete(key)).isErr - (await fs.contains(key)).isErr - test "Test validDepth()": let fs = FSDatastore.new(root = "/", depth = 3).tryGet() From e6430b6b857d2182ecc158f6a3fe0c77f2cf376e Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Tue, 20 Sep 2022 09:21:41 -0600 Subject: [PATCH 41/41] close db --- datastore/fsds.nim | 23 ----------------------- datastore/sql/sqliteds.nim | 2 -- tests/datastore/basictests.nim | 2 -- tests/datastore/sql/testsqliteds.nim | 8 +++++--- tests/datastore/sql/testsqlitedsdb.nim | 3 +++ tests/datastore/testfsds.nim | 1 - 6 files changed, 8 insertions(+), 31 deletions(-) diff --git a/datastore/fsds.nim b/datastore/fsds.nim index 5a17833..9a5b794 100644 --- a/datastore/fsds.nim +++ b/datastore/fsds.nim @@ -1,6 +1,4 @@ import std/os -import std/sequtils -import std/strutils import std/options import pkg/chronos @@ -15,27 +13,6 @@ export datastore push: {.upraises: [].} -const - # TODO: Add more dirs from relevant OSs - - # Paths should be matched exactly, i.e. 
- # we're forbidding this dirs from being - # touched directly, but subdirectories - # can still be touched/created - ProtectedPaths* = - when doslikeFileSystem: - [ - "\\System32", - "\\System", - "\\Start Menu\\Programs"] - else: - [ - "/", - "/usr", - "/etc", - "/home", - "/Users"] - type FSDatastore* = ref object of Datastore root*: string diff --git a/datastore/sql/sqliteds.nim b/datastore/sql/sqliteds.nim index 24d23c6..05d38f2 100644 --- a/datastore/sql/sqliteds.nim +++ b/datastore/sql/sqliteds.nim @@ -1,11 +1,9 @@ -import std/os import std/times import pkg/chronos import pkg/questionable import pkg/questionable/results import pkg/sqlite3_abi -import pkg/stew/byteutils from pkg/stew/results as stewResults import isErr import pkg/upraises diff --git a/tests/datastore/basictests.nim b/tests/datastore/basictests.nim index ae11e93..28819f2 100644 --- a/tests/datastore/basictests.nim +++ b/tests/datastore/basictests.nim @@ -1,10 +1,8 @@ import std/options -import std/os import pkg/asynctest import pkg/chronos import pkg/stew/results -import pkg/stew/byteutils import pkg/datastore diff --git a/tests/datastore/sql/testsqliteds.nim b/tests/datastore/sql/testsqliteds.nim index 6bf98f4..09ef08b 100644 --- a/tests/datastore/sql/testsqliteds.nim +++ b/tests/datastore/sql/testsqliteds.nim @@ -1,4 +1,3 @@ -import std/algorithm import std/options import std/os @@ -33,9 +32,10 @@ suite "Test Basic SQLiteDatastore": dsDb = SQLiteDatastore.new(path = dbPathAbs).tryGet() teardownAll: + (await dsDb.close()).tryGet() + removeDir(basePathAbs) require(not dirExists(basePathAbs)) - (await dsDb.close()).tryGet() basicStoreTests(dsDb, key, bytes, otherBytes) @@ -62,9 +62,11 @@ suite "Test Read Only SQLiteDatastore": readOnlyDb = SQLiteDatastore.new(path = dbPathAbs, readOnly = true).tryGet() teardownAll: + (await dsDb.close()).tryGet() + (await readOnlyDb.close()).tryGet() + removeDir(basePathAbs) require(not dirExists(basePathAbs)) - (await dsDb.close()).tryGet() test "put": check: diff --git a/tests/datastore/sql/testsqlitedsdb.nim b/tests/datastore/sql/testsqlitedsdb.nim index 68cbfbd..0ef737f 100644 --- a/tests/datastore/sql/testsqlitedsdb.nim +++ b/tests/datastore/sql/testsqlitedsdb.nim @@ -101,6 +101,9 @@ suite "Test SQLite Datastore DB operations": flags = SQLITE_OPEN_READONLY).tryGet() teardownAll: + dsDb.close() + readOnlyDb.close() + removeDir(basePathAbs) require(not dirExists(basePathAbs)) diff --git a/tests/datastore/testfsds.nim b/tests/datastore/testfsds.nim index bf70b4e..c0ae2ae 100644 --- a/tests/datastore/testfsds.nim +++ b/tests/datastore/testfsds.nim @@ -1,4 +1,3 @@ -import std/algorithm import std/options import std/os
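Taken together, the last few commits settle on an explicit lifecycle for the
SQLite-backed store: create it, use it, and close it before its backing directory is
removed. A condensed sketch of that pattern (illustrative, not part of the patch
series); the roundTrip proc is hypothetical and its key/bytes/path arguments stand in
for the fixtures set up in the suites above:

  import pkg/chronos
  import pkg/stew/results
  import pkg/datastore/sql

  proc roundTrip(dbPathAbs: string, key: Key, bytes: seq[byte]) {.async.} =
    let db = SQLiteDatastore.new(path = dbPathAbs).tryGet()
    try:
      (await db.put(key, bytes)).tryGet()
      doAssert (await db.get(key)).tryGet() == bytes
    finally:
      (await db.close()).tryGet()   # release the handle before removing the directory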