initial implementation

This commit is contained in:
Michael Bradley, Jr 2022-05-11 10:50:05 -05:00 committed by Michael Bradley
parent 45ddb0e3b7
commit a9b00efbd6
24 changed files with 2833 additions and 0 deletions

5
.editorconfig Normal file
View File

@ -0,0 +1,5 @@
[*]
indent_style = space
insert_final_newline = true
indent_size = 2
trim_trailing_whitespace = true

1
.gitattributes vendored Normal file
View File

@ -0,0 +1 @@
* text=auto eol=lf

137
.github/workflows/tests.yml vendored Normal file
View File

@ -0,0 +1,137 @@
name: Tests

on:
  push:
    branches:
      - master
  pull_request:
  workflow_dispatch:

jobs:
  tests:
    strategy:
      fail-fast: false
      matrix:
        cache_nonce: [ 0 ]
        nim_version: [ 1.2.18, 1.4.8, 1.6.6 ]
        platform:
          - {
              icon: 🐧,
              label: Linux,
              os: ubuntu,
              shell: bash --noprofile --norc -eo pipefail
            }
          - {
              icon: 🍎,
              label: macOS,
              os: macos,
              shell: bash --noprofile --norc -eo pipefail
            }
          - {
              icon: 🏁,
              label: Windows,
              os: windows,
              shell: msys2
            }
    name: ${{ matrix.platform.icon }} ${{ matrix.platform.label }} - Nim v${{ matrix.nim_version }}
    runs-on: ${{ matrix.platform.os }}-latest
    defaults:
      run:
        shell: ${{ matrix.platform.shell }} {0}
    steps:
      # - name: Install tools and libraries via APT (Linux)
      #   if: matrix.platform.os == 'ubuntu'
      #   run: |
      #     sudo apt update
      #     sudo apt install -y \
      #       ...

      - name: Install tools and libraries via Homebrew (macOS)
        if: matrix.platform.os == 'macos'
        run: |
          brew update
          brew install \
            findutils

      - name: Install tools and libraries via MSYS2 (Windows)
        if: matrix.platform.os == 'windows'
        uses: msys2/setup-msys2@v2
        with:
          msystem: UCRT64
          install: >
            base-devel
            git
            mingw-w64-ucrt-x86_64-toolchain

      - name: Checkout sources from GitHub
        uses: actions/checkout@v2
        with:
          submodules: true

      # `::set-output` is deprecated by GitHub Actions; step outputs are
      # written to the file named by $GITHUB_OUTPUT instead.
      - name: Calculate cache member paths
        id: calc-paths
        run: |
          if [[ ${{ matrix.platform.os }} = windows ]]; then
            echo "bash_env=$(cygpath -m "${HOME}")/.bash_env" >> "${GITHUB_OUTPUT}"
            echo "choosenim=$(cygpath -m "${USERPROFILE}")/.choosenim" >> "${GITHUB_OUTPUT}"
            echo "nimble=$(cygpath -m "${HOME}")/.nimble" >> "${GITHUB_OUTPUT}"
          else
            echo "bash_env=${HOME}/.bash_env" >> "${GITHUB_OUTPUT}"
            echo "choosenim=${HOME}/.choosenim" >> "${GITHUB_OUTPUT}"
            echo "nimble=${HOME}/.nimble" >> "${GITHUB_OUTPUT}"
          fi

      - name: Restore choosenim and Nim tooling from cache
        id: choosenim-nim-tooling-cache
        uses: actions/cache@v2
        with:
          path: |
            ${{ steps.calc-paths.outputs.bash_env }}
            ${{ steps.calc-paths.outputs.choosenim }}
            ${{ steps.calc-paths.outputs.nimble }}/bin
          key: ${{ matrix.platform.os }}-nim_version:${{ matrix.nim_version }}-cache_nonce:${{ matrix.cache_nonce }}

      - name: Install choosenim and Nim tooling
        if: steps.choosenim-nim-tooling-cache.outputs.cache-hit != 'true'
        run: |
          mkdir -p "${HOME}/Downloads"
          cd "${HOME}/Downloads"
          curl https://nim-lang.org/choosenim/init.sh -sSf -O
          chmod +x init.sh
          if [[ ${{ matrix.platform.os }} = windows ]]; then
            mkdir -p "$(cygpath "${USERPROFILE}")/.nimble/bin"
          fi
          CHOOSENIM_CHOOSE_VERSION=${{ matrix.nim_version }} ./init.sh -y
          if [[ ${{ matrix.platform.os }} = windows ]]; then
            mv "$(cygpath "${USERPROFILE}")/.nimble" "${HOME}/"
            # intention is to rely only on libs provided by the OS and MSYS2 env
            rm -rf "${HOME}/.nimble/bin/"*.dll
            rm -rf "${HOME}/.nimble/bin/"*.pem
          fi
          echo 'export NIMBLE_DIR="${HOME}/.nimble"' >> "${HOME}/.bash_env"
          echo 'export PATH="${NIMBLE_DIR}/bin:${PATH}"' >> "${HOME}/.bash_env"

      - name: Install project dependencies
        run: |
          source "${HOME}/.bash_env"
          cd "${NIMBLE_DIR}/bin"
          # delete broken symlinks, which can arise because e.g. the cache
          # restored a symlink that points to an executable within
          # ../pkgs/foo-1.2.3/ but the project's .nimble file has been updated
          # to install foo-#head; in the case of a broken symlink, nimble's
          # auto-overwrite fails
          if [[ ${{ matrix.platform.os }} = macos ]]; then
            gfind . -xtype l -delete
          else
            find . -xtype l -delete
          fi
          cd -
          nimble --accept install

      - name: Build and run tests
        run: |
          source "${HOME}/.bash_env"
          nim --version
          echo
          nimble --verbose test

11
.gitignore vendored Normal file
View File

@ -0,0 +1,11 @@
*
!*/
!*.*
*.sqlite3*
.DS_Store
.idea
.vscode
coverage
datastore.nims
nimcache
TODO

201
LICENSE-APACHEv2 Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2018 Status Research & Development GmbH
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

21
LICENSE-MIT Normal file
View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2022 Status Research & Development GmbH
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -1 +1,23 @@
# nim-datastore
[![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
[![Stability: experimental](https://img.shields.io/badge/stability-experimental-orange.svg)](#stability)
[![Tests](https://github.com/status-im/nim-datastore/actions/workflows/tests.yml/badge.svg?branch=master)](https://github.com/status-im/nim-datastore/actions/workflows/tests.yml?query=workflow%3ATests+branch%3Amaster)
Simple, unified API for multiple data stores.
Inspired by the Python library [datastore](https://github.com/datastore/datastore).
## Stability
nim-datastore is currently marked as experimental and may be subject to breaking changes across any version bump until it is marked as stable.
## License
nim-datastore is licensed and distributed under either of:
* Apache License, Version 2.0: [LICENSE-APACHEv2](LICENSE-APACHEv2) or https://opensource.org/licenses/Apache-2.0
* MIT license: [LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT
at your option. The contents of this repository may not be copied, modified, or distributed except according to those terms.

2
config.nims Normal file
View File

@ -0,0 +1,2 @@
--threads:on
--tlsEmulation:off

3
datastore.nim Normal file
View File

@ -0,0 +1,3 @@
import ./datastore/datastore
export datastore

51
datastore.nimble Normal file
View File

@ -0,0 +1,51 @@
mode = ScriptMode.Verbose

packageName = "datastore"
version = "0.0.1"
author = "Status Research & Development GmbH"
description = "Simple, unified API for multiple data stores"
license = "Apache License 2.0 or MIT"

requires "nim >= 1.2.0",
  "questionable >= 0.10.3 & < 0.11.0",
  "sqlite3_abi",
  "stew",
  "unittest2",
  "upraises >= 0.1.0 & < 0.2.0"

task coverage, "generates code coverage report":
  # lcov is required to capture/filter coverage data; fail fast with a
  # visible banner when it is missing.
  var (output, exitCode) = gorgeEx("which lcov")
  if exitCode != 0:
    echo ""
    echo " ************************** ⛔️ ERROR ⛔️ **************************"
    echo " ** **"
    echo " ** ERROR: lcov not found, it must be installed to run code **"
    echo " ** coverage locally **"
    echo " ** **"
    echo " *****************************************************************"
    echo ""
    quit 1

  # On macOS, `gcov` may actually be Apple's llvm-cov, which emulates an
  # old gcov; warn that local numbers can differ from CI's.
  (output, exitCode) = gorgeEx("gcov --version")
  if output.contains("Apple LLVM"):
    echo ""
    echo " ************************* ⚠️ WARNING ⚠️ *************************"
    echo " ** **"
    echo " ** WARNING: Using Apple's llvm-cov in place of gcov, which **"
    echo " ** emulates an old version of gcov (4.2.0) and therefore **"
    echo " ** coverage results will differ than those on CI (which **"
    echo " ** uses a much newer version of gcov). **"
    echo " ** **"
    echo " *****************************************************************"
    echo ""

  # Build and run the test suite with gcov instrumentation enabled.
  exec("nimble --verbose test --verbosity:0 --hints:off --lineDir:on --nimcache:nimcache --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage")
  # Remove generated C sources so lcov only sees the .nim files.
  exec("cd nimcache; rm *.c; cd ..")
  mkDir("coverage")
  exec("lcov --capture --directory nimcache --output-file coverage/coverage.info")
  exec("$(which bash) -c 'shopt -s globstar; ls $(pwd)/datastore/{*,**/*}.nim'")
  # Restrict the report to this project's sources under datastore/.
  exec("$(which bash) -c 'shopt -s globstar; lcov --extract coverage/coverage.info $(pwd)/datastore/{*,**/*}.nim --output-file coverage/coverage.f.info'")
  echo "Generating HTML coverage report"
  exec("genhtml coverage/coverage.f.info --output-directory coverage/report")
  echo "Opening HTML coverage report in browser..."
  # NOTE(review): `open` is macOS-specific — confirm intended platforms.
  exec("open coverage/report/index.html")

43
datastore/datastore.nim Normal file
View File

@ -0,0 +1,43 @@
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import ./key
export key
push: {.upraises: [].}
type
  Datastore* = ref object of RootObj
    ## Abstract base type for all datastore backends; the methods below
    ## must be overridden by concrete implementations.

method contains*(
  self: Datastore,
  key: Key): ?!bool {.base, locks: "unknown".} =
  ## Reports whether `key` is present in the store (must be overridden).

  raiseAssert("Not implemented!")

method delete*(
  self: Datastore,
  key: Key): ?!void {.base, locks: "unknown".} =
  ## Removes the value stored at `key` (must be overridden).

  raiseAssert("Not implemented!")

method get*(
  self: Datastore,
  key: Key): ?!(?seq[byte]) {.base, locks: "unknown".} =
  ## Returns the bytes stored at `key`, or `none` when absent
  ## (must be overridden).

  raiseAssert("Not implemented!")

method put*(
  self: Datastore,
  key: Key,
  data: openArray[byte]): ?!void {.base, locks: "unknown".} =
  ## Stores `data` at `key` (must be overridden).

  raiseAssert("Not implemented!")

# method query*(
#   self: Datastore,
#   query: ...): ?!(?...) {.base, locks: "unknown".} =
#
#   raiseAssert("Not implemented!")

View File

@ -0,0 +1,159 @@
import std/os
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import ./datastore
export datastore
push: {.upraises: [].}
type
  FileSystemDatastore* = ref object of Datastore
    ## Datastore backed by the local filesystem: each key maps to one
    ## file (with extension `objExt`) beneath `root`.
    root: string # absolute path of the datastore's root directory

const
  objExt* = ".dsobject"

proc new*(
  T: type FileSystemDatastore,
  root = "data"): ?!T =
  ## Creates a FileSystemDatastore rooted at `root`; a relative `root` is
  ## resolved against the current working directory. The directory tree
  ## is created if needed; fails when it cannot be created.

  try:
    let
      root = if root.isAbsolute: root
             else: getCurrentDir() / root

    createDir(root)
    success T(root: root)
  except IOError as e:
    failure e
  except OSError as e:
    failure e

proc root*(self: FileSystemDatastore): string =
  ## Absolute path of the datastore's root directory.
  self.root

proc path*(
  self: FileSystemDatastore,
  key: Key): string =
  ## Computes the on-disk path for `key`: each namespace contributes one
  ## path segment (a field and its value become nested directories), and
  ## `objExt` is appended to the joined path.

  var
    segments: seq[string]

  for ns in key:
    without field =? ns.field:
      # bare value: single segment
      segments.add ns.value
      continue

    segments.add(field / ns.value)

  # is it problematic that per this logic Key(/a:b) evaluates to the same path
  # as Key(/a/b)? may need to check if/how other Datastore implementations
  # distinguish them

  self.root / joinPath(segments) & objExt
method contains*(
  self: FileSystemDatastore,
  key: Key): ?!bool {.locks: "unknown".} =
  ## Reports whether an object file for `key` exists on disk.

  let objPath = self.path(key)
  success objPath.fileExists

method delete*(
  self: FileSystemDatastore,
  key: Key): ?!void {.locks: "unknown".} =
  ## Removes the object file for `key`; a missing file is not an error.

  # NOTE: removing a now-empty directory might lead to surprising behavior
  # depending on what the user specified as the `root` of the
  # FileSystemDatastore, so until further consideration, empty directories
  # are left in place.
  try:
    removeFile self.path(key)
    success()
  except OSError as e:
    failure e
method get*(
  self: FileSystemDatastore,
  key: Key): ?!(?seq[byte]) {.locks: "unknown".} =
  ## Reads and returns the bytes stored for `key`, or `none` when no
  ## object file exists. Fails when the file cannot be opened or when
  ## fewer bytes than the file's reported size can be read.

  # to support finer control of memory allocation, maybe could/should change
  # the signature of `get` so that it has a 3rd parameter
  # `bytes: var openArray[byte]` and return type `?!bool`; this variant with
  # return type `?!(?seq[byte])` would be a special case (convenience method)
  # calling the former after allocating a seq with size automatically
  # determined via `getFileSize`

  let
    path = self.path(key)
    exists = ? self.contains(key)

  if exists:
    var
      file: File

    if not file.open(path):
      return failure "unable to open file: " & path
    else:
      try:
        let
          size = file.getFileSize

        var
          bytes: seq[byte]

        if size > 0:
          newSeq(bytes, size)
          let
            bytesRead = file.readBytes(bytes, 0, size)

          # a short read indicates truncation/concurrent modification
          if bytesRead < size:
            return failure $bytesRead & " bytes were read from " & path &
              " but " & $size & " bytes were expected"

        # empty file yields some(@[]) rather than none
        success bytes.some
      except IOError as e:
        failure e
      finally:
        file.close
  else:
    success seq[byte].none
method put*(
  self: FileSystemDatastore,
  key: Key,
  data: openArray[byte]): ?!void {.locks: "unknown".} =
  ## Writes `data` to the object file for `key`, creating any missing
  ## parent directories; empty `data` produces an empty file.

  let objPath = self.path(key)

  try:
    objPath.parentDir.createDir

    if data.len == 0:
      writeFile(objPath, "")
    else:
      writeFile(objPath, data)

    success()
  except IOError as e:
    failure e
  except OSError as e:
    failure e

# method query*(
#   self: FileSystemDatastore,
#   query: ...): ?!(?...) {.locks: "unknown".} =
#
#   success ....none

375
datastore/key.nim Normal file
View File

@ -0,0 +1,375 @@
import std/algorithm
import std/hashes
import std/oids
import std/sequtils
import std/strutils
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
export hashes
push: {.upraises: [].}
type
  Namespace* = object
    ## One `field:value` (or bare `value`) component of a `Key`.
    field: ?string # optional field name; none when the namespace is a bare value
    value: string  # non-empty; must not contain the delimiter or separator

  Key* = object
    ## An ordered sequence of namespaces, e.g. `/a:b/c`.
    namespaces: seq[Namespace]

const
  delimiter = ":"
  separator = "/"

# TODO: operator/s for combining string|Namespace,string|Namespace
# TODO: lifting from ?![Namespace|Key] for various ops

proc init*(
  T: type Namespace,
  field, value: string): ?!T =
  ## Constructs a Namespace from separate `field` and `value` strings.
  ## `value` must be non-empty, not all whitespace, and free of the
  ## delimiter and separator; the same holds for a non-empty `field`.
  ## An empty `field` yields a bare-value namespace.

  if value == "":
    return failure "value string must not be empty"

  if value.strip == "":
    return failure "value string must not be all whitespace"

  if value.contains(delimiter):
    return failure "value string must not contain delimiter \"" &
      delimiter & "\""

  if value.contains(separator):
    return failure "value string must not contain separator \"" &
      separator & "\""

  if field != "":
    if field.strip == "":
      return failure "field string must not be all whitespace"

    if field.contains(delimiter):
      return failure "field string must not contain delimiter \"" &
        delimiter & "\""

    if field.contains(separator):
      return failure "field string must not contain separator \"" &
        separator & "\""

    success T(field: field.some, value: value)
  else:
    success T(field: string.none, value: value)

proc init*(
  T: type Namespace,
  id: string): ?!T =
  ## Parses a Namespace from an id string of the form "[field]:[value]"
  ## or a bare "value".

  if id == "":
    return failure "id string must not be empty"

  if id.strip == "":
    return failure "id string must not be all whitespace"

  if id.contains(separator):
    return failure "id string must not contain separator \"" & separator & "\""

  if id == delimiter:
    return failure "value in id string \"[field]" & delimiter &
      "[value]\" must not be empty"

  let
    s = id.split(delimiter)

  if s.len > 2:
    return failure "id string must not contain more than one delimiter \"" &
      delimiter & "\""

  var
    field: ?string
    value: string

  if s.len == 1:
    # no delimiter: the whole id is the value
    value = s[0]
  else:
    value = s[1]

    if value == "":
      return failure "value in id string \"[field]" & delimiter &
        "[value]\" must not be empty"

    if value.strip == "":
      return failure "value in id string \"[field]" & delimiter &
        "[value]\" must not be all whitespace"
    else:
      let
        f = s[0]

      if f != "":
        if f.strip == "":
          return failure "field in id string \"[field]" & delimiter &
            "[value]\" must not be all whitespace"
        else:
          field = f.some

  success T(field: field, value: value)

proc value*(self: Namespace): string =
  ## The namespace's value part.
  self.value

proc field*(self: Namespace): ?string =
  ## The namespace's optional field part.
  self.field

proc `type`*(self: Namespace): ?string =
  ## Alias for `field`.
  self.field

proc kind*(self: Namespace): ?string =
  ## Alias for `type`/`field`.
  self.`type`

proc id*(self: Namespace): string =
  ## Canonical "field:value" (or bare "value") representation.
  if field =? self.field: field & delimiter & self.value
  else: self.value

proc `$`*(namespace: Namespace): string =
  ## Debug string, e.g. `Namespace(field:value)`.
  "Namespace(" & namespace.id & ")"
proc init*(
  T: type Key,
  namespaces: varargs[Namespace]): ?!T =
  ## Constructs a Key from one or more Namespaces; fails on empty input.

  if namespaces.len == 0:
    failure "namespaces must contain at least one Namespace"
  else:
    success T(namespaces: @namespaces)

proc init*(
  T: type Key,
  namespaces: varargs[string]): ?!T =
  ## Constructs a Key from one or more namespace id strings, validating
  ## each one via `Namespace.init`.

  if namespaces.len == 0:
    failure "namespaces must contain at least one Namespace id string"
  else:
    var
      nss: seq[Namespace]

    for s in namespaces:
      let
        nsRes = Namespace.init(s)

      # if `without ns =? Namespace.init(s), e:` is used `e` is nil in the body
      # at runtime, why?
      without ns =? nsRes:
        return failure "namespaces contains an invalid Namespace: " &
          nsRes.error.msg

      nss.add ns

    success T(namespaces: nss)

proc init*(
  T: type Key,
  id: string): ?!T =
  ## Parses a Key from a "/"-separated id string, e.g. "/a:b/c"; empty
  ## segments are ignored.

  if id == "":
    return failure "id string must contain at least one Namespace"

  if id.strip == "":
    return failure "id string must not be all whitespace"

  let
    nsStrs = id.split(separator).filterIt(it != "")

  if nsStrs.len == 0:
    return failure "id string must not contain only one or more separator " &
      "\"" & separator & "\""

  let
    keyRes = Key.init(nsStrs)

  # if `without key =? Key.init(nsStrs), e:` is used `e` is nil in the body
  # at runtime, why?
  without key =? keyRes:
    return failure "id string contains an invalid Namespace:" &
      keyRes.error.msg.split(":")[1..^1].join("").replace("\"\"", "\":\"")

  success key

proc namespaces*(self: Key): seq[Namespace] =
  ## The key's namespaces, in order.
  self.namespaces

proc list*(self: Key): seq[Namespace] =
  ## Alias for `namespaces`.
  self.namespaces

proc random*(T: type Key): string =
  ## A random id string (a fresh OID) usable as a key component.
  $genOid()

template `[]`*(
  key: Key,
  x: auto): auto =
  ## Indexes into the key's namespaces (ints, BackwardsIndex, slices).
  key.namespaces[x]

proc len*(self: Key): int =
  ## Number of namespaces in the key.
  self.namespaces.len

iterator items*(key: Key): Namespace {.inline.} =
  ## Yields the key's namespaces in order.
  var
    i = 0

  while i < key.len:
    yield key[i]
    inc i

proc reversed*(self: Key): Key =
  ## A key with the namespaces in reverse order.
  Key(namespaces: self.namespaces.reversed)

proc reverse*(self: Key): Key =
  ## Alias for `reversed`.
  self.reversed

proc name*(self: Key): string =
  ## The value of the key's last namespace.
  self[^1].value

proc `type`*(self: Key): ?string =
  ## The field of the key's last namespace, if any.
  self[^1].field

proc kind*(self: Key): ?string =
  ## Alias for `type`.
  self.`type`
proc instance*(
  self: Key,
  value: Namespace): Key =
  ## Returns a key whose last namespace is an "instance": the last
  ## namespace's field (or its value, promoted to the field position)
  ## paired with `value`'s value.

  let
    last = self[^1]

    inst =
      if last.field.isSome:
        @[Namespace(field: last.field, value: value.value)]
      else:
        @[Namespace(field: last.value.some, value: value.value)]

    namespaces =
      if self.namespaces.len == 1:
        inst
      else:
        self.namespaces[0..^2] & inst

  Key(namespaces: namespaces)

proc instance*(self, value: Key): Key =
  ## Instance of `self` using the last namespace of `value`.
  self.instance(value[^1])

proc instance*(self, value: Namespace): Key =
  ## Instance of a single-namespace key built from `self`.
  Key(namespaces: @[self]).instance(value)

proc instance*(
  self: Namespace,
  value: Key): Key =
  ## Instance of `self` using the last namespace of `value`.
  self.instance(value[^1])

proc instance*(
  self: Key,
  id: string): ?!Key =
  ## Instance of `self` from a parsed key id string; fails when `id` is
  ## not a valid key.

  without key =? Key.init(id), e:
    return failure e

  success self.instance(key)

proc isTopLevel*(self: Key): bool =
  ## Whether the key consists of a single namespace.
  self.len == 1

proc parent*(self: Key): ?!Key =
  ## The key with its last namespace dropped; fails for top-level keys.
  if self.isTopLevel:
    failure "key has no parent"
  else:
    success Key(namespaces: self.namespaces[0..^2])

proc parent*(self: ?!Key): ?!Key =
  ## `parent` lifted over a `?!Key`.
  let
    key = ? self

  key.parent

proc path*(self: Key): ?!Key =
  ## The parent key extended with the last namespace's field (as a bare
  ## value) when present, otherwise just the parent; fails for top-level
  ## keys (no parent).

  let
    parent = ? self.parent

  without kind =? self[^1].kind:
    return success parent

  success Key(namespaces: parent.namespaces & @[Namespace(value: kind)])

proc path*(self: ?!Key): ?!Key =
  ## `path` lifted over a `?!Key`.
  let
    key = ? self

  key.path

proc child*(
  self: Key,
  ns: Namespace): Key =
  ## The key extended with one namespace.
  Key(namespaces: self.namespaces & @[ns])

proc `/`*(
  self: Key,
  ns: Namespace): Key =
  ## Operator alias for `child`.
  self.child(ns)

proc child*(
  self: Key,
  namespaces: varargs[Namespace]): Key =
  ## The key extended with several namespaces.
  Key(namespaces: self.namespaces & @namespaces)

proc child*(self, key: Key): Key =
  ## The key extended with all of `key`'s namespaces.
  Key(namespaces: self.namespaces & key.namespaces)

proc `/`*(self, key: Key): Key =
  ## Operator alias for `child`.
  self.child(key)

proc child*(
  self: Key,
  keys: varargs[Key]): Key =
  ## The key extended with the namespaces of several keys, in order.
  Key(namespaces: self.namespaces & concat(keys.mapIt(it.namespaces)))

proc child*(
  self: Key,
  ids: varargs[string]): ?!Key =
  ## The key extended with keys parsed from id strings (empty strings
  ## are ignored); fails when any id is invalid.

  let
    ids = ids.filterIt(it != "")

  var
    keys: seq[Key]

  for id in ids:
    let
      key = ? Key.init(id)

    keys.add key

  success self.child(keys)

proc `/`*(
  self: Key,
  id: string): ?!Key =
  ## Operator alias for `child` with an id string.
  self.child(id)

proc isAncestorOf*(self, other: Key): bool =
  ## Whether `self`'s namespaces are a strict prefix of `other`'s.
  if other.len <= self.len: false
  else: other.namespaces[0..<self.len] == self.namespaces

proc isDescendantOf*(self, other: Key): bool =
  ## Whether `other` is an ancestor of `self`.
  other.isAncestorOf(self)

proc id*(self: Key): string =
  ## Canonical "/"-separated representation, e.g. "/a:b/c".
  separator & self.namespaces.mapIt(it.id).join(separator)

proc `$`*(key: Key): string =
  ## Debug string, e.g. `Key(/a:b/c)`.
  "Key(" & key.id & ")"

View File

@ -0,0 +1,46 @@
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import ./datastore
export datastore
push: {.upraises: [].}
type
  NullDatastore* = ref object of Datastore
    ## No-op datastore: stores nothing and reports every key as absent.

proc new*(T: type NullDatastore): T =
  ## Creates a NullDatastore (no state to initialize).
  T()

method contains*(
  self: NullDatastore,
  key: Key): ?!bool {.locks: "unknown".} =
  ## Always false: nothing is ever stored.
  success false

method delete*(
  self: NullDatastore,
  key: Key): ?!void {.locks: "unknown".} =
  ## No-op; always succeeds.
  success()

method get*(
  self: NullDatastore,
  key: Key): ?!(?seq[byte]) {.locks: "unknown".} =
  ## Always `none`: nothing is ever stored.
  success seq[byte].none

method put*(
  self: NullDatastore,
  key: Key,
  data: openArray[byte]): ?!void {.locks: "unknown".} =
  ## Discards `data`; always succeeds.
  success()

# method query*(
#   self: NullDatastore,
#   query: ...): ?!(?...) {.locks: "unknown".} =
#
#   success ....none

226
datastore/sqlite.nim Normal file
View File

@ -0,0 +1,226 @@
import pkg/questionable
import pkg/questionable/results
import pkg/sqlite3_abi
import pkg/upraises
push: {.upraises: [].}
# Adapted from:
# https://github.com/status-im/nwaku/blob/master/waku/v2/node/storage/sqlite.nim
type
AutoDisposed*[T: ptr|ref] = object
val*: T
DataProc* = proc(s: RawStmtPtr) {.closure.}
NoParams* = tuple # empty tuple
NoParamsStmt* = SQLiteStmt[NoParams, void]
RawStmtPtr* = ptr sqlite3_stmt
SQLite* = ptr sqlite3
SQLiteStmt*[Params, Res] = distinct RawStmtPtr
proc bindParam(
s: RawStmtPtr,
n: int,
val: auto): cint =
when val is openarray[byte]|seq[byte]:
if val.len > 0:
# `SQLITE_TRANSIENT` "indicate[s] that the object is to be copied prior
# to the return from sqlite3_bind_*(). The object and pointer to it
# must remain valid until then. SQLite will then manage the lifetime of
# its private copy."
sqlite3_bind_blob(s, n.cint, unsafeAddr val[0], val.len.cint,
SQLITE_TRANSIENT)
else:
sqlite3_bind_blob(s, n.cint, nil, 0.cint, nil)
elif val is int32:
sqlite3_bind_int(s, n.cint, val)
elif val is uint32:
sqlite3_bind_int(s, n.cint, int(val).cint)
elif val is int64:
sqlite3_bind_int64(s, n.cint, val)
elif val is float64:
sqlite3_bind_double(s, n.cint, val)
elif val is string:
# `-1` implies string length is num bytes up to first null-terminator;
# `SQLITE_TRANSIENT` "indicate[s] that the object is to be copied prior
# to the return from sqlite3_bind_*(). The object and pointer to it must
# remain valid until then. SQLite will then manage the lifetime of its
# private copy."
sqlite3_bind_text(s, n.cint, val.cstring, -1.cint, SQLITE_TRANSIENT)
else:
{.fatal: "Please add support for the '" & $typeof(val) & "' type".}
template bindParams(
  s: RawStmtPtr,
  params: auto) =
  ## Binds every element of `params` to statement `s` in order. A tuple is
  ## expanded field-by-field into 1-based parameter positions; a non-tuple
  ## value is bound as the single parameter; `NoParams` binds nothing.
  ## Expands `checkErr`, so a bind failure returns a failure result from
  ## the *enclosing* routine.
  when params is tuple:
    when params isnot NoParams:
      var
        i = 1
      for param in fields(params):
        checkErr bindParam(s, i, param)
        inc i
  else:
    checkErr bindParam(s, 1, params)
template checkErr*(op: untyped) =
  ## Evaluates `op` (an expression yielding a SQLite result code) and, when
  ## it is not SQLITE_OK, returns a failure with the corresponding SQLite
  ## error string from the *enclosing* routine.
  if (let v = (op); v != SQLITE_OK):
    return failure $sqlite3_errstr(v)
template checkExec*(s: RawStmtPtr) =
  ## Steps statement `s` once, expecting completion (SQLITE_DONE), then
  ## finalizes it. On any error the statement is disposed (when applicable)
  ## and a failure is returned from the *enclosing* routine.
  if (let x = sqlite3_step(s); x != SQLITE_DONE):
    s.dispose
    return failure $sqlite3_errstr(x)
  if (let x = sqlite3_finalize(s); x != SQLITE_OK):
    return failure $sqlite3_errstr(x)
template checkExec*(env: SQLite, q: string) =
  ## One-shot execution: prepares SQL `q` against `env` and runs it to
  ## completion via `checkExec(s)`; failures return from the enclosing
  ## routine.
  let
    s = prepare(env, q)
  checkExec(s)
template dispose*(db: SQLite) =
  ## Closes the database connection, ignoring the result code.
  discard sqlite3_close(db)
template dispose*(rawStmt: RawStmtPtr) =
  ## Finalizes a raw prepared statement, ignoring the result code.
  discard sqlite3_finalize(rawStmt)
template dispose*(sqliteStmt: SQLiteStmt) =
  ## Finalizes a typed prepared statement, ignoring the result code.
  discard sqlite3_finalize(RawStmtPtr(sqliteStmt))
proc disposeIfUnreleased*[T](x: var AutoDisposed[T]) =
  ## Disposes the wrapped resource unless ownership was already transferred
  ## via `release` (i.e. `val` is nil). Intended for use with `defer`.
  mixin dispose
  if x.val != nil: dispose(x.release)
proc exec*[P](
  s: SQLiteStmt[P, void],
  params: P): ?!void =
  ## Binds `params` and runs prepared statement `s` to completion. The
  ## statement is reset and its bindings cleared afterwards so it can be
  ## reused; the statement itself is NOT finalized here.
  let
    s = RawStmtPtr(s)
  bindParams(s, params)
  let
    res =
      if (let v = sqlite3_step(s); v != SQLITE_DONE):
        failure $sqlite3_errstr(v)
      else:
        success()
  # release implicit transaction
  discard sqlite3_reset(s) # same return information as step
  discard sqlite3_clear_bindings(s) # no errors possible
  res
template journalModePragmaStmt*(env: SQLite): RawStmtPtr =
  ## Prepares and steps `PRAGMA journal_mode = WAL;` against `env`,
  ## yielding the statement with its single result row already consumed.
  ## The pragma must report "wal" or "memory" (the latter presumably for
  ## in-memory databases — verify); any other outcome returns a failure
  ## from the enclosing routine.
  let
    s = prepare(env, "PRAGMA journal_mode = WAL;")
  if (let x = sqlite3_step(s); x != SQLITE_ROW):
    s.dispose
    return failure $sqlite3_errstr(x)
  if (let x = sqlite3_column_type(s, 0); x != SQLITE3_TEXT):
    s.dispose
    return failure $sqlite3_errstr(x)
  if (let x = $sqlite3_column_text(s, 0).cstring; x != "memory" and x != "wal"):
    s.dispose
    return failure "Invalid pragma result: " & $x
  s
template open*(
  dbPath: string,
  env: var SQLite,
  flags = 0) =
  ## Opens the database at `dbPath` into `env` with the given SQLite open
  ## `flags` (e.g. SQLITE_OPEN_READONLY); on failure returns a failure
  ## from the enclosing routine.
  checkErr sqlite3_open_v2(dbPath.cstring, addr env, flags.cint, nil)
proc prepare*[Params, Res](
  T: type SQLiteStmt[Params, Res],
  env: SQLite,
  stmt: string): ?!T =
  ## Compiles SQL text `stmt` against `env` into a typed prepared
  ## statement, or fails with the SQLite error string.
  var
    s: RawStmtPtr
  checkErr sqlite3_prepare_v2(env, stmt.cstring, stmt.len.cint, addr s, nil)
  success T(s)
template prepare*(
  env: SQLite,
  q: string): RawStmtPtr =
  ## Compiles SQL text `q` against `env` into a raw prepared statement;
  ## on failure returns a failure from the enclosing routine.
  var
    s: RawStmtPtr
  checkErr sqlite3_prepare_v2(env, q.cstring, q.len.cint, addr s, nil)
  s
proc query*[P](
  s: SQLiteStmt[P, void],
  params: P,
  onData: DataProc): ?!bool =
  ## Binds `params`, steps `s` and invokes `onData` for every result row.
  ## Returns success(true) when at least one row was produced,
  ## success(false) for an empty result, or a failure on a step error.
  ## The statement is reset and its bindings cleared before returning so
  ## it can be reused.
  let
    s = RawStmtPtr(s)
  bindParams(s, params)
  var
    res = success false
  while true:
    let
      v = sqlite3_step(s)
    case v
    of SQLITE_ROW:
      onData(s)
      res = success true
    of SQLITE_DONE:
      break
    else:
      res = failure $sqlite3_errstr(v)
      break
  # release implicit transaction
  discard sqlite3_reset(s) # same return information as step
  discard sqlite3_clear_bindings(s) # no errors possible
  res
proc query*(
  env: SQLite,
  query: string,
  onData: DataProc): ?!bool =
  ## One-shot variant: prepares `query` against `env`, runs it with no
  ## bound parameters, then finalizes the prepared statement (even when
  ## the run itself failed).
  let
    s = ? NoParamsStmt.prepare(env, query)
    res = s.query((), onData)
  # NB: dispose of the prepared query statement and free associated memory
  s.dispose
  res
proc release*[T](x: var AutoDisposed[T]): T =
  ## Transfers ownership of the wrapped resource to the caller: yields the
  ## current value and clears `x.val` so that `disposeIfUnreleased`
  ## becomes a no-op.
  let owned = x.val
  x.val = nil
  owned

View File

@ -0,0 +1,272 @@
import std/os
import std/times
import pkg/questionable
import pkg/questionable/results
import pkg/sqlite3_abi
import pkg/stew/byteutils
import pkg/upraises
import ./datastore
import ./sqlite
export datastore, sqlite
push: {.upraises: [].}
type
  # feels odd to use `void` for prepared statements corresponding to SELECT
  # queries but it fits with the rest of the SQLite wrapper adapted from
  # status-im/nwaku, at least in its current form in ./sqlite
  # NB: `(string)` is not a one-element tuple in Nim, it is just `string`,
  # so these statements bind a single non-tuple parameter
  ContainsStmt = SQLiteStmt[(string), void]
  DeleteStmt = SQLiteStmt[(string), void]
  GetStmt = SQLiteStmt[(string), void]
  PutStmt = SQLiteStmt[(string, seq[byte], int64), void]
  SQLiteDatastore* = ref object of Datastore
    ## Datastore backed by a single-table SQLite database. Write statements
    ## (`deleteStmt`, `putStmt`) are only prepared when not read-only.
    dbPath: string
    containsStmt: ContainsStmt
    deleteStmt: DeleteStmt
    env: SQLite
    getStmt: GetStmt
    putStmt: PutStmt
    readOnly: bool
const
  # SQL column types for the single table used by SQLiteDatastore
  IdType = "TEXT"
  DataType = "BLOB"
  TimestampType = "INTEGER"
  dbExt* = ".sqlite3"
  tableTitle* = "Store"
  # https://stackoverflow.com/a/9756276
  # EXISTS returns a boolean value represented by an integer:
  # https://sqlite.org/datatype3.html#boolean_datatype
  # https://sqlite.org/lang_expr.html#the_exists_operator
  containsStmtStr = """
    SELECT EXISTS(
      SELECT 1 FROM """ & tableTitle & """
      WHERE id = ?
    );
  """
  # WITHOUT ROWID: the TEXT primary key is the clustering key
  createStmtStr = """
    CREATE TABLE IF NOT EXISTS """ & tableTitle & """ (
      id """ & IdType & """ NOT NULL PRIMARY KEY,
      data """ & DataType & """,
      timestamp """ & TimestampType & """ NOT NULL
    ) WITHOUT ROWID;
  """
  deleteStmtStr = """
    DELETE FROM """ & tableTitle & """
    WHERE id = ?;
  """
  getStmtStr = """
    SELECT data FROM """ & tableTitle & """
    WHERE id = ?;
  """
  # REPLACE INTO gives upsert semantics keyed on id
  putStmtStr = """
    REPLACE INTO """ & tableTitle & """ (
      id, data, timestamp
    ) VALUES (?, ?, ?);
  """
proc new*(
  T: type SQLiteDatastore,
  basePath = "data",
  filename = "store" & dbExt,
  readOnly = false,
  inMemory = false): ?!T =
  ## Opens (or creates) a SQLite-backed datastore.
  ##
  ## `basePath` (created if missing, resolved against the current directory
  ## when relative) and `filename` locate the database file; `inMemory`
  ## uses ":memory:" instead and is incompatible with `readOnly`. A
  ## read-only store requires the database file to already exist and does
  ## not prepare the write statements. WAL journaling is enabled via
  ## pragma. On failure the partially opened connection is disposed by the
  ## `defer`ed `disposeIfUnreleased`.
  # make it optional to enable WAL with it enabled being the default?
  # make it possible to specify a custom page size?
  # https://www.sqlite.org/pragma.html#pragma_page_size
  # https://www.sqlite.org/intern-v-extern-blob.html
  var
    env: AutoDisposed[SQLite]
  defer: disposeIfUnreleased(env)
  var
    basep, fname, dbPath: string
  if inMemory:
    if readOnly:
      return failure "SQLiteDatastore cannot be read-only and in-memory"
    else:
      dbPath = ":memory:"
  else:
    try:
      basep = normalizePathEnd(
        if basePath.isAbsolute: basePath
        else: getCurrentDir() / basePath)
      fname = filename.normalizePathEnd
      dbPath = basep / fname
      if readOnly and not fileExists(dbPath):
        return failure "read-only database does not exist: " & dbPath
      else:
        createDir(basep)
    except IOError as e:
      return failure e
    except OSError as e:
      return failure e
  let
    flags =
      if readOnly: SQLITE_OPEN_READONLY
      else: SQLITE_OPEN_READWRITE or SQLITE_OPEN_CREATE
  open(dbPath, env.val, flags)
  let
    pragmaStmt = journalModePragmaStmt(env.val)
  checkExec(pragmaStmt)
  var
    containsStmt: ContainsStmt
    deleteStmt: DeleteStmt
    getStmt: GetStmt
    putStmt: PutStmt
  if not readOnly:
    checkExec(env.val, createStmtStr)
    deleteStmt = ? DeleteStmt.prepare(env.val, deleteStmtStr)
    putStmt = ? PutStmt.prepare(env.val, putStmtStr)
  containsStmt = ? ContainsStmt.prepare(env.val, containsStmtStr)
  getStmt = ? GetStmt.prepare(env.val, getStmtStr)
  # if a readOnly/existing database does not satisfy the expected schema
  # `prepare()` will fail and `new` will return an error with message
  # "SQL logic error"
  success T(dbPath: dbPath, containsStmt: containsStmt, deleteStmt: deleteStmt,
            env: env.release, getStmt: getStmt, putStmt: putStmt,
            readOnly: readOnly)
proc dbPath*(self: SQLiteDatastore): string =
  ## Absolute path of the backing database file ("" after `close`;
  ## ":memory:" for in-memory stores).
  self.dbPath
proc env*(self: SQLiteDatastore): SQLite =
  ## Raw SQLite connection handle (nil after `close`).
  self.env
proc close*(self: SQLiteDatastore) =
  ## Finalizes all prepared statements (write statements exist only when
  ## not read-only), closes the connection, then resets `self` to an
  ## all-default state so e.g. `self.env` reads back as nil.
  self.containsStmt.dispose
  self.getStmt.dispose
  if not self.readOnly:
    self.deleteStmt.dispose
    self.putStmt.dispose
  self.env.dispose
  self[] = SQLiteDatastore()[]
proc timestamp*(t = epochTime()): int64 =
  ## Converts `t`, a floating-point epoch time in seconds (defaulting to
  ## the current wall-clock time), into whole microseconds, truncating any
  ## sub-microsecond remainder.
  int64(t * 1_000_000)
proc idCol*(
  s: RawStmtPtr,
  index = 0): string =
  ## Copies result column `index` of the current row of `s` into a Nim
  ## string (the table's `id` column by default).
  $sqlite3_column_text(s, index.cint).cstring
proc dataCol*(
  s: RawStmtPtr,
  index = 1): seq[byte] =
  ## Copies the BLOB in result column `index` of the current row of `s`
  ## into a fresh seq[byte] (the table's `data` column by default).
  ## Per the SQLite C API, sqlite3_column_blob returns a NULL pointer for
  ## a NULL or zero-length blob, so guard before constructing an openArray
  ## over the buffer and return an empty seq in that case (the same value
  ## the caller would otherwise receive).
  let
    i = index.cint
    dataBytes = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, i))
    dataLen = sqlite3_column_bytes(s, i)
  if dataBytes.isNil or dataLen <= 0:
    return newSeq[byte]()
  @(toOpenArray(dataBytes, 0, dataLen - 1))
proc timestampCol*(
  s: RawStmtPtr,
  index = 2): int64 =
  ## Reads result column `index` of the current row of `s` as an int64
  ## (the table's `timestamp` column by default).
  sqlite3_column_int64(s, index.cint)
method contains*(
  self: SQLiteDatastore,
  key: Key): ?!bool {.locks: "unknown".} =
  ## Reports whether a row with id `key.id` exists, via the prepared
  ## `SELECT EXISTS(...)` statement. That query always yields exactly one
  ## row whose single integer column is 0 or 1, so the query's row-count
  ## result is discarded and only the callback's value matters.
  var
    exists = false
  proc onData(s: RawStmtPtr) {.closure.} =
    exists = sqlite3_column_int64(s, 0).bool
  discard ? self.containsStmt.query((key.id), onData)
  success exists
method delete*(
  self: SQLiteDatastore,
  key: Key): ?!void {.locks: "unknown".} =
  ## Deletes the row with id `key.id`, if any (deleting a missing key
  ## succeeds). Fails without touching the database when the store was
  ## opened read-only, since no delete statement was prepared in that case.
  # NB: the original guard had a stray trailing colon after the `failure`
  # expression, which does not parse; this mirrors the guard in `put`.
  if self.readOnly:
    failure "database is read-only"
  else:
    self.deleteStmt.exec((key.id))
method get*(
  self: SQLiteDatastore,
  key: Key): ?!(?seq[byte]) {.locks: "unknown".} =
  ## Fetches the data stored under `key.id`: some(bytes) when the row
  ## exists (the bytes may be empty), none when it does not; SQLite step
  ## errors surface as failures via `?`.
  # see comment in ./filesystem_datastore re: finer control of memory
  # allocation in `method get`, could apply here as well if bytes were read
  # incrementally with `sqlite3_blob_read`
  var
    bytes: seq[byte]
  proc onData(s: RawStmtPtr) {.closure.} =
    # single-column SELECT, so data is at column 0 here (not the table's
    # default column position)
    bytes = dataCol(s, 0)
  let
    exists = ? self.getStmt.query((key.id), onData)
  if exists:
    success bytes.some
  else:
    success seq[byte].none
proc put*(
  self: SQLiteDatastore,
  key: Key,
  data: openArray[byte],
  timestamp: int64): ?!void =
  ## Inserts or replaces (REPLACE INTO) the row for `key.id` with `data`
  ## and the caller-supplied `timestamp`. Fails without touching the
  ## database when the store is read-only, since no put statement was
  ## prepared in that case.
  if self.readOnly:
    failure "database is read-only"
  else:
    self.putStmt.exec((key.id, @data, timestamp))
method put*(
  self: SQLiteDatastore,
  key: Key,
  data: openArray[byte]): ?!void {.locks: "unknown".} =
  ## Stores `data` under `key`, stamping the row with the current time in
  ## microseconds (see `timestamp`).
  self.put(key, data, timestamp())
# method query*(
# self: SQLiteDatastore,
# query: ...): ?!(?...) {.locks: "unknown".} =
#
# success ....none

View File

@ -0,0 +1,82 @@
import pkg/questionable
import pkg/questionable/results
import pkg/upraises
import ./datastore
export datastore
push: {.upraises: [].}
type
  TieredDatastore* = ref object of Datastore
    ## Datastore layering several child stores; earlier elements of
    ## `stores` act as front tiers (consulted first on reads), later ones
    ## as back tiers.
    stores: seq[Datastore]
proc new*(
  T: type TieredDatastore,
  stores: varargs[Datastore]): ?!T =
  ## Constructs a TieredDatastore over the given child stores, kept in the
  ## supplied tier order. Fails when no store is supplied.
  if stores.len > 0:
    success T(stores: @stores)
  else:
    failure "stores must contain at least one Datastore"
proc stores*(self: TieredDatastore): seq[Datastore] =
  ## Child stores in tier order (front tier first).
  self.stores
method contains*(
  self: TieredDatastore,
  key: Key): ?!bool {.locks: "unknown".} =
  ## Reports whether any tier contains `key`, checking tiers in order and
  ## stopping at the first hit; a failure from any consulted tier is
  ## propagated.
  for store in self.stores:
    if ? store.contains(key):
      return success true
  success false
method delete*(
  self: TieredDatastore,
  key: Key): ?!void {.locks: "unknown".} =
  ## Deletes `key` from every tier, front to back; stops and propagates
  ## the first failure (later tiers are then left untouched).
  for store in self.stores:
    ? store.delete(key)
  success()
method get*(
  self: TieredDatastore,
  key: Key): ?!(?seq[byte]) {.locks: "unknown".} =
  ## Returns the value for `key` from the first tier that has it, or none
  ## when no tier does. On a hit in a deeper tier, the value is written
  ## back into every tier in front of the hit before returning, so
  ## subsequent reads hit earlier tiers; a write-back failure propagates.
  var
    bytesOpt: ?seq[byte]
  for store in self.stores:
    bytesOpt = ? store.get(key)
    # put found data into stores logically in front of the current store
    if bytes =? bytesOpt:
      for s in self.stores:
        if s == store: break
        ? s.put(key, bytes)
      break
  success bytesOpt
method put*(
  self: TieredDatastore,
  key: Key,
  data: openArray[byte]): ?!void {.locks: "unknown".} =
  ## Writes `data` to every tier, front to back; stops and propagates the
  ## first failure (later tiers are then left unwritten).
  for store in self.stores:
    ? store.put(key, data)
  success()
# method query*(
# self: TieredDatastore,
# query: ...): ?!(?...) {.locks: "unknown".} =
#
# success ....none

View File

@ -0,0 +1,30 @@
import std/options
import pkg/stew/results
import pkg/unittest2
import ../../datastore
# Verifies that every unimplemented method of the abstract base `Datastore`
# raises a Defect when called directly.
const
  oneByte = @[1.byte]
suite "Datastore (base)":
  setup:
    let
      key = Key.init("a").get
      ds = Datastore()
  test "put":
    expect Defect: discard ds.put(key, oneByte)
  test "delete":
    expect Defect: discard ds.delete(key)
  test "contains":
    expect Defect: discard ds.contains(key)
  test "get":
    expect Defect: discard ds.get(key)
  # test "query":
  #   expect Defect: discard ds.query(...)

View File

@ -0,0 +1,196 @@
import std/options
import std/os
import pkg/stew/byteutils
import pkg/stew/results
import pkg/unittest2
import ../../datastore/filesystem_datastore
# Exercises FileSystemDatastore against a throwaway tests/test_data tree,
# which setup/teardown create and remove around every test.
suite "FileSystemDatastore":
  setup:
    # assumes tests/test_all is run from project root, e.g. with `nimble test`
    let
      root = "tests" / "test_data"
      rootAbs = getCurrentDir() / root
    removeDir(rootAbs)
    require(not dirExists(rootAbs))
  teardown:
    removeDir(rootAbs)
    require(not dirExists(rootAbs))
  test "new":
    var
      dsRes: Result[FileSystemDatastore, ref CatchableError]
      ds: FileSystemDatastore
    dsRes = FileSystemDatastore.new(rootAbs)
    assert dsRes.isOk
    ds = dsRes.get
    check: dirExists(rootAbs)
    removeDir(rootAbs)
    assert not dirExists(rootAbs)
    # relative root must resolve against the current directory
    dsRes = FileSystemDatastore.new(root)
    assert dsRes.isOk
    ds = dsRes.get
    check: dirExists(rootAbs)
  test "accessors":
    let
      ds = FileSystemDatastore.new(root).get
    check: ds.root == rootAbs
  test "helpers":
    let
      ds = FileSystemDatastore.new(root).get
    check:
      # see comment in ../../datastore/filesystem_datastore re: whether path
      # equivalence of e.g. Key(/a:b) and Key(/a/b) is problematic
      ds.path(Key.init("a").get) == rootAbs / "a" & objExt
      ds.path(Key.init("a:b").get) == rootAbs / "a" / "b" & objExt
      ds.path(Key.init("a/b").get) == rootAbs / "a" / "b" & objExt
      ds.path(Key.init("a:b/c").get) == rootAbs / "a" / "b" / "c" & objExt
      ds.path(Key.init("a/b/c").get) == rootAbs / "a" / "b" / "c" & objExt
      ds.path(Key.init("a:b/c:d").get) == rootAbs / "a" / "b" / "c" / "d" & objExt
      ds.path(Key.init("a/b/c:d").get) == rootAbs / "a" / "b" / "c" / "d" & objExt
      ds.path(Key.init("a/b/c/d").get) == rootAbs / "a" / "b" / "c" / "d" & objExt
  test "put":
    let
      ds = FileSystemDatastore.new(root).get
      key = Key.init("a:b/c/d:e").get
      path = ds.path(key)
    var
      bytes: seq[byte]
      putRes = ds.put(key, bytes)
    # empty write, then two overwrites; file contents must track the bytes
    check:
      putRes.isOk
      readFile(path).toBytes == bytes
    bytes = @[1.byte, 2.byte, 3.byte]
    putRes = ds.put(key, bytes)
    check:
      putRes.isOk
      readFile(path).toBytes == bytes
    bytes = @[4.byte, 5.byte, 6.byte]
    putRes = ds.put(key, bytes)
    check:
      putRes.isOk
      readFile(path).toBytes == bytes
  test "delete":
    let
      bytes = @[1.byte, 2.byte, 3.byte]
      ds = FileSystemDatastore.new(root).get
    var
      key = Key.init("a:b/c/d:e").get
      path = ds.path(key)
    let
      putRes = ds.put(key, bytes)
    assert putRes.isOk
    var
      delRes = ds.delete(key)
    check:
      delRes.isOk
      not fileExists(path)
      dirExists(parentDir(path))
    # deleting a key that was never stored must also succeed
    key = Key.init("X/Y/Z").get
    path = ds.path(key)
    assert not fileExists(path)
    delRes = ds.delete(key)
    check: delRes.isOk
  test "contains":
    let
      bytes = @[1.byte, 2.byte, 3.byte]
      ds = FileSystemDatastore.new(root).get
    var
      key = Key.init("a:b/c/d:e").get
      path = ds.path(key)
      putRes = ds.put(key, bytes)
    assert putRes.isOk
    var
      containsRes = ds.contains(key)
    assert containsRes.isOk
    check: containsRes.get == true
    key = Key.init("X/Y/Z").get
    path = ds.path(key)
    assert not fileExists(path)
    containsRes = ds.contains(key)
    assert containsRes.isOk
    check: containsRes.get == false
  test "get":
    let
      ds = FileSystemDatastore.new(root).get
    var
      bytes: seq[byte]
      key = Key.init("a:b/c/d:e").get
      path = ds.path(key)
      putRes = ds.put(key, bytes)
    assert putRes.isOk
    var
      getRes = ds.get(key)
      getOpt = getRes.get
    # an empty stored value still reads back as some(empty)
    check: getOpt.isSome and getOpt.get == bytes
    bytes = @[1.byte, 2.byte, 3.byte]
    putRes = ds.put(key, bytes)
    assert putRes.isOk
    getRes = ds.get(key)
    getOpt = getRes.get
    check: getOpt.isSome and getOpt.get == bytes
    key = Key.init("X/Y/Z").get
    path = ds.path(key)
    assert not fileExists(path)
    getRes = ds.get(key)
    getOpt = getRes.get
    check: getOpt.isNone
  # test "query":
  #   check:
  #     true

View File

@ -0,0 +1,428 @@
import std/options
import pkg/stew/results
import pkg/unittest2
import ../../datastore/key
# Validates Namespace construction (field/value and single-string forms),
# its accessors, equality and string serialization.
suite "Namespace":
  test "init":
    var
      nsRes: Result[Namespace, ref CatchableError]
    # two-argument form: value must be non-empty and free of ':' '/' and
    # whitespace; field may be empty but not invalid
    nsRes = Namespace.init("a", "")
    check: nsRes.isErr
    nsRes = Namespace.init("a", " ")
    check: nsRes.isErr
    nsRes = Namespace.init("a", ":")
    check: nsRes.isErr
    nsRes = Namespace.init("a", "/")
    check: nsRes.isErr
    nsRes = Namespace.init(":", "b")
    check: nsRes.isErr
    nsRes = Namespace.init("/", "b")
    check: nsRes.isErr
    nsRes = Namespace.init("", "b")
    check: nsRes.isOk
    nsRes = Namespace.init(" ", "b")
    check: nsRes.isErr
    nsRes = Namespace.init("a", "b")
    check: nsRes.isOk
    # single-string form: at most one ':' separating field and value
    nsRes = Namespace.init("")
    check: nsRes.isErr
    nsRes = Namespace.init(" ")
    check: nsRes.isErr
    nsRes = Namespace.init("/")
    check: nsRes.isErr
    nsRes = Namespace.init(":")
    check: nsRes.isErr
    nsRes = Namespace.init("a:b:c")
    check: nsRes.isErr
    nsRes = Namespace.init("a")
    check: nsRes.isOk
    nsRes = Namespace.init("a:")
    check: nsRes.isErr
    nsRes = Namespace.init("a: ")
    check: nsRes.isErr
    nsRes = Namespace.init(" :b")
    check: nsRes.isErr
    nsRes = Namespace.init("a:b")
    check: nsRes.isOk
    nsRes = Namespace.init(":b")
    check: nsRes.isOk
  test "accessors":
    var
      ns: Namespace
    ns = Namespace.init("", "b").get
    check:
      ns.value == "b"
      ns.field.isNone
    ns = Namespace.init("a", "b").get
    check:
      ns.value == "b"
      ns.field.isSome and ns.field.get == "a"
    ns = Namespace.init(":b").get
    check:
      ns.value == "b"
      ns.field.isNone
    ns = Namespace.init("a:b").get
    check:
      ns.value == "b"
      ns.field.isSome and ns.field.get == "a"
    # `type` and `kind` are aliases for `field`
    check:
      ns.`type`.get == ns.field.get
      ns.kind.get == ns.field.get
  test "equality":
    check:
      Namespace.init("a").get == Namespace.init("a").get
      Namespace.init("a").get != Namespace.init("b").get
      Namespace.init("", "b").get == Namespace.init("", "b").get
      Namespace.init("", "b").get == Namespace.init("b").get
      Namespace.init(":b").get == Namespace.init("b").get
      Namespace.init("", "b").get != Namespace.init("", "a").get
      Namespace.init("", "b").get != Namespace.init("a").get
      Namespace.init(":b").get != Namespace.init("a").get
      Namespace.init("a", "b").get == Namespace.init("a", "b").get
      Namespace.init("a", "b").get == Namespace.init("a:b").get
      Namespace.init("a:b").get == Namespace.init("a:b").get
      Namespace.init("a", "b").get != Namespace.init("b", "a").get
      Namespace.init("a", "b").get != Namespace.init("b:a").get
      Namespace.init("a:b").get != Namespace.init("b:a").get
      Namespace.init("a").get != Namespace.init("a:b").get
  test "serialization":
    var
      ns: Namespace
    ns = Namespace.init(":b").get
    check:
      ns.id == "b"
      $ns == "Namespace(" & ns.id & ")"
    ns = Namespace.init("a:b").get
    check:
      ns.id == "a:b"
      $ns == "Namespace(" & ns.id & ")"
# Validates Key construction from namespaces/strings, accessors, equality,
# the navigation helpers (parent/path/child/instance/...), and
# serialization.
suite "Key":
  test "init":
    var
      keyRes: Result[Key, ref CatchableError]
      nss: seq[Namespace]
    # an empty namespace seq is rejected
    keyRes = Key.init(nss)
    check: keyRes.isErr
    nss = @[Namespace.init("a").get]
    keyRes = Key.init(nss)
    check: keyRes.isOk
    var
      nsStrs: seq[string]
    keyRes = Key.init(nsStrs)
    check: keyRes.isErr
    nsStrs = @[":"]
    keyRes = Key.init(nsStrs)
    check: keyRes.isErr
    nsStrs = @["/"]
    keyRes = Key.init(nsStrs)
    check: keyRes.isErr
    nsStrs = @["a:b"]
    keyRes = Key.init(nsStrs)
    check: keyRes.isOk
    # single-string form: '/'-separated namespaces, leading/trailing and
    # repeated slashes are tolerated, but each segment must be valid
    keyRes = Key.init("")
    check: keyRes.isErr
    keyRes = Key.init(" ")
    check: keyRes.isErr
    keyRes = Key.init("/")
    check: keyRes.isErr
    keyRes = Key.init("///")
    check: keyRes.isErr
    keyRes = Key.init(":")
    check: keyRes.isErr
    keyRes = Key.init("::")
    check: keyRes.isErr
    keyRes = Key.init("a:")
    check: keyRes.isErr
    keyRes = Key.init("a:b/c:")
    check: keyRes.isErr
    keyRes = Key.init(":b")
    check: keyRes.isOk
    keyRes = Key.init("a:b")
    check: keyRes.isOk
    keyRes = Key.init("a:b/c")
    check: keyRes.isOk
    keyRes = Key.init("a:b/:c")
    check: keyRes.isOk
    keyRes = Key.init("/a:b/c/")
    check: keyRes.isOk
    keyRes = Key.init("///a:b///c///")
    check: keyRes.isOk
  test "accessors":
    let
      key = Key.init("/a:b/c/d:e").get
    check:
      key.namespaces == @[
        Namespace.init("a:b").get,
        Namespace.init("c").get,
        Namespace.init("d:e").get
      ]
      key.list == key.namespaces
  test "equality":
    check:
      Key.init(Namespace.init("a:b").get, Namespace.init("c").get).get == Key.init("a:b/c").get
      Key.init("a:b", "c").get == Key.init("a:b/c").get
      Key.init("a:b/c").get == Key.init("a:b/c").get
      Key.init(Namespace.init("a:b").get, Namespace.init("c").get).get != Key.init("c:b/a").get
      Key.init("a:b", "c").get != Key.init("c:b/a").get
      Key.init("a:b/c").get != Key.init("c:b/a").get
      Key.init("a:b/c").get == Key.init("/a:b/c/").get
      Key.init("a:b/c").get == Key.init("///a:b///c///").get
      Key.init("a:b/c").get != Key.init("///a:b///d///").get
      Key.init("a").get != Key.init("a:b").get
      Key.init("a").get != Key.init("a/b").get
      Key.init("a/b/c").get != Key.init("a/b").get
      Key.init("a:X/b/c").get == Key.init("a:X/b/c").get
      Key.init("a/b:X/c").get == Key.init("a/b:X/c").get
      Key.init("a/b/c:X").get == Key.init("a/b/c:X").get
      Key.init("a:X/b/c:X").get == Key.init("a:X/b/c:X").get
      Key.init("a:X/b:X/c").get == Key.init("a:X/b:X/c").get
      Key.init("a/b:X/c:X").get == Key.init("a/b:X/c:X").get
      Key.init("a:X/b:X/c:X").get == Key.init("a:X/b:X/c:X").get
      Key.init("a/b/c").get != Key.init("a:X/b/c").get
      Key.init("a/b/c").get != Key.init("a/b:X/c").get
      Key.init("a/b/c").get != Key.init("a/b/c:X").get
      Key.init("a/b/c").get != Key.init("a:X/b/c:X").get
      Key.init("a/b/c").get != Key.init("a:X/b:X/c").get
      Key.init("a/b/c").get != Key.init("a/b:X/c:X").get
      Key.init("a/b/c").get != Key.init("a:X/b:X/c:X").get
  test "helpers":
    check: Key.random.len == 24
    let
      key = Key.init("/a:b/c/d:e").get
    # indexing and slicing delegate to the namespaces seq
    check:
      key[1] == Namespace.init("c").get
      key[1..^1] == @[Namespace.init("c").get, Namespace.init("d:e").get]
      key[^1] == Namespace.init("d:e").get
    check: key.len == key.namespaces.len
    var
      nss: seq[Namespace]
    for ns in key:
      nss.add ns
    check:
      nss == @[
        Namespace.init("a:b").get,
        Namespace.init("c").get,
        Namespace.init("d:e").get
      ]
    check:
      key.reversed.namespaces == @[
        Namespace.init("d:e").get,
        Namespace.init("c").get,
        Namespace.init("a:b").get
      ]
      key.reverse == key.reversed
    check: key.name == "e"
    check:
      key.`type` == key[^1].`type`
      key.kind == key.`type`
    # `instance` swaps the value of the last namespace
    check:
      key.instance(Namespace.init("f:g").get) == Key.init("a:b/c/d:g").get
      Key.init("a:b").get.instance(Namespace.init(":c").get) == Key.init("a:c").get
      Key.init(":b").get.instance(Namespace.init(":c").get) == Key.init("b:c").get
      Key.init(":b").get.instance(key) == Key.init("b:e").get
      Namespace.init("a:b").get.instance(Namespace.init("c").get) == Key.init("a:c").get
      Namespace.init(":b").get.instance(Namespace.init("c").get) == Key.init("b:c").get
      Namespace.init("a:b").get.instance(key) == Key.init("a:e").get
      Namespace.init(":b").get.instance(key) == Key.init("b:e").get
      Key.init(":b").get.instance("").isErr
      Key.init(":b").get.instance(":").isErr
      Key.init(":b").get.instance("/").isErr
      Key.init(":b").get.instance("//").isErr
      Key.init(":b").get.instance("///").isErr
      Key.init(":b").get.instance("a").get == Key.init("b:a").get
      Key.init(":b").get.instance(":b").get == Key.init("b:b").get
      Key.init(":b").get.instance("a:b").get == Key.init("b:b").get
      Key.init(":b").get.instance("/a:b/c/d:e").get == Key.init("b:e").get
      Key.init("a:b").get.instance("a").get == Key.init("a:a").get
      Key.init("a:b").get.instance(":b").get == Key.init("a:b").get
      Key.init("a:b").get.instance("a:b").get == Key.init("a:b").get
      Key.init("a:b").get.instance("/a:b/c/d:e").get == Key.init("a:e").get
    check:
      Key.init(":b").get.isTopLevel
      not Key.init(":b/c").get.isTopLevel
    check:
      Key.init(":b").get.parent.isErr
      Key.init(":b").parent.isErr
      key.parent.get == Key.init("a:b/c").get
      key.parent.parent.get == Key.init("a:b").get
      key.parent.parent.parent.isErr
    check:
      key.parent.get.path.get == Key.init("a:b").get
      key.path.get == Key.init("a:b/c/d").get
      Key.init("a:b/c").path.get == Key.init("a:b").get
      Key.init("a:b/c/d:e").path.get == Key.init("a:b/c/d").get
    check: key.child(Namespace.init("f:g").get) == Key.init("a:b/c/d:e/f:g").get
    check: key / Namespace.init("f:g").get == Key.init("a:b/c/d:e/f:g").get
    var
      emptyNss: seq[Namespace]
    check:
      key.child(emptyNss) == key
      key.child(Namespace.init("f:g").get, Namespace.init("h:i").get) ==
        Key.init("a:b/c/d:e/f:g/h:i").get
    check:
      key.child(Key.init("f:g").get) == Key.init("a:b/c/d:e/f:g").get
      key / Key.init("f:g").get == Key.init("a:b/c/d:e/f:g").get
    var
      emptyKeys: seq[Key]
    check:
      key.child(emptyKeys) == key
      key.child(Key.init("f:g").get, Key.init("h:i").get) ==
        Key.init("a:b/c/d:e/f:g/h:i").get
    check:
      key.child("f:g", ":::").isErr
      key.child("f:g", "h:i").get == Key.init("a:b/c/d:e/f:g/h:i").get
      key.child("").get == key
      key.child("", "", "").get == key
    check:
      (key / "").get == key
      (key / "f:g").get == Key.init("a:b/c/d:e/f:g").get
    check:
      not key.isAncestorOf(Key.init("f:g").get)
      key.isAncestorOf(key / Key.init("f:g").get)
    check:
      key.isDescendantOf(key.parent.get)
      not Key.init("f:g").get.isDescendantOf(key.parent.get)
  test "serialization":
    let
      idStr = "/a:b/c/d:e"
      key = Key.init(idStr).get
    check:
      key.id == idStr
      $key == "Key(" & key.id & ")"

View File

@ -0,0 +1,38 @@
import std/options
import pkg/stew/results
import pkg/unittest2
import ../../datastore/null_datastore
# NullDatastore is a black hole: every operation succeeds and reads always
# come back empty.
suite "NullDatastore":
  setup:
    let
      key = Key.init("a").get
      ds = NullDatastore.new()
    discard key # suppresses "declared but not used" re: key
  test "new":
    check: not ds.isNil
  test "put":
    check: ds.put(key, [1.byte]).isOk
  test "delete":
    check: ds.delete(key).isOk
  test "contains":
    check:
      ds.contains(key).isOk
      ds.contains(key).get == false
  test "get":
    check:
      ds.get(key).isOk
      ds.get(key).get.isNone
  # test "query":
  #   check:
  #     ds.query(...).isOk
  #     ds.query(...).get.isNone

View File

@ -0,0 +1,322 @@
import std/options
import std/os
import pkg/stew/results
import pkg/unittest2
import ../../datastore/sqlite_datastore
# Exercises SQLiteDatastore against a throwaway tests/test_data database;
# raw-row assertions use the sqlite wrapper's ad-hoc `query` helper.
suite "SQLiteDatastore":
  setup:
    var
      ds: SQLiteDatastore
    # assumes tests/test_all is run from project root, e.g. with `nimble test`
    let
      basePath = "tests" / "test_data"
      basePathAbs = getCurrentDir() / basePath
      filename = "test_store" & dbExt
      dbPathAbs = basePathAbs / filename
    removeDir(basePathAbs)
    require(not dirExists(basePathAbs))
    discard dbPathAbs # suppresses "declared but not used" re: dbPathAbs
  teardown:
    if not ds.isNil: ds.close
    removeDir(basePathAbs)
    require(not dirExists(basePathAbs))
  test "new":
    var
      dsRes = SQLiteDatastore.new(basePathAbs, filename, readOnly = true)
    # for `readOnly = true` to succeed the database file must already exist
    check: dsRes.isErr
    dsRes = SQLiteDatastore.new(basePathAbs, filename)
    assert dsRes.isOk
    ds = dsRes.get
    check:
      dirExists(basePathAbs)
      fileExists(dbPathAbs)
    ds.close
    removeDir(basePathAbs)
    assert not dirExists(basePathAbs)
    # relative base path must resolve against the current directory
    dsRes = SQLiteDatastore.new(basePath, filename)
    assert dsRes.isOk
    ds = dsRes.get
    check:
      dirExists(basePathAbs)
      fileExists(dbPathAbs)
    ds.close
    # for `readOnly = true` to succeed the database file must already exist, so
    # the existing file (per previous step) is not deleted prior to the next
    # invocation of `SQLiteDatastore.new`
    dsRes = SQLiteDatastore.new(basePath, filename, readOnly = true)
    assert dsRes.isOk
    ds = dsRes.get
    check:
      dirExists(basePathAbs)
      fileExists(dbPathAbs)
    ds.close
    removeDir(basePathAbs)
    assert not dirExists(basePathAbs)
    dsRes = SQLiteDatastore.new(inMemory = true)
    assert dsRes.isOk
    ds = dsRes.get
    check:
      not dirExists(basePathAbs)
      not fileExists(dbPathAbs)
    ds.close
    dsRes = SQLiteDatastore.new(readOnly = true, inMemory = true)
    check: dsRes.isErr
  test "accessors":
    ds = SQLiteDatastore.new(basePath).get
    check:
      parentDir(ds.dbPath) == basePathAbs
      not ds.env.isNil
  test "helpers":
    ds = SQLiteDatastore.new(basePath).get
    ds.close
    # `close` resets all fields, and `timestamp` converts seconds to µs
    check:
      ds.env.isNil
      timestamp(10.123_456) == 10_123_456.int64
  test "put":
    let
      key = Key.init("a:b/c/d:e").get
    # for `readOnly = true` to succeed the database file must already exist
    ds = SQLiteDatastore.new(basePathAbs, filename).get
    ds.close
    ds = SQLiteDatastore.new(basePathAbs, filename, readOnly = true).get
    var
      bytes: seq[byte]
      timestamp = timestamp()
      putRes = ds.put(key, bytes, timestamp)
    # writes must fail on a read-only store
    check: putRes.isErr
    ds.close
    removeDir(basePathAbs)
    assert not dirExists(basePathAbs)
    ds = SQLiteDatastore.new(basePathAbs, filename).get
    timestamp = timestamp()
    putRes = ds.put(key, bytes, timestamp)
    check: putRes.isOk
    let
      query = "SELECT * FROM " & tableTitle & ";"
    var
      qId: string
      qData: seq[byte]
      qTimestamp: int64
      rowCount = 0
    proc onData(s: RawStmtPtr) {.closure.} =
      qId = idCol(s)
      qData = dataCol(s)
      qTimestamp = timestampCol(s)
      inc rowCount
    var
      qRes = ds.env.query(query, onData)
    assert qRes.isOk
    check:
      qRes.get
      qId == key.id
      qData == bytes
      qTimestamp == timestamp
      rowCount == 1
    # REPLACE INTO semantics: re-putting the same key overwrites the row
    # rather than adding another
    bytes = @[1.byte, 2.byte, 3.byte]
    timestamp = timestamp()
    putRes = ds.put(key, bytes, timestamp)
    check: putRes.isOk
    rowCount = 0
    qRes = ds.env.query(query, onData)
    assert qRes.isOk
    check:
      qRes.get
      qId == key.id
      qData == bytes
      qTimestamp == timestamp
      rowCount == 1
    bytes = @[4.byte, 5.byte, 6.byte]
    timestamp = timestamp()
    putRes = ds.put(key, bytes, timestamp)
    check: putRes.isOk
    rowCount = 0
    qRes = ds.env.query(query, onData)
    assert qRes.isOk
    check:
      qRes.get
      qId == key.id
      qData == bytes
      qTimestamp == timestamp
      rowCount == 1
  test "delete":
    let
      bytes = @[1.byte, 2.byte, 3.byte]
    var
      key = Key.init("a:b/c/d:e").get
    # for `readOnly = true` to succeed the database file must already exist
    ds = SQLiteDatastore.new(basePathAbs, filename).get
    ds.close
    ds = SQLiteDatastore.new(basePathAbs, filename, readOnly = true).get
    var
      delRes = ds.delete(key)
    # deletes must fail on a read-only store
    check: delRes.isErr
    ds.close
    removeDir(basePathAbs)
    assert not dirExists(basePathAbs)
    ds = SQLiteDatastore.new(basePathAbs, filename).get
    let
      putRes = ds.put(key, bytes)
    assert putRes.isOk
    let
      query = "SELECT * FROM " & tableTitle & ";"
    var
      rowCount = 0
    proc onData(s: RawStmtPtr) {.closure.} =
      inc rowCount
    var
      qRes = ds.env.query(query, onData)
    assert qRes.isOk
    check: rowCount == 1
    delRes = ds.delete(key)
    check: delRes.isOk
    rowCount = 0
    qRes = ds.env.query(query, onData)
    assert qRes.isOk
    check:
      delRes.isOk
      rowCount == 0
    # deleting a key that was never stored must also succeed
    key = Key.init("X/Y/Z").get
    delRes = ds.delete(key)
    check: delRes.isOk
  test "contains":
    let
      bytes = @[1.byte, 2.byte, 3.byte]
    var
      key = Key.init("a:b/c/d:e").get
    ds = SQLiteDatastore.new(basePathAbs, filename).get
    let
      putRes = ds.put(key, bytes)
    assert putRes.isOk
    var
      containsRes = ds.contains(key)
    assert containsRes.isOk
    check: containsRes.get == true
    key = Key.init("X/Y/Z").get
    containsRes = ds.contains(key)
    assert containsRes.isOk
    check: containsRes.get == false
  test "get":
    ds = SQLiteDatastore.new(basePathAbs, filename).get
    var
      bytes: seq[byte]
      key = Key.init("a:b/c/d:e").get
      putRes = ds.put(key, bytes)
    assert putRes.isOk
    var
      getRes = ds.get(key)
      getOpt = getRes.get
    # an empty stored value still reads back as some(empty)
    check: getOpt.isSome and getOpt.get == bytes
    bytes = @[1.byte, 2.byte, 3.byte]
    putRes = ds.put(key, bytes)
    assert putRes.isOk
    getRes = ds.get(key)
    getOpt = getRes.get
    check: getOpt.isSome and getOpt.get == bytes
    key = Key.init("X/Y/Z").get
    assert not ds.contains(key).get
    getRes = ds.get(key)
    getOpt = getRes.get
    check: getOpt.isNone
  # test "query":
  #   check:
  #     true

View File

@ -0,0 +1,153 @@
import std/options
import std/os
import pkg/stew/results
import pkg/unittest2
import ../../datastore/filesystem_datastore
import ../../datastore/sqlite_datastore
import ../../datastore/tiered_datastore
# Exercises TieredDatastore over a fast front tier (in-memory SQLite) and a
# slower back tier (filesystem), including read back-fill of front tiers.
suite "TieredDatastore":
  setup:
    # assumes tests/test_all is run from project root, e.g. with `nimble test`
    let
      bytes = @[1.byte, 2.byte, 3.byte]
      key = Key.init("a:b/c/d:e").get
      root = "tests" / "test_data"
      rootAbs = getCurrentDir() / root
    discard bytes # suppresses "declared but not used" re: bytes
    discard key # suppresses "declared but not used" re: key
    removeDir(rootAbs)
    require(not dirExists(rootAbs))
    var
      ds1 = SQLiteDatastore.new(inMemory = true).get
      ds2 = FileSystemDatastore.new(rootAbs).get
    discard ds2 # suppresses "declared but not used" re: ds2
  teardown:
    ds1.close
    removeDir(rootAbs)
    require(not dirExists(rootAbs))
  test "new":
    check:
      TieredDatastore.new().isErr
      TieredDatastore.new([]).isErr
      TieredDatastore.new(@[]).isErr
      TieredDatastore.new(ds1, ds2).isOk
      TieredDatastore.new([ds1, ds2]).isOk
      TieredDatastore.new(@[ds1, ds2]).isOk
  test "accessors":
    let
      stores = @[ds1, ds2]
    check:
      TieredDatastore.new(ds1, ds2).get.stores == stores
      TieredDatastore.new([ds1, ds2]).get.stores == stores
      TieredDatastore.new(@[ds1, ds2]).get.stores == stores
  test "put":
    let
      ds = TieredDatastore.new(ds1, ds2).get
    assert ds1.get(key).get.isNone
    assert ds2.get(key).get.isNone
    let
      putRes = ds.put(key, bytes)
    # a put must land in every tier
    check:
      putRes.isOk
      ds1.get(key).get.get == bytes
      ds2.get(key).get.get == bytes
  test "delete":
    let
      ds = TieredDatastore.new(ds1, ds2).get
      putRes = ds.put(key, bytes)
    assert putRes.isOk
    assert ds1.get(key).get.get == bytes
    assert ds2.get(key).get.get == bytes
    let
      delRes = ds.delete(key)
    # a delete must remove the key from every tier
    check:
      delRes.isOk
      ds1.get(key).get.isNone
      ds2.get(key).get.isNone
  test "contains":
    let
      ds = TieredDatastore.new(ds1, ds2).get
    assert not ds1.contains(key).get
    assert not ds2.contains(key).get
    let
      putRes = ds.put(key, bytes)
    assert putRes.isOk
    let
      containsRes = ds.contains(key)
    check:
      containsRes.isOk
      containsRes.get
      ds1.contains(key).get
      ds2.contains(key).get
  test "get":
    var
      ds = TieredDatastore.new(ds1, ds2).get
    assert ds1.get(key).get.isNone
    assert ds2.get(key).get.isNone
    check: ds.get(key).get.isNone
    let
      putRes = ds.put(key, bytes)
    assert putRes.isOk
    var
      getRes = ds.get(key)
    check:
      getRes.isOk
      getRes.get.isSome
      getRes.get.get == bytes
      ds1.get(key).get.isSome
      ds2.get(key).get.isSome
      ds1.get(key).get.get == bytes
      ds2.get(key).get.get == bytes
    # replace the front tier with a fresh, empty store: a get must find the
    # value in the back tier and back-fill the new front tier
    ds1.close
    ds1 = SQLiteDatastore.new(inMemory = true).get
    ds = TieredDatastore.new(ds1, ds2).get
    assert ds1.get(key).get.isNone
    assert ds2.get(key).get.isSome
    assert ds2.get(key).get.get == bytes
    getRes = ds.get(key)
    check:
      getRes.isOk
      getRes.get.isSome
      getRes.get.get == bytes
      ds1.get(key).get.isSome
      ds1.get(key).get.get == bytes
  # test "query":
  #   check:
  #     true

9
tests/test_all.nim Normal file
View File

@ -0,0 +1,9 @@
import
./datastore/test_key,
./datastore/test_datastore,
./datastore/test_null_datastore,
./datastore/test_filesystem_datastore,
./datastore/test_sqlite_datastore,
./datastore/test_tiered_datastore
{.warning[UnusedImport]: off.}