Mirror of https://github.com/logos-storage/nim-datastore.git, synced 2026-01-03 14:13:09 +00:00
initial take at making fsds synchronous
commit 653ec99b53 (parent 24ac1a8708)
@@ -92,7 +92,7 @@ proc delete*(self: FSDatastore, keys: openArray[KeyId]): ?!void =
 
   return success()
 
-proc readFile*(self: FSDatastore, path: string): ?!seq[byte] =
+proc readFile*[V](self: FSDatastore, path: string): ?!V =
   var
     file: File
 
@@ -104,14 +104,19 @@ proc readFile*(self: FSDatastore, path: string): ?!seq[byte] =
 
   try:
     let
-      size = file.getFileSize
+      size = file.getFileSize().int
 
+    when V is seq[byte]:
+      var bytes = newSeq[byte](size)
+    elif V is DataBuffer:
+      var bytes = DataBuffer.new(capacity=size)
+    else:
+      {.error: "unhandled result type".}
     var
-      bytes = newSeq[byte](size)
       read = 0
 
     while read < size:
-      read += file.readBytes(bytes, read, size)
+      read += file.readBytes(bytes.toOpenArray(), read, size)
 
     if read < size:
       return failure $read & " bytes were read from " & path &
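The key move in this hunk is that readFile becomes generic over its result buffer and picks the representation with a compile-time `when V is ...` branch. Below is a minimal standalone sketch of that dispatch pattern, using only standard-library types: seq[byte] and string stand in for the repo's DataBuffer, and the helper name readFileAs plus demo.txt are made up for illustration.

import std/os

# Pick the buffer representation at compile time, mirroring the
# `when V is seq[byte] / elif V is DataBuffer` branch in the patch.
proc readFileAs[V: seq[byte] | string](path: string): V =
  let size = getFileSize(path).int
  var f = open(path, fmRead)
  defer: f.close()
  when V is seq[byte]:
    result = newSeq[byte](size)      # raw byte buffer
  elif V is string:
    result = newString(size)         # text buffer
  else:
    {.error: "unhandled result type".}
  var read = 0
  while read < size:
    let n = f.readBuffer(addr result[read], size - read)
    if n <= 0: break                 # stop on EOF/short read instead of spinning
    read += n

when isMainModule:
  writeFile("demo.txt", "hello")
  let bytes = readFileAs[seq[byte]]("demo.txt")
  let text  = readFileAs[string]("demo.txt")
  echo bytes.len, " ", text          # -> 5 hello

Unlike the patch, this sketch lets IO errors raise instead of returning a ?! Result, and it reads with readBuffer rather than readBytes over a DataBuffer view.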
@@ -122,7 +127,8 @@ proc readFile*(self: FSDatastore, path: string): ?!seq[byte] =
   except CatchableError as e:
     return failure e
 
-proc get*(self: FSDatastore, key: Key): ?!seq[byte] =
+proc get*(self: FSDatastore, key: KeyId): ?!DataBuffer =
+  let key = key.toKey()
   without path =? self.path(key), error:
     return failure error
 
@@ -130,12 +136,12 @@ proc get*(self: FSDatastore, key: Key): ?!seq[byte] =
     return failure(
       newException(DatastoreKeyNotFound, "Key doesn't exist"))
 
-  return self.readFile(path)
+  return readFile[DataBuffer](self, path)
 
 proc put*(
   self: FSDatastore,
-  key: Key,
-  data: seq[byte]): ?!void =
+  key: KeyId,
+  data: DataBuffer): ?!void =
 
   without path =? self.path(key), error:
     return failure error
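With get now returning ?!DataBuffer and put accepting a DataBuffer, a synchronous caller can thread the buffer straight from one call to the other. A rough sketch of such a caller, assuming the questionable/results package this file already uses for `?!`, `without` and `failure`; the copyValue helper is invented for illustration, and the import of the module that defines FSDatastore, KeyId and DataBuffer is left out because its path is not shown in this diff.

import pkg/questionable
import pkg/questionable/results
# plus the fsds module that defines FSDatastore, KeyId and DataBuffer

proc copyValue(ds: FSDatastore, src, dst: KeyId): ?!void =
  ## read the value stored under `src` and store it again under `dst`
  without buf =? ds.get(src), err:   # get now yields a DataBuffer
    return failure err
  return ds.put(dst, buf)            # put now accepts the DataBuffer as-is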
@@ -174,7 +180,7 @@ proc close*(self: FSDatastore): ?!void =
 
 proc query*(
   self: FSDatastore,
-  query: Query): ?!QueryIter =
+  query: DbQuery[KeyId, DataBuffer]): ?!QueryIter =
 
   without path =? self.path(query.key), error:
     return failure error
@@ -228,7 +234,7 @@ proc query*(
         key = Key.init(keyPath).expect("should not fail")
         data =
           if query.value:
-            self.readFile((basePath / path).absolutePath)
+            self.readFile[DataBuffer]((basePath / path).absolutePath)
              .expect("Should read file")
           else:
             @[]
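One thing worth double-checking in this hunk: with method-call syntax, Nim parses `self.readFile[DataBuffer](...)` as indexing the expression `self.readFile` rather than as explicit generic instantiation; the explicit form with dot syntax is `[:T]`. A tiny standalone illustration of the rule (the pick proc and sample values are made up):

proc pick[T](xs: seq[int]): T =
  ## convert the first element to T; exists only to have something generic
  result = T(xs[0])

let xs = @[41, 42]
let a = pick[float](xs)       # call syntax: explicit instantiation works
let b = xs.pick[:float]()     # dot-call syntax needs the [:T] form
# let c = xs.pick[float]()    # parsed as (xs.pick)[float]() and rejected
echo a, " ", b                # -> 41.0 41.0

The get hunk above uses the call form readFile[DataBuffer](self, path), which sidesteps the issue.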
@@ -47,7 +47,7 @@ template `==`*[T: char | byte](a: DataBuffer, b: openArray[T]): bool =
   elif a[].size != b.len: false
   else: a.hash() == b.hash()
 
-proc new(tp: type DataBuffer, capacity: int = 0): DataBuffer =
+proc new*(tp: type DataBuffer, capacity: int = 0): DataBuffer =
   ## allocate new buffer with given capacity
   ##
 
@@ -130,9 +130,12 @@ converter toBuffer*(err: ref CatchableError): DataBuffer =
 
   return DataBuffer.new(err.msg)
 
-template toOpenArray*[T: byte | char](data: DataBuffer, t: typedesc[T]): openArray[T] =
+template toOpenArray*[T: byte | char](data: var DataBuffer, t: typedesc[T]): var openArray[T] =
   ## get openArray from DataBuffer as char
   ##
   ## this is explicit since sqlite treats string differently from openArray[byte]
-  let bf = cast[ptr UncheckedArray[T]](data[].buf)
+  var bf = cast[ptr UncheckedArray[T]](data[].buf)
   bf.toOpenArray(0, data[].size-1)
 
+template toOpenArray*(data: var DataBuffer): var openArray[byte] =
+  toOpenArray(data, byte)
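The widened toOpenArray template (var DataBuffer in, var openArray[T] out) is what lets readFile above hand the buffer directly to readBytes. A sketch of the same pattern with a throwaway wrapper type instead of DataBuffer: RawBuf, demo.bin and the sizes are made up, and whether a given Nim version accepts the pointer-backed view as a var openArray argument should be verified; this simply mirrors what the patch relies on.

type RawBuf = object
  ## stand-in for DataBuffer: manually managed bytes plus a length
  buf: ptr UncheckedArray[byte]
  size: int

template toOpenArray(data: var RawBuf): var openArray[byte] =
  ## mutable view over the raw allocation, like the patched DataBuffer template
  data.buf.toOpenArray(0, data.size - 1)

when isMainModule:
  writeFile("demo.bin", "0123456789abcdef")
  var rb = RawBuf(buf: cast[ptr UncheckedArray[byte]](alloc0(16)), size: 16)
  var f = open("demo.bin", fmRead)
  let n = f.readBytes(rb.toOpenArray(), 0, rb.size)  # readBytes fills the view
  f.close()
  echo n                                             # -> 16
  dealloc(rb.buf)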