nim-codex/codex/streams/storestream.nim
Eric de88fd2c53
feat: create logging proxy (#663)
* implement a logging proxy

The logging proxy:
- prevents the need to import chronicles (as well as to `export` chronicles `except toJson`),
- prevents the need to override `writeValue` or to use or import nim-json-serialization elsewhere in the codebase, allowing utils/json to be used solely for de/serialization,
- and handles json formatting correctly in chronicles json sinks.
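
A minimal, self-contained sketch of the idea (the `formatIt` name comes from this change; the template body below is illustrative only, not the real logutils implementation):

```nim
# Sketch only: the real logutils formatIt wires the body into chronicles'
# textlines and json sinks; this stand-in merely derives a `$` overload so
# the example runs on its own.
type
  NBytes = distinct int64

template formatIt(T: typedesc, body: untyped) =
  proc `$`(it {.inject.}: T): string = body

# One declaration per type; call sites never import chronicles or
# nim-json-serialization directly.
formatIt(NBytes):
  $it.int64 & "'NB"

echo NBytes(1024)   # prints: 1024'NB
```

With the real proxy, modules simply import logutils and log values of these types via `trace`/`info`; the proxy decides how each value is rendered in every sink.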

* Rename logging -> logutils to avoid ambiguity with common names

* clean up

* add setProperty for JsonRecord, remove nim-json-serialization conflict

* Allow specifying textlines and json format separately

Not specifying a LogFormat will apply the formatting to both textlines and json sinks.

Specifying a LogFormat will apply the formatting to only that sink.
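
A rough sketch of the two call shapes (the `LogFormat` name comes from this change; the enum values and template bodies below are placeholders, not the real logutils code):

```nim
# Sketch only: stand-ins for the two formatIt overloads described above;
# they just print what would be registered instead of touching chronicles.
type
  LogFormat = enum
    textLines, json
  Address = object
    host: string
    port: int

template formatIt(format: LogFormat, T: typedesc, body: untyped) =
  echo "register ", T, " formatter for sink: ", format

template formatIt(T: typedesc, body: untyped) =
  echo "register ", T, " formatter for both sinks"

# No LogFormat given: the formatting applies to both textlines and json.
formatIt(Address):
  it.host & ":" & $it.port

# LogFormat given: the formatting applies only to the json sink.
formatIt(LogFormat.json, Address):
  it.host & ":" & $it.port
```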

* remove unneeded usages of std/json

We only need to import utils/json instead of std/json

* move serialization from rest/json to utils/json so it can be shared

* fix NoColors ambiguity

Was causing unit tests to fail on Windows.

* Remove nre usage to fix Windows error

Windows was erroring with `could not load: pcre64.dll`. Instead of fixing that error, remove the pcre usage :)

* Add logutils module doc

* Shorten logutils.formatIt for `NBytes`

Separate json and textlines formatIt definitions were not needed; they could be combined into a single formatIt.

* remove debug integration test config

Removes the debug output and json logformat that were configured for integration test logs.

* Use ## module doc to support docgen

* bump nim-poseidon2 to export fromBytes

Before the changes in this branch, fromBytes was likely being resolved by nim-stew or another dependency. With the changes in this branch, that dependency was removed and fromBytes could no longer be resolved. Exporting fromBytes from nim-poseidon2 restores the correct resolution.

* fixes to get compiling after rebasing master

* Add support for Result types being logged using formatIt
2024-01-22 23:35:03 -08:00


## Nim-Codex
## Copyright (c) 2023 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

import std/options

import pkg/upraises

push: {.upraises: [].}

import pkg/chronos
import pkg/stew/ptrops

import ../stores
import ../manifest
import ../blocktype
import ../logutils
import ../utils

import ./seekablestream

export stores, blocktype, manifest, chronos

logScope:
  topics = "codex storestream"

const
  StoreStreamTrackerName* = "StoreStream"

type
  # Make SeekableStream from a sequence of blocks stored in Manifest
  # (only original file data - see StoreStream.size)
  StoreStream* = ref object of SeekableStream
    store*: BlockStore      # Store where to lookup block contents
    manifest*: Manifest     # List of block CIDs
    pad*: bool              # Pad last block to manifest.blockSize?

method initStream*(s: StoreStream) =
  if s.objName.len == 0:
    s.objName = StoreStreamTrackerName

  procCall SeekableStream(s).initStream()

proc new*(
  T: type StoreStream,
  store: BlockStore,
  manifest: Manifest,
  pad = true
): StoreStream =
  ## Create a new StoreStream instance for a given store and manifest
  ##
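  ## Example (sketch, inside an async proc; assumes an existing
  ## `store: BlockStore` and a `manifest: Manifest` for the dataset):
  ##
  ##   let stream = StoreStream.new(store, manifest)
  ##   var buf = newSeq[byte](4096)
  ##   let bytesRead = await stream.readOnce(addr buf[0], buf.len)
  ##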
  result = StoreStream(
    store: store,
    manifest: manifest,
    pad: pad,
    offset: 0)

  result.initStream()

method `size`*(self: StoreStream): int =
  bytes(self.manifest, self.pad).int

proc `size=`*(self: StoreStream, size: int)
  {.error: "Setting the size is forbidden".} =
  discard

method atEof*(self: StoreStream): bool =
  self.offset >= self.size

method readOnce*(
  self: StoreStream,
  pbytes: pointer,
  nbytes: int
): Future[int] {.async.} =
  ## Read `nbytes` from the current position in the StoreStream into the
  ## output buffer pointed to by `pbytes`. Return how many bytes were
  ## actually read before EOF was encountered. Raise an exception if the
  ## stream is already at EOF.
  ##
  trace "Reading from manifest", cid = self.manifest.cid.get(), blocks = self.manifest.blocksCount

  if self.atEof:
    raise newLPStreamEOFError()

  # The loop iterates over blocks in the StoreStream,
  # reading them and copying their data into the outbuf
  var read = 0  # Bytes read so far, and thus the write offset in the outbuf

  while read < nbytes and not self.atEof:
    # From the current stream position `self.offset`, compute which block to
    # read (blockNum), the offset within that block (blockOffset), and how
    # many bytes to read from it (readBytes).
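    # Worked example (illustrative numbers only): with a 64 KiB block size
    # and self.offset == 70_000, blockNum = 70_000 div 65_536 = 1 and
    # blockOffset = 70_000 mod 65_536 = 4_464.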
    let
      blockNum    = self.offset div self.manifest.blockSize.int
      blockOffset = self.offset mod self.manifest.blockSize.int
      readBytes   = min([self.size - self.offset,
                         nbytes - read,
                         self.manifest.blockSize.int - blockOffset])
      address     = BlockAddress(leaf: true, treeCid: self.manifest.treeCid, index: blockNum)

    # Read contents of block `blockNum`
    without blk =? await self.store.getBlock(address), error:
      raise newLPStreamReadError(error)

    trace "Reading bytes from store stream", blockNum, cid = blk.cid, bytes = readBytes, blockOffset

    # Copy `readBytes` bytes starting at `blockOffset` from the block into the outbuf
    if blk.isEmpty:
      zeroMem(pbytes.offset(read), readBytes)
    else:
      copyMem(pbytes.offset(read), blk.data[blockOffset].addr, readBytes)

    # Update current positions in the stream and outbuf
    self.offset += readBytes
    read += readBytes

  return read

method closeImpl*(self: StoreStream) {.async.} =
  trace "Closing StoreStream"
  self.offset = self.size  # set Eof
  await procCall LPStream(self).closeImpl()