mirror of
https://github.com/logos-storage/logos-storage-nim.git
synced 2026-01-02 13:33:10 +00:00
* cleanup imports and logs * add BlockHandle type * revert deps * refactor: async error handling and future tracking improvements - Update async procedures to use explicit raises annotation - Modify TrackedFutures to handle futures with no raised exceptions - Replace `asyncSpawn` with explicit future tracking - Update test suites to use `unittest2` - Standardize error handling across network and async components - Remove deprecated error handling patterns This commit introduces a more robust approach to async error handling and future management, improving type safety and reducing potential runtime errors. * bump nim-serde * remove asyncSpawn * rework background downloads and prefetch * improve logging * refactor: enhance async procedures with error handling and raise annotations * misc cleanup * misc * refactor: implement allFinishedFailed to aggregate future results with success and failure tracking * refactor: update error handling in reader procedures to raise ChunkerError and CancelledError * refactor: improve error handling in wantListHandler and accountHandler procedures * refactor: simplify LPStreamReadError creation by consolidating parameters * refactor: enhance error handling in AsyncStreamWrapper to catch unexpected errors * refactor: enhance error handling in advertiser and discovery loops to improve resilience * misc * refactor: improve code structure and readability * remove cancellation from addSlotToQueue * refactor: add assertion for unexpected errors in local store checks * refactor: prevent tracking of finished futures and improve test assertions * refactor: improve error handling in local store checks * remove usage of msgDetail * feat: add initial implementation of discovery engine and related components * refactor: improve task scheduling logic by removing unnecessary break statement * break after scheduling a task * make taskHandler cancelable * refactor: update async handlers to raise CancelledError * refactor(advertiser): streamline error 
handling and improve task flow in advertise loops * fix: correct spelling of "divisible" in error messages and comments * refactor(discovery): simplify discovery task loop and improve error handling * refactor(engine): filter peers before processing in cancelBlocks procedure
128 lines | 3.5 KiB | Nim
## Nim-Codex
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
|
|
|
|
# TODO: This is super inefficient and needs a rewrite, but it'll do for now
|
|
|
|
import pkg/upraises
|
|
|
|
push:
|
|
{.upraises: [].}
|
|
|
|
import pkg/questionable
|
|
import pkg/questionable/results
|
|
import pkg/chronos
|
|
import pkg/libp2p except shuffle
|
|
|
|
import ./blocktype
|
|
import ./logutils
|
|
|
|
export blocktype
|
|
|
|
const DefaultChunkSize* = DefaultBlockSize

type
  # Raised by a Reader when the underlying data source fails;
  # recoverable, so callers may catch it and retry or abort.
  ChunkerError* = object of CatchableError

  # Raw destination buffer a Reader fills with bytes.
  ChunkBuffer* = ptr UncheckedArray[byte]

  # default reader type: fills `data` with up to `len` bytes and
  # returns the number of bytes actually read (0 on exhaustion).
  Reader* = proc(data: ChunkBuffer, len: int): Future[int] {.
    gcsafe, async: (raises: [ChunkerError, CancelledError])
  .}

  # Reader that splits input data into fixed-size chunks
  Chunker* = ref object
    reader*: Reader # Procedure called to actually read the data
    offset*: int # Bytes read so far (position in the stream)
    chunkSize*: NBytes # Size of each chunk
    pad*: bool # Pad last chunk to chunkSize?

  # Aliases: these specialize only the constructor overloads
  # below, not the runtime representation.
  FileChunker* = Chunker
  LPStreamChunker* = Chunker
|
|
|
|
proc getBytes*(
    c: Chunker
): Future[seq[byte]] {.async: (raises: [ChunkerError, CancelledError]).} =
  ## Returns the next chunk of bytes from the instantiated chunker.
  ##
  ## Returns an empty seq once the underlying reader is exhausted.
  ## Unless `c.pad` is set, a short final read is truncated to the
  ## bytes actually produced.
  ##
  ## Raises ChunkerError or CancelledError propagated from the
  ## reader; the explicit raises annotation keeps the effect list
  ## consistent with the `Reader` type (the bare `{.async.}` form
  ## lost the exception tracking this module pushes for).
  ##

  var buff = newSeq[byte](c.chunkSize.int)
  let read = await c.reader(cast[ChunkBuffer](addr buff[0]), buff.len)

  if read <= 0:
    return @[]

  c.offset += read

  # Trim a short last chunk unless padding was requested
  if not c.pad and buff.len > read:
    buff.setLen(read)

  return move buff
|
|
|
|
proc new*(
    T: type Chunker, reader: Reader, chunkSize = DefaultChunkSize, pad = true
): Chunker =
  ## Construct a Chunker that pulls bytes through `reader` and
  ## yields `chunkSize`-sized chunks, optionally padding the last
  ## one. The read position starts at zero.
  ##
  result = Chunker(reader: reader, chunkSize: chunkSize, pad: pad, offset: 0)
|
|
|
|
proc new*(
    T: type LPStreamChunker, stream: LPStream, chunkSize = DefaultChunkSize, pad = true
): LPStreamChunker =
  ## create the default LPStream chunker
  ##

  proc reader(
      data: ChunkBuffer, len: int
  ): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
    ## Fill `data` with up to `len` bytes from the stream,
    ## looping over partial reads until the buffer is full or
    ## the stream signals EOF.
    var res = 0
    try:
      while res < len:
        res += await stream.readOnce(addr data[res], len - res)
    except LPStreamEOFError as exc:
      # EOF is expected: hand back whatever was gathered so far
      trace "LPStreamChunker stream Eof", exc = exc.msg
    except CancelledError as exc:
      raise exc
    except LPStreamError as exc:
      # NOTE: the exception variable must not be named `error` —
      # that would shadow the `error` logging macro used below.
      error "LPStream error", err = exc.msg
      raise newException(ChunkerError, "LPStream error", exc)
    except CatchableError as exc:
      # Anything else is unexpected at this point: escalate
      error "CatchableError exception", exc = exc.msg
      raise newException(Defect, exc.msg)

    return res

  LPStreamChunker.new(reader = reader, chunkSize = chunkSize, pad = pad)
|
|
|
|
proc new*(
    T: type FileChunker, file: File, chunkSize = DefaultChunkSize, pad = true
): FileChunker =
  ## create the default File chunker
  ##

  proc reader(
      data: ChunkBuffer, len: int
  ): Future[int] {.gcsafe, async: (raises: [ChunkerError, CancelledError]).} =
    ## Fill `data` with up to `len` bytes from `file`; a zero or
    ## negative read means end of input and ends the loop early.
    var total = 0
    try:
      while total < len:
        let bytesRead = file.readBuffer(addr data[total], len - total)
        if bytesRead <= 0:
          break
        total += bytesRead
    except CancelledError as exc:
      raise exc
    except IOError as exc:
      # Best effort: log the failure and return what was read
      trace "Exception reading file", exc = exc.msg
    except CatchableError as exc:
      error "CatchableError exception", exc = exc.msg
      raise newException(Defect, exc.msg)

    return total

  FileChunker.new(reader = reader, chunkSize = chunkSize, pad = pad)
|