2022-05-19 19:56:03 +00:00
|
|
|
## Nim-Codex
|
2022-04-05 19:12:59 +00:00
|
|
|
## Copyright (c) 2022 Status Research & Development GmbH
|
|
|
|
## Licensed under either of
|
|
|
|
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
|
|
|
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
|
|
|
## at your option.
|
|
|
|
## This file may not be copied, modified, or distributed except according to
|
|
|
|
## those terms.
|
|
|
|
|
|
|
|
import pkg/upraises
|
|
|
|
|
|
|
|
push: {.upraises: [].}
|
|
|
|
|
|
|
|
import std/sequtils
|
2023-11-14 12:02:17 +00:00
|
|
|
import std/sugar
|
2022-04-05 19:12:59 +00:00
|
|
|
|
|
|
|
import pkg/chronos
|
2023-12-22 12:04:01 +00:00
|
|
|
import pkg/libp2p/[multicodec, cid, multihash]
|
2023-11-14 12:02:17 +00:00
|
|
|
import pkg/libp2p/protobuf/minprotobuf
|
2022-04-05 19:12:59 +00:00
|
|
|
|
feat: create logging proxy (#663)
* implement a logging proxy
The logging proxy:
- prevents the need to import chronicles (as well as export except toJson),
- prevents the need to override `writeValue` or use or import nim-json-seralization elsewhere in the codebase, allowing for sole use of utils/json for de/serialization,
- and handles json formatting correctly in chronicles json sinks
* Rename logging -> logutils to avoid ambiguity with common names
* clean up
* add setProperty for JsonRecord, remove nim-json-serialization conflict
* Allow specifying textlines and json format separately
Not specifying a LogFormat will apply the formatting to both textlines and json sinks.
Specifying a LogFormat will apply the formatting to only that sink.
* remove unneeded usages of std/json
We only need to import utils/json instead of std/json
* move serialization from rest/json to utils/json so it can be shared
* fix NoColors ambiguity
Was causing unit tests to fail on Windows.
* Remove nre usage to fix Windows error
Windows was erroring with `could not load: pcre64.dll`. Instead of fixing that error, remove the pcre usage :)
* Add logutils module doc
* Shorten logutils.formatIt for `NBytes`
Both json and textlines formatIt were not needed, and could be combined into one formatIt
* remove debug integration test config
debug output and logformat of json for integration test logs
* Use ## module doc to support docgen
* bump nim-poseidon2 to export fromBytes
Before the changes in this branch, fromBytes was likely being resolved by nim-stew, or other dependency. With the changes in this branch, that dependency was removed and fromBytes could no longer be resolved. By exporting fromBytes from nim-poseidon, the correct resolution is now happening.
* fixes to get compiling after rebasing master
* Add support for Result types being logged using formatIt
2024-01-23 07:35:03 +00:00
|
|
|
import ../logutils
|
2022-04-05 19:12:59 +00:00
|
|
|
import ../manifest
|
2023-11-14 12:02:17 +00:00
|
|
|
import ../merkletree
|
2022-04-05 19:12:59 +00:00
|
|
|
import ../stores
|
2022-05-19 02:29:15 +00:00
|
|
|
import ../blocktype as bt
|
2023-11-14 12:02:17 +00:00
|
|
|
import ../utils
|
|
|
|
import ../utils/asynciter
|
2024-01-08 22:52:46 +00:00
|
|
|
import ../indexingstrategy
|
2023-11-14 12:02:17 +00:00
|
|
|
|
|
|
|
import pkg/stew/byteutils
|
2022-04-05 19:12:59 +00:00
|
|
|
|
|
|
|
import ./backend
|
|
|
|
|
|
|
|
export backend
|
|
|
|
|
|
|
|
logScope:
  # Chronicles topic tag applied to every log line emitted from this module.
  topics = "codex erasure"
|
|
|
type
  ## Encode a manifest into one that is erasure protected.
  ##
  ## The new manifest has K `blocks` that are encoded into
  ## additional M `parity` blocks. The resulting dataset
  ## is padded with empty blocks if it doesn't have a square
  ## shape.
  ##
  ## NOTE: The padding blocks could be excluded
  ## from transmission, but they aren't for now.
  ##
  ## The resulting dataset is logically divided into rows
  ## where a row is made up of B blocks. There are then,
  ## K + M = N rows in total, each of length B blocks. Rows
  ## are assumed to be of the same number of (B) blocks.
  ##
  ## The encoding is systematic and the rows can be
  ## read sequentially by any node without decoding.
  ##
  ## Decoding is possible with any K rows or partial K
  ## columns (with up to M blocks missing per column),
  ## or any combination thereof.
  ##

  # Factory producing an encoder backend for a given block `size`,
  # `blocks` (K) and `parity` (M) configuration.
  EncoderProvider* = proc(size, blocks, parity: int): EncoderBackend
    {.raises: [Defect], noSideEffect.}

  # Factory producing the matching decoder backend.
  DecoderProvider* = proc(size, blocks, parity: int): DecoderBackend
    {.raises: [Defect], noSideEffect.}

  Erasure* = ref object
    encoderProvider*: EncoderProvider # creates encoders on demand (released per run)
    decoderProvider*: DecoderProvider # creates decoders on demand (released per run)
    store*: BlockStore                # backing store for data/parity blocks and proofs

  # Derived erasure-coding parameters; computed by `EncodingParams.init`.
  EncodingParams = object
    ecK: Natural         # data blocks per row (K)
    ecM: Natural         # parity blocks per row (M)
    rounded: Natural     # manifest block count rounded up to a multiple of ecK
    steps: Natural       # number of encoding steps (interleaved columns)
    blocksCount: Natural # total blocks in protected dataset: rounded + steps * ecM
|
|
func indexToPos(steps, idx, step: int): int {.inline.} =
  ## Map a dataset index to its row position within the current step.
  ##
  ## `steps` - total number of interleaved steps (columns)
  ## `idx`   - the dataset index to convert
  ## `step`  - the current step (column offset)
  ##
  ## Returns the position in the encoded dataset.
  let offsetFromStep = idx - step
  result = offsetFromStep div steps
|
proc getPendingBlocks(
  self: Erasure,
  manifest: Manifest,
  indicies: seq[int]): AsyncIter[(?!bt.Block, int)] =
  ## Get pending blocks iterator
  ##
  ## Kicks off a store fetch for every index in `indicies` and returns an
  ## async iterator that yields `(block result, dataset index)` tuples in
  ## order of future completion (not index order).
  ##

  var
    # request blocks from the store
    pendingBlocks = indicies.map( (i: int) =>
      self.store.getBlock(
        BlockAddress.init(manifest.treeCid, i)
      ).map((r: ?!bt.Block) => (r, i)) # Get the data blocks (first K)
    )

  # exhausted once every outstanding future has been consumed
  proc isFinished(): bool = pendingBlocks.len == 0

  proc genNext(): Future[(?!bt.Block, int)] {.async.} =
    # wait for whichever fetch completes first, then remove it from the
    # pending set so it is yielded exactly once
    let completedFut = await one(pendingBlocks)
    if (let i = pendingBlocks.find(completedFut); i >= 0):
      pendingBlocks.del(i)
      return await completedFut
    else:
      # should be unreachable: `one` returned a future we are not tracking
      let (_, index) = await completedFut
      raise newException(
        CatchableError,
        "Future for block id not found, tree cid: " & $manifest.treeCid & ", index: " & $index)

  Iter.new(genNext, isFinished)
|
proc prepareEncodingData(
  self: Erasure,
  manifest: Manifest,
  params: EncodingParams,
  step: Natural,
  data: ref seq[seq[byte]],
  cids: ref seq[Cid],
  emptyBlock: seq[byte]): Future[?!Natural] {.async.} =
  ## Prepare data for encoding
  ##
  ## `manifest`   - the original manifest whose blocks are fetched
  ## `params`     - encoding parameters for this run
  ## `step`       - the current encoding step (column)
  ## `data`       - out: the K payloads selected for this step
  ## `cids`       - out: cids of the blocks placed into `data`
  ## `emptyBlock` - zero-filled payload used for padding positions
  ##
  ## Returns the number of blocks actually resolved from the store.
  ##

  let
    strategy = SteppedStrategy.init(
      firstIndex = 0,
      lastIndex = params.rounded - 1,
      iterations = params.steps
    )
    indicies = toSeq(strategy.getIndicies(step))
    # only fetch indices that exist in the original dataset; the rest are padding
    pendingBlocksIter = self.getPendingBlocks(manifest, indicies.filterIt(it < manifest.blocksCount))

  var resolved = 0
  for fut in pendingBlocksIter:
    let (blkOrErr, idx) = await fut
    without blk =? blkOrErr, err:
      # missing blocks are tolerated here; the caller decides whether
      # enough data was gathered
      warn "Failed retreiving a block", treeCid = manifest.treeCid, idx, msg = err.msg
      continue

    let pos = indexToPos(params.steps, idx, step)
    # shallowCopy avoids copying the payload; empty blocks share `emptyBlock`
    shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data)
    cids[idx] = blk.cid

    resolved.inc()

  # indices past the original dataset are padded with empty blocks
  for idx in indicies.filterIt(it >= manifest.blocksCount):
    let pos = indexToPos(params.steps, idx, step)
    trace "Padding with empty block", idx
    shallowCopy(data[pos], emptyBlock)
    without emptyBlockCid =? emptyCid(manifest.version, manifest.hcodec, manifest.codec), err:
      return failure(err)
    cids[idx] = emptyBlockCid

  success(resolved.Natural)
|
proc prepareDecodingData(
  self: Erasure,
  encoded: Manifest,
  step: Natural,
  data: ref seq[seq[byte]],
  parityData: ref seq[seq[byte]],
  cids: ref seq[Cid],
  emptyBlock: seq[byte]): Future[?!(Natural, Natural)] {.async.} =
  ## Prepare data for decoding
  ##
  ## `encoded`    - the encoded manifest
  ## `step`       - the current step
  ## `data`       - the data to be prepared
  ## `parityData` - the parityData to be prepared
  ## `cids`       - cids of prepared data
  ## `emptyBlock` - the empty block to be used for padding
  ##
  ## Returns a `(dataPieces, parityPieces)` tuple counting how many data
  ## and parity blocks were retrieved for this step.
  ##

  let
    strategy = SteppedStrategy.init(
      firstIndex = 0,
      lastIndex = encoded.blocksCount - 1,
      iterations = encoded.steps
    )
    indicies = toSeq(strategy.getIndicies(step))
    pendingBlocksIter = self.getPendingBlocks(encoded, indicies)

  var
    dataPieces = 0
    parityPieces = 0
    resolved = 0
  for fut in pendingBlocksIter:
    # Continue to receive blocks until we have just enough for decoding
    # or no more blocks can arrive
    if resolved >= encoded.ecK:
      break

    let (blkOrErr, idx) = await fut
    without blk =? blkOrErr, err:
      # a missing block is fine as long as K pieces arrive in total
      trace "Failed retreiving a block", idx, treeCid = encoded.treeCid, msg = err.msg
      continue

    let
      pos = indexToPos(encoded.steps, idx, step)

    logScope:
      cid = blk.cid
      idx = idx
      pos = pos
      step = step
      empty = blk.isEmpty

    cids[idx] = blk.cid
    # indices at or beyond `rounded` hold parity; the rest hold data
    if idx >= encoded.rounded:
      trace "Retrieved parity block"
      shallowCopy(parityData[pos - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data)
      parityPieces.inc
    else:
      trace "Retrieved data block"
      shallowCopy(data[pos], if blk.isEmpty: emptyBlock else: blk.data)
      dataPieces.inc

    resolved.inc

  return success (dataPieces.Natural, parityPieces.Natural)
|
proc init*(
  _: type EncodingParams,
  manifest: Manifest,
  ecK: Natural, ecM: Natural): ?!EncodingParams =
  ## Derive erasure-coding parameters for encoding `manifest` with
  ## K = `ecK` data blocks and M = `ecM` parity blocks per row.
  ##
  ## Fails when `ecK` is zero or exceeds the manifest's block count.
  if ecK == 0:
    # Guard: `roundUp`/`divUp` below divide by ecK; without this check a
    # zero ecK raises a DivByZeroDefect instead of returning a failure.
    return failure(
      "Unable to encode manifest, ecK must be larger than 0")

  if ecK > manifest.blocksCount:
    return failure(
      "Unable to encode manifest, not enough blocks, ecK = " &
      $ecK &
      ", blocksCount = " &
      $manifest.blocksCount)

  let
    rounded = roundUp(manifest.blocksCount, ecK)  # pad dataset to a multiple of K
    steps = divUp(manifest.blocksCount, ecK)      # number of interleaved rows
    blocksCount = rounded + (steps * ecM)         # data (padded) + parity total

  EncodingParams(
    ecK: ecK,
    ecM: ecM,
    rounded: rounded,
    steps: steps,
    blocksCount: blocksCount
  ).success
|
|
|
proc encodeData(
  self: Erasure,
  manifest: Manifest,
  params: EncodingParams
): Future[?!Manifest] {.async.} =
  ## Encode blocks pointed to by the protected manifest
  ##
  ## `manifest` - the manifest to encode
  ## `params`   - precomputed encoding parameters
  ##
  ## Generates and stores the parity blocks, builds the combined Merkle
  ## tree, persists its proofs, and returns the protected manifest.
  ##

  logScope:
    steps = params.steps
    rounded_blocks = params.rounded
    blocks_count = params.blocksCount
    ecK = params.ecK
    ecM = params.ecM

  var
    cids = seq[Cid].new()
    encoder = self.encoderProvider(manifest.blockSize.int, params.ecK, params.ecM)
    emptyBlock = newSeq[byte](manifest.blockSize.int)

  # one cid slot per data + parity block in the protected dataset
  cids[].setLen(params.blocksCount)

  try:
    for step in 0..<params.steps:
      # TODO: Don't allocate a new seq every time, allocate once and zero out
      var
        data = seq[seq[byte]].new() # number of blocks to encode
        parityData = newSeqWith[seq[byte]](params.ecM, newSeq[byte](manifest.blockSize.int))

      data[].setLen(params.ecK)
      # TODO: this is a tight blocking loop so we sleep here to allow
      # other events to be processed, this should be addressed
      # by threading
      await sleepAsync(10.millis)

      without resolved =?
        (await self.prepareEncodingData(manifest, params, step, data, cids, emptyBlock)), err:
        trace "Unable to prepare data", error = err.msg
        return failure(err)

      trace "Erasure coding data", data = data[].len, parity = parityData.len

      if (
        let res = encoder.encode(data[], parityData);
        res.isErr):
        trace "Unable to encode manifest!", error = $res.error
        return failure($res.error)

      # parity blocks are interleaved: block j of this step lands at
      # rounded + step + j * steps
      var idx = params.rounded + step
      for j in 0..<params.ecM:
        without blk =? bt.Block.new(parityData[j]), error:
          trace "Unable to create parity block", err = error.msg
          return failure(error)

        trace "Adding parity block", cid = blk.cid, idx
        cids[idx] = blk.cid
        if isErr (await self.store.putBlock(blk)):
          trace "Unable to store block!", cid = blk.cid
          return failure("Unable to store block!")
        idx.inc(params.steps)

    # build the Merkle tree over all data + parity cids
    without tree =? CodexTree.init(cids[]), err:
      return failure(err)

    without treeCid =? tree.rootCid, err:
      return failure(err)

    if err =? (await self.store.putAllProofs(tree)).errorOption:
      return failure(err)

    let encodedManifest = Manifest.new(
      manifest = manifest,
      treeCid = treeCid,
      datasetSize = (manifest.blockSize.int * params.blocksCount).NBytes,
      ecK = params.ecK,
      ecM = params.ecM
    )

    trace "Encoded data successfully", treeCid, blocksCount = params.blocksCount
    return encodedManifest.success
  except CancelledError as exc:
    trace "Erasure coding encoding cancelled"
    raise exc # cancellation needs to be propagated
  except CatchableError as exc:
    trace "Erasure coding encoding error", exc = exc.msg
    return failure(exc)
  finally:
    encoder.release()
|
proc encode*(
  self: Erasure,
  manifest: Manifest,
  blocks: Natural,
  parity: Natural): Future[?!Manifest] {.async.} =
  ## Encode a manifest into one that is erasure protected.
  ##
  ## `manifest` - the original manifest to be encoded
  ## `blocks`   - the number of blocks to be encoded - K
  ## `parity`   - the number of parity blocks to generate - M
  ##

  # derive the coding parameters, then delegate the heavy lifting
  without params =? EncodingParams.init(manifest, blocks.int, parity.int), err:
    return failure(err)

  return await self.encodeData(manifest, params)
|
|
|
proc decode*(
  self: Erasure,
  encoded: Manifest
): Future[?!Manifest] {.async.} =
  ## Decode a protected manifest into its original
  ## manifest
  ##
  ## `encoded` - the encoded (protected) manifest to
  ## be recovered
  ##
  ## Recovers any missing data blocks step by step, verifies the rebuilt
  ## tree root against the original, stores proofs for recovered blocks,
  ## and returns the unprotected manifest.
  ##

  logScope:
    steps = encoded.steps
    rounded_blocks = encoded.rounded
    new_manifest = encoded.blocksCount

  var
    cids = seq[Cid].new()
    recoveredIndices = newSeq[Natural]()
    decoder = self.decoderProvider(encoded.blockSize.int, encoded.ecK, encoded.ecM)
    emptyBlock = newSeq[byte](encoded.blockSize.int)

  cids[].setLen(encoded.blocksCount)
  try:
    for step in 0..<encoded.steps:
      # TODO: this is a tight blocking loop so we sleep here to allow
      # other events to be processed, this should be addressed
      # by threading
      await sleepAsync(10.millis)

      var
        data = seq[seq[byte]].new()
        parityData = seq[seq[byte]].new()
        recovered = newSeqWith[seq[byte]](encoded.ecK, newSeq[byte](encoded.blockSize.int))

      data[].setLen(encoded.ecK) # set len to K
      parityData[].setLen(encoded.ecM) # set len to M

      without (dataPieces, _) =?
        (await self.prepareDecodingData(encoded, step, data, parityData, cids, emptyBlock)), err:
        trace "Unable to prepare data", error = err.msg
        return failure(err)

      # if all K data pieces arrived intact, no decoding is needed for this step
      if dataPieces >= encoded.ecK:
        trace "Retrieved all the required data blocks"
        continue

      trace "Erasure decoding data"
      if (
        let err = decoder.decode(data[], parityData[], recovered);
        err.isErr):
        trace "Unable to decode data!", err = $err.error
        return failure($err.error)

      # persist only the blocks that were actually missing (empty slot in
      # `data` but a known non-empty cid)
      for i in 0..<encoded.ecK:
        let idx = i * encoded.steps + step
        if data[i].len <= 0 and not cids[idx].isEmpty:
          without blk =? bt.Block.new(recovered[i]), error:
            trace "Unable to create block!", exc = error.msg
            return failure(error)

          trace "Recovered block", cid = blk.cid, index = i
          if isErr (await self.store.putBlock(blk)):
            trace "Unable to store block!", cid = blk.cid
            return failure("Unable to store block!")

          cids[idx] = blk.cid
          recoveredIndices.add(idx)
  except CancelledError as exc:
    trace "Erasure coding decoding cancelled"
    raise exc # cancellation needs to be propagated
  except CatchableError as exc:
    trace "Erasure coding decoding error", exc = exc.msg
    return failure(exc)
  finally:
    decoder.release()

  # rebuild the tree over the original (unprotected) block range only
  without tree =? CodexTree.init(cids[0..<encoded.originalBlocksCount]), err:
    return failure(err)

  without treeCid =? tree.rootCid, err:
    return failure(err)

  if treeCid != encoded.originalTreeCid:
    return failure("Original tree root differs from the tree root computed out of recovered data")

  let idxIter = Iter
    .fromItems(recoveredIndices)
    .filter((i: Natural) => i < tree.leavesCount)

  if err =? (await self.store.putSomeProofs(tree, idxIter)).errorOption:
    return failure(err)

  let decoded = Manifest.new(encoded)

  return decoded.success
|
|
proc start*(self: Erasure) {.async.} =
  ## Lifecycle hook; no startup work is currently required.
  discard
|
|
|
|
proc stop*(self: Erasure) {.async.} =
  ## Lifecycle hook; no teardown work is currently required.
  discard
|
|
|
|
proc new*(
  T: type Erasure,
  store: BlockStore,
  encoderProvider: EncoderProvider,
  decoderProvider: DecoderProvider
): Erasure =
  ## Create a new Erasure instance for encoding and decoding manifests.
  ##
  ## `store`           - block store used for reading and persisting blocks
  ## `encoderProvider` - factory for encoder backends
  ## `decoderProvider` - factory for decoder backends
  result = T(
    store: store,
    encoderProvider: encoderProvider,
    decoderProvider: decoderProvider)