import std/sequtils
import std/sugar
import std/cpuinfo

import pkg/chronos
import pkg/datastore
import pkg/questionable/results

import pkg/codex/erasure
import pkg/codex/manifest
import pkg/codex/stores
import pkg/codex/blocktype as bt
import pkg/codex/rng
import pkg/codex/utils
import pkg/codex/indexingstrategy
import pkg/taskpools

import ../asynctest
import ./helpers

suite "Erasure encode/decode":
  const BlockSize = 1024'nb
  const dataSetSize = BlockSize * 123 # weird geometry

  var rng: Rng
  var chunker: Chunker
  var manifest: Manifest
  var store: BlockStore
  var erasure: Erasure
  var taskpool: Taskpool

  let repoTmp = TempLevelDb.new()
  let metaTmp = TempLevelDb.new()

  setup:
    let
      repoDs = repoTmp.newDb()
      metaDs = metaTmp.newDb()

    rng = Rng.instance()
    chunker = RandomChunker.new(rng, size = dataSetSize, chunkSize = BlockSize)
    store = RepoStore.new(repoDs, metaDs)
    taskpool = Taskpool.new(num_threads = countProcessors())
    erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider, taskpool)
    manifest = await storeDataGetManifest(store, chunker)

  teardown:
    await repoTmp.destroyDb()
    await metaTmp.destroyDb()

  proc encode(buffers, parity: int): Future[Manifest] {.async.} =
    let encoded = (await erasure.encode(
      manifest,
      buffers.Natural,
      parity.Natural)).tryGet()

    check:
      encoded.blocksCount mod (buffers + parity) == 0
      encoded.rounded == roundUp(manifest.blocksCount, buffers)
      encoded.steps == encoded.rounded div buffers

    return encoded

  test "Should tolerate losing M data blocks in a single random column":
    const
      buffers = 20
      parity = 10

    let encoded = await encode(buffers, parity)

    var
      column = rng.rand((encoded.blocksCount div encoded.steps) - 1) # random column
      dropped: seq[int]

    for _ in 0..