mirror of
https://github.com/status-im/nim-codex.git
synced 2025-01-22 00:30:37 +00:00
f25c555d59
* Move to version 2.0.6 * Update nim-confutils submodule to latest version * Update dependencies * Update Nim version to 2.0.12 * Add gcsafe pragma * Add missing import * Update specific conf for Nim 2.x * Fix method signatures * Revert erasure coding attempt to fix bug * More gcsafe pragma * Duplicate code from libp2p because it is not exported anymore * Fix camelcase function names * Use alreadySeen because need is not a bool anymore * newLPStreamReadError does not exist anymore so use another error * Replace ValidIpAddress by IpAddress * Add gcsafe pragma * Restore maintenance parameter deleted by mistake when removing esasure coding fix attempt code * Update method signatures * Copy LPStreamReadError code from libp2p which was removed * Fix camel case * Fix enums in tests * Fix camel case * Extract node components to a variable to make Nim 2 happy * Update the tests using ValidIpAddress to IpAddress * Fix cast for value which is already an option * Set nim version to 2.0.x for CI * Set nim version to 2.0.x for CI * Move to miniupnp version 2.2.4 to avoid symlink error * Set core.symlinks to false for Windows for miniupnp >= 2.2.5 support * Update to Nim 2.0.14 * Update CI nim versions to 2.0.14 * Try with GCC 14 * Replace apt-fast by apt-get * Update ubuntu runner to latest * Use Ubuntu 20.04 for coverage * Disable CI cache for coverage * Add coverage property description * Remove commented test * Check the node value of seen instead of using alreadySeen * Fix the merge. The taskpool work was reverted. * Update nim-ethers submodule * Remove deprecated ValidIpAddress. Fix missing case and imports. * Fix a weird issue where nim-confutils cannot find NatAny * Fix tests and remove useless static keyword
269 lines
7.5 KiB
Nim
269 lines
7.5 KiB
Nim
import std/sequtils
|
|
import std/sugar
|
|
|
|
import pkg/chronos
|
|
import pkg/questionable/results
|
|
|
|
import pkg/codex/erasure
|
|
import pkg/codex/manifest
|
|
import pkg/codex/stores
|
|
import pkg/codex/blocktype as bt
|
|
import pkg/codex/rng
|
|
import pkg/codex/utils
|
|
import pkg/codex/indexingstrategy
|
|
|
|
import ../asynctest
|
|
import ./helpers
|
|
import ./examples
|
|
|
|
suite "Erasure encode/decode":
  ## Round-trip tests for the leopard-backed erasure coder: encode a random
  ## dataset, destroy blocks in various patterns, and verify decodability.
  const
    BlockSize = 1024'nb
    dataSetSize = BlockSize * 123 # deliberately odd geometry

  var
    rng: Rng
    chunker: Chunker
    manifest: Manifest
    store: BlockStore
    erasure: Erasure

  let
    repoTmp = TempLevelDb.new()
    metaTmp = TempLevelDb.new()

  setup:
    # Fresh store + freshly chunked random dataset for every test.
    let
      repoDs = repoTmp.newDb()
      metaDs = metaTmp.newDb()
    rng = Rng.instance()
    chunker = RandomChunker.new(rng, size = dataSetSize, chunkSize = BlockSize)
    store = RepoStore.new(repoDs, metaDs)
    erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider)
    manifest = await storeDataGetManifest(store, chunker)

  teardown:
    await repoTmp.destroyDb()
    await metaTmp.destroyDb()
|
|
|
|
proc encode(buffers, parity: int): Future[Manifest] {.async.} =
|
|
let
|
|
encoded = (await erasure.encode(
|
|
manifest,
|
|
buffers.Natural,
|
|
parity.Natural)).tryGet()
|
|
|
|
check:
|
|
encoded.blocksCount mod (buffers + parity) == 0
|
|
encoded.rounded == roundUp(manifest.blocksCount, buffers)
|
|
encoded.steps == encoded.rounded div buffers
|
|
|
|
return encoded
|
|
|
|
test "Should tolerate losing M data blocks in a single random column":
|
|
const
|
|
buffers = 20
|
|
parity = 10
|
|
|
|
let encoded = await encode(buffers, parity)
|
|
|
|
var
|
|
column = rng.rand((encoded.blocksCount div encoded.steps) - 1) # random column
|
|
dropped: seq[int]
|
|
|
|
for _ in 0..<encoded.ecM:
|
|
dropped.add(column)
|
|
(await store.delBlock(encoded.treeCid, column)).tryGet()
|
|
(await store.delBlock(manifest.treeCid, column)).tryGet()
|
|
column = (column + encoded.steps) mod encoded.blocksCount # wrap around
|
|
|
|
var
|
|
decoded = (await erasure.decode(encoded)).tryGet()
|
|
|
|
check:
|
|
decoded.treeCid == manifest.treeCid
|
|
decoded.treeCid == encoded.originalTreeCid
|
|
decoded.blocksCount == encoded.originalBlocksCount
|
|
|
|
for d in dropped:
|
|
if d < manifest.blocksCount: # we don't support returning parity blocks yet
|
|
let present = await store.hasBlock(manifest.treeCid, d)
|
|
check present.tryGet()
|
|
|
|
test "Should not tolerate losing more than M data blocks in a single random column":
|
|
const
|
|
buffers = 20
|
|
parity = 10
|
|
|
|
let encoded = await encode(buffers, parity)
|
|
|
|
var
|
|
column = rng.rand((encoded.blocksCount div encoded.steps) - 1) # random column
|
|
dropped: seq[int]
|
|
|
|
for _ in 0..<encoded.ecM + 1:
|
|
dropped.add(column)
|
|
(await store.delBlock(encoded.treeCid, column)).tryGet()
|
|
(await store.delBlock(manifest.treeCid, column)).tryGet()
|
|
column = (column + encoded.steps) mod encoded.blocksCount # wrap around
|
|
|
|
var
|
|
decoded: Manifest
|
|
|
|
expect ResultFailure:
|
|
decoded = (await erasure.decode(encoded)).tryGet()
|
|
|
|
for d in dropped:
|
|
let present = await store.hasBlock(manifest.treeCid, d)
|
|
check not present.tryGet()
|
|
|
|
test "Should tolerate losing M data blocks in M random columns":
|
|
const
|
|
buffers = 20
|
|
parity = 10
|
|
|
|
let encoded = await encode(buffers, parity)
|
|
|
|
var
|
|
blocks: seq[int]
|
|
offset = 0
|
|
|
|
while offset < encoded.steps - 1:
|
|
let
|
|
blockIdx = toSeq(countup(offset, encoded.blocksCount - 1, encoded.steps))
|
|
|
|
for _ in 0..<encoded.ecM:
|
|
blocks.add(rng.sample(blockIdx, blocks))
|
|
offset.inc
|
|
|
|
for idx in blocks:
|
|
(await store.delBlock(encoded.treeCid, idx)).tryGet()
|
|
(await store.delBlock(manifest.treeCid, idx)).tryGet()
|
|
discard
|
|
|
|
discard (await erasure.decode(encoded)).tryGet()
|
|
|
|
for d in 0..<manifest.blocksCount:
|
|
let present = await store.hasBlock(manifest.treeCid, d)
|
|
check present.tryGet()
|
|
|
|
test "Should not tolerate losing more than M data blocks in M random columns":
|
|
const
|
|
buffers = 20
|
|
parity = 10
|
|
|
|
let encoded = await encode(buffers, parity)
|
|
|
|
var
|
|
blocks: seq[int]
|
|
offset = 0
|
|
|
|
while offset < encoded.steps:
|
|
let
|
|
blockIdx = toSeq(countup(offset, encoded.blocksCount - 1, encoded.steps))
|
|
|
|
for _ in 0..<encoded.ecM + 1: # NOTE: the +1
|
|
var idx: int
|
|
while true:
|
|
idx = rng.sample(blockIdx, blocks)
|
|
let blk = (await store.getBlock(encoded.treeCid, idx)).tryGet()
|
|
if not blk.isEmpty:
|
|
break
|
|
|
|
blocks.add(idx)
|
|
offset.inc
|
|
|
|
for idx in blocks:
|
|
(await store.delBlock(encoded.treeCid, idx)).tryGet()
|
|
(await store.delBlock(manifest.treeCid, idx)).tryGet()
|
|
discard
|
|
|
|
var
|
|
decoded: Manifest
|
|
|
|
expect ResultFailure:
|
|
decoded = (await erasure.decode(encoded)).tryGet()
|
|
|
|
test "Should tolerate losing M (a.k.a row) contiguous data blocks":
|
|
const
|
|
buffers = 20
|
|
parity = 10
|
|
|
|
let encoded = await encode(buffers, parity)
|
|
|
|
# loose M original (systematic) symbols/blocks
|
|
for b in 0..<(encoded.steps * encoded.ecM):
|
|
(await store.delBlock(encoded.treeCid, b)).tryGet()
|
|
(await store.delBlock(manifest.treeCid, b)).tryGet()
|
|
|
|
discard (await erasure.decode(encoded)).tryGet()
|
|
|
|
for d in 0..<manifest.blocksCount:
|
|
let present = await store.hasBlock(manifest.treeCid, d)
|
|
check present.tryGet()
|
|
|
|
test "Should tolerate losing M (a.k.a row) contiguous parity blocks":
|
|
const
|
|
buffers = 20
|
|
parity = 10
|
|
|
|
let
|
|
encoded = await encode(buffers, parity)
|
|
blocks = collect:
|
|
for i in 0..encoded.blocksCount:
|
|
i
|
|
|
|
# loose M parity (all!) symbols/blocks from the dataset
|
|
for b in blocks[^(encoded.steps * encoded.ecM)..^1]:
|
|
(await store.delBlock(encoded.treeCid, b)).tryGet()
|
|
(await store.delBlock(manifest.treeCid, b)).tryGet()
|
|
|
|
discard (await erasure.decode(encoded)).tryGet()
|
|
|
|
for d in 0..<manifest.blocksCount:
|
|
let present = await store.hasBlock(manifest.treeCid, d)
|
|
check present.tryGet()
|
|
|
|
test "handles edge case of 0 parity blocks":
|
|
const
|
|
buffers = 20
|
|
parity = 0
|
|
|
|
let encoded = await encode(buffers, parity)
|
|
|
|
discard (await erasure.decode(encoded)).tryGet()
|
|
|
|
test "Should handle verifiable manifests":
|
|
const
|
|
buffers = 20
|
|
parity = 10
|
|
|
|
let
|
|
encoded = await encode(buffers, parity)
|
|
slotCids = collect(newSeq):
|
|
for i in 0..<encoded.numSlots: Cid.example
|
|
|
|
verifiable = Manifest.new(encoded, Cid.example, slotCids).tryGet()
|
|
|
|
decoded = (await erasure.decode(verifiable)).tryGet()
|
|
|
|
check:
|
|
decoded.treeCid == manifest.treeCid
|
|
decoded.treeCid == verifiable.originalTreeCid
|
|
decoded.blocksCount == verifiable.originalBlocksCount
|
|
|
|
for i in 1..5:
|
|
test "Should encode/decode using various parameters " & $i & "/5":
|
|
let
|
|
blockSize = rng.sample(@[1, 2, 4, 8, 16, 32, 64].mapIt(it.KiBs))
|
|
datasetSize = 1.MiBs
|
|
ecK = 10.Natural
|
|
ecM = 10.Natural
|
|
|
|
let
|
|
chunker = RandomChunker.new(rng, size = datasetSize, chunkSize = blockSize)
|
|
manifest = await storeDataGetManifest(store, chunker)
|
|
encoded = (await erasure.encode(manifest, ecK, ecM)).tryGet()
|
|
decoded = (await erasure.decode(encoded)).tryGet()
|
|
|
|
check:
|
|
decoded.treeCid == manifest.treeCid
|
|
decoded.treeCid == encoded.originalTreeCid
|
|
decoded.blocksCount == encoded.originalBlocksCount
|