2022-04-05 13:12:59 -06:00
|
|
|
import std/sequtils
|
2023-11-14 10:53:06 -06:00
|
|
|
import std/sugar
|
2022-04-05 13:12:59 -06:00
|
|
|
|
|
|
|
import pkg/asynctest
|
|
|
|
import pkg/chronos
|
2023-08-21 12:51:04 +10:00
|
|
|
import pkg/datastore
|
2022-04-05 13:12:59 -06:00
|
|
|
import pkg/questionable/results
|
|
|
|
|
2022-05-19 14:56:03 -05:00
|
|
|
import pkg/codex/erasure
|
|
|
|
import pkg/codex/manifest
|
|
|
|
import pkg/codex/stores
|
|
|
|
import pkg/codex/blocktype as bt
|
|
|
|
import pkg/codex/rng
|
2023-11-14 10:53:06 -06:00
|
|
|
import pkg/codex/utils
|
2022-04-05 13:12:59 -06:00
|
|
|
|
|
|
|
import ./helpers
|
|
|
|
|
2023-06-22 12:01:21 -06:00
|
|
|
asyncchecksuite "Erasure encode/decode":
  const
    BlockSize = 64'nb            # block size used for every test dataset
    dataSetSize = BlockSize * 20 # weird geometry

  # Per-test state, (re)initialized in `setup`.
  var
    rng: Rng
    chunker: Chunker
    manifest: Manifest
    store: BlockStore
    erasure: Erasure
setup:
|
2023-11-14 13:02:17 +01:00
|
|
|
let
|
|
|
|
repoDs = SQLiteDatastore.new(Memory).tryGet()
|
|
|
|
metaDs = SQLiteDatastore.new(Memory).tryGet()
|
2022-05-10 14:10:17 +02:00
|
|
|
rng = Rng.instance()
|
|
|
|
chunker = RandomChunker.new(rng, size = dataSetSize, chunkSize = BlockSize)
|
2023-08-21 12:51:04 +10:00
|
|
|
store = RepoStore.new(repoDs, metaDs)
|
2022-05-10 14:10:17 +02:00
|
|
|
erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider)
|
2023-11-14 13:02:17 +01:00
|
|
|
manifest = await storeDataGetManifest(store, chunker)
|
2022-04-05 13:12:59 -06:00
|
|
|
|
2023-11-15 13:20:51 +01:00
|
|
|
proc encode(buffers, parity: int, interleave: int = 0,
|
|
|
|
manifest: Manifest = manifest): Future[Manifest] {.async.} =
|
2022-04-05 13:12:59 -06:00
|
|
|
let
|
|
|
|
encoded = (await erasure.encode(
|
|
|
|
manifest,
|
|
|
|
buffers,
|
2023-11-15 13:20:51 +01:00
|
|
|
parity, interleave)).tryGet()
|
2022-04-05 13:12:59 -06:00
|
|
|
|
|
|
|
check:
|
2023-11-14 13:02:17 +01:00
|
|
|
encoded.blocksCount mod (buffers + parity) == 0
|
2023-11-15 13:20:51 +01:00
|
|
|
#encoded.rounded == (manifest.blocksCount + (buffers - (manifest.blocksCount mod buffers)))
|
|
|
|
encoded.steps == (encoded.rounded - 1) div (buffers * encoded.interleave) + 1
|
2022-04-05 13:12:59 -06:00
|
|
|
|
2022-05-10 14:10:17 +02:00
|
|
|
return encoded
|
|
|
|
|
2022-11-14 08:50:00 -05:00
|
|
|
test "Should tolerate losing M data blocks in a single random column":
|
2022-05-10 14:10:17 +02:00
|
|
|
const
|
|
|
|
buffers = 20
|
|
|
|
parity = 10
|
|
|
|
|
|
|
|
let encoded = await encode(buffers, parity)
|
|
|
|
|
2022-04-05 13:12:59 -06:00
|
|
|
var
|
2023-11-15 13:20:16 +01:00
|
|
|
column = rng.rand(encoded.interleave - 1) # random column
|
2023-11-14 13:02:17 +01:00
|
|
|
dropped: seq[int]
|
2022-04-05 13:12:59 -06:00
|
|
|
|
2023-03-10 08:02:54 +01:00
|
|
|
for _ in 0..<encoded.ecM:
|
2023-11-14 13:02:17 +01:00
|
|
|
dropped.add(column)
|
|
|
|
(await store.delBlock(encoded.treeCid, column)).tryGet()
|
|
|
|
(await store.delBlock(manifest.treeCid, column)).tryGet()
|
2023-11-15 13:20:16 +01:00
|
|
|
column.inc(encoded.interleave)
|
2022-04-05 13:12:59 -06:00
|
|
|
|
|
|
|
var
|
|
|
|
decoded = (await erasure.decode(encoded)).tryGet()
|
|
|
|
|
|
|
|
check:
|
2023-11-14 13:02:17 +01:00
|
|
|
decoded.treeCid == manifest.treeCid
|
|
|
|
decoded.treeCid == encoded.originalTreeCid
|
|
|
|
decoded.blocksCount == encoded.originalBlocksCount
|
2022-04-05 13:12:59 -06:00
|
|
|
|
|
|
|
for d in dropped:
|
2023-11-14 10:53:06 -06:00
|
|
|
if d < manifest.blocksCount: # we don't support returning parity blocks yet
|
|
|
|
let present = await store.hasBlock(manifest.treeCid, d)
|
|
|
|
check present.tryGet()
|
2022-04-05 13:12:59 -06:00
|
|
|
|
2022-11-14 08:50:00 -05:00
|
|
|
test "Should not tolerate losing more than M data blocks in a single random column":
|
2022-04-05 13:12:59 -06:00
|
|
|
const
|
|
|
|
buffers = 20
|
|
|
|
parity = 10
|
|
|
|
|
2022-05-10 14:10:17 +02:00
|
|
|
let encoded = await encode(buffers, parity)
|
2022-04-05 13:12:59 -06:00
|
|
|
|
|
|
|
var
|
2023-11-15 13:20:16 +01:00
|
|
|
column = rng.rand(encoded.interleave - 1) # random column
|
2023-11-14 13:02:17 +01:00
|
|
|
dropped: seq[int]
|
2022-04-05 13:12:59 -06:00
|
|
|
|
2023-03-10 08:02:54 +01:00
|
|
|
for _ in 0..<encoded.ecM + 1:
|
2023-11-14 13:02:17 +01:00
|
|
|
dropped.add(column)
|
|
|
|
(await store.delBlock(encoded.treeCid, column)).tryGet()
|
|
|
|
(await store.delBlock(manifest.treeCid, column)).tryGet()
|
2023-11-14 18:29:03 +01:00
|
|
|
column.inc(encoded.interleave)
|
2022-04-05 13:12:59 -06:00
|
|
|
|
|
|
|
var
|
|
|
|
decoded: Manifest
|
|
|
|
|
|
|
|
expect ResultFailure:
|
|
|
|
decoded = (await erasure.decode(encoded)).tryGet()
|
|
|
|
|
|
|
|
for d in dropped:
|
2023-11-14 13:02:17 +01:00
|
|
|
let present = await store.hasBlock(manifest.treeCid, d)
|
2022-07-28 03:39:17 +03:00
|
|
|
check not present.tryGet()
|
2022-04-05 13:12:59 -06:00
|
|
|
|
2022-11-14 08:50:00 -05:00
|
|
|
test "Should tolerate losing M data blocks in M random columns":
|
2022-04-05 13:12:59 -06:00
|
|
|
const
|
|
|
|
buffers = 20
|
|
|
|
parity = 10
|
|
|
|
|
2022-05-10 14:10:17 +02:00
|
|
|
let encoded = await encode(buffers, parity)
|
2022-04-05 13:12:59 -06:00
|
|
|
|
|
|
|
var
|
|
|
|
blocks: seq[int]
|
|
|
|
offset = 0
|
|
|
|
|
2023-11-14 18:29:03 +01:00
|
|
|
while offset < encoded.interleave - 1:
|
2022-04-05 13:12:59 -06:00
|
|
|
let
|
2023-11-14 18:29:03 +01:00
|
|
|
blockIdx = toSeq(countup(offset, encoded.blocksCount - 1, encoded.interleave))
|
2022-04-05 13:12:59 -06:00
|
|
|
|
2023-03-10 08:02:54 +01:00
|
|
|
for _ in 0..<encoded.ecM:
|
2022-04-05 13:12:59 -06:00
|
|
|
blocks.add(rng.sample(blockIdx, blocks))
|
|
|
|
offset.inc
|
|
|
|
|
|
|
|
for idx in blocks:
|
2023-11-14 13:02:17 +01:00
|
|
|
(await store.delBlock(encoded.treeCid, idx)).tryGet()
|
|
|
|
(await store.delBlock(manifest.treeCid, idx)).tryGet()
|
|
|
|
discard
|
2022-04-05 13:12:59 -06:00
|
|
|
|
2022-05-10 14:10:17 +02:00
|
|
|
discard (await erasure.decode(encoded)).tryGet()
|
2022-04-05 13:12:59 -06:00
|
|
|
|
2023-11-14 13:02:17 +01:00
|
|
|
for d in 0..<manifest.blocksCount:
|
|
|
|
let present = await store.hasBlock(manifest.treeCid, d)
|
2022-07-28 03:39:17 +03:00
|
|
|
check present.tryGet()
|
2022-04-05 13:12:59 -06:00
|
|
|
|
2022-11-14 08:50:00 -05:00
|
|
|
test "Should not tolerate losing more than M data blocks in M random columns":
|
2022-04-05 13:12:59 -06:00
|
|
|
const
|
|
|
|
buffers = 20
|
|
|
|
parity = 10
|
|
|
|
|
2022-05-10 14:10:17 +02:00
|
|
|
let encoded = await encode(buffers, parity)
|
2022-04-05 13:12:59 -06:00
|
|
|
|
|
|
|
var
|
|
|
|
blocks: seq[int]
|
|
|
|
offset = 0
|
|
|
|
|
2023-11-14 18:29:03 +01:00
|
|
|
while offset < encoded.interleave:
|
2022-04-05 13:12:59 -06:00
|
|
|
let
|
2023-11-14 18:29:03 +01:00
|
|
|
blockIdx = toSeq(countup(offset, encoded.blocksCount - 1, encoded.interleave))
|
2022-04-05 13:12:59 -06:00
|
|
|
|
2023-03-10 08:02:54 +01:00
|
|
|
for _ in 0..<encoded.ecM + 1: # NOTE: the +1
|
2022-04-07 17:08:43 -06:00
|
|
|
var idx: int
|
|
|
|
while true:
|
|
|
|
idx = rng.sample(blockIdx, blocks)
|
2023-11-14 13:02:17 +01:00
|
|
|
let blk = (await store.getBlock(encoded.treeCid, idx)).tryGet()
|
|
|
|
if not blk.isEmpty:
|
2022-04-07 17:08:43 -06:00
|
|
|
break
|
|
|
|
|
|
|
|
blocks.add(idx)
|
2022-04-05 13:12:59 -06:00
|
|
|
offset.inc
|
|
|
|
|
|
|
|
for idx in blocks:
|
2023-11-14 13:02:17 +01:00
|
|
|
(await store.delBlock(encoded.treeCid, idx)).tryGet()
|
|
|
|
(await store.delBlock(manifest.treeCid, idx)).tryGet()
|
|
|
|
discard
|
2022-04-05 13:12:59 -06:00
|
|
|
|
|
|
|
var
|
|
|
|
decoded: Manifest
|
|
|
|
|
|
|
|
expect ResultFailure:
|
|
|
|
decoded = (await erasure.decode(encoded)).tryGet()
|
|
|
|
|
2022-11-14 08:50:00 -05:00
|
|
|
test "Should tolerate losing M (a.k.a row) contiguous data blocks":
|
2022-04-05 13:12:59 -06:00
|
|
|
const
|
|
|
|
buffers = 20
|
|
|
|
parity = 10
|
|
|
|
|
2022-05-10 14:10:17 +02:00
|
|
|
let encoded = await encode(buffers, parity)
|
2022-04-05 13:12:59 -06:00
|
|
|
|
2023-11-14 10:53:06 -06:00
|
|
|
# loose M original (systematic) symbols/blocks
|
2023-11-14 18:29:03 +01:00
|
|
|
for b in 0..<(encoded.interleave * encoded.ecM):
|
2023-11-14 13:02:17 +01:00
|
|
|
(await store.delBlock(encoded.treeCid, b)).tryGet()
|
|
|
|
(await store.delBlock(manifest.treeCid, b)).tryGet()
|
2022-04-05 13:12:59 -06:00
|
|
|
|
2022-05-10 14:10:17 +02:00
|
|
|
discard (await erasure.decode(encoded)).tryGet()
|
2022-04-05 13:12:59 -06:00
|
|
|
|
2023-11-14 13:02:17 +01:00
|
|
|
for d in 0..<manifest.blocksCount:
|
|
|
|
let present = await store.hasBlock(manifest.treeCid, d)
|
2022-07-28 03:39:17 +03:00
|
|
|
check present.tryGet()
|
2022-04-05 13:12:59 -06:00
|
|
|
|
2022-11-14 08:50:00 -05:00
|
|
|
test "Should tolerate losing M (a.k.a row) contiguous parity blocks":
|
2022-04-05 13:12:59 -06:00
|
|
|
const
|
|
|
|
buffers = 20
|
|
|
|
parity = 10
|
|
|
|
|
2023-11-14 10:53:06 -06:00
|
|
|
let
|
|
|
|
encoded = await encode(buffers, parity)
|
|
|
|
blocks = collect:
|
|
|
|
for i in 0..encoded.blocksCount:
|
|
|
|
i
|
2022-04-05 13:12:59 -06:00
|
|
|
|
2023-11-14 10:53:06 -06:00
|
|
|
# loose M parity (all!) symbols/blocks from the dataset
|
2023-11-14 18:29:03 +01:00
|
|
|
for b in blocks[^(encoded.interleave * encoded.ecM)..^1]:
|
2023-11-14 13:02:17 +01:00
|
|
|
(await store.delBlock(encoded.treeCid, b)).tryGet()
|
|
|
|
(await store.delBlock(manifest.treeCid, b)).tryGet()
|
2022-04-05 13:12:59 -06:00
|
|
|
|
2022-05-10 14:10:17 +02:00
|
|
|
discard (await erasure.decode(encoded)).tryGet()
|
2022-04-05 13:12:59 -06:00
|
|
|
|
2023-11-14 13:02:17 +01:00
|
|
|
for d in 0..<manifest.blocksCount:
|
|
|
|
let present = await store.hasBlock(manifest.treeCid, d)
|
2022-07-28 03:39:17 +03:00
|
|
|
check present.tryGet()
|
2022-05-10 13:50:22 +02:00
|
|
|
|
|
|
|
test "handles edge case of 0 parity blocks":
|
|
|
|
const
|
|
|
|
buffers = 20
|
|
|
|
parity = 0
|
|
|
|
|
2022-05-10 14:10:17 +02:00
|
|
|
let encoded = await encode(buffers, parity)
|
2022-05-10 13:50:22 +02:00
|
|
|
|
2022-05-10 14:10:17 +02:00
|
|
|
discard (await erasure.decode(encoded)).tryGet()
|
2023-11-15 13:20:51 +01:00
|
|
|
|
|
|
|
test "Encode without interleaving (horizontal): Should tolerate losing M data blocks in a single random row":
|
|
|
|
const
|
|
|
|
buffers = 20
|
|
|
|
parity = 10
|
|
|
|
interleave = 1
|
|
|
|
|
|
|
|
let encoded = await encode(buffers, parity, interleave)
|
|
|
|
|
|
|
|
var
|
|
|
|
idx = rng.rand(encoded.steps - 1) # random row
|
|
|
|
dropped: seq[int]
|
|
|
|
|
|
|
|
for _ in 0..<encoded.ecM:
|
|
|
|
dropped.add(idx)
|
|
|
|
(await store.delBlock(encoded.treeCid, idx)).tryGet()
|
|
|
|
(await store.delBlock(manifest.treeCid, idx)).tryGet()
|
|
|
|
idx.inc(encoded.interleave)
|
|
|
|
|
|
|
|
var
|
|
|
|
decoded = (await erasure.decode(encoded)).tryGet()
|
|
|
|
|
|
|
|
check:
|
|
|
|
decoded.treeCid == manifest.treeCid
|
|
|
|
decoded.treeCid == encoded.originalTreeCid
|
|
|
|
decoded.blocksCount == encoded.originalBlocksCount
|
|
|
|
|
|
|
|
for d in dropped:
|
|
|
|
let present = await store.hasBlock(manifest.treeCid, d)
|
|
|
|
check present.tryGet()
|
|
|
|
|
|
|
|
test "Encode without interleaving (horizontal): Should not tolerate losing M+1 data blocks in a single random row":
|
|
|
|
const
|
|
|
|
buffers = 20
|
|
|
|
parity = 10
|
|
|
|
interleave = 1
|
|
|
|
|
|
|
|
let encoded = await encode(buffers, parity, interleave)
|
|
|
|
|
|
|
|
var
|
|
|
|
idx = rng.rand(encoded.steps - 1) # random row
|
|
|
|
dropped: seq[int]
|
|
|
|
|
|
|
|
for _ in 0..<encoded.ecM + 1:
|
|
|
|
dropped.add(idx)
|
|
|
|
(await store.delBlock(encoded.treeCid, idx)).tryGet()
|
|
|
|
(await store.delBlock(manifest.treeCid, idx)).tryGet()
|
|
|
|
idx.inc(encoded.interleave)
|
|
|
|
|
|
|
|
var
|
|
|
|
decoded: Manifest
|
|
|
|
|
|
|
|
expect ResultFailure:
|
|
|
|
decoded = (await erasure.decode(encoded)).tryGet()
|
|
|
|
|
|
|
|
for d in dropped:
|
|
|
|
let present = await store.hasBlock(manifest.treeCid, d)
|
|
|
|
check not present.tryGet()
|
|
|
|
|
|
|
|
|
|
|
|
test "2D encode: Should tolerate losing M data blocks in a single random row":
|
|
|
|
const
|
|
|
|
k1 = 7
|
|
|
|
m1 = 3
|
|
|
|
i1 = 1
|
|
|
|
k2 = 5
|
|
|
|
m2 = 2
|
|
|
|
i2 = k1 + m1
|
|
|
|
|
|
|
|
let
|
|
|
|
encoded1 = await encode(k1, m1, i1)
|
|
|
|
encoded2 = await encode(k2, m2, i2, encoded1)
|
|
|
|
|
|
|
|
var
|
|
|
|
idx = rng.rand(encoded2.steps - 1) # random row
|
|
|
|
dropped: seq[int]
|
|
|
|
|
|
|
|
for _ in 0..<encoded2.ecM:
|
|
|
|
dropped.add(idx)
|
|
|
|
(await store.delBlock(encoded2.treeCid, idx)).tryGet()
|
|
|
|
idx.inc(encoded2.interleave)
|
|
|
|
|
|
|
|
var
|
|
|
|
decoded1 = (await erasure.decode(encoded2)).tryGet()
|
|
|
|
decoded = (await erasure.decode(decoded1)).tryGet()
|
|
|
|
|
|
|
|
check:
|
|
|
|
decoded.treeCid == manifest.treeCid
|
|
|
|
decoded.treeCid == encoded1.originalTreeCid
|
|
|
|
decoded.blocksCount == encoded1.originalBlocksCount
|
|
|
|
|
|
|
|
for d in dropped:
|
|
|
|
let present = await store.hasBlock(manifest.treeCid, d)
|
|
|
|
check present.tryGet()
|
2023-11-15 15:07:57 +01:00
|
|
|
|
|
|
|
test "3D encode: Should tolerate losing M data blocks in a single random row":
|
|
|
|
const
|
|
|
|
k1 = 7
|
|
|
|
m1 = 3
|
|
|
|
i1 = 1
|
|
|
|
k2 = 5
|
|
|
|
m2 = 2
|
2023-11-22 14:22:56 +01:00
|
|
|
i2 = i1 * (k1 + m1)
|
2023-11-15 15:07:57 +01:00
|
|
|
k3 = 3
|
2023-11-22 14:22:56 +01:00
|
|
|
m3 = 2
|
|
|
|
i3 = i2 * (k2 + m2)
|
2023-11-15 15:07:57 +01:00
|
|
|
|
|
|
|
let
|
|
|
|
encoded1 = await encode(k1, m1, i1)
|
|
|
|
encoded2 = await encode(k2, m2, i2, encoded1)
|
|
|
|
encoded3 = await encode(k3, m3, i3, encoded2)
|
|
|
|
|
|
|
|
var
|
|
|
|
idx = rng.rand(encoded3.steps - 1) # random row
|
|
|
|
dropped: seq[int]
|
|
|
|
|
|
|
|
for _ in 0..<encoded3.ecM:
|
|
|
|
dropped.add(idx)
|
|
|
|
(await store.delBlock(encoded3.treeCid, idx)).tryGet()
|
|
|
|
idx.inc(encoded3.interleave)
|
|
|
|
|
|
|
|
var
|
|
|
|
decoded2 = (await erasure.decode(encoded3)).tryGet()
|
|
|
|
decoded1 = (await erasure.decode(decoded2)).tryGet()
|
|
|
|
decoded = (await erasure.decode(decoded1)).tryGet()
|
|
|
|
|
|
|
|
check:
|
|
|
|
decoded.treeCid == manifest.treeCid
|
|
|
|
decoded.treeCid == encoded1.originalTreeCid
|
|
|
|
decoded.blocksCount == encoded1.originalBlocksCount
|
|
|
|
|
2023-11-22 14:22:56 +01:00
|
|
|
# Checking after decoding one layer.
|
2023-11-15 15:07:57 +01:00
|
|
|
for d in dropped:
|
2023-11-22 14:22:56 +01:00
|
|
|
if not encoded3.isParity(d) and not encoded3.isPadding(d): # Parity blocks are not restored
|
|
|
|
let present = await store.hasBlock(encoded2.treeCid, encoded3.oldIndex(d))
|
|
|
|
echo present
|
|
|
|
check present.tryGet()
|
2023-11-16 11:52:02 +01:00
|
|
|
|
|
|
|
test "3D encode: test multi-dimensional API":
|
|
|
|
const
|
|
|
|
encoding = @[(7, 3),(5, 2),(3, 1)]
|
|
|
|
|
|
|
|
let
|
|
|
|
encoded = (await erasure.encodeMulti(manifest, encoding)).tryGet()
|
|
|
|
decoded = (await erasure.decodeMulti(encoded)).tryGet()
|
|
|
|
|
|
|
|
check:
|
|
|
|
decoded.treeCid == manifest.treeCid
|
|
|
|
decoded.blocksCount == encoded.unprotectedBlocksCount
|
|
|
|
|
|
|
|
test "3D encode: test multi-dimensional API with drop":
|
|
|
|
const
|
2023-11-22 14:18:43 +01:00
|
|
|
encoding = @[(7, 3),(5, 2),(3, 2)]
|
2023-11-16 11:52:02 +01:00
|
|
|
|
|
|
|
let encoded = (await erasure.encodeMulti(manifest, encoding)).tryGet()
|
|
|
|
|
|
|
|
var
|
|
|
|
idx = rng.rand(encoded.steps - 1) # random row
|
|
|
|
dropped: seq[int]
|
|
|
|
|
|
|
|
for _ in 0..<encoded.ecM:
|
|
|
|
dropped.add(idx)
|
|
|
|
(await store.delBlock(encoded.treeCid, idx)).tryGet()
|
|
|
|
idx.inc(encoded.interleave)
|
|
|
|
|
|
|
|
let decoded = (await erasure.decodeMulti(encoded)).tryGet()
|
|
|
|
|
|
|
|
check:
|
|
|
|
decoded.treeCid == manifest.treeCid
|
|
|
|
decoded.blocksCount == encoded.unprotectedBlocksCount
|