refactor: improve code formatting and consistency across multiple files

munna0908 2025-06-27 19:24:29 +05:30
parent c2bb9a1d13
commit a1d8adb190
No known key found for this signature in database
GPG Key ID: 2FFCD637E937D3E6
7 changed files with 263 additions and 205 deletions

View File

@ -1,8 +1,13 @@
--styleCheck:usages
--styleCheck:error
--threads:on
--tlsEmulation:off
# begin Nimble config (version 1)
when fileExists("nimble.paths"):
--styleCheck:
usages
--styleCheck:
error
--threads:
on
--tlsEmulation:
off
# begin Nimble config (version 2)
--noNimblePath
when withDir(thisDir(), system.fileExists("nimble.paths")):
include "nimble.paths"
# end Nimble config

View File

@ -19,14 +19,13 @@ import ./utils
export wrapper, results
const
BuffMultiples* = 64
const BuffMultiples* = 64
type
LeoBufferPtr* = ptr UncheckedArray[byte]
LeoCoderKind* {.pure.} = enum
Encoder,
Encoder
Decoder
Leo* = object of RootObj
@ -35,7 +34,8 @@ type
parity*: int # total number of parity buffers (M)
dataBufferPtr: seq[LeoBufferPtr] # buffer where data is copied before encoding
workBufferCount: int # number of parity work buffers
workBufferPtr: seq[LeoBufferPtr] # buffer where parity data is written during encoding or before decoding
workBufferPtr: seq[LeoBufferPtr]
# buffer where parity data is written during encoding or before decoding
case kind: LeoCoderKind
of LeoCoderKind.Decoder:
decodeBufferCount: int # number of decoding work buffers
@ -47,9 +47,8 @@ type
LeoDecoder* = object of Leo
func encode*(
self: var LeoEncoder,
data,
parity: var openArray[seq[byte]]): Result[void, cstring] =
self: var LeoEncoder, data, parity: var openArray[seq[byte]]
): Result[void, cstring] =
## Encode a list of buffers in `data` into a number of `bufSize` sized
## `parity` buffers
##
@ -71,14 +70,14 @@ func encode*(
for i in 0 ..< data.len:
copyMem(self.dataBufferPtr[i], addr data[i][0], self.bufSize)
let
res = leoEncode(
let res = leoEncode(
self.bufSize.culonglong,
self.buffers.cuint,
self.parity.cuint,
self.workBufferCount.cuint,
cast[LeoDataPtr](addr self.dataBufferPtr[0]),
cast[ptr pointer](addr self.workBufferPtr[0]))
cast[ptr pointer](addr self.workBufferPtr[0]),
)
if ord(res) != ord(LeopardSuccess):
return err(leoResultString(res.LeopardResult))
@ -89,10 +88,8 @@ func encode*(
return ok()
func decode*(
self: var LeoDecoder,
data,
parity,
recovered: var openArray[seq[byte]]): Result[void, cstring] =
self: var LeoDecoder, data, parity, recovered: var openArray[seq[byte]]
): Result[void, cstring] =
## Decode a list of buffers in `data` and `parity` into a list
## of `recovered` buffers of `bufSize`. The list of `recovered`
## buffers should match the `Leo.buffers`
@ -142,15 +139,15 @@ func decode*(
else:
parityPtr[i] = nil
let
res = leoDecode(
let res = leoDecode(
self.bufSize.culonglong,
self.buffers.cuint,
self.parity.cuint,
self.decodeBufferCount.cuint,
cast[LeoDataPtr](addr dataPtr[0]),
cast[LeoDataPtr](addr parityPtr[0]),
cast[ptr pointer](addr self.decodeBufferPtr[0]))
cast[ptr pointer](addr self.decodeBufferPtr[0]),
)
if ord(res) != ord(LeopardSuccess):
return err(leoResultString(res.LeopardResult))
@ -195,11 +192,8 @@ func free*(self: var Leo) =
# self.free()
proc init[TT: Leo](
T: type TT,
bufSize,
buffers,
parity: int,
kind: LeoCoderKind): Result[T, cstring] =
T: type TT, bufSize, buffers, parity: int, kind: LeoCoderKind
): Result[T, cstring] =
if bufSize mod BuffMultiples != 0:
return err("bufSize should be multiples of 64 bytes!")
@ -221,16 +215,9 @@ proc init[TT: Leo](
if (let res = leoInit(); res.ord != LeopardSuccess.ord):
return err(leoResultString(res.LeopardResult))
var
self = T(
kind: kind,
bufSize: bufSize,
buffers: buffers,
parity: parity)
var self = T(kind: kind, bufSize: bufSize, buffers: buffers, parity: parity)
self.workBufferCount = leoEncodeWorkCount(
buffers.cuint,
parity.cuint).int
self.workBufferCount = leoEncodeWorkCount(buffers.cuint, parity.cuint).int
# initialize encode work buffers
for _ in 0 ..< self.workBufferCount:
@ -241,9 +228,7 @@ proc init[TT: Leo](
self.dataBufferPtr.add(cast[LeoBufferPtr](self.bufSize.leoAlloc()))
if self.kind == LeoCoderKind.Decoder:
self.decodeBufferCount = leoDecodeWorkCount(
buffers.cuint,
parity.cuint).int
self.decodeBufferCount = leoDecodeWorkCount(buffers.cuint, parity.cuint).int
# initialize decode work buffers
for _ in 0 ..< self.decodeBufferCount:
@ -252,15 +237,11 @@ proc init[TT: Leo](
ok(self)
proc init*(
T: type LeoEncoder,
bufSize,
buffers,
parity: int): Result[LeoEncoder, cstring] =
T: type LeoEncoder, bufSize, buffers, parity: int
): Result[LeoEncoder, cstring] =
LeoEncoder.init(bufSize, buffers, parity, LeoCoderKind.Encoder)
proc init*(
T: type LeoDecoder,
bufSize,
buffers,
parity: int): Result[LeoDecoder, cstring] =
T: type LeoDecoder, bufSize, buffers, parity: int
): Result[LeoDecoder, cstring] =
LeoDecoder.init(bufSize, buffers, parity, LeoCoderKind.Decoder)
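
Taken together, the reformatted signatures above are the library's whole round-trip API. A minimal usage sketch, assuming the package is imported as pkg/leopard (the exact import path is not part of this diff) and that the caller pre-allocates the parity and recovered buffers at bufSize bytes each, as the tests further down do:

import pkg/leopard # assumed import path; Result/tryGet come from the re-exported results module

const
  BufSize = 4096 # must be a multiple of BuffMultiples (64)
  Buffers = 4    # data buffers (K)
  Parity = 2     # parity buffers (M)

var
  encoder = LeoEncoder.init(BufSize, Buffers, Parity).tryGet()
  decoder = LeoDecoder.init(BufSize, Buffers, Parity).tryGet()
  data = newSeq[seq[byte]](Buffers)
  parity = newSeq[seq[byte]](Parity)
  recovered = newSeq[seq[byte]](Buffers)

for i in 0 ..< Buffers:
  data[i] = newSeq[byte](BufSize)      # fill with real payload in practice
  recovered[i] = newSeq[byte](BufSize)
for i in 0 ..< Parity:
  parity[i] = newSeq[byte](BufSize)

encoder.encode(data, parity).tryGet()

data[0].setLen(0)                      # simulate a lost data buffer (up to Parity losses)
decoder.decode(data, parity, recovered).tryGet()
# recovered[0] now holds the contents of the dropped buffer

encoder.free()
decoder.free()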

View File

@ -8,7 +8,8 @@
## those terms.
import pkg/upraises
push: {.upraises: [].}
push:
{.upraises: [].}
{.deadCodeElim: on.}
@ -25,43 +26,44 @@ else:
let LeoAlignBytes* = 16'u
when defined(windows):
proc alignedAllocWindows(size, alignment: csize_t): pointer
{.importc: "_aligned_malloc", header: "<malloc.h>".}
proc alignedAllocWindows(
size, alignment: csize_t
): pointer {.importc: "_aligned_malloc", header: "<malloc.h>".}
# Beware of the arg order!
proc alignedAlloc(alignment, size: csize_t): pointer =
alignedAllocWindows(size, alignment)
proc alignedFree*[T](p: ptr T)
{.importc: "_aligned_free", header: "<malloc.h>".}
proc alignedFree*[T](p: ptr T) {.importc: "_aligned_free", header: "<malloc.h>".}
elif defined(osx):
proc posix_memalign(mem: var pointer, alignment, size: csize_t)
{.importc, header:"<stdlib.h>".}
proc posix_memalign(
mem: var pointer, alignment, size: csize_t
) {.importc, header: "<stdlib.h>".}
proc alignedAlloc(alignment, size: csize_t): pointer {.inline.} =
posix_memalign(result, alignment, size)
proc alignedFree*[T](p: ptr T) {.inline.} =
c_free(p)
elif defined(unix):
proc alignedAlloc(alignment, size: csize_t): pointer
{.importc: "aligned_alloc", header: "<stdlib.h>".}
proc alignedAlloc(
alignment, size: csize_t
): pointer {.importc: "aligned_alloc", header: "<stdlib.h>".}
proc alignedFree*[T](p: ptr T) {.inline.} =
c_free(p)
else:
{.warning: "Falling back to manual pointer alignment, this is highly inefficient!".}
proc alignedAlloc*(size, align: Positive): pointer {.inline.} =
var
data = c_malloc(align + size)
var data = c_malloc(align + size)
if not isNil(data):
var
doffset = cast[uint](data) mod align
var doffset = cast[uint](data) mod align
data = data.offset((align + doffset).int)
var
offsetPtr = cast[pointer](cast[uint](data) - 1'u)
var offsetPtr = cast[pointer](cast[uint](data) - 1'u)
moveMem(offsetPtr, addr doffset, sizeof(doffset))
return data
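
The fallback branch above is the classic manual-alignment trick: over-allocate by the alignment, move the pointer to an aligned address inside the block, and stash the byte offset just in front of the returned pointer so the matching free can recover the original allocation. A self-contained sketch of that technique (illustrative only, not the library's code; the hypothetical procs below store the offset in a single byte, which is enough because LeoAlignBytes is 16):

proc alignedAllocSketch(align, size: Natural): pointer =
  ## Over-allocate, round to an aligned address, and record the offset
  ## one byte before the returned block.
  let raw = alloc(size + align)
  if raw.isNil:
    return nil
  let past = cast[uint](raw) + align.uint      # an address strictly past `raw`
  let aligned = past - (past mod align.uint)   # round down to a multiple of `align`
  let offset = aligned - cast[uint](raw)       # always in 1 .. align, so it fits in a byte
  cast[ptr byte](aligned - 1)[] = offset.byte
  cast[pointer](aligned)

proc alignedFreeSketch(p: pointer) =
  ## Read the stashed offset back and free the original allocation.
  let offset = cast[ptr byte](cast[uint](p) - 1)[]
  dealloc(cast[pointer](cast[uint](p) - offset.uint))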

View File

@ -17,8 +17,10 @@ push: {.upraises: [].}
proc cpuidX86(eaxi, ecxi: int32): tuple[eax, ebx, ecx, edx: int32] {.used.} =
when defined(vcc):
# limited inline asm support in vcc, so intrinsics, here we go:
proc cpuidVcc(cpuInfo: ptr int32; functionID, subFunctionID: int32)
{.cdecl, importc: "__cpuidex", header: "intrin.h".}
proc cpuidVcc(
cpuInfo: ptr int32, functionID, subFunctionID: int32
) {.cdecl, importc: "__cpuidex", header: "intrin.h".}
cpuidVcc(addr result.eax, eaxi, ecxi)
else:
var (eaxr, ebxr, ecxr, edxr) = (0'i32, 0'i32, 0'i32, 0'i32)
@ -32,20 +34,77 @@ proc cpuNameX86(): string {.used.}=
var leaves {.global.} = cast[array[48, char]]([
cpuidX86(eaxi = 0x80000002'i32, ecxi = 0),
cpuidX86(eaxi = 0x80000003'i32, ecxi = 0),
cpuidX86(eaxi = 0x80000004'i32, ecxi = 0)])
cpuidX86(eaxi = 0x80000004'i32, ecxi = 0),
])
result = $cast[cstring](addr leaves[0])
type
X86Feature {.pure.} = enum
HypervisorPresence, Hyperthreading, NoSMT, IntelVtx, Amdv, X87fpu, Mmx,
MmxExt, F3DNow, F3DNowEnhanced, Prefetch, Sse, Sse2, Sse3, Ssse3, Sse4a,
Sse41, Sse42, Avx, Avx2, Avx512f, Avx512dq, Avx512ifma, Avx512pf,
Avx512er, Avx512cd, Avx512bw, Avx512vl, Avx512vbmi, Avx512vbmi2,
Avx512vpopcntdq, Avx512vnni, Avx512vnniw4, Avx512fmaps4, Avx512bitalg,
Avx512bfloat16, Avx512vp2intersect, Rdrand, Rdseed, MovBigEndian, Popcnt,
Fma3, Fma4, Xop, Cas8B, Cas16B, Abm, Bmi1, Bmi2, TsxHle, TsxRtm, Adx, Sgx,
Gfni, Aes, Vaes, Vpclmulqdq, Pclmulqdq, NxBit, Float16c, Sha, Clflush,
ClflushOpt, Clwb, PrefetchWT1, Mpx
type X86Feature {.pure.} = enum
HypervisorPresence
Hyperthreading
NoSMT
IntelVtx
Amdv
X87fpu
Mmx
MmxExt
F3DNow
F3DNowEnhanced
Prefetch
Sse
Sse2
Sse3
Ssse3
Sse4a
Sse41
Sse42
Avx
Avx2
Avx512f
Avx512dq
Avx512ifma
Avx512pf
Avx512er
Avx512cd
Avx512bw
Avx512vl
Avx512vbmi
Avx512vbmi2
Avx512vpopcntdq
Avx512vnni
Avx512vnniw4
Avx512fmaps4
Avx512bitalg
Avx512bfloat16
Avx512vp2intersect
Rdrand
Rdseed
MovBigEndian
Popcnt
Fma3
Fma4
Xop
Cas8B
Cas16B
Abm
Bmi1
Bmi2
TsxHle
TsxRtm
Adx
Sgx
Gfni
Aes
Vaes
Vpclmulqdq
Pclmulqdq
NxBit
Float16c
Sha
Clflush
ClflushOpt
Clwb
PrefetchWT1
Mpx
let
leaf1 = cpuidX86(eaxi = 1, ecxi = 0)
@ -62,7 +121,8 @@ proc testX86Feature(feature: X86Feature): bool =
# see: https://en.wikipedia.org/wiki/CPUID#Calling_CPUID
# see: Intel® Architecture Instruction Set Extensions and Future Features
# Programming Reference
result = case feature
result =
case feature
# leaf 1, edx
of X87fpu:
leaf1.edx.test(0)
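
The pattern above is the whole detection mechanism: pick a CPUID leaf, read the documented register, and test the documented bit. A tiny standalone sketch of the same idea (hasSse2Sketch is a hypothetical helper; bit 26 of leaf 1's EDX is the SSE2 flag):

proc hasSse2Sketch(): bool =
  ## CPUID leaf 1: EDX bit 26 advertises SSE2.
  let leaf1 = cpuidX86(eaxi = 1, ecxi = 0)
  (leaf1.edx and (1'i32 shl 26)) != 0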

View File

@ -24,7 +24,6 @@
## ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
## POSSIBILITY OF SUCH DAMAGE.
## Leopard-RS
## MDS Reed-Solomon Erasure Correction Codes for Large Data in C
##
@ -56,9 +55,9 @@
## arithmetic using Intel SIMD instructions." In: FAST-2013: 11th Usenix
## Conference on File and Storage Technologies, San Jose, 2013
import pkg/upraises
push: {.upraises: [].}
push:
{.upraises: [].}
## -----------------------------------------------------------------------------
## Build configuration
@ -67,8 +66,7 @@ import std/compilesettings
import std/os
import std/strutils
type
LeoDataPtr* {.importc: "const void* const*", bycopy.} = pointer
type LeoDataPtr* {.importc: "const void* const*", bycopy.} = pointer
const
LeopardCmakeFlags {.strdefine.} =
@ -121,12 +119,15 @@ static:
let
buildDirUnix = buildDir.pathWin2Unix
leopardDirUnix = LeopardDir.pathWin2Unix
if defined(LeopardRebuild): discard bash("rm -rf", buildDirUnix)
if defined(LeopardRebuild):
discard bash("rm -rf", buildDirUnix)
if (bashEx("ls", LeopardLib.pathWin2Unix)).exitCode != 0:
discard bash("mkdir -p", buildDirUnix)
let cmd =
@["cd", buildDirUnix, "&& cmake", leopardDirUnix, LeopardCmakeFlags,
"&& make libleopard"]
@[
"cd", buildDirUnix, "&& cmake", leopardDirUnix, LeopardCmakeFlags,
"&& make libleopard",
]
echo "\nBuilding Leopard-RS: " & cmd.join(" ")
let (output, exitCode) = bashEx cmd
echo output
@ -134,7 +135,8 @@ static:
discard bash("rm -rf", buildDirUnix)
raise (ref Defect)(msg: "Failed to build Leopard-RS")
else:
if defined(LeopardRebuild): discard gorge "rm -rf " & buildDir
if defined(LeopardRebuild):
discard gorge "rm -rf " & buildDir
if gorgeEx("ls " & LeopardLib).exitCode != 0:
discard gorge "mkdir -p " & buildDir
let cmd =
@ -159,21 +161,22 @@ proc leoInit*(): cint {.leo, importcpp: "leo_init".}
## Results
# TODO: For some reason it's only possible to use the enum with `ord`
type
LeopardResult* = enum
LeopardCallInitialize = -7, ## Call leo_init() first
LeopardPlatform = -6, ## Platform is unsupported
LeopardInvalidInput = -5, ## A function parameter was invalid
LeopardInvalidCounts = -4, ## Invalid counts provided
LeopardInvalidSize = -3, ## Buffer size must be a multiple of 64 bytes
LeopardTooMuchData = -2, ## Buffer counts are too high
LeopardNeedMoreData = -1, ## Not enough recovery data received
type LeopardResult* = enum
LeopardCallInitialize = -7 ## Call leo_init() first
LeopardPlatform = -6 ## Platform is unsupported
LeopardInvalidInput = -5 ## A function parameter was invalid
LeopardInvalidCounts = -4 ## Invalid counts provided
LeopardInvalidSize = -3 ## Buffer size must be a multiple of 64 bytes
LeopardTooMuchData = -2 ## Buffer counts are too high
LeopardNeedMoreData = -1 ## Not enough recovery data received
LeopardSuccess = 0 ## Operation succeeded
## Convert Leopard result to string
proc leoResultString*(result: LeopardResult): cstring {.leo, importc: "leo_result_string".}
proc leoResultString*(
result: LeopardResult
): cstring {.leo, importc: "leo_result_string".}
## ------------------------------------------------------------------------------
## Encoder API
##
@ -187,8 +190,10 @@ proc leoResultString*(result: LeopardResult): cstring {.leo, importc: "leo_resul
## Returns 0 on invalid input.
##
proc leoEncodeWorkCount*(originalCount: cuint; recoveryCount: cuint): cuint
{.leo, importc: "leo_encode_work_count".}
proc leoEncodeWorkCount*(
originalCount: cuint, recoveryCount: cuint
): cuint {.leo, importc: "leo_encode_work_count".}
##
## leo_encode()
##
@ -224,12 +229,13 @@ proc leoEncodeWorkCount*(originalCount: cuint; recoveryCount: cuint): cuint
##
proc leoEncode*(
bufferBytes: uint64;
originalCount: cuint;
recoveryCount: cuint;
workCount: cuint;
originalData: LeoDataPtr;
workData: ptr pointer): LeopardResult {.leo, importc: "leo_encode".}
bufferBytes: uint64,
originalCount: cuint,
recoveryCount: cuint,
workCount: cuint,
originalData: LeoDataPtr,
workData: ptr pointer,
): LeopardResult {.leo, importc: "leo_encode".}
## Number of bytes in each data buffer
## Number of original_data[] buffer pointers
## Number of recovery_data[] buffer pointers
@ -251,8 +257,10 @@ proc leoEncode*(
## Returns 0 on invalid input.
##
proc leoDecodeWorkCount*(originalCount: cuint; recoveryCount: cuint): cuint
{.leo, importc: "leo_decode_work_count".}
proc leoDecodeWorkCount*(
originalCount: cuint, recoveryCount: cuint
): cuint {.leo, importc: "leo_decode_work_count".}
##
## leoDecode()
##
@ -276,13 +284,14 @@ proc leoDecodeWorkCount*(originalCount: cuint; recoveryCount: cuint): cuint
##
proc leoDecode*(
bufferBytes: uint64;
originalCount: cuint;
recoveryCount: cuint;
workCount: cuint;
originalData: LeoDataPtr;
recoveryData: LeoDataPtr;
workData: ptr pointer): LeopardResult {.leo, importc: "leo_decode".}
bufferBytes: uint64,
originalCount: cuint,
recoveryCount: cuint,
workCount: cuint,
originalData: LeoDataPtr,
recoveryData: LeoDataPtr,
workData: ptr pointer,
): LeopardResult {.leo, importc: "leo_decode".}
## Number of bytes in each data buffer
## Number of original_data[] buffer pointers
## Number of recovery_data[] buffer pointers

View File

@ -9,12 +9,10 @@ proc randomCRCPacket*(data: var openArray[byte]) =
for i in 1 ..< data.len:
data[i] = data[0]
else:
let
len: uint32 = data.len.uint32
let len: uint32 = data.len.uint32
copyMem(addr data[0], unsafeAddr len, sizeof(len))
var
crc = data.len.uint32
var crc = data.len.uint32
for i in 8 ..< data.len:
let v = rand(data.len).byte
@ -63,22 +61,18 @@ proc dropRandomIdx*(bufs: var openArray[seq[byte]], dropCount: int) =
count.inc
proc testPackets*(
buffers,
parity,
bufSize,
dataLosses: int,
buffers, parity, bufSize, dataLosses: int,
parityLosses: int,
encoder: var LeoEncoder,
decoder: var LeoDecoder): Result[void, cstring] =
decoder: var LeoDecoder,
): Result[void, cstring] =
var
dataBuf = newSeqOfCap[seq[byte]](buffers)
parityBuf = newSeqOfCap[seq[byte]](parity)
recoveredBuf = newSeqOfCap[seq[byte]](buffers)
for _ in 0 ..< buffers:
var
dataSeq = newSeq[byte](bufSize)
var dataSeq = newSeq[byte](bufSize)
randomCRCPacket(dataSeq)
dataBuf.add(dataSeq)

View File

@ -64,7 +64,8 @@ suite "Leopard Parametrization":
recovered = newSeq[seq[byte]](3)
check:
leo.decode(data, parity, recovered).error == "Number of parity buffers should match!"
leo.decode(data, parity, recovered).error ==
"Number of parity buffers should match!"
test "Should not allow decoding with invalid data buffer counts":
var
@ -74,7 +75,8 @@ suite "Leopard Parametrization":
recovered = newSeq[seq[byte]](3)
check:
leo.decode(data, parity, recovered).error == "Number of recovered buffers should match buffers!"
leo.decode(data, parity, recovered).error ==
"Number of recovered buffers should match buffers!"
suite "Leopard simple Encode/Decode":
const
@ -105,8 +107,7 @@ suite "Leopard simple Encode/Decode":
for i in 0 ..< DataCount:
data[i] = newSeq[byte](BufferSize)
recovered[i] = newSeq[byte](BufferSize)
var
str = TestString & " " & $i
var str = TestString & " " & $i
copyMem(addr data[i][0], addr str[0], str.len)
@ -132,8 +133,7 @@ suite "Leopard simple Encode/Decode":
data[i] = newSeq[byte](BufferSize)
recovered[i] = newSeq[byte](BufferSize)
var
str = TestString & " " & $i
var str = TestString & " " & $i
copyMem(addr data[i][0], addr str[0], str.len)
@ -142,8 +142,7 @@ suite "Leopard simple Encode/Decode":
encoder.encode(data, parity).tryGet()
var
data1 = data[0]
var data1 = data[0]
data[0].setLen(0)
parity[0].setLen(0)
@ -199,7 +198,8 @@ suite "Leopard Encode/Decode":
try:
encoder = LeoEncoder.init(bufSize, buffers, parity).tryGet()
decoder = LeoDecoder.init(bufSize, buffers, parity).tryGet()
testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder).tryGet()
testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder)
.tryGet()
finally:
encoder.free()
decoder.free()
@ -217,7 +217,8 @@ suite "Leopard Encode/Decode":
try:
encoder = LeoEncoder.init(bufSize, buffers, parity).tryGet()
decoder = LeoDecoder.init(bufSize, buffers, parity).tryGet()
testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder).tryGet()
testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder)
.tryGet()
finally:
encoder.free()
decoder.free()
@ -235,7 +236,8 @@ suite "Leopard Encode/Decode":
try:
encoder = LeoEncoder.init(bufSize, buffers, parity).tryGet()
decoder = LeoDecoder.init(bufSize, buffers, parity).tryGet()
testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder).tryGet()
testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder)
.tryGet()
finally:
encoder.free()
decoder.free()
@ -253,7 +255,8 @@ suite "Leopard Encode/Decode":
try:
encoder = LeoEncoder.init(bufSize, buffers, parity).tryGet()
decoder = LeoDecoder.init(bufSize, buffers, parity).tryGet()
testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder).tryGet()
testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder)
.tryGet()
finally:
encoder.free()
decoder.free()
@ -271,7 +274,8 @@ suite "Leopard Encode/Decode":
try:
encoder = LeoEncoder.init(bufSize, buffers, parity).tryGet()
decoder = LeoDecoder.init(bufSize, buffers, parity).tryGet()
testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder).tryGet()
testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder)
.tryGet()
finally:
encoder.free()
decoder.free()
@ -289,7 +293,8 @@ suite "Leopard Encode/Decode":
try:
encoder = LeoEncoder.init(bufSize, buffers, parity).tryGet()
decoder = LeoDecoder.init(bufSize, buffers, parity).tryGet()
testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder).tryGet()
testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder)
.tryGet()
finally:
encoder.free()
decoder.free()
@ -307,7 +312,8 @@ suite "Leopard Encode/Decode":
try:
encoder = LeoEncoder.init(bufSize, buffers, parity).tryGet()
decoder = LeoDecoder.init(bufSize, buffers, parity).tryGet()
testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder).tryGet()
testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder)
.tryGet()
finally:
encoder.free()
decoder.free()
@ -322,7 +328,8 @@ suite "Leopard use same encoder/decoder multiple times":
decoder = LeoDecoder.init(4096, 800, 800).tryGet()
for i in 0 .. 10:
let lost = 40 * i
test "Encode/Decode using same encoder/decoder - lost data = " & $lost & " lost parity = " & $lost:
test "Encode/Decode using same encoder/decoder - lost data = " & $lost &
" lost parity = " & $lost:
testPackets(800, 800, 4096, 40 * i, 40 * i, encoder, decoder).tryGet()
finally:
encoder.free()