mirror of https://github.com/logos-storage/nim-leopard.git
synced 2026-01-05 23:23:07 +00:00

Merge a1d8adb19037fe4d55de838d82a4933fce59e836 into b81147ad6adb66d57b81b902c4a7efad5d055a5b
This commit is contained in commit d18c5478f8.

config.nims
@@ -1,8 +1,13 @@
 --styleCheck:usages
 --styleCheck:error
 --threads:on
 --tlsEmulation:off

-# begin Nimble config (version 1)
-when fileExists("nimble.paths"):
+# begin Nimble config (version 2)
+--noNimblePath
+when withDir(thisDir(), system.fileExists("nimble.paths")):
   include "nimble.paths"
 # end Nimble config
@@ -19,27 +19,27 @@ import ./utils

 export wrapper, results

-const
-  BuffMultiples* = 64
+const BuffMultiples* = 64

 type
   LeoBufferPtr* = ptr UncheckedArray[byte]

   LeoCoderKind* {.pure.} = enum
-    Encoder,
+    Encoder
     Decoder

   Leo* = object of RootObj
     bufSize*: int # size of the buffer in multiples of 64
     buffers*: int # total number of data buffers (K)
     parity*: int # total number of parity buffers (M)
     dataBufferPtr: seq[LeoBufferPtr] # buffer where data is copied before encoding
     workBufferCount: int # number of parity work buffers
-    workBufferPtr: seq[LeoBufferPtr] # buffer where parity data is written during encoding or before decoding
+    workBufferPtr: seq[LeoBufferPtr]
+      # buffer where parity data is written during encoding or before decoding
     case kind: LeoCoderKind
     of LeoCoderKind.Decoder:
       decodeBufferCount: int # number of decoding work buffers
       decodeBufferPtr: seq[LeoBufferPtr] # work buffer used for decoding
     of LeoCoderKind.Encoder:
       discard

@@ -47,9 +47,8 @@ type
   LeoDecoder* = object of Leo

 func encode*(
-  self: var LeoEncoder,
-  data,parity: ptr UncheckedArray[ptr UncheckedArray[byte]],
-  dataLen,parityLen: int ): Result[void, cstring] =
+    self: var LeoEncoder, data, parity: var openArray[seq[byte]]
+): Result[void, cstring] =
   ## Encode a list of buffers in `data` into a number of `bufSize` sized
   ## `parity` buffers
   ##
@@ -57,43 +56,40 @@ func encode*(
   ## `parity` - list of parity `buffers` of size `bufSize`
   ##

-  if dataLen != self.buffers:
+  if data.len != self.buffers:
     return err("Number of data buffers should match!")

-  if parityLen != self.parity:
+  if parity.len != self.parity:
     return err("Number of parity buffers should match!")

   # zero encode work buffer to avoid corrupting with previous run
-  for i in 0..<self.workBufferCount:
+  for i in 0 ..< self.workBufferCount:
     zeroMem(self.workBufferPtr[i], self.bufSize)

   # copy data into aligned buffer
-  for i in 0..<dataLen:
+  for i in 0 ..< data.len:
     copyMem(self.dataBufferPtr[i], addr data[i][0], self.bufSize)

-  let
-    res = leoEncode(
-      self.bufSize.culonglong,
-      self.buffers.cuint,
-      self.parity.cuint,
-      self.workBufferCount.cuint,
-      cast[LeoDataPtr](addr self.dataBufferPtr[0]),
-      cast[ptr pointer](addr self.workBufferPtr[0]))
+  let res = leoEncode(
+    self.bufSize.culonglong,
+    self.buffers.cuint,
+    self.parity.cuint,
+    self.workBufferCount.cuint,
+    cast[LeoDataPtr](addr self.dataBufferPtr[0]),
+    cast[ptr pointer](addr self.workBufferPtr[0]),
+  )

   if ord(res) != ord(LeopardSuccess):
     return err(leoResultString(res.LeopardResult))

-  for i in 0..<parityLen:
-    copyMem(parity[i], self.workBufferPtr[i], self.bufSize)
+  for i in 0 ..< parity.len:
+    copyMem(addr parity[i][0], self.workBufferPtr[i], self.bufSize)

   return ok()

 func decode*(
-  self: var LeoDecoder,
-  data,
-  parity,
-  recovered: ptr UncheckedArray[ptr UncheckedArray[byte]],
-  dataLen,parityLen,recoveredLen: int): Result[void, cstring] =
+    self: var LeoDecoder, data, parity, recovered: var openArray[seq[byte]]
+): Result[void, cstring] =
   ## Decode a list of buffers in `data` and `parity` into a list
   ## of `recovered` buffers of `bufSize`. The list of `recovered`
   ## buffers should be match the `Leo.buffers`
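For context, a minimal usage sketch of the reworked seq-based encode API above. This is a sketch only: the 64-byte buffer size and the 4 data / 2 parity shard counts are illustrative, the import path follows the test helpers' `import ../leopard`, and `tryGet` comes from the Result type re-exported by the module.

  import ../leopard  # assumption: adjust the module path for your project

  var encoder = LeoEncoder.init(64, 4, 2).tryGet()  # bufSize, K data buffers, M parity buffers

  var
    data = newSeq[seq[byte]](4)    # K data shards, each bufSize bytes
    parity = newSeq[seq[byte]](2)  # M parity shards, filled in by encode
  for i in 0 ..< data.len:
    data[i] = newSeq[byte](64)     # put your payload here; zero-filled for the sketch
  for i in 0 ..< parity.len:
    parity[i] = newSeq[byte](64)

  encoder.encode(data, parity).tryGet()  # parity[i] now holds the computed parity shards
  encoder.free()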
@@ -103,55 +99,55 @@ func decode*(
   ## `recovered` - list of recovered `buffers` of size `bufSize`
   ##

-  if dataLen != self.buffers:
+  if data.len != self.buffers:
     return err("Number of data buffers should match!")

-  if parityLen != self.parity:
+  if parity.len != self.parity:
     return err("Number of parity buffers should match!")

-  if recoveredLen != self.buffers:
+  if recovered.len != self.buffers:
     return err("Number of recovered buffers should match buffers!")

   # clean out work and data buffers
-  for i in 0..<self.workBufferCount:
+  for i in 0 ..< self.workBufferCount:
     zeroMem(self.workBufferPtr[i], self.bufSize)

-  for i in 0..<self.decodeBufferCount:
+  for i in 0 ..< self.decodeBufferCount:
     zeroMem(self.decodeBufferPtr[i], self.bufSize)

-  for i in 0..<dataLen:
+  for i in 0 ..< data.len:
     zeroMem(self.dataBufferPtr[i], self.bufSize)

   # this is needed because erasures are nil pointers
   var
-    dataPtr = newSeq[LeoBufferPtr](dataLen)
+    dataPtr = newSeq[LeoBufferPtr](data.len)
     parityPtr = newSeq[LeoBufferPtr](self.workBufferCount)

   # copy data into aligned buffer
-  for i in 0..<dataLen:
-    if not data[i].isNil:
-      copyMem(self.dataBufferPtr[i],addr data[i][0], self.bufSize)
+  for i in 0 ..< data.len:
+    if data[i].len > 0:
+      copyMem(self.dataBufferPtr[i], addr data[i][0], self.bufSize)
       dataPtr[i] = self.dataBufferPtr[i]
     else:
       dataPtr[i] = nil

   # copy parity into aligned buffer
-  for i in 0..<self.workBufferCount:
-    if i < parityLen and not parity[i].isNil:
+  for i in 0 ..< self.workBufferCount:
+    if i < parity.len and parity[i].len > 0:
       copyMem(self.workBufferPtr[i], addr parity[i][0], self.bufSize)
       parityPtr[i] = self.workBufferPtr[i]
     else:
       parityPtr[i] = nil

-  let
-    res = leoDecode(
-      self.bufSize.culonglong,
-      self.buffers.cuint,
-      self.parity.cuint,
-      self.decodeBufferCount.cuint,
-      cast[LeoDataPtr](addr dataPtr[0]),
-      cast[LeoDataPtr](addr parityPtr[0]),
-      cast[ptr pointer](addr self.decodeBufferPtr[0]))
+  let res = leoDecode(
+    self.bufSize.culonglong,
+    self.buffers.cuint,
+    self.parity.cuint,
+    self.decodeBufferCount.cuint,
+    cast[LeoDataPtr](addr dataPtr[0]),
+    cast[LeoDataPtr](addr parityPtr[0]),
+    cast[ptr pointer](addr self.decodeBufferPtr[0]),
+  )

   if ord(res) != ord(LeopardSuccess):
     return err(leoResultString(res.LeopardResult))
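With the openArray-based API an erased shard is signalled by a zero-length seq rather than a nil pointer, which is what the `data[i].len > 0` checks above test for. A minimal sketch continuing from the encode example (decoder parameters must match the encoder's; values are illustrative):

  var
    decoder = LeoDecoder.init(64, 4, 2).tryGet()  # same bufSize, K and M as the encoder
    recovered = newSeq[seq[byte]](4)              # one recovered slot per data shard
  for i in 0 ..< recovered.len:
    recovered[i] = newSeq[byte](64)

  # Simulate losing two data shards: the decoder treats zero-length seqs as erasures.
  data[0].setLen(0)
  data[1].setLen(0)

  decoder.decode(data, parity, recovered).tryGet()
  # recovered[0] and recovered[1] now hold the reconstructed shards
  decoder.free()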
@@ -196,11 +192,8 @@ func free*(self: var Leo) =
 # self.free()

 proc init[TT: Leo](
-  T: type TT,
-  bufSize,
-  buffers,
-  parity: int,
-  kind: LeoCoderKind): Result[T, cstring] =
+    T: type TT, bufSize, buffers, parity: int, kind: LeoCoderKind
+): Result[T, cstring] =
   if bufSize mod BuffMultiples != 0:
     return err("bufSize should be multiples of 64 bytes!")
@@ -222,46 +215,33 @@ proc init[TT: Leo](
   if (let res = leoInit(); res.ord != LeopardSuccess.ord):
     return err(leoResultString(res.LeopardResult))

-  var
-    self = T(
-      kind: kind,
-      bufSize: bufSize,
-      buffers: buffers,
-      parity: parity)
+  var self = T(kind: kind, bufSize: bufSize, buffers: buffers, parity: parity)

-  self.workBufferCount = leoEncodeWorkCount(
-    buffers.cuint,
-    parity.cuint).int
+  self.workBufferCount = leoEncodeWorkCount(buffers.cuint, parity.cuint).int

   # initialize encode work buffers
-  for _ in 0..<self.workBufferCount:
+  for _ in 0 ..< self.workBufferCount:
     self.workBufferPtr.add(cast[LeoBufferPtr](self.bufSize.leoAlloc()))

   # initialize data buffers
-  for _ in 0..<self.buffers:
+  for _ in 0 ..< self.buffers:
     self.dataBufferPtr.add(cast[LeoBufferPtr](self.bufSize.leoAlloc()))

   if self.kind == LeoCoderKind.Decoder:
-    self.decodeBufferCount = leoDecodeWorkCount(
-      buffers.cuint,
-      parity.cuint).int
+    self.decodeBufferCount = leoDecodeWorkCount(buffers.cuint, parity.cuint).int

     # initialize decode work buffers
-    for _ in 0..<self.decodeBufferCount:
+    for _ in 0 ..< self.decodeBufferCount:
       self.decodeBufferPtr.add(cast[LeoBufferPtr](self.bufSize.leoAlloc()))

   ok(self)

 proc init*(
-  T: type LeoEncoder,
-  bufSize,
-  buffers,
-  parity: int): Result[LeoEncoder, cstring] =
+    T: type LeoEncoder, bufSize, buffers, parity: int
+): Result[LeoEncoder, cstring] =
   LeoEncoder.init(bufSize, buffers, parity, LeoCoderKind.Encoder)

 proc init*(
-  T: type LeoDecoder,
-  bufSize,
-  buffers,
-  parity: int): Result[LeoDecoder, cstring] =
+    T: type LeoDecoder, bufSize, buffers, parity: int
+): Result[LeoDecoder, cstring] =
   LeoDecoder.init(bufSize, buffers, parity, LeoCoderKind.Decoder)
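As a quick illustration of the guard in the init proc above, bufSize must be a multiple of BuffMultiples (64 bytes). A hedged sketch of both the error and the success path (the shard counts are arbitrary; isErr/error/tryGet come from the Result type the module re-exports):

  let bad = LeoEncoder.init(100, 4, 2)  # 100 is not a multiple of 64
  assert bad.isErr
  assert bad.error == "bufSize should be multiples of 64 bytes!"

  var enc = LeoEncoder.init(64, 4, 2).tryGet()  # valid: 64-byte shards, K = 4, M = 2
  enc.free()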
@@ -8,7 +8,8 @@
 ## those terms.

 import pkg/upraises
-push: {.upraises: [].}
+push:
+  {.upraises: [].}

 {.deadCodeElim: on.}

@@ -25,43 +26,44 @@ else:
   let LeoAlignBytes* = 16'u

 when defined(windows):
-  proc alignedAllocWindows(size, alignment: csize_t): pointer
-    {.importc: "_aligned_malloc", header: "<malloc.h>".}
+  proc alignedAllocWindows(
+    size, alignment: csize_t
+  ): pointer {.importc: "_aligned_malloc", header: "<malloc.h>".}
     # Beware of the arg order!

   proc alignedAlloc(alignment, size: csize_t): pointer =
     alignedAllocWindows(size, alignment)

-  proc alignedFree*[T](p: ptr T)
-    {.importc: "_aligned_free", header: "<malloc.h>".}
+  proc alignedFree*[T](p: ptr T) {.importc: "_aligned_free", header: "<malloc.h>".}
 elif defined(osx):
-  proc posix_memalign(mem: var pointer, alignment, size: csize_t)
-    {.importc, header:"<stdlib.h>".}
+  proc posix_memalign(
+    mem: var pointer, alignment, size: csize_t
+  ) {.importc, header: "<stdlib.h>".}

   proc alignedAlloc(alignment, size: csize_t): pointer {.inline.} =
     posix_memalign(result, alignment, size)

   proc alignedFree*[T](p: ptr T) {.inline.} =
     c_free(p)
 elif defined(unix):
-  proc alignedAlloc(alignment, size: csize_t): pointer
-    {.importc: "aligned_alloc", header: "<stdlib.h>".}
+  proc alignedAlloc(
+    alignment, size: csize_t
+  ): pointer {.importc: "aligned_alloc", header: "<stdlib.h>".}

   proc alignedFree*[T](p: ptr T) {.inline.} =
     c_free(p)
 else:
   {.warning: "Falling back to manual pointer alignment, this is highly inefficient!".}
   proc alignedAlloc*(size, align: Positive): pointer {.inline.} =
-    var
-      data = c_malloc(align + size)
+    var data = c_malloc(align + size)

     if not isNil(data):
-      var
-        doffset = cast[uint](data) mod align
+      var doffset = cast[uint](data) mod align

       data = data.offset((align + doffset).int)
-      var
-        offsetPtr = cast[pointer](cast[uint](data) - 1'u)
+      var offsetPtr = cast[pointer](cast[uint](data) - 1'u)
       moveMem(offsetPtr, addr doffset, sizeof(doffset))

       return data

@@ -71,7 +73,7 @@ else:
     if not isNil(data):
       let offset = cast[uint](data) - 1'u
       if offset >= align:
         return

       data = cast[pointer](cast[uint](data) - (align - offset))
       c_free(data)
@@ -14,11 +14,13 @@ push: {.upraises: [].}

 # From awr1: https://github.com/nim-lang/Nim/pull/11816/files

-proc cpuidX86(eaxi, ecxi: int32): tuple[eax, ebx, ecx, edx: int32] {.used.}=
+proc cpuidX86(eaxi, ecxi: int32): tuple[eax, ebx, ecx, edx: int32] {.used.} =
   when defined(vcc):
     # limited inline asm support in vcc, so intrinsics, here we go:
-    proc cpuidVcc(cpuInfo: ptr int32; functionID, subFunctionID: int32)
-      {.cdecl, importc: "__cpuidex", header: "intrin.h".}
+    proc cpuidVcc(
+      cpuInfo: ptr int32, functionID, subFunctionID: int32
+    ) {.cdecl, importc: "__cpuidex", header: "intrin.h".}
     cpuidVcc(addr result.eax, eaxi, ecxi)
   else:
     var (eaxr, ebxr, ecxr, edxr) = (0'i32, 0'i32, 0'i32, 0'i32)

@@ -28,24 +30,81 @@ proc cpuidX86(eaxi, ecxi: int32): tuple[eax, ebx, ecx, edx: int32] {.used.}=
       :"a"(`eaxi`), "c"(`ecxi`)"""
     (eaxr, ebxr, ecxr, edxr)

-proc cpuNameX86(): string {.used.}=
+proc cpuNameX86(): string {.used.} =
   var leaves {.global.} = cast[array[48, char]]([
     cpuidX86(eaxi = 0x80000002'i32, ecxi = 0),
     cpuidX86(eaxi = 0x80000003'i32, ecxi = 0),
-    cpuidX86(eaxi = 0x80000004'i32, ecxi = 0)])
+    cpuidX86(eaxi = 0x80000004'i32, ecxi = 0),
+  ])
   result = $cast[cstring](addr leaves[0])

-type
-  X86Feature {.pure.} = enum
-    HypervisorPresence, Hyperthreading, NoSMT, IntelVtx, Amdv, X87fpu, Mmx,
-    MmxExt, F3DNow, F3DNowEnhanced, Prefetch, Sse, Sse2, Sse3, Ssse3, Sse4a,
-    Sse41, Sse42, Avx, Avx2, Avx512f, Avx512dq, Avx512ifma, Avx512pf,
-    Avx512er, Avx512cd, Avx512bw, Avx512vl, Avx512vbmi, Avx512vbmi2,
-    Avx512vpopcntdq, Avx512vnni, Avx512vnniw4, Avx512fmaps4, Avx512bitalg,
-    Avx512bfloat16, Avx512vp2intersect, Rdrand, Rdseed, MovBigEndian, Popcnt,
-    Fma3, Fma4, Xop, Cas8B, Cas16B, Abm, Bmi1, Bmi2, TsxHle, TsxRtm, Adx, Sgx,
-    Gfni, Aes, Vaes, Vpclmulqdq, Pclmulqdq, NxBit, Float16c, Sha, Clflush,
-    ClflushOpt, Clwb, PrefetchWT1, Mpx
+type X86Feature {.pure.} = enum
+  HypervisorPresence
+  Hyperthreading
+  NoSMT
+  IntelVtx
+  Amdv
+  X87fpu
+  Mmx
+  MmxExt
+  F3DNow
+  F3DNowEnhanced
+  Prefetch
+  Sse
+  Sse2
+  Sse3
+  Ssse3
+  Sse4a
+  Sse41
+  Sse42
+  Avx
+  Avx2
+  Avx512f
+  Avx512dq
+  Avx512ifma
+  Avx512pf
+  Avx512er
+  Avx512cd
+  Avx512bw
+  Avx512vl
+  Avx512vbmi
+  Avx512vbmi2
+  Avx512vpopcntdq
+  Avx512vnni
+  Avx512vnniw4
+  Avx512fmaps4
+  Avx512bitalg
+  Avx512bfloat16
+  Avx512vp2intersect
+  Rdrand
+  Rdseed
+  MovBigEndian
+  Popcnt
+  Fma3
+  Fma4
+  Xop
+  Cas8B
+  Cas16B
+  Abm
+  Bmi1
+  Bmi2
+  TsxHle
+  TsxRtm
+  Adx
+  Sgx
+  Gfni
+  Aes
+  Vaes
+  Vpclmulqdq
+  Pclmulqdq
+  NxBit
+  Float16c
+  Sha
+  Clflush
+  ClflushOpt
+  Clwb
+  PrefetchWT1
+  Mpx

 let
   leaf1 = cpuidX86(eaxi = 1, ecxi = 0)

@@ -62,7 +121,8 @@ proc testX86Feature(feature: X86Feature): bool =
   # see: https://en.wikipedia.org/wiki/CPUID#Calling_CPUID
   # see: Intel® Architecture Instruction Set Extensions and Future Features
   # Programming Reference
-  result = case feature
+  result =
+    case feature
     # leaf 1, edx
     of X87fpu:
       leaf1.edx.test(0)
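For orientation, the feature checks above reduce to masking single bits out of a CPUID leaf. A minimal sketch, assuming the module's private cpuidX86 helper is in scope (e.g. inside this file); the leaf and bit position are taken from the X87fpu branch shown above:

  # Query CPUID leaf 1 and test EDX bit 0, which reports an on-chip x87 FPU.
  let leaf1 = cpuidX86(eaxi = 1, ecxi = 0)
  let hasX87fpu = (leaf1.edx and (1'i32 shl 0)) != 0
  echo "x87 FPU present: ", hasX87fpu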
@@ -24,7 +24,6 @@
 ## ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 ## POSSIBILITY OF SUCH DAMAGE.

-
 ## Leopard-RS
 ## MDS Reed-Solomon Erasure Correction Codes for Large Data in C
 ##

@@ -56,9 +55,9 @@
 ## arithmetic using Intel SIMD instructions." In: FAST-2013: 11th Usenix
 ## Conference on File and Storage Technologies, San Jose, 2013

-
 import pkg/upraises
-push: {.upraises: [].}
+push:
+  {.upraises: [].}

 ## -----------------------------------------------------------------------------
 ## Build configuration

@@ -67,8 +66,7 @@ import std/compilesettings
 import std/os
 import std/strutils

-type
-  LeoDataPtr* {.importc: "const void* const*", bycopy.} = pointer
+type LeoDataPtr* {.importc: "const void* const*", bycopy.} = pointer

 const
   LeopardCmakeFlags {.strdefine.} =

@@ -121,12 +119,15 @@ static:
     let
       buildDirUnix = buildDir.pathWin2Unix
       leopardDirUnix = LeopardDir.pathWin2Unix
-    if defined(LeopardRebuild): discard bash("rm -rf", buildDirUnix)
+    if defined(LeopardRebuild):
+      discard bash("rm -rf", buildDirUnix)
     if (bashEx("ls", LeopardLib.pathWin2Unix)).exitCode != 0:
       discard bash("mkdir -p", buildDirUnix)
       let cmd =
-        @["cd", buildDirUnix, "&& cmake", leopardDirUnix, LeopardCmakeFlags,
-          "&& make libleopard"]
+        @[
+          "cd", buildDirUnix, "&& cmake", leopardDirUnix, LeopardCmakeFlags,
+          "&& make libleopard",
+        ]
       echo "\nBuilding Leopard-RS: " & cmd.join(" ")
       let (output, exitCode) = bashEx cmd
       echo output

@@ -134,7 +135,8 @@ static:
         discard bash("rm -rf", buildDirUnix)
         raise (ref Defect)(msg: "Failed to build Leopard-RS")
   else:
-    if defined(LeopardRebuild): discard gorge "rm -rf " & buildDir
+    if defined(LeopardRebuild):
+      discard gorge "rm -rf " & buildDir
     if gorgeEx("ls " & LeopardLib).exitCode != 0:
       discard gorge "mkdir -p " & buildDir
       let cmd =
@@ -159,21 +161,22 @@ proc leoInit*(): cint {.leo, importcpp: "leo_init".}
 ## Results

 # TODO: For some reason it's only possibly to use the enum with `ord`
-type
-  LeopardResult* = enum
-    LeopardCallInitialize = -7, ## Call leo_init() first
-    LeopardPlatform = -6, ## Platform is unsupported
-    LeopardInvalidInput = -5, ## A function parameter was invalid
-    LeopardInvalidCounts = -4, ## Invalid counts provided
-    LeopardInvalidSize = -3, ## Buffer size must be a multiple of 64 bytes
-    LeopardTooMuchData = -2, ## Buffer counts are too high
-    LeopardNeedMoreData = -1, ## Not enough recovery data received
-    LeopardSuccess = 0 ## Operation succeeded
+type LeopardResult* = enum
+  LeopardCallInitialize = -7 ## Call leo_init() first
+  LeopardPlatform = -6 ## Platform is unsupported
+  LeopardInvalidInput = -5 ## A function parameter was invalid
+  LeopardInvalidCounts = -4 ## Invalid counts provided
+  LeopardInvalidSize = -3 ## Buffer size must be a multiple of 64 bytes
+  LeopardTooMuchData = -2 ## Buffer counts are too high
+  LeopardNeedMoreData = -1 ## Not enough recovery data received
+  LeopardSuccess = 0 ## Operation succeeded

 ## Convert Leopard result to string

-proc leoResultString*(result: LeopardResult): cstring {.leo, importc: "leo_result_string".}
+proc leoResultString*(
+  result: LeopardResult
+): cstring {.leo, importc: "leo_result_string".}

 ## ------------------------------------------------------------------------------
 ## Encoder API
 ##
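A minimal error-handling sketch for the raw wrapper, mirroring how the high-level init proc shown earlier checks the result (as the TODO above notes, result codes are compared through `ord`):

  # leo_init() must succeed before any encode/decode call.
  if (let res = leoInit(); res.ord != LeopardSuccess.ord):
    echo "leo_init failed: ", leoResultString(res.LeopardResult)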
@@ -187,8 +190,10 @@ proc leoResultString*(result: LeopardResult): cstring {.leo, importc: "leo_resul
 ## Returns 0 on invalid input.
 ##

-proc leoEncodeWorkCount*(originalCount: cuint; recoveryCount: cuint): cuint
-  {.leo, importc: "leo_encode_work_count".}
+proc leoEncodeWorkCount*(
+  originalCount: cuint, recoveryCount: cuint
+): cuint {.leo, importc: "leo_encode_work_count".}

 ##
 ## leo_encode()
 ##

@@ -224,12 +229,13 @@ proc leoEncodeWorkCount*(originalCount: cuint; recoveryCount: cuint): cuint
 ##

 proc leoEncode*(
-  bufferBytes: uint64;
-  originalCount: cuint;
-  recoveryCount: cuint;
-  workCount: cuint;
-  originalData: LeoDataPtr;
-  workData: ptr pointer): LeopardResult {.leo, importc: "leo_encode".}
+  bufferBytes: uint64,
+  originalCount: cuint,
+  recoveryCount: cuint,
+  workCount: cuint,
+  originalData: LeoDataPtr,
+  workData: ptr pointer,
+): LeopardResult {.leo, importc: "leo_encode".}
   ## Number of bytes in each data buffer
   ## Number of original_data[] buffer pointers
   ## Number of recovery_data[] buffer pointers

@@ -251,8 +257,10 @@ proc leoEncode*(
 ## Returns 0 on invalid input.
 ##

-proc leoDecodeWorkCount*(originalCount: cuint; recoveryCount: cuint): cuint
-  {.leo, importc: "leo_decode_work_count".}
+proc leoDecodeWorkCount*(
+  originalCount: cuint, recoveryCount: cuint
+): cuint {.leo, importc: "leo_decode_work_count".}

 ##
 ## leoDecode()
 ##

@@ -276,13 +284,14 @@ proc leoDecodeWorkCount*(originalCount: cuint; recoveryCount: cuint): cuint
 ##

 proc leoDecode*(
-  bufferBytes: uint64;
-  originalCount: cuint;
-  recoveryCount: cuint;
-  workCount: cuint;
-  originalData: LeoDataPtr;
-  recoveryData: LeoDataPtr;
-  workData: ptr pointer): LeopardResult {.leo, importc: "leo_decode".}
+  bufferBytes: uint64,
+  originalCount: cuint,
+  recoveryCount: cuint,
+  workCount: cuint,
+  originalData: LeoDataPtr,
+  recoveryData: LeoDataPtr,
+  workData: ptr pointer,
+): LeopardResult {.leo, importc: "leo_decode".}
   ## Number of bytes in each data buffer
   ## Number of original_data[] buffer pointers
   ## Number of recovery_data[] buffer pointers
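When driving leoEncode/leoDecode directly, the caller first asks the library how many bufferBytes-sized work buffers to allocate. A hedged sketch with illustrative counts (K = 4 original buffers, M = 2 recovery buffers), using only the work-count wrappers declared above:

  let
    encodeWork = leoEncodeWorkCount(4.cuint, 2.cuint)  # work buffers needed by leo_encode
    decodeWork = leoDecodeWorkCount(4.cuint, 2.cuint)  # work buffers needed by leo_decode
  # The high-level Leo init allocates exactly this many bufSize-byte aligned work buffers.
  echo "encode work buffers: ", encodeWork, ", decode work buffers: ", decodeWork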
@@ -6,17 +6,15 @@ import ../leopard

 proc randomCRCPacket*(data: var openArray[byte]) =
   if data.len < 16:
     data[0] = rand(data.len).byte
-    for i in 1..<data.len:
+    for i in 1 ..< data.len:
       data[i] = data[0]
   else:
-    let
-      len: uint32 = data.len.uint32
+    let len: uint32 = data.len.uint32

     copyMem(addr data[0], unsafeAddr len, sizeof(len))
-    var
-      crc = data.len.uint32
+    var crc = data.len.uint32

-    for i in 8..<data.len:
+    for i in 8 ..< data.len:
       let v = rand(data.len).byte
       data[i] = v
       crc = (crc shl 3) and (crc shr (32 - 3))

@@ -24,22 +22,22 @@ proc randomCRCPacket*(data: var openArray[byte]) =

   copyMem(addr data[4], unsafeAddr crc, sizeof(crc))

-proc checkCRCPacket*(data: ptr UncheckedArray[byte], len: int): bool =
-  if len < 16:
-    for i in 1..<len:
-      if data[i] != data[0]:
+proc checkCRCPacket*(data: openArray[byte]): bool =
+  if data.len < 16:
+    for d in data[1 .. data.high]:
+      if d != data[0]:
         raise (ref Defect)(msg: "Packet don't match")
   else:
     var
-      crc = len.uint32
+      crc = data.len.uint32
       packCrc: uint32
       packSize: uint32

     copyMem(addr packSize, unsafeAddr data[0], sizeof(packSize))
-    if packSize != len.uint:
+    if packSize != data.len.uint:
       raise (ref Defect)(msg: "Packet size don't match!")

-    for i in 4..<len:
+    for i in 4 ..< data.len:
       let v = data[i]
       crc = (crc shl 3) and (crc shr (32 - 3))
       crc += v

@@ -49,84 +47,54 @@ proc checkCRCPacket*(data: ptr UncheckedArray[byte], len: int): bool =
   if packCrc == crc:
     return true

-proc dropRandomIdx*(bufs: ptr UncheckedArray[ptr UncheckedArray[byte]], bufsLen,dropCount: int) =
+proc dropRandomIdx*(bufs: var openArray[seq[byte]], dropCount: int) =
   var
     count = 0
     dups: seq[int]
-    size = bufsLen
+    size = bufs.len

   while count < dropCount:
-    let i = rand(0..<size)
+    let i = rand(0 ..< size)
     if dups.find(i) == -1:
       dups.add(i)
-      bufs[i]=nil
+      bufs[i].setLen(0)
       count.inc

-proc createDoubleArray*(
-  outerLen, innerLen: int
-): ptr UncheckedArray[ptr UncheckedArray[byte]] =
-  # Allocate outer array
-  result = cast[ptr UncheckedArray[ptr UncheckedArray[byte]]](alloc0(
-    sizeof(ptr UncheckedArray[byte]) * outerLen
-  ))
-
-  # Allocate each inner array
-  for i in 0 ..< outerLen:
-    result[i] = cast[ptr UncheckedArray[byte]](alloc0(sizeof(byte) * innerLen))
-
-proc freeDoubleArray*(
-  arr: ptr UncheckedArray[ptr UncheckedArray[byte]], outerLen: int
-) =
-  # Free each inner array
-  for i in 0 ..< outerLen:
-    if not arr[i].isNil:
-      dealloc(arr[i])
-
-  # Free outer array
-  if not arr.isNil:
-    dealloc(arr)
-
 proc testPackets*(
-  buffers,
-  parity,
-  bufSize,
-  dataLosses: int,
-  parityLosses: int,
-  encoder: var LeoEncoder,
-  decoder: var LeoDecoder): Result[void, cstring] =
-
+  buffers, parity, bufSize, dataLosses: int,
+  parityLosses: int,
+  encoder: var LeoEncoder,
+  decoder: var LeoDecoder,
+): Result[void, cstring] =
   var
-    dataBuf = createDoubleArray(buffers, bufSize)
-    parityBuf = createDoubleArray(parity, bufSize)
-    recoveredBuf = createDoubleArray(buffers, bufSize)
-
-  defer:
-    freeDoubleArray(dataBuf, buffers)
-    freeDoubleArray(parityBuf, parity)
-    freeDoubleArray(recoveredBuf, buffers)
-
-  for i in 0..<buffers:
-    var
-      dataSeq = newSeq[byte](bufSize)
-
-    randomCRCPacket(dataSeq)
-    copyMem(dataBuf[i],addr dataSeq[0],bufSize)
-
-  encoder.encode(dataBuf, parityBuf,buffers,parity).tryGet()
+    dataBuf = newSeqOfCap[seq[byte]](buffers)
+    parityBuf = newSeqOfCap[seq[byte]](parity)
+    recoveredBuf = newSeqOfCap[seq[byte]](buffers)
+
+  for _ in 0 ..< buffers:
+    var dataSeq = newSeq[byte](bufSize)
+    randomCRCPacket(dataSeq)
+    dataBuf.add(dataSeq)
+    recoveredBuf.add(newSeq[byte](bufSize))
+
+  for _ in 0 ..< parity:
+    parityBuf.add(newSeq[byte](bufSize))
+
+  encoder.encode(dataBuf, parityBuf).tryGet()

   if dataLosses > 0:
-    dropRandomIdx(dataBuf,buffers, dataLosses)
+    dropRandomIdx(dataBuf, dataLosses)

   if parityLosses > 0:
-    dropRandomIdx(parityBuf,parity,parityLosses)
+    dropRandomIdx(parityBuf, parityLosses)

-  decoder.decode(dataBuf, parityBuf, recoveredBuf,buffers,parity,buffers).tryGet()
+  decoder.decode(dataBuf, parityBuf, recoveredBuf).tryGet()

-  for i in 0..<buffers:
-    if dataBuf[i].isNil:
-      if not checkCRCPacket(recoveredBuf[i],bufSize):
+  for i, d in dataBuf:
+    if d.len <= 0:
+      if not checkCRCPacket(recoveredBuf[i]):
         return err(("Check failed for packet " & $i).cstring)

   ok()
@@ -18,86 +18,65 @@ suite "Leopard Parametrization":
   test "Should not allow invalid data/parity buffer counts":
     check:
       LeoEncoder.init(64, 1, 2).error ==
        "number of parity buffers cannot exceed number of data buffers!"

   test "Should not allow data + parity to exceed 65536":
     check:
       LeoEncoder.init(64, 65536 + 1, 0).error ==
        "number of parity and data buffers cannot exceed 65536!"

       LeoEncoder.init(64, 32768 + 1, 32768).error ==
        "number of parity and data buffers cannot exceed 65536!"

   test "Should not allow encoding with invalid data buffer counts":
     var
-      dataLen =3
-      parityLen = 2
       leo = LeoEncoder.init(64, 4, 2).tryGet()
-      data = createDoubleArray(dataLen, 64)
-      parity = createDoubleArray(parityLen, 64)
-    defer:
-      freeDoubleArray(data, dataLen)
-      freeDoubleArray(parity, parityLen)
+      data = newSeq[seq[byte]](3)
+      parity = newSeq[seq[byte]](2)
     check:
-      leo.encode(data, parity,dataLen,parityLen).error == "Number of data buffers should match!"
+      leo.encode(data, parity).error == "Number of data buffers should match!"

   test "Should not allow encoding with invalid parity buffer counts":
     var
-      dataLen =4
-      parityLen = 3
       leo = LeoEncoder.init(64, 4, 2).tryGet()
-      data = createDoubleArray(dataLen, 64)
-      parity = createDoubleArray(parityLen, 64)
-
-    defer:
-      freeDoubleArray(data, dataLen)
-      freeDoubleArray(parity, parityLen)
+      data = newSeq[seq[byte]](4)
+      parity = newSeq[seq[byte]](3)

     check:
-      leo.encode(data, parity,dataLen,parityLen).error == "Number of parity buffers should match!"
+      leo.encode(data, parity).error == "Number of parity buffers should match!"

   test "Should not allow decoding with invalid data buffer counts":
     var
-      dataLen =3
-      parityLen = 2
       leo = LeoDecoder.init(64, 4, 2).tryGet()
-      data = createDoubleArray(dataLen, 64)
-      parity = createDoubleArray(parityLen, 64)
-      recovered = createDoubleArray(dataLen, 64)
-
-    defer:
-      freeDoubleArray(data, dataLen)
-      freeDoubleArray(parity, parityLen)
-      freeDoubleArray(recovered, dataLen)
+      data = newSeq[seq[byte]](3)
+      parity = newSeq[seq[byte]](2)
+      recovered = newSeq[seq[byte]](3)

     check:
-      leo.decode(data, parity, recovered,dataLen,parityLen,dataLen).error == "Number of data buffers should match!"
+      leo.decode(data, parity, recovered).error == "Number of data buffers should match!"

   test "Should not allow decoding with invalid data buffer counts":
     var
-      dataLen =4
-      parityLen = 1
-      recoveredLen = 3
       leo = LeoDecoder.init(64, 4, 2).tryGet()
-      data = createDoubleArray(dataLen, 64)
-      parity = createDoubleArray(parityLen, 64)
-      recovered = createDoubleArray(recoveredLen, 64)
+      data = newSeq[seq[byte]](4)
+      parity = newSeq[seq[byte]](1)
+      recovered = newSeq[seq[byte]](3)

     check:
-      leo.decode(data, parity, recovered,dataLen,parityLen,recoveredLen).error == "Number of parity buffers should match!"
+      leo.decode(data, parity, recovered).error ==
+        "Number of parity buffers should match!"

   test "Should not allow decoding with invalid data buffer counts":
     var
-      dataLen =4
-      parityLen = 2
-      recoveredLen = 3
       leo = LeoDecoder.init(64, 4, 2).tryGet()
-      data = createDoubleArray(dataLen, 64)
-      parity = createDoubleArray(parityLen, 64)
-      recovered = createDoubleArray(recoveredLen, 64)
+      data = newSeq[seq[byte]](4)
+      parity = newSeq[seq[byte]](2)
+      recovered = newSeq[seq[byte]](3)

     check:
-      leo.decode(data, parity, recovered,dataLen,parityLen,recoveredLen).error == "Number of recovered buffers should match buffers!"
+      leo.decode(data, parity, recovered).error ==
+        "Number of recovered buffers should match buffers!"

 suite "Leopard simple Encode/Decode":
   const
@@ -109,76 +88,67 @@ suite "Leopard simple Encode/Decode":
   var
     encoder: LeoEncoder
     decoder: LeoDecoder
-    data: ptr UncheckedArray[ptr UncheckedArray[byte]]
-    parity: ptr UncheckedArray[ptr UncheckedArray[byte]]
-    recovered: ptr UncheckedArray[ptr UncheckedArray[byte]]
+    data: seq[seq[byte]]
+    parity: seq[seq[byte]]
+    recovered: seq[seq[byte]]

   setup:
     encoder = LeoEncoder.init(BufferSize, DataCount, ParityCount).tryGet()
     decoder = LeoDecoder.init(BufferSize, DataCount, ParityCount).tryGet()
-    data = createDoubleArray(DataCount, BufferSize)
-    parity = createDoubleArray(ParityCount, BufferSize)
-    recovered = createDoubleArray(DataCount, BufferSize)
+    data = newSeq[seq[byte]](DataCount)
+    parity = newSeq[seq[byte]](ParityCount)
+    recovered = newSeq[seq[byte]](DataCount)

   teardown:
-    freeDoubleArray(data, DataCount)
-    freeDoubleArray(parity, ParityCount)
-    freeDoubleArray(recovered, DataCount)
     encoder.free()
     decoder.free()

   test "Test 2 data loses out of 4 possible":
-    for i in 0..<DataCount:
-      var
-        str = TestString & " " & $i
-
-      copyMem(data[i], addr str[0], str.len)
-
-    encoder.encode(data, parity,DataCount,ParityCount).tryGet()
-
-    var
-      data1 =cast[ptr UncheckedArray[byte]](allocShared0(sizeof(byte) * BufferSize))
-      data2 = cast[ptr UncheckedArray[byte]](allocShared0(sizeof(byte) * BufferSize))
-
-    defer:
-      deallocShared(data1)
-      deallocShared(data2)
-
-    copyMem(data1,data[0], BufferSize)
-    copyMem(data2,data[1], BufferSize)
-
-    data[0]=nil
-    data[1]=nil
-
-    decoder.decode(data, parity, recovered,DataCount,ParityCount,DataCount).tryGet()
-
-    check equalMem(recovered[0], data1, BufferSize)
-    check equalMem(recovered[1], data2, BufferSize)
-
-  test "Test 1 data and 1 parity loss out of 4 possible":
-    for i in 0..<DataCount:
-      var
-        str = TestString & " " & $i
+    for i in 0 ..< DataCount:
+      data[i] = newSeq[byte](BufferSize)
+      recovered[i] = newSeq[byte](BufferSize)
+
+      var str = TestString & " " & $i

       copyMem(addr data[i][0], addr str[0], str.len)

-    encoder.encode(data, parity,DataCount,ParityCount).tryGet()
-
-    var data1 = cast[ptr UncheckedArray[byte]](allocShared0(sizeof(byte) * BufferSize))
-
-    defer: deallocShared(data1)
-
-    copyMem(data1,data[0], BufferSize)
-
-    data[0]=nil
-    parity[0]=nil
-
-    decoder.decode(data, parity, recovered,DataCount,ParityCount,DataCount).tryGet()
-
-    check equalMem(recovered[0], data1, BufferSize)
+    for i in 0 ..< ParityCount:
+      parity[i] = newSeq[byte](BufferSize)
+
+    encoder.encode(data, parity).tryGet()
+
+    var
+      data1 = data[0]
+      data2 = data[1]
+
+    data[0].setLen(0)
+    data[1].setLen(0)
+
+    decoder.decode(data, parity, recovered).tryGet()
+
+    check recovered[0] == data1
+    check recovered[1] == data2
+
+  test "Test 1 data and 1 parity loss out of 4 possible":
+    for i in 0 ..< DataCount:
+      data[i] = newSeq[byte](BufferSize)
+      recovered[i] = newSeq[byte](BufferSize)
+
+      var str = TestString & " " & $i
+
+      copyMem(addr data[i][0], addr str[0], str.len)
+
+    for i in 0 ..< ParityCount:
+      parity[i] = newSeq[byte](BufferSize)
+
+    encoder.encode(data, parity).tryGet()
+
+    var data1 = data[0]
+
+    data[0].setLen(0)
+    parity[0].setLen(0)
+
+    decoder.decode(data, parity, recovered).tryGet()
+    check recovered[0] == data1

 suite "Leopard Encode/Decode":
   test "bufSize = 4096, K = 800, M = 200 - drop data = 200 data":
@@ -228,7 +198,8 @@ suite "Leopard Encode/Decode":
     try:
       encoder = LeoEncoder.init(bufSize, buffers, parity).tryGet()
       decoder = LeoDecoder.init(bufSize, buffers, parity).tryGet()
-      testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder).tryGet()
+      testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder)
+        .tryGet()
     finally:
       encoder.free()
       decoder.free()

@@ -246,7 +217,8 @@ suite "Leopard Encode/Decode":
     try:
       encoder = LeoEncoder.init(bufSize, buffers, parity).tryGet()
      decoder = LeoDecoder.init(bufSize, buffers, parity).tryGet()
-      testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder).tryGet()
+      testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder)
+        .tryGet()
     finally:
       encoder.free()
       decoder.free()

@@ -264,7 +236,8 @@ suite "Leopard Encode/Decode":
     try:
       encoder = LeoEncoder.init(bufSize, buffers, parity).tryGet()
       decoder = LeoDecoder.init(bufSize, buffers, parity).tryGet()
-      testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder).tryGet()
+      testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder)
+        .tryGet()
     finally:
       encoder.free()
      decoder.free()

@@ -282,7 +255,8 @@ suite "Leopard Encode/Decode":
     try:
       encoder = LeoEncoder.init(bufSize, buffers, parity).tryGet()
       decoder = LeoDecoder.init(bufSize, buffers, parity).tryGet()
-      testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder).tryGet()
+      testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder)
+        .tryGet()
     finally:
       encoder.free()
       decoder.free()

@@ -300,7 +274,8 @@ suite "Leopard Encode/Decode":
     try:
       encoder = LeoEncoder.init(bufSize, buffers, parity).tryGet()
       decoder = LeoDecoder.init(bufSize, buffers, parity).tryGet()
-      testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder).tryGet()
+      testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder)
+        .tryGet()
     finally:
       encoder.free()
       decoder.free()

@@ -318,7 +293,8 @@ suite "Leopard Encode/Decode":
     try:
       encoder = LeoEncoder.init(bufSize, buffers, parity).tryGet()
       decoder = LeoDecoder.init(bufSize, buffers, parity).tryGet()
-      testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder).tryGet()
+      testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder)
+        .tryGet()
     finally:
       encoder.free()
       decoder.free()

@@ -336,23 +312,25 @@ suite "Leopard Encode/Decode":
     try:
       encoder = LeoEncoder.init(bufSize, buffers, parity).tryGet()
       decoder = LeoDecoder.init(bufSize, buffers, parity).tryGet()
-      testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder).tryGet()
+      testPackets(buffers, parity, bufSize, dataLoses, parityLoses, encoder, decoder)
+        .tryGet()
     finally:
       encoder.free()
       decoder.free()

 suite "Leopard use same encoder/decoder multiple times":
   var
     encoder: LeoEncoder
     decoder: LeoDecoder

   try:
     encoder = LeoEncoder.init(4096, 800, 800).tryGet()
     decoder = LeoDecoder.init(4096, 800, 800).tryGet()
-    for i in 0..10:
+    for i in 0 .. 10:
       let lost = 40 * i
-      test "Encode/Decode using same encoder/decoder - lost data = " & $lost & " lost parity = " & $lost:
-        testPackets(800, 800, 4096, 40 * i, 40 * i, encoder, decoder).tryGet()
+      test "Encode/Decode using same encoder/decoder - lost data = " & $lost &
+          " lost parity = " & $lost:
+        testPackets(800, 800, 4096, 40 * i, 40 * i, encoder, decoder).tryGet()
   finally:
     encoder.free()
     decoder.free()