/*
    Copyright (c) 2017 Christopher A. Taylor.  All rights reserved.

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.
    * Neither the name of Leopard-RS nor the names of its contributors may be
      used to endorse or promote products derived from this software without
      specific prior written permission.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
    ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
    LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
    CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
    SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
    INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
    CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
    ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
    POSSIBILITY OF SUCH DAMAGE.
*/

#include "LeopardFF16.h"

#ifdef LEO_HAS_FF16

#include <string.h>

#ifdef _MSC_VER
    #pragma warning(disable: 4752) // found Intel(R) Advanced Vector Extensions; consider using /arch:AVX
#endif

namespace leopard { namespace ff16 {


//------------------------------------------------------------------------------
// Datatypes and Constants

// Basis used for generating logarithm tables
static const ffe_t kCantorBasis[kBits] = {
    0x0001, 0xACCA, 0x3C0E, 0x163E,
    0xC582, 0xED2E, 0x914C, 0x4012,
    0x6C98, 0x10D8, 0x6A72, 0xB900,
    0xFDB8, 0xFB34, 0xFF38, 0x991E
};

// Using the Cantor basis here enables us to avoid a lot of extra calculations
// when applying the formal derivative in decoding.


//------------------------------------------------------------------------------
// Field Operations

// z = x + y (mod kModulus)
static inline ffe_t AddMod(const ffe_t a, const ffe_t b)
{
    const unsigned sum = (unsigned)a + b;

    // Partial reduction step, allowing for kModulus to be returned
    return static_cast<ffe_t>(sum + (sum >> kBits));
}

// z = x - y (mod kModulus)
static inline ffe_t SubMod(const ffe_t a, const ffe_t b)
{
    const unsigned dif = (unsigned)a - b;

    // Partial reduction step, allowing for kModulus to be returned
    return static_cast<ffe_t>(dif + (dif >> kBits));
}
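
// Worked example of the partial reduction, using this field's parameters
// (kBits = 16, kModulus = 0xFFFF): AddMod(0xFFF0, 0x0020) forms
// sum = 0x10010; folding the carry back in via (sum >> 16) gives 0x10011,
// which truncates to 0x0011 = (0xFFF0 + 0x0020) mod 0xFFFF. The result may
// be kModulus itself rather than 0, which is why the reduction is "partial".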


//------------------------------------------------------------------------------
// Fast Walsh-Hadamard Transform (FWHT) (mod kModulus)

// {a, b} = {a + b, a - b} (Mod Q)
static LEO_FORCE_INLINE void FWHT_2(ffe_t& LEO_RESTRICT a, ffe_t& LEO_RESTRICT b)
{
    const ffe_t sum = AddMod(a, b);
    const ffe_t dif = SubMod(a, b);
    a = sum;
    b = dif;
}

static LEO_FORCE_INLINE void FWHT_4(ffe_t* data, unsigned s)
{
    const unsigned s2 = s << 1;

    ffe_t t0 = data[0];
    ffe_t t1 = data[s];
    ffe_t t2 = data[s2];
    ffe_t t3 = data[s2 + s];

    FWHT_2(t0, t1);
    FWHT_2(t2, t3);
    FWHT_2(t0, t2);
    FWHT_2(t1, t3);

    data[0] = t0;
    data[s] = t1;
    data[s2] = t2;
    data[s2 + s] = t3;
}

// Decimation in time (DIT) Fast Walsh-Hadamard Transform
// Unrolls pairs of layers to perform cross-layer operations in registers
// m_truncated: Number of elements that are non-zero at the front of data
static void FWHT(ffe_t* data, const unsigned m, const unsigned m_truncated)
{
    // Decimation in time: Unroll 2 layers at a time
    unsigned dist = 1, dist4 = 4;
    for (; dist4 <= m; dist = dist4, dist4 <<= 2)
    {
        // For each set of dist*4 elements:
        for (unsigned r = 0; r < m_truncated; r += dist4)
        {
            // For each set of dist elements:
            for (unsigned i = r; i < r + dist; ++i)
                FWHT_4(data + i, dist);
        }
    }

    // If there is one layer left:
    if (dist < m)
        for (unsigned i = 0; i < dist; ++i)
            FWHT_2(data[i], data[i + dist]);
}
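
// FWHT_4 fuses two transform layers over the strided quad
// {data[0], data[s], data[2*s], data[3*s]}, so each element makes one round
// trip to memory instead of two; FWHT() applies it with dist = 1, 4, 16, ...
// and finishes with a single FWHT_2 sweep when log2(m) is odd.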


//------------------------------------------------------------------------------
// Logarithm Tables

static ffe_t LogLUT[kOrder];
static ffe_t ExpLUT[kOrder];


// Returns a * Log(b)
static ffe_t MultiplyLog(ffe_t a, ffe_t log_b)
{
    /*
        Note that this operation is not a normal multiplication in a finite
        field because the right operand is already a logarithm.  This is done
        because it moves K table lookups from the Decode() method into the
        initialization step that is less performance critical.  The LogWalsh[]
        table below contains precalculated logarithms so it is easier to do
        all the other multiplies in that form as well.
    */
    if (a == 0)
        return 0;
    return ExpLUT[AddMod(LogLUT[a], log_b)];
}


// Initialize LogLUT[], ExpLUT[]
static void InitializeLogarithmTables()
{
    // LFSR table generation:

    unsigned state = 1;
    for (unsigned i = 0; i < kModulus; ++i)
    {
        ExpLUT[state] = static_cast<ffe_t>(i);
        state <<= 1;
        if (state >= kOrder)
            state ^= kPolynomial;
    }
    ExpLUT[0] = kModulus;

    // Conversion to Cantor basis:

    LogLUT[0] = 0;
    for (unsigned i = 0; i < kBits; ++i)
    {
        const ffe_t basis = kCantorBasis[i];
        const unsigned width = static_cast<unsigned>(1UL << i);

        for (unsigned j = 0; j < width; ++j)
            LogLUT[j + width] = LogLUT[j] ^ basis;
    }

    for (unsigned i = 0; i < kOrder; ++i)
        LogLUT[i] = ExpLUT[LogLUT[i]];

    for (unsigned i = 0; i < kOrder; ++i)
        ExpLUT[LogLUT[i]] = i;

    ExpLUT[kModulus] = ExpLUT[0];
}
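
// The LFSR loop above walks state = x^i through all kModulus nonzero field
// elements in the polynomial basis, recording each discrete logarithm.  The
// subset-XOR loop then makes LogLUT[i] the Cantor-basis element for index i,
// the following pass converts that element to its discrete log, and the
// final pass rebuilds ExpLUT[] as the exact inverse mapping of LogLUT[].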


//------------------------------------------------------------------------------
// Multiplies

/*
    The multiplication algorithm used follows the approach outlined in {4}.
    Specifically section 7 outlines the algorithm used here for 16-bit fields.
    The ALTMAP memory layout is used since there is no need to convert in/out.
*/

struct Multiply128LUT_t
{
    LEO_M128 Lo[4];
    LEO_M128 Hi[4];
};

static const Multiply128LUT_t* Multiply128LUT = nullptr;

// 128-bit x_reg ^= y_reg * log_m
#define LEO_MULADD_128(xreg, yreg, table) { \
    LEO_M128 data_0 = _mm_and_si128(work_reg_lo_##yreg, clr_mask); \
    LEO_M128 data_1 = _mm_srli_epi64(work_reg_lo_##yreg, 4); \
    data_1 = _mm_and_si128(data_1, clr_mask); \
    LEO_M128 prod_lo = _mm_shuffle_epi8(T0_lo_##table, data_0); \
    prod_lo = _mm_xor_si128(prod_lo, _mm_shuffle_epi8(T1_lo_##table, data_1)); \
    LEO_M128 prod_hi = _mm_shuffle_epi8(T0_hi_##table, data_0); \
    prod_hi = _mm_xor_si128(prod_hi, _mm_shuffle_epi8(T1_hi_##table, data_1)); \
    data_0 = _mm_and_si128(work_reg_hi_##yreg, clr_mask); \
    data_1 = _mm_srli_epi64(work_reg_hi_##yreg, 4); \
    data_1 = _mm_and_si128(data_1, clr_mask); \
    prod_lo = _mm_xor_si128(prod_lo, _mm_shuffle_epi8(T2_lo_##table, data_0)); \
    prod_lo = _mm_xor_si128(prod_lo, _mm_shuffle_epi8(T3_lo_##table, data_1)); \
    prod_hi = _mm_xor_si128(prod_hi, _mm_shuffle_epi8(T2_hi_##table, data_0)); \
    prod_hi = _mm_xor_si128(prod_hi, _mm_shuffle_epi8(T3_hi_##table, data_1)); \
    work_reg_lo_##xreg = _mm_xor_si128(prod_lo, work_reg_lo_##xreg); \
    work_reg_hi_##xreg = _mm_xor_si128(prod_hi, work_reg_hi_##xreg); }
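
// Multiplication by a fixed field element m is GF(2)-linear in y, so the
// 16-bit product splits over the four 4-bit nibbles of y:
//   y * m = T0[y & 15] ^ T1[(y >> 4) & 15] ^ T2[(y >> 8) & 15] ^ T3[y >> 12]
// with Ti[x] = (x << 4*i) * m.  Each 16-entry table is answered by one
// PSHUFB lookup, and the Lo/Hi register pair keeps the low and high product
// bytes apart, matching the ALTMAP byte-split layout of the work buffers.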

#if defined(LEO_TRY_AVX2)

struct Multiply256LUT_t
{
    LEO_M256 Lo[4];
    LEO_M256 Hi[4];
};

static const Multiply256LUT_t* Multiply256LUT = nullptr;

// 256-bit x_reg ^= y_reg * log_m
#define LEO_MULADD_256(xreg, yreg, table) { \
    LEO_M256 data_0 = _mm256_and_si256(work_reg_lo_##yreg, clr_mask); \
    LEO_M256 data_1 = _mm256_srli_epi64(work_reg_lo_##yreg, 4); \
    data_1 = _mm256_and_si256(data_1, clr_mask); \
    LEO_M256 prod_lo = _mm256_shuffle_epi8(T0_lo_##table, data_0); \
    prod_lo = _mm256_xor_si256(prod_lo, _mm256_shuffle_epi8(T1_lo_##table, data_1)); \
    LEO_M256 prod_hi = _mm256_shuffle_epi8(T0_hi_##table, data_0); \
    prod_hi = _mm256_xor_si256(prod_hi, _mm256_shuffle_epi8(T1_hi_##table, data_1)); \
    data_0 = _mm256_and_si256(work_reg_hi_##yreg, clr_mask); \
    data_1 = _mm256_srli_epi64(work_reg_hi_##yreg, 4); \
    data_1 = _mm256_and_si256(data_1, clr_mask); \
    prod_lo = _mm256_xor_si256(prod_lo, _mm256_shuffle_epi8(T2_lo_##table, data_0)); \
    prod_lo = _mm256_xor_si256(prod_lo, _mm256_shuffle_epi8(T3_lo_##table, data_1)); \
    prod_hi = _mm256_xor_si256(prod_hi, _mm256_shuffle_epi8(T2_hi_##table, data_0)); \
    prod_hi = _mm256_xor_si256(prod_hi, _mm256_shuffle_epi8(T3_hi_##table, data_1)); \
    work_reg_lo_##xreg = _mm256_xor_si256(prod_lo, work_reg_lo_##xreg); \
    work_reg_hi_##xreg = _mm256_xor_si256(prod_hi, work_reg_hi_##xreg); }

#endif // LEO_TRY_AVX2


static void InitializeMultiplyTables()
{
    // The PSHUFB-based tables below cannot be built without SSSE3:
    if (!CpuHasSSSE3)
        return;

    if (CpuHasAVX2)
        Multiply256LUT = reinterpret_cast<const Multiply256LUT_t*>(SIMDSafeAllocate(sizeof(Multiply256LUT_t) * kOrder));
    else
        Multiply128LUT = reinterpret_cast<const Multiply128LUT_t*>(SIMDSafeAllocate(sizeof(Multiply128LUT_t) * kOrder));

    // For each value we could multiply by:
    for (unsigned log_m = 0; log_m < kOrder; ++log_m)
    {
        // For each 4 bits of the finite field width in bits:
        for (unsigned i = 0, shift = 0; i < 4; ++i, shift += 4)
        {
            // Construct 16 entry LUT for PSHUFB
            uint8_t prod_lo[16], prod_hi[16];
            for (ffe_t x = 0; x < 16; ++x)
            {
                const ffe_t prod = MultiplyLog(x << shift, static_cast<ffe_t>(log_m));
                prod_lo[x] = static_cast<uint8_t>(prod);
                prod_hi[x] = static_cast<uint8_t>(prod >> 8);
            }

            const LEO_M128 value_lo = _mm_loadu_si128((LEO_M128*)prod_lo);
            const LEO_M128 value_hi = _mm_loadu_si128((LEO_M128*)prod_hi);

            // Store in 128-bit wide table
            if (!CpuHasAVX2)
            {
                _mm_storeu_si128((LEO_M128*)&Multiply128LUT[log_m].Lo[i], value_lo);
                _mm_storeu_si128((LEO_M128*)&Multiply128LUT[log_m].Hi[i], value_hi);
            }

            // Store in 256-bit wide table
#if defined(LEO_TRY_AVX2)
            if (CpuHasAVX2)
            {
                _mm256_storeu_si256((LEO_M256*)&Multiply256LUT[log_m].Lo[i],
                    _mm256_broadcastsi128_si256(value_lo));
                _mm256_storeu_si256((LEO_M256*)&Multiply256LUT[log_m].Hi[i],
                    _mm256_broadcastsi128_si256(value_hi));
            }
#endif // LEO_TRY_AVX2
        }
    }
}
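
// Table footprint: with kOrder = 65536 this is 256 bytes per entry for
// Multiply256LUT (16 MB) or 128 bytes per entry for Multiply128LUT (8 MB),
// built once at startup so the bulk operations below are lookups only.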


static void mul_mem(
    void * LEO_RESTRICT x, const void * LEO_RESTRICT y,
    ffe_t log_m, uint64_t bytes)
{
#if defined(LEO_TRY_AVX2)
    if (CpuHasAVX2)
    {
#define LEO_MUL_TABLES_256() \
        const LEO_M256 T0_lo = _mm256_loadu_si256(&Multiply256LUT[log_m].Lo[0]); \
        const LEO_M256 T1_lo = _mm256_loadu_si256(&Multiply256LUT[log_m].Lo[1]); \
        const LEO_M256 T2_lo = _mm256_loadu_si256(&Multiply256LUT[log_m].Lo[2]); \
        const LEO_M256 T3_lo = _mm256_loadu_si256(&Multiply256LUT[log_m].Lo[3]); \
        const LEO_M256 T0_hi = _mm256_loadu_si256(&Multiply256LUT[log_m].Hi[0]); \
        const LEO_M256 T1_hi = _mm256_loadu_si256(&Multiply256LUT[log_m].Hi[1]); \
        const LEO_M256 T2_hi = _mm256_loadu_si256(&Multiply256LUT[log_m].Hi[2]); \
        const LEO_M256 T3_hi = _mm256_loadu_si256(&Multiply256LUT[log_m].Hi[3]);

        LEO_MUL_TABLES_256();

        const LEO_M256 clr_mask = _mm256_set1_epi8(0x0f);

        LEO_M256 * LEO_RESTRICT x32 = reinterpret_cast<LEO_M256 *>(x);
        const LEO_M256 * LEO_RESTRICT y32 = reinterpret_cast<const LEO_M256 *>(y);

        do
        {
#define LEO_MUL_256(x_ptr, y_ptr) { \
            const LEO_M256 A_lo = _mm256_loadu_si256(y_ptr); \
            const LEO_M256 A_hi = _mm256_loadu_si256(y_ptr + 1); \
            LEO_M256 data_0 = _mm256_and_si256(A_lo, clr_mask); \
            LEO_M256 data_1 = _mm256_srli_epi64(A_lo, 4); \
            data_1 = _mm256_and_si256(data_1, clr_mask); \
            LEO_M256 data_2 = _mm256_and_si256(A_hi, clr_mask); \
            LEO_M256 data_3 = _mm256_srli_epi64(A_hi, 4); \
            data_3 = _mm256_and_si256(data_3, clr_mask); \
            LEO_M256 output_lo = _mm256_shuffle_epi8(T0_lo, data_0); \
            output_lo = _mm256_xor_si256(output_lo, _mm256_shuffle_epi8(T1_lo, data_1)); \
            output_lo = _mm256_xor_si256(output_lo, _mm256_shuffle_epi8(T2_lo, data_2)); \
            output_lo = _mm256_xor_si256(output_lo, _mm256_shuffle_epi8(T3_lo, data_3)); \
            LEO_M256 output_hi = _mm256_shuffle_epi8(T0_hi, data_0); \
            output_hi = _mm256_xor_si256(output_hi, _mm256_shuffle_epi8(T1_hi, data_1)); \
            output_hi = _mm256_xor_si256(output_hi, _mm256_shuffle_epi8(T2_hi, data_2)); \
            output_hi = _mm256_xor_si256(output_hi, _mm256_shuffle_epi8(T3_hi, data_3)); \
            _mm256_storeu_si256(x_ptr, output_lo); \
            _mm256_storeu_si256(x_ptr + 1, output_hi); }

            LEO_MUL_256(x32, y32);
            y32 += 2, x32 += 2;

            bytes -= 64;
        } while (bytes > 0);

        return;
    }
#endif // LEO_TRY_AVX2

#define LEO_MUL_TABLES_128() \
    const LEO_M128 T0_lo = _mm_loadu_si128(&Multiply128LUT[log_m].Lo[0]); \
    const LEO_M128 T1_lo = _mm_loadu_si128(&Multiply128LUT[log_m].Lo[1]); \
    const LEO_M128 T2_lo = _mm_loadu_si128(&Multiply128LUT[log_m].Lo[2]); \
    const LEO_M128 T3_lo = _mm_loadu_si128(&Multiply128LUT[log_m].Lo[3]); \
    const LEO_M128 T0_hi = _mm_loadu_si128(&Multiply128LUT[log_m].Hi[0]); \
    const LEO_M128 T1_hi = _mm_loadu_si128(&Multiply128LUT[log_m].Hi[1]); \
    const LEO_M128 T2_hi = _mm_loadu_si128(&Multiply128LUT[log_m].Hi[2]); \
    const LEO_M128 T3_hi = _mm_loadu_si128(&Multiply128LUT[log_m].Hi[3]);

    LEO_MUL_TABLES_128();

    const LEO_M128 clr_mask = _mm_set1_epi8(0x0f);

    LEO_M128 * LEO_RESTRICT x16 = reinterpret_cast<LEO_M128 *>(x);
    const LEO_M128 * LEO_RESTRICT y16 = reinterpret_cast<const LEO_M128 *>(y);

    do
    {
#define LEO_MUL_128(x_ptr, y_ptr) { \
        const LEO_M128 A_lo = _mm_loadu_si128(y_ptr); \
        const LEO_M128 A_hi = _mm_loadu_si128(y_ptr + 2); \
        LEO_M128 data_0 = _mm_and_si128(A_lo, clr_mask); \
        LEO_M128 data_1 = _mm_srli_epi64(A_lo, 4); \
        data_1 = _mm_and_si128(data_1, clr_mask); \
        LEO_M128 data_2 = _mm_and_si128(A_hi, clr_mask); \
        LEO_M128 data_3 = _mm_srli_epi64(A_hi, 4); \
        data_3 = _mm_and_si128(data_3, clr_mask); \
        LEO_M128 output_lo = _mm_shuffle_epi8(T0_lo, data_0); \
        output_lo = _mm_xor_si128(output_lo, _mm_shuffle_epi8(T1_lo, data_1)); \
        output_lo = _mm_xor_si128(output_lo, _mm_shuffle_epi8(T2_lo, data_2)); \
        output_lo = _mm_xor_si128(output_lo, _mm_shuffle_epi8(T3_lo, data_3)); \
        LEO_M128 output_hi = _mm_shuffle_epi8(T0_hi, data_0); \
        output_hi = _mm_xor_si128(output_hi, _mm_shuffle_epi8(T1_hi, data_1)); \
        output_hi = _mm_xor_si128(output_hi, _mm_shuffle_epi8(T2_hi, data_2)); \
        output_hi = _mm_xor_si128(output_hi, _mm_shuffle_epi8(T3_hi, data_3)); \
        _mm_storeu_si128(x_ptr, output_lo); \
        _mm_storeu_si128(x_ptr + 2, output_hi); }

        LEO_MUL_128(x16 + 1, y16 + 1);
        LEO_MUL_128(x16, y16);
        x16 += 4, y16 += 4;

        bytes -= 64;
    } while (bytes > 0);
}
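
// mul_mem consumes 64 bytes per loop iteration: in the AVX2 path one 32-byte
// block of low product bytes plus one of high bytes, and in the SSSE3 path
// two interleaved 16-byte lo/hi pairs (register offsets 0/2 and 1/3), which
// is why bytes must be a multiple of 64.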


//------------------------------------------------------------------------------
// FFT

// Twisted factors used in FFT
static ffe_t FFTSkew[kModulus];

// Factors used in the evaluation of the error locator polynomial
static ffe_t LogWalsh[kOrder];


static void FFTInitialize()
{
    ffe_t temp[kBits - 1];

    // Generate FFT skew vector {1}:

    for (unsigned i = 1; i < kBits; ++i)
        temp[i - 1] = static_cast<ffe_t>(1UL << i);

    for (unsigned m = 0; m < (kBits - 1); ++m)
    {
        const unsigned step = 1UL << (m + 1);

        FFTSkew[(1UL << m) - 1] = 0;

        for (unsigned i = m; i < (kBits - 1); ++i)
        {
            const unsigned s = (1UL << (i + 1));

            for (unsigned j = (1UL << m) - 1; j < s; j += step)
                FFTSkew[j + s] = FFTSkew[j] ^ temp[i];
        }

        temp[m] = kModulus - LogLUT[MultiplyLog(temp[m], LogLUT[temp[m] ^ 1])];

        for (unsigned i = m + 1; i < (kBits - 1); ++i)
        {
            const ffe_t sum = AddMod(LogLUT[temp[i] ^ 1], temp[m]);
            temp[i] = MultiplyLog(temp[i], sum);
        }
    }

    for (unsigned i = 0; i < kModulus; ++i)
        FFTSkew[i] = LogLUT[FFTSkew[i]];

    // Precalculate FWHT(Log[i]):

    for (unsigned i = 0; i < kOrder; ++i)
        LogWalsh[i] = LogLUT[i];
    LogWalsh[0] = 0;

    FWHT(LogWalsh, kOrder, kOrder);
}


/*
    Decimation in time IFFT:

    The decimation in time IFFT algorithm allows us to unroll 2 layers at a time,
    performing calculations on local registers and faster cache memory.

    Each ^___^ below indicates a butterfly between the associated indices.

    The ifft_butterfly(x, y) operation:

        y[] ^= x[]
        if (log_m != kModulus)
            x[] ^= exp(log(y[]) + log_m)

    Layer 0:
        0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7
        ^_^ ^_^ ^_^ ^_^ ^_^ ^_^ ^_^ ^_^

    Layer 1:
        0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7
        ^___^   ^___^   ^___^   ^___^
          ^___^   ^___^   ^___^   ^___^

    Layer 2:
        0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7
        ^_______^       ^_______^
          ^_______^       ^_______^
            ^_______^       ^_______^
              ^_______^       ^_______^

    Layer 3:
        0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7
        ^_______________^
          ^_______________^
            ^_______________^
              ^_______________^
                ^_______________^
                  ^_______________^
                    ^_______________^
                      ^_______________^

    DIT layer 0-1 operations, grouped 4 at a time:
        {0-1, 2-3, 0-2, 1-3},
        {4-5, 6-7, 4-6, 5-7},

    DIT layer 1-2 operations, grouped 4 at a time:
        {0-2, 4-6, 0-4, 2-6},
        {1-3, 5-7, 1-5, 3-7},

    DIT layer 2-3 operations, grouped 4 at a time:
        {0-4, 0'-4', 0-0', 4-4'},
        {1-5, 1'-5', 1-1', 5-5'},
*/

static void IFFT_DIT2(
    void * LEO_RESTRICT x, void * LEO_RESTRICT y,
    ffe_t log_m, uint64_t bytes)
{
#if defined(LEO_TRY_AVX2)
    if (CpuHasAVX2)
    {
        LEO_MUL_TABLES_256();

        const LEO_M256 clr_mask = _mm256_set1_epi8(0x0f);

        LEO_M256 * LEO_RESTRICT x32 = reinterpret_cast<LEO_M256 *>(x);
        LEO_M256 * LEO_RESTRICT y32 = reinterpret_cast<LEO_M256 *>(y);

        do
        {
#define LEO_IFFTB_256(x_ptr, y_ptr) { \
            LEO_M256 x_lo = _mm256_loadu_si256(x_ptr); \
            LEO_M256 y_lo = _mm256_loadu_si256(y_ptr); \
            y_lo = _mm256_xor_si256(y_lo, x_lo); \
            _mm256_storeu_si256(y_ptr, y_lo); \
            LEO_M256 data_0 = _mm256_and_si256(y_lo, clr_mask); \
            LEO_M256 data_1 = _mm256_srli_epi64(y_lo, 4); \
            data_1 = _mm256_and_si256(data_1, clr_mask); \
            LEO_M256 prod_lo = _mm256_shuffle_epi8(T0_lo, data_0); \
            prod_lo = _mm256_xor_si256(prod_lo, _mm256_shuffle_epi8(T1_lo, data_1)); \
            LEO_M256 prod_hi = _mm256_shuffle_epi8(T0_hi, data_0); \
            prod_hi = _mm256_xor_si256(prod_hi, _mm256_shuffle_epi8(T1_hi, data_1)); \
            LEO_M256 x_hi = _mm256_loadu_si256(x_ptr + 1); \
            LEO_M256 y_hi = _mm256_loadu_si256(y_ptr + 1); \
            y_hi = _mm256_xor_si256(y_hi, x_hi); \
            _mm256_storeu_si256(y_ptr + 1, y_hi); \
            data_0 = _mm256_and_si256(y_hi, clr_mask); \
            data_1 = _mm256_srli_epi64(y_hi, 4); \
            data_1 = _mm256_and_si256(data_1, clr_mask); \
            prod_lo = _mm256_xor_si256(prod_lo, _mm256_shuffle_epi8(T2_lo, data_0)); \
            prod_lo = _mm256_xor_si256(prod_lo, _mm256_shuffle_epi8(T3_lo, data_1)); \
            prod_hi = _mm256_xor_si256(prod_hi, _mm256_shuffle_epi8(T2_hi, data_0)); \
            prod_hi = _mm256_xor_si256(prod_hi, _mm256_shuffle_epi8(T3_hi, data_1)); \
            x_lo = _mm256_xor_si256(prod_lo, x_lo); \
            _mm256_storeu_si256(x_ptr, x_lo); \
            x_hi = _mm256_xor_si256(prod_hi, x_hi); \
            _mm256_storeu_si256(x_ptr + 1, x_hi); }

            LEO_IFFTB_256(x32, y32);
            y32 += 2, x32 += 2;

            bytes -= 64;
        } while (bytes > 0);

        return;
    }
#endif // LEO_TRY_AVX2

    LEO_MUL_TABLES_128();

    const LEO_M128 clr_mask = _mm_set1_epi8(0x0f);

    LEO_M128 * LEO_RESTRICT x16 = reinterpret_cast<LEO_M128 *>(x);
    LEO_M128 * LEO_RESTRICT y16 = reinterpret_cast<LEO_M128 *>(y);

    do
    {
#define LEO_IFFTB_128(x_ptr, y_ptr) { \
        LEO_M128 x_lo = _mm_loadu_si128(x_ptr); \
        LEO_M128 y_lo = _mm_loadu_si128(y_ptr); \
        y_lo = _mm_xor_si128(y_lo, x_lo); \
        _mm_storeu_si128(y_ptr, y_lo); \
        LEO_M128 data_0 = _mm_and_si128(y_lo, clr_mask); \
        LEO_M128 data_1 = _mm_srli_epi64(y_lo, 4); \
        data_1 = _mm_and_si128(data_1, clr_mask); \
        LEO_M128 prod_lo = _mm_shuffle_epi8(T0_lo, data_0); \
        prod_lo = _mm_xor_si128(prod_lo, _mm_shuffle_epi8(T1_lo, data_1)); \
        LEO_M128 prod_hi = _mm_shuffle_epi8(T0_hi, data_0); \
        prod_hi = _mm_xor_si128(prod_hi, _mm_shuffle_epi8(T1_hi, data_1)); \
        LEO_M128 x_hi = _mm_loadu_si128(x_ptr + 2); \
        LEO_M128 y_hi = _mm_loadu_si128(y_ptr + 2); \
        y_hi = _mm_xor_si128(y_hi, x_hi); \
        _mm_storeu_si128(y_ptr + 2, y_hi); \
        data_0 = _mm_and_si128(y_hi, clr_mask); \
        data_1 = _mm_srli_epi64(y_hi, 4); \
        data_1 = _mm_and_si128(data_1, clr_mask); \
        prod_lo = _mm_xor_si128(prod_lo, _mm_shuffle_epi8(T2_lo, data_0)); \
        prod_lo = _mm_xor_si128(prod_lo, _mm_shuffle_epi8(T3_lo, data_1)); \
        prod_hi = _mm_xor_si128(prod_hi, _mm_shuffle_epi8(T2_hi, data_0)); \
        prod_hi = _mm_xor_si128(prod_hi, _mm_shuffle_epi8(T3_hi, data_1)); \
        x_lo = _mm_xor_si128(prod_lo, x_lo); \
        _mm_storeu_si128(x_ptr, x_lo); \
        x_hi = _mm_xor_si128(prod_hi, x_hi); \
        _mm_storeu_si128(x_ptr + 2, x_hi); }

        LEO_IFFTB_128(x16 + 1, y16 + 1);
        LEO_IFFTB_128(x16, y16);
        x16 += 4, y16 += 4;

        bytes -= 64;
    } while (bytes > 0);
}
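
// IFFT_DIT2 applies the ifft_butterfly above to whole buffers: y is replaced
// by x ^ y in place, then x accumulates (x ^ y) * m through the nibble
// tables while the freshly computed y value is still in registers.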


// 4-way butterfly
static void IFFT_DIT4(
    uint64_t bytes,
    void** work,
    unsigned dist,
    const ffe_t log_m01,
    const ffe_t log_m23,
    const ffe_t log_m02)
{
#ifdef LEO_INTERLEAVE_BUTTERFLY4_OPT

#if defined(LEO_TRY_AVX2)

    if (CpuHasAVX2)
    {
#define LEO_IFFTB4_256_TABLE(xy) \
        const LEO_M256 T0_lo_##xy = _mm256_loadu_si256(&Multiply256LUT[log_m##xy].Lo[0]); \
        const LEO_M256 T1_lo_##xy = _mm256_loadu_si256(&Multiply256LUT[log_m##xy].Lo[1]); \
        const LEO_M256 T2_lo_##xy = _mm256_loadu_si256(&Multiply256LUT[log_m##xy].Lo[2]); \
        const LEO_M256 T3_lo_##xy = _mm256_loadu_si256(&Multiply256LUT[log_m##xy].Lo[3]); \
        const LEO_M256 T0_hi_##xy = _mm256_loadu_si256(&Multiply256LUT[log_m##xy].Hi[0]); \
        const LEO_M256 T1_hi_##xy = _mm256_loadu_si256(&Multiply256LUT[log_m##xy].Hi[1]); \
        const LEO_M256 T2_hi_##xy = _mm256_loadu_si256(&Multiply256LUT[log_m##xy].Hi[2]); \
        const LEO_M256 T3_hi_##xy = _mm256_loadu_si256(&Multiply256LUT[log_m##xy].Hi[3]);

        LEO_IFFTB4_256_TABLE(01);
        LEO_IFFTB4_256_TABLE(23);
        LEO_IFFTB4_256_TABLE(02);

        const LEO_M256 clr_mask = _mm256_set1_epi8(0x0f);

        LEO_M256 * LEO_RESTRICT work0 = reinterpret_cast<LEO_M256 *>(work[0]);
        LEO_M256 * LEO_RESTRICT work1 = reinterpret_cast<LEO_M256 *>(work[dist]);
        LEO_M256 * LEO_RESTRICT work2 = reinterpret_cast<LEO_M256 *>(work[dist * 2]);
        LEO_M256 * LEO_RESTRICT work3 = reinterpret_cast<LEO_M256 *>(work[dist * 3]);

        do
        {
            LEO_M256 work_reg_lo_0 = _mm256_loadu_si256(work0);
            LEO_M256 work_reg_hi_0 = _mm256_loadu_si256(work0 + 1);
            LEO_M256 work_reg_lo_1 = _mm256_loadu_si256(work1);
            LEO_M256 work_reg_hi_1 = _mm256_loadu_si256(work1 + 1);

            // First layer:
            work_reg_lo_1 = _mm256_xor_si256(work_reg_lo_0, work_reg_lo_1);
            work_reg_hi_1 = _mm256_xor_si256(work_reg_hi_0, work_reg_hi_1);
            if (log_m01 != kModulus)
                LEO_MULADD_256(0, 1, 01);

            LEO_M256 work_reg_lo_2 = _mm256_loadu_si256(work2);
            LEO_M256 work_reg_hi_2 = _mm256_loadu_si256(work2 + 1);
            LEO_M256 work_reg_lo_3 = _mm256_loadu_si256(work3);
            LEO_M256 work_reg_hi_3 = _mm256_loadu_si256(work3 + 1);

            work_reg_lo_3 = _mm256_xor_si256(work_reg_lo_2, work_reg_lo_3);
            work_reg_hi_3 = _mm256_xor_si256(work_reg_hi_2, work_reg_hi_3);
            if (log_m23 != kModulus)
                LEO_MULADD_256(2, 3, 23);

            // Second layer:
            work_reg_lo_2 = _mm256_xor_si256(work_reg_lo_0, work_reg_lo_2);
            work_reg_hi_2 = _mm256_xor_si256(work_reg_hi_0, work_reg_hi_2);
            work_reg_lo_3 = _mm256_xor_si256(work_reg_lo_1, work_reg_lo_3);
            work_reg_hi_3 = _mm256_xor_si256(work_reg_hi_1, work_reg_hi_3);
            if (log_m02 != kModulus)
            {
                LEO_MULADD_256(0, 2, 02);
                LEO_MULADD_256(1, 3, 02);
            }

            _mm256_storeu_si256(work0, work_reg_lo_0);
            _mm256_storeu_si256(work0 + 1, work_reg_hi_0);
            _mm256_storeu_si256(work1, work_reg_lo_1);
            _mm256_storeu_si256(work1 + 1, work_reg_hi_1);
            _mm256_storeu_si256(work2, work_reg_lo_2);
            _mm256_storeu_si256(work2 + 1, work_reg_hi_2);
            _mm256_storeu_si256(work3, work_reg_lo_3);
            _mm256_storeu_si256(work3 + 1, work_reg_hi_3);

            work0 += 2, work1 += 2, work2 += 2, work3 += 2;

            bytes -= 64;
        } while (bytes > 0);

        return;
    }

#endif // LEO_TRY_AVX2

    if (CpuHasSSSE3)
    {
#define LEO_IFFTB4_128_TABLE(xy) \
        const LEO_M128 T0_lo_##xy = _mm_loadu_si128(&Multiply128LUT[log_m##xy].Lo[0]); \
        const LEO_M128 T1_lo_##xy = _mm_loadu_si128(&Multiply128LUT[log_m##xy].Lo[1]); \
        const LEO_M128 T2_lo_##xy = _mm_loadu_si128(&Multiply128LUT[log_m##xy].Lo[2]); \
        const LEO_M128 T3_lo_##xy = _mm_loadu_si128(&Multiply128LUT[log_m##xy].Lo[3]); \
        const LEO_M128 T0_hi_##xy = _mm_loadu_si128(&Multiply128LUT[log_m##xy].Hi[0]); \
        const LEO_M128 T1_hi_##xy = _mm_loadu_si128(&Multiply128LUT[log_m##xy].Hi[1]); \
        const LEO_M128 T2_hi_##xy = _mm_loadu_si128(&Multiply128LUT[log_m##xy].Hi[2]); \
        const LEO_M128 T3_hi_##xy = _mm_loadu_si128(&Multiply128LUT[log_m##xy].Hi[3]);

        LEO_IFFTB4_128_TABLE(01);
        LEO_IFFTB4_128_TABLE(23);
        LEO_IFFTB4_128_TABLE(02);

        const LEO_M128 clr_mask = _mm_set1_epi8(0x0f);

        LEO_M128 * LEO_RESTRICT work0 = reinterpret_cast<LEO_M128 *>(work[0]);
        LEO_M128 * LEO_RESTRICT work1 = reinterpret_cast<LEO_M128 *>(work[dist]);
        LEO_M128 * LEO_RESTRICT work2 = reinterpret_cast<LEO_M128 *>(work[dist * 2]);
        LEO_M128 * LEO_RESTRICT work3 = reinterpret_cast<LEO_M128 *>(work[dist * 3]);

        do
        {
            for (unsigned i = 0; i < 2; ++i)
            {
                LEO_M128 work_reg_lo_0 = _mm_loadu_si128(work0);
                LEO_M128 work_reg_hi_0 = _mm_loadu_si128(work0 + 2);
                LEO_M128 work_reg_lo_1 = _mm_loadu_si128(work1);
                LEO_M128 work_reg_hi_1 = _mm_loadu_si128(work1 + 2);

                // First layer:
                work_reg_lo_1 = _mm_xor_si128(work_reg_lo_0, work_reg_lo_1);
                work_reg_hi_1 = _mm_xor_si128(work_reg_hi_0, work_reg_hi_1);
                if (log_m01 != kModulus)
                    LEO_MULADD_128(0, 1, 01);

                LEO_M128 work_reg_lo_2 = _mm_loadu_si128(work2);
                LEO_M128 work_reg_hi_2 = _mm_loadu_si128(work2 + 2);
                LEO_M128 work_reg_lo_3 = _mm_loadu_si128(work3);
                LEO_M128 work_reg_hi_3 = _mm_loadu_si128(work3 + 2);

                work_reg_lo_3 = _mm_xor_si128(work_reg_lo_2, work_reg_lo_3);
                work_reg_hi_3 = _mm_xor_si128(work_reg_hi_2, work_reg_hi_3);
                if (log_m23 != kModulus)
                    LEO_MULADD_128(2, 3, 23);

                // Second layer:
                work_reg_lo_2 = _mm_xor_si128(work_reg_lo_0, work_reg_lo_2);
                work_reg_hi_2 = _mm_xor_si128(work_reg_hi_0, work_reg_hi_2);
                work_reg_lo_3 = _mm_xor_si128(work_reg_lo_1, work_reg_lo_3);
                work_reg_hi_3 = _mm_xor_si128(work_reg_hi_1, work_reg_hi_3);
                if (log_m02 != kModulus)
                {
                    LEO_MULADD_128(0, 2, 02);
                    LEO_MULADD_128(1, 3, 02);
                }

                _mm_storeu_si128(work0, work_reg_lo_0);
                _mm_storeu_si128(work0 + 2, work_reg_hi_0);
                _mm_storeu_si128(work1, work_reg_lo_1);
                _mm_storeu_si128(work1 + 2, work_reg_hi_1);
                _mm_storeu_si128(work2, work_reg_lo_2);
                _mm_storeu_si128(work2 + 2, work_reg_hi_2);
                _mm_storeu_si128(work3, work_reg_lo_3);
                _mm_storeu_si128(work3 + 2, work_reg_hi_3);

                work0++, work1++, work2++, work3++;
            }

            work0 += 2, work1 += 2, work2 += 2, work3 += 2;
            bytes -= 64;
        } while (bytes > 0);

        return;
    }

#endif // LEO_INTERLEAVE_BUTTERFLY4_OPT

    // First layer:
    if (log_m01 == kModulus)
        xor_mem(work[dist], work[0], bytes);
    else
        IFFT_DIT2(work[0], work[dist], log_m01, bytes);

    if (log_m23 == kModulus)
        xor_mem(work[dist * 3], work[dist * 2], bytes);
    else
        IFFT_DIT2(work[dist * 2], work[dist * 3], log_m23, bytes);

    // Second layer:
    if (log_m02 == kModulus)
    {
        xor_mem(work[dist * 2], work[0], bytes);
        xor_mem(work[dist * 3], work[dist], bytes);
    }
    else
    {
        IFFT_DIT2(work[0], work[dist * 2], log_m02, bytes);
        IFFT_DIT2(work[dist], work[dist * 3], log_m02, bytes);
    }
}
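
// When LEO_INTERLEAVE_BUTTERFLY4_OPT is enabled, IFFT_DIT4 keeps both
// unrolled layers in registers between one load and one store per block;
// otherwise it decomposes into four 2-way butterflies, where xor_mem covers
// log_m == kModulus (the log-of-zero sentinel, for which the multiply term
// vanishes and the butterfly is a plain XOR).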


static void IFFT_DIT(
    const uint64_t bytes,
    const void* const* data,
    const unsigned m_truncated,
    void** work,
    void** xor_result,
    const unsigned m,
    const ffe_t* skewLUT)
{
    // FIXME: Roll into first layer
    if (data)
    {
        for (unsigned i = 0; i < m_truncated; ++i)
            memcpy(work[i], data[i], bytes);
        for (unsigned i = m_truncated; i < m; ++i)
            memset(work[i], 0, bytes);
    }

    // I tried splitting up the first few layers into L3-cache sized blocks but
    // found that it only provides about 5% performance boost, which is not
    // worth the extra complexity.

    // Decimation in time: Unroll 2 layers at a time
    unsigned dist = 1, dist4 = 4;
    for (; dist4 <= m; dist = dist4, dist4 <<= 2)
    {
        // For each set of dist*4 elements:
        for (unsigned r = 0; r < m_truncated; r += dist4)
        {
            const ffe_t log_m01 = skewLUT[r + dist];
            const ffe_t log_m23 = skewLUT[r + dist * 3];
            const ffe_t log_m02 = skewLUT[r + dist * 2];

            // For each set of dist elements:
            const unsigned i_end = r + dist;
            for (unsigned i = r; i < i_end; ++i)
            {
                IFFT_DIT4(
                    bytes,
                    work + i,
                    dist,
                    log_m01,
                    log_m23,
                    log_m02);
            }
        }

        // I tried alternating sweeps left->right and right->left to reduce cache misses.
        // It provides about 1% performance boost when done for both FFT and IFFT, so it
        // does not seem to be worth the extra complexity.

        // Clear data after the first layer
        data = nullptr;
    }

    // If there is one layer left:
    if (dist < m)
    {
        // Assuming that dist = m / 2
        LEO_DEBUG_ASSERT(dist * 2 == m);

        const ffe_t log_m = skewLUT[dist];

        if (log_m == kModulus)
            VectorXOR(bytes, dist, work + dist, work);
        else
        {
            for (unsigned i = 0; i < dist; ++i)
            {
                IFFT_DIT2(
                    work[i],
                    work[i + dist],
                    log_m,
                    bytes);
            }
        }
    }

    // FIXME: Roll into last layer
    if (xor_result)
        for (unsigned i = 0; i < m; ++i)
            xor_mem(xor_result[i], work[i], bytes);
}
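
// For a group starting at r with spacing dist, the twist factors come from
// skewLUT[r + dist] (log_m01), skewLUT[r + dist * 3] (log_m23), and
// skewLUT[r + dist * 2] (log_m02, shared by both second-layer butterflies).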


/*
    Decimation in time FFT:

    The decimation in time FFT algorithm allows us to unroll 2 layers at a time,
    performing calculations on local registers and faster cache memory.

    Each ^___^ below indicates a butterfly between the associated indices.

    The fft_butterfly(x, y) operation:

        if (log_m != kModulus)
            x[] ^= exp(log(y[]) + log_m)
        y[] ^= x[]

    Layer 0:
        0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7
        ^_______________^
          ^_______________^
            ^_______________^
              ^_______________^
                ^_______________^
                  ^_______________^
                    ^_______________^
                      ^_______________^

    Layer 1:
        0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7
        ^_______^       ^_______^
          ^_______^       ^_______^
            ^_______^       ^_______^
              ^_______^       ^_______^

    Layer 2:
        0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7
        ^___^   ^___^   ^___^   ^___^
          ^___^   ^___^   ^___^   ^___^

    Layer 3:
        0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7
        ^_^ ^_^ ^_^ ^_^ ^_^ ^_^ ^_^ ^_^

    DIT layer 0-1 operations, grouped 4 at a time:
        {0-0', 4-4', 0-4, 0'-4'},
        {1-1', 5-5', 1-5, 1'-5'},

    DIT layer 1-2 operations, grouped 4 at a time:
        {0-4, 2-6, 0-2, 4-6},
        {1-5, 3-7, 1-3, 5-7},

    DIT layer 2-3 operations, grouped 4 at a time:
        {0-2, 1-3, 0-1, 2-3},
        {4-6, 5-7, 4-5, 6-7},
*/

static void FFT_DIT2(
    void * LEO_RESTRICT x, void * LEO_RESTRICT y,
    ffe_t log_m, uint64_t bytes)
{
#if defined(LEO_TRY_AVX2)
    if (CpuHasAVX2)
    {
        LEO_MUL_TABLES_256();

        const LEO_M256 clr_mask = _mm256_set1_epi8(0x0f);

        LEO_M256 * LEO_RESTRICT x32 = reinterpret_cast<LEO_M256 *>(x);
        LEO_M256 * LEO_RESTRICT y32 = reinterpret_cast<LEO_M256 *>(y);

        do
        {
#define LEO_FFTB_256(x_ptr, y_ptr) { \
            LEO_M256 y_lo = _mm256_loadu_si256(y_ptr); \
            LEO_M256 data_0 = _mm256_and_si256(y_lo, clr_mask); \
            LEO_M256 data_1 = _mm256_srli_epi64(y_lo, 4); \
            data_1 = _mm256_and_si256(data_1, clr_mask); \
            LEO_M256 prod_lo = _mm256_shuffle_epi8(T0_lo, data_0); \
            prod_lo = _mm256_xor_si256(prod_lo, _mm256_shuffle_epi8(T1_lo, data_1)); \
            LEO_M256 prod_hi = _mm256_shuffle_epi8(T0_hi, data_0); \
            prod_hi = _mm256_xor_si256(prod_hi, _mm256_shuffle_epi8(T1_hi, data_1)); \
            LEO_M256 y_hi = _mm256_loadu_si256(y_ptr + 1); \
            data_0 = _mm256_and_si256(y_hi, clr_mask); \
            data_1 = _mm256_srli_epi64(y_hi, 4); \
            data_1 = _mm256_and_si256(data_1, clr_mask); \
            prod_lo = _mm256_xor_si256(prod_lo, _mm256_shuffle_epi8(T2_lo, data_0)); \
            prod_lo = _mm256_xor_si256(prod_lo, _mm256_shuffle_epi8(T3_lo, data_1)); \
            prod_hi = _mm256_xor_si256(prod_hi, _mm256_shuffle_epi8(T2_hi, data_0)); \
            prod_hi = _mm256_xor_si256(prod_hi, _mm256_shuffle_epi8(T3_hi, data_1)); \
            LEO_M256 x_lo = _mm256_loadu_si256(x_ptr); \
            LEO_M256 x_hi = _mm256_loadu_si256(x_ptr + 1); \
            x_lo = _mm256_xor_si256(prod_lo, x_lo); \
            _mm256_storeu_si256(x_ptr, x_lo); \
            x_hi = _mm256_xor_si256(prod_hi, x_hi); \
            _mm256_storeu_si256(x_ptr + 1, x_hi); \
            y_lo = _mm256_xor_si256(y_lo, x_lo); \
            _mm256_storeu_si256(y_ptr, y_lo); \
            y_hi = _mm256_xor_si256(y_hi, x_hi); \
            _mm256_storeu_si256(y_ptr + 1, y_hi); }

            LEO_FFTB_256(x32, y32);
            y32 += 2, x32 += 2;

            bytes -= 64;
        } while (bytes > 0);

        return;
    }
#endif // LEO_TRY_AVX2

    LEO_MUL_TABLES_128();

    const LEO_M128 clr_mask = _mm_set1_epi8(0x0f);

    LEO_M128 * LEO_RESTRICT x16 = reinterpret_cast<LEO_M128 *>(x);
    LEO_M128 * LEO_RESTRICT y16 = reinterpret_cast<LEO_M128 *>(y);

    do
    {
#define LEO_FFTB_128(x_ptr, y_ptr) { \
        LEO_M128 y_lo = _mm_loadu_si128(y_ptr); \
        LEO_M128 data_0 = _mm_and_si128(y_lo, clr_mask); \
        LEO_M128 data_1 = _mm_srli_epi64(y_lo, 4); \
        data_1 = _mm_and_si128(data_1, clr_mask); \
        LEO_M128 prod_lo = _mm_shuffle_epi8(T0_lo, data_0); \
        prod_lo = _mm_xor_si128(prod_lo, _mm_shuffle_epi8(T1_lo, data_1)); \
        LEO_M128 prod_hi = _mm_shuffle_epi8(T0_hi, data_0); \
        prod_hi = _mm_xor_si128(prod_hi, _mm_shuffle_epi8(T1_hi, data_1)); \
        LEO_M128 y_hi = _mm_loadu_si128(y_ptr + 2); \
        data_0 = _mm_and_si128(y_hi, clr_mask); \
        data_1 = _mm_srli_epi64(y_hi, 4); \
        data_1 = _mm_and_si128(data_1, clr_mask); \
        prod_lo = _mm_xor_si128(prod_lo, _mm_shuffle_epi8(T2_lo, data_0)); \
        prod_lo = _mm_xor_si128(prod_lo, _mm_shuffle_epi8(T3_lo, data_1)); \
        prod_hi = _mm_xor_si128(prod_hi, _mm_shuffle_epi8(T2_hi, data_0)); \
        prod_hi = _mm_xor_si128(prod_hi, _mm_shuffle_epi8(T3_hi, data_1)); \
        LEO_M128 x_lo = _mm_loadu_si128(x_ptr); \
        LEO_M128 x_hi = _mm_loadu_si128(x_ptr + 2); \
        x_lo = _mm_xor_si128(prod_lo, x_lo); \
        _mm_storeu_si128(x_ptr, x_lo); \
        x_hi = _mm_xor_si128(prod_hi, x_hi); \
        _mm_storeu_si128(x_ptr + 2, x_hi); \
        y_lo = _mm_xor_si128(y_lo, x_lo); \
        _mm_storeu_si128(y_ptr, y_lo); \
        y_hi = _mm_xor_si128(y_hi, x_hi); \
        _mm_storeu_si128(y_ptr + 2, y_hi); }

        LEO_FFTB_128(x16 + 1, y16 + 1);
        LEO_FFTB_128(x16, y16);
        x16 += 4, y16 += 4;

        bytes -= 64;
    } while (bytes > 0);
}


static void FFT_DIT4(
    uint64_t bytes,
    void** work,
    unsigned dist,
    const ffe_t log_m01,
    const ffe_t log_m23,
    const ffe_t log_m02)
{
#ifdef LEO_INTERLEAVE_BUTTERFLY4_OPT

#if defined(LEO_TRY_AVX2)

    if (CpuHasAVX2)
    {
        LEO_IFFTB4_256_TABLE(01);
        LEO_IFFTB4_256_TABLE(23);
        LEO_IFFTB4_256_TABLE(02);

        const LEO_M256 clr_mask = _mm256_set1_epi8(0x0f);

        LEO_M256 * LEO_RESTRICT work0 = reinterpret_cast<LEO_M256 *>(work[0]);
        LEO_M256 * LEO_RESTRICT work1 = reinterpret_cast<LEO_M256 *>(work[dist]);
        LEO_M256 * LEO_RESTRICT work2 = reinterpret_cast<LEO_M256 *>(work[dist * 2]);
        LEO_M256 * LEO_RESTRICT work3 = reinterpret_cast<LEO_M256 *>(work[dist * 3]);

        do
        {
            LEO_M256 work_reg_lo_0 = _mm256_loadu_si256(work0);
            LEO_M256 work_reg_hi_0 = _mm256_loadu_si256(work0 + 1);
            LEO_M256 work_reg_lo_1 = _mm256_loadu_si256(work1);
            LEO_M256 work_reg_hi_1 = _mm256_loadu_si256(work1 + 1);
            LEO_M256 work_reg_lo_2 = _mm256_loadu_si256(work2);
            LEO_M256 work_reg_hi_2 = _mm256_loadu_si256(work2 + 1);
            LEO_M256 work_reg_lo_3 = _mm256_loadu_si256(work3);
            LEO_M256 work_reg_hi_3 = _mm256_loadu_si256(work3 + 1);

            // First layer:
            if (log_m02 != kModulus)
            {
                LEO_MULADD_256(0, 2, 02);
                LEO_MULADD_256(1, 3, 02);
            }
            work_reg_lo_2 = _mm256_xor_si256(work_reg_lo_0, work_reg_lo_2);
            work_reg_hi_2 = _mm256_xor_si256(work_reg_hi_0, work_reg_hi_2);
            work_reg_lo_3 = _mm256_xor_si256(work_reg_lo_1, work_reg_lo_3);
            work_reg_hi_3 = _mm256_xor_si256(work_reg_hi_1, work_reg_hi_3);

            // Second layer:
            if (log_m01 != kModulus)
                LEO_MULADD_256(0, 1, 01);
            work_reg_lo_1 = _mm256_xor_si256(work_reg_lo_0, work_reg_lo_1);
            work_reg_hi_1 = _mm256_xor_si256(work_reg_hi_0, work_reg_hi_1);

            _mm256_storeu_si256(work0, work_reg_lo_0);
            _mm256_storeu_si256(work0 + 1, work_reg_hi_0);
            _mm256_storeu_si256(work1, work_reg_lo_1);
            _mm256_storeu_si256(work1 + 1, work_reg_hi_1);

            if (log_m23 != kModulus)
                LEO_MULADD_256(2, 3, 23);
            work_reg_lo_3 = _mm256_xor_si256(work_reg_lo_2, work_reg_lo_3);
            work_reg_hi_3 = _mm256_xor_si256(work_reg_hi_2, work_reg_hi_3);

            _mm256_storeu_si256(work2, work_reg_lo_2);
            _mm256_storeu_si256(work2 + 1, work_reg_hi_2);
            _mm256_storeu_si256(work3, work_reg_lo_3);
            _mm256_storeu_si256(work3 + 1, work_reg_hi_3);

            work0 += 2, work1 += 2, work2 += 2, work3 += 2;

            bytes -= 64;
        } while (bytes > 0);

        return;
    }

#endif // LEO_TRY_AVX2

    if (CpuHasSSSE3)
    {
        LEO_IFFTB4_128_TABLE(01);
        LEO_IFFTB4_128_TABLE(23);
        LEO_IFFTB4_128_TABLE(02);

        const LEO_M128 clr_mask = _mm_set1_epi8(0x0f);

        LEO_M128 * LEO_RESTRICT work0 = reinterpret_cast<LEO_M128 *>(work[0]);
        LEO_M128 * LEO_RESTRICT work1 = reinterpret_cast<LEO_M128 *>(work[dist]);
        LEO_M128 * LEO_RESTRICT work2 = reinterpret_cast<LEO_M128 *>(work[dist * 2]);
        LEO_M128 * LEO_RESTRICT work3 = reinterpret_cast<LEO_M128 *>(work[dist * 3]);

        do
        {
            for (unsigned i = 0; i < 2; ++i)
            {
                LEO_M128 work_reg_lo_0 = _mm_loadu_si128(work0);
                LEO_M128 work_reg_hi_0 = _mm_loadu_si128(work0 + 2);
                LEO_M128 work_reg_lo_1 = _mm_loadu_si128(work1);
                LEO_M128 work_reg_hi_1 = _mm_loadu_si128(work1 + 2);
                LEO_M128 work_reg_lo_2 = _mm_loadu_si128(work2);
                LEO_M128 work_reg_hi_2 = _mm_loadu_si128(work2 + 2);
                LEO_M128 work_reg_lo_3 = _mm_loadu_si128(work3);
                LEO_M128 work_reg_hi_3 = _mm_loadu_si128(work3 + 2);

                // First layer:
                if (log_m02 != kModulus)
                {
                    LEO_MULADD_128(0, 2, 02);
                    LEO_MULADD_128(1, 3, 02);
                }
                work_reg_lo_2 = _mm_xor_si128(work_reg_lo_0, work_reg_lo_2);
                work_reg_hi_2 = _mm_xor_si128(work_reg_hi_0, work_reg_hi_2);
                work_reg_lo_3 = _mm_xor_si128(work_reg_lo_1, work_reg_lo_3);
                work_reg_hi_3 = _mm_xor_si128(work_reg_hi_1, work_reg_hi_3);

                // Second layer:
                if (log_m01 != kModulus)
                    LEO_MULADD_128(0, 1, 01);
                work_reg_lo_1 = _mm_xor_si128(work_reg_lo_0, work_reg_lo_1);
                work_reg_hi_1 = _mm_xor_si128(work_reg_hi_0, work_reg_hi_1);

                _mm_storeu_si128(work0, work_reg_lo_0);
                _mm_storeu_si128(work0 + 2, work_reg_hi_0);
                _mm_storeu_si128(work1, work_reg_lo_1);
                _mm_storeu_si128(work1 + 2, work_reg_hi_1);

                if (log_m23 != kModulus)
                    LEO_MULADD_128(2, 3, 23);
                work_reg_lo_3 = _mm_xor_si128(work_reg_lo_2, work_reg_lo_3);
                work_reg_hi_3 = _mm_xor_si128(work_reg_hi_2, work_reg_hi_3);

                _mm_storeu_si128(work2, work_reg_lo_2);
                _mm_storeu_si128(work2 + 2, work_reg_hi_2);
                _mm_storeu_si128(work3, work_reg_lo_3);
                _mm_storeu_si128(work3 + 2, work_reg_hi_3);

                work0++, work1++, work2++, work3++;
            }

            work0 += 2, work1 += 2, work2 += 2, work3 += 2;
            bytes -= 64;
        } while (bytes > 0);

        return;
    }

#endif // LEO_INTERLEAVE_BUTTERFLY4_OPT

    // First layer:
    if (log_m02 == kModulus)
    {
        xor_mem(work[dist * 2], work[0], bytes);
        xor_mem(work[dist * 3], work[dist], bytes);
    }
    else
    {
        FFT_DIT2(work[0], work[dist * 2], log_m02, bytes);
        FFT_DIT2(work[dist], work[dist * 3], log_m02, bytes);
    }

    // Second layer:
    if (log_m01 == kModulus)
        xor_mem(work[dist], work[0], bytes);
    else
        FFT_DIT2(work[0], work[dist], log_m01, bytes);

    if (log_m23 == kModulus)
        xor_mem(work[dist * 3], work[dist * 2], bytes);
    else
        FFT_DIT2(work[dist * 2], work[dist * 3], log_m23, bytes);
}


static void FFT_DIT(
    const uint64_t bytes,
    void** work,
    const unsigned m_truncated,
    const unsigned m,
    const ffe_t* skewLUT)
{
    // Decimation in time: Unroll 2 layers at a time
    unsigned dist4 = m, dist = m >> 2;
    for (; dist != 0; dist4 = dist, dist >>= 2)
    {
        // For each set of dist*4 elements:
        for (unsigned r = 0; r < m_truncated; r += dist4)
        {
            const ffe_t log_m01 = skewLUT[r + dist];
            const ffe_t log_m23 = skewLUT[r + dist * 3];
            const ffe_t log_m02 = skewLUT[r + dist * 2];

            // For each set of dist elements:
            const unsigned i_end = r + dist;
            for (unsigned i = r; i < i_end; ++i)
            {
                FFT_DIT4(
                    bytes,
                    work + i,
                    dist,
                    log_m01,
                    log_m23,
                    log_m02);
            }
        }
    }

    // If there is one layer left:
    if (dist4 == 2)
    {
        for (unsigned r = 0; r < m_truncated; r += 2)
        {
            const ffe_t log_m = skewLUT[r + 1];

            if (log_m == kModulus)
                xor_mem(work[r + 1], work[r], bytes);
            else
            {
                FFT_DIT2(
                    work[r],
                    work[r + 1],
                    log_m,
                    bytes);
            }
        }
    }
}
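
// Worked example of the layer schedule above (a sketch, not part of the
// build): for m = 8 the loop runs once with (dist4, dist) = (8, 2), doing
// 4-point butterflies, and exits with dist4 == 2, so the final 2-point layer
// handles adjacent pairs. For m = 16 two passes run, (16, 4) then (4, 1),
// and no odd layer is left over.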


//------------------------------------------------------------------------------
// Reed-Solomon Encode

void ReedSolomonEncode(
    uint64_t buffer_bytes,
    unsigned original_count,
    unsigned recovery_count,
    unsigned m,
    const void* const * data,
    void** work)
{
    // work <- IFFT(data, m, m)

    const ffe_t* skewLUT = FFTSkew + m - 1;
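
    // Overview: the IFFT_DIT calls below xor-accumulate the IFFT of each
    // m-piece block of data into the first m work buffers; the single forward
    // FFT after skip_body then turns that sum into the recovery pieces.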

    IFFT_DIT(
        buffer_bytes,
        data,
        original_count < m ? original_count : m,
        work,
        nullptr, // No xor output
        m,
        skewLUT);

    const unsigned last_count = original_count % m;
    if (m >= original_count)
        goto skip_body;

    // For sets of m data pieces:
    for (unsigned i = m; i + m <= original_count; i += m)
    {
        data += m;
        skewLUT += m;

        // work <- work xor IFFT(data + i, m, m + i)

        IFFT_DIT(
            buffer_bytes,
            data, // data source
            m,
            work + m, // temporary workspace
            work, // xor destination
            m,
            skewLUT);
    }

    // Handle final partial set of m pieces:
    if (last_count != 0)
    {
        data += m;
        skewLUT += m;

        // work <- work xor IFFT(data + i, m, m + i),
        // where i = original_count - last_count is the offset of this set

        IFFT_DIT(
            buffer_bytes,
            data, // data source
            last_count,
            work + m, // temporary workspace
            work, // xor destination
            m,
            skewLUT);
    }

skip_body:

    // work <- FFT(work, m, 0)
    FFT_DIT(
        buffer_bytes,
        work,
        recovery_count,
        m,
        FFTSkew - 1);
}
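
// Hedged usage sketch (illustrative only): the public wrapper is assumed to
// size m = NextPow2(recovery_count) and to provide a work array of 2 * m
// buffers of buffer_bytes each, since the loop above uses work + m as
// scratch space.
//
//     ReedSolomonEncode(buffer_bytes, original_count, recovery_count,
//                       m, data, work);
//     // work[0 .. recovery_count - 1] now hold the recovery pieces.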


//------------------------------------------------------------------------------
// ErrorBitfield

#ifdef LEO_ERROR_BITFIELD_OPT

// Used in decoding to decide which final FFT operations to perform
class ErrorBitfield
{
    static const unsigned kWordMips = 5;
    static const unsigned kWords = kOrder / 64;
    uint64_t Words[kWordMips][kWords] = {};

    static const unsigned kBigMips = 6;
    static const unsigned kBigWords = (kWords + 63) / 64;
    uint64_t BigWords[kBigMips][kBigWords] = {};

    static const unsigned kBiggestMips = 4;
    uint64_t BiggestWords[kBiggestMips] = {};

public:
    LEO_FORCE_INLINE void Set(unsigned i)
    {
        Words[0][i / 64] |= (uint64_t)1 << (i % 64);
    }

    void Prepare();

    LEO_FORCE_INLINE bool IsNeeded(unsigned mip_level, unsigned bit) const
    {
        if (mip_level >= 16)
            return true;
        if (mip_level >= 12)
        {
            bit /= 4096;
            return 0 != (BiggestWords[mip_level - 12] & ((uint64_t)1 << bit));
        }
        if (mip_level >= 6)
        {
            bit /= 64;
            return 0 != (BigWords[mip_level - 6][bit / 64] & ((uint64_t)1 << (bit % 64)));
        }
        return 0 != (Words[mip_level - 1][bit / 64] & ((uint64_t)1 << (bit % 64)));
    }
};
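
// The bitfield is a mip-map over error positions: mip levels 1-5 are served
// by Words (runs of 2..32 positions), levels 6-11 by BigWords (runs of
// 64..2048) and levels 12-15 by BiggestWords (runs of 4096..32768), so one
// bit test answers "does this aligned span contain any erasure?"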

static const uint64_t kHiMasks[5] = {
    0xAAAAAAAAAAAAAAAAULL,
    0xCCCCCCCCCCCCCCCCULL,
    0xF0F0F0F0F0F0F0F0ULL,
    0xFF00FF00FF00FF00ULL,
    0xFFFF0000FFFF0000ULL,
};
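
// Each mask selects the high half of every aligned 2-, 4-, 8-, 16- or 32-bit
// group. Prepare() uses them to smear set bits across their group; with
// kHiMasks[0], for example, 0b0100 becomes 0b1100, so either member of a
// pair flags the whole pair.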

void ErrorBitfield::Prepare()
{
    // First mip level is for final layer of FFT: pairs of data
    for (unsigned i = 0; i < kWords; ++i)
    {
        uint64_t w_i = Words[0][i];
        const uint64_t hi2lo0 = w_i | ((w_i & kHiMasks[0]) >> 1);
        const uint64_t lo2hi0 = ((w_i & (kHiMasks[0] >> 1)) << 1);
        Words[0][i] = w_i = hi2lo0 | lo2hi0;

        for (unsigned j = 1, bits = 2; j < kWordMips; ++j, bits <<= 1)
        {
            const uint64_t hi2lo_j = w_i | ((w_i & kHiMasks[j]) >> bits);
            const uint64_t lo2hi_j = ((w_i & (kHiMasks[j] >> bits)) << bits);
            Words[j][i] = w_i = hi2lo_j | lo2hi_j;
        }
    }

    for (unsigned i = 0; i < kBigWords; ++i)
    {
        uint64_t w_i = 0;
        uint64_t bit = 1;
        const uint64_t* src = &Words[kWordMips - 1][i * 64];
        for (unsigned j = 0; j < 64; ++j, bit <<= 1)
        {
            // Words[kWordMips - 1] is fully smeared over 32-bit runs, so one
            // bit from each half is enough to detect a nonzero 64-bit word.
            const uint64_t w = src[j];
            w_i |= (w | (w >> 32) | (w << 32)) & bit;
        }
        BigWords[0][i] = w_i;

        for (unsigned j = 1, bits = 1; j < kBigMips; ++j, bits <<= 1)
        {
            const uint64_t hi2lo_j = w_i | ((w_i & kHiMasks[j - 1]) >> bits);
            const uint64_t lo2hi_j = ((w_i & (kHiMasks[j - 1] >> bits)) << bits);
            BigWords[j][i] = w_i = hi2lo_j | lo2hi_j;
        }
    }

    uint64_t w_i = 0;
    uint64_t bit = 1;
    const uint64_t* src = &BigWords[kBigMips - 1][0];
    for (unsigned j = 0; j < kBigWords; ++j, bit <<= 1)
    {
        const uint64_t w = src[j];
        w_i |= (w | (w >> 32) | (w << 32)) & bit;
    }
    BiggestWords[0] = w_i;

    for (unsigned j = 1, bits = 1; j < kBiggestMips; ++j, bits <<= 1)
    {
        const uint64_t hi2lo_j = w_i | ((w_i & kHiMasks[j - 1]) >> bits);
        const uint64_t lo2hi_j = ((w_i & (kHiMasks[j - 1] >> bits)) << bits);
        BiggestWords[j] = w_i = hi2lo_j | lo2hi_j;
    }
}
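
// Hedged usage sketch of the class above (positions are illustrative):
//
//     ErrorBitfield error_bits;
//     error_bits.Set(5);          // mark position 5 as an erasure
//     error_bits.Prepare();       // build the mip hierarchy
//     error_bits.IsNeeded(3, 0);  // true: the aligned span [0, 8) covers 5
//
// Note that Set() must not be called after Prepare(), since Prepare() smears
// Words[0] in place.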

static void FFT_DIT_ErrorBits(
    const uint64_t bytes,
    void** work,
    const unsigned n_truncated,
    const unsigned n,
    const ffe_t* skewLUT,
    const ErrorBitfield& error_bits)
{
    unsigned mip_level = LastNonzeroBit32(n);

    // Decimation in time: Unroll 2 layers at a time
    unsigned dist4 = n, dist = n >> 2;
    for (; dist != 0; dist4 = dist, dist >>= 2, mip_level -= 2)
    {
        // For each set of dist*4 elements:
        for (unsigned r = 0; r < n_truncated; r += dist4)
        {
            if (!error_bits.IsNeeded(mip_level, r))
                continue;

            const ffe_t log_m01 = skewLUT[r + dist];
            const ffe_t log_m23 = skewLUT[r + dist * 3];
            const ffe_t log_m02 = skewLUT[r + dist * 2];

            // For each set of dist elements:
            for (unsigned i = r; i < r + dist; ++i)
            {
                FFT_DIT4(
                    bytes,
                    work + i,
                    dist,
                    log_m01,
                    log_m23,
                    log_m02);
            }
        }
    }

    // If there is one layer left:
    if (dist4 == 2)
    {
        for (unsigned r = 0; r < n_truncated; r += 2)
        {
            const ffe_t log_m = skewLUT[r + 1];

            if (log_m == kModulus)
                xor_mem(work[r + 1], work[r], bytes);
            else
            {
                FFT_DIT2(
                    work[r],
                    work[r + 1],
                    log_m,
                    bytes);
            }
        }
    }
}
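
// mip_level tracks log2(dist4) as the loop descends, so each IsNeeded() probe
// tests a whole dist4-aligned span of work pointers with one bit lookup and
// skips spans that contain no erasures.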

#endif // LEO_ERROR_BITFIELD_OPT


//------------------------------------------------------------------------------
// Reed-Solomon Decode

void ReedSolomonDecode(
    uint64_t buffer_bytes,
    unsigned original_count,
    unsigned recovery_count,
    unsigned m, // NextPow2(recovery_count)
    unsigned n, // NextPow2(m + original_count) = work_count
    const void* const * const original, // original_count entries
    const void* const * const recovery, // recovery_count entries
    void** work) // n entries
{
    // Fill in error locations

#ifdef LEO_ERROR_BITFIELD_OPT
    ErrorBitfield error_bits;
#endif // LEO_ERROR_BITFIELD_OPT

    ffe_t error_locations[kOrder] = {};
    for (unsigned i = 0; i < recovery_count; ++i)
        if (!recovery[i])
            error_locations[i] = 1;
    for (unsigned i = recovery_count; i < m; ++i)
        error_locations[i] = 1;
    for (unsigned i = 0; i < original_count; ++i)
    {
        if (!original[i])
        {
            error_locations[i + m] = 1;
#ifdef LEO_ERROR_BITFIELD_OPT
            error_bits.Set(i + m);
#endif // LEO_ERROR_BITFIELD_OPT
        }
    }

#ifdef LEO_ERROR_BITFIELD_OPT
    error_bits.Prepare();
#endif // LEO_ERROR_BITFIELD_OPT

    // Evaluate error locator polynomial

    FWHT(error_locations, kOrder, m + original_count);

    for (unsigned i = 0; i < kOrder; ++i)
        error_locations[i] = ((unsigned)error_locations[i] * (unsigned)LogWalsh[i]) % kModulus;

    FWHT(error_locations, kOrder, kOrder);
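
    // In effect this is a multiplicative convolution carried out in the
    // Walsh-Hadamard domain: transform the 0/1 erasure indicator, multiply
    // pointwise by LogWalsh (the transformed log table) mod kModulus, and
    // transform back. Afterwards error_locations[i] holds the logarithm of
    // the error locator polynomial evaluated at point i.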

    // work <- recovery data

    for (unsigned i = 0; i < recovery_count; ++i)
    {
        if (recovery[i])
            mul_mem(work[i], recovery[i], error_locations[i], buffer_bytes);
        else
            memset(work[i], 0, buffer_bytes);
    }
    for (unsigned i = recovery_count; i < m; ++i)
        memset(work[i], 0, buffer_bytes);

    // work <- original data

    for (unsigned i = 0; i < original_count; ++i)
    {
        if (original[i])
            mul_mem(work[m + i], original[i], error_locations[m + i], buffer_bytes);
        else
            memset(work[m + i], 0, buffer_bytes);
    }
    for (unsigned i = m + original_count; i < n; ++i)
        memset(work[i], 0, buffer_bytes);

    // work <- IFFT(work, n, 0)

    IFFT_DIT(
        buffer_bytes,
        nullptr,
        m + original_count,
        work,
        nullptr,
        n,
        FFTSkew - 1);

    // work <- FormalDerivative(work, n)

    for (unsigned i = 1; i < n; ++i)
    {
        const unsigned width = ((i ^ (i - 1)) + 1) >> 1;

        VectorXOR(
            buffer_bytes,
            width,
            work + i - width,
            work + i);
    }
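
    // Note: width is the lowest set bit of i (equivalently i & -i). For
    // example, i = 12 (0b1100) gives ((12 ^ 11) + 1) >> 1 = (0b0111 + 1) >> 1 = 4.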

    // work <- FFT(work, n, 0) truncated to m + original_count

    const unsigned output_count = m + original_count;

#ifdef LEO_ERROR_BITFIELD_OPT
    FFT_DIT_ErrorBits(buffer_bytes, work, output_count, n, FFTSkew - 1, error_bits);
#else
    FFT_DIT(buffer_bytes, work, output_count, n, FFTSkew - 1);
#endif

    // Reveal erasures

    // Multiplying by (kModulus - log) divides by the locator evaluation in
    // the log domain.
    for (unsigned i = 0; i < original_count; ++i)
        if (!original[i])
            mul_mem(work[i], work[i + m], kModulus - error_locations[i + m], buffer_bytes);
}
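
// Hedged usage sketch (illustrative only): the public wrapper is assumed to
// derive m = NextPow2(recovery_count) and n = NextPow2(m + original_count)
// and to validate inputs before calling this function.
//
//     ReedSolomonDecode(buffer_bytes, original_count, recovery_count,
//                       m, n, original, recovery, work);
//     // For each i where original[i] == nullptr, work[i] now holds the
//     // recovered original piece.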


//------------------------------------------------------------------------------
// API

static bool IsInitialized = false;

bool Initialize()
{
    if (IsInitialized)
        return true;

    if (!CpuHasSSSE3)
        return false;

    InitializeLogarithmTables();
    InitializeMultiplyTables();
    FFTInitialize();

    IsInitialized = true;
    return true;
}
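
// SSSE3 is the baseline requirement here because the SIMD multiply kernels in
// this file (e.g. LEO_MULADD_128) assume it; without SSSE3 this backend
// reports failure rather than falling back to scalar code.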


}} // namespace leopard::ff16

#endif // LEO_HAS_FF16