performance(sqlcipher): provide optimized versions of sha1_compress (#3490)
* performance(sqlcipher): provide optimized versions of sha1_compress:
  - on ARM, use Neon/ACLE extensions (a 3x-4x perf improvement)
  - on x64, use a plain C implementation from nayuki (a 2x-3x perf gain)
  Unfortunately, we can't use the dedicated `SHA1` extension on x64, as this became widely available only recently (especially on AMD CPUs)
This commit is contained in:
parent
9151aa7f04
commit
62e3e9bd62
2
go.mod
2
go.mod
|
@ -10,7 +10,7 @@ replace github.com/nfnt/resize => github.com/status-im/resize v0.0.0-20201215164
|
|||
|
||||
replace github.com/forPelevin/gomoji => github.com/status-im/gomoji v1.1.3-0.20220213022530-e5ac4a8732d4
|
||||
|
||||
replace github.com/mutecomm/go-sqlcipher v0.0.0-20190227152316-55dbde17881f => github.com/status-im/go-sqlcipher v0.1.0-status.1
|
||||
replace github.com/mutecomm/go-sqlcipher v0.0.0-20190227152316-55dbde17881f => github.com/status-im/go-sqlcipher v0.1.0-status.3
|
||||
|
||||
require (
|
||||
github.com/anacrolix/torrent v1.41.0
|
||||
|
|
4
go.sum
4
go.sum
|
@ -1991,8 +1991,8 @@ github.com/status-im/go-ethereum v1.10.25-status.6 h1:5YC8k1inTBqA6LpON0uX6y86ni
|
|||
github.com/status-im/go-ethereum v1.10.25-status.6/go.mod h1:Dt4K5JYMhJRdtXJwBEyGZLZn9iz/chSOZyjVmt5ZhwQ=
|
||||
github.com/status-im/go-multiaddr-ethv4 v1.2.4 h1:7fw0Y48TJXEqx4fOHlDOUiM/uBq9zG5w4x975Mjh4E0=
|
||||
github.com/status-im/go-multiaddr-ethv4 v1.2.4/go.mod h1:PDh4D7h5CvecPIy0ji0rLNwTnzzEcyz9uTPHD42VyH4=
|
||||
github.com/status-im/go-sqlcipher v0.1.0-status.1 h1:5fkuM4FG3G5o754zFO9xErATxeig9pww+Nr6sVTUN4o=
|
||||
github.com/status-im/go-sqlcipher v0.1.0-status.1/go.mod h1:MyUWrZlB1aI5bs7j9/pJ8ckLLZ4QcCYcNiSbsAW32D4=
|
||||
github.com/status-im/go-sqlcipher v0.1.0-status.3 h1:moVgjoWda6j2OPg8fFGXQa2q78hCCQhlt7O5aWVU1ck=
|
||||
github.com/status-im/go-sqlcipher v0.1.0-status.3/go.mod h1:MyUWrZlB1aI5bs7j9/pJ8ckLLZ4QcCYcNiSbsAW32D4=
|
||||
github.com/status-im/gomoji v1.1.3-0.20220213022530-e5ac4a8732d4 h1:CtobZoiNdHpx+xurFxnuJ1xsGm3oKMfcZkB3vmomJmA=
|
||||
github.com/status-im/gomoji v1.1.3-0.20220213022530-e5ac4a8732d4/go.mod h1:hmpnZzkzSZJbFYWAUkrPV8I36x7mdYiPhPqnALP4fKA=
|
||||
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
|
||||
|
|
|
@ -10,6 +10,8 @@
|
|||
*/
|
||||
#include "tomcrypt.h"
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
/**
|
||||
@file sha1.c
|
||||
LTC_SHA1 code by Tom St Denis
|
||||
|
@ -18,6 +20,44 @@
|
|||
|
||||
#ifdef LTC_SHA1
|
||||
|
||||
// -> BEGIN arm intrinsics block
|
||||
#if defined(__APPLE__) && (defined(__arm__) || defined(__aarch32__) || defined(__arm64__) || defined(__aarch64__) || defined(_M_ARM))
|
||||
# if defined(__GNUC__)
|
||||
# include <stdint.h>
|
||||
# endif
|
||||
# if defined(__ARM_NEON)|| defined(_MSC_VER) || defined(__GNUC__)
|
||||
# include <arm_neon.h>
|
||||
# endif
|
||||
/* GCC and LLVM Clang, but not Apple Clang */
|
||||
# if defined(__GNUC__) && !defined(__apple_build_version__)
|
||||
# if defined(__ARM_ACLE) || defined(__ARM_FEATURE_CRYPTO)
|
||||
# include <arm_acle.h>
|
||||
# endif
|
||||
# endif
|
||||
#define SHA1_TARGET_ARM 1
|
||||
// -> END arm intrinsics block
|
||||
// -> BEGIN x86_64 intrinsics block
|
||||
#elif defined(__x86_64__) || defined(__SHA__)
|
||||
#if defined(__GNUC__) /* GCC and LLVM Clang */
|
||||
# include <x86intrin.h>
|
||||
#endif
|
||||
|
||||
/* Microsoft supports Intel SHA ACLE extensions as of Visual Studio 2015 */
|
||||
#if defined(_MSC_VER)
|
||||
# include <immintrin.h>
|
||||
# define WIN32_LEAN_AND_MEAN
|
||||
# include <Windows.h>
|
||||
typedef UINT32 uint32_t;
|
||||
typedef UINT8 uint8_t;
|
||||
#endif
|
||||
//#define SHA1_TARGET_X86 1
|
||||
#endif
|
||||
// -> END x86_64 intrinsics block
|
||||
|
||||
#define LENGTH_SIZE 8 // In bytes
|
||||
#define BLOCK_LEN 64 // In bytes
|
||||
#define STATE_LEN 5 // In words
|
||||
|
||||
const struct ltc_hash_descriptor sha1_desc =
|
||||
{
|
||||
"sha1",
|
||||
|
@ -36,114 +76,508 @@ const struct ltc_hash_descriptor sha1_desc =
|
|||
NULL
|
||||
};
|
||||
|
||||
#define F0(x,y,z) (z ^ (x & (y ^ z)))
|
||||
#define F1(x,y,z) (x ^ y ^ z)
|
||||
#define F2(x,y,z) ((x & y) | (z & (x | y)))
|
||||
#define F3(x,y,z) (x ^ y ^ z)
|
||||
|
||||
#ifdef LTC_CLEAN_STACK
|
||||
static int _sha1_compress(hash_state *md, unsigned char *buf)
|
||||
#else
|
||||
static int sha1_compress(hash_state *md, unsigned char *buf)
|
||||
#endif
|
||||
{
|
||||
ulong32 a,b,c,d,e,W[80],i;
|
||||
#ifdef LTC_SMALL_CODE
|
||||
ulong32 t;
|
||||
#endif
|
||||
#if SHA1_TARGET_ARM
|
||||
/* sha1-arm.c - ARMv8 SHA extensions using C intrinsics */
|
||||
/* Written and placed in public domain by Jeffrey Walton */
|
||||
/* Based on code from ARM, and by Johannes Schneiders, Skip */
|
||||
/* Hovsmith and Barry O'Rourke for the mbedTLS project. */
|
||||
// -> BEGIN arm intrinsics block
|
||||
uint32x4_t ABCD, ABCD_SAVED;
|
||||
uint32x4_t TMP0, TMP1;
|
||||
uint32x4_t MSG0, MSG1, MSG2, MSG3;
|
||||
uint32_t E0, E0_SAVED, E1;
|
||||
|
||||
/* copy the state into 512-bits into W[0..15] */
|
||||
for (i = 0; i < 16; i++) {
|
||||
LOAD32H(W[i], buf + (4*i));
|
||||
}
|
||||
/* Load state */
|
||||
ABCD = vld1q_u32(&md->sha1.state[0]);
|
||||
E0 = md->sha1.state[4];
|
||||
|
||||
/* copy state */
|
||||
a = md->sha1.state[0];
|
||||
b = md->sha1.state[1];
|
||||
c = md->sha1.state[2];
|
||||
d = md->sha1.state[3];
|
||||
e = md->sha1.state[4];
|
||||
/* Save state */
|
||||
ABCD_SAVED = ABCD;
|
||||
E0_SAVED = E0;
|
||||
|
||||
/* expand it */
|
||||
for (i = 16; i < 80; i++) {
|
||||
W[i] = ROL(W[i-3] ^ W[i-8] ^ W[i-14] ^ W[i-16], 1);
|
||||
}
|
||||
/* Load message */
|
||||
MSG0 = vld1q_u32((const uint32_t*)(buf));
|
||||
MSG1 = vld1q_u32((const uint32_t*)(buf + 16));
|
||||
MSG2 = vld1q_u32((const uint32_t*)(buf + 32));
|
||||
MSG3 = vld1q_u32((const uint32_t*)(buf + 48));
|
||||
|
||||
/* compress */
|
||||
/* round one */
|
||||
#define FF0(a,b,c,d,e,i) e = (ROLc(a, 5) + F0(b,c,d) + e + W[i] + 0x5a827999UL); b = ROLc(b, 30);
|
||||
#define FF1(a,b,c,d,e,i) e = (ROLc(a, 5) + F1(b,c,d) + e + W[i] + 0x6ed9eba1UL); b = ROLc(b, 30);
|
||||
#define FF2(a,b,c,d,e,i) e = (ROLc(a, 5) + F2(b,c,d) + e + W[i] + 0x8f1bbcdcUL); b = ROLc(b, 30);
|
||||
#define FF3(a,b,c,d,e,i) e = (ROLc(a, 5) + F3(b,c,d) + e + W[i] + 0xca62c1d6UL); b = ROLc(b, 30);
|
||||
/* Reverse for little endian */
|
||||
MSG0 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG0)));
|
||||
MSG1 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG1)));
|
||||
MSG2 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG2)));
|
||||
MSG3 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG3)));
|
||||
|
||||
#ifdef LTC_SMALL_CODE
|
||||
TMP0 = vaddq_u32(MSG0, vdupq_n_u32(0x5A827999));
|
||||
TMP1 = vaddq_u32(MSG1, vdupq_n_u32(0x5A827999));
|
||||
|
||||
for (i = 0; i < 20; ) {
|
||||
FF0(a,b,c,d,e,i++); t = e; e = d; d = c; c = b; b = a; a = t;
|
||||
}
|
||||
/* Rounds 0-3 */
|
||||
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
|
||||
ABCD = vsha1cq_u32(ABCD, E0, TMP0);
|
||||
TMP0 = vaddq_u32(MSG2, vdupq_n_u32(0x5A827999));
|
||||
MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);
|
||||
|
||||
for (; i < 40; ) {
|
||||
FF1(a,b,c,d,e,i++); t = e; e = d; d = c; c = b; b = a; a = t;
|
||||
}
|
||||
/* Rounds 4-7 */
|
||||
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
|
||||
ABCD = vsha1cq_u32(ABCD, E1, TMP1);
|
||||
TMP1 = vaddq_u32(MSG3, vdupq_n_u32(0x5A827999));
|
||||
MSG0 = vsha1su1q_u32(MSG0, MSG3);
|
||||
MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);
|
||||
|
||||
for (; i < 60; ) {
|
||||
FF2(a,b,c,d,e,i++); t = e; e = d; d = c; c = b; b = a; a = t;
|
||||
}
|
||||
/* Rounds 8-11 */
|
||||
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
|
||||
ABCD = vsha1cq_u32(ABCD, E0, TMP0);
|
||||
TMP0 = vaddq_u32(MSG0, vdupq_n_u32(0x5A827999));
|
||||
MSG1 = vsha1su1q_u32(MSG1, MSG0);
|
||||
MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);
|
||||
|
||||
for (; i < 80; ) {
|
||||
FF3(a,b,c,d,e,i++); t = e; e = d; d = c; c = b; b = a; a = t;
|
||||
}
|
||||
/* Rounds 12-15 */
|
||||
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
|
||||
ABCD = vsha1cq_u32(ABCD, E1, TMP1);
|
||||
TMP1 = vaddq_u32(MSG1, vdupq_n_u32(0x6ED9EBA1));
|
||||
MSG2 = vsha1su1q_u32(MSG2, MSG1);
|
||||
MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);
|
||||
|
||||
/* Rounds 16-19 */
|
||||
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
|
||||
ABCD = vsha1cq_u32(ABCD, E0, TMP0);
|
||||
TMP0 = vaddq_u32(MSG2, vdupq_n_u32(0x6ED9EBA1));
|
||||
MSG3 = vsha1su1q_u32(MSG3, MSG2);
|
||||
MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);
|
||||
|
||||
/* Rounds 20-23 */
|
||||
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
|
||||
ABCD = vsha1pq_u32(ABCD, E1, TMP1);
|
||||
TMP1 = vaddq_u32(MSG3, vdupq_n_u32(0x6ED9EBA1));
|
||||
MSG0 = vsha1su1q_u32(MSG0, MSG3);
|
||||
MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);
|
||||
|
||||
/* Rounds 24-27 */
|
||||
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
|
||||
ABCD = vsha1pq_u32(ABCD, E0, TMP0);
|
||||
TMP0 = vaddq_u32(MSG0, vdupq_n_u32(0x6ED9EBA1));
|
||||
MSG1 = vsha1su1q_u32(MSG1, MSG0);
|
||||
MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);
|
||||
|
||||
/* Rounds 28-31 */
|
||||
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
|
||||
ABCD = vsha1pq_u32(ABCD, E1, TMP1);
|
||||
TMP1 = vaddq_u32(MSG1, vdupq_n_u32(0x6ED9EBA1));
|
||||
MSG2 = vsha1su1q_u32(MSG2, MSG1);
|
||||
MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);
|
||||
|
||||
/* Rounds 32-35 */
|
||||
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
|
||||
ABCD = vsha1pq_u32(ABCD, E0, TMP0);
|
||||
TMP0 = vaddq_u32(MSG2, vdupq_n_u32(0x8F1BBCDC));
|
||||
MSG3 = vsha1su1q_u32(MSG3, MSG2);
|
||||
MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);
|
||||
|
||||
/* Rounds 36-39 */
|
||||
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
|
||||
ABCD = vsha1pq_u32(ABCD, E1, TMP1);
|
||||
TMP1 = vaddq_u32(MSG3, vdupq_n_u32(0x8F1BBCDC));
|
||||
MSG0 = vsha1su1q_u32(MSG0, MSG3);
|
||||
MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);
|
||||
|
||||
/* Rounds 40-43 */
|
||||
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
|
||||
ABCD = vsha1mq_u32(ABCD, E0, TMP0);
|
||||
TMP0 = vaddq_u32(MSG0, vdupq_n_u32(0x8F1BBCDC));
|
||||
MSG1 = vsha1su1q_u32(MSG1, MSG0);
|
||||
MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);
|
||||
|
||||
/* Rounds 44-47 */
|
||||
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
|
||||
ABCD = vsha1mq_u32(ABCD, E1, TMP1);
|
||||
TMP1 = vaddq_u32(MSG1, vdupq_n_u32(0x8F1BBCDC));
|
||||
MSG2 = vsha1su1q_u32(MSG2, MSG1);
|
||||
MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);
|
||||
|
||||
/* Rounds 48-51 */
|
||||
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
|
||||
ABCD = vsha1mq_u32(ABCD, E0, TMP0);
|
||||
TMP0 = vaddq_u32(MSG2, vdupq_n_u32(0x8F1BBCDC));
|
||||
MSG3 = vsha1su1q_u32(MSG3, MSG2);
|
||||
MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);
|
||||
|
||||
/* Rounds 52-55 */
|
||||
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
|
||||
ABCD = vsha1mq_u32(ABCD, E1, TMP1);
|
||||
TMP1 = vaddq_u32(MSG3, vdupq_n_u32(0xCA62C1D6));
|
||||
MSG0 = vsha1su1q_u32(MSG0, MSG3);
|
||||
MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);
|
||||
|
||||
/* Rounds 56-59 */
|
||||
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
|
||||
ABCD = vsha1mq_u32(ABCD, E0, TMP0);
|
||||
TMP0 = vaddq_u32(MSG0, vdupq_n_u32(0xCA62C1D6));
|
||||
MSG1 = vsha1su1q_u32(MSG1, MSG0);
|
||||
MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);
|
||||
|
||||
/* Rounds 60-63 */
|
||||
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
|
||||
ABCD = vsha1pq_u32(ABCD, E1, TMP1);
|
||||
TMP1 = vaddq_u32(MSG1, vdupq_n_u32(0xCA62C1D6));
|
||||
MSG2 = vsha1su1q_u32(MSG2, MSG1);
|
||||
MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);
|
||||
|
||||
/* Rounds 64-67 */
|
||||
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
|
||||
ABCD = vsha1pq_u32(ABCD, E0, TMP0);
|
||||
TMP0 = vaddq_u32(MSG2, vdupq_n_u32(0xCA62C1D6));
|
||||
MSG3 = vsha1su1q_u32(MSG3, MSG2);
|
||||
MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);
|
||||
|
||||
/* Rounds 68-71 */
|
||||
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
|
||||
ABCD = vsha1pq_u32(ABCD, E1, TMP1);
|
||||
TMP1 = vaddq_u32(MSG3, vdupq_n_u32(0xCA62C1D6));
|
||||
MSG0 = vsha1su1q_u32(MSG0, MSG3);
|
||||
|
||||
/* Rounds 72-75 */
|
||||
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
|
||||
ABCD = vsha1pq_u32(ABCD, E0, TMP0);
|
||||
|
||||
/* Rounds 76-79 */
|
||||
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
|
||||
ABCD = vsha1pq_u32(ABCD, E1, TMP1);
|
||||
|
||||
/* Combine state */
|
||||
E0 += E0_SAVED;
|
||||
ABCD = vaddq_u32(ABCD_SAVED, ABCD);
|
||||
|
||||
/* Save state */
|
||||
vst1q_u32(&md->sha1.state[0], ABCD);
|
||||
md->sha1.state[4] = E0;
|
||||
// -> END arm intrinsics block
|
||||
#elif SHA1_TARGET_X86
|
||||
/* sha1-x86.c - Intel SHA extensions using C intrinsics */
|
||||
/* Written and placed in public domain by Jeffrey Walton */
|
||||
/* Based on code from Intel, and by Sean Gulley for */
|
||||
/* the miTLS project. */
|
||||
// -> BEGIN x86_64 intrinsics block
|
||||
__m128i ABCD, ABCD_SAVE, E0, E0_SAVE, E1;
|
||||
__m128i MSG0, MSG1, MSG2, MSG3;
|
||||
const __m128i MASK = _mm_set_epi64x(0x0001020304050607ULL, 0x08090a0b0c0d0e0fULL);
|
||||
|
||||
/* Load initial values */
|
||||
ABCD = _mm_loadu_si128((const __m128i*) md->sha1.state);
|
||||
E0 = _mm_set_epi32(md->sha1.state[4], 0, 0, 0);
|
||||
ABCD = _mm_shuffle_epi32(ABCD, 0x1B);
|
||||
|
||||
/* Save current state */
|
||||
ABCD_SAVE = ABCD;
|
||||
E0_SAVE = E0;
|
||||
|
||||
/* Rounds 0-3 */
|
||||
MSG0 = _mm_loadu_si128((const __m128i*)(buf + 0));
|
||||
MSG0 = _mm_shuffle_epi8(MSG0, MASK);
|
||||
E0 = _mm_add_epi32(E0, MSG0);
|
||||
E1 = ABCD;
|
||||
ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 0);
|
||||
|
||||
/* Rounds 4-7 */
|
||||
MSG1 = _mm_loadu_si128((const __m128i*)(buf + 16));
|
||||
MSG1 = _mm_shuffle_epi8(MSG1, MASK);
|
||||
E1 = _mm_sha1nexte_epu32(E1, MSG1);
|
||||
E0 = ABCD;
|
||||
ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 0);
|
||||
MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);
|
||||
|
||||
/* Rounds 8-11 */
|
||||
MSG2 = _mm_loadu_si128((const __m128i*)(buf + 32));
|
||||
MSG2 = _mm_shuffle_epi8(MSG2, MASK);
|
||||
E0 = _mm_sha1nexte_epu32(E0, MSG2);
|
||||
E1 = ABCD;
|
||||
ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 0);
|
||||
MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
|
||||
MSG0 = _mm_xor_si128(MSG0, MSG2);
|
||||
|
||||
/* Rounds 12-15 */
|
||||
MSG3 = _mm_loadu_si128((const __m128i*)(buf + 48));
|
||||
MSG3 = _mm_shuffle_epi8(MSG3, MASK);
|
||||
E1 = _mm_sha1nexte_epu32(E1, MSG3);
|
||||
E0 = ABCD;
|
||||
MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
|
||||
ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 0);
|
||||
MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
|
||||
MSG1 = _mm_xor_si128(MSG1, MSG3);
|
||||
|
||||
/* Rounds 16-19 */
|
||||
E0 = _mm_sha1nexte_epu32(E0, MSG0);
|
||||
E1 = ABCD;
|
||||
MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
|
||||
ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 0);
|
||||
MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
|
||||
MSG2 = _mm_xor_si128(MSG2, MSG0);
|
||||
|
||||
/* Rounds 20-23 */
|
||||
E1 = _mm_sha1nexte_epu32(E1, MSG1);
|
||||
E0 = ABCD;
|
||||
MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
|
||||
ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 1);
|
||||
MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);
|
||||
MSG3 = _mm_xor_si128(MSG3, MSG1);
|
||||
|
||||
/* Rounds 24-27 */
|
||||
E0 = _mm_sha1nexte_epu32(E0, MSG2);
|
||||
E1 = ABCD;
|
||||
MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
|
||||
ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 1);
|
||||
MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
|
||||
MSG0 = _mm_xor_si128(MSG0, MSG2);
|
||||
|
||||
/* Rounds 28-31 */
|
||||
E1 = _mm_sha1nexte_epu32(E1, MSG3);
|
||||
E0 = ABCD;
|
||||
MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
|
||||
ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 1);
|
||||
MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
|
||||
MSG1 = _mm_xor_si128(MSG1, MSG3);
|
||||
|
||||
/* Rounds 32-35 */
|
||||
E0 = _mm_sha1nexte_epu32(E0, MSG0);
|
||||
E1 = ABCD;
|
||||
MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
|
||||
ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 1);
|
||||
MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
|
||||
MSG2 = _mm_xor_si128(MSG2, MSG0);
|
||||
|
||||
/* Rounds 36-39 */
|
||||
E1 = _mm_sha1nexte_epu32(E1, MSG1);
|
||||
E0 = ABCD;
|
||||
MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
|
||||
ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 1);
|
||||
MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);
|
||||
MSG3 = _mm_xor_si128(MSG3, MSG1);
|
||||
|
||||
/* Rounds 40-43 */
|
||||
E0 = _mm_sha1nexte_epu32(E0, MSG2);
|
||||
E1 = ABCD;
|
||||
MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
|
||||
ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 2);
|
||||
MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
|
||||
MSG0 = _mm_xor_si128(MSG0, MSG2);
|
||||
|
||||
/* Rounds 44-47 */
|
||||
E1 = _mm_sha1nexte_epu32(E1, MSG3);
|
||||
E0 = ABCD;
|
||||
MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
|
||||
ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 2);
|
||||
MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
|
||||
MSG1 = _mm_xor_si128(MSG1, MSG3);
|
||||
|
||||
/* Rounds 48-51 */
|
||||
E0 = _mm_sha1nexte_epu32(E0, MSG0);
|
||||
E1 = ABCD;
|
||||
MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
|
||||
ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 2);
|
||||
MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
|
||||
MSG2 = _mm_xor_si128(MSG2, MSG0);
|
||||
|
||||
/* Rounds 52-55 */
|
||||
E1 = _mm_sha1nexte_epu32(E1, MSG1);
|
||||
E0 = ABCD;
|
||||
MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
|
||||
ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 2);
|
||||
MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);
|
||||
MSG3 = _mm_xor_si128(MSG3, MSG1);
|
||||
|
||||
/* Rounds 56-59 */
|
||||
E0 = _mm_sha1nexte_epu32(E0, MSG2);
|
||||
E1 = ABCD;
|
||||
MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
|
||||
ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 2);
|
||||
MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
|
||||
MSG0 = _mm_xor_si128(MSG0, MSG2);
|
||||
|
||||
/* Rounds 60-63 */
|
||||
E1 = _mm_sha1nexte_epu32(E1, MSG3);
|
||||
E0 = ABCD;
|
||||
MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
|
||||
ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 3);
|
||||
MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
|
||||
MSG1 = _mm_xor_si128(MSG1, MSG3);
|
||||
|
||||
/* Rounds 64-67 */
|
||||
E0 = _mm_sha1nexte_epu32(E0, MSG0);
|
||||
E1 = ABCD;
|
||||
MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
|
||||
ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 3);
|
||||
MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
|
||||
MSG2 = _mm_xor_si128(MSG2, MSG0);
|
||||
|
||||
/* Rounds 68-71 */
|
||||
E1 = _mm_sha1nexte_epu32(E1, MSG1);
|
||||
E0 = ABCD;
|
||||
MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
|
||||
ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 3);
|
||||
MSG3 = _mm_xor_si128(MSG3, MSG1);
|
||||
|
||||
/* Rounds 72-75 */
|
||||
E0 = _mm_sha1nexte_epu32(E0, MSG2);
|
||||
E1 = ABCD;
|
||||
MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
|
||||
ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 3);
|
||||
|
||||
/* Rounds 76-79 */
|
||||
E1 = _mm_sha1nexte_epu32(E1, MSG3);
|
||||
E0 = ABCD;
|
||||
ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 3);
|
||||
|
||||
/* Combine state */
|
||||
E0 = _mm_sha1nexte_epu32(E0, E0_SAVE);
|
||||
ABCD = _mm_add_epi32(ABCD, ABCD_SAVE);
|
||||
|
||||
/* Save state */
|
||||
ABCD = _mm_shuffle_epi32(ABCD, 0x1B);
|
||||
_mm_storeu_si128((__m128i*) md->sha1.state, ABCD);
|
||||
md->sha1.state[4] = _mm_extract_epi32(E0, 3);
|
||||
// -> END x86_64 intrinsics block
|
||||
#else
|
||||
// -> BEGIN generic, non intrinsics block
|
||||
/*
|
||||
* SHA-1 hash in C
|
||||
*
|
||||
* Copyright (c) 2023 Project Nayuki. (MIT License)
|
||||
* https://www.nayuki.io/page/fast-sha1-hash-implementation-in-x86-assembly
|
||||
*/
|
||||
|
||||
for (i = 0; i < 20; ) {
|
||||
FF0(a,b,c,d,e,i++);
|
||||
FF0(e,a,b,c,d,i++);
|
||||
FF0(d,e,a,b,c,i++);
|
||||
FF0(c,d,e,a,b,i++);
|
||||
FF0(b,c,d,e,a,i++);
|
||||
}
|
||||
#define ROTL32(x, n) (((0U + (x)) << (n)) | ((x) >> (32 - (n)))) // Assumes that x is uint32_t and 0 < n < 32
|
||||
|
||||
/* round two */
|
||||
for (; i < 40; ) {
|
||||
FF1(a,b,c,d,e,i++);
|
||||
FF1(e,a,b,c,d,i++);
|
||||
FF1(d,e,a,b,c,i++);
|
||||
FF1(c,d,e,a,b,i++);
|
||||
FF1(b,c,d,e,a,i++);
|
||||
}
|
||||
#define LOADSCHEDULE(i) \
|
||||
schedule[i] = (uint32_t)buf[i * 4 + 0] << 24 \
|
||||
| (uint32_t)buf[i * 4 + 1] << 16 \
|
||||
| (uint32_t)buf[i * 4 + 2] << 8 \
|
||||
| (uint32_t)buf[i * 4 + 3] << 0;
|
||||
|
||||
/* round three */
|
||||
for (; i < 60; ) {
|
||||
FF2(a,b,c,d,e,i++);
|
||||
FF2(e,a,b,c,d,i++);
|
||||
FF2(d,e,a,b,c,i++);
|
||||
FF2(c,d,e,a,b,i++);
|
||||
FF2(b,c,d,e,a,i++);
|
||||
}
|
||||
#define SCHEDULE(i) \
|
||||
temp = schedule[(i - 3) & 0xF] ^ schedule[(i - 8) & 0xF] ^ schedule[(i - 14) & 0xF] ^ schedule[(i - 16) & 0xF]; \
|
||||
schedule[i & 0xF] = ROTL32(temp, 1);
|
||||
|
||||
/* round four */
|
||||
for (; i < 80; ) {
|
||||
FF3(a,b,c,d,e,i++);
|
||||
FF3(e,a,b,c,d,i++);
|
||||
FF3(d,e,a,b,c,i++);
|
||||
FF3(c,d,e,a,b,i++);
|
||||
FF3(b,c,d,e,a,i++);
|
||||
}
|
||||
#define ROUND0a(a, b, c, d, e, i) LOADSCHEDULE(i) ROUNDTAIL(a, b, e, ((b & c) | (~b & d)) , i, 0x5A827999)
|
||||
#define ROUND0b(a, b, c, d, e, i) SCHEDULE(i) ROUNDTAIL(a, b, e, ((b & c) | (~b & d)) , i, 0x5A827999)
|
||||
#define ROUND1(a, b, c, d, e, i) SCHEDULE(i) ROUNDTAIL(a, b, e, (b ^ c ^ d) , i, 0x6ED9EBA1)
|
||||
#define ROUND2(a, b, c, d, e, i) SCHEDULE(i) ROUNDTAIL(a, b, e, ((b & c) ^ (b & d) ^ (c & d)), i, 0x8F1BBCDC)
|
||||
#define ROUND3(a, b, c, d, e, i) SCHEDULE(i) ROUNDTAIL(a, b, e, (b ^ c ^ d) , i, 0xCA62C1D6)
|
||||
|
||||
#define ROUNDTAIL(a, b, e, f, i, k) \
|
||||
e = 0U + e + ROTL32(a, 5) + f + UINT32_C(k) + schedule[i & 0xF]; \
|
||||
b = ROTL32(b, 30);
|
||||
|
||||
uint32_t a = md->sha1.state[0];
|
||||
uint32_t b = md->sha1.state[1];
|
||||
uint32_t c = md->sha1.state[2];
|
||||
uint32_t d = md->sha1.state[3];
|
||||
uint32_t e = md->sha1.state[4];
|
||||
|
||||
uint32_t schedule[16];
|
||||
uint32_t temp;
|
||||
ROUND0a(a, b, c, d, e, 0)
|
||||
ROUND0a(e, a, b, c, d, 1)
|
||||
ROUND0a(d, e, a, b, c, 2)
|
||||
ROUND0a(c, d, e, a, b, 3)
|
||||
ROUND0a(b, c, d, e, a, 4)
|
||||
ROUND0a(a, b, c, d, e, 5)
|
||||
ROUND0a(e, a, b, c, d, 6)
|
||||
ROUND0a(d, e, a, b, c, 7)
|
||||
ROUND0a(c, d, e, a, b, 8)
|
||||
ROUND0a(b, c, d, e, a, 9)
|
||||
ROUND0a(a, b, c, d, e, 10)
|
||||
ROUND0a(e, a, b, c, d, 11)
|
||||
ROUND0a(d, e, a, b, c, 12)
|
||||
ROUND0a(c, d, e, a, b, 13)
|
||||
ROUND0a(b, c, d, e, a, 14)
|
||||
ROUND0a(a, b, c, d, e, 15)
|
||||
ROUND0b(e, a, b, c, d, 16)
|
||||
ROUND0b(d, e, a, b, c, 17)
|
||||
ROUND0b(c, d, e, a, b, 18)
|
||||
ROUND0b(b, c, d, e, a, 19)
|
||||
ROUND1(a, b, c, d, e, 20)
|
||||
ROUND1(e, a, b, c, d, 21)
|
||||
ROUND1(d, e, a, b, c, 22)
|
||||
ROUND1(c, d, e, a, b, 23)
|
||||
ROUND1(b, c, d, e, a, 24)
|
||||
ROUND1(a, b, c, d, e, 25)
|
||||
ROUND1(e, a, b, c, d, 26)
|
||||
ROUND1(d, e, a, b, c, 27)
|
||||
ROUND1(c, d, e, a, b, 28)
|
||||
ROUND1(b, c, d, e, a, 29)
|
||||
ROUND1(a, b, c, d, e, 30)
|
||||
ROUND1(e, a, b, c, d, 31)
|
||||
ROUND1(d, e, a, b, c, 32)
|
||||
ROUND1(c, d, e, a, b, 33)
|
||||
ROUND1(b, c, d, e, a, 34)
|
||||
ROUND1(a, b, c, d, e, 35)
|
||||
ROUND1(e, a, b, c, d, 36)
|
||||
ROUND1(d, e, a, b, c, 37)
|
||||
ROUND1(c, d, e, a, b, 38)
|
||||
ROUND1(b, c, d, e, a, 39)
|
||||
ROUND2(a, b, c, d, e, 40)
|
||||
ROUND2(e, a, b, c, d, 41)
|
||||
ROUND2(d, e, a, b, c, 42)
|
||||
ROUND2(c, d, e, a, b, 43)
|
||||
ROUND2(b, c, d, e, a, 44)
|
||||
ROUND2(a, b, c, d, e, 45)
|
||||
ROUND2(e, a, b, c, d, 46)
|
||||
ROUND2(d, e, a, b, c, 47)
|
||||
ROUND2(c, d, e, a, b, 48)
|
||||
ROUND2(b, c, d, e, a, 49)
|
||||
ROUND2(a, b, c, d, e, 50)
|
||||
ROUND2(e, a, b, c, d, 51)
|
||||
ROUND2(d, e, a, b, c, 52)
|
||||
ROUND2(c, d, e, a, b, 53)
|
||||
ROUND2(b, c, d, e, a, 54)
|
||||
ROUND2(a, b, c, d, e, 55)
|
||||
ROUND2(e, a, b, c, d, 56)
|
||||
ROUND2(d, e, a, b, c, 57)
|
||||
ROUND2(c, d, e, a, b, 58)
|
||||
ROUND2(b, c, d, e, a, 59)
|
||||
ROUND3(a, b, c, d, e, 60)
|
||||
ROUND3(e, a, b, c, d, 61)
|
||||
ROUND3(d, e, a, b, c, 62)
|
||||
ROUND3(c, d, e, a, b, 63)
|
||||
ROUND3(b, c, d, e, a, 64)
|
||||
ROUND3(a, b, c, d, e, 65)
|
||||
ROUND3(e, a, b, c, d, 66)
|
||||
ROUND3(d, e, a, b, c, 67)
|
||||
ROUND3(c, d, e, a, b, 68)
|
||||
ROUND3(b, c, d, e, a, 69)
|
||||
ROUND3(a, b, c, d, e, 70)
|
||||
ROUND3(e, a, b, c, d, 71)
|
||||
ROUND3(d, e, a, b, c, 72)
|
||||
ROUND3(c, d, e, a, b, 73)
|
||||
ROUND3(b, c, d, e, a, 74)
|
||||
ROUND3(a, b, c, d, e, 75)
|
||||
ROUND3(e, a, b, c, d, 76)
|
||||
ROUND3(d, e, a, b, c, 77)
|
||||
ROUND3(c, d, e, a, b, 78)
|
||||
ROUND3(b, c, d, e, a, 79)
|
||||
|
||||
md->sha1.state[0] = 0U + md->sha1.state[0] + a;
|
||||
md->sha1.state[1] = 0U + md->sha1.state[1] + b;
|
||||
md->sha1.state[2] = 0U + md->sha1.state[2] + c;
|
||||
md->sha1.state[3] = 0U + md->sha1.state[3] + d;
|
||||
md->sha1.state[4] = 0U + md->sha1.state[4] + e;
|
||||
|
||||
#undef ROTL32
|
||||
#undef LOADSCHEDULE
|
||||
#undef SCHEDULE
|
||||
#undef ROUND0a
|
||||
#undef ROUND0b
|
||||
#undef ROUND1
|
||||
#undef ROUND2
|
||||
#undef ROUND3
|
||||
#undef ROUNDTAIL
|
||||
// -> END generic, non intrinsics block
|
||||
#endif
|
||||
|
||||
#undef FF0
|
||||
#undef FF1
|
||||
#undef FF2
|
||||
#undef FF3
|
||||
|
||||
/* store */
|
||||
md->sha1.state[0] = md->sha1.state[0] + a;
|
||||
md->sha1.state[1] = md->sha1.state[1] + b;
|
||||
md->sha1.state[2] = md->sha1.state[2] + c;
|
||||
md->sha1.state[3] = md->sha1.state[3] + d;
|
||||
md->sha1.state[4] = md->sha1.state[4] + e;
|
||||
|
||||
return CRYPT_OK;
|
||||
}
|
||||
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
/* this is the "32-bit at least" data type
|
||||
* Re-define it to suit your platform but it must be at least 32-bits
|
||||
*/
|
||||
#if defined(__x86_64__) || (defined(__sparc__) && defined(__arch64__))
|
||||
#if defined(__x86_64__) || (defined(__sparc__) && defined(__arch64__)) || defined(__arm64__) || defined(__aarch64__)
|
||||
typedef unsigned ulong32;
|
||||
#else
|
||||
typedef unsigned long ulong32;
|
||||
|
|
|
@ -652,7 +652,7 @@ github.com/multiformats/go-multistream
|
|||
# github.com/multiformats/go-varint v0.0.7
|
||||
## explicit; go 1.18
|
||||
github.com/multiformats/go-varint
|
||||
# github.com/mutecomm/go-sqlcipher v0.0.0-20190227152316-55dbde17881f => github.com/status-im/go-sqlcipher v0.1.0-status.1
|
||||
# github.com/mutecomm/go-sqlcipher v0.0.0-20190227152316-55dbde17881f => github.com/status-im/go-sqlcipher v0.1.0-status.3
|
||||
## explicit; go 1.12
|
||||
github.com/mutecomm/go-sqlcipher
|
||||
# github.com/nfnt/resize v0.0.0-00010101000000-000000000000 => github.com/status-im/resize v0.0.0-20201215164250-7c6d9f0d3088
|
||||
|
|
Loading…
Reference in New Issue