/* go-sqlcipher/sha1.c */

/* LibTomCrypt, modular cryptographic library -- Tom St Denis */
/* SPDX-License-Identifier: Unlicense */
#include "tomcrypt_private.h"
#include <stdint.h>
/**
@file sha1.c
LTC_SHA1 code by Tom St Denis
*/
#ifdef LTC_SHA1
// -> BEGIN arm intrinsics block
#if defined(__APPLE__) && (defined(__arm__) || defined(__aarch32__) || defined(__arm64__) || defined(__aarch64__) || defined(_M_ARM))
# if defined(__GNUC__)
# include <stdint.h>
# endif
# if defined(__ARM_NEON) || defined(_MSC_VER) || defined(__GNUC__)
# include <arm_neon.h>
# endif
/* GCC and LLVM Clang, but not Apple Clang */
# if defined(__GNUC__) && !defined(__apple_build_version__)
# if defined(__ARM_ACLE) || defined(__ARM_FEATURE_CRYPTO)
# include <arm_acle.h>
# endif
# endif
#define SHA1_TARGET_ARM 1
// -> END arm intrinsics block
// -> BEGIN x86_64 intrinsics block
#elif defined(__x86_64__) && defined(__SHA__) /* the Intel SHA intrinsics below require the SHA extension */
#if defined(__GNUC__) /* GCC and LLVM Clang */
# include <x86intrin.h>
#endif
/* Microsoft supports the Intel SHA extensions as of Visual Studio 2015 */
#if defined(_MSC_VER)
# include <immintrin.h>
# define WIN32_LEAN_AND_MEAN
# include <Windows.h>
typedef UINT32 uint32_t;
typedef UINT8 uint8_t;
#endif
/* The x86 SHA path is currently disabled; define SHA1_TARGET_X86 to enable it
 * on builds where the SHA extensions are guaranteed to be available. */
/* #define SHA1_TARGET_X86 1 */
#endif
// -> END x86_64 intrinsics block
#define LENGTH_SIZE 8 // In bytes
#define BLOCK_LEN 64 // In bytes
#define STATE_LEN 5 // In words
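/* LibTomCrypt hash descriptor for SHA-1. The initializers below follow the
 * field order of struct ltc_hash_descriptor: name, internal ID, digest size
 * (20 bytes), block size (64 bytes), DER OID words, OID length, then the
 * init/process/done/self-test entry points; the trailing NULL is the
 * optional one-shot HMAC handler slot. */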
const struct ltc_hash_descriptor sha1_desc =
{
"sha1",
2,
20,
64,
/* OID */
{ 1, 3, 14, 3, 2, 26, },
6,
&sha1_init,
&sha1_process,
&sha1_done,
&sha1_test,
NULL
};
#ifdef LTC_CLEAN_STACK
static int ss_sha1_compress(hash_state *md, const unsigned char *buf)
#else
static int s_sha1_compress(hash_state *md, const unsigned char *buf)
#endif
{
#if SHA1_TARGET_ARM
/* sha1-arm.c - ARMv8 SHA extensions using C intrinsics */
/* Written and placed in public domain by Jeffrey Walton */
/* Based on code from ARM, and by Johannes Schneiders, Skip */
/* Hovsmith and Barry O'Rourke for the mbedTLS project. */
// -> BEGIN arm intrinsics block
uint32x4_t ABCD, ABCD_SAVED;
uint32x4_t TMP0, TMP1;
uint32x4_t MSG0, MSG1, MSG2, MSG3;
uint32_t E0, E0_SAVED, E1;
/* Load state */
ABCD = vld1q_u32(&md->sha1.state[0]);
E0 = md->sha1.state[4];
/* Save state */
ABCD_SAVED = ABCD;
E0_SAVED = E0;
/* Load message */
MSG0 = vld1q_u32((const uint32_t*)(buf));
MSG1 = vld1q_u32((const uint32_t*)(buf + 16));
MSG2 = vld1q_u32((const uint32_t*)(buf + 32));
MSG3 = vld1q_u32((const uint32_t*)(buf + 48));
/* Reverse for little endian */
MSG0 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG0)));
MSG1 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG1)));
MSG2 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG2)));
MSG3 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG3)));
TMP0 = vaddq_u32(MSG0, vdupq_n_u32(0x5A827999));
TMP1 = vaddq_u32(MSG1, vdupq_n_u32(0x5A827999));
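/* Each "Rounds N..N+3" block below follows the same pattern: vsha1h_u32
 * rotates the current lane-0 value ('a') left by 30 to form the E value
 * consumed by the next four-round block; vsha1cq/vsha1pq/vsha1mq perform
 * four rounds using the Choose, Parity or Majority function; the round
 * constant is pre-added into a message vector for a later block; and
 * vsha1su0/vsha1su1 extend the message schedule. */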
/* Rounds 0-3 */
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1cq_u32(ABCD, E0, TMP0);
TMP0 = vaddq_u32(MSG2, vdupq_n_u32(0x5A827999));
MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);
/* Rounds 4-7 */
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1cq_u32(ABCD, E1, TMP1);
TMP1 = vaddq_u32(MSG3, vdupq_n_u32(0x5A827999));
MSG0 = vsha1su1q_u32(MSG0, MSG3);
MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);
/* Rounds 8-11 */
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1cq_u32(ABCD, E0, TMP0);
TMP0 = vaddq_u32(MSG0, vdupq_n_u32(0x5A827999));
MSG1 = vsha1su1q_u32(MSG1, MSG0);
MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);
/* Rounds 12-15 */
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1cq_u32(ABCD, E1, TMP1);
TMP1 = vaddq_u32(MSG1, vdupq_n_u32(0x6ED9EBA1));
MSG2 = vsha1su1q_u32(MSG2, MSG1);
MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);
/* Rounds 16-19 */
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1cq_u32(ABCD, E0, TMP0);
TMP0 = vaddq_u32(MSG2, vdupq_n_u32(0x6ED9EBA1));
MSG3 = vsha1su1q_u32(MSG3, MSG2);
MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);
/* Rounds 20-23 */
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1pq_u32(ABCD, E1, TMP1);
TMP1 = vaddq_u32(MSG3, vdupq_n_u32(0x6ED9EBA1));
MSG0 = vsha1su1q_u32(MSG0, MSG3);
MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);
/* Rounds 24-27 */
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1pq_u32(ABCD, E0, TMP0);
TMP0 = vaddq_u32(MSG0, vdupq_n_u32(0x6ED9EBA1));
MSG1 = vsha1su1q_u32(MSG1, MSG0);
MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);
/* Rounds 28-31 */
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1pq_u32(ABCD, E1, TMP1);
TMP1 = vaddq_u32(MSG1, vdupq_n_u32(0x6ED9EBA1));
MSG2 = vsha1su1q_u32(MSG2, MSG1);
MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);
/* Rounds 32-35 */
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1pq_u32(ABCD, E0, TMP0);
TMP0 = vaddq_u32(MSG2, vdupq_n_u32(0x8F1BBCDC));
MSG3 = vsha1su1q_u32(MSG3, MSG2);
MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);
/* Rounds 36-39 */
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1pq_u32(ABCD, E1, TMP1);
TMP1 = vaddq_u32(MSG3, vdupq_n_u32(0x8F1BBCDC));
MSG0 = vsha1su1q_u32(MSG0, MSG3);
MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);
/* Rounds 40-43 */
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1mq_u32(ABCD, E0, TMP0);
TMP0 = vaddq_u32(MSG0, vdupq_n_u32(0x8F1BBCDC));
MSG1 = vsha1su1q_u32(MSG1, MSG0);
MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);
/* Rounds 44-47 */
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1mq_u32(ABCD, E1, TMP1);
TMP1 = vaddq_u32(MSG1, vdupq_n_u32(0x8F1BBCDC));
MSG2 = vsha1su1q_u32(MSG2, MSG1);
MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);
/* Rounds 48-51 */
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1mq_u32(ABCD, E0, TMP0);
TMP0 = vaddq_u32(MSG2, vdupq_n_u32(0x8F1BBCDC));
MSG3 = vsha1su1q_u32(MSG3, MSG2);
MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);
/* Rounds 52-55 */
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1mq_u32(ABCD, E1, TMP1);
TMP1 = vaddq_u32(MSG3, vdupq_n_u32(0xCA62C1D6));
MSG0 = vsha1su1q_u32(MSG0, MSG3);
MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);
/* Rounds 56-59 */
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1mq_u32(ABCD, E0, TMP0);
TMP0 = vaddq_u32(MSG0, vdupq_n_u32(0xCA62C1D6));
MSG1 = vsha1su1q_u32(MSG1, MSG0);
MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);
/* Rounds 60-63 */
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1pq_u32(ABCD, E1, TMP1);
TMP1 = vaddq_u32(MSG1, vdupq_n_u32(0xCA62C1D6));
MSG2 = vsha1su1q_u32(MSG2, MSG1);
MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);
/* Rounds 64-67 */
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1pq_u32(ABCD, E0, TMP0);
TMP0 = vaddq_u32(MSG2, vdupq_n_u32(0xCA62C1D6));
MSG3 = vsha1su1q_u32(MSG3, MSG2);
MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);
/* Rounds 68-71 */
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1pq_u32(ABCD, E1, TMP1);
TMP1 = vaddq_u32(MSG3, vdupq_n_u32(0xCA62C1D6));
MSG0 = vsha1su1q_u32(MSG0, MSG3);
/* Rounds 72-75 */
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1pq_u32(ABCD, E0, TMP0);
/* Rounds 76-79 */
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1pq_u32(ABCD, E1, TMP1);
/* Combine state */
E0 += E0_SAVED;
ABCD = vaddq_u32(ABCD_SAVED, ABCD);
/* Save state */
vst1q_u32(&md->sha1.state[0], ABCD);
md->sha1.state[4] = E0;
// -> END arm intrinsics block
#elif SHA1_TARGET_X86
/* sha1-x86.c - Intel SHA extensions using C intrinsics */
/* Written and placed in public domain by Jeffrey Walton */
/* Based on code from Intel, and by Sean Gulley for */
/* the miTLS project. */
// -> BEGIN x86_64 intrinsics block
__m128i ABCD, ABCD_SAVE, E0, E0_SAVE, E1;
__m128i MSG0, MSG1, MSG2, MSG3;
const __m128i MASK = _mm_set_epi64x(0x0001020304050607ULL, 0x08090a0b0c0d0e0fULL);
/* Load initial values */
ABCD = _mm_loadu_si128((const __m128i*) md->sha1.state);
E0 = _mm_set_epi32(md->sha1.state[4], 0, 0, 0);
ABCD = _mm_shuffle_epi32(ABCD, 0x1B);
/* Save current state */
ABCD_SAVE = ABCD;
E0_SAVE = E0;
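/* Pattern for the blocks below: _mm_sha1rnds4_epu32 performs four rounds,
 * its immediate selecting the round function and constant (0: Ch/0x5A827999,
 * 1: Parity/0x6ED9EBA1, 2: Maj/0x8F1BBCDC, 3: Parity/0xCA62C1D6);
 * _mm_sha1nexte_epu32 folds the rotated 'a' word into the next E value;
 * _mm_sha1msg1 and _mm_sha1msg2 (plus one XOR) compute the message-schedule
 * expansion; and MASK/_mm_shuffle_epi8 byte-reverses each 16-byte block of
 * the big-endian input into the register layout the SHA instructions expect. */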
/* Rounds 0-3 */
MSG0 = _mm_loadu_si128((const __m128i*)(buf + 0));
MSG0 = _mm_shuffle_epi8(MSG0, MASK);
E0 = _mm_add_epi32(E0, MSG0);
E1 = ABCD;
ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 0);
/* Rounds 4-7 */
MSG1 = _mm_loadu_si128((const __m128i*)(buf + 16));
MSG1 = _mm_shuffle_epi8(MSG1, MASK);
E1 = _mm_sha1nexte_epu32(E1, MSG1);
E0 = ABCD;
ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 0);
MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);
/* Rounds 8-11 */
MSG2 = _mm_loadu_si128((const __m128i*)(buf + 32));
MSG2 = _mm_shuffle_epi8(MSG2, MASK);
E0 = _mm_sha1nexte_epu32(E0, MSG2);
E1 = ABCD;
ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 0);
MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
MSG0 = _mm_xor_si128(MSG0, MSG2);
/* Rounds 12-15 */
MSG3 = _mm_loadu_si128((const __m128i*)(buf + 48));
MSG3 = _mm_shuffle_epi8(MSG3, MASK);
E1 = _mm_sha1nexte_epu32(E1, MSG3);
E0 = ABCD;
MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 0);
MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
MSG1 = _mm_xor_si128(MSG1, MSG3);
/* Rounds 16-19 */
E0 = _mm_sha1nexte_epu32(E0, MSG0);
E1 = ABCD;
MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 0);
MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
MSG2 = _mm_xor_si128(MSG2, MSG0);
/* Rounds 20-23 */
E1 = _mm_sha1nexte_epu32(E1, MSG1);
E0 = ABCD;
MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 1);
MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);
MSG3 = _mm_xor_si128(MSG3, MSG1);
/* Rounds 24-27 */
E0 = _mm_sha1nexte_epu32(E0, MSG2);
E1 = ABCD;
MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 1);
MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
MSG0 = _mm_xor_si128(MSG0, MSG2);
/* Rounds 28-31 */
E1 = _mm_sha1nexte_epu32(E1, MSG3);
E0 = ABCD;
MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 1);
MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
MSG1 = _mm_xor_si128(MSG1, MSG3);
/* Rounds 32-35 */
E0 = _mm_sha1nexte_epu32(E0, MSG0);
E1 = ABCD;
MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 1);
MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
MSG2 = _mm_xor_si128(MSG2, MSG0);
/* Rounds 36-39 */
E1 = _mm_sha1nexte_epu32(E1, MSG1);
E0 = ABCD;
MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 1);
MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);
MSG3 = _mm_xor_si128(MSG3, MSG1);
/* Rounds 40-43 */
E0 = _mm_sha1nexte_epu32(E0, MSG2);
E1 = ABCD;
MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 2);
MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
MSG0 = _mm_xor_si128(MSG0, MSG2);
/* Rounds 44-47 */
E1 = _mm_sha1nexte_epu32(E1, MSG3);
E0 = ABCD;
MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 2);
MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
MSG1 = _mm_xor_si128(MSG1, MSG3);
/* Rounds 48-51 */
E0 = _mm_sha1nexte_epu32(E0, MSG0);
E1 = ABCD;
MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 2);
MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
MSG2 = _mm_xor_si128(MSG2, MSG0);
/* Rounds 52-55 */
E1 = _mm_sha1nexte_epu32(E1, MSG1);
E0 = ABCD;
MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 2);
MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);
MSG3 = _mm_xor_si128(MSG3, MSG1);
/* Rounds 56-59 */
E0 = _mm_sha1nexte_epu32(E0, MSG2);
E1 = ABCD;
MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 2);
MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
MSG0 = _mm_xor_si128(MSG0, MSG2);
/* Rounds 60-63 */
E1 = _mm_sha1nexte_epu32(E1, MSG3);
E0 = ABCD;
MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 3);
MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
MSG1 = _mm_xor_si128(MSG1, MSG3);
/* Rounds 64-67 */
E0 = _mm_sha1nexte_epu32(E0, MSG0);
E1 = ABCD;
MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 3);
MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
MSG2 = _mm_xor_si128(MSG2, MSG0);
/* Rounds 68-71 */
E1 = _mm_sha1nexte_epu32(E1, MSG1);
E0 = ABCD;
MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 3);
MSG3 = _mm_xor_si128(MSG3, MSG1);
/* Rounds 72-75 */
E0 = _mm_sha1nexte_epu32(E0, MSG2);
E1 = ABCD;
MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 3);
/* Rounds 76-79 */
E1 = _mm_sha1nexte_epu32(E1, MSG3);
E0 = ABCD;
ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 3);
/* Combine state */
E0 = _mm_sha1nexte_epu32(E0, E0_SAVE);
ABCD = _mm_add_epi32(ABCD, ABCD_SAVE);
/* Save state */
ABCD = _mm_shuffle_epi32(ABCD, 0x1B);
_mm_storeu_si128((__m128i*) md->sha1.state, ABCD);
md->sha1.state[4] = _mm_extract_epi32(E0, 3);
// -> END x86_64 intrinsics block
#else
// -> BEGIN generic, non intrinsics block
/*
* SHA-1 hash in C
*
* Copyright (c) 2023 Project Nayuki. (MIT License)
* https://www.nayuki.io/page/fast-sha1-hash-implementation-in-x86-assembly
*/
#define ROTL32(x, n) (((0U + (x)) << (n)) | ((x) >> (32 - (n)))) // Assumes that x is uint32_t and 0 < n < 32
#define LOADSCHEDULE(i) \
schedule[i] = (uint32_t)buf[i * 4 + 0] << 24 \
| (uint32_t)buf[i * 4 + 1] << 16 \
| (uint32_t)buf[i * 4 + 2] << 8 \
| (uint32_t)buf[i * 4 + 3] << 0;
#define SCHEDULE(i) \
temp = schedule[(i - 3) & 0xF] ^ schedule[(i - 8) & 0xF] ^ schedule[(i - 14) & 0xF] ^ schedule[(i - 16) & 0xF]; \
schedule[i & 0xF] = ROTL32(temp, 1);
#define ROUND0a(a, b, c, d, e, i) LOADSCHEDULE(i) ROUNDTAIL(a, b, e, ((b & c) | (~b & d)) , i, 0x5A827999)
#define ROUND0b(a, b, c, d, e, i) SCHEDULE(i) ROUNDTAIL(a, b, e, ((b & c) | (~b & d)) , i, 0x5A827999)
#define ROUND1(a, b, c, d, e, i) SCHEDULE(i) ROUNDTAIL(a, b, e, (b ^ c ^ d) , i, 0x6ED9EBA1)
#define ROUND2(a, b, c, d, e, i) SCHEDULE(i) ROUNDTAIL(a, b, e, ((b & c) ^ (b & d) ^ (c & d)), i, 0x8F1BBCDC)
#define ROUND3(a, b, c, d, e, i) SCHEDULE(i) ROUNDTAIL(a, b, e, (b ^ c ^ d) , i, 0xCA62C1D6)
#define ROUNDTAIL(a, b, e, f, i, k) \
e = 0U + e + ROTL32(a, 5) + f + UINT32_C(k) + schedule[i & 0xF]; \
b = ROTL32(b, 30);
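/* For reference, ROUND0a(a, b, c, d, e, 0) expands to roughly:
 *
 *   schedule[0] = big-endian load of buf[0..3];
 *   e += ROTL32(a, 5) + ((b & c) | (~b & d)) + 0x5A827999 + schedule[0];
 *   b  = ROTL32(b, 30);
 *
 * i.e. one SHA-1 round with the Choose function; successive invocations
 * rotate the a..e argument order instead of shuffling the variables. */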
uint32_t a = md->sha1.state[0];
uint32_t b = md->sha1.state[1];
uint32_t c = md->sha1.state[2];
uint32_t d = md->sha1.state[3];
uint32_t e = md->sha1.state[4];
uint32_t schedule[16];
uint32_t temp;
ROUND0a(a, b, c, d, e, 0)
ROUND0a(e, a, b, c, d, 1)
ROUND0a(d, e, a, b, c, 2)
ROUND0a(c, d, e, a, b, 3)
ROUND0a(b, c, d, e, a, 4)
ROUND0a(a, b, c, d, e, 5)
ROUND0a(e, a, b, c, d, 6)
ROUND0a(d, e, a, b, c, 7)
ROUND0a(c, d, e, a, b, 8)
ROUND0a(b, c, d, e, a, 9)
ROUND0a(a, b, c, d, e, 10)
ROUND0a(e, a, b, c, d, 11)
ROUND0a(d, e, a, b, c, 12)
ROUND0a(c, d, e, a, b, 13)
ROUND0a(b, c, d, e, a, 14)
ROUND0a(a, b, c, d, e, 15)
ROUND0b(e, a, b, c, d, 16)
ROUND0b(d, e, a, b, c, 17)
ROUND0b(c, d, e, a, b, 18)
ROUND0b(b, c, d, e, a, 19)
ROUND1(a, b, c, d, e, 20)
ROUND1(e, a, b, c, d, 21)
ROUND1(d, e, a, b, c, 22)
ROUND1(c, d, e, a, b, 23)
ROUND1(b, c, d, e, a, 24)
ROUND1(a, b, c, d, e, 25)
ROUND1(e, a, b, c, d, 26)
ROUND1(d, e, a, b, c, 27)
ROUND1(c, d, e, a, b, 28)
ROUND1(b, c, d, e, a, 29)
ROUND1(a, b, c, d, e, 30)
ROUND1(e, a, b, c, d, 31)
ROUND1(d, e, a, b, c, 32)
ROUND1(c, d, e, a, b, 33)
ROUND1(b, c, d, e, a, 34)
ROUND1(a, b, c, d, e, 35)
ROUND1(e, a, b, c, d, 36)
ROUND1(d, e, a, b, c, 37)
ROUND1(c, d, e, a, b, 38)
ROUND1(b, c, d, e, a, 39)
ROUND2(a, b, c, d, e, 40)
ROUND2(e, a, b, c, d, 41)
ROUND2(d, e, a, b, c, 42)
ROUND2(c, d, e, a, b, 43)
ROUND2(b, c, d, e, a, 44)
ROUND2(a, b, c, d, e, 45)
ROUND2(e, a, b, c, d, 46)
ROUND2(d, e, a, b, c, 47)
ROUND2(c, d, e, a, b, 48)
ROUND2(b, c, d, e, a, 49)
ROUND2(a, b, c, d, e, 50)
ROUND2(e, a, b, c, d, 51)
ROUND2(d, e, a, b, c, 52)
ROUND2(c, d, e, a, b, 53)
ROUND2(b, c, d, e, a, 54)
ROUND2(a, b, c, d, e, 55)
ROUND2(e, a, b, c, d, 56)
ROUND2(d, e, a, b, c, 57)
ROUND2(c, d, e, a, b, 58)
ROUND2(b, c, d, e, a, 59)
ROUND3(a, b, c, d, e, 60)
ROUND3(e, a, b, c, d, 61)
ROUND3(d, e, a, b, c, 62)
ROUND3(c, d, e, a, b, 63)
ROUND3(b, c, d, e, a, 64)
ROUND3(a, b, c, d, e, 65)
ROUND3(e, a, b, c, d, 66)
ROUND3(d, e, a, b, c, 67)
ROUND3(c, d, e, a, b, 68)
ROUND3(b, c, d, e, a, 69)
ROUND3(a, b, c, d, e, 70)
ROUND3(e, a, b, c, d, 71)
ROUND3(d, e, a, b, c, 72)
ROUND3(c, d, e, a, b, 73)
ROUND3(b, c, d, e, a, 74)
ROUND3(a, b, c, d, e, 75)
ROUND3(e, a, b, c, d, 76)
ROUND3(d, e, a, b, c, 77)
ROUND3(c, d, e, a, b, 78)
ROUND3(b, c, d, e, a, 79)
/* the 0U + idiom forces unsigned arithmetic in case uint32_t promotes to int */
md->sha1.state[0] = 0U + md->sha1.state[0] + a;
md->sha1.state[1] = 0U + md->sha1.state[1] + b;
md->sha1.state[2] = 0U + md->sha1.state[2] + c;
md->sha1.state[3] = 0U + md->sha1.state[3] + d;
md->sha1.state[4] = 0U + md->sha1.state[4] + e;
#undef ROTL32
#undef LOADSCHEDULE
#undef SCHEDULE
#undef ROUND0a
#undef ROUND0b
#undef ROUND1
#undef ROUND2
#undef ROUND3
#undef ROUNDTAIL
// -> END generic, non intrinsics block
#endif
return CRYPT_OK;
}
#ifdef LTC_CLEAN_STACK
static int s_sha1_compress(hash_state *md, const unsigned char *buf)
{
int err;
err = ss_sha1_compress(md, buf);
burn_stack(sizeof(ulong32) * 87);
return err;
}
#endif
/**
Initialize the hash state
@param md The hash state you wish to initialize
@return CRYPT_OK if successful
*/
int sha1_init(hash_state * md)
{
LTC_ARGCHK(md != NULL);
md->sha1.state[0] = 0x67452301UL;
md->sha1.state[1] = 0xefcdab89UL;
md->sha1.state[2] = 0x98badcfeUL;
md->sha1.state[3] = 0x10325476UL;
md->sha1.state[4] = 0xc3d2e1f0UL;
md->sha1.curlen = 0;
md->sha1.length = 0;
return CRYPT_OK;
}
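/* Typical one-shot usage (a minimal sketch; error checking omitted):
 *
 *   hash_state md;
 *   unsigned char digest[20];
 *
 *   sha1_init(&md);
 *   sha1_process(&md, (const unsigned char *)"abc", 3);
 *   sha1_done(&md, digest);   digest now holds a9993e36... (see sha1_test)
 */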
/**
Process a block of memory through the hash
@param md The hash state
@param in The data to hash
@param inlen The length of the data (octets)
@return CRYPT_OK if successful
*/
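/* HASH_PROCESS is LibTomCrypt's helper macro: it expands to the body of
 * sha1_process(), buffering input and running s_sha1_compress() over each
 * full 64-byte block. */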
HASH_PROCESS(sha1_process, s_sha1_compress, sha1, 64)
/**
Terminate the hash to get the digest
@param md The hash state
@param out [out] The destination of the hash (20 bytes)
@return CRYPT_OK if successful
*/
int sha1_done(hash_state * md, unsigned char *out)
{
int i;
LTC_ARGCHK(md != NULL);
LTC_ARGCHK(out != NULL);
if (md->sha1.curlen >= sizeof(md->sha1.buf)) {
return CRYPT_INVALID_ARG;
}
/* increase the length of the message */
md->sha1.length += md->sha1.curlen * 8;
/* append the '1' bit */
md->sha1.buf[md->sha1.curlen++] = (unsigned char)0x80;
/* if the current length is above 56 bytes, append zeros to fill the
 * block and compress; padding and length encoding then proceed in a
 * fresh block as normal.
 */
if (md->sha1.curlen > 56) {
while (md->sha1.curlen < 64) {
md->sha1.buf[md->sha1.curlen++] = (unsigned char)0;
}
s_sha1_compress(md, md->sha1.buf);
md->sha1.curlen = 0;
}
/* pad up to 56 bytes with zeroes */
while (md->sha1.curlen < 56) {
md->sha1.buf[md->sha1.curlen++] = (unsigned char)0;
}
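/* The final block now holds the message tail, the 0x80 marker and zero
 * padding; the 64-bit big-endian bit count stored below completes it.
 * For "abc" (24 bits) the block ends:
 *   61 62 63 80 00 ... 00 | 00 00 00 00 00 00 00 18 */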
/* store length */
STORE64H(md->sha1.length, md->sha1.buf+56);
s_sha1_compress(md, md->sha1.buf);
/* copy output */
for (i = 0; i < 5; i++) {
STORE32H(md->sha1.state[i], out+(4*i));
}
#ifdef LTC_CLEAN_STACK
zeromem(md, sizeof(hash_state));
#endif
return CRYPT_OK;
}
/**
Self-test the hash
@return CRYPT_OK if successful, CRYPT_NOP if self-tests have been disabled
*/
2015-04-28 23:56:53 +00:00
int sha1_test(void)
{
#ifndef LTC_TEST
return CRYPT_NOP;
#else
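/* Test vectors from FIPS 180-1: Appendix A ("abc") and Appendix B
 * (the 448-bit two-block message). */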
static const struct {
const char *msg;
unsigned char hash[20];
} tests[] = {
{ "abc",
{ 0xa9, 0x99, 0x3e, 0x36, 0x47, 0x06, 0x81, 0x6a,
0xba, 0x3e, 0x25, 0x71, 0x78, 0x50, 0xc2, 0x6c,
0x9c, 0xd0, 0xd8, 0x9d }
},
{ "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
{ 0x84, 0x98, 0x3E, 0x44, 0x1C, 0x3B, 0xD2, 0x6E,
0xBA, 0xAE, 0x4A, 0xA1, 0xF9, 0x51, 0x29, 0xE5,
0xE5, 0x46, 0x70, 0xF1 }
}
};
int i;
unsigned char tmp[20];
hash_state md;
for (i = 0; i < (int)(sizeof(tests) / sizeof(tests[0])); i++) {
sha1_init(&md);
sha1_process(&md, (unsigned char*)tests[i].msg, (unsigned long)XSTRLEN(tests[i].msg));
sha1_done(&md, tmp);
if (compare_testvector(tmp, sizeof(tmp), tests[i].hash, sizeof(tests[i].hash), "SHA1", i)) {
return CRYPT_FAIL_TESTVECTOR;
}
}
return CRYPT_OK;
#endif
}
#endif