Centralize the definition of uint128_t and use it uniformly.

This needs to be renamed along with the other _t types, because the _t
suffix is reserved (by POSIX). But that will be done later.
Gregory Maxwell 2015-01-25 03:41:59 +00:00
parent d9543c904f
commit 4be8d6fc42
3 changed files with 48 additions and 46 deletions
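
For orientation before the per-file diffs, here is a minimal sketch of the layout this change converges on: the typedef lives once in util.h behind the HAVE___INT128 macro supplied by the build configuration, and callers cast one 64-bit operand to uint128_t so multiplications widen to 128 bits instead of truncating. The helper below is illustrative only and is not part of the commit.

#include <stdint.h>

#if defined(HAVE___INT128)
/* Centralized definition (this is what util.h gains below). */
typedef unsigned __int128 uint128_t;

/* Illustrative caller: the cast on one operand widens the multiply. */
static uint64_t mul_hi64(uint64_t a, uint64_t b) {
    uint128_t t = (uint128_t)a * b;   /* full 64x64->128 product */
    return (uint64_t)(t >> 64);       /* upper 64 bits */
}
#endif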

View File

@@ -16,7 +16,7 @@
#endif
SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) {
- __int128 c, d;
+ uint128_t c, d;
uint64_t t3, t4, tx, u0;
uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4];
const uint64_t M = 0xFFFFFFFFFFFFFULL, R = 0x1000003D10ULL;
@@ -38,13 +38,13 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t
* Note that [x 0 0 0 0 0] = [x*R].
*/
- d = (__int128)a0 * b[3]
-   + (__int128)a1 * b[2]
-   + (__int128)a2 * b[1]
-   + (__int128)a3 * b[0];
+ d = (uint128_t)a0 * b[3]
+   + (uint128_t)a1 * b[2]
+   + (uint128_t)a2 * b[1]
+   + (uint128_t)a3 * b[0];
VERIFY_BITS(d, 114);
/* [d 0 0 0] = [p3 0 0 0] */
- c = (__int128)a4 * b[4];
+ c = (uint128_t)a4 * b[4];
VERIFY_BITS(c, 112);
/* [c 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */
d += (c & M) * R; c >>= 52;
@@ -56,11 +56,11 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t
VERIFY_BITS(d, 63);
/* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */
- d += (__int128)a0 * b[4]
-   + (__int128)a1 * b[3]
-   + (__int128)a2 * b[2]
-   + (__int128)a3 * b[1]
-   + (__int128)a4 * b[0];
+ d += (uint128_t)a0 * b[4]
+   + (uint128_t)a1 * b[3]
+   + (uint128_t)a2 * b[2]
+   + (uint128_t)a3 * b[1]
+   + (uint128_t)a4 * b[0];
VERIFY_BITS(d, 115);
/* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
d += c * R;
@@ -75,13 +75,13 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t
VERIFY_BITS(t4, 48);
/* [d t4+(tx<<48) t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
- c = (__int128)a0 * b[0];
+ c = (uint128_t)a0 * b[0];
VERIFY_BITS(c, 112);
/* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 0 p4 p3 0 0 p0] */
- d += (__int128)a1 * b[4]
-   + (__int128)a2 * b[3]
-   + (__int128)a3 * b[2]
-   + (__int128)a4 * b[1];
+ d += (uint128_t)a1 * b[4]
+   + (uint128_t)a2 * b[3]
+   + (uint128_t)a3 * b[2]
+   + (uint128_t)a4 * b[1];
VERIFY_BITS(d, 115);
/* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
u0 = d & M; d >>= 52;
@@ -92,7 +92,7 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t
u0 = (u0 << 4) | tx;
VERIFY_BITS(u0, 56);
/* [d 0 t4+(u0<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
- c += (__int128)u0 * (R >> 4);
+ c += (uint128_t)u0 * (R >> 4);
VERIFY_BITS(c, 115);
/* [d 0 t4 t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
r[0] = c & M; c >>= 52;
@@ -100,13 +100,13 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t
VERIFY_BITS(c, 61);
/* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 0 p0] */
- c += (__int128)a0 * b[1]
-   + (__int128)a1 * b[0];
+ c += (uint128_t)a0 * b[1]
+   + (uint128_t)a1 * b[0];
VERIFY_BITS(c, 114);
/* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 p1 p0] */
- d += (__int128)a2 * b[4]
-   + (__int128)a3 * b[3]
-   + (__int128)a4 * b[2];
+ d += (uint128_t)a2 * b[4]
+   + (uint128_t)a3 * b[3]
+   + (uint128_t)a4 * b[2];
VERIFY_BITS(d, 114);
/* [d 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
c += (d & M) * R; d >>= 52;
@@ -118,13 +118,13 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t
VERIFY_BITS(c, 63);
/* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
- c += (__int128)a0 * b[2]
-   + (__int128)a1 * b[1]
-   + (__int128)a2 * b[0];
+ c += (uint128_t)a0 * b[2]
+   + (uint128_t)a1 * b[1]
+   + (uint128_t)a2 * b[0];
VERIFY_BITS(c, 114);
/* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 p2 p1 p0] */
- d += (__int128)a3 * b[4]
-   + (__int128)a4 * b[3];
+ d += (uint128_t)a3 * b[4]
+   + (uint128_t)a4 * b[3];
VERIFY_BITS(d, 114);
/* [d 0 0 t4 t3 c t1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
c += (d & M) * R; d >>= 52;
@@ -153,7 +153,7 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t
}
SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t *a) {
- __int128 c, d;
+ uint128_t c, d;
uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4];
int64_t t3, t4, tx, u0;
const uint64_t M = 0xFFFFFFFFFFFFFULL, R = 0x1000003D10ULL;
@@ -169,11 +169,11 @@ SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t
* Note that [x 0 0 0 0 0] = [x*R].
*/
- d = (__int128)(a0*2) * a3
-   + (__int128)(a1*2) * a2;
+ d = (uint128_t)(a0*2) * a3
+   + (uint128_t)(a1*2) * a2;
VERIFY_BITS(d, 114);
/* [d 0 0 0] = [p3 0 0 0] */
- c = (__int128)a4 * a4;
+ c = (uint128_t)a4 * a4;
VERIFY_BITS(c, 112);
/* [c 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */
d += (c & M) * R; c >>= 52;
@@ -186,9 +186,9 @@ SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t
/* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */
a4 *= 2;
- d += (__int128)a0 * a4
-   + (__int128)(a1*2) * a3
-   + (__int128)a2 * a2;
+ d += (uint128_t)a0 * a4
+   + (uint128_t)(a1*2) * a3
+   + (uint128_t)a2 * a2;
VERIFY_BITS(d, 115);
/* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
d += c * R;
@@ -203,11 +203,11 @@ SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t
VERIFY_BITS(t4, 48);
/* [d t4+(tx<<48) t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */
- c = (__int128)a0 * a0;
+ c = (uint128_t)a0 * a0;
VERIFY_BITS(c, 112);
/* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 0 p4 p3 0 0 p0] */
- d += (__int128)a1 * a4
-   + (__int128)(a2*2) * a3;
+ d += (uint128_t)a1 * a4
+   + (uint128_t)(a2*2) * a3;
VERIFY_BITS(d, 114);
/* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
u0 = d & M; d >>= 52;
@@ -218,7 +218,7 @@ SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t
u0 = (u0 << 4) | tx;
VERIFY_BITS(u0, 56);
/* [d 0 t4+(u0<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
- c += (__int128)u0 * (R >> 4);
+ c += (uint128_t)u0 * (R >> 4);
VERIFY_BITS(c, 113);
/* [d 0 t4 t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
r[0] = c & M; c >>= 52;
@@ -227,11 +227,11 @@ SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t
/* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 0 p0] */
a0 *= 2;
- c += (__int128)a0 * a1;
+ c += (uint128_t)a0 * a1;
VERIFY_BITS(c, 114);
/* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 p1 p0] */
- d += (__int128)a2 * a4
-   + (__int128)a3 * a3;
+ d += (uint128_t)a2 * a4
+   + (uint128_t)a3 * a3;
VERIFY_BITS(d, 114);
/* [d 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
c += (d & M) * R; d >>= 52;
@@ -243,11 +243,11 @@ SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t
VERIFY_BITS(c, 63);
/* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */
- c += (__int128)a0 * a2
-   + (__int128)a1 * a1;
+ c += (uint128_t)a0 * a2
+   + (uint128_t)a1 * a1;
VERIFY_BITS(c, 114);
/* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 p2 p1 p0] */
- d += (__int128)a3 * a4;
+ d += (uint128_t)a3 * a4;
VERIFY_BITS(d, 114);
/* [d 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */
c += (d & M) * R; d >>= 52;
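
For reference, the identity noted in the comments above, [x 0 0 0 0 0] = [x*R], follows from the field prime: p = 2^256 - 0x1000003D1, so 2^256 ≡ 0x1000003D1 (mod p). With 52-bit limbs, a value x carried out of limb position 5 stands for x*2^260 = x*2^4*2^256 ≡ x*0x1000003D10 = x*R (mod p), which is why overflow past the top limb is folded back in by multiplying with R.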

View File

@@ -7,8 +7,6 @@
#ifndef _SECP256K1_SCALAR_REPR_IMPL_H_
#define _SECP256K1_SCALAR_REPR_IMPL_H_
- typedef unsigned __int128 uint128_t;
/* Limbs of the secp256k1 order. */
#define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL)
#define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL)

View File

@@ -84,4 +84,8 @@ static SECP256K1_INLINE void *checked_malloc(size_t size) {
# endif
#endif
+ #if defined(HAVE___INT128)
+ typedef unsigned __int128 uint128_t;
+ #endif
#endif
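
The VERIFY_BITS annotations in the field hunks above follow directly from the limb bounds. A minimal standalone sketch of that reasoning, assuming limbs below 2^56 and using an assert-based check in place of the library's VERIFY_BITS (the names below are illustrative, not from the change):

#include <assert.h>
#include <stdint.h>

typedef unsigned __int128 uint128_t;   /* as centralized in util.h above */

/* Shift-based width check in the spirit of VERIFY_BITS. */
#define CHECK_BITS(x, n) assert(((x) >> (n)) == 0)

/* With limbs below 2^56, each 64x64->128 product is below 2^112, and the
 * sum of four such products stays below 2^114. */
static void bound_example(const uint64_t a[4], const uint64_t b[4]) {
    uint128_t d = (uint128_t)a[0] * b[3]
                + (uint128_t)a[1] * b[2]
                + (uint128_t)a[2] * b[1]
                + (uint128_t)a[3] * b[0];
    CHECK_BITS(d, 114);
    (void)d;
}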