Mirror of https://github.com/status-im/secp256k1.git
Merge #772: Improve constant-timeness on PowerPC
67a429f31fd3d1b37c5365cc58b70588b8645d62 Suppress a harmless variable-time optimization by clang in _int_cmov (Tim Ruffing)
5b196338f0c8dc07bf0eece37b46d8686c4da3ce Remove redundant "? 1 : 0" after comparisons in scalar code (Tim Ruffing)

Pull request description:

  Attempt at resolving #771. This surprisingly seems to improve the situation at least for the compilers available on godbolt.

ACKs for top commit:
  gmaxwell:
    ACK 67a429f31fd3d1b37c5365cc58b70588b8645d62
  elichai:
    tACK 67a429f31fd3d1b37c5365cc58b70588b8645d62

Tree-SHA512: ee8b0c86831ec8c3d5a9abcad773ed8a0f267e5c47012e4e1423b10a64c26b4cf6e3c466c3df765ba7e636787a3fe134d633926d67b599287f12c51be924f478
commit 214cb3c321
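Editorial note on the scalar commit: in C, a comparison such as c0 < tl already evaluates to 0 or 1 (with type int), so the trailing "? 1 : 0" is redundant; dropping it reportedly nudged the compilers inspected for #771 (the report concerned PowerPC) toward branch-free carry code. A minimal sketch of the equivalence, with illustrative helper names (carry_ternary and carry_plain are not from the library):

    #include <assert.h>
    #include <stdint.h>

    /* Both ways of computing the carry out of a wrapping unsigned addition
     * give the same value; the second form is what the patched macros use. */
    static uint64_t carry_ternary(uint64_t c0, uint64_t tl) { return (c0 < tl) ? 1 : 0; }
    static uint64_t carry_plain(uint64_t c0, uint64_t tl)   { return (c0 < tl); }

    int main(void) {
        uint64_t c0 = 0xFFFFFFFFFFFFFFFFu;
        uint64_t tl = 2;
        c0 += tl;                           /* wraps to 1, so a carry occurred */
        assert(carry_ternary(c0, tl) == 1);
        assert(carry_plain(c0, tl) == 1);   /* identical value, no branch needed */
        return 0;
    }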
src/scalar_4x64_impl.h

@@ -192,9 +192,9 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
         tl = t; \
     } \
     c0 += tl;                 /* overflow is handled on the next line */ \
-    th += (c0 < tl) ? 1 : 0;  /* at most 0xFFFFFFFFFFFFFFFF */ \
+    th += (c0 < tl);          /* at most 0xFFFFFFFFFFFFFFFF */ \
     c1 += th;                 /* overflow is handled on the next line */ \
-    c2 += (c1 < th) ? 1 : 0;  /* never overflows by contract (verified in the next line) */ \
+    c2 += (c1 < th);          /* never overflows by contract (verified in the next line) */ \
     VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
 }
@@ -207,7 +207,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
         tl = t; \
     } \
     c0 += tl;                 /* overflow is handled on the next line */ \
-    th += (c0 < tl) ? 1 : 0;  /* at most 0xFFFFFFFFFFFFFFFF */ \
+    th += (c0 < tl);          /* at most 0xFFFFFFFFFFFFFFFF */ \
     c1 += th;                 /* never overflows by contract (verified in the next line) */ \
     VERIFY_CHECK(c1 >= th); \
 }
@@ -221,16 +221,16 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
         tl = t; \
     } \
     th2 = th + th;                  /* at most 0xFFFFFFFFFFFFFFFE (in case th was 0x7FFFFFFFFFFFFFFF) */ \
-    c2 += (th2 < th) ? 1 : 0;       /* never overflows by contract (verified the next line) */ \
+    c2 += (th2 < th);               /* never overflows by contract (verified the next line) */ \
     VERIFY_CHECK((th2 >= th) || (c2 != 0)); \
     tl2 = tl + tl;                  /* at most 0xFFFFFFFFFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFFFFFFFFFF) */ \
-    th2 += (tl2 < tl) ? 1 : 0;      /* at most 0xFFFFFFFFFFFFFFFF */ \
+    th2 += (tl2 < tl);              /* at most 0xFFFFFFFFFFFFFFFF */ \
     c0 += tl2;                      /* overflow is handled on the next line */ \
-    th2 += (c0 < tl2) ? 1 : 0;      /* second overflow is handled on the next line */ \
+    th2 += (c0 < tl2);              /* second overflow is handled on the next line */ \
     c2 += (c0 < tl2) & (th2 == 0);  /* never overflows by contract (verified the next line) */ \
     VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \
     c1 += th2;                      /* overflow is handled on the next line */ \
-    c2 += (c1 < th2) ? 1 : 0;       /* never overflows by contract (verified the next line) */ \
+    c2 += (c1 < th2);               /* never overflows by contract (verified the next line) */ \
     VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \
 }
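Editorial note (not part of the diff): muladd2 doubles the 128-bit product (th,tl) before accumulating it, and the same comparison idiom captures the carries produced by the doubling. A rough standalone sketch of that doubling step, with illustrative names (dbl128 is not a function in the library):

    #include <assert.h>
    #include <stdint.h>

    /* Double a 128-bit value held as (hi,lo), counting the bit shifted out of
     * hi in *c2, in the same style as the muladd2 macro above. */
    static void dbl128(uint64_t *hi, uint64_t *lo, uint64_t *c2) {
        uint64_t hi2 = *hi + *hi;   /* at most 0xFFFFFFFFFFFFFFFE */
        uint64_t lo2 = *lo + *lo;   /* at most 0xFFFFFFFFFFFFFFFE */
        *c2 += (hi2 < *hi);         /* top bit of hi shifted out */
        hi2 += (lo2 < *lo);         /* carry from doubling lo; cannot overflow hi2 */
        *hi = hi2;
        *lo = lo2;
    }

    int main(void) {
        uint64_t hi = 0x8000000000000001u, lo = 0x8000000000000000u, c2 = 0;
        dbl128(&hi, &lo, &c2);
        assert(hi == 3 && lo == 0 && c2 == 1);   /* doubled value, overflow counted in c2 */
        return 0;
    }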
@@ -238,15 +238,15 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
 #define sumadd(a) { \
     unsigned int over; \
     c0 += (a);                  /* overflow is handled on the next line */ \
-    over = (c0 < (a)) ? 1 : 0; \
+    over = (c0 < (a)); \
     c1 += over;                 /* overflow is handled on the next line */ \
-    c2 += (c1 < over) ? 1 : 0;  /* never overflows by contract */ \
+    c2 += (c1 < over);          /* never overflows by contract */ \
 }
 
 /** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */
 #define sumadd_fast(a) { \
     c0 += (a);                 /* overflow is handled on the next line */ \
-    c1 += (c0 < (a)) ? 1 : 0;  /* never overflows by contract (verified the next line) */ \
+    c1 += (c0 < (a));          /* never overflows by contract (verified the next line) */ \
     VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
     VERIFY_CHECK(c2 == 0); \
 }
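Editorial note (not part of the diff): sumadd ripples a carry through all three limbs, and the intermediate variable over has to be kept so that the carry out of c1 can be tested with the same comparison. A rough sketch with illustrative names (acc3_add is not a library function):

    #include <assert.h>
    #include <stdint.h>

    /* Three-limb carry chain in the style of sumadd: the carry out of c0 is
     * (c0 < a) after the wrapping add, and the carry out of c1 is (c1 < over),
     * since 'over' is the only thing that was just added to c1. */
    static void acc3_add(uint64_t *c0, uint64_t *c1, uint64_t *c2, uint64_t a) {
        uint64_t over;
        *c0 += a;               /* wraps on overflow */
        over = (*c0 < a);       /* 1 if the addition wrapped, else 0 */
        *c1 += over;            /* wraps on overflow */
        *c2 += (*c1 < over);    /* second-level carry */
    }

    int main(void) {
        uint64_t c0 = 0xFFFFFFFFFFFFFFFFu, c1 = 0xFFFFFFFFFFFFFFFFu, c2 = 0;
        acc3_add(&c0, &c1, &c2, 1);   /* ripples a carry all the way into c2 */
        assert(c0 == 0 && c1 == 0 && c2 == 1);
        return 0;
    }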
src/scalar_8x32_impl.h

@@ -271,9 +271,9 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
         tl = t; \
     } \
     c0 += tl;                 /* overflow is handled on the next line */ \
-    th += (c0 < tl) ? 1 : 0;  /* at most 0xFFFFFFFF */ \
+    th += (c0 < tl);          /* at most 0xFFFFFFFF */ \
     c1 += th;                 /* overflow is handled on the next line */ \
-    c2 += (c1 < th) ? 1 : 0;  /* never overflows by contract (verified in the next line) */ \
+    c2 += (c1 < th);          /* never overflows by contract (verified in the next line) */ \
     VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
 }
@@ -286,7 +286,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
         tl = t; \
     } \
     c0 += tl;                 /* overflow is handled on the next line */ \
-    th += (c0 < tl) ? 1 : 0;  /* at most 0xFFFFFFFF */ \
+    th += (c0 < tl);          /* at most 0xFFFFFFFF */ \
     c1 += th;                 /* never overflows by contract (verified in the next line) */ \
     VERIFY_CHECK(c1 >= th); \
 }
@@ -300,16 +300,16 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
         tl = t; \
     } \
     th2 = th + th;                  /* at most 0xFFFFFFFE (in case th was 0x7FFFFFFF) */ \
-    c2 += (th2 < th) ? 1 : 0;       /* never overflows by contract (verified the next line) */ \
+    c2 += (th2 < th);               /* never overflows by contract (verified the next line) */ \
     VERIFY_CHECK((th2 >= th) || (c2 != 0)); \
     tl2 = tl + tl;                  /* at most 0xFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFF) */ \
-    th2 += (tl2 < tl) ? 1 : 0;      /* at most 0xFFFFFFFF */ \
+    th2 += (tl2 < tl);              /* at most 0xFFFFFFFF */ \
     c0 += tl2;                      /* overflow is handled on the next line */ \
-    th2 += (c0 < tl2) ? 1 : 0;      /* second overflow is handled on the next line */ \
+    th2 += (c0 < tl2);              /* second overflow is handled on the next line */ \
     c2 += (c0 < tl2) & (th2 == 0);  /* never overflows by contract (verified the next line) */ \
     VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \
     c1 += th2;                      /* overflow is handled on the next line */ \
-    c2 += (c1 < th2) ? 1 : 0;       /* never overflows by contract (verified the next line) */ \
+    c2 += (c1 < th2);               /* never overflows by contract (verified the next line) */ \
     VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \
 }
@@ -317,15 +317,15 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
 #define sumadd(a) { \
     unsigned int over; \
     c0 += (a);                  /* overflow is handled on the next line */ \
-    over = (c0 < (a)) ? 1 : 0; \
+    over = (c0 < (a)); \
     c1 += over;                 /* overflow is handled on the next line */ \
-    c2 += (c1 < over) ? 1 : 0;  /* never overflows by contract */ \
+    c2 += (c1 < over);          /* never overflows by contract */ \
 }
 
 /** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */
 #define sumadd_fast(a) { \
     c0 += (a);                 /* overflow is handled on the next line */ \
-    c1 += (c0 < (a)) ? 1 : 0;  /* never overflows by contract (verified the next line) */ \
+    c1 += (c0 < (a));          /* never overflows by contract (verified the next line) */ \
     VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
     VERIFY_CHECK(c2 == 0); \
 }
src/util.h

@@ -197,10 +197,15 @@ static SECP256K1_INLINE void memczero(void *s, size_t len, int flag) {
 /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized and non-negative.*/
 static SECP256K1_INLINE void secp256k1_int_cmov(int *r, const int *a, int flag) {
     unsigned int mask0, mask1, r_masked, a_masked;
+    /* Access flag with a volatile-qualified lvalue.
+       This prevents clang from figuring out (after inlining) that flag can
+       take only be 0 or 1, which leads to variable time code. */
+    volatile int vflag = flag;
+
     /* Casting a negative int to unsigned and back to int is implementation defined behavior */
     VERIFY_CHECK(*r >= 0 && *a >= 0);
 
-    mask0 = (unsigned int)flag + ~0u;
+    mask0 = (unsigned int)vflag + ~0u;
     mask1 = ~mask0;
     r_masked = ((unsigned int)*r & mask0);
     a_masked = ((unsigned int)*a & mask1);
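Editorial note on the util.h hunk above: after inlining, clang can prove that flag is only ever 0 or 1 and may then lower the mask arithmetic back into a branch or conditional select, which is not guaranteed to be constant-time; reading the flag through a volatile-qualified lvalue hides that value-range information. A rough standalone sketch of the same trick (ct_cmov_u32 is an illustrative name, not the library's API; like the original, it assumes flag is 0 or 1):

    #include <assert.h>
    #include <stdint.h>

    /* Copy *a into *r when flag is 1, leave *r alone when flag is 0, using
     * mask arithmetic only. The volatile read keeps the compiler from
     * specializing on the possible values of flag. */
    static void ct_cmov_u32(uint32_t *r, const uint32_t *a, int flag) {
        volatile int vflag = flag;                       /* treated as an unknown runtime value */
        uint32_t mask0 = (uint32_t)vflag + 0xFFFFFFFFu;  /* all ones if flag == 0, zero if flag == 1 */
        uint32_t mask1 = ~mask0;                         /* complementary mask */
        *r = (*r & mask0) | (*a & mask1);
    }

    int main(void) {
        uint32_t r = 5, a = 9;
        ct_cmov_u32(&r, &a, 0);   /* flag clear: r keeps its value */
        assert(r == 5);
        ct_cmov_u32(&r, &a, 1);   /* flag set: r becomes a */
        assert(r == 9);
        return 0;
    }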