Eliminate all side-effects from VERIFY_CHECK() usage.
The side-effects make review somewhat harder because 99.9% of the time the macro usage has no side-effects, so they're easily ignored. The main motivation for avoiding the side-effects is that the macro can then be completely stubbed out for branch-coverage analysis; otherwise all the unreachable verify code gets counted against coverage.
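For illustration, a minimal self-contained sketch of the problem and of the pattern this commit applies throughout. The macro definitions and the set_value() helper below are hypothetical stand-ins, not the library's real util-header code; the point is only that when the check macro is stubbed out, an argument with side-effects silently stops executing:

#include <stdio.h>

/* Hypothetical stand-ins: under -DCOVERAGE the check compiles to nothing,
 * so its argument must not carry side-effects. */
#ifdef COVERAGE
#define VERIFY_CHECK(cond)
#else
#define VERIFY_CHECK(cond) do { (void)(cond); } while(0)
#endif

/* Hypothetical helper that both produces a value and reports success. */
static int set_value(int *out) { *out = 42; return 1; }

int main(void) {
    int x = 0;
    int r;

    /* Old pattern: the call lives inside the macro, so stubbing the macro
     * out drops the call and x silently stays 0. */
    VERIFY_CHECK(set_value(&x));
    printf("after old pattern: x = %d\n", x);  /* 42 normally, 0 with -DCOVERAGE */

    /* New pattern: the call always runs; only the check is conditional.
     * The (void)r cast keeps the compiler from warning that r is unused
     * when VERIFY_CHECK expands to nothing. */
    x = 0;
    r = set_value(&x);
    (void)r;
    VERIFY_CHECK(r);
    printf("after new pattern: x = %d\n", x);  /* 42 either way */

    return 0;
}

Compiled normally, both calls run; with -DCOVERAGE only the second survives, which is exactly the property the coverage stubbing needs.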
parent b30fc85c9e
commit e3cd679634
@@ -260,13 +260,16 @@ static int secp256k1_ecdsa_sig_recover(const secp256k1_ecmult_context *ctx, cons
     secp256k1_gej xj;
     secp256k1_scalar rn, u1, u2;
     secp256k1_gej qj;
+    int r;
 
     if (secp256k1_scalar_is_zero(sigr) || secp256k1_scalar_is_zero(sigs)) {
         return 0;
     }
 
     secp256k1_scalar_get_b32(brx, sigr);
-    VERIFY_CHECK(secp256k1_fe_set_b32(&fx, brx)); /* brx comes from a scalar, so is less than the order; certainly less than p */
+    r = secp256k1_fe_set_b32(&fx, brx);
+    (void)r;
+    VERIFY_CHECK(r); /* brx comes from a scalar, so is less than the order; certainly less than p */
     if (recid & 2) {
         if (secp256k1_fe_cmp_var(&fx, &secp256k1_ecdsa_const_p_minus_order) >= 0) {
             return 0;
@@ -40,8 +40,13 @@ static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx
     static const unsigned char nums_b32[33] = "The scalar for this x is unknown";
     secp256k1_fe nums_x;
     secp256k1_ge nums_ge;
-    VERIFY_CHECK(secp256k1_fe_set_b32(&nums_x, nums_b32));
-    VERIFY_CHECK(secp256k1_ge_set_xo_var(&nums_ge, &nums_x, 0));
+    int r;
+    r = secp256k1_fe_set_b32(&nums_x, nums_b32);
+    (void)r;
+    VERIFY_CHECK(r);
+    r = secp256k1_ge_set_xo_var(&nums_ge, &nums_x, 0);
+    (void)r;
+    VERIFY_CHECK(r);
     secp256k1_gej_set_ge(&nums_gej, &nums_ge);
     /* Add G to make the bits in x uniformly distributed. */
     secp256k1_gej_add_ge_var(&nums_gej, &nums_gej, &secp256k1_ge_const_g, NULL);
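An aside on why the two asserted calls above cannot fail for this input (my reading, not part of the commit): secp256k1_fe_set_b32 rejects only encodings of values >= p, and every byte of the 32-character ASCII string is below 0x80 while p begins with 0xFF, so the first check always passes; the second depends on this particular x happening to lie on the curve, which holds for this constant. A standalone sanity check of the first claim:

#include <stdio.h>

int main(void) {
    /* Same nothing-up-my-sleeve string as above: 32 chars plus a NUL. */
    static const unsigned char nums_b32[33] = "The scalar for this x is unknown";
    /* p = 2^256 - 2^32 - 977 starts with byte 0xFF; 'T' is 0x54, so the
     * big-endian value encoded by the string is comfortably below p. */
    printf("leading byte 0x%02X < 0xFF => value < p\n", nums_b32[0]);
    return nums_b32[0] < 0xFF ? 0 : 1;
}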
@@ -224,6 +224,7 @@ static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a) {
         0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F
     };
     unsigned char b[32];
+    int res;
     secp256k1_fe c = *a;
     secp256k1_fe_normalize_var(&c);
     secp256k1_fe_get_b32(b, &c);
@@ -231,7 +232,9 @@ static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a) {
     secp256k1_num_set_bin(&m, prime, 32);
     secp256k1_num_mod_inverse(&n, &n, &m);
     secp256k1_num_get_bin(b, 32, &n);
-    VERIFY_CHECK(secp256k1_fe_set_b32(r, b));
+    res = secp256k1_fe_set_b32(r, b);
+    (void)res;
+    VERIFY_CHECK(res);
     /* Verify the result is the (unique) valid inverse using non-GMP code. */
     secp256k1_fe_mul(&c, &c, r);
     secp256k1_fe_add(&c, &negone);
@@ -70,6 +70,7 @@ static void secp256k1_num_add_abs(secp256k1_num *r, const secp256k1_num *a, cons
 
 static void secp256k1_num_sub_abs(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) {
     mp_limb_t c = mpn_sub(r->data, a->data, a->limbs, b->data, b->limbs);
+    (void)c;
     VERIFY_CHECK(c == 0);
     r->limbs = a->limbs;
     while (r->limbs > 1 && r->data[r->limbs-1]==0) {
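For readers unfamiliar with GMP's mpn layer: mpn_sub returns the borrow out of the most significant limb, so asserting c == 0 asserts that |a| >= |b| and therefore that the limb-array result is the exact absolute difference. A small sketch (assumes GMP is installed; link with -lgmp):

#include <gmp.h>
#include <stdio.h>

int main(void) {
    mp_limb_t a[2] = {7, 1};   /* a = 1 * 2^GMP_NUMB_BITS + 7 */
    mp_limb_t b[1] = {5};      /* b = 5 */
    mp_limb_t r[2];
    /* mpn_sub requires the first operand to have at least as many limbs
     * as the second; it returns the final borrow. */
    mp_limb_t borrow = mpn_sub(r, a, 2, b, 1);
    printf("borrow = %lu, low limb = %lu\n",
           (unsigned long)borrow, (unsigned long)r[0]);  /* borrow = 0, r[0] = 2 */
    return 0;
}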
@@ -125,6 +126,7 @@ static void secp256k1_num_mod_inverse(secp256k1_num *r, const secp256k1_num *a,
     }
     sn = NUM_LIMBS+1;
     gn = mpn_gcdext(g, r->data, &sn, u, m->limbs, v, m->limbs);
+    (void)gn;
     VERIFY_CHECK(gn == 1);
     VERIFY_CHECK(g[0] == 1);
     r->neg = a->neg ^ m->neg;
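The two assertions after mpn_gcdext say that the extended gcd is exactly 1 (one limb, value 1), which is precisely the condition for the modular inverse to exist. The same fact at GMP's friendlier mpz level (illustrative only, the code above deliberately stays on the mpn layer; link with -lgmp):

#include <gmp.h>
#include <stdio.h>

int main(void) {
    mpz_t g, s, t, a, m;
    mpz_inits(g, s, t, a, m, NULL);
    mpz_set_ui(a, 3);
    mpz_set_ui(m, 7);
    /* g = gcd(a, m) = s*a + t*m; when g == 1, s mod m is a's inverse. */
    mpz_gcdext(g, s, t, a, m);
    mpz_mod(s, s, m);
    gmp_printf("gcd = %Zd, inverse of 3 mod 7 = %Zd\n", g, s);  /* 1 and 5 */
    mpz_clears(g, s, t, a, m, NULL);
    return 0;
}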