Move secp256k1_scalar_{inverse{_var},is_even} to per-impl files

This temporarily duplicates the inversion code across the 4x64 and 8x32
implementations. Those implementations will be replaced in a later commit.
Pieter Wuille 2020-10-11 15:30:37 -07:00
parent 08d54964e5
commit aa404d53be
4 changed files with 373 additions and 191 deletions
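
The inversion code being duplicated below computes x^(-1) as x^(n-2) mod n, where n is the prime group order, via a fixed chain of squarings and multiplications so the amount of work does not depend on the input. A minimal sketch of that underlying idea only, not the library's code: the toy modulus and the names TOY_ORDER, mulmod and powmod are invented for illustration, and the square-and-multiply loop here is variable-time, unlike the fixed chain in the diff.

#include <stdint.h>
#include <stdio.h>

/* Small prime standing in for the group order n; the real order is 256 bits. */
#define TOY_ORDER 1000003u

static uint32_t mulmod(uint32_t a, uint32_t b) {
    return (uint32_t)(((uint64_t)a * b) % TOY_ORDER);  /* widen to avoid overflow */
}

/* Square-and-multiply exponentiation; the library uses a hand-tuned addition
 * chain instead so that the operation count is fixed. */
static uint32_t powmod(uint32_t base, uint32_t exp) {
    uint32_t result = 1;
    while (exp > 0) {
        if (exp & 1) result = mulmod(result, base);
        base = mulmod(base, base);
        exp >>= 1;
    }
    return result;
}

int main(void) {
    uint32_t x = 12345;
    uint32_t inv = powmod(x, TOY_ORDER - 2);  /* Fermat: x^(n-2) == x^(-1) mod n */
    printf("%u\n", mulmod(x, inv));           /* prints 1 */
    return 0;
}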


@@ -955,4 +955,183 @@ static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const se
r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1);
}
static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
secp256k1_scalar *t;
int i;
/* First compute xN as x ^ (2^N - 1) for some values of N,
* and uM as x ^ M for some values of M. */
secp256k1_scalar x2, x3, x6, x8, x14, x28, x56, x112, x126;
secp256k1_scalar u2, u5, u9, u11, u13;
secp256k1_scalar_sqr(&u2, x);
secp256k1_scalar_mul(&x2, &u2, x);
secp256k1_scalar_mul(&u5, &u2, &x2);
secp256k1_scalar_mul(&x3, &u5, &u2);
secp256k1_scalar_mul(&u9, &x3, &u2);
secp256k1_scalar_mul(&u11, &u9, &u2);
secp256k1_scalar_mul(&u13, &u11, &u2);
secp256k1_scalar_sqr(&x6, &u13);
secp256k1_scalar_sqr(&x6, &x6);
secp256k1_scalar_mul(&x6, &x6, &u11);
secp256k1_scalar_sqr(&x8, &x6);
secp256k1_scalar_sqr(&x8, &x8);
secp256k1_scalar_mul(&x8, &x8, &x2);
secp256k1_scalar_sqr(&x14, &x8);
for (i = 0; i < 5; i++) {
secp256k1_scalar_sqr(&x14, &x14);
}
secp256k1_scalar_mul(&x14, &x14, &x6);
secp256k1_scalar_sqr(&x28, &x14);
for (i = 0; i < 13; i++) {
secp256k1_scalar_sqr(&x28, &x28);
}
secp256k1_scalar_mul(&x28, &x28, &x14);
secp256k1_scalar_sqr(&x56, &x28);
for (i = 0; i < 27; i++) {
secp256k1_scalar_sqr(&x56, &x56);
}
secp256k1_scalar_mul(&x56, &x56, &x28);
secp256k1_scalar_sqr(&x112, &x56);
for (i = 0; i < 55; i++) {
secp256k1_scalar_sqr(&x112, &x112);
}
secp256k1_scalar_mul(&x112, &x112, &x56);
secp256k1_scalar_sqr(&x126, &x112);
for (i = 0; i < 13; i++) {
secp256k1_scalar_sqr(&x126, &x126);
}
secp256k1_scalar_mul(&x126, &x126, &x14);
/* Then accumulate the final result (t starts at x126). */
t = &x126;
for (i = 0; i < 3; i++) {
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u5); /* 101 */
for (i = 0; i < 4; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 4; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u5); /* 101 */
for (i = 0; i < 5; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u11); /* 1011 */
for (i = 0; i < 4; i++) {
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u11); /* 1011 */
for (i = 0; i < 4; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 5; i++) { /* 00 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 6; i++) { /* 00 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u13); /* 1101 */
for (i = 0; i < 4; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u5); /* 101 */
for (i = 0; i < 3; i++) {
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 5; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u9); /* 1001 */
for (i = 0; i < 6; i++) { /* 000 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u5); /* 101 */
for (i = 0; i < 10; i++) { /* 0000000 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 4; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 9; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x8); /* 11111111 */
for (i = 0; i < 5; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u9); /* 1001 */
for (i = 0; i < 6; i++) { /* 00 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u11); /* 1011 */
for (i = 0; i < 4; i++) {
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u13); /* 1101 */
for (i = 0; i < 5; i++) {
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x2); /* 11 */
for (i = 0; i < 6; i++) { /* 00 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u13); /* 1101 */
for (i = 0; i < 10; i++) { /* 000000 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u13); /* 1101 */
for (i = 0; i < 4; i++) {
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u9); /* 1001 */
for (i = 0; i < 6; i++) { /* 00000 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, x); /* 1 */
for (i = 0; i < 8; i++) { /* 00 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(r, t, &x6); /* 111111 */
}
static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
#if defined(USE_SCALAR_INV_BUILTIN)
secp256k1_scalar_inverse(r, x);
#elif defined(USE_SCALAR_INV_NUM)
unsigned char b[32];
secp256k1_num n, m;
secp256k1_scalar t = *x;
secp256k1_scalar_get_b32(b, &t);
secp256k1_num_set_bin(&n, b, 32);
secp256k1_scalar_order_get_num(&m);
secp256k1_num_mod_inverse(&n, &n, &m);
secp256k1_num_get_bin(b, 32, &n);
secp256k1_scalar_set_b32(r, b, NULL);
/* Verify that the inverse was computed correctly, without GMP code. */
secp256k1_scalar_mul(&t, &t, r);
CHECK(secp256k1_scalar_is_one(&t));
#else
#error "Please select scalar inverse implementation"
#endif
}
SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
return !(a->d[0] & 1);
}
#endif /* SECP256K1_SCALAR_REPR_IMPL_H */


@@ -731,4 +731,183 @@ static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const se
r->d[7] = (r->d[7] & mask0) | (a->d[7] & mask1);
}
static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
secp256k1_scalar *t;
int i;
/* First compute xN as x ^ (2^N - 1) for some values of N,
* and uM as x ^ M for some values of M. */
secp256k1_scalar x2, x3, x6, x8, x14, x28, x56, x112, x126;
secp256k1_scalar u2, u5, u9, u11, u13;
secp256k1_scalar_sqr(&u2, x);
secp256k1_scalar_mul(&x2, &u2, x);
secp256k1_scalar_mul(&u5, &u2, &x2);
secp256k1_scalar_mul(&x3, &u5, &u2);
secp256k1_scalar_mul(&u9, &x3, &u2);
secp256k1_scalar_mul(&u11, &u9, &u2);
secp256k1_scalar_mul(&u13, &u11, &u2);
secp256k1_scalar_sqr(&x6, &u13);
secp256k1_scalar_sqr(&x6, &x6);
secp256k1_scalar_mul(&x6, &x6, &u11);
secp256k1_scalar_sqr(&x8, &x6);
secp256k1_scalar_sqr(&x8, &x8);
secp256k1_scalar_mul(&x8, &x8, &x2);
secp256k1_scalar_sqr(&x14, &x8);
for (i = 0; i < 5; i++) {
secp256k1_scalar_sqr(&x14, &x14);
}
secp256k1_scalar_mul(&x14, &x14, &x6);
secp256k1_scalar_sqr(&x28, &x14);
for (i = 0; i < 13; i++) {
secp256k1_scalar_sqr(&x28, &x28);
}
secp256k1_scalar_mul(&x28, &x28, &x14);
secp256k1_scalar_sqr(&x56, &x28);
for (i = 0; i < 27; i++) {
secp256k1_scalar_sqr(&x56, &x56);
}
secp256k1_scalar_mul(&x56, &x56, &x28);
secp256k1_scalar_sqr(&x112, &x56);
for (i = 0; i < 55; i++) {
secp256k1_scalar_sqr(&x112, &x112);
}
secp256k1_scalar_mul(&x112, &x112, &x56);
secp256k1_scalar_sqr(&x126, &x112);
for (i = 0; i < 13; i++) {
secp256k1_scalar_sqr(&x126, &x126);
}
secp256k1_scalar_mul(&x126, &x126, &x14);
/* Then accumulate the final result (t starts at x126). */
t = &x126;
for (i = 0; i < 3; i++) {
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u5); /* 101 */
for (i = 0; i < 4; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 4; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u5); /* 101 */
for (i = 0; i < 5; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u11); /* 1011 */
for (i = 0; i < 4; i++) {
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u11); /* 1011 */
for (i = 0; i < 4; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 5; i++) { /* 00 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 6; i++) { /* 00 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u13); /* 1101 */
for (i = 0; i < 4; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u5); /* 101 */
for (i = 0; i < 3; i++) {
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 5; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u9); /* 1001 */
for (i = 0; i < 6; i++) { /* 000 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u5); /* 101 */
for (i = 0; i < 10; i++) { /* 0000000 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 4; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 9; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x8); /* 11111111 */
for (i = 0; i < 5; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u9); /* 1001 */
for (i = 0; i < 6; i++) { /* 00 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u11); /* 1011 */
for (i = 0; i < 4; i++) {
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u13); /* 1101 */
for (i = 0; i < 5; i++) {
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x2); /* 11 */
for (i = 0; i < 6; i++) { /* 00 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u13); /* 1101 */
for (i = 0; i < 10; i++) { /* 000000 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u13); /* 1101 */
for (i = 0; i < 4; i++) {
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u9); /* 1001 */
for (i = 0; i < 6; i++) { /* 00000 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, x); /* 1 */
for (i = 0; i < 8; i++) { /* 00 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(r, t, &x6); /* 111111 */
}
static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
#if defined(USE_SCALAR_INV_BUILTIN)
secp256k1_scalar_inverse(r, x);
#elif defined(USE_SCALAR_INV_NUM)
unsigned char b[32];
secp256k1_num n, m;
secp256k1_scalar t = *x;
secp256k1_scalar_get_b32(b, &t);
secp256k1_num_set_bin(&n, b, 32);
secp256k1_scalar_order_get_num(&m);
secp256k1_num_mod_inverse(&n, &n, &m);
secp256k1_num_get_bin(b, 32, &n);
secp256k1_scalar_set_b32(r, b, NULL);
/* Verify that the inverse was computed correctly, without GMP code. */
secp256k1_scalar_mul(&t, &t, r);
CHECK(secp256k1_scalar_is_one(&t));
#else
#error "Please select scalar inverse implementation"
#endif
}
SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
return !(a->d[0] & 1);
}
#endif /* SECP256K1_SCALAR_REPR_IMPL_H */


@@ -65,197 +65,6 @@ static int secp256k1_scalar_set_b32_seckey(secp256k1_scalar *r, const unsigned c
return (!overflow) & (!secp256k1_scalar_is_zero(r));
}
static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
#if defined(EXHAUSTIVE_TEST_ORDER)
int i;
*r = 0;
for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++)
if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1)
*r = i;
/* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus
* have a composite group order; fix it in exhaustive_tests.c). */
VERIFY_CHECK(*r != 0);
}
#else
secp256k1_scalar *t;
int i;
/* First compute xN as x ^ (2^N - 1) for some values of N,
* and uM as x ^ M for some values of M. */
secp256k1_scalar x2, x3, x6, x8, x14, x28, x56, x112, x126;
secp256k1_scalar u2, u5, u9, u11, u13;
secp256k1_scalar_sqr(&u2, x);
secp256k1_scalar_mul(&x2, &u2, x);
secp256k1_scalar_mul(&u5, &u2, &x2);
secp256k1_scalar_mul(&x3, &u5, &u2);
secp256k1_scalar_mul(&u9, &x3, &u2);
secp256k1_scalar_mul(&u11, &u9, &u2);
secp256k1_scalar_mul(&u13, &u11, &u2);
secp256k1_scalar_sqr(&x6, &u13);
secp256k1_scalar_sqr(&x6, &x6);
secp256k1_scalar_mul(&x6, &x6, &u11);
secp256k1_scalar_sqr(&x8, &x6);
secp256k1_scalar_sqr(&x8, &x8);
secp256k1_scalar_mul(&x8, &x8, &x2);
secp256k1_scalar_sqr(&x14, &x8);
for (i = 0; i < 5; i++) {
secp256k1_scalar_sqr(&x14, &x14);
}
secp256k1_scalar_mul(&x14, &x14, &x6);
secp256k1_scalar_sqr(&x28, &x14);
for (i = 0; i < 13; i++) {
secp256k1_scalar_sqr(&x28, &x28);
}
secp256k1_scalar_mul(&x28, &x28, &x14);
secp256k1_scalar_sqr(&x56, &x28);
for (i = 0; i < 27; i++) {
secp256k1_scalar_sqr(&x56, &x56);
}
secp256k1_scalar_mul(&x56, &x56, &x28);
secp256k1_scalar_sqr(&x112, &x56);
for (i = 0; i < 55; i++) {
secp256k1_scalar_sqr(&x112, &x112);
}
secp256k1_scalar_mul(&x112, &x112, &x56);
secp256k1_scalar_sqr(&x126, &x112);
for (i = 0; i < 13; i++) {
secp256k1_scalar_sqr(&x126, &x126);
}
secp256k1_scalar_mul(&x126, &x126, &x14);
/* Then accumulate the final result (t starts at x126). */
t = &x126;
for (i = 0; i < 3; i++) {
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u5); /* 101 */
for (i = 0; i < 4; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 4; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u5); /* 101 */
for (i = 0; i < 5; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u11); /* 1011 */
for (i = 0; i < 4; i++) {
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u11); /* 1011 */
for (i = 0; i < 4; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 5; i++) { /* 00 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 6; i++) { /* 00 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u13); /* 1101 */
for (i = 0; i < 4; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u5); /* 101 */
for (i = 0; i < 3; i++) {
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 5; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u9); /* 1001 */
for (i = 0; i < 6; i++) { /* 000 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u5); /* 101 */
for (i = 0; i < 10; i++) { /* 0000000 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 4; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 9; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x8); /* 11111111 */
for (i = 0; i < 5; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u9); /* 1001 */
for (i = 0; i < 6; i++) { /* 00 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u11); /* 1011 */
for (i = 0; i < 4; i++) {
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u13); /* 1101 */
for (i = 0; i < 5; i++) {
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x2); /* 11 */
for (i = 0; i < 6; i++) { /* 00 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u13); /* 1101 */
for (i = 0; i < 10; i++) { /* 000000 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u13); /* 1101 */
for (i = 0; i < 4; i++) {
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &u9); /* 1001 */
for (i = 0; i < 6; i++) { /* 00000 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, x); /* 1 */
for (i = 0; i < 8; i++) { /* 00 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(r, t, &x6); /* 111111 */
}
SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
return !(a->d[0] & 1);
}
#endif
static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
#if defined(USE_SCALAR_INV_BUILTIN)
secp256k1_scalar_inverse(r, x);
#elif defined(USE_SCALAR_INV_NUM)
unsigned char b[32];
secp256k1_num n, m;
secp256k1_scalar t = *x;
secp256k1_scalar_get_b32(b, &t);
secp256k1_num_set_bin(&n, b, 32);
secp256k1_scalar_order_get_num(&m);
secp256k1_num_mod_inverse(&n, &n, &m);
secp256k1_num_get_bin(b, 32, &n);
secp256k1_scalar_set_b32(r, b, NULL);
/* Verify that the inverse was computed correctly, without GMP code. */
secp256k1_scalar_mul(&t, &t, r);
CHECK(secp256k1_scalar_is_one(&t));
#else
#error "Please select scalar inverse implementation"
#endif
}
/* These parameters are generated using sage/gen_exhaustive_groups.sage. */
#if defined(EXHAUSTIVE_TEST_ORDER)
# if EXHAUSTIVE_TEST_ORDER == 13


@@ -125,4 +125,19 @@ static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const se
*r = (*r & mask0) | (*a & mask1);
}
static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
int i;
*r = 0;
for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++)
if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1)
*r = i;
/* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus
* have a composite group order; fix it in exhaustive_tests.c). */
VERIFY_CHECK(*r != 0);
}
static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
secp256k1_scalar_inverse(r, x);
}
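/* Illustrative sketch, not part of this commit: the brute-force inverse above
 * as a standalone program. It assumes a group order of 13 (the default
 * exhaustive-test order referenced elsewhere in this commit); TEST_ORDER,
 * toy_inverse and the plain unsigned type are simplifications made up for
 * this example. */
#include <stdio.h>

#define TEST_ORDER 13

static unsigned toy_inverse(unsigned x) {
    unsigned i, r = 0;
    /* Scan every candidate; exactly one i satisfies i*x == 1 (mod 13) for x != 0. */
    for (i = 0; i < TEST_ORDER; i++) {
        if ((i * x) % TEST_ORDER == 1) r = i;
    }
    return r;
}

int main(void) {
    unsigned x = 4;
    unsigned r = toy_inverse(x);  /* r == 10, since 4 * 10 == 40 == 3*13 + 1 */
    printf("(%u * %u) %% %u == %u\n", x, r, TEST_ORDER, (x * r) % TEST_ORDER);
    return 0;
}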
#endif /* SECP256K1_SCALAR_REPR_IMPL_H */