diff --git a/src/bench_internal.c b/src/bench_internal.c
index 7809f5f..d24ddc2 100644
--- a/src/bench_internal.c
+++ b/src/bench_internal.c
@@ -227,6 +227,15 @@ void bench_group_add_affine_var(void* arg) {
     }
 }
 
+void bench_group_jacobi_var(void* arg) {
+    int i;
+    bench_inv_t *data = (bench_inv_t*)arg;
+
+    for (i = 0; i < 20000; i++) {
+        secp256k1_gej_has_quad_y_var(&data->gej_x);
+    }
+}
+
 void bench_ecmult_wnaf(void* arg) {
     int i;
     bench_inv_t *data = (bench_inv_t*)arg;
@@ -299,6 +308,21 @@ void bench_context_sign(void* arg) {
     }
 }
 
+#ifndef USE_NUM_NONE
+void bench_num_jacobi(void* arg) {
+    int i;
+    bench_inv_t *data = (bench_inv_t*)arg;
+    secp256k1_num nx, norder;
+
+    secp256k1_scalar_get_num(&nx, &data->scalar_x);
+    /* Use the (odd) group order as the modulus; secp256k1_num_jacobi requires b to be positive and odd. */
+    secp256k1_scalar_order_get_num(&norder);
+
+    for (i = 0; i < 200000; i++) {
+        secp256k1_num_jacobi(&nx, &norder);
+    }
+}
+#endif
 
 int have_flag(int argc, char** argv, char *flag) {
     char** argm = argv + argc;
@@ -339,6 +363,7 @@ int main(int argc, char **argv) {
     if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_var", bench_group_add_var, bench_setup, NULL, &data, 10, 200000);
     if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine", bench_group_add_affine, bench_setup, NULL, &data, 10, 200000);
     if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine_var", bench_group_add_affine_var, bench_setup, NULL, &data, 10, 200000);
+    if (have_flag(argc, argv, "group") || have_flag(argc, argv, "jacobi")) run_benchmark("group_jacobi_var", bench_group_jacobi_var, bench_setup, NULL, &data, 10, 20000);
 
     if (have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("wnaf_const", bench_wnaf_const, bench_setup, NULL, &data, 10, 20000);
     if (have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("ecmult_wnaf", bench_ecmult_wnaf, bench_setup, NULL, &data, 10, 20000);
@@ -350,5 +375,8 @@ int main(int argc, char **argv) {
     if (have_flag(argc, argv, "context") || have_flag(argc, argv, "verify")) run_benchmark("context_verify", bench_context_verify, bench_setup, NULL, &data, 10, 20);
     if (have_flag(argc, argv, "context") || have_flag(argc, argv, "sign")) run_benchmark("context_sign", bench_context_sign, bench_setup, NULL, &data, 10, 200);
 
+#ifndef USE_NUM_NONE
+    if (have_flag(argc, argv, "num") || have_flag(argc, argv, "jacobi")) run_benchmark("num_jacobi", bench_num_jacobi, bench_setup, NULL, &data, 10, 200000);
+#endif
     return 0;
 }
diff --git a/src/field.h b/src/field.h
index 2d52af5..47d4685 100644
--- a/src/field.h
+++ b/src/field.h
@@ -94,6 +94,9 @@ static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a);
  *  itself. */
 static int secp256k1_fe_sqrt_var(secp256k1_fe *r, const secp256k1_fe *a);
 
+/** Checks whether a field element is a quadratic residue. */
+static int secp256k1_fe_is_quad_var(const secp256k1_fe *a);
+
 /** Sets a field element to be the (modular) inverse of another. Requires the input's magnitude to be
  *  at most 8. The output magnitude is 1 (but not guaranteed to be normalized). */
 static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *a);
diff --git a/src/field_impl.h b/src/field_impl.h
index 77f4aae..00ba6c8 100644
--- a/src/field_impl.h
+++ b/src/field_impl.h
@@ -280,4 +280,29 @@ static void secp256k1_fe_inv_all_var(size_t len, secp256k1_fe *r, const secp256k
     r[0] = u;
 }
 
+static int secp256k1_fe_is_quad_var(const secp256k1_fe *a) {
+#ifndef USE_NUM_NONE
+    unsigned char b[32];
+    secp256k1_num n;
+    secp256k1_num m;
+    /* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
+    static const unsigned char prime[32] = {
+        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+        0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F
+    };
+
+    secp256k1_fe c = *a;
+    secp256k1_fe_normalize_var(&c);
+    secp256k1_fe_get_b32(b, &c);
+    secp256k1_num_set_bin(&n, b, 32);
+    secp256k1_num_set_bin(&m, prime, 32);
+    return secp256k1_num_jacobi(&n, &m) >= 0;
+#else
+    secp256k1_fe r;
+    return secp256k1_fe_sqrt_var(&r, a) == 1;
+#endif
+}
+
 #endif
diff --git a/src/group.h b/src/group.h
index ebfe1ca..852bb64 100644
--- a/src/group.h
+++ b/src/group.h
@@ -94,6 +94,9 @@ static void secp256k1_gej_neg(secp256k1_gej *r, const secp256k1_gej *a);
 /** Check whether a group element is the point at infinity. */
 static int secp256k1_gej_is_infinity(const secp256k1_gej *a);
 
+/** Check whether a group element's y coordinate is a quadratic residue. */
+static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a);
+
 /** Set r equal to the double of a. If rzr is not-NULL, r->z = a->z * *rzr (where infinity means an implicit z = 0).
  *  a may not be zero. Constant time. */
 static void secp256k1_gej_double_nonzero(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr);
diff --git a/src/group_impl.h b/src/group_impl.h
index afb1470..f903685 100644
--- a/src/group_impl.h
+++ b/src/group_impl.h
@@ -629,4 +629,18 @@ static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a) {
 }
 #endif
 
+static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a) {
+    secp256k1_fe yz;
+
+    if (a->infinity) {
+        return 0;
+    }
+
+    /* We rely on the fact that the Jacobi symbol of 1 / a->z^3 is the same as
+     * that of a->z. Thus a->y / a->z^3 is a quadratic residue iff a->y * a->z
+     * is. */
+    secp256k1_fe_mul(&yz, &a->y, &a->z);
+    return secp256k1_fe_is_quad_var(&yz);
+}
+
 #endif
diff --git a/src/num.h b/src/num.h
index ebfa71e..7bb9c5b 100644
--- a/src/num.h
+++ b/src/num.h
@@ -32,6 +32,9 @@ static void secp256k1_num_set_bin(secp256k1_num *r, const unsigned char *a, unsi
 /** Compute a modular inverse. The input must be less than the modulus. */
 static void secp256k1_num_mod_inverse(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *m);
 
+/** Compute the Jacobi symbol (a|b). b must be positive and odd. */
+static int secp256k1_num_jacobi(const secp256k1_num *a, const secp256k1_num *b);
+
 /** Compare the absolute value of two numbers. */
 static int secp256k1_num_cmp(const secp256k1_num *a, const secp256k1_num *b);
 
@@ -57,6 +60,9 @@ static void secp256k1_num_shift(secp256k1_num *r, int bits);
 /** Check whether a number is zero. */
 static int secp256k1_num_is_zero(const secp256k1_num *a);
 
+/** Check whether a number is one. */
+static int secp256k1_num_is_one(const secp256k1_num *a);
+
 /** Check whether a number is strictly negative. */
 static int secp256k1_num_is_neg(const secp256k1_num *a);
 
diff --git a/src/num_gmp_impl.h b/src/num_gmp_impl.h
index 7b6a897..3a46495 100644
--- a/src/num_gmp_impl.h
+++ b/src/num_gmp_impl.h
@@ -144,6 +144,32 @@ static void secp256k1_num_mod_inverse(secp256k1_num *r, const secp256k1_num *a,
     memset(v, 0, sizeof(v));
 }
 
+static int secp256k1_num_jacobi(const secp256k1_num *a, const secp256k1_num *b) {
+    int ret;
+    mpz_t ga, gb;
+    secp256k1_num_sanity(a);
+    secp256k1_num_sanity(b);
+    VERIFY_CHECK(!b->neg && (b->limbs > 0) && (b->data[0] & 1));
+
+    mpz_inits(ga, gb, NULL);
+
+    mpz_import(gb, b->limbs, -1, sizeof(mp_limb_t), 0, 0, b->data);
+    mpz_import(ga, a->limbs, -1, sizeof(mp_limb_t), 0, 0, a->data);
+    if (a->neg) {
+        mpz_neg(ga, ga);
+    }
+
+    ret = mpz_jacobi(ga, gb);
+
+    mpz_clears(ga, gb, NULL);
+
+    return ret;
+}
+
+static int secp256k1_num_is_one(const secp256k1_num *a) {
+    return (a->limbs == 1 && a->data[0] == 1);
+}
+
 static int secp256k1_num_is_zero(const secp256k1_num *a) {
     return (a->limbs == 1 && a->data[0] == 0);
 }
diff --git a/src/tests.c b/src/tests.c
index 5300560..424c67d 100644
--- a/src/tests.c
+++ b/src/tests.c
@@ -473,6 +473,8 @@ void test_num_negate(void) {
 }
 
 void test_num_add_sub(void) {
+    int i;
+    secp256k1_scalar s;
     secp256k1_num n1;
     secp256k1_num n2;
     secp256k1_num n1p2, n2p1, n1m2, n2m1;
@@ -498,6 +500,110 @@
     CHECK(!secp256k1_num_eq(&n2p1, &n1));
     secp256k1_num_sub(&n2p1, &n2p1, &n2); /* n2p1 = R2 + R1 - R2 = R1 */
     CHECK(secp256k1_num_eq(&n2p1, &n1));
+
+    /* check is_one */
+    secp256k1_scalar_set_int(&s, 1);
+    secp256k1_scalar_get_num(&n1, &s);
+    CHECK(secp256k1_num_is_one(&n1));
+    /* check that 2^n + 1 is never 1 */
+    secp256k1_scalar_get_num(&n2, &s);
+    for (i = 0; i < 250; ++i) {
+        secp256k1_num_add(&n1, &n1, &n1);   /* n1 *= 2 */
+        secp256k1_num_add(&n1p2, &n1, &n2); /* n1p2 = n1 + 1 */
+        CHECK(!secp256k1_num_is_one(&n1p2));
+    }
+}
+
+void test_num_mod(void) {
+    int i;
+    secp256k1_scalar s;
+    secp256k1_num order, n;
+
+    /* check that 0 mod anything is 0 */
+    random_scalar_order_test(&s);
+    secp256k1_scalar_get_num(&order, &s);
+    secp256k1_scalar_set_int(&s, 0);
+    secp256k1_scalar_get_num(&n, &s);
+    secp256k1_num_mod(&n, &order);
+    CHECK(secp256k1_num_is_zero(&n));
+
+    /* check that anything mod 1 is 0 */
+    secp256k1_scalar_set_int(&s, 1);
+    secp256k1_scalar_get_num(&order, &s);
+    secp256k1_scalar_get_num(&n, &s);
+    secp256k1_num_mod(&n, &order);
+    CHECK(secp256k1_num_is_zero(&n));
+
+    /* check that increasing the number past 2^256 does not break this */
+    random_scalar_order_test(&s);
+    secp256k1_scalar_get_num(&n, &s);
+    /* multiply by 2^8, which'll test this case with high probability */
+    for (i = 0; i < 8; ++i) {
+        secp256k1_num_add(&n, &n, &n);
+    }
+    secp256k1_num_mod(&n, &order);
+    CHECK(secp256k1_num_is_zero(&n));
+}
+
+void test_num_jacobi(void) {
+    secp256k1_scalar sqr;
+    secp256k1_scalar small;
+    secp256k1_scalar five;  /* five is not a quadratic residue */
+    secp256k1_num order, n;
+    int i;
+    /* squares mod 5 are 1, 4 */
+    const int jacobi5[10] = { 0, 1, -1, -1, 1, 0, 1, -1, -1, 1 };
+
+    /* check some small values with 5 as the order */
+    secp256k1_scalar_set_int(&five, 5);
+    secp256k1_scalar_get_num(&order, &five);
+    for (i = 0; i < 10; ++i) {
+        secp256k1_scalar_set_int(&small, i);
+        secp256k1_scalar_get_num(&n, &small);
+        CHECK(secp256k1_num_jacobi(&n, &order) == jacobi5[i]);
+    }
+
+    /* test large values with 5 as group order */
+    secp256k1_scalar_get_num(&order, &five);
+    /* we first need a scalar which is not a multiple of 5 */
+    do {
+        secp256k1_num fiven;
+        random_scalar_order_test(&sqr);
+        secp256k1_scalar_get_num(&fiven, &five);
+        secp256k1_scalar_get_num(&n, &sqr);
+        secp256k1_num_mod(&n, &fiven);
+    } while (secp256k1_num_is_zero(&n));
+    /* next force it to be a residue. 2 is a nonresidue mod 5 so we can
+     * just multiply by two, i.e. add the number to itself */
+    if (secp256k1_num_jacobi(&n, &order) == -1) {
+        secp256k1_num_add(&n, &n, &n);
+    }
+
+    /* test residue */
+    CHECK(secp256k1_num_jacobi(&n, &order) == 1);
+    /* test nonresidue */
+    secp256k1_num_add(&n, &n, &n);
+    CHECK(secp256k1_num_jacobi(&n, &order) == -1);
+
+    /* test with secp group order as order */
+    secp256k1_scalar_order_get_num(&order);
+    random_scalar_order_test(&sqr);
+    secp256k1_scalar_sqr(&sqr, &sqr);
+    /* test residue */
+    secp256k1_scalar_get_num(&n, &sqr);
+    CHECK(secp256k1_num_jacobi(&n, &order) == 1);
+    /* test nonresidue */
+    secp256k1_scalar_mul(&sqr, &sqr, &five);
+    secp256k1_scalar_get_num(&n, &sqr);
+    CHECK(secp256k1_num_jacobi(&n, &order) == -1);
+    /* test multiple of the order */
+    CHECK(secp256k1_num_jacobi(&order, &order) == 0);
+
+    /* check one less than the order */
+    secp256k1_scalar_set_int(&small, 1);
+    secp256k1_scalar_get_num(&n, &small);
+    secp256k1_num_sub(&n, &order, &n);
+    CHECK(secp256k1_num_jacobi(&n, &order) == 1); /* sage confirms this is 1 */
 }
 
 void run_num_smalltests(void) {
@@ -505,6 +611,8 @@
     for (i = 0; i < 100*count; i++) {
         test_num_negate();
         test_num_add_sub();
+        test_num_mod();
+        test_num_jacobi();
     }
 }
 #endif
@@ -689,6 +797,10 @@ void scalar_test(void) {
         secp256k1_scalar_inverse(&inv, &inv);
         /* Inverting one must result in one. */
         CHECK(secp256k1_scalar_is_one(&inv));
+#ifndef USE_NUM_NONE
+        secp256k1_scalar_get_num(&invnum, &inv);
+        CHECK(secp256k1_num_is_one(&invnum));
+#endif
     }
 }
 
@@ -2067,9 +2179,10 @@ void run_ec_combine(void) {
 void test_group_decompress(const secp256k1_fe* x) {
     /* The input itself, normalized. */
     secp256k1_fe fex = *x;
-    secp256k1_fe tmp;
+    secp256k1_fe fez;
     /* Results of set_xquad_var, set_xo_var(..., 0), set_xo_var(..., 1). */
     secp256k1_ge ge_quad, ge_even, ge_odd;
+    secp256k1_gej gej_quad;
     /* Return values of the above calls. */
     int res_quad, res_even, res_odd;
 
@@ -2101,13 +2214,29 @@ void test_group_decompress(const secp256k1_fe* x) {
     CHECK(secp256k1_fe_equal_var(&ge_odd.x, x));
 
     /* Check that the Y coordinate result in ge_quad is a square. */
-    CHECK(secp256k1_fe_sqrt_var(&tmp, &ge_quad.y));
-    secp256k1_fe_sqr(&tmp, &tmp);
-    CHECK(secp256k1_fe_equal_var(&tmp, &ge_quad.y));
+    CHECK(secp256k1_fe_is_quad_var(&ge_quad.y));
 
     /* Check odd/even Y in ge_odd, ge_even. */
     CHECK(secp256k1_fe_is_odd(&ge_odd.y));
     CHECK(!secp256k1_fe_is_odd(&ge_even.y));
+
+    /* Check secp256k1_gej_has_quad_y_var. */
+    secp256k1_gej_set_ge(&gej_quad, &ge_quad);
+    CHECK(secp256k1_gej_has_quad_y_var(&gej_quad));
+    do {
+        random_fe_test(&fez);
+    } while (secp256k1_fe_is_zero(&fez));
+    secp256k1_gej_rescale(&gej_quad, &fez);
+    CHECK(secp256k1_gej_has_quad_y_var(&gej_quad));
+    secp256k1_gej_neg(&gej_quad, &gej_quad);
+    CHECK(!secp256k1_gej_has_quad_y_var(&gej_quad));
+    do {
+        random_fe_test(&fez);
+    } while (secp256k1_fe_is_zero(&fez));
+    secp256k1_gej_rescale(&gej_quad, &fez);
+    CHECK(!secp256k1_gej_has_quad_y_var(&gej_quad));
+    secp256k1_gej_neg(&gej_quad, &gej_quad);
+    CHECK(secp256k1_gej_has_quad_y_var(&gej_quad));
     }
 }
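The comment in the group_impl.h hunk above compresses the key argument: a->y / a->z^3 differs from a->y * a->z by the factor a->z^4, which is a nonzero square, and multiplying by a square never changes the Jacobi symbol, so the patch can test the cheap product a->y * a->z instead of the affine y coordinate. Below is a minimal standalone sketch that spot-checks this identity with GMP (the same library num_gmp_impl.h wraps); the program, its file name, variable names, and iteration count are illustrative assumptions and not part of this patch.

/* Illustrative only: verify that jacobi(y / z^3 mod p) == jacobi(y * z mod p)
 * for the secp256k1 field prime p, mirroring the reasoning behind
 * secp256k1_gej_has_quad_y_var. Build with: cc check_jacobi.c -lgmp */
#include <assert.h>
#include <gmp.h>

int main(void) {
    /* secp256k1 field prime p = 2^256 - 2^32 - 977 (same constant as in field_impl.h) */
    const char *p_hex =
        "FFFFFFFF" "FFFFFFFF" "FFFFFFFF" "FFFFFFFF"
        "FFFFFFFF" "FFFFFFFF" "FFFFFFFE" "FFFFFC2F";
    mpz_t p, y, z, t, lhs, rhs;
    gmp_randstate_t st;
    int i;

    mpz_init_set_str(p, p_hex, 16);
    mpz_inits(y, z, t, lhs, rhs, NULL);
    gmp_randinit_default(st);

    for (i = 0; i < 1000; i++) {
        /* pick nonzero y and z; the point at infinity is handled separately by the patch */
        do { mpz_urandomm(y, st, p); } while (mpz_sgn(y) == 0);
        do { mpz_urandomm(z, st, p); } while (mpz_sgn(z) == 0);

        /* lhs = y / z^3 mod p: the affine y coordinate of a Jacobian point (., y, z) */
        mpz_powm_ui(t, z, 3, p);
        mpz_invert(t, t, p);
        mpz_mul(lhs, y, t);
        mpz_mod(lhs, lhs, p);

        /* rhs = y * z mod p: the cheaper value the patch feeds to secp256k1_fe_is_quad_var */
        mpz_mul(rhs, y, z);
        mpz_mod(rhs, rhs, p);

        /* y/z^3 = (y*z) * z^-4 and z^-4 is a square, so the Jacobi symbols agree */
        assert(mpz_jacobi(lhs, p) == mpz_jacobi(rhs, p));
    }

    mpz_clears(p, y, z, t, lhs, rhs, NULL);
    gmp_randclear(st);
    return 0;
}

Because p is prime and z is nonzero, jacobi(z) is +1 or -1, so jacobi(z^4) = 1; this is the whole reason the library can avoid computing the modular inverse of z^3 in the variable-time quadratic-residue check.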