Add exhaustive tests for group arithmetic, signing, and ecmult on a small group
Unless you configure with ./configure --enable-exhaustive-tests=no, the build will create a binary ./exhaustive_tests which executes every function possible on a group of small order, obtained by moving to a twist of our curve and locating a generator of small order. It currently defaults to order 13, though by changing some #ifdefs you can select a couple of other orders: 199, which will take forever to run, and 14, which won't work because it is composite. TODO: exhaustive tests for the various modules.
parent 20b8877be1
commit 83836a9547
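
A note on what "exhaustive" buys here (not part of the commit; plain C with no libsecp256k1 types): in a cyclic group of prime order such as 13, every non-identity element generates the whole group, so any point of order 13 found with the sage recipe in the group_impl.h hunk below can serve as the test generator, and every operation can be checked against trivial integer arithmetic mod 13. A composite order such as 14 breaks this, which is related to why the message above rules it out.

    #include <stdio.h>

    /* Minimal sketch: model the test group additively as integers mod n and
     * check whether a chosen element g generates everything.  For prime n = 13
     * any nonzero g works; for composite n = 14 it can fail. */
    static int generates_all(int g, int n) {
        int seen[32] = {0};
        int i, acc = 0;
        for (i = 0; i < n; i++) {
            seen[acc] = 1;
            acc = (acc + g) % n;   /* acc runs through 0, g, 2g, ... (i*g mod n) */
        }
        for (i = 0; i < n; i++) {
            if (!seen[i]) {
                return 0;
            }
        }
        return 1;
    }

    int main(void) {
        printf("g = 5 generates Z/13: %d\n", generates_all(5, 13));  /* prints 1 */
        printf("g = 2 generates Z/14: %d\n", generates_all(2, 14));  /* prints 0: only even residues are reached */
        return 0;
    }
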
@@ -12,9 +12,11 @@ noinst_HEADERS =
 noinst_HEADERS += src/scalar.h
 noinst_HEADERS += src/scalar_4x64.h
 noinst_HEADERS += src/scalar_8x32.h
+noinst_HEADERS += src/scalar_low.h
 noinst_HEADERS += src/scalar_impl.h
 noinst_HEADERS += src/scalar_4x64_impl.h
 noinst_HEADERS += src/scalar_8x32_impl.h
+noinst_HEADERS += src/scalar_low_impl.h
 noinst_HEADERS += src/group.h
 noinst_HEADERS += src/group_impl.h
 noinst_HEADERS += src/num_gmp.h
@@ -150,7 +152,6 @@ $(gen_context_BIN): $(gen_context_OBJECTS)
 
 $(libsecp256k1_la_OBJECTS): src/ecmult_static_context.h
 $(tests_OBJECTS): src/ecmult_static_context.h
-$(exhaustive_tests_OBJECTS): src/ecmult_static_context.h
 $(bench_internal_OBJECTS): src/ecmult_static_context.h
 
 src/ecmult_static_context.h: $(gen_context_BIN)
@@ -78,7 +78,7 @@ static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w) {
 /* Negative numbers will be negated to keep their bit representation below the maximum width */
 flip = secp256k1_scalar_is_high(&s);
 /* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */
-bit = flip ^ (s.d[0] & 1);
+bit = flip ^ !secp256k1_scalar_is_even(&s);
 /* We check for negative one, since adding 2 to it will cause an overflow */
 secp256k1_scalar_negate(&neg_s, &s);
 not_neg_one = !secp256k1_scalar_is_one(&neg_s);
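
The hunk above swaps a direct peek at s.d[0] for the secp256k1_scalar_is_even accessor. For the full-width scalar representations the two are equivalent (the low bit of d[0] is the parity bit), but the exhaustive-test scalar introduced later in this commit is a bare uint32_t with no d member, so only the accessor works for every representation. A stand-alone sketch of that equivalence, using hypothetical stand-in types rather than the real ones:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the two layouts involved: the 8x32
     * representation exposes a d[] array; the exhaustive-test representation
     * (scalar_low, added below) is a bare uint32_t. */
    typedef struct { uint32_t d[8]; } scalar_8x32;
    typedef uint32_t scalar_low;

    static int scalar_8x32_is_even(const scalar_8x32 *a) { return !(a->d[0] & 1); }
    static int scalar_low_is_even(const scalar_low *a) { return !(*a & 1); }

    int main(void) {
        scalar_8x32 s = {{ 7, 0, 0, 0, 0, 0, 0, 0 }};
        scalar_low t = 7;
        int flip = 1;
        /* flip ^ (s.d[0] & 1) and flip ^ !is_even(&s) agree whenever flip is 0 or 1 */
        printf("%d %d\n", flip ^ (int)(s.d[0] & 1), flip ^ !scalar_8x32_is_even(&s));
        /* ...and the accessor form also works for the uint32_t layout */
        printf("%d\n", flip ^ !scalar_low_is_even(&t));
        return 0;
    }
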
@@ -13,20 +13,23 @@
 #include "scalar.h"
 #include "ecmult.h"
 
-#include <string.h>
-
-/* optimal for 128-bit and 256-bit exponents. */
-#define WINDOW_A 5
-
 #if defined(EXHAUSTIVE_TEST_ORDER)
+/* We need to lower these values for exhaustive tests because
+ * the tables cannot have infinities in them (this breaks the
+ * affine-isomorphism stuff which tracks z-ratios) */
 # if EXHAUSTIVE_TEST_ORDER > 128
+# define WINDOW_A 5
 # define WINDOW_G 8
 # elif EXHAUSTIVE_TEST_ORDER > 8
+# define WINDOW_A 4
 # define WINDOW_G 4
 # else
+# define WINDOW_A 2
 # define WINDOW_G 2
 # endif
 #else
+/* optimal for 128-bit and 256-bit exponents. */
+#define WINDOW_A 5
 /** larger numbers may result in slightly better performance, at the cost of
 exponentially larger precomputed tables. */
 #ifdef USE_ENDOMORPHISM
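
Why the window sizes have to shrink: the precomputed table for a window of size w holds roughly 2^(w-2) odd multiples of the base point, the largest being (2^(w-1) - 1)*P, and on a group of order n every one of those multiples must fall short of n or the table would contain the point at infinity, which the comment above says the z-ratio tracking cannot handle. A rough sketch of the constraint (plain C, not library code):

    #include <stdio.h>

    /* Largest window size whose biggest odd multiple, 2^(w-1) - 1, is still
     * smaller than the group order n. */
    static int max_window(int n) {
        int w = 2;
        while ((1 << w) - 1 < n) {
            w++;   /* window w+1 would need the multiple (2^w - 1)*P */
        }
        return w;
    }

    int main(void) {
        printf("order 199: largest usable window %d\n", max_window(199)); /* 8, matching WINDOW_G 8 */
        printf("order 13:  largest usable window %d\n", max_window(13));  /* 4, matching WINDOW_A/G 4 */
        printf("order 3:   largest usable window %d\n", max_window(3));   /* 2, matching WINDOW_A/G 2 */
        return 0;
    }
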
@@ -11,6 +11,31 @@
 #include "field.h"
 #include "group.h"
 
+/* These points can be generated in sage as follows:
+ *
+ * 0. Setup a worksheet with the following parameters.
+ * b = 4 # whatever CURVE_B will be set to
+ * F = FiniteField (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F)
+ * C = EllipticCurve ([F (0), F (b)])
+ *
+ * 1. Determine all the small orders available to you. (If there are
+ * no satisfactory ones, go back and change b.)
+ * print C.order().factor(limit=1000)
+ *
+ * 2. Choose an order as one of the prime factors listed in the above step.
+ * (You can also multiply some to get a composite order, though the
+ * tests will crash trying to invert scalars during signing.) We take a
+ * random point and scale it to drop its order to the desired value.
+ * There is some probability this won't work; just try again.
+ * order = 199
+ * P = C.random_point()
+ * P = (int(P.order()) / int(order)) * P
+ * assert(P.order() == order)
+ *
+ * 3. Print the values. You'll need to use a vim macro or something to
+ * split the hex output into 4-byte chunks.
+ * print "%x %x" % P.xy()
+ */
 #if defined(EXHAUSTIVE_TEST_ORDER)
 # if EXHAUSTIVE_TEST_ORDER == 199
 const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
@@ -19,6 +44,16 @@ const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
 0x78AC123A, 0x5ED8AEF3, 0x8732BC91, 0x1F3A2868,
 0x48DF246C, 0x808DAE72, 0xCFE52572, 0x7F0501ED
 );
+
+const int CURVE_B = 4;
+# elif EXHAUSTIVE_TEST_ORDER == 13
+const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
+0xedc60018, 0xa51a786b, 0x2ea91f4d, 0x4c9416c0,
+0x9de54c3b, 0xa1316554, 0x6cf4345c, 0x7277ef15,
+0x54cb1b6b, 0xdc8c1273, 0x087844ea, 0x43f4603e,
+0x0eaf9a43, 0xf6effe55, 0x939f806d, 0x37adf8ac
+);
+const int CURVE_B = 2;
 # else
 # error No known generator for the specified exhaustive test group order.
 # endif
@@ -32,6 +67,8 @@ static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
 0x483ADA77UL, 0x26A3C465UL, 0x5DA4FBFCUL, 0x0E1108A8UL,
 0xFD17B448UL, 0xA6855419UL, 0x9C47D08FUL, 0xFB10D4B8UL
 );
+
+const int CURVE_B = 7;
 #endif
 
 static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zi) {
@@ -188,7 +225,7 @@ static int secp256k1_ge_set_xquad(secp256k1_ge *r, const secp256k1_fe *x) {
 secp256k1_fe_sqr(&x2, x);
 secp256k1_fe_mul(&x3, x, &x2);
 r->infinity = 0;
-secp256k1_fe_set_int(&c, 7);
+secp256k1_fe_set_int(&c, CURVE_B);
 secp256k1_fe_add(&c, &x3);
 return secp256k1_fe_sqrt(&r->y, &c);
 }
@@ -247,7 +284,7 @@ static int secp256k1_gej_is_valid_var(const secp256k1_gej *a) {
 secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x);
 secp256k1_fe_sqr(&z2, &a->z);
 secp256k1_fe_sqr(&z6, &z2); secp256k1_fe_mul(&z6, &z6, &z2);
-secp256k1_fe_mul_int(&z6, 7);
+secp256k1_fe_mul_int(&z6, CURVE_B);
 secp256k1_fe_add(&x3, &z6);
 secp256k1_fe_normalize_weak(&x3);
 return secp256k1_fe_equal_var(&y2, &x3);
@@ -261,7 +298,7 @@ static int secp256k1_ge_is_valid_var(const secp256k1_ge *a) {
 /* y^2 = x^3 + 7 */
 secp256k1_fe_sqr(&y2, &a->y);
 secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x);
-secp256k1_fe_set_int(&c, 7);
+secp256k1_fe_set_int(&c, CURVE_B);
 secp256k1_fe_add(&x3, &c);
 secp256k1_fe_normalize_weak(&x3);
 return secp256k1_fe_equal_var(&y2, &x3);
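
The three hunks above only swap the hard-coded 7 in y^2 = x^3 + 7 for the new CURVE_B constant, so the same validity check serves the real curve (b = 7) and the two small test groups (b = 4 for order 199, b = 2 for order 13). A toy version of the check over a small prime field, independent of the library:

    #include <stdio.h>

    /* Sketch: point validity on y^2 = x^3 + b over F_p, for small p. */
    static int is_on_curve(int x, int y, int b, int p) {
        int lhs = (y * y) % p;
        int rhs = ((x * x % p) * x + b) % p;
        return lhs == rhs;
    }

    int main(void) {
        /* On y^2 = x^3 + 2 over F_7 the point (3, 1) is valid: 3^3 + 2 = 29 = 1 (mod 7) = 1^2. */
        printf("(3, 1) on y^2 = x^3 + 2 over F_7: %d\n", is_on_curve(3, 1, 2, 7));
        /* ...and (3, 2) is not. */
        printf("(3, 2) on y^2 = x^3 + 2 over F_7: %d\n", is_on_curve(3, 2, 2, 7));
        return 0;
    }
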
@@ -13,7 +13,9 @@
 #include "libsecp256k1-config.h"
 #endif
 
-#if defined(USE_SCALAR_4X64)
+#if defined(EXHAUSTIVE_TEST_ORDER)
+#include "scalar_low.h"
+#elif defined(USE_SCALAR_4X64)
 #include "scalar_4x64.h"
 #elif defined(USE_SCALAR_8X32)
 #include "scalar_8x32.h"
@@ -14,7 +14,9 @@
 #include "libsecp256k1-config.h"
 #endif
 
-#if defined(USE_SCALAR_4X64)
+#if defined(EXHAUSTIVE_TEST_ORDER)
+#include "scalar_low_impl.h"
+#elif defined(USE_SCALAR_4X64)
 #include "scalar_4x64_impl.h"
 #elif defined(USE_SCALAR_8X32)
 #include "scalar_8x32_impl.h"
@@ -31,17 +33,37 @@ static void secp256k1_scalar_get_num(secp256k1_num *r, const secp256k1_scalar *a
 
 /** secp256k1 curve order, see secp256k1_ecdsa_const_order_as_fe in ecdsa_impl.h */
 static void secp256k1_scalar_order_get_num(secp256k1_num *r) {
+#if defined(EXHAUSTIVE_TEST_ORDER)
+static const unsigned char order[32] = {
+0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,EXHAUSTIVE_TEST_ORDER
+};
+#else
 static const unsigned char order[32] = {
 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,
 0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,
 0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41
 };
+#endif
 secp256k1_num_set_bin(r, order, 32);
 }
 #endif
 
 static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
+#if defined(EXHAUSTIVE_TEST_ORDER)
+int i;
+*r = 0;
+for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++)
+if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1)
+*r = i;
+/* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus
+ * have a composite group order; fix it in exhaustive_tests.c). */
+VERIFY_CHECK(*r != 0);
+}
+#else
 secp256k1_scalar *t;
 int i;
 /* First compute x ^ (2^N - 1) for some values of N. */
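
The exhaustive-test branch of secp256k1_scalar_inverse above simply scans all residues for the one whose product with x is 1. A free-standing sketch of the same idea, which also shows why the composite order 14 mentioned in the commit message cannot work:

    #include <stdio.h>
    #include <stdint.h>

    /* Brute-force modular inverse, mirroring the EXHAUSTIVE_TEST_ORDER branch:
     * returns 0 when no inverse exists (only possible for composite n). */
    static uint32_t inverse_mod(uint32_t x, uint32_t n) {
        uint32_t i, r = 0;
        for (i = 0; i < n; i++) {
            if ((i * x) % n == 1) {
                r = i;
            }
        }
        return r;
    }

    int main(void) {
        printf("inverse of 5 mod 13 = %u\n", (unsigned)inverse_mod(5, 13));  /* 8, since 5*8 = 40 = 3*13 + 1 */
        printf("inverse of 7 mod 14 = %u\n", (unsigned)inverse_mod(7, 14));  /* 0: 7 is not invertible mod 14 */
        return 0;
    }
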
@@ -233,9 +255,9 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
-/* d[0] is present and is the lowest word for all representations */
 return !(a->d[0] & 1);
 }
+#endif
 
 static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
 #if defined(USE_SCALAR_INV_BUILTIN)
@@ -259,6 +281,18 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc
 }
 
 #ifdef USE_ENDOMORPHISM
+#if defined(EXHAUSTIVE_TEST_ORDER)
+/**
+ * Find k1 and k2 given k, such that k1 + k2 * lambda == k mod n; unlike in the
+ * full case we don't bother making k1 and k2 be small, we just want them to be
+ * nontrivial to get full test coverage for the exhaustive tests. We therefore
+ * (arbitrarily) set k2 = k + 5 and k1 = k - k2 * lambda.
+ */
+static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
+*r2 = (*a + 5) % EXHAUSTIVE_TEST_ORDER;
+*r1 = (*a + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER;
+}
+#else
 /**
 * The Secp256k1 curve has an endomorphism, where lambda * (x, y) = (beta * x, y), where
 * lambda is {0x53,0x63,0xad,0x4c,0xc0,0x5c,0x30,0xe0,0xa5,0x26,0x1c,0x02,0x88,0x12,0x64,0x5a,
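
The test-only split above keeps the identity k1 + k2*lambda == k (mod n) but drops the requirement that k1 and k2 be small. A quick check of that identity with bare integers, using n = 13 and lambda = 9 as defined in the tests_exhaustive hunks below:

    #include <stdio.h>

    int main(void) {
        const int n = 13, lambda = 9;   /* EXHAUSTIVE_TEST_ORDER and EXHAUSTIVE_TEST_LAMBDA */
        int k;
        for (k = 0; k < n; k++) {
            int k2 = (k + 5) % n;                  /* r2 in the hunk above */
            int k1 = (k + (n - k2) * lambda) % n;  /* r1 = k - k2*lambda mod n */
            if ((k1 + k2 * lambda) % n != k) {
                printf("split failed for k = %d\n", k);
                return 1;
            }
        }
        printf("k1 + k2*lambda == k holds for every k mod %d\n", n);
        return 0;
    }
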
@@ -331,5 +365,6 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar
 secp256k1_scalar_add(r1, r1, a);
 }
 #endif
+#endif
 
 #endif
@@ -0,0 +1,15 @@
+/**********************************************************************
+ * Copyright (c) 2015 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_SCALAR_REPR_
+#define _SECP256K1_SCALAR_REPR_
+
+#include <stdint.h>
+
+/** A scalar modulo the group order of the secp256k1 curve. */
+typedef uint32_t secp256k1_scalar;
+
+#endif
@@ -0,0 +1,114 @@
+/**********************************************************************
+ * Copyright (c) 2015 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_SCALAR_REPR_IMPL_H_
+#define _SECP256K1_SCALAR_REPR_IMPL_H_
+
+#include "scalar.h"
+
+#include <string.h>
+
+SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
+return !(*a & 1);
+}
+
+SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) { *r = 0; }
+SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) { *r = v; }
+
+SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+if (offset < 32)
+return ((*a >> offset) & ((((uint32_t)1) << count) - 1));
+else
+return 0;
+}
+
+SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+return secp256k1_scalar_get_bits(a, offset, count);
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; }
+
+static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+*r = (*a + *b) % EXHAUSTIVE_TEST_ORDER;
+return *r < *b;
+}
+
+static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
+if (flag && bit < 32)
+*r += (1 << bit);
+#ifdef VERIFY
+VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
+#endif
+}
+
+static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
+const int base = 0x100 % EXHAUSTIVE_TEST_ORDER;
+int i;
+*r = 0;
+for (i = 0; i < 32; i++) {
+*r = ((*r * base) + b32[i]) % EXHAUSTIVE_TEST_ORDER;
+}
+/* just deny overflow, it basically always happens */
+if (overflow) *overflow = 0;
+}
+
+static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
+memset(bin, 0, 32);
+bin[28] = *a >> 24; bin[29] = *a >> 16; bin[30] = *a >> 8; bin[31] = *a;
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
+return *a == 0;
+}
+
+static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
+if (*a == 0) {
+*r = 0;
+} else {
+*r = EXHAUSTIVE_TEST_ORDER - *a;
+}
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
+return *a == 1;
+}
+
+static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
+return *a > EXHAUSTIVE_TEST_ORDER / 2;
+}
+
+static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
+if (flag) secp256k1_scalar_negate(r, r);
+return flag ? -1 : 1;
+}
+
+static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+*r = (*a * *b) % EXHAUSTIVE_TEST_ORDER;
+}
+
+static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
+int ret;
+VERIFY_CHECK(n > 0);
+VERIFY_CHECK(n < 16);
+ret = *r & ((1 << n) - 1);
+*r >>= n;
+return ret;
+}
+
+static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) {
+*r = (*a * *a) % EXHAUSTIVE_TEST_ORDER;
+}
+
+static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
+*r1 = *a;
+*r2 = 0;
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
+return *a == *b;
+}
+
+#endif
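
Most of the new scalar_low operations are one-liners on a uint32_t, but secp256k1_scalar_set_b32 deserves a second look: it reduces a 32-byte big-endian number modulo the tiny order with a Horner-style loop, so no wide integer type is ever needed (base = 256 mod n keeps every intermediate product small). A self-contained sketch of the same reduction:

    #include <stdio.h>
    #include <stdint.h>

    /* Interpret 32 big-endian bytes modulo a small order n, as the new
     * secp256k1_scalar_set_b32 does for EXHAUSTIVE_TEST_ORDER. */
    static uint32_t reduce_b32(const unsigned char *b32, uint32_t n) {
        uint32_t base = 0x100 % n;
        uint32_t r = 0;
        int i;
        for (i = 0; i < 32; i++) {
            r = (r * base + b32[i]) % n;   /* r stays below n, so no overflow */
        }
        return r;
    }

    int main(void) {
        unsigned char b[32] = {0};
        b[31] = 27;  /* the 256-bit value 27 */
        printf("27 mod 13 = %u\n", (unsigned)reduce_b32(b, 13));  /* prints 1 */
        return 0;
    }
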
@@ -1,5 +1,5 @@
 /**********************************************************************
- * Copyright (c) 2015 Andrew Poelstra *
+ * Copyright (c) 2016 Andrew Poelstra *
 * Distributed under the MIT software license, see the accompanying *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/
@@ -13,8 +13,12 @@
 
 #include <time.h>
 
+#undef USE_ECMULT_STATIC_PRECOMPUTATION
+
 #ifndef EXHAUSTIVE_TEST_ORDER
-#define EXHAUSTIVE_TEST_ORDER 199
+/* see group_impl.h for allowable values */
+#define EXHAUSTIVE_TEST_ORDER 13
+#define EXHAUSTIVE_TEST_LAMBDA 9 /* cube root of 1 mod 13 */
 #endif
 
 #include "include/secp256k1.h"
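
The new default order is 13, and 9 works as EXHAUSTIVE_TEST_LAMBDA because the endomorphism acts on scalars as multiplication by a nontrivial cube root of unity. A one-loop check, in plain C, that 9 really is such a root mod 13:

    #include <stdio.h>

    int main(void) {
        const int n = 13;
        int x;
        /* find the nontrivial cube roots of 1 mod n; 9 is one (9^3 = 729 = 56*13 + 1) */
        for (x = 2; x < n; x++) {
            if ((x * x * x) % n == 1) {
                printf("%d is a cube root of 1 mod %d\n", x, n);
            }
        }
        return 0;
    }
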
@@ -60,6 +64,37 @@ void random_fe(secp256k1_fe *x) {
 }
 /** END stolen from tests.c */
 
+int secp256k1_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32,
+const unsigned char *key32, const unsigned char *algo16,
+void *data, unsigned int attempt) {
+secp256k1_scalar s;
+int *idata = data;
+(void)msg32;
+(void)key32;
+(void)algo16;
+/* Some nonces cannot be used because they'd cause s and/or r to be zero.
+ * The signing function has retry logic here that just re-calls the nonce
+ * function with an increased `attempt`. So if attempt > 0 this means we
+ * need to change the nonce to avoid an infinite loop. */
+if (attempt > 0) {
+(*idata)++;
+}
+secp256k1_scalar_set_int(&s, *idata);
+secp256k1_scalar_get_b32(nonce32, &s);
+return 1;
+}
+
+#ifdef USE_ENDOMORPHISM
+void test_exhaustive_endomorphism(const secp256k1_ge *group, int order) {
+int i;
+for (i = 0; i < order; i++) {
+secp256k1_ge res;
+secp256k1_ge_mul_lambda(&res, &group[i]);
+ge_equals_ge(&group[i * EXHAUSTIVE_TEST_LAMBDA % EXHAUSTIVE_TEST_ORDER], &res);
+}
+}
+#endif
+
 void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_gej *groupj, int order) {
 int i, j;
 
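
secp256k1_nonce_function_smallint leans on the signing retry loop described in its own comment: whenever the chosen nonce would make r or s zero, the signer calls the nonce function again with attempt incremented, so the function bumps the small integer behind data to move on to the next candidate nonce. A reduced sketch of that contract (the real callback takes more parameters; this keeps only the ones that matter here):

    #include <stdio.h>
    #include <string.h>

    /* Simplified nonce callback: on a retry (attempt > 0), advance the small
     * integer pointed to by data, then encode it as a big-endian nonce. */
    static int smallint_nonce(unsigned char *nonce32, void *data, unsigned int attempt) {
        int *idata = data;
        if (attempt > 0) {
            (*idata)++;
        }
        memset(nonce32, 0, 32);
        nonce32[31] = (unsigned char)*idata;
        return 1;
    }

    int main(void) {
        unsigned char nonce[32];
        int k = 5;
        unsigned int attempt;
        for (attempt = 0; attempt < 3; attempt++) {
            smallint_nonce(nonce, &k, attempt);
            printf("attempt %u -> nonce %d\n", attempt, (int)nonce[31]);
        }
        return 0;
    }
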
@@ -120,26 +155,90 @@ void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_gej *gr
 }
 }
 
-void test_exhaustive_ecmult(secp256k1_context *ctx, secp256k1_ge *group, secp256k1_gej *groupj, int order) {
-int i, j;
-const int r_log = secp256k1_rand32() % order; /* TODO be less biased */
+void test_exhaustive_ecmult(const secp256k1_context *ctx, const secp256k1_ge *group, const secp256k1_gej *groupj, int order) {
+int i, j, r_log;
+for (r_log = 1; r_log < order; r_log++) {
 for (j = 0; j < order; j++) {
 for (i = 0; i < order; i++) {
 secp256k1_gej tmp;
 secp256k1_scalar na, ng;
 secp256k1_scalar_set_int(&na, i);
 secp256k1_scalar_set_int(&ng, j);
 
 secp256k1_ecmult(&ctx->ecmult_ctx, &tmp, &groupj[r_log], &na, &ng);
 ge_equals_gej(&group[(i * r_log + j) % order], &tmp);
 
-/* TODO we cannot exhaustively test ecmult_const as it does a scalar
- * negation for even numbers, and our code is not designed to handle
- * such a small scalar modulus. */
+if (i > 0) {
+secp256k1_ecmult_const(&tmp, &group[i], &ng);
+ge_equals_gej(&group[(i * j) % order], &tmp);
+}
+}
 }
 }
 }
 
+void r_from_k(secp256k1_scalar *r, const secp256k1_ge *group, int k) {
+secp256k1_fe x;
+unsigned char x_bin[32];
+k %= EXHAUSTIVE_TEST_ORDER;
+x = group[k].x;
+secp256k1_fe_normalize(&x);
+secp256k1_fe_get_b32(x_bin, &x);
+secp256k1_scalar_set_b32(r, x_bin, NULL);
+}
+
+/* hee hee hee */
+int solve_discrete_log(const secp256k1_scalar *x_coord, const secp256k1_ge *group, int order) {
+int i;
+for (i = 0; i < order; i++) {
+secp256k1_scalar check_x;
+r_from_k(&check_x, group, i);
+if (*x_coord == check_x) {
+return i;
+}
+}
+return -1;
+}
+
+void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
+int i, j, k;
+
+/* Loop */
+for (i = 1; i < order; i++) { /* message */
+for (j = 1; j < order; j++) { /* key */
+for (k = 1; k < order; k++) { /* nonce */
+secp256k1_ecdsa_signature sig;
+secp256k1_scalar sk, msg, r, s, expected_r;
+unsigned char sk32[32], msg32[32];
+secp256k1_scalar_set_int(&msg, i);
+secp256k1_scalar_set_int(&sk, j);
+secp256k1_scalar_get_b32(sk32, &sk);
+secp256k1_scalar_get_b32(msg32, &msg);
+
+secp256k1_ecdsa_sign(ctx, &sig, msg32, sk32, secp256k1_nonce_function_smallint, &k);
+
+secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig);
+/* Note that we compute expected_r *after* signing -- this is important
+ * because our nonce-computing function function might change k during
+ * signing. */
+r_from_k(&expected_r, group, k);
+CHECK(r == expected_r);
+CHECK((k * s) % order == (i + r * j) % order ||
+(k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);
+}
+}
+}
+
+/* We would like to verify zero-knowledge here by counting how often every
+ * possible (s, r) tuple appears, but because the group order is larger
+ * than the field order, when coercing the x-values to scalar values, some
+ * appear more often than others, so we are actually not zero-knowledge.
+ * (This effect also appears in the real code, but the difference is on the
+ * order of 1/2^128th the field order, so the deviation is not useful to a
+ * computationally bounded attacker.)
+ */
+}
+
 int main(void) {
 int i;
 secp256k1_gej groupj[EXHAUSTIVE_TEST_ORDER];
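
The CHECK at the heart of test_exhaustive_sign is just the ECDSA signing equation with every quantity reduced mod the tiny order: signing computes s = k^-1 * (msg + r*key), so k*s must equal msg + r*key, up to the sign of s because the signer may emit the negated (low-S) value. The same arithmetic with bare integers, under assumed toy values (r would really come from the x-coordinate of k*G, as r_from_k does above):

    #include <stdio.h>

    int main(void) {
        const int n = 13;
        int msg = 3, key = 5, k = 7, r = 4;   /* assumed toy values */
        int kinv, s;
        for (kinv = 1; kinv < n; kinv++) {
            if ((kinv * k) % n == 1) break;   /* k^-1 mod n, brute force as in scalar_low */
        }
        s = (kinv * (msg + r * key)) % n;
        printf("s = %d\n", s);
        printf("k*s mod n = %d, (msg + r*key) mod n = %d\n",
               (k * s) % n, (msg + r * key) % n);   /* the two agree, as the CHECK demands */
        return 0;
    }
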
@@ -151,18 +250,42 @@ int main(void) {
 /* TODO set z = 1, then do num_tests runs with random z values */
 
 /* Generate the entire group */
-secp256k1_ge_set_infinity(&group[0]);
 secp256k1_gej_set_infinity(&groupj[0]);
+secp256k1_ge_set_gej(&group[0], &groupj[0]);
 for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
+/* Set a different random z-value for each Jacobian point */
 secp256k1_fe z;
 random_fe(&z);
 
 secp256k1_gej_add_ge(&groupj[i], &groupj[i - 1], &secp256k1_ge_const_g);
 secp256k1_ge_set_gej(&group[i], &groupj[i]);
 secp256k1_gej_rescale(&groupj[i], &z);
 
+/* Verify against ecmult_gen */
+{
+secp256k1_scalar scalar_i;
+secp256k1_gej generatedj;
+secp256k1_ge generated;
+
+secp256k1_scalar_set_int(&scalar_i, i);
+secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i);
+secp256k1_ge_set_gej(&generated, &generatedj);
+
+CHECK(group[i].infinity == 0);
+CHECK(generated.infinity == 0);
+CHECK(secp256k1_fe_equal_var(&generated.x, &group[i].x));
+CHECK(secp256k1_fe_equal_var(&generated.y, &group[i].y));
+}
 }
 
 /* Run the tests */
+test_exhaustive_sign(ctx, group, EXHAUSTIVE_TEST_ORDER);
+/* cannot exhaustively test verify, since our verify code
+ * depends on the field order being less than twice the
+ * group order */
+#ifdef USE_ENDOMORPHISM
+test_exhaustive_endomorphism(group, EXHAUSTIVE_TEST_ORDER);
+#endif
 test_exhaustive_addition(group, groupj, EXHAUSTIVE_TEST_ORDER);
 test_exhaustive_ecmult(ctx, group, groupj, EXHAUSTIVE_TEST_ORDER);