Convert lambda splitter to pure scalar code.

This enables the use of the endomorphism optimization without bignum.
Pieter Wuille 2014-12-01 18:22:04 +01:00
parent cc604e9842
commit c35ff1ea44
4 changed files with 65 additions and 71 deletions

.travis.yml (View File)

@@ -19,6 +19,7 @@ env:
   - FIELD=32bit
   - FIELD=32bit ENDOMORPHISM=yes
   - BIGNUM=none
+  - BIGNUM=none ENDOMORPHISM=yes
   - BUILD=distcheck
   - EXTRAFLAGS=CFLAGS=-DDETERMINISTIC
 before_script: ./autogen.sh

configure.ac (View File)

@@ -270,10 +270,7 @@ if test x"$set_field" = x"gmp" || test x"$set_bignum" = x"gmp"; then
 fi
 if test x"$use_endomorphism" = x"yes"; then
-  if test x"$set_bignum" = x"none"; then
-    AC_MSG_ERROR([Cannot use endomorphism optimization without a bignum implementation])
-  fi
-  AC_DEFINE(USE_ENDOMORPHISM, 1, [Define this symbol to use endomorphism])
+  AC_DEFINE(USE_ENDOMORPHISM, 1, [Define this symbol to use endomorphism optimization])
 fi
 AC_MSG_NOTICE([Using field implementation: $set_field])

src/ecmult_impl.h (View File)

@@ -168,10 +168,10 @@ static void secp256k1_ecmult(secp256k1_gej_t *r, const secp256k1_gej_t *a, const
         secp256k1_scalar_split_lambda_var(&na_1, &na_lam, na);
 
         /* build wnaf representation for na_1 and na_lam. */
-        int wnaf_na_1[129]; int bits_na_1 = secp256k1_ecmult_wnaf(wnaf_na_1, &na_1, WINDOW_A);
-        int wnaf_na_lam[129]; int bits_na_lam = secp256k1_ecmult_wnaf(wnaf_na_lam, &na_lam, WINDOW_A);
-        VERIFY_CHECK(bits_na_1 <= 129);
-        VERIFY_CHECK(bits_na_lam <= 129);
+        int wnaf_na_1[130]; int bits_na_1 = secp256k1_ecmult_wnaf(wnaf_na_1, &na_1, WINDOW_A);
+        int wnaf_na_lam[130]; int bits_na_lam = secp256k1_ecmult_wnaf(wnaf_na_lam, &na_lam, WINDOW_A);
+        VERIFY_CHECK(bits_na_1 <= 130);
+        VERIFY_CHECK(bits_na_lam <= 130);
         int bits = bits_na_1;
         if (bits_na_lam > bits) bits = bits_na_lam;
 #else
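
Why the buffers grow from 129 to 130: a width-w NAF of a b-bit value can need b + 1 digits, and the pure-scalar splitter introduced below appears to be budgeted one extra bit for its halves compared with the exact bignum division it replaces. A standalone toy sketch of the length bound (the helper `toy_wnaf` is hypothetical, not the library's `secp256k1_ecmult_wnaf`):

```c
#include <assert.h>
#include <stdio.h>

/* Hypothetical toy wNAF: rewrites k as sum(digits[i] * 2^i) where every
 * nonzero digit is odd and below 2^(w-1) in absolute value. */
static int toy_wnaf(int *digits, unsigned long long k, int w) {
    int len = 0;
    while (k) {
        int d = 0;
        if (k & 1) {
            d = (int)(k & ((1ULL << w) - 1));     /* k mod 2^w */
            if (d > (1 << (w - 1))) d -= 1 << w;  /* pick the odd digit nearest 0 */
            k -= (unsigned long long)d;           /* wraps correctly for negative d */
        }
        digits[len++] = d;
        k >>= 1;
    }
    return len;
}

int main(void) {
    int digits[65];
    unsigned long long k = (1ULL << 60) - 3;  /* a 60-bit value */
    int len = toy_wnaf(digits, k, 5);
    assert(len <= 61);  /* a b-bit input needs at most b+1 digits */
    long long acc = 0;  /* reconstruct k to check correctness */
    for (int i = len - 1; i >= 0; i--) acc = 2 * acc + digits[i];
    assert((unsigned long long)acc == k);
    printf("60-bit input, %d wNAF digits\n", len);
    return 0;
}
```

For this input the bound is tight: the trailing `...101` forces a digit of -3, which pushes a carry to bit 60 and produces exactly 61 digits.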

src/scalar_impl.h (View File)

@@ -29,7 +29,7 @@ typedef struct {
     secp256k1_num_t order;
 #endif
 #ifdef USE_ENDOMORPHISM
-    secp256k1_num_t a1b2, b1, a2, g1, g2;
+    secp256k1_scalar_t minus_lambda, minus_b1, minus_b2, g1, g2;
 #endif
 } secp256k1_scalar_consts_t;
@@ -52,20 +52,30 @@ static void secp256k1_scalar_start(void) {
     secp256k1_num_set_bin(&ret->order, secp256k1_scalar_consts_order, sizeof(secp256k1_scalar_consts_order));
 #endif
 #ifdef USE_ENDOMORPHISM
-    static const unsigned char secp256k1_scalar_consts_a1b2[] = {
-        0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,
-        0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15
-    };
-    static const unsigned char secp256k1_scalar_consts_b1[] = {
-        0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28,
-        0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3
-    };
-    static const unsigned char secp256k1_scalar_consts_a2[] = {
-        0x01,
-        0x14,0xca,0x50,0xf7,0xa8,0xe2,0xf3,0xf6,
-        0x57,0xc1,0x10,0x8d,0x9d,0x44,0xcf,0xd8
+    /**
+     * Lambda is a scalar which has the property for secp256k1 that point multiplication by
+     * it is efficiently computable (see secp256k1_gej_mul_lambda). */
+    static const unsigned char secp256k1_scalar_consts_lambda[32] = {
+        0x53,0x63,0xad,0x4c,0xc0,0x5c,0x30,0xe0,
+        0xa5,0x26,0x1c,0x02,0x88,0x12,0x64,0x5a,
+        0x12,0x2e,0x22,0xea,0x20,0x81,0x66,0x78,
+        0xdf,0x02,0x96,0x7c,0x1b,0x23,0xbd,0x72
     };
+    /**
+     * "Guide to Elliptic Curve Cryptography" (Hankerson, Menezes, Vanstone) gives an algorithm
+     * (algorithm 3.74) to find k1 and k2 given k, such that k1 + k2 * lambda == k mod n, and k1
+     * and k2 have a small size.
+     * It relies on constants a1, b1, a2, b2. These constants for the value of lambda above are:
+     *
+     * - a1 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
+     * - b1 = -{0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28,0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3}
+     * - a2 = {0x01,0x14,0xca,0x50,0xf7,0xa8,0xe2,0xf3,0xf6,0x57,0xc1,0x10,0x8d,0x9d,0x44,0xcf,0xd8}
+     * - b2 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
+     *
+     * The algorithm then computes c1 = round(b1 * k / n) and c2 = round(b2 * k / n), and gives
+     * k1 = k - (c1*a1 + c2*a2) and k2 = -(c1*b1 + c2*b2). Instead, we use modular arithmetic, and
+     * compute k1 as k - k2 * lambda, avoiding the need for constants a1 and a2.
+     *
      * g1, g2 are precomputed constants used to replace division with a rounded multiplication
      * when decomposing the scalar for an endomorphism-based point multiplication.
      *
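
For context on why lambda matters at all: on curves of the form y^2 = x^3 + b, mapping (x, y) to (beta*x, y), with beta a cube root of unity in the field, is an endomorphism, and multiplying a point by lambda reduces to that single field multiplication; this is what `secp256k1_gej_mul_lambda`, referenced above, performs on the x coordinate. A toy verification on a miniature field, with every value a hypothetical stand-in for the real parameters:

```c
#include <assert.h>
#include <stdio.h>

int main(void) {
    /* Toy field F_p with p % 3 == 1, so a nontrivial cube root of unity exists. */
    const int p = 103;
    int beta = 0;
    for (int b = 2; b < p; b++) {
        if (b * b % p * b % p == 1) { beta = b; break; }  /* find beta^3 == 1, beta != 1 */
    }
    assert(beta != 0 && beta != 1);
    /* For every point (x, y) on y^2 = x^3 + 7, (beta*x, y) is also on the
     * curve, because (beta*x)^3 = beta^3 * x^3 = x^3. */
    int points = 0;
    for (int x = 0; x < p; x++) {
        for (int y = 0; y < p; y++) {
            if (y * y % p == (x * x % p * x % p + 7) % p) {
                int bx = beta * x % p;
                assert(y * y % p == (bx * bx % p * bx % p + 7) % p);
                points++;
            }
        }
    }
    printf("beta = %d; endomorphism verified on %d affine points\n", beta, points);
    return 0;
}
```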
@@ -82,21 +92,38 @@ static void secp256k1_scalar_start(void) {
      * (Note that 'd' is also equal to the curve order here because [a1,b1] and [a2,b2] are found
      * as outputs of the Extended Euclidean Algorithm on inputs 'order' and 'lambda').
      */
-    static const unsigned char secp256k1_scalar_consts_g1[] = {
-        0x30,0x86,
+    static const unsigned char secp256k1_scalar_consts_minus_b1[32] = {
+        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+        0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28,
+        0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3
+    };
+    static const unsigned char secp256k1_scalar_consts_b2[32] = {
+        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+        0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,
+        0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15
+    };
+    static const unsigned char secp256k1_scalar_consts_g1[32] = {
+        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+        0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x86,
         0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,
         0x90,0xe4,0x92,0x84,0xeb,0x15,0x3d,0xab
     };
-    static const unsigned char secp256k1_scalar_consts_g2[] = {
-        0xe4,0x43,
+    static const unsigned char secp256k1_scalar_consts_g2[32] = {
+        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+        0x00,0x00,0x00,0x00,0x00,0x00,0xe4,0x43,
         0x7e,0xd6,0x01,0x0e,0x88,0x28,0x6f,0x54,
         0x7f,0xa9,0x0a,0xbf,0xe4,0xc4,0x22,0x12
     };
-    secp256k1_num_set_bin(&ret->a1b2, secp256k1_scalar_consts_a1b2, sizeof(secp256k1_scalar_consts_a1b2));
-    secp256k1_num_set_bin(&ret->a2, secp256k1_scalar_consts_a2, sizeof(secp256k1_scalar_consts_a2));
-    secp256k1_num_set_bin(&ret->b1, secp256k1_scalar_consts_b1, sizeof(secp256k1_scalar_consts_b1));
-    secp256k1_num_set_bin(&ret->g1, secp256k1_scalar_consts_g1, sizeof(secp256k1_scalar_consts_g1));
-    secp256k1_num_set_bin(&ret->g2, secp256k1_scalar_consts_g2, sizeof(secp256k1_scalar_consts_g2));
+    secp256k1_scalar_set_b32(&ret->minus_lambda, secp256k1_scalar_consts_lambda, NULL);
+    secp256k1_scalar_negate(&ret->minus_lambda, &ret->minus_lambda);
+    secp256k1_scalar_set_b32(&ret->minus_b1, secp256k1_scalar_consts_minus_b1, NULL);
+    secp256k1_scalar_set_b32(&ret->minus_b2, secp256k1_scalar_consts_b2, NULL);
+    secp256k1_scalar_negate(&ret->minus_b2, &ret->minus_b2);
+    secp256k1_scalar_set_b32(&ret->g1, secp256k1_scalar_consts_g1, NULL);
+    secp256k1_scalar_set_b32(&ret->g2, secp256k1_scalar_consts_g2, NULL);
 #endif
     /* Set the global pointer. */
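
These constants let the splitter below avoid division entirely: g1 and g2 act as fixed-point reciprocals, on the order of round(2^272 * b / d) for the relevant b (the exact formulas sit in the comment lines elided above), so `secp256k1_scalar_mul_shift_var(&c, k, &g, 272)` yields round(k * b / d) from one multiplication and one shift. A scaled-down sketch, with hypothetical 64-bit stand-ins for the 256-bit values, of why the precomputed-reciprocal rounding is exact:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    /* Hypothetical toy parameters, far smaller than the real 256-bit ones: */
    const uint64_t n = 1000003;  /* stand-in for the (odd) group order d */
    const uint64_t b = 12345;    /* stand-in for a constant such as b2   */
    const int m = 46;            /* stand-in for the shift amount 272    */
    /* Precompute g = round(2^m * b / n), the analogue of g1/g2. */
    const uint64_t g = ((b << m) + n / 2) / n;
    for (uint64_t k = 0; k < n; k++) {
        uint64_t exact  = (2 * k * b + n) / (2 * n);         /* round(k*b/n)     */
        uint64_t approx = (k * g + (1ULL << (m - 1))) >> m;  /* multiply + shift */
        assert(exact == approx);  /* the rounded multiplication replaces the division */
    }
    printf("round(k*b/n) == round(k*g/2^%d) for every k below n\n", m);
    return 0;
}
```

The identity holds because the error contributed by rounding g is at most k/2^(m+1), which is far smaller than 1/(2n), the closest that k*b/n + 1/2 can come to an integer; the real code's shift of 272 = 256 + 16 leaves a comparable margin at 256-bit scale.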
@@ -293,47 +320,16 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar_t *r, const secp256k1_
 #ifdef USE_ENDOMORPHISM
 static void secp256k1_scalar_split_lambda_var(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a) {
-    unsigned char b[32];
-    secp256k1_scalar_get_b32(b, a);
-    secp256k1_num_t na;
-    secp256k1_num_set_bin(&na, b, 32);
-    secp256k1_num_t rn1, rn2;
-    const secp256k1_scalar_consts_t *c = secp256k1_scalar_consts;
-    secp256k1_num_t d1, d2, t, one;
-    unsigned char cone[1] = {0x01};
-    secp256k1_num_set_bin(&one, cone, 1);
-    secp256k1_num_mul(&d1, &na, &c->g1);
-    secp256k1_num_shift(&d1, 271);
-    secp256k1_num_add(&d1, &d1, &one);
-    secp256k1_num_shift(&d1, 1);
-    secp256k1_num_mul(&d2, &na, &c->g2);
-    secp256k1_num_shift(&d2, 271);
-    secp256k1_num_add(&d2, &d2, &one);
-    secp256k1_num_shift(&d2, 1);
-    secp256k1_num_mul(&t, &d1, &c->a1b2);
-    secp256k1_num_sub(&rn1, &na, &t);
-    secp256k1_num_mul(&t, &d2, &c->a2);
-    secp256k1_num_sub(&rn1, &rn1, &t);
-    secp256k1_num_mul(&rn2, &d1, &c->b1);
-    secp256k1_num_mul(&t, &d2, &c->a1b2);
-    secp256k1_num_sub(&rn2, &rn2, &t);
-    secp256k1_num_get_bin(b, 32, &rn1);
-    secp256k1_scalar_set_b32(r1, b, NULL);
-    if (secp256k1_num_is_neg(&rn1)) {
-        secp256k1_scalar_negate(r1, r1);
-    }
-    secp256k1_num_get_bin(b, 32, &rn2);
-    secp256k1_scalar_set_b32(r2, b, NULL);
-    if (secp256k1_num_is_neg(&rn2)) {
-        secp256k1_scalar_negate(r2, r2);
-    }
+    VERIFY_CHECK(r1 != a);
+    VERIFY_CHECK(r2 != a);
+    secp256k1_scalar_t c1, c2;
+    secp256k1_scalar_mul_shift_var(&c1, a, &secp256k1_scalar_consts->g1, 272);
+    secp256k1_scalar_mul_shift_var(&c2, a, &secp256k1_scalar_consts->g2, 272);
+    secp256k1_scalar_mul(&c1, &c1, &secp256k1_scalar_consts->minus_b1);
+    secp256k1_scalar_mul(&c2, &c2, &secp256k1_scalar_consts->minus_b2);
+    secp256k1_scalar_add(r2, &c1, &c2);
+    secp256k1_scalar_mul(r1, r2, &secp256k1_scalar_consts->minus_lambda);
+    secp256k1_scalar_add(r1, r1, a);
 }
 #endif
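
End to end, the new splitter is algorithm 3.74 with the divisions replaced as above: compute c1 and c2 by rounded multiplication, then k2 = -(c1*b1 + c2*b2) and k1 = k - k2*lambda (mod n). A miniature self-contained sketch over toy numbers (n = 103 and lambda = 56 satisfy lambda^2 + lambda + 1 == 0 mod n, and the short vectors come from the extended Euclidean algorithm exactly as the comment describes; none of these values are the curve's):

```c
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* round(p/q) for q > 0, rounding halves away from zero */
static long rnd_div(long p, long q) {
    return (p >= 0) ? (p + q / 2) / q : -((-p + q / 2) / q);
}

int main(void) {
    const long n = 103, lambda = 56;  /* toy: 56^2 + 56 + 1 == 0 (mod 103) */
    assert((lambda * lambda + lambda + 1) % n == 0);
    /* Short vectors from the EEA on (n, lambda): a_i + b_i*lambda == 0 (mod n)
     * and a1*b2 - a2*b1 == n. Signs follow HMV Algorithm 3.74; the comment in
     * the diff folds them into its minus_b1/minus_b2 constants instead. */
    const long a1 = 9, b1 = -2, a2 = 2, b2 = 11;
    assert((a1 + b1 * lambda) % n == 0 && (a2 + b2 * lambda) % n == 0);
    for (long k = 0; k < n; k++) {
        long c1 = rnd_div(b2 * k, n);
        long c2 = rnd_div(-b1 * k, n);
        long k1 = k - c1 * a1 - c2 * a2;
        long k2 = -c1 * b1 - c2 * b2;
        /* the decomposition is valid and both halves are ~sqrt(n)-sized */
        assert(((k1 + k2 * lambda) % n + n) % n == k);
        assert(labs(k1) * labs(k1) < 2 * n && labs(k2) * labs(k2) < 2 * n);
        /* the commit's shortcut: k1 is just k - k2*lambda reduced mod n */
        assert(((k1 % n) + n) % n == ((k - k2 * lambda) % n + n) % n);
    }
    printf("GLV split verified for all k modulo %ld\n", n);
    return 0;
}
```

At 256-bit scale the same shortcut is what makes the pure scalar version possible: k1 never needs the a1/a2 constants or any signed bignum, only the modular operations the scalar type already provides.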