Add _fe_half and use in _gej_add_ge

- Trades 1 _half for 3 _mul_int and 2 _normalize_weak
- Updated formula and comments in _gej_add_ge
- Added internal benchmark for _fe_half
Peter Dettman 2021-12-06 01:24:30 +07:00
parent d8a2463246
commit 925f78d55e
5 changed files with 172 additions and 20 deletions

src/bench_internal.c

@@ -140,6 +140,15 @@ void bench_scalar_inverse_var(void* arg, int iters) {
     CHECK(j <= iters);
 }
 
+void bench_field_half(void* arg, int iters) {
+    int i;
+    bench_inv *data = (bench_inv*)arg;
+
+    for (i = 0; i < iters; i++) {
+        secp256k1_fe_half(&data->fe[0]);
+    }
+}
+
 void bench_field_normalize(void* arg, int iters) {
     int i;
     bench_inv *data = (bench_inv*)arg;
@@ -354,6 +363,7 @@ int main(int argc, char **argv) {
     if (d || have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse", bench_scalar_inverse, bench_setup, NULL, &data, 10, iters);
     if (d || have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse_var", bench_scalar_inverse_var, bench_setup, NULL, &data, 10, iters);
+    if (d || have_flag(argc, argv, "field") || have_flag(argc, argv, "half")) run_benchmark("field_half", bench_field_half, bench_setup, NULL, &data, 10, iters*100);
     if (d || have_flag(argc, argv, "field") || have_flag(argc, argv, "normalize")) run_benchmark("field_normalize", bench_field_normalize, bench_setup, NULL, &data, 10, iters*100);
     if (d || have_flag(argc, argv, "field") || have_flag(argc, argv, "normalize")) run_benchmark("field_normalize_weak", bench_field_normalize_weak, bench_setup, NULL, &data, 10, iters*100);
     if (d || have_flag(argc, argv, "field") || have_flag(argc, argv, "sqr")) run_benchmark("field_sqr", bench_field_sqr, bench_setup, NULL, &data, 10, iters*10);
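With the flag wiring above, the new benchmark can be run on its own; assuming the usual build of the internal benchmark binary, an invocation along the lines of `./bench_internal half` (or all field benchmarks via `./bench_internal field`) should select it through have_flag, with the same iters*100 multiplier as the other cheap field operations.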

src/field.h

@@ -130,4 +130,9 @@ static void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_f
 /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized.*/
 static void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag);
 
+/** Halves the value of a field element modulo the field prime. Constant-time.
+ *  For an input magnitude 'm', the output magnitude is set to 'floor(m/2) + 1'.
+ *  The output is not guaranteed to be normalized, regardless of the input. */
+static void secp256k1_fe_half(secp256k1_fe *r);
+
 #endif /* SECP256K1_FIELD_H */
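The contract above (halving mod p, output magnitude floor(m/2) + 1) can be exercised with a quick round-trip; this is a hypothetical VERIFY-style sketch, not part of the commit, using only field functions that appear elsewhere in this diff:

    /* Hypothetical sketch: for a normalized input x, halving then
     * doubling must give back x modulo p. */
    static void check_fe_half_roundtrip(const secp256k1_fe *x) {
        secp256k1_fe t = *x, negx;
        secp256k1_fe_half(&t);            /* t = x/2, magnitude 1 */
        secp256k1_fe_mul_int(&t, 2);      /* t = x mod p, magnitude 2 */
        secp256k1_fe_negate(&negx, x, 1); /* negx = -x, magnitude 2 */
        secp256k1_fe_add(&t, &negx);      /* t = x - x = 0 mod p */
        VERIFY_CHECK(secp256k1_fe_normalizes_to_zero(&t));
    }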

src/field_10x26_impl.h

@@ -1133,6 +1133,82 @@ static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_
 #endif
 }
 
+static SECP256K1_INLINE void secp256k1_fe_half(secp256k1_fe *r) {
+    uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
+             t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];
+    uint32_t one = (uint32_t)1;
+    uint32_t mask = -(t0 & one) >> 6;
+
+#ifdef VERIFY
+    secp256k1_fe_verify(r);
+    VERIFY_CHECK(r->magnitude < 32);
+#endif
+
+    /* Bounds analysis (over the rationals).
+     *
+     * Let m = r->magnitude
+     *     C = 0x3FFFFFFUL * 2
+     *     D = 0x03FFFFFUL * 2
+     *
+     * Initial bounds: t0..t8 <= C * m
+     *                     t9 <= D * m
+     */
+
+    t0 += 0x3FFFC2FUL & mask;
+    t1 += 0x3FFFFBFUL & mask;
+    t2 += mask;
+    t3 += mask;
+    t4 += mask;
+    t5 += mask;
+    t6 += mask;
+    t7 += mask;
+    t8 += mask;
+    t9 += mask >> 4;
+
+    VERIFY_CHECK((t0 & one) == 0);
+
+    /* t0..t8: added <= C/2
+     *     t9: added <= D/2
+     *
+     * Current bounds: t0..t8 <= C * (m + 1/2)
+     *                     t9 <= D * (m + 1/2)
+     */
+
+    r->n[0] = (t0 >> 1) + ((t1 & one) << 25);
+    r->n[1] = (t1 >> 1) + ((t2 & one) << 25);
+    r->n[2] = (t2 >> 1) + ((t3 & one) << 25);
+    r->n[3] = (t3 >> 1) + ((t4 & one) << 25);
+    r->n[4] = (t4 >> 1) + ((t5 & one) << 25);
+    r->n[5] = (t5 >> 1) + ((t6 & one) << 25);
+    r->n[6] = (t6 >> 1) + ((t7 & one) << 25);
+    r->n[7] = (t7 >> 1) + ((t8 & one) << 25);
+    r->n[8] = (t8 >> 1) + ((t9 & one) << 25);
+    r->n[9] = (t9 >> 1);
+
+    /* t0..t8: shifted right and added <= C/4 + 1/2
+     *     t9: shifted right
+     *
+     * Current bounds: t0..t8 <= C * (m/2 + 1/2)
+     *                     t9 <= D * (m/2 + 1/4)
+     */
+
+#ifdef VERIFY
+    /* Therefore the output magnitude (M) has to be set such that:
+     *     t0..t8: C * M >= C * (m/2 + 1/2)
+     *         t9: D * M >= D * (m/2 + 1/4)
+     *
+     * It suffices for all limbs that, for any input magnitude m:
+     *     M >= m/2 + 1/2
+     *
+     * and since we want the smallest such integer value for M:
+     *     M == floor(m/2) + 1
+     */
+    r->magnitude = (r->magnitude >> 1) + 1;
+    r->normalized = 0;
+    secp256k1_fe_verify(r);
+#endif
+}
+
 static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag) {
     uint32_t mask0, mask1;
     VG_CHECK_VERIFY(r->n, sizeof(r->n));
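The trick in both implementations is the same: p is odd, so if the value is odd, adding p makes it even without changing it mod p, and a plain right shift then divides by two exactly; the mask applies p's limbs only in the odd case. A toy single-limb analogue over p = 7 (illustrative only, no magnitude tracking):

    #include <stdint.h>
    #include <assert.h>

    /* Toy analogue of the branchless halving above, modulo p = 7. */
    static uint32_t toy_half_mod7(uint32_t x) {
        uint32_t mask = -(x & 1u);     /* all bits set iff x is odd */
        return (x + (7u & mask)) >> 1; /* (x + p)/2 if odd, x/2 if even */
    }

    int main(void) {
        uint32_t h = toy_half_mod7(3);      /* odd case: (3 + 7)/2 = 5 */
        assert(h == 5 && (2 * h) % 7 == 3); /* 2*5 = 10 = 3 (mod 7) */
        assert(toy_half_mod7(4) == 2);      /* even case: plain shift */
        return 0;
    }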

src/field_5x52_impl.h

@@ -477,6 +477,71 @@ static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_
 #endif
 }
 
+static SECP256K1_INLINE void secp256k1_fe_half(secp256k1_fe *r) {
+    uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
+    uint64_t one = (uint64_t)1;
+    uint64_t mask = -(t0 & one) >> 12;
+
+#ifdef VERIFY
+    secp256k1_fe_verify(r);
+    VERIFY_CHECK(r->magnitude < 32);
+#endif
+
+    /* Bounds analysis (over the rationals).
+     *
+     * Let m = r->magnitude
+     *     C = 0xFFFFFFFFFFFFFULL * 2
+     *     D = 0x0FFFFFFFFFFFFULL * 2
+     *
+     * Initial bounds: t0..t3 <= C * m
+     *                     t4 <= D * m
+     */
+
+    t0 += 0xFFFFEFFFFFC2FULL & mask;
+    t1 += mask;
+    t2 += mask;
+    t3 += mask;
+    t4 += mask >> 4;
+
+    VERIFY_CHECK((t0 & one) == 0);
+
+    /* t0..t3: added <= C/2
+     *     t4: added <= D/2
+     *
+     * Current bounds: t0..t3 <= C * (m + 1/2)
+     *                     t4 <= D * (m + 1/2)
+     */
+
+    r->n[0] = (t0 >> 1) + ((t1 & one) << 51);
+    r->n[1] = (t1 >> 1) + ((t2 & one) << 51);
+    r->n[2] = (t2 >> 1) + ((t3 & one) << 51);
+    r->n[3] = (t3 >> 1) + ((t4 & one) << 51);
+    r->n[4] = (t4 >> 1);
+
+    /* t0..t3: shifted right and added <= C/4 + 1/2
+     *     t4: shifted right
+     *
+     * Current bounds: t0..t3 <= C * (m/2 + 1/2)
+     *                     t4 <= D * (m/2 + 1/4)
+     */
+
+#ifdef VERIFY
+    /* Therefore the output magnitude (M) has to be set such that:
+     *     t0..t3: C * M >= C * (m/2 + 1/2)
+     *         t4: D * M >= D * (m/2 + 1/4)
+     *
+     * It suffices for all limbs that, for any input magnitude m:
+     *     M >= m/2 + 1/2
+     *
+     * and since we want the smallest such integer value for M:
+     *     M == floor(m/2) + 1
+     */
+    r->magnitude = (r->magnitude >> 1) + 1;
+    r->normalized = 0;
+    secp256k1_fe_verify(r);
+#endif
+}
+
 static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag) {
     uint64_t mask0, mask1;
     VG_CHECK_VERIFY(r->n, sizeof(r->n));
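One detail worth spelling out (my reading, not commentary from the commit): the shift by 12 trims the all-ones mask to the 52-bit width of a middle limb of p in this representation, which is why t1..t3 can add mask directly while the 48-bit top limb adds mask >> 4; the 10x26 version plays the same game with >> 6 and >> 4 for its 26- and 22-bit limbs. A standalone check of that assumption:

    #include <stdint.h>
    #include <assert.h>

    int main(void) {
        uint64_t one = 1, odd = 0x2F, even = 0x30;
        /* Odd input: the mask equals a 52-bit middle limb of p... */
        assert((-(odd & one) >> 12) == 0xFFFFFFFFFFFFFULL);
        /* ...and mask >> 4 equals the 48-bit top limb. */
        assert(((-(odd & one) >> 12) >> 4) == 0xFFFFFFFFFFFFULL);
        /* Even input: the mask is zero and the limb additions are no-ops. */
        assert((-(even & one) >> 12) == 0);
        return 0;
    }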

src/group_impl.h

@@ -492,7 +492,7 @@ static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a,
 static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b) {
-    /* Operations: 7 mul, 5 sqr, 4 normalize, 21 mul_int/add/negate/cmov */
+    /* Operations: 7 mul, 5 sqr, 24 add/cmov/half/mul_int/negate/normalize_weak/normalizes_to_zero */
     secp256k1_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr;
     secp256k1_fe m_alt, rr_alt;
     int infinity, degenerate;
@@ -513,11 +513,11 @@ static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const
      *  Z = Z1*Z2
      *  T = U1+U2
      *  M = S1+S2
-     *  Q = T*M^2
+     *  Q = -T*M^2
      *  R = T^2-U1*U2
-     *  X3 = 4*(R^2-Q)
-     *  Y3 = 4*(R*(3*Q-2*R^2)-M^4)
-     *  Z3 = 2*M*Z
+     *  X3 = R^2+Q
+     *  Y3 = -(R*(2*X3+Q)+M^4)/2
+     *  Z3 = M*Z
      *  (Note that the paper uses xi = Xi / Zi and yi = Yi / Zi instead.)
      *
      *  This formula has the benefit of being the same for both addition
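A sanity check on the rewritten formula (my own derivation, not text from the commit): writing Q_old = T*M^2 for the old Q, the new outputs are exactly the old ones rescaled by lambda = 1/2, which leaves the represented affine point (X/Z^2, Y/Z^3) unchanged:

    Q      = -T*M^2 = -Q_old
    Z3_new = M*Z     = lambda   * (2*M*Z)                  = lambda   * Z3_old
    X3_new = R^2 + Q = lambda^2 * (4*(R^2 - Q_old))        = lambda^2 * X3_old
    2*X3_new + Q     = 2*R^2 - 3*Q_old, hence
    Y3_new = -(R*(2*R^2 - 3*Q_old) + M^4)/2
             = lambda^3 * (4*(R*(3*Q_old - 2*R^2) - M^4))  = lambda^3 * Y3_old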
@@ -581,7 +581,8 @@ static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const
      * and denominator of lambda; R and M represent the explicit
      * expressions x1^2 + x2^2 + x1x2 and y1 + y2. */
     secp256k1_fe_sqr(&n, &m_alt);                       /* n = Malt^2 (1) */
-    secp256k1_fe_mul(&q, &n, &t);                       /* q = Q = T*Malt^2 (1) */
+    secp256k1_fe_negate(&q, &t, 2);                     /* q = -T (3) */
+    secp256k1_fe_mul(&q, &q, &n);                       /* q = Q = -T*Malt^2 (1) */
     /* These two lines use the observation that either M == Malt or M == 0,
      * so M^3 * Malt is either Malt^4 (which is computed by squaring), or
      * zero (which is "computed" by cmov). So the cost is one squaring
@@ -589,21 +590,16 @@ static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const
     secp256k1_fe_sqr(&n, &n);
     secp256k1_fe_cmov(&n, &m, degenerate);              /* n = M^3 * Malt (2) */
     secp256k1_fe_sqr(&t, &rr_alt);                      /* t = Ralt^2 (1) */
-    secp256k1_fe_mul(&r->z, &a->z, &m_alt);             /* r->z = Malt*Z (1) */
+    secp256k1_fe_mul(&r->z, &a->z, &m_alt);             /* r->z = Z3 = Malt*Z (1) */
     infinity = secp256k1_fe_normalizes_to_zero(&r->z) & ~a->infinity;
-    secp256k1_fe_mul_int(&r->z, 2);                     /* r->z = Z3 = 2*Malt*Z (2) */
-    secp256k1_fe_negate(&q, &q, 1);                     /* q = -Q (2) */
-    secp256k1_fe_add(&t, &q);                           /* t = Ralt^2-Q (3) */
-    secp256k1_fe_normalize_weak(&t);
-    r->x = t;                                           /* r->x = Ralt^2-Q (1) */
-    secp256k1_fe_mul_int(&t, 2);                        /* t = 2*x3 (2) */
-    secp256k1_fe_add(&t, &q);                           /* t = 2*x3 - Q: (4) */
-    secp256k1_fe_mul(&t, &t, &rr_alt);                  /* t = Ralt*(2*x3 - Q) (1) */
-    secp256k1_fe_add(&t, &n);                           /* t = Ralt*(2*x3 - Q) + M^3*Malt (3) */
-    secp256k1_fe_negate(&r->y, &t, 3);                  /* r->y = Ralt*(Q - 2x3) - M^3*Malt (4) */
-    secp256k1_fe_normalize_weak(&r->y);
-    secp256k1_fe_mul_int(&r->x, 4);                     /* r->x = X3 = 4*(Ralt^2-Q) */
-    secp256k1_fe_mul_int(&r->y, 4);                     /* r->y = Y3 = 4*Ralt*(Q - 2x3) - 4*M^3*Malt (4) */
+    secp256k1_fe_add(&t, &q);                           /* t = Ralt^2 + Q (2) */
+    r->x = t;                                           /* r->x = X3 = Ralt^2 + Q (2) */
+    secp256k1_fe_mul_int(&t, 2);                        /* t = 2*X3 (4) */
+    secp256k1_fe_add(&t, &q);                           /* t = 2*X3 + Q (5) */
+    secp256k1_fe_mul(&t, &t, &rr_alt);                  /* t = Ralt*(2*X3 + Q) (1) */
+    secp256k1_fe_add(&t, &n);                           /* t = Ralt*(2*X3 + Q) + M^3*Malt (3) */
+    secp256k1_fe_negate(&r->y, &t, 3);                  /* r->y = -(Ralt*(2*X3 + Q) + M^3*Malt) (4) */
+    secp256k1_fe_half(&r->y);                           /* r->y = Y3 = -(Ralt*(2*X3 + Q) + M^3*Malt)/2 (3) */
 
     /** In case a->infinity == 1, replace r with (b->x, b->y, 1). */
     secp256k1_fe_cmov(&r->x, &b->x, a->infinity);
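The parenthesized numbers are the usual magnitude annotations, and they are consistent with the new _fe_half contract (my bookkeeping, not the commit's): _negate on input magnitude 3 yields magnitude 3 + 1 = 4, and _half on magnitude 4 yields floor(4/2) + 1 = 3, so r->y lands at magnitude 3 with no weak normalization. That is exactly where the old tail's two _normalize_weak and three extra _mul_int calls disappear, matching the trade described in the commit message.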