Merge pull request #134 (29ae131)
Make scalar_add_bit test's overflow detection exact (Pieter Wuille)

commit 4d879a3a66
@@ -42,8 +42,8 @@ static void secp256k1_scalar_set_int(secp256k1_scalar_t *r, unsigned int v);
 /** Convert a scalar to a byte array. */
 static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar_t* a);
 
-/** Add two scalars together (modulo the group order). */
-static void secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b);
+/** Add two scalars together (modulo the group order). Returns whether it overflowed. */
+static int secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b);
 
 /** Add a power of two to a scalar. The result is not allowed to overflow. */
 static void secp256k1_scalar_add_bit(secp256k1_scalar_t *r, unsigned int bit);
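The header change above makes overflow an explicit result instead of a silent reduction. A minimal sketch of how a caller can consume the new return value (the surrounding variables are hypothetical, not part of this patch):

    secp256k1_scalar_t sum;
    int overflow = secp256k1_scalar_add(&sum, &a, &b);
    if (overflow) {
        /* a + b reached the group order n and was reduced, i.e. a + b >= n. */
    }
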
@@ -81,7 +81,7 @@ SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar_t *r, unsig
     return overflow;
 }
 
-static void secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
+static int secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
     uint128_t t = (uint128_t)a->d[0] + b->d[0];
     r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
     t += (uint128_t)a->d[1] + b->d[1];
@@ -90,7 +90,10 @@ static void secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t
     r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
     t += (uint128_t)a->d[3] + b->d[3];
     r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
-    secp256k1_scalar_reduce(r, t + secp256k1_scalar_check_overflow(r));
+    int overflow = t + secp256k1_scalar_check_overflow(r);
+    VERIFY_CHECK(overflow == 0 || overflow == 1);
+    secp256k1_scalar_reduce(r, overflow);
+    return overflow;
 }
 
 static void secp256k1_scalar_add_bit(secp256k1_scalar_t *r, unsigned int bit) {
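Why the new VERIFY_CHECK holds: both inputs are already reduced below the group order n, so a + b < 2n. If the carry out of the top limb (t) is 1, the truncated sum has already dropped 2^256 > n, leaving r < n and secp256k1_scalar_check_overflow(r) == 0; if t is 0, the full sum sits in r and needs at most one subtraction of n. Either way t + secp256k1_scalar_check_overflow(r) is 0 or 1. The carry-chain pattern itself can be seen on a hypothetical two-limb toy (a sketch only; compiles with GCC/Clang, which provide unsigned __int128):

    #include <stdint.h>

    typedef unsigned __int128 uint128_t;

    /* Toy 2x64-limb add: t accumulates each limb sum plus the running carry;
     * the low 64 bits land in the result limb, the rest carries over. */
    static int toy_add_2x64(uint64_t r[2], const uint64_t a[2], const uint64_t b[2]) {
        uint128_t t = (uint128_t)a[0] + b[0];
        r[0] = (uint64_t)t; t >>= 64;
        t += (uint128_t)a[1] + b[1];
        r[1] = (uint64_t)t; t >>= 64;
        return (int)t; /* carry out of the top limb: 0 or 1 */
    }
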
@@ -111,7 +111,7 @@ SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar_t *r, uint3
     return overflow;
 }
 
-static void secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
+static int secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
     uint64_t t = (uint64_t)a->d[0] + b->d[0];
     r->d[0] = t & 0xFFFFFFFFULL; t >>= 32;
     t += (uint64_t)a->d[1] + b->d[1];
@@ -128,7 +128,10 @@ static void secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t
     r->d[6] = t & 0xFFFFFFFFULL; t >>= 32;
     t += (uint64_t)a->d[7] + b->d[7];
     r->d[7] = t & 0xFFFFFFFFULL; t >>= 32;
-    secp256k1_scalar_reduce(r, t + secp256k1_scalar_check_overflow(r));
+    int overflow = t + secp256k1_scalar_check_overflow(r);
+    VERIFY_CHECK(overflow == 0 || overflow == 1);
+    secp256k1_scalar_reduce(r, overflow);
+    return overflow;
 }
 
 static void secp256k1_scalar_add_bit(secp256k1_scalar_t *r, unsigned int bit) {
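The 8x32 backend gets the identical change, with eight 32-bit limbs and a 64-bit accumulator; the same a + b < 2n argument bounds overflow to 0 or 1. For reference, the semantics of the overflow check feeding into this sum amount to a comparison against the group order n. A hedged illustration only (the library's actual secp256k1_scalar_check_overflow is implemented differently, without branches):

    #include <stdint.h>

    /* Toy semantics of "did r reach the group order?": returns r >= n, with n
     * the secp256k1 group order, limbs stored least-significant first. */
    static int toy_check_overflow(const uint32_t d[8]) {
        static const uint32_t N[8] = {
            0xD0364141UL, 0xBFD25E8CUL, 0xAF48A03BUL, 0xBAAEDCE6UL,
            0xFFFFFFFEUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL
        };
        for (int i = 7; i >= 0; i--) {
            if (d[i] > N[i]) return 1;
            if (d[i] < N[i]) return 0;
        }
        return 1; /* r == n also needs reduction */
    }
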
@@ -346,8 +346,7 @@ void scalar_test(void) {
             secp256k1_scalar_add(&b, &b, &b);
         }
         secp256k1_scalar_t r1 = s1, r2 = s1;
-        secp256k1_scalar_add(&r1, &r1, &b);
-        if (!(secp256k1_scalar_get_bits(&s1, 255, 1) == 1 && secp256k1_scalar_get_bits(&r1, 255, 1) == 0)) {
+        if (!secp256k1_scalar_add(&r1, &r1, &b)) {
             /* No overflow happened. */
             secp256k1_scalar_add_bit(&r2, bit);
             CHECK(secp256k1_scalar_eq(&r1, &r2));
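Previously the test performed the addition unconditionally and then inferred overflow by comparing bit 255 of s1 and r1. That heuristic is not exact for every bit position: for bit == 255 there is a narrow range of s1 just below 2^255 where s1 + 2^255 >= n overflows without s1's top bit being set, so the tested pattern never appears. Branching on the exact return value of secp256k1_scalar_add removes the guesswork, which is what the commit title refers to. For context, the surrounding test builds b == 2^bit before this fragment; a sketch reconstructed from the context lines above (the exact loop shape is an assumption):

    secp256k1_scalar_t b;
    secp256k1_scalar_set_int(&b, 1);          /* b = 1 */
    for (unsigned int i = 0; i < bit; i++) {
        secp256k1_scalar_add(&b, &b, &b);     /* b = 2*b, so b = 2^bit */
    }
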