From 71712b27e56112987bbfc6d3abf72a79eaa54562 Mon Sep 17 00:00:00 2001
From: Gregory Maxwell
Date: Sat, 15 Nov 2014 15:28:10 +0000
Subject: [PATCH] Switch to C89 comments in prep for making the whole codebase C89 compatible.

This should be whitespace/comment only changes and should produce the same object code.
---
 src/bench_inv.c | 9 +-
 src/bench_sign.c | 15 +-
 src/bench_verify.c | 14 +-
 src/ecdsa.h | 8 +-
 src/ecdsa_impl.h | 9 +-
 src/eckey.h | 8 +-
 src/eckey_impl.h | 18 +-
 src/ecmult.h | 8 +-
 src/ecmult_gen.h | 8 +-
 src/ecmult_gen_impl.h | 60 +++----
 src/ecmult_impl.h | 44 ++---
 src/field.h | 8 +-
 src/field_10x26.h | 10 +-
 src/field_10x26_impl.h | 324 ++++++++++++++++++-----------------
 src/field_5x52.h | 10 +-
 src/field_5x52_asm_impl.h | 8 +-
 src/field_5x52_impl.h | 22 +--
 src/field_5x52_int128_impl.h | 138 +++++++--------
 src/field_gmp.h | 8 +-
 src/field_gmp_impl.h | 19 +-
 src/field_impl.h | 30 ++--
 src/group.h | 24 +--
 src/group_impl.h | 155 +++++++++--------
 src/num.h | 8 +-
 src/num_gmp.h | 8 +-
 src/num_gmp_impl.h | 29 ++--
 src/num_impl.h | 8 +-
 src/scalar.h | 8 +-
 src/scalar_4x64.h | 8 +-
 src/scalar_4x64_impl.h | 44 ++---
 src/scalar_8x32.h | 8 +-
 src/scalar_8x32_impl.h | 52 +++---
 src/scalar_impl.h | 144 ++++++++--------
 src/secp256k1.c | 12 +-
 src/testrand.h | 8 +-
 src/testrand_impl.h | 8 +-
 src/tests.c | 204 +++++++++++-----------
 src/util.h | 12 +-
 38 files changed, 802 insertions(+), 716 deletions(-)

diff --git a/src/bench_inv.c b/src/bench_inv.c
index 641b56a..d6f6643 100644
--- a/src/bench_inv.c
+++ b/src/bench_inv.c
@@ -1,7 +1,8 @@
-// Copyright (c) 2014 Pieter Wuille
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
+/**********************************************************************
+ * Copyright (c) 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
 #include 
 
 #include "include/secp256k1.h"
diff --git a/src/bench_sign.c b/src/bench_sign.c
index 92a8d11..f01f11d 100644
--- a/src/bench_sign.c
+++ b/src/bench_sign.c
@@ -1,7 +1,8 @@
-// Copyright (c) 2014 Pieter Wuille
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
+/**********************************************************************
+ * Copyright (c) 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
 #include 
 #include 
 
@@ -25,9 +26,9 @@ int main(void) {
 int recid = 0;
 CHECK(secp256k1_ecdsa_sign_compact(msg, 32, sig, key, nonce, &recid));
 for (int j = 0; j < 32; j++) {
- nonce[j] = key[j]; // Move former key to nonce
- msg[j] = sig[j]; // Move former R to message.
- key[j] = sig[j + 32]; // Move former S to key.
+ nonce[j] = key[j]; /* Move former key to nonce */
+ msg[j] = sig[j]; /* Move former R to message. */
+ key[j] = sig[j + 32]; /* Move former S to key.
*/ } } diff --git a/src/bench_verify.c b/src/bench_verify.c index c074f88..6905955 100644 --- a/src/bench_verify.c +++ b/src/bench_verify.c @@ -1,6 +1,8 @@ -// Copyright (c) 2014 Pieter Wuille -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #include #include @@ -22,9 +24,9 @@ int main(void) { int pubkeylen = 33; CHECK(secp256k1_ecdsa_recover_compact(msg, 32, sig, pubkey, &pubkeylen, 1, i % 2)); for (int j = 0; j < 32; j++) { - sig[j + 32] = msg[j]; // Move former message to S. - msg[j] = sig[j]; // Move former R to message. - sig[j] = pubkey[j + 1]; // Move recovered pubkey X coordinate to R (which must be a valid X coordinate). + sig[j + 32] = msg[j]; /* Move former message to S. */ + msg[j] = sig[j]; /* Move former R to message. */ + sig[j] = pubkey[j + 1]; /* Move recovered pubkey X coordinate to R (which must be a valid X coordinate). */ } } diff --git a/src/ecdsa.h b/src/ecdsa.h index faec2f4..3b1e048 100644 --- a/src/ecdsa.h +++ b/src/ecdsa.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013 Pieter Wuille -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_ECDSA_ #define _SECP256K1_ECDSA_ diff --git a/src/ecdsa_impl.h b/src/ecdsa_impl.h index 88ad216..57e9978 100644 --- a/src/ecdsa_impl.h +++ b/src/ecdsa_impl.h @@ -1,6 +1,9 @@ -// Copyright (c) 2013 Pieter Wuille -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + #ifndef _SECP256K1_ECDSA_IMPL_H_ #define _SECP256K1_ECDSA_IMPL_H_ diff --git a/src/eckey.h b/src/eckey.h index fe4c02e..725a549 100644 --- a/src/eckey.h +++ b/src/eckey.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013-2014 Pieter Wuille -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_ECKEY_ #define _SECP256K1_ECKEY_ diff --git a/src/eckey_impl.h b/src/eckey_impl.h index 38572e3..59b191d 100644 --- a/src/eckey_impl.h +++ b/src/eckey_impl.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013-2014 Pieter Wuille -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_ECKEY_IMPL_H_ #define _SECP256K1_ECKEY_IMPL_H_ @@ -46,11 +48,11 @@ static void secp256k1_eckey_pubkey_serialize(secp256k1_ge_t *elem, unsigned char static int secp256k1_eckey_privkey_parse(secp256k1_scalar_t *key, const unsigned char *privkey, int privkeylen) { const unsigned char *end = privkey + privkeylen; - // sequence header + /* sequence header */ if (end < privkey+1 || *privkey != 0x30) return 0; privkey++; - // sequence length constructor + /* sequence length constructor */ int lenb = 0; if (end < privkey+1 || !(*privkey & 0x80)) return 0; @@ -59,17 +61,17 @@ static int secp256k1_eckey_privkey_parse(secp256k1_scalar_t *key, const unsigned return 0; if (end < privkey+lenb) return 0; - // sequence length + /* sequence length */ int len = 0; len = privkey[lenb-1] | (lenb > 1 ? privkey[lenb-2] << 8 : 0); privkey += lenb; if (end < privkey+len) return 0; - // sequence element 0: version number (=1) + /* sequence element 0: version number (=1) */ if (end < privkey+3 || privkey[0] != 0x02 || privkey[1] != 0x01 || privkey[2] != 0x01) return 0; privkey += 3; - // sequence element 1: octet string, up to 32 bytes + /* sequence element 1: octet string, up to 32 bytes */ if (end < privkey+2 || privkey[0] != 0x04 || privkey[1] > 0x20 || end < privkey+2+privkey[1]) return 0; int overflow = 0; diff --git a/src/ecmult.h b/src/ecmult.h index 7d74b5e..e3cf18b 100644 --- a/src/ecmult.h +++ b/src/ecmult.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013-2014 Pieter Wuille -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_ECMULT_ #define _SECP256K1_ECMULT_ diff --git a/src/ecmult_gen.h b/src/ecmult_gen.h index 79aa4bf..42f822f 100644 --- a/src/ecmult_gen.h +++ b/src/ecmult_gen.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013-2014 Pieter Wuille -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_ECMULT_GEN_ #define _SECP256K1_ECMULT_GEN_ diff --git a/src/ecmult_gen_impl.h b/src/ecmult_gen_impl.h index 36a6fd9..07859ab 100644 --- a/src/ecmult_gen_impl.h +++ b/src/ecmult_gen_impl.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013-2014 Pieter Wuille -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_ECMULT_GEN_IMPL_H_ #define _SECP256K1_ECMULT_GEN_IMPL_H_ @@ -10,19 +12,19 @@ #include "ecmult_gen.h" typedef struct { - // For accelerating the computation of a*G: - // To harden against timing attacks, use the following mechanism: - // * Break up the multiplicand into groups of 4 bits, called n_0, n_1, n_2, ..., n_63. - // * Compute sum(n_i * 16^i * G + U_i, i=0..63), where: - // * U_i = U * 2^i (for i=0..62) - // * U_i = U * (1-2^63) (for i=63) - // where U is a point with no known corresponding scalar. Note that sum(U_i, i=0..63) = 0. - // For each i, and each of the 16 possible values of n_i, (n_i * 16^i * G + U_i) is - // precomputed (call it prec(i, n_i)). The formula now becomes sum(prec(i, n_i), i=0..63). - // None of the resulting prec group elements have a known scalar, and neither do any of - // the intermediate sums while computing a*G. - // To make memory access uniform, the bytes of prec(i, n_i) are sliced per value of n_i. - unsigned char prec[64][sizeof(secp256k1_ge_t)][16]; // prec[j][k][i] = k'th byte of (16^j * i * G + U_i) + /* For accelerating the computation of a*G: + * To harden against timing attacks, use the following mechanism: + * * Break up the multiplicand into groups of 4 bits, called n_0, n_1, n_2, ..., n_63. + * * Compute sum(n_i * 16^i * G + U_i, i=0..63), where: + * * U_i = U * 2^i (for i=0..62) + * * U_i = U * (1-2^63) (for i=63) + * where U is a point with no known corresponding scalar. Note that sum(U_i, i=0..63) = 0. + * For each i, and each of the 16 possible values of n_i, (n_i * 16^i * G + U_i) is + * precomputed (call it prec(i, n_i)). The formula now becomes sum(prec(i, n_i), i=0..63). + * None of the resulting prec group elements have a known scalar, and neither do any of + * the intermediate sums while computing a*G. + * To make memory access uniform, the bytes of prec(i, n_i) are sliced per value of n_i. */ + unsigned char prec[64][sizeof(secp256k1_ge_t)][16]; /* prec[j][k][i] = k'th byte of (16^j * i * G + U_i) */ } secp256k1_ecmult_gen_consts_t; static const secp256k1_ecmult_gen_consts_t *secp256k1_ecmult_gen_consts = NULL; @@ -31,14 +33,14 @@ static void secp256k1_ecmult_gen_start(void) { if (secp256k1_ecmult_gen_consts != NULL) return; - // Allocate the precomputation table. + /* Allocate the precomputation table. 
*/ secp256k1_ecmult_gen_consts_t *ret = (secp256k1_ecmult_gen_consts_t*)malloc(sizeof(secp256k1_ecmult_gen_consts_t)); - // get the generator + /* get the generator */ const secp256k1_ge_t *g = &secp256k1_ge_consts->g; secp256k1_gej_t gj; secp256k1_gej_set_ge(&gj, g); - // Construct a group element with no known corresponding scalar (nothing up my sleeve). + /* Construct a group element with no known corresponding scalar (nothing up my sleeve). */ secp256k1_gej_t nums_gej; { static const unsigned char nums_b32[32] = "The scalar for this x is unknown"; @@ -47,30 +49,30 @@ static void secp256k1_ecmult_gen_start(void) { secp256k1_ge_t nums_ge; VERIFY_CHECK(secp256k1_ge_set_xo(&nums_ge, &nums_x, 0)); secp256k1_gej_set_ge(&nums_gej, &nums_ge); - // Add G to make the bits in x uniformly distributed. + /* Add G to make the bits in x uniformly distributed. */ secp256k1_gej_add_ge_var(&nums_gej, &nums_gej, g); } - // compute prec. + /* compute prec. */ secp256k1_ge_t prec[1024]; { - secp256k1_gej_t precj[1024]; // Jacobian versions of prec. - secp256k1_gej_t gbase; gbase = gj; // 16^j * G - secp256k1_gej_t numsbase; numsbase = nums_gej; // 2^j * nums. + secp256k1_gej_t precj[1024]; /* Jacobian versions of prec. */ + secp256k1_gej_t gbase; gbase = gj; /* 16^j * G */ + secp256k1_gej_t numsbase; numsbase = nums_gej; /* 2^j * nums. */ for (int j=0; j<64; j++) { - // Set precj[j*16 .. j*16+15] to (numsbase, numsbase + gbase, ..., numsbase + 15*gbase). + /* Set precj[j*16 .. j*16+15] to (numsbase, numsbase + gbase, ..., numsbase + 15*gbase). */ precj[j*16] = numsbase; for (int i=1; i<16; i++) { secp256k1_gej_add_var(&precj[j*16 + i], &precj[j*16 + i - 1], &gbase); } - // Multiply gbase by 16. + /* Multiply gbase by 16. */ for (int i=0; i<4; i++) { secp256k1_gej_double_var(&gbase, &gbase); } - // Multiply numbase by 2. + /* Multiply numbase by 2. */ secp256k1_gej_double_var(&numsbase, &numsbase); if (j == 62) { - // In the last iteration, numsbase is (1 - 2^j) * nums instead. + /* In the last iteration, numsbase is (1 - 2^j) * nums instead. */ secp256k1_gej_neg(&numsbase, &numsbase); secp256k1_gej_add_var(&numsbase, &numsbase, &nums_gej); } @@ -85,7 +87,7 @@ static void secp256k1_ecmult_gen_start(void) { } } - // Set the global pointer to the precomputation table. + /* Set the global pointer to the precomputation table. */ secp256k1_ecmult_gen_consts = ret; } diff --git a/src/ecmult_impl.h b/src/ecmult_impl.h index e5d24d2..5089025 100644 --- a/src/ecmult_impl.h +++ b/src/ecmult_impl.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013-2014 Pieter Wuille -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_ECMULT_IMPL_H_ #define _SECP256K1_ECMULT_IMPL_H_ @@ -9,11 +11,11 @@ #include "group.h" #include "ecmult.h" -// optimal for 128-bit and 256-bit exponents. +/* optimal for 128-bit and 256-bit exponents. */ #define WINDOW_A 5 -// larger numbers may result in slightly better performance, at the cost of -// exponentially larger precomputed tables. WINDOW_G == 14 results in 640 KiB. 
+/** larger numbers may result in slightly better performance, at the cost of + exponentially larger precomputed tables. WINDOW_G == 14 results in 640 KiB. */ #define WINDOW_G 14 /** Fill a table 'pre' with precomputed odd multiples of a. W determines the size of the table. @@ -65,9 +67,9 @@ static void secp256k1_ecmult_table_precomp_ge_var(secp256k1_ge_t *pre, const sec #define ECMULT_TABLE_GET_GE(r,pre,n,w) ECMULT_TABLE_GET((r),(pre),(n),(w),secp256k1_ge_neg) typedef struct { - // For accelerating the computation of a*P + b*G: - secp256k1_ge_t pre_g[ECMULT_TABLE_SIZE(WINDOW_G)]; // odd multiples of the generator - secp256k1_ge_t pre_g_128[ECMULT_TABLE_SIZE(WINDOW_G)]; // odd multiples of 2^128*generator + /* For accelerating the computation of a*P + b*G: */ + secp256k1_ge_t pre_g[ECMULT_TABLE_SIZE(WINDOW_G)]; /* odd multiples of the generator */ + secp256k1_ge_t pre_g_128[ECMULT_TABLE_SIZE(WINDOW_G)]; /* odd multiples of 2^128*generator */ } secp256k1_ecmult_consts_t; static const secp256k1_ecmult_consts_t *secp256k1_ecmult_consts = NULL; @@ -76,23 +78,23 @@ static void secp256k1_ecmult_start(void) { if (secp256k1_ecmult_consts != NULL) return; - // Allocate the precomputation table. + /* Allocate the precomputation table. */ secp256k1_ecmult_consts_t *ret = (secp256k1_ecmult_consts_t*)malloc(sizeof(secp256k1_ecmult_consts_t)); - // get the generator + /* get the generator */ const secp256k1_ge_t *g = &secp256k1_ge_consts->g; secp256k1_gej_t gj; secp256k1_gej_set_ge(&gj, g); - // calculate 2^128*generator + /* calculate 2^128*generator */ secp256k1_gej_t g_128j = gj; for (int i=0; i<128; i++) secp256k1_gej_double_var(&g_128j, &g_128j); - // precompute the tables with odd multiples + /* precompute the tables with odd multiples */ secp256k1_ecmult_table_precomp_ge_var(ret->pre_g, &gj, WINDOW_G); secp256k1_ecmult_table_precomp_ge_var(ret->pre_g_128, &g_128j, WINDOW_G); - // Set the global pointer to the precomputation table. + /* Set the global pointer to the precomputation table. */ secp256k1_ecmult_consts = ret; } @@ -148,21 +150,21 @@ static void secp256k1_ecmult(secp256k1_gej_t *r, const secp256k1_gej_t *a, const #ifdef USE_ENDOMORPHISM secp256k1_num_t na_1, na_lam; - // split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) + /* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */ secp256k1_gej_split_exp_var(&na_1, &na_lam, na); - // build wnaf representation for na_1 and na_lam. + /* build wnaf representation for na_1 and na_lam. */ int wnaf_na_1[129]; int bits_na_1 = secp256k1_ecmult_wnaf(wnaf_na_1, &na_1, WINDOW_A); int wnaf_na_lam[129]; int bits_na_lam = secp256k1_ecmult_wnaf(wnaf_na_lam, &na_lam, WINDOW_A); int bits = bits_na_1; if (bits_na_lam > bits) bits = bits_na_lam; #else - // build wnaf representation for na. + /* build wnaf representation for na. */ int wnaf_na[257]; int bits_na = secp256k1_ecmult_wnaf(wnaf_na, na, WINDOW_A); int bits = bits_na; #endif - // calculate odd multiples of a + /* calculate odd multiples of a */ secp256k1_gej_t pre_a[ECMULT_TABLE_SIZE(WINDOW_A)]; secp256k1_ecmult_table_precomp_gej_var(pre_a, a, WINDOW_A); @@ -172,13 +174,13 @@ static void secp256k1_ecmult(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_gej_mul_lambda(&pre_a_lam[i], &pre_a[i]); #endif - // Splitted G factors. + /* Splitted G factors. 
*/ secp256k1_num_t ng_1, ng_128; - // split ng into ng_1 and ng_128 (where gn = gn_1 + gn_128*2^128, and gn_1 and gn_128 are ~128 bit) + /* split ng into ng_1 and ng_128 (where gn = gn_1 + gn_128*2^128, and gn_1 and gn_128 are ~128 bit) */ secp256k1_num_split(&ng_1, &ng_128, ng, 128); - // Build wnaf representation for ng_1 and ng_128 + /* Build wnaf representation for ng_1 and ng_128 */ int wnaf_ng_1[129]; int bits_ng_1 = secp256k1_ecmult_wnaf(wnaf_ng_1, &ng_1, WINDOW_G); int wnaf_ng_128[129]; int bits_ng_128 = secp256k1_ecmult_wnaf(wnaf_ng_128, &ng_128, WINDOW_G); if (bits_ng_1 > bits) bits = bits_ng_1; diff --git a/src/field.h b/src/field.h index fe9bf65..c7feead 100644 --- a/src/field.h +++ b/src/field.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013 Pieter Wuille -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_FIELD_ #define _SECP256K1_FIELD_ diff --git a/src/field_10x26.h b/src/field_10x26.h index d544139..66fb3f2 100644 --- a/src/field_10x26.h +++ b/src/field_10x26.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013 Pieter Wuille -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_FIELD_REPR_ #define _SECP256K1_FIELD_REPR_ @@ -8,7 +10,7 @@ #include typedef struct { - // X = sum(i=0..9, elem[i]*2^26) mod n + /* X = sum(i=0..9, elem[i]*2^26) mod n */ uint32_t n[10]; #ifdef VERIFY int magnitude; diff --git a/src/field_10x26_impl.h b/src/field_10x26_impl.h index 47e7b0d..c0f1be0 100644 --- a/src/field_10x26_impl.h +++ b/src/field_10x26_impl.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013 Pieter Wuille -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_FIELD_REPR_IMPL_H_ #define _SECP256K1_FIELD_REPR_IMPL_H_ @@ -50,11 +52,11 @@ static void secp256k1_fe_normalize(secp256k1_fe_t *r) { uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4], t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9]; - // Reduce t9 at the start so there will be at most a single carry from the first pass + /* Reduce t9 at the start so there will be at most a single carry from the first pass */ uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL; uint32_t m; - // The first pass ensures the magnitude is 1, ... + /* The first pass ensures the magnitude is 1, ... 
*/ t0 += x * 0x3D1UL; t1 += (x << 6); t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL; t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL; @@ -66,14 +68,14 @@ static void secp256k1_fe_normalize(secp256k1_fe_t *r) { t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; m &= t7; t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; m &= t8; - // ... except for a possible carry at bit 22 of t9 (i.e. bit 256 of the field element) + /* ... except for a possible carry at bit 22 of t9 (i.e. bit 256 of the field element) */ VERIFY_CHECK(t9 >> 23 == 0); - // At most a single final reduction is needed; check if the value is >= the field characteristic + /* At most a single final reduction is needed; check if the value is >= the field characteristic */ x = (t9 >> 22) | ((t9 == 0x03FFFFFUL) & (m == 0x3FFFFFFUL) & ((t1 + 0x40UL + ((t0 + 0x3D1UL) >> 26)) > 0x3FFFFFFUL)); - // Apply the final reduction (for constant-time behaviour, we do it always) + /* Apply the final reduction (for constant-time behaviour, we do it always) */ t0 += x * 0x3D1UL; t1 += (x << 6); t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL; t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL; @@ -85,10 +87,10 @@ static void secp256k1_fe_normalize(secp256k1_fe_t *r) { t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; - // If t9 didn't carry to bit 22 already, then it should have after any final reduction + /* If t9 didn't carry to bit 22 already, then it should have after any final reduction */ VERIFY_CHECK(t9 >> 22 == x); - // Mask off the possible multiple of 2^256 from the final reduction + /* Mask off the possible multiple of 2^256 from the final reduction */ t9 &= 0x03FFFFFUL; r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4; @@ -274,9 +276,10 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(const uint32_t *a, const uin VERIFY_BITS(b[9], 26); const uint32_t M = 0x3FFFFFFUL, R0 = 0x3D10UL, R1 = 0x400UL; - // [... a b c] is a shorthand for ... + a<<52 + b<<26 + c<<0 mod n. - // px is a shorthand for sum(a[i]*b[x-i], i=0..x). - // Note that [x 0 0 0 0 0 0 0 0 0 0] = [x*R1 x*R0]. + /** [... a b c] is a shorthand for ... + a<<52 + b<<26 + c<<0 mod n. + * px is a shorthand for sum(a[i]*b[x-i], i=0..x). + * Note that [x 0 0 0 0 0 0 0 0 0 0] = [x*R1 x*R0]. 
+ */ uint64_t c, d; @@ -290,16 +293,16 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(const uint32_t *a, const uin + (uint64_t)a[7] * b[2] + (uint64_t)a[8] * b[1] + (uint64_t)a[9] * b[0]; - // VERIFY_BITS(d, 64); - // [d 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0] + /* VERIFY_BITS(d, 64); */ + /* [d 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0] */ uint32_t t9 = d & M; d >>= 26; VERIFY_BITS(t9, 26); VERIFY_BITS(d, 38); - // [d t9 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0] + /* [d t9 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0] */ c = (uint64_t)a[0] * b[0]; VERIFY_BITS(c, 60); - // [d t9 0 0 0 0 0 0 0 0 c] = [p9 0 0 0 0 0 0 0 0 p0] + /* [d t9 0 0 0 0 0 0 0 0 c] = [p9 0 0 0 0 0 0 0 0 p0] */ d += (uint64_t)a[1] * b[9] + (uint64_t)a[2] * b[8] + (uint64_t)a[3] * b[7] @@ -310,22 +313,22 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(const uint32_t *a, const uin + (uint64_t)a[8] * b[2] + (uint64_t)a[9] * b[1]; VERIFY_BITS(d, 63); - // [d t9 0 0 0 0 0 0 0 0 c] = [p10 p9 0 0 0 0 0 0 0 0 p0] + /* [d t9 0 0 0 0 0 0 0 0 c] = [p10 p9 0 0 0 0 0 0 0 0 p0] */ uint64_t u0 = d & M; d >>= 26; c += u0 * R0; VERIFY_BITS(u0, 26); VERIFY_BITS(d, 37); VERIFY_BITS(c, 61); - // [d u0 t9 0 0 0 0 0 0 0 0 c-u0*R0] = [p10 p9 0 0 0 0 0 0 0 0 p0] + /* [d u0 t9 0 0 0 0 0 0 0 0 c-u0*R0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */ uint32_t t0 = c & M; c >>= 26; c += u0 * R1; VERIFY_BITS(t0, 26); VERIFY_BITS(c, 37); - // [d u0 t9 0 0 0 0 0 0 0 c-u0*R1 t0-u0*R0] = [p10 p9 0 0 0 0 0 0 0 0 p0] - // [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 0 p0] + /* [d u0 t9 0 0 0 0 0 0 0 c-u0*R1 t0-u0*R0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */ + /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */ c += (uint64_t)a[0] * b[1] + (uint64_t)a[1] * b[0]; VERIFY_BITS(c, 62); - // [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 p1 p0] + /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 p1 p0] */ d += (uint64_t)a[2] * b[9] + (uint64_t)a[3] * b[8] + (uint64_t)a[4] * b[7] @@ -335,23 +338,23 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(const uint32_t *a, const uin + (uint64_t)a[8] * b[3] + (uint64_t)a[9] * b[2]; VERIFY_BITS(d, 63); - // [d 0 t9 0 0 0 0 0 0 0 c t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] + /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */ uint64_t u1 = d & M; d >>= 26; c += u1 * R0; VERIFY_BITS(u1, 26); VERIFY_BITS(d, 37); VERIFY_BITS(c, 63); - // [d u1 0 t9 0 0 0 0 0 0 0 c-u1*R0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] + /* [d u1 0 t9 0 0 0 0 0 0 0 c-u1*R0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */ uint32_t t1 = c & M; c >>= 26; c += u1 * R1; VERIFY_BITS(t1, 26); VERIFY_BITS(c, 38); - // [d u1 0 t9 0 0 0 0 0 0 c-u1*R1 t1-u1*R0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] - // [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] + /* [d u1 0 t9 0 0 0 0 0 0 c-u1*R1 t1-u1*R0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */ + /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */ c += (uint64_t)a[0] * b[2] + (uint64_t)a[1] * b[1] + (uint64_t)a[2] * b[0]; VERIFY_BITS(c, 62); - // [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] + /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ d += (uint64_t)a[3] * b[9] + (uint64_t)a[4] * b[8] + (uint64_t)a[5] * b[7] @@ -360,24 +363,24 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(const uint32_t *a, const uin + (uint64_t)a[8] * b[4] + (uint64_t)a[9] * b[3]; VERIFY_BITS(d, 63); - // [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] + /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p12 
p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ uint64_t u2 = d & M; d >>= 26; c += u2 * R0; VERIFY_BITS(u2, 26); VERIFY_BITS(d, 37); VERIFY_BITS(c, 63); - // [d u2 0 0 t9 0 0 0 0 0 0 c-u2*R0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] + /* [d u2 0 0 t9 0 0 0 0 0 0 c-u2*R0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ uint32_t t2 = c & M; c >>= 26; c += u2 * R1; VERIFY_BITS(t2, 26); VERIFY_BITS(c, 38); - // [d u2 0 0 t9 0 0 0 0 0 c-u2*R1 t2-u2*R0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] - // [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] + /* [d u2 0 0 t9 0 0 0 0 0 c-u2*R1 t2-u2*R0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ + /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ c += (uint64_t)a[0] * b[3] + (uint64_t)a[1] * b[2] + (uint64_t)a[2] * b[1] + (uint64_t)a[3] * b[0]; VERIFY_BITS(c, 63); - // [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] + /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */ d += (uint64_t)a[4] * b[9] + (uint64_t)a[5] * b[8] + (uint64_t)a[6] * b[7] @@ -385,17 +388,17 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(const uint32_t *a, const uin + (uint64_t)a[8] * b[5] + (uint64_t)a[9] * b[4]; VERIFY_BITS(d, 63); - // [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] + /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */ uint64_t u3 = d & M; d >>= 26; c += u3 * R0; VERIFY_BITS(u3, 26); VERIFY_BITS(d, 37); - // VERIFY_BITS(c, 64); - // [d u3 0 0 0 t9 0 0 0 0 0 c-u3*R0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] + /* VERIFY_BITS(c, 64); */ + /* [d u3 0 0 0 t9 0 0 0 0 0 c-u3*R0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */ uint32_t t3 = c & M; c >>= 26; c += u3 * R1; VERIFY_BITS(t3, 26); VERIFY_BITS(c, 39); - // [d u3 0 0 0 t9 0 0 0 0 c-u3*R1 t3-u3*R0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] - // [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] + /* [d u3 0 0 0 t9 0 0 0 0 c-u3*R1 t3-u3*R0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */ + /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */ c += (uint64_t)a[0] * b[4] + (uint64_t)a[1] * b[3] @@ -403,24 +406,24 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(const uint32_t *a, const uin + (uint64_t)a[3] * b[1] + (uint64_t)a[4] * b[0]; VERIFY_BITS(c, 63); - // [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] + /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */ d += (uint64_t)a[5] * b[9] + (uint64_t)a[6] * b[8] + (uint64_t)a[7] * b[7] + (uint64_t)a[8] * b[6] + (uint64_t)a[9] * b[5]; VERIFY_BITS(d, 62); - // [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] + /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */ uint64_t u4 = d & M; d >>= 26; c += u4 * R0; VERIFY_BITS(u4, 26); VERIFY_BITS(d, 36); - // VERIFY_BITS(c, 64); - // [d u4 0 0 0 0 t9 0 0 0 0 c-u4*R0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] + /* VERIFY_BITS(c, 64); */ + /* [d u4 0 0 0 0 t9 0 0 0 0 c-u4*R0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */ uint32_t t4 = c & M; c >>= 26; c += u4 * R1; VERIFY_BITS(t4, 26); VERIFY_BITS(c, 39); - // [d u4 0 0 0 0 t9 0 0 0 c-u4*R1 t4-u4*R0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] - // [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 
t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] + /* [d u4 0 0 0 0 t9 0 0 0 c-u4*R1 t4-u4*R0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */ + /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */ c += (uint64_t)a[0] * b[5] + (uint64_t)a[1] * b[4] @@ -429,23 +432,23 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(const uint32_t *a, const uin + (uint64_t)a[4] * b[1] + (uint64_t)a[5] * b[0]; VERIFY_BITS(c, 63); - // [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ d += (uint64_t)a[6] * b[9] + (uint64_t)a[7] * b[8] + (uint64_t)a[8] * b[7] + (uint64_t)a[9] * b[6]; VERIFY_BITS(d, 62); - // [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ uint64_t u5 = d & M; d >>= 26; c += u5 * R0; VERIFY_BITS(u5, 26); VERIFY_BITS(d, 36); - // VERIFY_BITS(c, 64); - // [d u5 0 0 0 0 0 t9 0 0 0 c-u5*R0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] + /* VERIFY_BITS(c, 64); */ + /* [d u5 0 0 0 0 0 t9 0 0 0 c-u5*R0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ uint32_t t5 = c & M; c >>= 26; c += u5 * R1; VERIFY_BITS(t5, 26); VERIFY_BITS(c, 39); - // [d u5 0 0 0 0 0 t9 0 0 c-u5*R1 t5-u5*R0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] - // [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] + /* [d u5 0 0 0 0 0 t9 0 0 c-u5*R1 t5-u5*R0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ + /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ c += (uint64_t)a[0] * b[6] + (uint64_t)a[1] * b[5] @@ -455,22 +458,22 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(const uint32_t *a, const uin + (uint64_t)a[5] * b[1] + (uint64_t)a[6] * b[0]; VERIFY_BITS(c, 63); - // [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */ d += (uint64_t)a[7] * b[9] + (uint64_t)a[8] * b[8] + (uint64_t)a[9] * b[7]; VERIFY_BITS(d, 61); - // [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */ uint64_t u6 = d & M; d >>= 26; c += u6 * R0; VERIFY_BITS(u6, 26); VERIFY_BITS(d, 35); - // VERIFY_BITS(c, 64); - // [d u6 0 0 0 0 0 0 t9 0 0 c-u6*R0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] + /* VERIFY_BITS(c, 64); */ + /* [d u6 0 0 0 0 0 0 t9 0 0 c-u6*R0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */ uint32_t t6 = c & M; c >>= 26; c += u6 * R1; VERIFY_BITS(t6, 26); VERIFY_BITS(c, 39); - // [d u6 0 0 0 0 0 0 t9 0 c-u6*R1 t6-u6*R0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] - // [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] + /* [d u6 0 0 0 0 0 0 t9 0 c-u6*R1 t6-u6*R0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */ + /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 
0 0 p6 p5 p4 p3 p2 p1 p0] */ c += (uint64_t)a[0] * b[7] + (uint64_t)a[1] * b[6] @@ -480,24 +483,24 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(const uint32_t *a, const uin + (uint64_t)a[5] * b[2] + (uint64_t)a[6] * b[1] + (uint64_t)a[7] * b[0]; - // VERIFY_BITS(c, 64); + /* VERIFY_BITS(c, 64); */ VERIFY_CHECK(c <= 0x8000007C00000007ULL); - // [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ d += (uint64_t)a[8] * b[9] + (uint64_t)a[9] * b[8]; VERIFY_BITS(d, 58); - // [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ uint64_t u7 = d & M; d >>= 26; c += u7 * R0; VERIFY_BITS(u7, 26); VERIFY_BITS(d, 32); - // VERIFY_BITS(c, 64); + /* VERIFY_BITS(c, 64); */ VERIFY_CHECK(c <= 0x800001703FFFC2F7ULL); - // [d u7 0 0 0 0 0 0 0 t9 0 c-u7*R0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d u7 0 0 0 0 0 0 0 t9 0 c-u7*R0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ uint32_t t7 = c & M; c >>= 26; c += u7 * R1; VERIFY_BITS(t7, 26); VERIFY_BITS(c, 38); - // [d u7 0 0 0 0 0 0 0 t9 c-u7*R1 t7-u7*R0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] - // [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d u7 0 0 0 0 0 0 0 t9 c-u7*R1 t7-u7*R0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ + /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ c += (uint64_t)a[0] * b[8] + (uint64_t)a[1] * b[7] @@ -508,73 +511,73 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(const uint32_t *a, const uin + (uint64_t)a[6] * b[2] + (uint64_t)a[7] * b[1] + (uint64_t)a[8] * b[0]; - // VERIFY_BITS(c, 64); + /* VERIFY_BITS(c, 64); */ VERIFY_CHECK(c <= 0x9000007B80000008ULL); - // [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ d += (uint64_t)a[9] * b[9]; VERIFY_BITS(d, 57); - // [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ uint64_t u8 = d & M; d >>= 26; c += u8 * R0; VERIFY_BITS(u8, 26); VERIFY_BITS(d, 31); - // VERIFY_BITS(c, 64); + /* VERIFY_BITS(c, 64); */ VERIFY_CHECK(c <= 0x9000016FBFFFC2F8ULL); - // [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[3] = t3; VERIFY_BITS(r[3], 26); - // [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 t4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 t4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[4] = t4; 
VERIFY_BITS(r[4], 26); - // [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[5] = t5; VERIFY_BITS(r[5], 26); - // [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[6] = t6; VERIFY_BITS(r[6], 26); - // [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[7] = t7; VERIFY_BITS(r[7], 26); - // [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[8] = c & M; c >>= 26; c += u8 * R1; VERIFY_BITS(r[8], 26); VERIFY_BITS(c, 39); - // [d u8 0 0 0 0 0 0 0 0 t9+c-u8*R1 r8-u8*R0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] - // [d 0 0 0 0 0 0 0 0 0 t9+c r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d u8 0 0 0 0 0 0 0 0 t9+c-u8*R1 r8-u8*R0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ + /* [d 0 0 0 0 0 0 0 0 0 t9+c r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ c += d * R0 + t9; VERIFY_BITS(c, 45); - // [d 0 0 0 0 0 0 0 0 0 c-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 0 0 0 0 0 0 c-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[9] = c & (M >> 4); c >>= 22; c += d * (R1 << 4); VERIFY_BITS(r[9], 22); VERIFY_BITS(c, 46); - // [d 0 0 0 0 0 0 0 0 r9+((c-d*R1<<4)<<22)-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] - // [d 0 0 0 0 0 0 0 -d*R1 r9+(c<<22)-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] - // [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 0 0 0 0 0 r9+((c-d*R1<<4)<<22)-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ + /* [d 0 0 0 0 0 0 0 -d*R1 r9+(c<<22)-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ + /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ d = c * (R0 >> 4) + t0; VERIFY_BITS(d, 56); - // [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 d-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 d-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[0] = d & M; d >>= 26; VERIFY_BITS(r[0], 26); VERIFY_BITS(d, 30); - // [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1+d r0-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [r9+(c<<22) r8 r7 r6 
r5 r4 r3 t2 t1+d r0-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ d += c * (R1 >> 4) + t1; VERIFY_BITS(d, 53); VERIFY_CHECK(d <= 0x10000003FFFFBFULL); - // [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 d-c*R1>>4 r0-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] - // [r9 r8 r7 r6 r5 r4 r3 t2 d r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 d-c*R1>>4 r0-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ + /* [r9 r8 r7 r6 r5 r4 r3 t2 d r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[1] = d & M; d >>= 26; VERIFY_BITS(r[1], 26); VERIFY_BITS(d, 27); VERIFY_CHECK(d <= 0x4000000ULL); - // [r9 r8 r7 r6 r5 r4 r3 t2+d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [r9 r8 r7 r6 r5 r4 r3 t2+d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ d += t2; VERIFY_BITS(d, 27); - // [r9 r8 r7 r6 r5 r4 r3 d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [r9 r8 r7 r6 r5 r4 r3 d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[2] = d; VERIFY_BITS(r[2], 27); - // [r9 r8 r7 r6 r5 r4 r3 r2 r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [r9 r8 r7 r6 r5 r4 r3 r2 r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ } SECP256K1_INLINE static void secp256k1_fe_sqr_inner(const uint32_t *a, uint32_t *r) { @@ -590,9 +593,10 @@ SECP256K1_INLINE static void secp256k1_fe_sqr_inner(const uint32_t *a, uint32_t VERIFY_BITS(a[9], 26); const uint32_t M = 0x3FFFFFFUL, R0 = 0x3D10UL, R1 = 0x400UL; - // [... a b c] is a shorthand for ... + a<<52 + b<<26 + c<<0 mod n. - // px is a shorthand for sum(a[i]*a[x-i], i=0..x). - // Note that [x 0 0 0 0 0 0 0 0 0 0] = [x*R1 x*R0]. + /** [... a b c] is a shorthand for ... + a<<52 + b<<26 + c<<0 mod n. + * px is a shorthand for sum(a[i]*a[x-i], i=0..x). + * Note that [x 0 0 0 0 0 0 0 0 0 0] = [x*R1 x*R0]. 
+ */ uint64_t c, d; @@ -601,251 +605,251 @@ SECP256K1_INLINE static void secp256k1_fe_sqr_inner(const uint32_t *a, uint32_t + (uint64_t)(a[2]*2) * a[7] + (uint64_t)(a[3]*2) * a[6] + (uint64_t)(a[4]*2) * a[5]; - // VERIFY_BITS(d, 64); - // [d 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0] + /* VERIFY_BITS(d, 64); */ + /* [d 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0] */ uint32_t t9 = d & M; d >>= 26; VERIFY_BITS(t9, 26); VERIFY_BITS(d, 38); - // [d t9 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0] + /* [d t9 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0] */ c = (uint64_t)a[0] * a[0]; VERIFY_BITS(c, 60); - // [d t9 0 0 0 0 0 0 0 0 c] = [p9 0 0 0 0 0 0 0 0 p0] + /* [d t9 0 0 0 0 0 0 0 0 c] = [p9 0 0 0 0 0 0 0 0 p0] */ d += (uint64_t)(a[1]*2) * a[9] + (uint64_t)(a[2]*2) * a[8] + (uint64_t)(a[3]*2) * a[7] + (uint64_t)(a[4]*2) * a[6] + (uint64_t)a[5] * a[5]; VERIFY_BITS(d, 63); - // [d t9 0 0 0 0 0 0 0 0 c] = [p10 p9 0 0 0 0 0 0 0 0 p0] + /* [d t9 0 0 0 0 0 0 0 0 c] = [p10 p9 0 0 0 0 0 0 0 0 p0] */ uint64_t u0 = d & M; d >>= 26; c += u0 * R0; VERIFY_BITS(u0, 26); VERIFY_BITS(d, 37); VERIFY_BITS(c, 61); - // [d u0 t9 0 0 0 0 0 0 0 0 c-u0*R0] = [p10 p9 0 0 0 0 0 0 0 0 p0] + /* [d u0 t9 0 0 0 0 0 0 0 0 c-u0*R0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */ uint32_t t0 = c & M; c >>= 26; c += u0 * R1; VERIFY_BITS(t0, 26); VERIFY_BITS(c, 37); - // [d u0 t9 0 0 0 0 0 0 0 c-u0*R1 t0-u0*R0] = [p10 p9 0 0 0 0 0 0 0 0 p0] - // [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 0 p0] + /* [d u0 t9 0 0 0 0 0 0 0 c-u0*R1 t0-u0*R0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */ + /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */ c += (uint64_t)(a[0]*2) * a[1]; VERIFY_BITS(c, 62); - // [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 p1 p0] + /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 p1 p0] */ d += (uint64_t)(a[2]*2) * a[9] + (uint64_t)(a[3]*2) * a[8] + (uint64_t)(a[4]*2) * a[7] + (uint64_t)(a[5]*2) * a[6]; VERIFY_BITS(d, 63); - // [d 0 t9 0 0 0 0 0 0 0 c t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] + /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */ uint64_t u1 = d & M; d >>= 26; c += u1 * R0; VERIFY_BITS(u1, 26); VERIFY_BITS(d, 37); VERIFY_BITS(c, 63); - // [d u1 0 t9 0 0 0 0 0 0 0 c-u1*R0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] + /* [d u1 0 t9 0 0 0 0 0 0 0 c-u1*R0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */ uint32_t t1 = c & M; c >>= 26; c += u1 * R1; VERIFY_BITS(t1, 26); VERIFY_BITS(c, 38); - // [d u1 0 t9 0 0 0 0 0 0 c-u1*R1 t1-u1*R0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] - // [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] + /* [d u1 0 t9 0 0 0 0 0 0 c-u1*R1 t1-u1*R0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */ + /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */ c += (uint64_t)(a[0]*2) * a[2] + (uint64_t)a[1] * a[1]; VERIFY_BITS(c, 62); - // [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] + /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ d += (uint64_t)(a[3]*2) * a[9] + (uint64_t)(a[4]*2) * a[8] + (uint64_t)(a[5]*2) * a[7] + (uint64_t)a[6] * a[6]; VERIFY_BITS(d, 63); - // [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] + /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ uint64_t u2 = d & M; d >>= 26; c += u2 * R0; VERIFY_BITS(u2, 26); VERIFY_BITS(d, 37); VERIFY_BITS(c, 63); - // [d u2 0 0 t9 0 0 0 0 0 0 c-u2*R0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] + /* [d u2 0 0 t9 0 0 0 0 0 0 c-u2*R0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ uint32_t t2 = c & M; c >>= 
26; c += u2 * R1; VERIFY_BITS(t2, 26); VERIFY_BITS(c, 38); - // [d u2 0 0 t9 0 0 0 0 0 c-u2*R1 t2-u2*R0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] - // [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] + /* [d u2 0 0 t9 0 0 0 0 0 c-u2*R1 t2-u2*R0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ + /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ c += (uint64_t)(a[0]*2) * a[3] + (uint64_t)(a[1]*2) * a[2]; VERIFY_BITS(c, 63); - // [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] + /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */ d += (uint64_t)(a[4]*2) * a[9] + (uint64_t)(a[5]*2) * a[8] + (uint64_t)(a[6]*2) * a[7]; VERIFY_BITS(d, 63); - // [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] + /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */ uint64_t u3 = d & M; d >>= 26; c += u3 * R0; VERIFY_BITS(u3, 26); VERIFY_BITS(d, 37); - // VERIFY_BITS(c, 64); - // [d u3 0 0 0 t9 0 0 0 0 0 c-u3*R0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] + /* VERIFY_BITS(c, 64); */ + /* [d u3 0 0 0 t9 0 0 0 0 0 c-u3*R0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */ uint32_t t3 = c & M; c >>= 26; c += u3 * R1; VERIFY_BITS(t3, 26); VERIFY_BITS(c, 39); - // [d u3 0 0 0 t9 0 0 0 0 c-u3*R1 t3-u3*R0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] - // [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] + /* [d u3 0 0 0 t9 0 0 0 0 c-u3*R1 t3-u3*R0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */ + /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */ c += (uint64_t)(a[0]*2) * a[4] + (uint64_t)(a[1]*2) * a[3] + (uint64_t)a[2] * a[2]; VERIFY_BITS(c, 63); - // [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] + /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */ d += (uint64_t)(a[5]*2) * a[9] + (uint64_t)(a[6]*2) * a[8] + (uint64_t)a[7] * a[7]; VERIFY_BITS(d, 62); - // [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] + /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */ uint64_t u4 = d & M; d >>= 26; c += u4 * R0; VERIFY_BITS(u4, 26); VERIFY_BITS(d, 36); - // VERIFY_BITS(c, 64); - // [d u4 0 0 0 0 t9 0 0 0 0 c-u4*R0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] + /* VERIFY_BITS(c, 64); */ + /* [d u4 0 0 0 0 t9 0 0 0 0 c-u4*R0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */ uint32_t t4 = c & M; c >>= 26; c += u4 * R1; VERIFY_BITS(t4, 26); VERIFY_BITS(c, 39); - // [d u4 0 0 0 0 t9 0 0 0 c-u4*R1 t4-u4*R0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] - // [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] + /* [d u4 0 0 0 0 t9 0 0 0 c-u4*R1 t4-u4*R0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */ + /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */ c += (uint64_t)(a[0]*2) * a[5] + (uint64_t)(a[1]*2) * a[4] + (uint64_t)(a[2]*2) * a[3]; VERIFY_BITS(c, 63); - // [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ d += (uint64_t)(a[6]*2) * a[9] + (uint64_t)(a[7]*2) * a[8]; VERIFY_BITS(d, 62); - // [d 0 0 0 0 0 t9 0 0 0 c t4 
t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ uint64_t u5 = d & M; d >>= 26; c += u5 * R0; VERIFY_BITS(u5, 26); VERIFY_BITS(d, 36); - // VERIFY_BITS(c, 64); - // [d u5 0 0 0 0 0 t9 0 0 0 c-u5*R0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] + /* VERIFY_BITS(c, 64); */ + /* [d u5 0 0 0 0 0 t9 0 0 0 c-u5*R0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ uint32_t t5 = c & M; c >>= 26; c += u5 * R1; VERIFY_BITS(t5, 26); VERIFY_BITS(c, 39); - // [d u5 0 0 0 0 0 t9 0 0 c-u5*R1 t5-u5*R0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] - // [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] + /* [d u5 0 0 0 0 0 t9 0 0 c-u5*R1 t5-u5*R0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ + /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ c += (uint64_t)(a[0]*2) * a[6] + (uint64_t)(a[1]*2) * a[5] + (uint64_t)(a[2]*2) * a[4] + (uint64_t)a[3] * a[3]; VERIFY_BITS(c, 63); - // [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */ d += (uint64_t)(a[7]*2) * a[9] + (uint64_t)a[8] * a[8]; VERIFY_BITS(d, 61); - // [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */ uint64_t u6 = d & M; d >>= 26; c += u6 * R0; VERIFY_BITS(u6, 26); VERIFY_BITS(d, 35); - // VERIFY_BITS(c, 64); - // [d u6 0 0 0 0 0 0 t9 0 0 c-u6*R0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] + /* VERIFY_BITS(c, 64); */ + /* [d u6 0 0 0 0 0 0 t9 0 0 c-u6*R0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */ uint32_t t6 = c & M; c >>= 26; c += u6 * R1; VERIFY_BITS(t6, 26); VERIFY_BITS(c, 39); - // [d u6 0 0 0 0 0 0 t9 0 c-u6*R1 t6-u6*R0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] - // [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] + /* [d u6 0 0 0 0 0 0 t9 0 c-u6*R1 t6-u6*R0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */ + /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */ c += (uint64_t)(a[0]*2) * a[7] + (uint64_t)(a[1]*2) * a[6] + (uint64_t)(a[2]*2) * a[5] + (uint64_t)(a[3]*2) * a[4]; - // VERIFY_BITS(c, 64); + /* VERIFY_BITS(c, 64); */ VERIFY_CHECK(c <= 0x8000007C00000007ULL); - // [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ d += (uint64_t)(a[8]*2) * a[9]; VERIFY_BITS(d, 58); - // [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ uint64_t u7 = d & M; d >>= 26; c += u7 * R0; VERIFY_BITS(u7, 26); VERIFY_BITS(d, 32); - // VERIFY_BITS(c, 64); + /* VERIFY_BITS(c, 64); */ VERIFY_CHECK(c <= 0x800001703FFFC2F7ULL); - // [d u7 
0 0 0 0 0 0 0 t9 0 c-u7*R0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d u7 0 0 0 0 0 0 0 t9 0 c-u7*R0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ uint32_t t7 = c & M; c >>= 26; c += u7 * R1; VERIFY_BITS(t7, 26); VERIFY_BITS(c, 38); - // [d u7 0 0 0 0 0 0 0 t9 c-u7*R1 t7-u7*R0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] - // [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d u7 0 0 0 0 0 0 0 t9 c-u7*R1 t7-u7*R0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ + /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ c += (uint64_t)(a[0]*2) * a[8] + (uint64_t)(a[1]*2) * a[7] + (uint64_t)(a[2]*2) * a[6] + (uint64_t)(a[3]*2) * a[5] + (uint64_t)a[4] * a[4]; - // VERIFY_BITS(c, 64); + /* VERIFY_BITS(c, 64); */ VERIFY_CHECK(c <= 0x9000007B80000008ULL); - // [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ d += (uint64_t)a[9] * a[9]; VERIFY_BITS(d, 57); - // [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ uint64_t u8 = d & M; d >>= 26; c += u8 * R0; VERIFY_BITS(u8, 26); VERIFY_BITS(d, 31); - // VERIFY_BITS(c, 64); + /* VERIFY_BITS(c, 64); */ VERIFY_CHECK(c <= 0x9000016FBFFFC2F8ULL); - // [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[3] = t3; VERIFY_BITS(r[3], 26); - // [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 t4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 t4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[4] = t4; VERIFY_BITS(r[4], 26); - // [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[5] = t5; VERIFY_BITS(r[5], 26); - // [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[6] = t6; VERIFY_BITS(r[6], 26); - // [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[7] = t7; VERIFY_BITS(r[7], 26); - // [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 
p6 p5 p4 p3 p2 p1 p0] */ r[8] = c & M; c >>= 26; c += u8 * R1; VERIFY_BITS(r[8], 26); VERIFY_BITS(c, 39); - // [d u8 0 0 0 0 0 0 0 0 t9+c-u8*R1 r8-u8*R0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] - // [d 0 0 0 0 0 0 0 0 0 t9+c r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d u8 0 0 0 0 0 0 0 0 t9+c-u8*R1 r8-u8*R0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ + /* [d 0 0 0 0 0 0 0 0 0 t9+c r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ c += d * R0 + t9; VERIFY_BITS(c, 45); - // [d 0 0 0 0 0 0 0 0 0 c-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 0 0 0 0 0 0 c-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[9] = c & (M >> 4); c >>= 22; c += d * (R1 << 4); VERIFY_BITS(r[9], 22); VERIFY_BITS(c, 46); - // [d 0 0 0 0 0 0 0 0 r9+((c-d*R1<<4)<<22)-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] - // [d 0 0 0 0 0 0 0 -d*R1 r9+(c<<22)-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] - // [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 0 0 0 0 0 r9+((c-d*R1<<4)<<22)-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ + /* [d 0 0 0 0 0 0 0 -d*R1 r9+(c<<22)-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ + /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ d = c * (R0 >> 4) + t0; VERIFY_BITS(d, 56); - // [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 d-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 d-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[0] = d & M; d >>= 26; VERIFY_BITS(r[0], 26); VERIFY_BITS(d, 30); - // [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1+d r0-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1+d r0-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ d += c * (R1 >> 4) + t1; VERIFY_BITS(d, 53); VERIFY_CHECK(d <= 0x10000003FFFFBFULL); - // [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 d-c*R1>>4 r0-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] - // [r9 r8 r7 r6 r5 r4 r3 t2 d r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 d-c*R1>>4 r0-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ + /* [r9 r8 r7 r6 r5 r4 r3 t2 d r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[1] = d & M; d >>= 26; VERIFY_BITS(r[1], 26); VERIFY_BITS(d, 27); VERIFY_CHECK(d <= 0x4000000ULL); - // [r9 r8 r7 r6 r5 r4 r3 t2+d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [r9 r8 r7 r6 r5 r4 r3 t2+d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ d += t2; VERIFY_BITS(d, 27); - // [r9 r8 r7 r6 r5 r4 r3 d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [r9 r8 r7 r6 r5 r4 r3 d r1 r0] = [p18 p17 p16 p15 
p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[2] = d; VERIFY_BITS(r[2], 27); - // [r9 r8 r7 r6 r5 r4 r3 r2 r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [r9 r8 r7 r6 r5 r4 r3 r2 r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ } diff --git a/src/field_5x52.h b/src/field_5x52.h index 9d5de2c..aeb0a6a 100644 --- a/src/field_5x52.h +++ b/src/field_5x52.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013 Pieter Wuille -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_FIELD_REPR_ #define _SECP256K1_FIELD_REPR_ @@ -8,7 +10,7 @@ #include typedef struct { - // X = sum(i=0..4, elem[i]*2^52) mod n + /* X = sum(i=0..4, elem[i]*2^52) mod n */ uint64_t n[5]; #ifdef VERIFY int magnitude; diff --git a/src/field_5x52_asm_impl.h b/src/field_5x52_asm_impl.h index 93c6ab6..f29605b 100644 --- a/src/field_5x52_asm_impl.h +++ b/src/field_5x52_asm_impl.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013 Pieter Wuille -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2013 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_FIELD_INNER5X52_IMPL_H_ #define _SECP256K1_FIELD_INNER5X52_IMPL_H_ diff --git a/src/field_5x52_impl.h b/src/field_5x52_impl.h index b638a82..d1b06d0 100644 --- a/src/field_5x52_impl.h +++ b/src/field_5x52_impl.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013 Pieter Wuille -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_FIELD_REPR_IMPL_H_ #define _SECP256K1_FIELD_REPR_IMPL_H_ @@ -60,35 +62,35 @@ static void secp256k1_fe_verify(const secp256k1_fe_t *a) { static void secp256k1_fe_normalize(secp256k1_fe_t *r) { uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; - // Reduce t4 at the start so there will be at most a single carry from the first pass + /* Reduce t4 at the start so there will be at most a single carry from the first pass */ uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL; uint64_t m; - // The first pass ensures the magnitude is 1, ... + /* The first pass ensures the magnitude is 1, ... */ t0 += x * 0x1000003D1ULL; t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL; t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; m = t1; t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; m &= t2; t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; m &= t3; - // ... 
except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) + /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */ VERIFY_CHECK(t4 >> 49 == 0); - // At most a single final reduction is needed; check if the value is >= the field characteristic + /* At most a single final reduction is needed; check if the value is >= the field characteristic */ x = (t4 >> 48) | ((t4 == 0x0FFFFFFFFFFFFULL) & (m == 0xFFFFFFFFFFFFFULL) & (t0 >= 0xFFFFEFFFFFC2FULL)); - // Apply the final reduction (for constant-time behaviour, we do it always) + /* Apply the final reduction (for constant-time behaviour, we do it always) */ t0 += x * 0x1000003D1ULL; t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL; t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; - // If t4 didn't carry to bit 48 already, then it should have after any final reduction + /* If t4 didn't carry to bit 48 already, then it should have after any final reduction */ VERIFY_CHECK(t4 >> 48 == x); - // Mask off the possible multiple of 2^256 from the final reduction + /* Mask off the possible multiple of 2^256 from the final reduction */ t4 &= 0x0FFFFFFFFFFFFULL; r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4; diff --git a/src/field_5x52_int128_impl.h b/src/field_5x52_int128_impl.h index a7b83ca..c476428 100644 --- a/src/field_5x52_int128_impl.h +++ b/src/field_5x52_int128_impl.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013 Pieter Wuille -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_FIELD_INNER5X52_IMPL_H_ #define _SECP256K1_FIELD_INNER5X52_IMPL_H_ @@ -26,9 +28,10 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(const uint64_t *a, const uin VERIFY_BITS(b[4], 52); const uint64_t M = 0xFFFFFFFFFFFFFULL, R = 0x1000003D10ULL; - // [... a b c] is a shorthand for ... + a<<104 + b<<52 + c<<0 mod n. - // px is a shorthand for sum(a[i]*b[x-i], i=0..x). - // Note that [x 0 0 0 0 0] = [x*R]. + /* [... a b c] is a shorthand for ... + a<<104 + b<<52 + c<<0 mod n. + * px is a shorthand for sum(a[i]*b[x-i], i=0..x). + * Note that [x 0 0 0 0 0] = [x*R]. 
+ */ __int128 c, d; @@ -37,18 +40,18 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(const uint64_t *a, const uin + (__int128)a[2] * b[1] + (__int128)a[3] * b[0]; VERIFY_BITS(d, 114); - // [d 0 0 0] = [p3 0 0 0] + /* [d 0 0 0] = [p3 0 0 0] */ c = (__int128)a[4] * b[4]; VERIFY_BITS(c, 112); - // [c 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] + /* [c 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ d += (c & M) * R; c >>= 52; VERIFY_BITS(d, 115); VERIFY_BITS(c, 60); - // [c 0 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] + /* [c 0 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ uint64_t t3 = d & M; d >>= 52; VERIFY_BITS(t3, 52); VERIFY_BITS(d, 63); - // [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 0 p3 0 0 0] + /* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ d += (__int128)a[0] * b[4] + (__int128)a[1] * b[3] @@ -56,99 +59,99 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(const uint64_t *a, const uin + (__int128)a[3] * b[1] + (__int128)a[4] * b[0]; VERIFY_BITS(d, 115); - // [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] + /* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ d += c * R; VERIFY_BITS(d, 116); - // [d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] + /* [d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ uint64_t t4 = d & M; d >>= 52; VERIFY_BITS(t4, 52); VERIFY_BITS(d, 64); - // [d t4 t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] + /* [d t4 t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ uint64_t tx = (t4 >> 48); t4 &= (M >> 4); VERIFY_BITS(tx, 4); VERIFY_BITS(t4, 48); - // [d t4+(tx<<48) t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] + /* [d t4+(tx<<48) t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ c = (__int128)a[0] * b[0]; VERIFY_BITS(c, 112); - // [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 0 p4 p3 0 0 p0] + /* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 0 p4 p3 0 0 p0] */ d += (__int128)a[1] * b[4] + (__int128)a[2] * b[3] + (__int128)a[3] * b[2] + (__int128)a[4] * b[1]; VERIFY_BITS(d, 115); - // [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] + /* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ uint64_t u0 = d & M; d >>= 52; VERIFY_BITS(u0, 52); VERIFY_BITS(d, 63); - // [d u0 t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] - // [d 0 t4+(tx<<48)+(u0<<52) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] + /* [d u0 t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ + /* [d 0 t4+(tx<<48)+(u0<<52) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ u0 = (u0 << 4) | tx; VERIFY_BITS(u0, 56); - // [d 0 t4+(u0<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] + /* [d 0 t4+(u0<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ c += (__int128)u0 * (R >> 4); VERIFY_BITS(c, 115); - // [d 0 t4 t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] + /* [d 0 t4 t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ uint64_t t0 = c & M; c >>= 52; VERIFY_BITS(t0, 52); VERIFY_BITS(c, 61); - // [d 0 t4 t3 0 c t0] = [p8 0 0 p5 p4 p3 0 0 p0] + /* [d 0 t4 t3 0 c t0] = [p8 0 0 p5 p4 p3 0 0 p0] */ c += (__int128)a[0] * b[1] + (__int128)a[1] * b[0]; VERIFY_BITS(c, 114); - // [d 0 t4 t3 0 c t0] = [p8 0 0 p5 p4 p3 0 p1 p0] + /* [d 0 t4 t3 0 c t0] = [p8 0 0 p5 p4 p3 0 p1 p0] */ d += (__int128)a[2] * b[4] + (__int128)a[3] * b[3] + (__int128)a[4] * b[2]; VERIFY_BITS(d, 114); - // [d 0 t4 t3 0 c t0] = [p8 0 p6 p5 p4 p3 0 p1 p0] + /* [d 0 t4 t3 0 c t0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ c += (d & M) * R; d >>= 52; VERIFY_BITS(c, 115); VERIFY_BITS(d, 62); - // [d 0 0 t4 t3 0 c t0] = [p8 0 p6 p5 p4 p3 0 p1 p0] + /* [d 0 0 t4 t3 0 c t0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ uint64_t t1 = c & M; c >>= 52; VERIFY_BITS(t1, 52); VERIFY_BITS(c, 63); - // [d 0 0 t4 t3 c t1 t0] = [p8 0 p6 p5 p4 p3 0 p1 p0] + /* [d 0 0 t4 t3 c t1 t0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ c 
+= (__int128)a[0] * b[2] + (__int128)a[1] * b[1] + (__int128)a[2] * b[0]; VERIFY_BITS(c, 114); - // [d 0 0 t4 t3 c t1 t0] = [p8 0 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 t4 t3 c t1 t0] = [p8 0 p6 p5 p4 p3 p2 p1 p0] */ d += (__int128)a[3] * b[4] + (__int128)a[4] * b[3]; VERIFY_BITS(d, 114); - // [d 0 0 t4 t3 c t1 t0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 t4 t3 c t1 t0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ c += (d & M) * R; d >>= 52; VERIFY_BITS(c, 115); VERIFY_BITS(d, 62); - // [d 0 0 0 t4 t3 c t1 t0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 t4 t3 c t1 t0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[0] = t0; VERIFY_BITS(r[0], 52); - // [d 0 0 0 t4 t3 c t1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 t4 t3 c t1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[1] = t1; VERIFY_BITS(r[1], 52); - // [d 0 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[2] = c & M; c >>= 52; VERIFY_BITS(r[2], 52); VERIFY_BITS(c, 63); - // [d 0 0 0 t4 t3+c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 t4 t3+c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ c += d * R + t3;; VERIFY_BITS(c, 100); - // [t4 c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [t4 c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[3] = c & M; c >>= 52; VERIFY_BITS(r[3], 52); VERIFY_BITS(c, 48); - // [t4+c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [t4+c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ c += t4; VERIFY_BITS(c, 49); - // [c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] - r[4] = c; + /* [c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ + r[4] = c; VERIFY_BITS(r[4], 49); - // [r4 r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [r4 r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ } SECP256K1_INLINE static void secp256k1_fe_sqr_inner(const uint64_t *a, uint64_t *r) { @@ -159,9 +162,10 @@ SECP256K1_INLINE static void secp256k1_fe_sqr_inner(const uint64_t *a, uint64_t VERIFY_BITS(a[4], 52); const uint64_t M = 0xFFFFFFFFFFFFFULL, R = 0x1000003D10ULL; - // [... a b c] is a shorthand for ... + a<<104 + b<<52 + c<<0 mod n. - // px is a shorthand for sum(a[i]*a[x-i], i=0..x). - // Note that [x 0 0 0 0 0] = [x*R]. + /** [... a b c] is a shorthand for ... + a<<104 + b<<52 + c<<0 mod n. + * px is a shorthand for sum(a[i]*a[x-i], i=0..x). + * Note that [x 0 0 0 0 0] = [x*R]. 
+ */ __int128 c, d; @@ -170,106 +174,106 @@ SECP256K1_INLINE static void secp256k1_fe_sqr_inner(const uint64_t *a, uint64_t d = (__int128)(a0*2) * a3 + (__int128)(a1*2) * a2; VERIFY_BITS(d, 114); - // [d 0 0 0] = [p3 0 0 0] + /* [d 0 0 0] = [p3 0 0 0] */ c = (__int128)a4 * a4; VERIFY_BITS(c, 112); - // [c 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] + /* [c 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ d += (c & M) * R; c >>= 52; VERIFY_BITS(d, 115); VERIFY_BITS(c, 60); - // [c 0 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] + /* [c 0 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ uint64_t t3 = d & M; d >>= 52; VERIFY_BITS(t3, 52); VERIFY_BITS(d, 63); - // [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 0 p3 0 0 0] + /* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ a4 *= 2; d += (__int128)a0 * a4 + (__int128)(a1*2) * a3 + (__int128)a2 * a2; VERIFY_BITS(d, 115); - // [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] + /* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ d += c * R; VERIFY_BITS(d, 116); - // [d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] + /* [d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ uint64_t t4 = d & M; d >>= 52; VERIFY_BITS(t4, 52); VERIFY_BITS(d, 64); - // [d t4 t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] + /* [d t4 t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ uint64_t tx = (t4 >> 48); t4 &= (M >> 4); VERIFY_BITS(tx, 4); VERIFY_BITS(t4, 48); - // [d t4+(tx<<48) t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] + /* [d t4+(tx<<48) t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ c = (__int128)a0 * a0; VERIFY_BITS(c, 112); - // [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 0 p4 p3 0 0 p0] + /* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 0 p4 p3 0 0 p0] */ d += (__int128)a1 * a4 + (__int128)(a2*2) * a3; VERIFY_BITS(d, 114); - // [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] + /* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ uint64_t u0 = d & M; d >>= 52; VERIFY_BITS(u0, 52); VERIFY_BITS(d, 62); - // [d u0 t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] - // [d 0 t4+(tx<<48)+(u0<<52) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] + /* [d u0 t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ + /* [d 0 t4+(tx<<48)+(u0<<52) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ u0 = (u0 << 4) | tx; VERIFY_BITS(u0, 56); - // [d 0 t4+(u0<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] + /* [d 0 t4+(u0<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ c += (__int128)u0 * (R >> 4); VERIFY_BITS(c, 113); - // [d 0 t4 t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] + /* [d 0 t4 t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ r[0] = c & M; c >>= 52; VERIFY_BITS(r[0], 52); VERIFY_BITS(c, 61); - // [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 0 p0] + /* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 0 p0] */ a0 *= 2; c += (__int128)a0 * a1; VERIFY_BITS(c, 114); - // [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 p1 p0] + /* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 p1 p0] */ d += (__int128)a2 * a4 + (__int128)a3 * a3; VERIFY_BITS(d, 114); - // [d 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] + /* [d 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ c += (d & M) * R; d >>= 52; VERIFY_BITS(c, 115); VERIFY_BITS(d, 62); - // [d 0 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] + /* [d 0 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ r[1] = c & M; c >>= 52; VERIFY_BITS(r[1], 52); VERIFY_BITS(c, 63); - // [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] + /* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ c += (__int128)a0 * a2 + (__int128)a1 * a1; VERIFY_BITS(c, 114); - // [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 p2 p1 p0] */ d += (__int128)a3 * a4; VERIFY_BITS(d, 114); - // [d 0 0 t4 
t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ c += (d & M) * R; d >>= 52; VERIFY_BITS(c, 115); VERIFY_BITS(d, 62); - // [d 0 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[2] = c & M; c >>= 52; VERIFY_BITS(r[2], 52); VERIFY_BITS(c, 63); - // [d 0 0 0 t4 t3+c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [d 0 0 0 t4 t3+c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ c += d * R + t3;; VERIFY_BITS(c, 100); - // [t4 c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [t4 c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ r[3] = c & M; c >>= 52; VERIFY_BITS(r[3], 52); VERIFY_BITS(c, 48); - // [t4+c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [t4+c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ c += t4; VERIFY_BITS(c, 49); - // [c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] - r[4] = c; + /* [c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ + r[4] = c; VERIFY_BITS(r[4], 49); - // [r4 r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] + /* [r4 r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ } #endif diff --git a/src/field_gmp.h b/src/field_gmp.h index d51dea0..b390fd9 100644 --- a/src/field_gmp.h +++ b/src/field_gmp.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013 Pieter Wuille -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_FIELD_REPR_ #define _SECP256K1_FIELD_REPR_ diff --git a/src/field_gmp_impl.h b/src/field_gmp_impl.h index 98865b5..af4728e 100644 --- a/src/field_gmp_impl.h +++ b/src/field_gmp_impl.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013 Pieter Wuille -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
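/* Illustrative sketch (not part of the patch): the "[x 0 0 0 0 0] = [x*R]" identities in the
 * 26-bit and 52-bit field code above all come from the shape of the field prime,
 * p = 2^256 - 0x1000003D1, which lets anything that overflows past 2^256 be folded back in by
 * multiplying it with 0x1000003D1 and adding.  A scaled-down analogue with a hypothetical
 * 64-bit modulus q = 2^64 - TOY_C (the TOY_* names and the reuse of the constant are
 * assumptions made only for this sketch):
 */
#include <stdint.h>

#define TOY_C 0x1000003D1ULL /* assumed stand-in, mirroring 2^256 - p for the real field */

/* Reduce hi*2^64 + lo to a 64-bit value congruent to it modulo q = 2^64 - TOY_C. */
static uint64_t toy_reduce_128(uint64_t hi, uint64_t lo) {
    unsigned __int128 t = (unsigned __int128)hi * TOY_C + lo; /* hi*2^64 == hi*TOY_C (mod q) */
    while (t >> 64) {
        /* fold any remaining overflow the same way; the high part shrinks to nothing quickly */
        t = (unsigned __int128)(uint64_t)(t >> 64) * TOY_C + (uint64_t)t;
    }
    return (uint64_t)t; /* congruent to the input mod q; a final conditional
                         * subtraction of q would finish normalization */
}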
+/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_FIELD_REPR_IMPL_H_ #define _SECP256K1_FIELD_REPR_IMPL_H_ @@ -33,7 +35,7 @@ static void secp256k1_fe_normalize(secp256k1_fe_t *r) { mp_limb_t carry = mpn_add_1(r->n, r->n, FIELD_LIMBS, 0x1000003D1ULL * r->n[FIELD_LIMBS]); mpn_add_1(r->n, r->n, FIELD_LIMBS, 0x1000003D1ULL * carry); #else - mp_limb_t carry = mpn_add_1(r->n, r->n, FIELD_LIMBS, 0x3D1UL * r->n[FIELD_LIMBS]) + + mp_limb_t carry = mpn_add_1(r->n, r->n, FIELD_LIMBS, 0x3D1UL * r->n[FIELD_LIMBS]) + mpn_add_1(r->n+(32/GMP_NUMB_BITS), r->n+(32/GMP_NUMB_BITS), FIELD_LIMBS-(32/GMP_NUMB_BITS), r->n[FIELD_LIMBS] << (32 % GMP_NUMB_BITS)); mpn_add_1(r->n, r->n, FIELD_LIMBS, 0x3D1UL * carry); mpn_add_1(r->n+(32/GMP_NUMB_BITS), r->n+(32/GMP_NUMB_BITS), FIELD_LIMBS-(32/GMP_NUMB_BITS), carry << (32%GMP_NUMB_BITS)); @@ -119,10 +121,11 @@ SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe_t *r, const secp256k1 } static void secp256k1_fe_reduce(secp256k1_fe_t *r, mp_limb_t *tmp) { - // - // B1 B2 B3 B4 - // + C * A1 A2 A3 A4 - // + A1 A2 A3 A4 + /** + * B1 B2 B3 B4 + * + C * A1 A2 A3 A4 + * + A1 A2 A3 A4 + */ #if (GMP_NUMB_BITS >= 33) mp_limb_t o = mpn_addmul_1(tmp, tmp+FIELD_LIMBS, FIELD_LIMBS, 0x1000003D1ULL); diff --git a/src/field_impl.h b/src/field_impl.h index 1e52343..3a31e18 100644 --- a/src/field_impl.h +++ b/src/field_impl.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013 Pieter Wuille -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_FIELD_IMPL_H_ #define _SECP256K1_FIELD_IMPL_H_ @@ -66,9 +68,10 @@ static void secp256k1_fe_set_hex(secp256k1_fe_t *r, const char *a, int alen) { static int secp256k1_fe_sqrt(secp256k1_fe_t *r, const secp256k1_fe_t *a) { - // The binary representation of (p + 1)/4 has 3 blocks of 1s, with lengths in - // { 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block: - // 1, [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223] + /** The binary representation of (p + 1)/4 has 3 blocks of 1s, with lengths in + * { 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block: + * 1, [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223] + */ secp256k1_fe_t x2; secp256k1_fe_sqr(&x2, a); @@ -114,7 +117,7 @@ static int secp256k1_fe_sqrt(secp256k1_fe_t *r, const secp256k1_fe_t *a) { for (int j=0; j<3; j++) secp256k1_fe_sqr(&x223, &x223); secp256k1_fe_mul(&x223, &x223, &x3); - // The final result is then assembled using a sliding window over the blocks. + /* The final result is then assembled using a sliding window over the blocks. 
*/ secp256k1_fe_t t1 = x223; for (int j=0; j<23; j++) secp256k1_fe_sqr(&t1, &t1); @@ -124,7 +127,7 @@ static int secp256k1_fe_sqrt(secp256k1_fe_t *r, const secp256k1_fe_t *a) { secp256k1_fe_sqr(&t1, &t1); secp256k1_fe_sqr(r, &t1); - // Check that a square root was actually calculated + /* Check that a square root was actually calculated */ secp256k1_fe_sqr(&t1, r); secp256k1_fe_negate(&t1, &t1, 1); @@ -135,9 +138,10 @@ static int secp256k1_fe_sqrt(secp256k1_fe_t *r, const secp256k1_fe_t *a) { static void secp256k1_fe_inv(secp256k1_fe_t *r, const secp256k1_fe_t *a) { - // The binary representation of (p - 2) has 5 blocks of 1s, with lengths in - // { 1, 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block: - // [1], [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223] + /** The binary representation of (p - 2) has 5 blocks of 1s, with lengths in + * { 1, 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block: + * [1], [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223] + */ secp256k1_fe_t x2; secp256k1_fe_sqr(&x2, a); @@ -183,7 +187,7 @@ static void secp256k1_fe_inv(secp256k1_fe_t *r, const secp256k1_fe_t *a) { for (int j=0; j<3; j++) secp256k1_fe_sqr(&x223, &x223); secp256k1_fe_mul(&x223, &x223, &x3); - // The final result is then assembled using a sliding window over the blocks. + /* The final result is then assembled using a sliding window over the blocks. */ secp256k1_fe_t t1 = x223; for (int j=0; j<23; j++) secp256k1_fe_sqr(&t1, &t1); @@ -204,7 +208,7 @@ static void secp256k1_fe_inv_var(secp256k1_fe_t *r, const secp256k1_fe_t *a) { secp256k1_fe_t c = *a; secp256k1_fe_normalize(&c); secp256k1_fe_get_b32(b, &c); - secp256k1_num_t n; + secp256k1_num_t n; secp256k1_num_set_bin(&n, b, 32); secp256k1_num_mod_inverse(&n, &n, &secp256k1_fe_consts->p); secp256k1_num_get_bin(b, 32, &n); diff --git a/src/group.h b/src/group.h index bc5f7b5..ba02549 100644 --- a/src/group.h +++ b/src/group.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013 Pieter Wuille -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_GROUP_ #define _SECP256K1_GROUP_ @@ -12,25 +14,25 @@ typedef struct { secp256k1_fe_t x; secp256k1_fe_t y; - int infinity; // whether this represents the point at infinity + int infinity; /* whether this represents the point at infinity */ } secp256k1_ge_t; /** A group element of the secp256k1 curve, in jacobian coordinates. 
*/ typedef struct { - secp256k1_fe_t x; // actual X: x/z^2 - secp256k1_fe_t y; // actual Y: y/z^3 + secp256k1_fe_t x; /* actual X: x/z^2 */ + secp256k1_fe_t y; /* actual Y: y/z^3 */ secp256k1_fe_t z; - int infinity; // whether this represents the point at infinity + int infinity; /* whether this represents the point at infinity */ } secp256k1_gej_t; /** Global constants related to the group */ typedef struct { - secp256k1_num_t order; // the order of the curve (= order of its generator) - secp256k1_num_t half_order; // half the order of the curve (= order of its generator) - secp256k1_ge_t g; // the generator point + secp256k1_num_t order; /* the order of the curve (= order of its generator) */ + secp256k1_num_t half_order; /* half the order of the curve (= order of its generator) */ + secp256k1_ge_t g; /* the generator point */ #ifdef USE_ENDOMORPHISM - // constants related to secp256k1's efficiently computable endomorphism + /* constants related to secp256k1's efficiently computable endomorphism */ secp256k1_fe_t beta; secp256k1_num_t lambda, a1b2, b1, a2; #endif diff --git a/src/group_impl.h b/src/group_impl.h index 7becd2c..1edbc6e 100644 --- a/src/group_impl.h +++ b/src/group_impl.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013 Pieter Wuille -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_GROUP_IMPL_H_ #define _SECP256K1_GROUP_IMPL_H_ @@ -176,10 +178,11 @@ static int secp256k1_gej_is_infinity(const secp256k1_gej_t *a) { static int secp256k1_gej_is_valid(const secp256k1_gej_t *a) { if (a->infinity) return 0; - // y^2 = x^3 + 7 - // (Y/Z^3)^2 = (X/Z^2)^3 + 7 - // Y^2 / Z^6 = X^3 / Z^6 + 7 - // Y^2 = X^3 + 7*Z^6 + /** y^2 = x^3 + 7 + * (Y/Z^3)^2 = (X/Z^2)^3 + 7 + * Y^2 / Z^6 = X^3 / Z^6 + 7 + * Y^2 = X^3 + 7*Z^6 + */ secp256k1_fe_t y2; secp256k1_fe_sqr(&y2, &a->y); secp256k1_fe_t x3; secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x); secp256k1_fe_t z2; secp256k1_fe_sqr(&z2, &a->z); @@ -194,7 +197,7 @@ static int secp256k1_gej_is_valid(const secp256k1_gej_t *a) { static int secp256k1_ge_is_valid(const secp256k1_ge_t *a) { if (a->infinity) return 0; - // y^2 = x^3 + 7 + /* y^2 = x^3 + 7 */ secp256k1_fe_t y2; secp256k1_fe_sqr(&y2, &a->y); secp256k1_fe_t x3; secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x); secp256k1_fe_t c; secp256k1_fe_set_int(&c, 7); @@ -219,25 +222,25 @@ static void secp256k1_gej_double_var(secp256k1_gej_t *r, const secp256k1_gej_t * secp256k1_fe_t t1,t2,t3,t4; secp256k1_fe_mul(&r->z, &t5, &a->z); - secp256k1_fe_mul_int(&r->z, 2); // Z' = 2*Y*Z (2) + secp256k1_fe_mul_int(&r->z, 2); /* Z' = 2*Y*Z (2) */ secp256k1_fe_sqr(&t1, &a->x); - secp256k1_fe_mul_int(&t1, 3); // T1 = 3*X^2 (3) - secp256k1_fe_sqr(&t2, &t1); // T2 = 9*X^4 (1) + secp256k1_fe_mul_int(&t1, 3); /* T1 = 3*X^2 (3) */ + secp256k1_fe_sqr(&t2, &t1); /* T2 = 9*X^4 (1) */ secp256k1_fe_sqr(&t3, &t5); - secp256k1_fe_mul_int(&t3, 2); // T3 = 2*Y^2 (2) + secp256k1_fe_mul_int(&t3, 2); /* T3 = 2*Y^2 (2) */ secp256k1_fe_sqr(&t4, &t3); - secp256k1_fe_mul_int(&t4, 2); // T4 = 8*Y^4 (2) - secp256k1_fe_mul(&t3, &a->x, &t3); // T3 = 2*X*Y^2 (1) + 
secp256k1_fe_mul_int(&t4, 2); /* T4 = 8*Y^4 (2) */ + secp256k1_fe_mul(&t3, &a->x, &t3); /* T3 = 2*X*Y^2 (1) */ r->x = t3; - secp256k1_fe_mul_int(&r->x, 4); // X' = 8*X*Y^2 (4) - secp256k1_fe_negate(&r->x, &r->x, 4); // X' = -8*X*Y^2 (5) - secp256k1_fe_add(&r->x, &t2); // X' = 9*X^4 - 8*X*Y^2 (6) - secp256k1_fe_negate(&t2, &t2, 1); // T2 = -9*X^4 (2) - secp256k1_fe_mul_int(&t3, 6); // T3 = 12*X*Y^2 (6) - secp256k1_fe_add(&t3, &t2); // T3 = 12*X*Y^2 - 9*X^4 (8) - secp256k1_fe_mul(&r->y, &t1, &t3); // Y' = 36*X^3*Y^2 - 27*X^6 (1) - secp256k1_fe_negate(&t2, &t4, 2); // T2 = -8*Y^4 (3) - secp256k1_fe_add(&r->y, &t2); // Y' = 36*X^3*Y^2 - 27*X^6 - 8*Y^4 (4) + secp256k1_fe_mul_int(&r->x, 4); /* X' = 8*X*Y^2 (4) */ + secp256k1_fe_negate(&r->x, &r->x, 4); /* X' = -8*X*Y^2 (5) */ + secp256k1_fe_add(&r->x, &t2); /* X' = 9*X^4 - 8*X*Y^2 (6) */ + secp256k1_fe_negate(&t2, &t2, 1); /* T2 = -9*X^4 (2) */ + secp256k1_fe_mul_int(&t3, 6); /* T3 = 12*X*Y^2 (6) */ + secp256k1_fe_add(&t3, &t2); /* T3 = 12*X*Y^2 - 9*X^4 (8) */ + secp256k1_fe_mul(&r->y, &t1, &t3); /* Y' = 36*X^3*Y^2 - 27*X^6 (1) */ + secp256k1_fe_negate(&t2, &t4, 2); /* T2 = -8*Y^4 (3) */ + secp256k1_fe_add(&r->y, &t2); /* Y' = 36*X^3*Y^2 - 27*X^6 - 8*Y^4 (4) */ r->infinity = 0; } @@ -329,63 +332,65 @@ static void secp256k1_gej_add_ge(secp256k1_gej_t *r, const secp256k1_gej_t *a, c VERIFY_CHECK(!b->infinity); VERIFY_CHECK(a->infinity == 0 || a->infinity == 1); - // In: - // Eric Brier and Marc Joye, Weierstrass Elliptic Curves and Side-Channel Attacks. - // In D. Naccache and P. Paillier, Eds., Public Key Cryptography, vol. 2274 of Lecture Notes in Computer Science, pages 335-345. Springer-Verlag, 2002. - // we find as solution for a unified addition/doubling formula: - // lambda = ((x1 + x2)^2 - x1 * x2 + a) / (y1 + y2), with a = 0 for secp256k1's curve equation. - // x3 = lambda^2 - (x1 + x2) - // 2*y3 = lambda * (x1 + x2 - 2 * x3) - (y1 + y2). - // - // Substituting x_i = Xi / Zi^2 and yi = Yi / Zi^3, for i=1,2,3, gives: - // U1 = X1*Z2^2, U2 = X2*Z1^2 - // S1 = X1*Z2^3, S2 = X2*Z2^3 - // Z = Z1*Z2 - // T = U1+U2 - // M = S1+S2 - // Q = T*M^2 - // R = T^2-U1*U2 - // X3 = 4*(R^2-Q) - // Y3 = 4*(R*(3*Q-2*R^2)-M^4) - // Z3 = 2*M*Z - // (Note that the paper uses xi = Xi / Zi and yi = Yi / Zi instead.) + /** In: + * Eric Brier and Marc Joye, Weierstrass Elliptic Curves and Side-Channel Attacks. + * In D. Naccache and P. Paillier, Eds., Public Key Cryptography, vol. 2274 of Lecture Notes in Computer Science, pages 335-345. Springer-Verlag, 2002. + * we find as solution for a unified addition/doubling formula: + * lambda = ((x1 + x2)^2 - x1 * x2 + a) / (y1 + y2), with a = 0 for secp256k1's curve equation. + * x3 = lambda^2 - (x1 + x2) + * 2*y3 = lambda * (x1 + x2 - 2 * x3) - (y1 + y2). + * + * Substituting x_i = Xi / Zi^2 and yi = Yi / Zi^3, for i=1,2,3, gives: + * U1 = X1*Z2^2, U2 = X2*Z1^2 + * S1 = X1*Z2^3, S2 = X2*Z2^3 + * Z = Z1*Z2 + * T = U1+U2 + * M = S1+S2 + * Q = T*M^2 + * R = T^2-U1*U2 + * X3 = 4*(R^2-Q) + * Y3 = 4*(R*(3*Q-2*R^2)-M^4) + * Z3 = 2*M*Z + * (Note that the paper uses xi = Xi / Zi and yi = Yi / Zi instead.) 
+ */ - secp256k1_fe_t zz; secp256k1_fe_sqr(&zz, &a->z); // z = Z1^2 - secp256k1_fe_t u1 = a->x; secp256k1_fe_normalize(&u1); // u1 = U1 = X1*Z2^2 (1) - secp256k1_fe_t u2; secp256k1_fe_mul(&u2, &b->x, &zz); // u2 = U2 = X2*Z1^2 (1) - secp256k1_fe_t s1 = a->y; secp256k1_fe_normalize(&s1); // s1 = S1 = Y1*Z2^3 (1) - secp256k1_fe_t s2; secp256k1_fe_mul(&s2, &b->y, &zz); // s2 = Y2*Z2^2 (1) - secp256k1_fe_mul(&s2, &s2, &a->z); // s2 = S2 = Y2*Z1^3 (1) - secp256k1_fe_t z = a->z; // z = Z = Z1*Z2 (8) - secp256k1_fe_t t = u1; secp256k1_fe_add(&t, &u2); // t = T = U1+U2 (2) - secp256k1_fe_t m = s1; secp256k1_fe_add(&m, &s2); // m = M = S1+S2 (2) - secp256k1_fe_t n; secp256k1_fe_sqr(&n, &m); // n = M^2 (1) - secp256k1_fe_t q; secp256k1_fe_mul(&q, &n, &t); // q = Q = T*M^2 (1) - secp256k1_fe_sqr(&n, &n); // n = M^4 (1) - secp256k1_fe_t rr; secp256k1_fe_sqr(&rr, &t); // rr = T^2 (1) - secp256k1_fe_mul(&t, &u1, &u2); secp256k1_fe_negate(&t, &t, 1); // t = -U1*U2 (2) - secp256k1_fe_add(&rr, &t); // rr = R = T^2-U1*U2 (3) - secp256k1_fe_sqr(&t, &rr); // t = R^2 (1) - secp256k1_fe_mul(&r->z, &m, &z); // r->z = M*Z (1) + secp256k1_fe_t zz; secp256k1_fe_sqr(&zz, &a->z); /* z = Z1^2 */ + secp256k1_fe_t u1 = a->x; secp256k1_fe_normalize(&u1); /* u1 = U1 = X1*Z2^2 (1) */ + secp256k1_fe_t u2; secp256k1_fe_mul(&u2, &b->x, &zz); /* u2 = U2 = X2*Z1^2 (1) */ + secp256k1_fe_t s1 = a->y; secp256k1_fe_normalize(&s1); /* s1 = S1 = Y1*Z2^3 (1) */ + secp256k1_fe_t s2; secp256k1_fe_mul(&s2, &b->y, &zz); /* s2 = Y2*Z2^2 (1) */ + secp256k1_fe_mul(&s2, &s2, &a->z); /* s2 = S2 = Y2*Z1^3 (1) */ + secp256k1_fe_t z = a->z; /* z = Z = Z1*Z2 (8) */ + secp256k1_fe_t t = u1; secp256k1_fe_add(&t, &u2); /* t = T = U1+U2 (2) */ + secp256k1_fe_t m = s1; secp256k1_fe_add(&m, &s2); /* m = M = S1+S2 (2) */ + secp256k1_fe_t n; secp256k1_fe_sqr(&n, &m); /* n = M^2 (1) */ + secp256k1_fe_t q; secp256k1_fe_mul(&q, &n, &t); /* q = Q = T*M^2 (1) */ + secp256k1_fe_sqr(&n, &n); /* n = M^4 (1) */ + secp256k1_fe_t rr; secp256k1_fe_sqr(&rr, &t); /* rr = T^2 (1) */ + secp256k1_fe_mul(&t, &u1, &u2); secp256k1_fe_negate(&t, &t, 1); /* t = -U1*U2 (2) */ + secp256k1_fe_add(&rr, &t); /* rr = R = T^2-U1*U2 (3) */ + secp256k1_fe_sqr(&t, &rr); /* t = R^2 (1) */ + secp256k1_fe_mul(&r->z, &m, &z); /* r->z = M*Z (1) */ secp256k1_fe_normalize(&r->z); int infinity = secp256k1_fe_is_zero(&r->z) * (1 - a->infinity); - secp256k1_fe_mul_int(&r->z, 2 * (1 - a->infinity)); // r->z = Z3 = 2*M*Z (2) - r->x = t; // r->x = R^2 (1) - secp256k1_fe_negate(&q, &q, 1); // q = -Q (2) - secp256k1_fe_add(&r->x, &q); // r->x = R^2-Q (3) + secp256k1_fe_mul_int(&r->z, 2 * (1 - a->infinity)); /* r->z = Z3 = 2*M*Z (2) */ + r->x = t; /* r->x = R^2 (1) */ + secp256k1_fe_negate(&q, &q, 1); /* q = -Q (2) */ + secp256k1_fe_add(&r->x, &q); /* r->x = R^2-Q (3) */ secp256k1_fe_normalize(&r->x); - secp256k1_fe_mul_int(&q, 3); // q = -3*Q (6) - secp256k1_fe_mul_int(&t, 2); // t = 2*R^2 (2) - secp256k1_fe_add(&t, &q); // t = 2*R^2-3*Q (8) - secp256k1_fe_mul(&t, &t, &rr); // t = R*(2*R^2-3*Q) (1) - secp256k1_fe_add(&t, &n); // t = R*(2*R^2-3*Q)+M^4 (2) - secp256k1_fe_negate(&r->y, &t, 2); // r->y = R*(3*Q-2*R^2)-M^4 (3) + secp256k1_fe_mul_int(&q, 3); /* q = -3*Q (6) */ + secp256k1_fe_mul_int(&t, 2); /* t = 2*R^2 (2) */ + secp256k1_fe_add(&t, &q); /* t = 2*R^2-3*Q (8) */ + secp256k1_fe_mul(&t, &t, &rr); /* t = R*(2*R^2-3*Q) (1) */ + secp256k1_fe_add(&t, &n); /* t = R*(2*R^2-3*Q)+M^4 (2) */ + secp256k1_fe_negate(&r->y, &t, 2); /* r->y = R*(3*Q-2*R^2)-M^4 (3) */ secp256k1_fe_normalize(&r->y); - 
secp256k1_fe_mul_int(&r->x, 4 * (1 - a->infinity)); // r->x = X3 = 4*(R^2-Q) - secp256k1_fe_mul_int(&r->y, 4 * (1 - a->infinity)); // r->y = Y3 = 4*R*(3*Q-2*R^2)-4*M^4 (4) + secp256k1_fe_mul_int(&r->x, 4 * (1 - a->infinity)); /* r->x = X3 = 4*(R^2-Q) */ + secp256k1_fe_mul_int(&r->y, 4 * (1 - a->infinity)); /* r->y = Y3 = 4*R*(3*Q-2*R^2)-4*M^4 (4) */ - // In case a->infinity == 1, the above code results in r->x, r->y, and r->z all equal to 0. - // Add b->x to x, b->y to y, and 1 to z in that case. + /** In case a->infinity == 1, the above code results in r->x, r->y, and r->z all equal to 0. + * Add b->x to x, b->y to y, and 1 to z in that case. + */ t = b->x; secp256k1_fe_mul_int(&t, a->infinity); secp256k1_fe_add(&r->x, &t); t = b->y; secp256k1_fe_mul_int(&t, a->infinity); @@ -456,7 +461,7 @@ static void secp256k1_ge_start(void) { 0x9C,0x47,0xD0,0x8F,0xFB,0x10,0xD4,0xB8 }; #ifdef USE_ENDOMORPHISM - // properties of secp256k1's efficiently computable endomorphism + /* properties of secp256k1's efficiently computable endomorphism */ static const unsigned char secp256k1_ge_consts_lambda[] = { 0x53,0x63,0xad,0x4c,0xc0,0x5c,0x30,0xe0, 0xa5,0x26,0x1c,0x02,0x88,0x12,0x64,0x5a, diff --git a/src/num.h b/src/num.h index eaff8d3..c86f847 100644 --- a/src/num.h +++ b/src/num.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013 Pieter Wuille -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_NUM_ #define _SECP256K1_NUM_ diff --git a/src/num_gmp.h b/src/num_gmp.h index 960df86..baa1f2b 100644 --- a/src/num_gmp.h +++ b/src/num_gmp.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013 Pieter Wuille -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_NUM_REPR_ #define _SECP256K1_NUM_REPR_ diff --git a/src/num_gmp_impl.h b/src/num_gmp_impl.h index 36a2464..e45a59e 100644 --- a/src/num_gmp_impl.h +++ b/src/num_gmp_impl.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013 Pieter Wuille -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
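/* Illustrative sketch (not part of the patch): gej_add_ge above handles a->infinity without
 * branching, by scaling its outputs with (1 - a->infinity) and then adding a->infinity * b,
 * so the same field operations run whether or not a is the point at infinity.  The underlying
 * select-without-a-branch idea, reduced to plain integers (toy_select is a made-up name for
 * this sketch):
 */
#include <stdint.h>

/* Return a when flag == 0 and b when flag == 1, with no data-dependent branch. */
static uint32_t toy_select(uint32_t a, uint32_t b, uint32_t flag) {
    return a * (1 - flag) + b * flag;
}

/* e.g. x3 = toy_select(x3_computed, bx, infinity) mirrors the
 * mul_int(..., 1 - infinity) followed by add(..., infinity * b) pattern above. */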
+/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_NUM_REPR_IMPL_H_ #define _SECP256K1_NUM_REPR_IMPL_H_ @@ -119,15 +121,16 @@ static void secp256k1_num_mod_inverse(secp256k1_num_t *r, const secp256k1_num_t secp256k1_num_sanity(a); secp256k1_num_sanity(m); - // mpn_gcdext computes: (G,S) = gcdext(U,V), where - // * G = gcd(U,V) - // * G = U*S + V*T - // * U has equal or more limbs than V, and V has no padding - // If we set U to be (a padded version of) a, and V = m: - // G = a*S + m*T - // G = a*S mod m - // Assuming G=1: - // S = 1/a mod m + /** mpn_gcdext computes: (G,S) = gcdext(U,V), where + * * G = gcd(U,V) + * * G = U*S + V*T + * * U has equal or more limbs than V, and V has no padding + * If we set U to be (a padded version of) a, and V = m: + * G = a*S + m*T + * G = a*S mod m + * Assuming G=1: + * S = 1/a mod m + */ VERIFY_CHECK(m->limbs <= NUM_LIMBS); VERIFY_CHECK(m->data[m->limbs-1] != 0); mp_limb_t g[NUM_LIMBS+1]; @@ -180,7 +183,7 @@ static int secp256k1_num_eq(const secp256k1_num_t *a, const secp256k1_num_t *b) } static void secp256k1_num_subadd(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *b, int bneg) { - if (!(b->neg ^ bneg ^ a->neg)) { // a and b have the same sign + if (!(b->neg ^ bneg ^ a->neg)) { /* a and b have the same sign */ r->neg = a->neg; if (a->limbs >= b->limbs) { secp256k1_num_add_abs(r, a, b); diff --git a/src/num_impl.h b/src/num_impl.h index a6644fd..f73d3ce 100644 --- a/src/num_impl.h +++ b/src/num_impl.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013 Pieter Wuille -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_NUM_IMPL_H_ #define _SECP256K1_NUM_IMPL_H_ diff --git a/src/scalar.h b/src/scalar.h index 97a8c04..3baacb3 100644 --- a/src/scalar.h +++ b/src/scalar.h @@ -1,6 +1,8 @@ -// Copyright (c) 2014 Pieter Wuille -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_SCALAR_ #define _SECP256K1_SCALAR_ diff --git a/src/scalar_4x64.h b/src/scalar_4x64.h index 22ebe4f..5a751c6 100644 --- a/src/scalar_4x64.h +++ b/src/scalar_4x64.h @@ -1,6 +1,8 @@ -// Copyright (c) 2014 Pieter Wuille -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
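/* Illustrative sketch (not part of the patch): the mpn_gcdext comment above relies on the
 * identity G = a*S + m*T, so that G == 1 gives S = 1/a mod m.  The same identity driven by
 * the schoolbook extended Euclidean algorithm, on small plain integers (toy_mod_inverse is a
 * made-up name, and this variable-time version exists only to illustrate the identity):
 */
#include <stdint.h>

/* Return x in [0, m) with (a*x) % m == 1; requires 0 < a < m and gcd(a, m) == 1. */
static int64_t toy_mod_inverse(int64_t a, int64_t m) {
    int64_t t = 0, newt = 1;
    int64_t r = m, newr = a;
    while (newr != 0) {
        int64_t q = r / newr;
        int64_t tmp;
        tmp = t - q * newt; t = newt; newt = tmp;
        tmp = r - q * newr; r = newr; newr = tmp;
    }
    /* here r == gcd(a, m) == 1 and a*t == 1 (mod m); lift t into [0, m) */
    return t < 0 ? t + m : t;
}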
+/********************************************************************** + * Copyright (c) 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_SCALAR_REPR_ #define _SECP256K1_SCALAR_REPR_ diff --git a/src/scalar_4x64_impl.h b/src/scalar_4x64_impl.h index 760367c..f787182 100644 --- a/src/scalar_4x64_impl.h +++ b/src/scalar_4x64_impl.h @@ -1,24 +1,26 @@ -// Copyright (c) 2014 Pieter Wuille -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_SCALAR_REPR_IMPL_H_ #define _SECP256K1_SCALAR_REPR_IMPL_H_ typedef unsigned __int128 uint128_t; -// Limbs of the secp256k1 order. +/* Limbs of the secp256k1 order. */ #define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL) #define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL) #define SECP256K1_N_2 ((uint64_t)0xFFFFFFFFFFFFFFFEULL) #define SECP256K1_N_3 ((uint64_t)0xFFFFFFFFFFFFFFFFULL) -// Limbs of 2^256 minus the secp256k1 order. +/* Limbs of 2^256 minus the secp256k1 order. */ #define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1) #define SECP256K1_N_C_1 (~SECP256K1_N_1) #define SECP256K1_N_C_2 (1) -// Limbs of half the secp256k1 order. +/* Limbs of half the secp256k1 order. */ #define SECP256K1_N_H_0 ((uint64_t)0xDFE92F46681B20A0ULL) #define SECP256K1_N_H_1 ((uint64_t)0x5D576E7357A4501DULL) #define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL) @@ -39,7 +41,7 @@ SECP256K1_INLINE static int secp256k1_scalar_get_bits(const secp256k1_scalar_t * SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar_t *a) { int yes = 0; int no = 0; - no |= (a->d[3] < SECP256K1_N_3); // No need for a > check. + no |= (a->d[3] < SECP256K1_N_3); /* No need for a > check. */ no |= (a->d[2] < SECP256K1_N_2); yes |= (a->d[2] > SECP256K1_N_2) & ~no; no |= (a->d[1] < SECP256K1_N_1); @@ -116,14 +118,14 @@ static int secp256k1_scalar_is_high(const secp256k1_scalar_t *a) { int no = 0; no |= (a->d[3] < SECP256K1_N_H_3); yes |= (a->d[3] > SECP256K1_N_H_3) & ~no; - no |= (a->d[2] < SECP256K1_N_H_2) & ~yes; // No need for a > check. + no |= (a->d[2] < SECP256K1_N_H_2) & ~yes; /* No need for a > check. */ no |= (a->d[1] < SECP256K1_N_H_1) & ~yes; yes |= (a->d[1] > SECP256K1_N_H_1) & ~no; yes |= (a->d[0] > SECP256K1_N_H_0) & ~no; return yes; } -// Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. +/* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */ /** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */ #define muladd(a,b) { \ @@ -211,12 +213,12 @@ static int secp256k1_scalar_is_high(const secp256k1_scalar_t *a) { static void secp256k1_scalar_reduce_512(secp256k1_scalar_t *r, const uint64_t *l) { uint64_t n0 = l[4], n1 = l[5], n2 = l[6], n3 = l[7]; - // 160 bit accumulator. + /* 160 bit accumulator. */ uint64_t c0, c1; uint32_t c2; - // Reduce 512 bits into 385. - // m[0..6] = l[0..3] + n[0..3] * SECP256K1_N_C. + /* Reduce 512 bits into 385. 
*/ + /* m[0..6] = l[0..3] + n[0..3] * SECP256K1_N_C. */ c0 = l[0]; c1 = 0; c2 = 0; muladd_fast(n0, SECP256K1_N_C_0); uint64_t m0; extract_fast(m0); @@ -242,8 +244,8 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar_t *r, const uint64_t *l VERIFY_CHECK(c0 <= 1); uint32_t m6 = c0; - // Reduce 385 bits into 258. - // p[0..4] = m[0..3] + m[4..6] * SECP256K1_N_C. + /* Reduce 385 bits into 258. */ + /* p[0..4] = m[0..3] + m[4..6] * SECP256K1_N_C. */ c0 = m0; c1 = 0; c2 = 0; muladd_fast(m4, SECP256K1_N_C_0); uint64_t p0; extract_fast(p0); @@ -263,8 +265,8 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar_t *r, const uint64_t *l uint32_t p4 = c0 + m6; VERIFY_CHECK(p4 <= 2); - // Reduce 258 bits into 256. - // r[0..3] = p[0..3] + p[4] * SECP256K1_N_C. + /* Reduce 258 bits into 256. */ + /* r[0..3] = p[0..3] + p[4] * SECP256K1_N_C. */ uint128_t c = p0 + (uint128_t)SECP256K1_N_C_0 * p4; r->d[0] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64; c += p1 + (uint128_t)SECP256K1_N_C_1 * p4; @@ -274,18 +276,18 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar_t *r, const uint64_t *l c += p3; r->d[3] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64; - // Final reduction of r. + /* Final reduction of r. */ secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r)); } static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { - // 160 bit accumulator. + /* 160 bit accumulator. */ uint64_t c0 = 0, c1 = 0; uint32_t c2 = 0; uint64_t l[8]; - // l[0..7] = a[0..3] * b[0..3]. + /* l[0..7] = a[0..3] * b[0..3]. */ muladd_fast(a->d[0], b->d[0]); extract_fast(l[0]); muladd(a->d[0], b->d[1]); @@ -316,13 +318,13 @@ static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t } static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) { - // 160 bit accumulator. + /* 160 bit accumulator. */ uint64_t c0 = 0, c1 = 0; uint32_t c2 = 0; uint64_t l[8]; - // l[0..7] = a[0..3] * b[0..3]. + /* l[0..7] = a[0..3] * b[0..3]. */ muladd_fast(a->d[0], a->d[0]); extract_fast(l[0]); muladd2(a->d[0], a->d[1]); diff --git a/src/scalar_8x32.h b/src/scalar_8x32.h index da7c63c..f70328c 100644 --- a/src/scalar_8x32.h +++ b/src/scalar_8x32.h @@ -1,6 +1,8 @@ -// Copyright (c) 2014 Pieter Wuille -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_SCALAR_REPR_ #define _SECP256K1_SCALAR_REPR_ diff --git a/src/scalar_8x32_impl.h b/src/scalar_8x32_impl.h index dc148d7..e58be13 100644 --- a/src/scalar_8x32_impl.h +++ b/src/scalar_8x32_impl.h @@ -1,11 +1,13 @@ -// Copyright (c) 2014 Pieter Wuille -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
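/* Illustrative sketch (not part of the patch): the muladd()/extract() macros used by the
 * scalar reduction code above maintain a small multi-word accumulator (c0, c1, c2) while
 * partial products are summed column by column.  The same bookkeeping written out as plain
 * functions on a 96-bit toy accumulator (the toy_* names are made up for this sketch):
 */
#include <stdint.h>

typedef struct { uint32_t c0, c1, c2; } toy_acc_t;

/* Add the 64-bit product a*b into (c0, c1, c2); c2 is assumed never to overflow. */
static void toy_muladd(toy_acc_t *acc, uint32_t a, uint32_t b) {
    uint64_t t = (uint64_t)a * b;
    uint64_t low = (uint64_t)acc->c0 + (uint32_t)t;
    uint64_t high = (uint64_t)acc->c1 + (t >> 32) + (low >> 32);
    acc->c0 = (uint32_t)low;            /* low word plus low half of the product */
    acc->c1 = (uint32_t)high;           /* middle word plus high half and the carry */
    acc->c2 += (uint32_t)(high >> 32);  /* top word only ever absorbs a carry bit */
}

/* Pull out the lowest word and shift the accumulator down one word, like extract(). */
static uint32_t toy_extract(toy_acc_t *acc) {
    uint32_t n = acc->c0;
    acc->c0 = acc->c1;
    acc->c1 = acc->c2;
    acc->c2 = 0;
    return n;
}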
+/********************************************************************** + * Copyright (c) 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_SCALAR_REPR_IMPL_H_ #define _SECP256K1_SCALAR_REPR_IMPL_H_ -// Limbs of the secp256k1 order. +/* Limbs of the secp256k1 order. */ #define SECP256K1_N_0 ((uint32_t)0xD0364141UL) #define SECP256K1_N_1 ((uint32_t)0xBFD25E8CUL) #define SECP256K1_N_2 ((uint32_t)0xAF48A03BUL) @@ -15,14 +17,14 @@ #define SECP256K1_N_6 ((uint32_t)0xFFFFFFFFUL) #define SECP256K1_N_7 ((uint32_t)0xFFFFFFFFUL) -// Limbs of 2^256 minus the secp256k1 order. +/* Limbs of 2^256 minus the secp256k1 order. */ #define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1) #define SECP256K1_N_C_1 (~SECP256K1_N_1) #define SECP256K1_N_C_2 (~SECP256K1_N_2) #define SECP256K1_N_C_3 (~SECP256K1_N_3) #define SECP256K1_N_C_4 (1) -// Limbs of half the secp256k1 order. +/* Limbs of half the secp256k1 order. */ #define SECP256K1_N_H_0 ((uint32_t)0x681B20A0UL) #define SECP256K1_N_H_1 ((uint32_t)0xDFE92F46UL) #define SECP256K1_N_H_2 ((uint32_t)0x57A4501DUL) @@ -51,9 +53,9 @@ SECP256K1_INLINE static int secp256k1_scalar_get_bits(const secp256k1_scalar_t * SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar_t *a) { int yes = 0; int no = 0; - no |= (a->d[7] < SECP256K1_N_7); // No need for a > check. - no |= (a->d[6] < SECP256K1_N_6); // No need for a > check. - no |= (a->d[5] < SECP256K1_N_5); // No need for a > check. + no |= (a->d[7] < SECP256K1_N_7); /* No need for a > check. */ + no |= (a->d[6] < SECP256K1_N_6); /* No need for a > check. */ + no |= (a->d[5] < SECP256K1_N_5); /* No need for a > check. */ no |= (a->d[4] < SECP256K1_N_4); yes |= (a->d[4] > SECP256K1_N_4) & ~no; no |= (a->d[3] < SECP256K1_N_3) & ~yes; @@ -166,9 +168,9 @@ static int secp256k1_scalar_is_high(const secp256k1_scalar_t *a) { int no = 0; no |= (a->d[7] < SECP256K1_N_H_7); yes |= (a->d[7] > SECP256K1_N_H_7) & ~no; - no |= (a->d[6] < SECP256K1_N_H_6) & ~yes; // No need for a > check. - no |= (a->d[5] < SECP256K1_N_H_5) & ~yes; // No need for a > check. - no |= (a->d[4] < SECP256K1_N_H_4) & ~yes; // No need for a > check. + no |= (a->d[6] < SECP256K1_N_H_6) & ~yes; /* No need for a > check. */ + no |= (a->d[5] < SECP256K1_N_H_5) & ~yes; /* No need for a > check. */ + no |= (a->d[4] < SECP256K1_N_H_4) & ~yes; /* No need for a > check. */ no |= (a->d[3] < SECP256K1_N_H_3) & ~yes; yes |= (a->d[3] > SECP256K1_N_H_3) & ~no; no |= (a->d[2] < SECP256K1_N_H_2) & ~yes; @@ -179,7 +181,7 @@ static int secp256k1_scalar_is_high(const secp256k1_scalar_t *a) { return yes; } -// Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. +/* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */ /** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */ #define muladd(a,b) { \ @@ -267,11 +269,11 @@ static int secp256k1_scalar_is_high(const secp256k1_scalar_t *a) { static void secp256k1_scalar_reduce_512(secp256k1_scalar_t *r, const uint32_t *l) { uint32_t n0 = l[8], n1 = l[9], n2 = l[10], n3 = l[11], n4 = l[12], n5 = l[13], n6 = l[14], n7 = l[15]; - // 96 bit accumulator. + /* 96 bit accumulator. */ uint32_t c0, c1, c2; - // Reduce 512 bits into 385. - // m[0..12] = l[0..7] + n[0..7] * SECP256K1_N_C. + /* Reduce 512 bits into 385. */ + /* m[0..12] = l[0..7] + n[0..7] * SECP256K1_N_C. 
*/ c0 = l[0]; c1 = 0; c2 = 0; muladd_fast(n0, SECP256K1_N_C_0); uint32_t m0; extract_fast(m0); @@ -335,8 +337,8 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar_t *r, const uint32_t *l VERIFY_CHECK(c0 <= 1); uint32_t m12 = c0; - // Reduce 385 bits into 258. - // p[0..8] = m[0..7] + m[8..12] * SECP256K1_N_C. + /* Reduce 385 bits into 258. */ + /* p[0..8] = m[0..7] + m[8..12] * SECP256K1_N_C. */ c0 = m0; c1 = 0; c2 = 0; muladd_fast(m8, SECP256K1_N_C_0); uint32_t p0; extract_fast(p0); @@ -380,8 +382,8 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar_t *r, const uint32_t *l uint32_t p8 = c0 + m12; VERIFY_CHECK(p8 <= 2); - // Reduce 258 bits into 256. - // r[0..7] = p[0..7] + p[8] * SECP256K1_N_C. + /* Reduce 258 bits into 256. */ + /* r[0..7] = p[0..7] + p[8] * SECP256K1_N_C. */ uint64_t c = p0 + (uint64_t)SECP256K1_N_C_0 * p8; r->d[0] = c & 0xFFFFFFFFUL; c >>= 32; c += p1 + (uint64_t)SECP256K1_N_C_1 * p8; @@ -399,17 +401,17 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar_t *r, const uint32_t *l c += p7; r->d[7] = c & 0xFFFFFFFFUL; c >>= 32; - // Final reduction of r. + /* Final reduction of r. */ secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r)); } static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { - // 96 bit accumulator. + /* 96 bit accumulator. */ uint32_t c0 = 0, c1 = 0, c2 = 0; uint32_t l[16]; - // l[0..15] = a[0..7] * b[0..7]. + /* l[0..15] = a[0..7] * b[0..7]. */ muladd_fast(a->d[0], b->d[0]); extract_fast(l[0]); muladd(a->d[0], b->d[1]); @@ -496,12 +498,12 @@ static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t } static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) { - // 96 bit accumulator. + /* 96 bit accumulator. */ uint32_t c0 = 0, c1 = 0, c2 = 0; uint32_t l[16]; - // l[0..15] = a[0..7]^2. + /* l[0..15] = a[0..7]^2. */ muladd_fast(a->d[0], a->d[0]); extract_fast(l[0]); muladd2(a->d[0], a->d[1]); diff --git a/src/scalar_impl.h b/src/scalar_impl.h index b255753..ddc5061 100644 --- a/src/scalar_impl.h +++ b/src/scalar_impl.h @@ -1,6 +1,8 @@ -// Copyright (c) 2014 Pieter Wuille -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_SCALAR_IMPL_H_ #define _SECP256K1_SCALAR_IMPL_H_ @@ -29,7 +31,7 @@ static void secp256k1_scalar_get_num(secp256k1_num_t *r, const secp256k1_scalar_ static void secp256k1_scalar_inverse(secp256k1_scalar_t *r, const secp256k1_scalar_t *x) { - // First compute x ^ (2^N - 1) for some values of N. + /* First compute x ^ (2^N - 1) for some values of N. */ secp256k1_scalar_t x2, x3, x4, x6, x7, x8, x15, x30, x60, x120, x127; secp256k1_scalar_sqr(&x2, x); @@ -76,107 +78,107 @@ static void secp256k1_scalar_inverse(secp256k1_scalar_t *r, const secp256k1_scal secp256k1_scalar_sqr(&x127, &x127); secp256k1_scalar_mul(&x127, &x127, &x7); - // Then accumulate the final result (t starts at x127). + /* Then accumulate the final result (t starts at x127). 
*/ secp256k1_scalar_t *t = &x127; - for (int i=0; i<2; i++) // 0 + for (int i=0; i<2; i++) /* 0 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, x); // 1 - for (int i=0; i<4; i++) // 0 + secp256k1_scalar_mul(t, t, x); /* 1 */ + for (int i=0; i<4; i++) /* 0 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, &x3); // 111 - for (int i=0; i<2; i++) // 0 + secp256k1_scalar_mul(t, t, &x3); /* 111 */ + for (int i=0; i<2; i++) /* 0 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, x); // 1 - for (int i=0; i<2; i++) // 0 + secp256k1_scalar_mul(t, t, x); /* 1 */ + for (int i=0; i<2; i++) /* 0 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, x); // 1 - for (int i=0; i<2; i++) // 0 + secp256k1_scalar_mul(t, t, x); /* 1 */ + for (int i=0; i<2; i++) /* 0 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, x); // 1 - for (int i=0; i<4; i++) // 0 + secp256k1_scalar_mul(t, t, x); /* 1 */ + for (int i=0; i<4; i++) /* 0 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, &x3); // 111 - for (int i=0; i<3; i++) // 0 + secp256k1_scalar_mul(t, t, &x3); /* 111 */ + for (int i=0; i<3; i++) /* 0 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, &x2); // 11 - for (int i=0; i<4; i++) // 0 + secp256k1_scalar_mul(t, t, &x2); /* 11 */ + for (int i=0; i<4; i++) /* 0 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, &x3); // 111 - for (int i=0; i<5; i++) // 00 + secp256k1_scalar_mul(t, t, &x3); /* 111 */ + for (int i=0; i<5; i++) /* 00 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, &x3); // 111 - for (int i=0; i<4; i++) // 00 + secp256k1_scalar_mul(t, t, &x3); /* 111 */ + for (int i=0; i<4; i++) /* 00 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, &x2); // 11 - for (int i=0; i<2; i++) // 0 + secp256k1_scalar_mul(t, t, &x2); /* 11 */ + for (int i=0; i<2; i++) /* 0 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, x); // 1 - for (int i=0; i<2; i++) // 0 + secp256k1_scalar_mul(t, t, x); /* 1 */ + for (int i=0; i<2; i++) /* 0 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, x); // 1 - for (int i=0; i<5; i++) // 0 + secp256k1_scalar_mul(t, t, x); /* 1 */ + for (int i=0; i<5; i++) /* 0 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, &x4); // 1111 - for (int i=0; i<2; i++) // 0 + secp256k1_scalar_mul(t, t, &x4); /* 1111 */ + for (int i=0; i<2; i++) /* 0 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, x); // 1 - for (int i=0; i<3; i++) // 00 + secp256k1_scalar_mul(t, t, x); /* 1 */ + for (int i=0; i<3; i++) /* 00 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, x); // 1 - for (int i=0; i<4; i++) // 000 + secp256k1_scalar_mul(t, t, x); /* 1 */ + for (int i=0; i<4; i++) /* 000 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, x); // 1 - for (int i=0; i<2; i++) // 0 + secp256k1_scalar_mul(t, t, x); /* 1 */ + for (int i=0; i<2; i++) /* 0 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, x); // 1 - for (int i=0; i<10; i++) // 0000000 + secp256k1_scalar_mul(t, t, x); /* 1 */ + for (int i=0; i<10; i++) /* 0000000 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, &x3); // 111 - for (int i=0; i<4; i++) // 0 + secp256k1_scalar_mul(t, t, &x3); /* 111 */ + for (int i=0; i<4; i++) /* 0 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, &x3); // 111 - for (int i=0; i<9; i++) // 0 + secp256k1_scalar_mul(t, t, &x3); /* 111 */ + for (int i=0; i<9; i++) /* 0 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, &x8); // 11111111 - for (int i=0; 
i<2; i++) // 0 + secp256k1_scalar_mul(t, t, &x8); /* 11111111 */ + for (int i=0; i<2; i++) /* 0 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, x); // 1 - for (int i=0; i<3; i++) // 00 + secp256k1_scalar_mul(t, t, x); /* 1 */ + for (int i=0; i<3; i++) /* 00 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, x); // 1 - for (int i=0; i<3; i++) // 00 + secp256k1_scalar_mul(t, t, x); /* 1 */ + for (int i=0; i<3; i++) /* 00 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, x); // 1 - for (int i=0; i<5; i++) // 0 + secp256k1_scalar_mul(t, t, x); /* 1 */ + for (int i=0; i<5; i++) /* 0 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, &x4); // 1111 - for (int i=0; i<2; i++) // 0 + secp256k1_scalar_mul(t, t, &x4); /* 1111 */ + for (int i=0; i<2; i++) /* 0 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, x); // 1 - for (int i=0; i<5; i++) // 000 + secp256k1_scalar_mul(t, t, x); /* 1 */ + for (int i=0; i<5; i++) /* 000 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, &x2); // 11 - for (int i=0; i<4; i++) // 00 + secp256k1_scalar_mul(t, t, &x2); /* 11 */ + for (int i=0; i<4; i++) /* 00 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, &x2); // 11 - for (int i=0; i<2; i++) // 0 + secp256k1_scalar_mul(t, t, &x2); /* 11 */ + for (int i=0; i<2; i++) /* 0 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, x); // 1 - for (int i=0; i<8; i++) // 000000 + secp256k1_scalar_mul(t, t, x); /* 1 */ + for (int i=0; i<8; i++) /* 000000 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, &x2); // 11 - for (int i=0; i<3; i++) // 0 + secp256k1_scalar_mul(t, t, &x2); /* 11 */ + for (int i=0; i<3; i++) /* 0 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, &x2); // 11 - for (int i=0; i<3; i++) // 00 + secp256k1_scalar_mul(t, t, &x2); /* 11 */ + for (int i=0; i<3; i++) /* 00 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, x); // 1 - for (int i=0; i<6; i++) // 00000 + secp256k1_scalar_mul(t, t, x); /* 1 */ + for (int i=0; i<6; i++) /* 00000 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(t, t, x); // 1 - for (int i=0; i<8; i++) // 00 + secp256k1_scalar_mul(t, t, x); /* 1 */ + for (int i=0; i<8; i++) /* 00 */ secp256k1_scalar_sqr(t, t); - secp256k1_scalar_mul(r, t, &x6); // 111111 + secp256k1_scalar_mul(r, t, &x6); /* 111111 */ } #endif diff --git a/src/secp256k1.c b/src/secp256k1.c index f017342..a7d9d16 100644 --- a/src/secp256k1.c +++ b/src/secp256k1.c @@ -1,6 +1,8 @@ -// Copyright (c) 2013 Pieter Wuille -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
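For context on the ladder above: secp256k1_scalar_inverse appears to compute x^(n-2) mod n, i.e. inversion via Fermat's little theorem (the group order n is prime), with the /* 1 */, /* 111 */, ... annotations tracking the exponent bits consumed at each multiply. A minimal stand-alone sketch of that identity, using a toy prime and hypothetical helper names (modmul, modpow), could look like this:

#include <stdint.h>
#include <stdio.h>

/* Toy modular multiplication; safe in 64 bits because p < 2^20 here. */
static uint64_t modmul(uint64_t a, uint64_t b, uint64_t p) {
    return (a * b) % p;
}

/* Square-and-multiply exponentiation, the same shape as the addition chain above. */
static uint64_t modpow(uint64_t x, uint64_t e, uint64_t p) {
    uint64_t r = 1;
    while (e) {
        if (e & 1) r = modmul(r, x, p);
        x = modmul(x, x, p);
        e >>= 1;
    }
    return r;
}

int main(void) {
    uint64_t p = 1000003;                /* small prime standing in for the group order */
    uint64_t x = 123456;
    uint64_t xinv = modpow(x, p - 2, p); /* Fermat: x^(p-2) == x^-1 (mod p) */
    printf("%d\n", modmul(x, xinv, p) == 1); /* prints 1 */
    return 0;
}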
+/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #define SECP256K1_BUILD (1) @@ -42,7 +44,7 @@ int secp256k1_ecdsa_verify(const unsigned char *msg, int msglen, const unsigned DEBUG_CHECK(pubkey != NULL); int ret = -3; - secp256k1_num_t m; + secp256k1_num_t m; secp256k1_ecdsa_sig_t s; secp256k1_ge_t q; secp256k1_num_set_bin(&m, msg, msglen); @@ -140,7 +142,7 @@ int secp256k1_ecdsa_recover_compact(const unsigned char *msg, int msglen, const DEBUG_CHECK(recid >= 0 && recid <= 3); int ret = 0; - secp256k1_num_t m; + secp256k1_num_t m; secp256k1_ecdsa_sig_t sig; secp256k1_num_set_bin(&sig.r, sig64, 32); secp256k1_num_set_bin(&sig.s, sig64 + 32, 32); diff --git a/src/testrand.h b/src/testrand.h index 07cb2d2..018b65c 100644 --- a/src/testrand.h +++ b/src/testrand.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013 Pieter Wuille -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_TESTRAND_H_ #define _SECP256K1_TESTRAND_H_ diff --git a/src/testrand_impl.h b/src/testrand_impl.h index 8939187..677c4b9 100644 --- a/src/testrand_impl.h +++ b/src/testrand_impl.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013 Pieter Wuille -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_TESTRAND_IMPL_H_ #define _SECP256K1_TESTRAND_IMPL_H_ diff --git a/src/tests.c b/src/tests.c index 5b07c42..2f6585a 100644 --- a/src/tests.c +++ b/src/tests.c @@ -1,6 +1,8 @@ -// Copyright (c) 2013 Pieter Wuille -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
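A note ahead of test_num_get_set_hex below: the expression ((lowh>>6)*9+(lowh-'0'))&15 that it uses is a branch-free ASCII-hex-digit-to-nibble conversion; bit 6 of the character distinguishes letters from digits, and the *9 correction plus the final mask maps 'a'-'f'/'A'-'F' to 10..15. A small self-contained check of that identity (illustrative only; hex_nibble is a made-up name):

#include <assert.h>
#include <stdio.h>

/* Branch-free ASCII hex digit to 4-bit value, the trick used in
 * test_num_get_set_hex; valid for '0'-'9', 'a'-'f' and 'A'-'F'. */
static int hex_nibble(int c) {
    return ((c >> 6) * 9 + (c - '0')) & 15;
}

int main(void) {
    const char *digits = "0123456789abcdefABCDEF";
    int i;
    for (i = 0; digits[i] != '\0'; i++) {
        int expect = i < 10 ? i : ((i - 10) % 6) + 10;
        assert(hex_nibble((unsigned char)digits[i]) == expect);
    }
    printf("all hex digits map correctly\n");
    return 0;
}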
+/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #if defined HAVE_CONFIG_H #include "libsecp256k1-config.h" @@ -131,12 +133,12 @@ void test_num_get_set_hex(void) { secp256k1_num_set_hex(&n2, c, 64); CHECK(secp256k1_num_eq(&n1, &n2)); for (int i=0; i<64; i++) { - // check whether the lower 4 bits correspond to the last hex character + /* check whether the lower 4 bits correspond to the last hex character */ int low1 = secp256k1_num_shift(&n1, 4); int lowh = c[63]; int low2 = ((lowh>>6)*9+(lowh-'0'))&15; CHECK(low1 == low2); - // shift bits off the hex representation, and compare + /* shift bits off the hex representation, and compare */ memmove(c+1, c, 63); c[0] = '0'; secp256k1_num_set_hex(&n2, c, 64); @@ -152,11 +154,11 @@ void test_num_get_set_bin(void) { secp256k1_num_set_bin(&n2, c, 32); CHECK(secp256k1_num_eq(&n1, &n2)); for (int i=0; i<32; i++) { - // check whether the lower 8 bits correspond to the last byte + /* check whether the lower 8 bits correspond to the last byte */ int low1 = secp256k1_num_shift(&n1, 8); int low2 = c[31]; CHECK(low1 == low2); - // shift bits off the byte representation, and compare + /* shift bits off the byte representation, and compare */ memmove(c+1, c, 31); c[0] = 0; secp256k1_num_set_bin(&n2, c, 32); @@ -179,20 +181,20 @@ void run_num_int(void) { void test_num_negate(void) { secp256k1_num_t n1; secp256k1_num_t n2; - random_num_order_test(&n1); // n1 = R + random_num_order_test(&n1); /* n1 = R */ random_num_negate(&n1); - secp256k1_num_copy(&n2, &n1); // n2 = R - secp256k1_num_sub(&n1, &n2, &n1); // n1 = n2-n1 = 0 + secp256k1_num_copy(&n2, &n1); /* n2 = R */ + secp256k1_num_sub(&n1, &n2, &n1); /* n1 = n2-n1 = 0 */ CHECK(secp256k1_num_is_zero(&n1)); - secp256k1_num_copy(&n1, &n2); // n1 = R - secp256k1_num_negate(&n1); // n1 = -R + secp256k1_num_copy(&n1, &n2); /* n1 = R */ + secp256k1_num_negate(&n1); /* n1 = -R */ CHECK(!secp256k1_num_is_zero(&n1)); - secp256k1_num_add(&n1, &n2, &n1); // n1 = n2+n1 = 0 + secp256k1_num_add(&n1, &n2, &n1); /* n1 = n2+n1 = 0 */ CHECK(secp256k1_num_is_zero(&n1)); - secp256k1_num_copy(&n1, &n2); // n1 = R - secp256k1_num_negate(&n1); // n1 = -R + secp256k1_num_copy(&n1, &n2); /* n1 = R */ + secp256k1_num_negate(&n1); /* n1 = -R */ CHECK(secp256k1_num_is_neg(&n1) != secp256k1_num_is_neg(&n2)); - secp256k1_num_negate(&n1); // n1 = R + secp256k1_num_negate(&n1); /* n1 = R */ CHECK(secp256k1_num_eq(&n1, &n2)); } @@ -200,28 +202,28 @@ void test_num_add_sub(void) { int r = secp256k1_rand32(); secp256k1_num_t n1; secp256k1_num_t n2; - random_num_order_test(&n1); // n1 = R1 + random_num_order_test(&n1); /* n1 = R1 */ if (r & 1) { random_num_negate(&n1); } - random_num_order_test(&n2); // n2 = R2 + random_num_order_test(&n2); /* n2 = R2 */ if (r & 2) { random_num_negate(&n2); } secp256k1_num_t n1p2, n2p1, n1m2, n2m1; - secp256k1_num_add(&n1p2, &n1, &n2); // n1p2 = R1 + R2 - secp256k1_num_add(&n2p1, &n2, &n1); // n2p1 = R2 + R1 - secp256k1_num_sub(&n1m2, &n1, &n2); // n1m2 = R1 - R2 - secp256k1_num_sub(&n2m1, &n2, &n1); // n2m1 = R2 - R1 + secp256k1_num_add(&n1p2, &n1, &n2); /* n1p2 = R1 + R2 */ + secp256k1_num_add(&n2p1, &n2, &n1); /* n2p1 = R2 + R1 */ + secp256k1_num_sub(&n1m2, &n1, &n2); /* n1m2 = R1 - R2 */ + secp256k1_num_sub(&n2m1, &n2, &n1); /* 
n2m1 = R2 - R1 */ CHECK(secp256k1_num_eq(&n1p2, &n2p1)); CHECK(!secp256k1_num_eq(&n1p2, &n1m2)); - secp256k1_num_negate(&n2m1); // n2m1 = -R2 + R1 + secp256k1_num_negate(&n2m1); /* n2m1 = -R2 + R1 */ CHECK(secp256k1_num_eq(&n2m1, &n1m2)); CHECK(!secp256k1_num_eq(&n2m1, &n1)); - secp256k1_num_add(&n2m1, &n2m1, &n2); // n2m1 = -R2 + R1 + R2 = R1 + secp256k1_num_add(&n2m1, &n2m1, &n2); /* n2m1 = -R2 + R1 + R2 = R1 */ CHECK(secp256k1_num_eq(&n2m1, &n1)); CHECK(!secp256k1_num_eq(&n2p1, &n1)); - secp256k1_num_sub(&n2p1, &n2p1, &n2); // n2p1 = R2 + R1 - R2 = R1 + secp256k1_num_sub(&n2p1, &n2p1, &n2); /* n2p1 = R2 + R1 - R2 = R1 */ CHECK(secp256k1_num_eq(&n2p1, &n1)); } @@ -249,7 +251,7 @@ int secp256k1_scalar_eq(const secp256k1_scalar_t *s1, const secp256k1_scalar_t * void scalar_test(void) { unsigned char c[32]; - // Set 's' to a random scalar, with value 'snum'. + /* Set 's' to a random scalar, with value 'snum'. */ secp256k1_rand256_test(c); secp256k1_scalar_t s; secp256k1_scalar_set_b32(&s, c, NULL); @@ -257,7 +259,7 @@ void scalar_test(void) { secp256k1_num_set_bin(&snum, c, 32); secp256k1_num_mod(&snum, &secp256k1_ge_consts->order); - // Set 's1' to a random scalar, with value 's1num'. + /* Set 's1' to a random scalar, with value 's1num'. */ secp256k1_rand256_test(c); secp256k1_scalar_t s1; secp256k1_scalar_set_b32(&s1, c, NULL); @@ -265,7 +267,7 @@ void scalar_test(void) { secp256k1_num_set_bin(&s1num, c, 32); secp256k1_num_mod(&s1num, &secp256k1_ge_consts->order); - // Set 's2' to a random scalar, with value 'snum2', and byte array representation 'c'. + /* Set 's2' to a random scalar, with value 'snum2', and byte array representation 'c'. */ secp256k1_rand256_test(c); secp256k1_scalar_t s2; int overflow = 0; @@ -275,7 +277,7 @@ void scalar_test(void) { secp256k1_num_mod(&s2num, &secp256k1_ge_consts->order); { - // Test that fetching groups of 4 bits from a scalar and recursing n(i)=16*n(i-1)+p(i) reconstructs it. + /* Test that fetching groups of 4 bits from a scalar and recursing n(i)=16*n(i-1)+p(i) reconstructs it. */ secp256k1_num_t n, t, m; secp256k1_num_set_int(&n, 0); secp256k1_num_set_int(&m, 16); @@ -288,18 +290,18 @@ void scalar_test(void) { } { - // Test that get_b32 returns the same as get_bin on the number. + /* Test that get_b32 returns the same as get_bin on the number. */ unsigned char r1[32]; secp256k1_scalar_get_b32(r1, &s2); unsigned char r2[32]; secp256k1_num_get_bin(r2, 32, &s2num); CHECK(memcmp(r1, r2, 32) == 0); - // If no overflow occurred when assigning, it should also be equal to the original byte array. + /* If no overflow occurred when assigning, it should also be equal to the original byte array. */ CHECK((memcmp(r1, c, 32) == 0) == (overflow == 0)); } { - // Test that adding the scalars together is equal to adding their numbers together modulo the order. + /* Test that adding the scalars together is equal to adding their numbers together modulo the order. */ secp256k1_num_t rnum; secp256k1_num_add(&rnum, &snum, &s2num); secp256k1_num_mod(&rnum, &secp256k1_ge_consts->order); @@ -311,7 +313,7 @@ void scalar_test(void) { } { - // Test that multipying the scalars is equal to multiplying their numbers modulo the order. + /* Test that multipying the scalars is equal to multiplying their numbers modulo the order. 
*/ secp256k1_num_t rnum; secp256k1_num_mul(&rnum, &snum, &s2num); secp256k1_num_mod(&rnum, &secp256k1_ge_consts->order); @@ -320,41 +322,41 @@ void scalar_test(void) { secp256k1_num_t r2num; secp256k1_scalar_get_num(&r2num, &r); CHECK(secp256k1_num_eq(&rnum, &r2num)); - // The result can only be zero if at least one of the factors was zero. + /* The result can only be zero if at least one of the factors was zero. */ CHECK(secp256k1_scalar_is_zero(&r) == (secp256k1_scalar_is_zero(&s) || secp256k1_scalar_is_zero(&s2))); - // The results can only be equal to one of the factors if that factor was zero, or the other factor was one. + /* The results can only be equal to one of the factors if that factor was zero, or the other factor was one. */ CHECK(secp256k1_num_eq(&rnum, &snum) == (secp256k1_scalar_is_zero(&s) || secp256k1_scalar_is_one(&s2))); CHECK(secp256k1_num_eq(&rnum, &s2num) == (secp256k1_scalar_is_zero(&s2) || secp256k1_scalar_is_one(&s))); } { - // Check that comparison with zero matches comparison with zero on the number. + /* Check that comparison with zero matches comparison with zero on the number. */ CHECK(secp256k1_num_is_zero(&snum) == secp256k1_scalar_is_zero(&s)); - // Check that comparison with the half order is equal to testing for high scalar. + /* Check that comparison with the half order is equal to testing for high scalar. */ CHECK(secp256k1_scalar_is_high(&s) == (secp256k1_num_cmp(&snum, &secp256k1_ge_consts->half_order) > 0)); secp256k1_scalar_t neg; secp256k1_scalar_negate(&neg, &s); secp256k1_num_t negnum; secp256k1_num_sub(&negnum, &secp256k1_ge_consts->order, &snum); secp256k1_num_mod(&negnum, &secp256k1_ge_consts->order); - // Check that comparison with the half order is equal to testing for high scalar after negation. + /* Check that comparison with the half order is equal to testing for high scalar after negation. */ CHECK(secp256k1_scalar_is_high(&neg) == (secp256k1_num_cmp(&negnum, &secp256k1_ge_consts->half_order) > 0)); - // Negating should change the high property, unless the value was already zero. + /* Negating should change the high property, unless the value was already zero. */ CHECK((secp256k1_scalar_is_high(&s) == secp256k1_scalar_is_high(&neg)) == secp256k1_scalar_is_zero(&s)); secp256k1_num_t negnum2; secp256k1_scalar_get_num(&negnum2, &neg); - // Negating a scalar should be equal to (order - n) mod order on the number. + /* Negating a scalar should be equal to (order - n) mod order on the number. */ CHECK(secp256k1_num_eq(&negnum, &negnum2)); secp256k1_scalar_add(&neg, &neg, &s); - // Adding a number to its negation should result in zero. + /* Adding a number to its negation should result in zero. */ CHECK(secp256k1_scalar_is_zero(&neg)); secp256k1_scalar_negate(&neg, &neg); - // Negating zero should still result in zero. + /* Negating zero should still result in zero. */ CHECK(secp256k1_scalar_is_zero(&neg)); } { - // Test that scalar inverses are equal to the inverse of their number modulo the order. + /* Test that scalar inverses are equal to the inverse of their number modulo the order. */ if (!secp256k1_scalar_is_zero(&s)) { secp256k1_scalar_t inv; secp256k1_scalar_inverse(&inv, &s); @@ -364,16 +366,16 @@ void scalar_test(void) { secp256k1_scalar_get_num(&invnum2, &inv); CHECK(secp256k1_num_eq(&invnum, &invnum2)); secp256k1_scalar_mul(&inv, &inv, &s); - // Multiplying a scalar with its inverse must result in one. + /* Multiplying a scalar with its inverse must result in one. 
*/ CHECK(secp256k1_scalar_is_one(&inv)); secp256k1_scalar_inverse(&inv, &inv); - // Inverting one must result in one. + /* Inverting one must result in one. */ CHECK(secp256k1_scalar_is_one(&inv)); } } { - // Test commutativity of add. + /* Test commutativity of add. */ secp256k1_scalar_t r1, r2; secp256k1_scalar_add(&r1, &s1, &s2); secp256k1_scalar_add(&r2, &s2, &s1); @@ -381,7 +383,7 @@ void scalar_test(void) { } { - // Test commutativity of mul. + /* Test commutativity of mul. */ secp256k1_scalar_t r1, r2; secp256k1_scalar_mul(&r1, &s1, &s2); secp256k1_scalar_mul(&r2, &s2, &s1); @@ -389,7 +391,7 @@ void scalar_test(void) { } { - // Test associativity of add. + /* Test associativity of add. */ secp256k1_scalar_t r1, r2; secp256k1_scalar_add(&r1, &s1, &s2); secp256k1_scalar_add(&r1, &r1, &s); @@ -399,7 +401,7 @@ void scalar_test(void) { } { - // Test associativity of mul. + /* Test associativity of mul. */ secp256k1_scalar_t r1, r2; secp256k1_scalar_mul(&r1, &s1, &s2); secp256k1_scalar_mul(&r1, &r1, &s); @@ -409,7 +411,7 @@ void scalar_test(void) { } { - // Test distributitivity of mul over add. + /* Test distributitivity of mul over add. */ secp256k1_scalar_t r1, r2, t; secp256k1_scalar_add(&r1, &s1, &s2); secp256k1_scalar_mul(&r1, &r1, &s); @@ -420,7 +422,7 @@ void scalar_test(void) { } { - // Test square. + /* Test square. */ secp256k1_scalar_t r1, r2; secp256k1_scalar_sqr(&r1, &s1); secp256k1_scalar_mul(&r2, &s1, &s1); @@ -450,7 +452,7 @@ void random_fe_non_zero(secp256k1_fe_t *nz) { if (!secp256k1_fe_is_zero(nz)) break; } - // Infinitesimal probability of spurious failure here + /* Infinitesimal probability of spurious failure here */ CHECK(tries >= 0); } @@ -498,7 +500,7 @@ void run_field_inv_var(void) { void run_field_inv_all(void) { secp256k1_fe_t x[16], xi[16], xii[16]; - // Check it's safe to call for 0 elements + /* Check it's safe to call for 0 elements */ secp256k1_fe_inv_all(0, xi, x); for (int i=0; iorder; for (int i=0; i<200*count; i++) { - // in each iteration, compute X = xn*X + gn*G; + /* in each iteration, compute X = xn*X + gn*G; */ secp256k1_ecmult(&x, &x, &xn, &gn); - // also compute ae and ge: the actual accumulated factors for A and G - // if X was (ae*A+ge*G), xn*X + gn*G results in (xn*ae*A + (xn*ge+gn)*G) + /* also compute ae and ge: the actual accumulated factors for A and G */ + /* if X was (ae*A+ge*G), xn*X + gn*G results in (xn*ae*A + (xn*ge+gn)*G) */ secp256k1_num_mod_mul(&ae, &ae, &xn, order); secp256k1_num_mod_mul(&ge, &ge, &xn, order); secp256k1_num_add(&ge, &ge, &gn); secp256k1_num_mod(&ge, order); - // modify xn and gn + /* modify xn and gn */ secp256k1_num_mod_mul(&xn, &xn, &xf, order); secp256k1_num_mod_mul(&gn, &gn, &gf, order); - // verify + /* verify */ if (i == 19999) { char res[132]; int resl = 132; secp256k1_gej_get_hex(res, &resl, &x); CHECK(strcmp(res, "(D6E96687F9B10D092A6F35439D86CEBEA4535D0D409F53586440BD74B933E830,B95CBCA2C77DA786539BE8FD53354D2D3B4F566AE658045407ED6015EE1B2A88)") == 0); } } - // redo the computation, but directly with the resulting ae and ge coefficients: + /* redo the computation, but directly with the resulting ae and ge coefficients: */ secp256k1_gej_t x2; secp256k1_ecmult(&x2, &a, &ae, &ge); char res[132]; int resl = 132; char res2[132]; int resl2 = 132; @@ -747,12 +749,12 @@ void run_ecmult_chain(void) { } void test_point_times_order(const secp256k1_gej_t *point) { - // multiplying a point by the order results in O + /* multiplying a point by the order results in O */ const secp256k1_num_t *order = 
&secp256k1_ge_consts->order; secp256k1_num_t zero; secp256k1_num_set_int(&zero, 0); secp256k1_gej_t res; - secp256k1_ecmult(&res, point, order, order); // calc res = order * point + order * G; + secp256k1_ecmult(&res, point, order, order); /* calc res = order * point + order * G; */ CHECK(secp256k1_gej_is_infinity(&res)); } @@ -785,19 +787,19 @@ void test_wnaf(const secp256k1_num_t *number, int w) { secp256k1_num_mul(&x, &x, &two); int v = wnaf[i]; if (v) { - CHECK(zeroes == -1 || zeroes >= w-1); // check that distance between non-zero elements is at least w-1 + CHECK(zeroes == -1 || zeroes >= w-1); /* check that distance between non-zero elements is at least w-1 */ zeroes=0; - CHECK((v & 1) == 1); // check non-zero elements are odd - CHECK(v <= (1 << (w-1)) - 1); // check range below - CHECK(v >= -(1 << (w-1)) - 1); // check range above + CHECK((v & 1) == 1); /* check non-zero elements are odd */ + CHECK(v <= (1 << (w-1)) - 1); /* check range below */ + CHECK(v >= -(1 << (w-1)) - 1); /* check range above */ } else { - CHECK(zeroes != -1); // check that no unnecessary zero padding exists + CHECK(zeroes != -1); /* check that no unnecessary zero padding exists */ zeroes++; } secp256k1_num_set_int(&t, v); secp256k1_num_add(&x, &x, &t); } - CHECK(secp256k1_num_eq(&x, number)); // check that wnaf represents number + CHECK(secp256k1_num_eq(&x, number)); /* check that wnaf represents number */ } void run_wnaf(void) { @@ -842,7 +844,7 @@ void test_ecdsa_end_to_end(void) { unsigned char privkey[32]; unsigned char message[32]; - // Generate a random key and message. + /* Generate a random key and message. */ { secp256k1_num_t msg, key; random_num_order_test(&msg); @@ -851,20 +853,20 @@ void test_ecdsa_end_to_end(void) { secp256k1_num_get_bin(message, 32, &msg); } - // Construct and verify corresponding public key. + /* Construct and verify corresponding public key. */ CHECK(secp256k1_ec_seckey_verify(privkey) == 1); unsigned char pubkey[65]; int pubkeylen = 65; CHECK(secp256k1_ec_pubkey_create(pubkey, &pubkeylen, privkey, secp256k1_rand32() % 2) == 1); CHECK(secp256k1_ec_pubkey_verify(pubkey, pubkeylen)); - // Verify private key import and export. + /* Verify private key import and export. */ unsigned char seckey[300]; int seckeylen = 300; CHECK(secp256k1_ec_privkey_export(privkey, seckey, &seckeylen, secp256k1_rand32() % 2) == 1); unsigned char privkey2[32]; CHECK(secp256k1_ec_privkey_import(privkey2, seckey, seckeylen) == 1); CHECK(memcmp(privkey, privkey2, 32) == 0); - // Optionally tweak the keys using addition. + /* Optionally tweak the keys using addition. */ if (secp256k1_rand32() % 3 == 0) { unsigned char rnd[32]; secp256k1_rand256_test(rnd); @@ -877,7 +879,7 @@ void test_ecdsa_end_to_end(void) { CHECK(memcmp(pubkey, pubkey2, pubkeylen) == 0); } - // Optionally tweak the keys using multiplication. + /* Optionally tweak the keys using multiplication. */ if (secp256k1_rand32() % 3 == 0) { unsigned char rnd[32]; secp256k1_rand256_test(rnd); @@ -890,7 +892,7 @@ void test_ecdsa_end_to_end(void) { CHECK(memcmp(pubkey, pubkey2, pubkeylen) == 0); } - // Sign. + /* Sign. */ unsigned char signature[72]; int signaturelen = 72; while(1) { unsigned char rnd[32]; @@ -899,13 +901,13 @@ void test_ecdsa_end_to_end(void) { break; } } - // Verify. + /* Verify. */ CHECK(secp256k1_ecdsa_verify(message, 32, signature, signaturelen, pubkey, pubkeylen) == 1); - // Destroy signature and verify again. + /* Destroy signature and verify again. 
*/ signature[signaturelen - 1 - secp256k1_rand32() % 20] += 1 + (secp256k1_rand32() % 255); CHECK(secp256k1_ecdsa_verify(message, 32, signature, signaturelen, pubkey, pubkeylen) != 1); - // Compact sign. + /* Compact sign. */ unsigned char csignature[64]; int recid = 0; while(1) { unsigned char rnd[32]; @@ -914,12 +916,12 @@ void test_ecdsa_end_to_end(void) { break; } } - // Recover. + /* Recover. */ unsigned char recpubkey[65]; int recpubkeylen = 0; CHECK(secp256k1_ecdsa_recover_compact(message, 32, csignature, recpubkey, &recpubkeylen, pubkeylen == 33, recid) == 1); CHECK(recpubkeylen == pubkeylen); CHECK(memcmp(pubkey, recpubkey, pubkeylen) == 0); - // Destroy signature and verify again. + /* Destroy signature and verify again. */ csignature[secp256k1_rand32() % 64] += 1 + (secp256k1_rand32() % 255); CHECK(secp256k1_ecdsa_recover_compact(message, 32, csignature, recpubkey, &recpubkeylen, pubkeylen == 33, recid) != 1 || memcmp(pubkey, recpubkey, pubkeylen) != 0); @@ -986,12 +988,12 @@ void run_ecdsa_openssl(void) { #endif int main(int argc, char **argv) { - // find iteration count + /* find iteration count */ if (argc > 1) { count = strtol(argv[1], NULL, 0); } - // find random seed + /* find random seed */ uint64_t seed; if (argc > 2) { seed = strtoull(argv[2], NULL, 0); @@ -1007,16 +1009,16 @@ int main(int argc, char **argv) { printf("test count = %i\n", count); printf("random seed = %llu\n", (unsigned long long)seed); - // initialize + /* initialize */ secp256k1_start(SECP256K1_START_SIGN | SECP256K1_START_VERIFY); - // num tests + /* num tests */ run_num_smalltests(); - // scalar tests + /* scalar tests */ run_scalar_tests(); - // field tests + /* field tests */ run_field_inv(); run_field_inv_var(); run_field_inv_all(); @@ -1024,15 +1026,15 @@ int main(int argc, char **argv) { run_sqr(); run_sqrt(); - // group tests + /* group tests */ run_ge(); - // ecmult tests + /* ecmult tests */ run_wnaf(); run_point_times_order(); run_ecmult_chain(); - // ecdsa tests + /* ecdsa tests */ run_ecdsa_sign_verify(); run_ecdsa_end_to_end(); #ifdef ENABLE_OPENSSL_TESTS @@ -1041,7 +1043,7 @@ int main(int argc, char **argv) { printf("random run = %llu\n", (unsigned long long)secp256k1_rand32() + ((unsigned long long)secp256k1_rand32() << 32)); - // shutdown + /* shutdown */ secp256k1_stop(); return 0; } diff --git a/src/util.h b/src/util.h index 696f24a..126db05 100644 --- a/src/util.h +++ b/src/util.h @@ -1,6 +1,8 @@ -// Copyright (c) 2013 Pieter Wuille -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. +/********************************************************************** + * Copyright (c) 2013, 2014 Pieter Wuille * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ #ifndef _SECP256K1_UTIL_H_ #define _SECP256K1_UTIL_H_ @@ -30,14 +32,14 @@ } \ } while(0) -// Like assert(), but safe to use on expressions with side effects. +/* Like assert(), but safe to use on expressions with side effects. */ #ifndef NDEBUG #define DEBUG_CHECK CHECK #else #define DEBUG_CHECK(cond) do { (void)(cond); } while(0) #endif -// Like DEBUG_CHECK(), but when VERIFY is defined instead of NDEBUG not defined. +/* Like DEBUG_CHECK(), but when VERIFY is defined instead of NDEBUG not defined. */ #ifdef VERIFY #define VERIFY_CHECK CHECK #else
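/* Illustrative note on the macros above: DEBUG_CHECK differs from a plain
 * assert() in that its argument is always evaluated.  With NDEBUG defined,
 *     assert(secp256k1_ecdsa_sign(msg, 32, sig, &siglen, key, nonce) == 1);
 * would drop the sign call entirely, whereas
 *     DEBUG_CHECK(secp256k1_ecdsa_sign(msg, 32, sig, &siglen, key, nonce) == 1);
 * still performs the call and merely skips the failure handling, which is what
 * "safe to use on expressions with side effects" refers to.  VERIFY_CHECK is
 * the same idea keyed on the VERIFY define instead of NDEBUG. */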