Merge #600: scratch space: use single allocation

98836b1 scratch: replace frames with "checkpoint" system (Andrew Poelstra)
7623cf2 scratch: save a couple bytes of unnecessarily-allocated memory (Andrew Poelstra)
a7a164f scratch: rename `max_size` to `size`, document that extra will actually be allocated (Andrew Poelstra)
5a4bc0b scratch: unify allocations (Andrew Poelstra)
c2b028a scratch space: thread `error_callback` into all scratch space functions (Andrew Poelstra)
0be1a4a scratch: add magic bytes to beginning of structure (Andrew Poelstra)
92a48a7 scratch space: use single allocation (Andrew Poelstra)

Pull request description:

ACKs for commit 98836b:

Tree-SHA512: 6e251f704644a5f61b24aa05c6f7a31ad8c58d147195079d52fe45daacd28a9fd2f4aaf71273183b99b3795a01a88f8389170d4280489b2a28a14a56e03153d7
Committed by: Gregory Maxwell, 2019-05-26 07:37:54 +00:00
Commit: 6c36de7a33 (GPG Key ID: EAB5AF94D9E9ABE7; no known key found for this signature in database)
9 changed files with 217 additions and 150 deletions


@@ -285,21 +285,24 @@ SECP256K1_API void secp256k1_context_set_error_callback(
*
* Returns: a newly created scratch space.
* Args: ctx: an existing context object (cannot be NULL)
* In: max_size: maximum amount of memory to allocate
* In: size: amount of memory to be available as scratch space. Some extra
* (<100 bytes) will be allocated for extra accounting.
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT secp256k1_scratch_space* secp256k1_scratch_space_create(
const secp256k1_context* ctx,
size_t max_size
size_t size
) SECP256K1_ARG_NONNULL(1);
/** Destroy a secp256k1 scratch space.
*
* The pointer may not be used afterwards.
* Args: scratch: space to destroy
* Args: ctx: a secp256k1 context object.
* scratch: space to destroy
*/
SECP256K1_API void secp256k1_scratch_space_destroy(
const secp256k1_context* ctx,
secp256k1_scratch_space* scratch
);
) SECP256K1_ARG_NONNULL(1);
/** Parse a variable-length public key into the pubkey object.
*

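Not part of the diff: a minimal usage sketch of the updated public API, assuming only the declarations above. The size passed to `secp256k1_scratch_space_create` is now the amount of usable scratch memory, and destruction takes the context so failures can be reported through its error callback.

#include <secp256k1.h>

int main(void) {
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
    /* Request 4096 usable bytes; per the comment above, slightly more
     * (<100 bytes) is actually allocated for internal accounting. */
    secp256k1_scratch_space *scratch = secp256k1_scratch_space_create(ctx, 4096);
    if (scratch == NULL) {
        secp256k1_context_destroy(ctx);
        return 1;
    }
    /* ... hand `scratch` to APIs that take a scratch space ... */
    /* Destruction now requires the context so that errors can be reported
     * through its error callback. */
    secp256k1_scratch_space_destroy(ctx, scratch);
    secp256k1_context_destroy(ctx);
    return 0;
}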

@@ -64,7 +64,7 @@ static void bench_ecmult(void* arg) {
size_t iter;
for (iter = 0; iter < iters; ++iter) {
data->ecmult_multi(&data->ctx->ecmult_ctx, data->scratch, &data->output[iter], data->includes_g ? &data->scalars[data->offset1] : NULL, bench_callback, arg, count - includes_g);
data->ecmult_multi(&data->ctx->error_callback, &data->ctx->ecmult_ctx, data->scratch, &data->output[iter], data->includes_g ? &data->scalars[data->offset1] : NULL, bench_callback, arg, count - includes_g);
data->offset1 = (data->offset1 + count) % POINTS;
data->offset2 = (data->offset2 + count - 1) % POINTS;
}
@@ -154,7 +154,7 @@ int main(int argc, char **argv) {
} else if(have_flag(argc, argv, "simple")) {
printf("Using simple algorithm:\n");
data.ecmult_multi = secp256k1_ecmult_multi_var;
secp256k1_scratch_space_destroy(data.scratch);
secp256k1_scratch_space_destroy(data.ctx, data.scratch);
data.scratch = NULL;
} else {
fprintf(stderr, "%s: unrecognized argument '%s'.\n", argv[0], argv[1]);
@@ -193,10 +193,10 @@ int main(int argc, char **argv) {
run_test(&data, i << p, 1);
}
}
secp256k1_context_destroy(data.ctx);
if (data.scratch != NULL) {
secp256k1_scratch_space_destroy(data.scratch);
secp256k1_scratch_space_destroy(data.ctx, data.scratch);
}
secp256k1_context_destroy(data.ctx);
free(data.scalars);
free(data.pubkeys);
free(data.seckeys);

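Not part of the diff: the hunks above also reorder teardown, because a scratch space must now be destroyed while the context it was created with is still alive. A hypothetical helper (name made up for illustration), assuming the public API shown earlier:

#include <secp256k1.h>

/* Hypothetical teardown helper: destroy the scratch space first, while `ctx`
 * is still valid, then destroy the context itself. */
static void teardown(secp256k1_context *ctx, secp256k1_scratch_space *scratch) {
    if (scratch != NULL) {
        secp256k1_scratch_space_destroy(ctx, scratch);
    }
    secp256k1_context_destroy(ctx);
}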

@@ -43,6 +43,6 @@ typedef int (secp256k1_ecmult_multi_callback)(secp256k1_scalar *sc, secp256k1_ge
* 0 if there is not enough scratch space for a single point or
* callback returns 0
*/
static int secp256k1_ecmult_multi_var(const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n);
static int secp256k1_ecmult_multi_var(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n);
#endif /* SECP256K1_ECMULT_H */

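Not part of the diff: a sketch of how an internal caller invokes the updated interface, assuming access to the context internals as the tests have; `multiply_points` and its parameters are hypothetical names used only for illustration.

/* Illustration only: threads the context's error callback through explicitly,
 * as the new signature above requires. */
static int multiply_points(const secp256k1_context *ctx, secp256k1_scratch *scratch,
                           secp256k1_gej *r, const secp256k1_scalar *inp_g_sc,
                           secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
    /* Returns 0 if the scratch space cannot hold even a single point or if
     * the callback reports failure, as documented above. */
    return secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx,
                                      scratch, r, inp_g_sc, cb, cbdata, n);
}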

@@ -648,52 +648,55 @@ static size_t secp256k1_strauss_scratch_size(size_t n_points) {
return n_points*point_size;
}
static int secp256k1_ecmult_strauss_batch(const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
static int secp256k1_ecmult_strauss_batch(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
secp256k1_gej* points;
secp256k1_scalar* scalars;
struct secp256k1_strauss_state state;
size_t i;
const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);
secp256k1_gej_set_infinity(r);
if (inp_g_sc == NULL && n_points == 0) {
return 1;
}
if (!secp256k1_scratch_allocate_frame(scratch, secp256k1_strauss_scratch_size(n_points), STRAUSS_SCRATCH_OBJECTS)) {
return 0;
}
points = (secp256k1_gej*)secp256k1_scratch_alloc(scratch, n_points * sizeof(secp256k1_gej));
scalars = (secp256k1_scalar*)secp256k1_scratch_alloc(scratch, n_points * sizeof(secp256k1_scalar));
state.prej = (secp256k1_gej*)secp256k1_scratch_alloc(scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_gej));
state.zr = (secp256k1_fe*)secp256k1_scratch_alloc(scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_fe));
points = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_gej));
scalars = (secp256k1_scalar*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_scalar));
state.prej = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_gej));
state.zr = (secp256k1_fe*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_fe));
#ifdef USE_ENDOMORPHISM
state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(scratch, n_points * 2 * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * 2 * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
state.pre_a_lam = state.pre_a + n_points * ECMULT_TABLE_SIZE(WINDOW_A);
#else
state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
#endif
state.ps = (struct secp256k1_strauss_point_state*)secp256k1_scratch_alloc(scratch, n_points * sizeof(struct secp256k1_strauss_point_state));
state.ps = (struct secp256k1_strauss_point_state*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(struct secp256k1_strauss_point_state));
if (points == NULL || scalars == NULL || state.prej == NULL || state.zr == NULL || state.pre_a == NULL) {
secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
return 0;
}
for (i = 0; i < n_points; i++) {
secp256k1_ge point;
if (!cb(&scalars[i], &point, i+cb_offset, cbdata)) {
secp256k1_scratch_deallocate_frame(scratch);
secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
return 0;
}
secp256k1_gej_set_ge(&points[i], &point);
}
secp256k1_ecmult_strauss_wnaf(ctx, &state, r, n_points, points, scalars, inp_g_sc);
secp256k1_scratch_deallocate_frame(scratch);
secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
return 1;
}
/* Wrapper for secp256k1_ecmult_multi_func interface */
static int secp256k1_ecmult_strauss_batch_single(const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
return secp256k1_ecmult_strauss_batch(actx, scratch, r, inp_g_sc, cb, cbdata, n, 0);
static int secp256k1_ecmult_strauss_batch_single(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
return secp256k1_ecmult_strauss_batch(error_callback, actx, scratch, r, inp_g_sc, cb, cbdata, n, 0);
}
static size_t secp256k1_strauss_max_points(secp256k1_scratch *scratch) {
return secp256k1_scratch_max_allocation(scratch, STRAUSS_SCRATCH_OBJECTS) / secp256k1_strauss_scratch_size(1);
static size_t secp256k1_strauss_max_points(const secp256k1_callback* error_callback, secp256k1_scratch *scratch) {
return secp256k1_scratch_max_allocation(error_callback, scratch, STRAUSS_SCRATCH_OBJECTS) / secp256k1_strauss_scratch_size(1);
}
/** Convert a number to WNAF notation.
@@ -985,7 +988,8 @@ static size_t secp256k1_pippenger_scratch_size(size_t n_points, int bucket_windo
return (sizeof(secp256k1_gej) << bucket_window) + sizeof(struct secp256k1_pippenger_state) + entries * entry_size;
}
static int secp256k1_ecmult_pippenger_batch(const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);
/* Use 2(n+1) with the endomorphism, n+1 without, when calculating batch
* sizes. The reason for +1 is that we add the G scalar to the list of
* other scalars. */
@@ -1010,15 +1014,21 @@ static int secp256k1_ecmult_pippenger_batch(const secp256k1_ecmult_context *ctx,
}
bucket_window = secp256k1_pippenger_bucket_window(n_points);
if (!secp256k1_scratch_allocate_frame(scratch, secp256k1_pippenger_scratch_size(n_points, bucket_window), PIPPENGER_SCRATCH_OBJECTS)) {
points = (secp256k1_ge *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*points));
scalars = (secp256k1_scalar *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*scalars));
state_space = (struct secp256k1_pippenger_state *) secp256k1_scratch_alloc(error_callback, scratch, sizeof(*state_space));
if (points == NULL || scalars == NULL || state_space == NULL) {
secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
return 0;
}
state_space->ps = (struct secp256k1_pippenger_point_state *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*state_space->ps));
state_space->wnaf_na = (int *) secp256k1_scratch_alloc(error_callback, scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int));
buckets = (secp256k1_gej *) secp256k1_scratch_alloc(error_callback, scratch, (1<<bucket_window) * sizeof(*buckets));
if (state_space->ps == NULL || state_space->wnaf_na == NULL || buckets == NULL) {
secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
return 0;
}
points = (secp256k1_ge *) secp256k1_scratch_alloc(scratch, entries * sizeof(*points));
scalars = (secp256k1_scalar *) secp256k1_scratch_alloc(scratch, entries * sizeof(*scalars));
state_space = (struct secp256k1_pippenger_state *) secp256k1_scratch_alloc(scratch, sizeof(*state_space));
state_space->ps = (struct secp256k1_pippenger_point_state *) secp256k1_scratch_alloc(scratch, entries * sizeof(*state_space->ps));
state_space->wnaf_na = (int *) secp256k1_scratch_alloc(scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int));
buckets = (secp256k1_gej *) secp256k1_scratch_alloc(scratch, sizeof(*buckets) << bucket_window);
if (inp_g_sc != NULL) {
scalars[0] = *inp_g_sc;
@@ -1032,7 +1042,7 @@ static int secp256k1_ecmult_pippenger_batch(const secp256k1_ecmult_context *ctx,
while (point_idx < n_points) {
if (!cb(&scalars[idx], &points[idx], point_idx + cb_offset, cbdata)) {
secp256k1_scratch_deallocate_frame(scratch);
secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
return 0;
}
idx++;
@@ -1056,13 +1066,13 @@ static int secp256k1_ecmult_pippenger_batch(const secp256k1_ecmult_context *ctx,
for(i = 0; i < 1<<bucket_window; i++) {
secp256k1_gej_clear(&buckets[i]);
}
secp256k1_scratch_deallocate_frame(scratch);
secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
return 1;
}
/* Wrapper for secp256k1_ecmult_multi_func interface */
static int secp256k1_ecmult_pippenger_batch_single(const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
return secp256k1_ecmult_pippenger_batch(actx, scratch, r, inp_g_sc, cb, cbdata, n, 0);
static int secp256k1_ecmult_pippenger_batch_single(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
return secp256k1_ecmult_pippenger_batch(error_callback, actx, scratch, r, inp_g_sc, cb, cbdata, n, 0);
}
/**
@@ -1070,8 +1080,8 @@ static int secp256k1_ecmult_pippenger_batch_single(const secp256k1_ecmult_contex
* a given scratch space. The function ensures that fewer points may also be
* used.
*/
static size_t secp256k1_pippenger_max_points(secp256k1_scratch *scratch) {
size_t max_alloc = secp256k1_scratch_max_allocation(scratch, PIPPENGER_SCRATCH_OBJECTS);
static size_t secp256k1_pippenger_max_points(const secp256k1_callback* error_callback, secp256k1_scratch *scratch) {
size_t max_alloc = secp256k1_scratch_max_allocation(error_callback, scratch, PIPPENGER_SCRATCH_OBJECTS);
int bucket_window;
size_t res = 0;
@@ -1153,11 +1163,11 @@ static int secp256k1_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n
return 1;
}
typedef int (*secp256k1_ecmult_multi_func)(const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t);
static int secp256k1_ecmult_multi_var(const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
typedef int (*secp256k1_ecmult_multi_func)(const secp256k1_callback* error_callback, const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t);
static int secp256k1_ecmult_multi_var(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
size_t i;
int (*f)(const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t, size_t);
int (*f)(const secp256k1_callback* error_callback, const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t, size_t);
size_t n_batches;
size_t n_batch_points;
@@ -1178,13 +1188,13 @@ static int secp256k1_ecmult_multi_var(const secp256k1_ecmult_context *ctx, secp2
* a threshold use Pippenger's algorithm. Otherwise use Strauss' algorithm.
* As a first step check if there's enough space for Pippenger's algo (which requires less space
* than Strauss' algo) and if not, use the simple algorithm. */
if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_pippenger_max_points(scratch), n)) {
if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_pippenger_max_points(error_callback, scratch), n)) {
return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n);
}
if (n_batch_points >= ECMULT_PIPPENGER_THRESHOLD) {
f = secp256k1_ecmult_pippenger_batch;
} else {
if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_strauss_max_points(scratch), n)) {
if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_strauss_max_points(error_callback, scratch), n)) {
return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n);
}
f = secp256k1_ecmult_strauss_batch;
@@ -1193,7 +1203,7 @@ static int secp256k1_ecmult_multi_var(const secp256k1_ecmult_context *ctx, secp2
size_t nbp = n < n_batch_points ? n : n_batch_points;
size_t offset = n_batch_points*i;
secp256k1_gej tmp;
if (!f(ctx, scratch, &tmp, i == 0 ? inp_g_sc : NULL, cb, cbdata, nbp, offset)) {
if (!f(error_callback, ctx, scratch, &tmp, i == 0 ? inp_g_sc : NULL, cb, cbdata, nbp, offset)) {
return 0;
}
secp256k1_gej_add_var(r, r, &tmp, NULL);


@@ -7,33 +7,36 @@
#ifndef _SECP256K1_SCRATCH_
#define _SECP256K1_SCRATCH_
#define SECP256K1_SCRATCH_MAX_FRAMES 5
/* The typedef is used internally; the struct name is used in the public API
* (where it is exposed as a different typedef) */
typedef struct secp256k1_scratch_space_struct {
void *data[SECP256K1_SCRATCH_MAX_FRAMES];
size_t offset[SECP256K1_SCRATCH_MAX_FRAMES];
size_t frame_size[SECP256K1_SCRATCH_MAX_FRAMES];
size_t frame;
/** guard against interpreting this object as other types */
unsigned char magic[8];
/** actual allocated data */
void *data;
/** amount that has been allocated (i.e. `data + offset` is the next
* available pointer) */
size_t alloc_size;
/** maximum size available to allocate */
size_t max_size;
const secp256k1_callback* error_callback;
} secp256k1_scratch;
static secp256k1_scratch* secp256k1_scratch_create(const secp256k1_callback* error_callback, size_t max_size);
static void secp256k1_scratch_destroy(secp256k1_scratch* scratch);
static void secp256k1_scratch_destroy(const secp256k1_callback* error_callback, secp256k1_scratch* scratch);
/** Attempts to allocate a new stack frame with `n` available bytes. Returns 1 on success, 0 on failure */
static int secp256k1_scratch_allocate_frame(secp256k1_scratch* scratch, size_t n, size_t objects);
/** Returns an opaque object used to "checkpoint" a scratch space. Used
* with `secp256k1_scratch_apply_checkpoint` to undo allocations. */
static size_t secp256k1_scratch_checkpoint(const secp256k1_callback* error_callback, const secp256k1_scratch* scratch);
/** Deallocates a stack frame */
static void secp256k1_scratch_deallocate_frame(secp256k1_scratch* scratch);
/** Applies a check point received from `secp256k1_scratch_checkpoint`,
* undoing all allocations since that point. */
static void secp256k1_scratch_apply_checkpoint(const secp256k1_callback* error_callback, secp256k1_scratch* scratch, size_t checkpoint);
/** Returns the maximum allocation the scratch space will allow */
static size_t secp256k1_scratch_max_allocation(const secp256k1_scratch* scratch, size_t n_objects);
static size_t secp256k1_scratch_max_allocation(const secp256k1_callback* error_callback, const secp256k1_scratch* scratch, size_t n_objects);
/** Returns a pointer into the most recently allocated frame, or NULL if there is insufficient available space */
static void *secp256k1_scratch_alloc(secp256k1_scratch* scratch, size_t n);
static void *secp256k1_scratch_alloc(const secp256k1_callback* error_callback, secp256k1_scratch* scratch, size_t n);
#endif

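Not part of the diff: the checkpoint API above replaces per-frame malloc/free with a single rollback point. A hedged sketch of the calling pattern it enables (the helper name is made up; the declarations and headers are the ones shown above):

#include "util.h"
#include "scratch.h"

/* Illustration only: record a checkpoint, allocate freely, then undo every
 * allocation made since the checkpoint in one call. */
static int with_temp_buffer(const secp256k1_callback *error_callback,
                            secp256k1_scratch *scratch, size_t n) {
    const size_t checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);
    unsigned char *buf = (unsigned char *)secp256k1_scratch_alloc(error_callback, scratch, n);
    int ret = 0;
    if (buf != NULL) {
        /* ... use buf as temporary working memory ... */
        ret = 1;
    }
    /* Roll the scratch space back to its state at the checkpoint. */
    secp256k1_scratch_apply_checkpoint(error_callback, scratch, checkpoint);
    return ret;
}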

@@ -10,70 +10,77 @@
#include "util.h"
#include "scratch.h"
static secp256k1_scratch* secp256k1_scratch_create(const secp256k1_callback* error_callback, size_t max_size) {
secp256k1_scratch* ret = (secp256k1_scratch*)checked_malloc(error_callback, sizeof(*ret));
static secp256k1_scratch* secp256k1_scratch_create(const secp256k1_callback* error_callback, size_t size) {
const size_t base_alloc = ((sizeof(secp256k1_scratch) + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT;
void *alloc = checked_malloc(error_callback, base_alloc + size);
secp256k1_scratch* ret = (secp256k1_scratch *)alloc;
if (ret != NULL) {
memset(ret, 0, sizeof(*ret));
ret->max_size = max_size;
ret->error_callback = error_callback;
memcpy(ret->magic, "scratch", 8);
ret->data = (void *) ((char *) alloc + base_alloc);
ret->max_size = size;
}
return ret;
}
static void secp256k1_scratch_destroy(secp256k1_scratch* scratch) {
static void secp256k1_scratch_destroy(const secp256k1_callback* error_callback, secp256k1_scratch* scratch) {
if (scratch != NULL) {
VERIFY_CHECK(scratch->frame == 0);
VERIFY_CHECK(scratch->alloc_size == 0); /* all checkpoints should be applied */
if (memcmp(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return;
}
memset(scratch->magic, 0, sizeof(scratch->magic));
free(scratch);
}
}
static size_t secp256k1_scratch_max_allocation(const secp256k1_scratch* scratch, size_t objects) {
size_t i = 0;
size_t allocated = 0;
for (i = 0; i < scratch->frame; i++) {
allocated += scratch->frame_size[i];
}
if (scratch->max_size - allocated <= objects * ALIGNMENT) {
static size_t secp256k1_scratch_checkpoint(const secp256k1_callback* error_callback, const secp256k1_scratch* scratch) {
if (memcmp(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return 0;
}
return scratch->max_size - allocated - objects * ALIGNMENT;
return scratch->alloc_size;
}
static int secp256k1_scratch_allocate_frame(secp256k1_scratch* scratch, size_t n, size_t objects) {
VERIFY_CHECK(scratch->frame < SECP256K1_SCRATCH_MAX_FRAMES);
static void secp256k1_scratch_apply_checkpoint(const secp256k1_callback* error_callback, secp256k1_scratch* scratch, size_t checkpoint) {
if (memcmp(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return;
}
if (checkpoint > scratch->alloc_size) {
secp256k1_callback_call(error_callback, "invalid checkpoint");
return;
}
scratch->alloc_size = checkpoint;
}
if (n <= secp256k1_scratch_max_allocation(scratch, objects)) {
n += objects * ALIGNMENT;
scratch->data[scratch->frame] = checked_malloc(scratch->error_callback, n);
if (scratch->data[scratch->frame] == NULL) {
return 0;
}
scratch->frame_size[scratch->frame] = n;
scratch->offset[scratch->frame] = 0;
scratch->frame++;
return 1;
} else {
static size_t secp256k1_scratch_max_allocation(const secp256k1_callback* error_callback, const secp256k1_scratch* scratch, size_t objects) {
if (memcmp(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return 0;
}
if (scratch->max_size - scratch->alloc_size <= objects * (ALIGNMENT - 1)) {
return 0;
}
return scratch->max_size - scratch->alloc_size - objects * (ALIGNMENT - 1);
}
static void secp256k1_scratch_deallocate_frame(secp256k1_scratch* scratch) {
VERIFY_CHECK(scratch->frame > 0);
scratch->frame -= 1;
free(scratch->data[scratch->frame]);
}
static void *secp256k1_scratch_alloc(secp256k1_scratch* scratch, size_t size) {
static void *secp256k1_scratch_alloc(const secp256k1_callback* error_callback, secp256k1_scratch* scratch, size_t size) {
void *ret;
size_t frame = scratch->frame - 1;
size = ROUND_TO_ALIGN(size);
if (scratch->frame == 0 || size + scratch->offset[frame] > scratch->frame_size[frame]) {
if (memcmp(scratch->magic, "scratch", 8) != 0) {
secp256k1_callback_call(error_callback, "invalid scratch space");
return NULL;
}
ret = (void *) ((unsigned char *) scratch->data[frame] + scratch->offset[frame]);
if (size > scratch->max_size - scratch->alloc_size) {
return NULL;
}
ret = (void *) ((char *) scratch->data + scratch->alloc_size);
memset(ret, 0, size);
scratch->offset[frame] += size;
scratch->alloc_size += size;
return ret;
}

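Not part of the diff: a worked illustration of the accounting in `secp256k1_scratch_max_allocation` above. Each of the `objects` planned allocations is rounded up to a multiple of `ALIGNMENT` by `secp256k1_scratch_alloc`, so each can consume up to `ALIGNMENT - 1` bytes of padding beyond the requested size; the value 16 used below is an assumption made only for this sketch.

#include <stddef.h>

#define EXAMPLE_ALIGNMENT 16  /* assumed value, for illustration only */

/* Mirrors the worst-case computation above: usable space is what remains of
 * max_size after current allocations and the padding that `objects` future
 * allocations could add. E.g. max_size=1000, alloc_size=0, objects=1 gives
 * 1000 - 15 = 985, matching the `1000 - (ALIGNMENT - 1)` check in the tests. */
static size_t example_max_allocation(size_t max_size, size_t alloc_size, size_t objects) {
    size_t padding = objects * (EXAMPLE_ALIGNMENT - 1);
    if (max_size - alloc_size <= padding) {
        return 0;
    }
    return max_size - alloc_size - padding;
}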

@@ -199,8 +199,9 @@ secp256k1_scratch_space* secp256k1_scratch_space_create(const secp256k1_context*
return secp256k1_scratch_create(&ctx->error_callback, max_size);
}
void secp256k1_scratch_space_destroy(secp256k1_scratch_space* scratch) {
secp256k1_scratch_destroy(scratch);
void secp256k1_scratch_space_destroy(const secp256k1_context *ctx, secp256k1_scratch_space* scratch) {
VERIFY_CHECK(ctx != NULL);
secp256k1_scratch_destroy(&ctx->error_callback, scratch);
}
static int secp256k1_pubkey_load(const secp256k1_context* ctx, secp256k1_ge* ge, const secp256k1_pubkey* pubkey) {


@@ -333,40 +333,73 @@ void run_context_tests(int use_prealloc) {
}
void run_scratch_tests(void) {
const size_t adj_alloc = ((500 + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT;
int32_t ecount = 0;
size_t checkpoint;
size_t checkpoint_2;
secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
secp256k1_scratch_space *scratch;
secp256k1_scratch_space local_scratch;
/* Test public API */
secp256k1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount);
secp256k1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount);
scratch = secp256k1_scratch_space_create(none, 1000);
CHECK(scratch != NULL);
CHECK(ecount == 0);
/* Test internal API */
CHECK(secp256k1_scratch_max_allocation(scratch, 0) == 1000);
CHECK(secp256k1_scratch_max_allocation(scratch, 1) < 1000);
CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000);
CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - (ALIGNMENT - 1));
CHECK(scratch->alloc_size == 0);
CHECK(scratch->alloc_size % ALIGNMENT == 0);
/* Allocating 500 bytes with no frame fails */
CHECK(secp256k1_scratch_alloc(scratch, 500) == NULL);
CHECK(secp256k1_scratch_max_allocation(scratch, 0) == 1000);
/* Allocating 500 bytes succeeds */
checkpoint = secp256k1_scratch_checkpoint(&none->error_callback, scratch);
CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, 500) != NULL);
CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000 - adj_alloc);
CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1));
CHECK(scratch->alloc_size != 0);
CHECK(scratch->alloc_size % ALIGNMENT == 0);
/* ...but pushing a new stack frame does affect the max allocation */
CHECK(secp256k1_scratch_allocate_frame(scratch, 500, 1 == 1));
CHECK(secp256k1_scratch_max_allocation(scratch, 1) < 500); /* 500 - ALIGNMENT */
CHECK(secp256k1_scratch_alloc(scratch, 500) != NULL);
CHECK(secp256k1_scratch_alloc(scratch, 500) == NULL);
/* Allocating another 500 bytes fails */
CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, 500) == NULL);
CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000 - adj_alloc);
CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1));
CHECK(scratch->alloc_size != 0);
CHECK(scratch->alloc_size % ALIGNMENT == 0);
CHECK(secp256k1_scratch_allocate_frame(scratch, 500, 1) == 0);
/* ...but it succeeds once we apply the checkpoint to undo it */
secp256k1_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint);
CHECK(scratch->alloc_size == 0);
CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000);
CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, 500) != NULL);
CHECK(scratch->alloc_size != 0);
/* ...and this effect is undone by popping the frame */
secp256k1_scratch_deallocate_frame(scratch);
CHECK(secp256k1_scratch_max_allocation(scratch, 0) == 1000);
CHECK(secp256k1_scratch_alloc(scratch, 500) == NULL);
/* try to apply a bad checkpoint */
checkpoint_2 = secp256k1_scratch_checkpoint(&none->error_callback, scratch);
secp256k1_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint);
CHECK(ecount == 0);
secp256k1_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint_2); /* checkpoint_2 is after checkpoint */
CHECK(ecount == 1);
secp256k1_scratch_apply_checkpoint(&none->error_callback, scratch, (size_t) -1); /* this is just wildly invalid */
CHECK(ecount == 2);
/* try to use badly initialized scratch space */
secp256k1_scratch_space_destroy(none, scratch);
memset(&local_scratch, 0, sizeof(local_scratch));
scratch = &local_scratch;
CHECK(!secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0));
CHECK(ecount == 3);
CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, 500) == NULL);
CHECK(ecount == 4);
secp256k1_scratch_space_destroy(none, scratch);
CHECK(ecount == 5);
/* cleanup */
secp256k1_scratch_space_destroy(scratch);
secp256k1_scratch_space_destroy(none, NULL); /* no-op */
secp256k1_context_destroy(none);
}
@@ -2655,7 +2688,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e
secp256k1_scalar_set_int(&szero, 0);
/* No points to multiply */
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, NULL, ecmult_multi_callback, &data, 0));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, NULL, ecmult_multi_callback, &data, 0));
/* Check 1- and 2-point multiplies against ecmult */
for (ncount = 0; ncount < count; ncount++) {
@@ -2671,31 +2704,31 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e
/* only G scalar */
secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &szero, &sc[0]);
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &sc[0], ecmult_multi_callback, &data, 0));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &sc[0], ecmult_multi_callback, &data, 0));
secp256k1_gej_neg(&r2, &r2);
secp256k1_gej_add_var(&r, &r, &r2, NULL);
CHECK(secp256k1_gej_is_infinity(&r));
/* 1-point */
secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &szero);
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 1));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 1));
secp256k1_gej_neg(&r2, &r2);
secp256k1_gej_add_var(&r, &r, &r2, NULL);
CHECK(secp256k1_gej_is_infinity(&r));
/* Try to multiply 1 point, but callback returns false */
CHECK(!ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_false_callback, &data, 1));
CHECK(!ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_false_callback, &data, 1));
/* 2-point */
secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &sc[1]);
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 2));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 2));
secp256k1_gej_neg(&r2, &r2);
secp256k1_gej_add_var(&r, &r, &r2, NULL);
CHECK(secp256k1_gej_is_infinity(&r));
/* 2-point with G scalar */
secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &sc[1]);
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &sc[1], ecmult_multi_callback, &data, 1));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &sc[1], ecmult_multi_callback, &data, 1));
secp256k1_gej_neg(&r2, &r2);
secp256k1_gej_add_var(&r, &r, &r2, NULL);
CHECK(secp256k1_gej_is_infinity(&r));
@@ -2712,7 +2745,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e
random_scalar_order(&sc[i]);
secp256k1_ge_set_infinity(&pt[i]);
}
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j]));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j]));
CHECK(secp256k1_gej_is_infinity(&r));
}
@@ -2722,7 +2755,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e
pt[i] = ptg;
secp256k1_scalar_set_int(&sc[i], 0);
}
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j]));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j]));
CHECK(secp256k1_gej_is_infinity(&r));
}
@@ -2735,7 +2768,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e
pt[2 * i + 1] = ptg;
}
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j]));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j]));
CHECK(secp256k1_gej_is_infinity(&r));
random_scalar_order(&sc[0]);
@@ -2748,7 +2781,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e
secp256k1_ge_neg(&pt[2*i+1], &pt[2*i]);
}
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j]));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j]));
CHECK(secp256k1_gej_is_infinity(&r));
}
@@ -2763,7 +2796,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e
secp256k1_scalar_negate(&sc[i], &sc[i]);
}
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 32));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 32));
CHECK(secp256k1_gej_is_infinity(&r));
}
@@ -2782,7 +2815,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e
}
secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &r, &sc[0], &szero);
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20));
secp256k1_gej_neg(&r2, &r2);
secp256k1_gej_add_var(&r, &r, &r2, NULL);
CHECK(secp256k1_gej_is_infinity(&r));
@@ -2805,7 +2838,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e
secp256k1_gej_set_ge(&p0j, &pt[0]);
secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &p0j, &rs, &szero);
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20));
secp256k1_gej_neg(&r2, &r2);
secp256k1_gej_add_var(&r, &r, &r2, NULL);
CHECK(secp256k1_gej_is_infinity(&r));
@@ -2818,13 +2851,13 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e
}
secp256k1_scalar_clear(&sc[0]);
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20));
secp256k1_scalar_clear(&sc[1]);
secp256k1_scalar_clear(&sc[2]);
secp256k1_scalar_clear(&sc[3]);
secp256k1_scalar_clear(&sc[4]);
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 6));
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 5));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 6));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 5));
CHECK(secp256k1_gej_is_infinity(&r));
/* Run through s0*(t0*P) + s1*(t1*P) exhaustively for many small values of s0, s1, t0, t1 */
@ -2869,7 +2902,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e
secp256k1_scalar_add(&tmp1, &tmp1, &tmp2);
secp256k1_ecmult(&ctx->ecmult_ctx, &expected, &ptgj, &tmp1, &szero);
CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &actual, &szero, ecmult_multi_callback, &data, 2));
CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &actual, &szero, ecmult_multi_callback, &data, 2));
secp256k1_gej_neg(&expected, &expected);
secp256k1_gej_add_var(&actual, &actual, &expected, NULL);
CHECK(secp256k1_gej_is_infinity(&actual));
@@ -2894,8 +2927,8 @@ void test_ecmult_multi_batch_single(secp256k1_ecmult_multi_func ecmult_multi) {
/* Try to multiply 1 point, but scratch space is empty.*/
scratch_empty = secp256k1_scratch_create(&ctx->error_callback, 0);
CHECK(!ecmult_multi(&ctx->ecmult_ctx, scratch_empty, &r, &szero, ecmult_multi_callback, &data, 1));
secp256k1_scratch_destroy(scratch_empty);
CHECK(!ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch_empty, &r, &szero, ecmult_multi_callback, &data, 1));
secp256k1_scratch_destroy(&ctx->error_callback, scratch_empty);
}
void test_secp256k1_pippenger_bucket_window_inv(void) {
@@ -2928,17 +2961,27 @@ void test_ecmult_multi_pippenger_max_points(void) {
int bucket_window = 0;
for(; scratch_size < max_size; scratch_size+=256) {
size_t i;
size_t total_alloc;
size_t checkpoint;
scratch = secp256k1_scratch_create(&ctx->error_callback, scratch_size);
CHECK(scratch != NULL);
n_points_supported = secp256k1_pippenger_max_points(scratch);
checkpoint = secp256k1_scratch_checkpoint(&ctx->error_callback, scratch);
n_points_supported = secp256k1_pippenger_max_points(&ctx->error_callback, scratch);
if (n_points_supported == 0) {
secp256k1_scratch_destroy(scratch);
secp256k1_scratch_destroy(&ctx->error_callback, scratch);
continue;
}
bucket_window = secp256k1_pippenger_bucket_window(n_points_supported);
CHECK(secp256k1_scratch_allocate_frame(scratch, secp256k1_pippenger_scratch_size(n_points_supported, bucket_window), PIPPENGER_SCRATCH_OBJECTS));
secp256k1_scratch_deallocate_frame(scratch);
secp256k1_scratch_destroy(scratch);
/* allocate `total_alloc` bytes over `PIPPENGER_SCRATCH_OBJECTS` many allocations */
total_alloc = secp256k1_pippenger_scratch_size(n_points_supported, bucket_window);
for (i = 0; i < PIPPENGER_SCRATCH_OBJECTS - 1; i++) {
CHECK(secp256k1_scratch_alloc(&ctx->error_callback, scratch, 1));
total_alloc--;
}
CHECK(secp256k1_scratch_alloc(&ctx->error_callback, scratch, total_alloc));
secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, checkpoint);
secp256k1_scratch_destroy(&ctx->error_callback, scratch);
}
CHECK(bucket_window == PIPPENGER_MAX_BUCKET_WINDOW);
}
@@ -3026,19 +3069,19 @@ void test_ecmult_multi_batching(void) {
/* Test with empty scratch space. It should compute the correct result using
* ecmult_mult_simple algorithm which doesn't require a scratch space. */
scratch = secp256k1_scratch_create(&ctx->error_callback, 0);
CHECK(secp256k1_ecmult_multi_var(&ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points));
CHECK(secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points));
secp256k1_gej_add_var(&r, &r, &r2, NULL);
CHECK(secp256k1_gej_is_infinity(&r));
secp256k1_scratch_destroy(scratch);
secp256k1_scratch_destroy(&ctx->error_callback, scratch);
/* Test with space for 1 point in pippenger. That's not enough because
* ecmult_multi selects strauss which requires more memory. It should
* therefore select the simple algorithm. */
scratch = secp256k1_scratch_create(&ctx->error_callback, secp256k1_pippenger_scratch_size(1, 1) + PIPPENGER_SCRATCH_OBJECTS*ALIGNMENT);
CHECK(secp256k1_ecmult_multi_var(&ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points));
CHECK(secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points));
secp256k1_gej_add_var(&r, &r, &r2, NULL);
CHECK(secp256k1_gej_is_infinity(&r));
secp256k1_scratch_destroy(scratch);
secp256k1_scratch_destroy(&ctx->error_callback, scratch);
for(i = 1; i <= n_points; i++) {
if (i > ECMULT_PIPPENGER_THRESHOLD) {
@@ -3049,10 +3092,10 @@ void test_ecmult_multi_batching(void) {
size_t scratch_size = secp256k1_strauss_scratch_size(i);
scratch = secp256k1_scratch_create(&ctx->error_callback, scratch_size + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT);
}
CHECK(secp256k1_ecmult_multi_var(&ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points));
CHECK(secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points));
secp256k1_gej_add_var(&r, &r, &r2, NULL);
CHECK(secp256k1_gej_is_infinity(&r));
secp256k1_scratch_destroy(scratch);
secp256k1_scratch_destroy(&ctx->error_callback, scratch);
}
free(sc);
free(pt);
@@ -3070,12 +3113,12 @@ void run_ecmult_multi_tests(void) {
test_ecmult_multi_batch_single(secp256k1_ecmult_pippenger_batch_single);
test_ecmult_multi(scratch, secp256k1_ecmult_strauss_batch_single);
test_ecmult_multi_batch_single(secp256k1_ecmult_strauss_batch_single);
secp256k1_scratch_destroy(scratch);
secp256k1_scratch_destroy(&ctx->error_callback, scratch);
/* Run test_ecmult_multi with space for exactly one point */
scratch = secp256k1_scratch_create(&ctx->error_callback, secp256k1_strauss_scratch_size(1) + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT);
test_ecmult_multi(scratch, secp256k1_ecmult_multi_var);
secp256k1_scratch_destroy(scratch);
secp256k1_scratch_destroy(&ctx->error_callback, scratch);
test_ecmult_multi_batch_size_helper();
test_ecmult_multi_batching();


@@ -212,14 +212,14 @@ void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_
data.pt[0] = group[x];
data.pt[1] = group[y];
secp256k1_ecmult_multi_var(&ctx->ecmult_ctx, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2);
secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2);
ge_equals_gej(&group[(i * x + j * y + k) % order], &tmp);
}
}
}
}
}
secp256k1_scratch_destroy(scratch);
secp256k1_scratch_destroy(&ctx->error_callback, scratch);
}
void r_from_k(secp256k1_scalar *r, const secp256k1_ge *group, int k) {